summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.appveyor.yml2
-rw-r--r--.travis.yml15
-rw-r--r--AUTHORS4
-rw-r--r--CHANGES163
-rw-r--r--EXAMPLES1012
-rw-r--r--Makefile2
-rw-r--r--doc/_static/conf.py.txt2
-rw-r--r--doc/_static/themes/agogo.png (renamed from doc/themes/agogo.png)bin25792 -> 25792 bytes
-rw-r--r--doc/_static/themes/alabaster.png (renamed from doc/themes/alabaster.png)bin32356 -> 32356 bytes
-rw-r--r--doc/_static/themes/bizstyle.png (renamed from doc/themes/bizstyle.png)bin27139 -> 27139 bytes
-rw-r--r--doc/_static/themes/classic.png (renamed from doc/themes/classic.png)bin39927 -> 39927 bytes
-rw-r--r--doc/_static/themes/fullsize/agogo.png (renamed from doc/themes/fullsize/agogo.png)bin56954 -> 56954 bytes
-rw-r--r--doc/_static/themes/fullsize/alabaster.png (renamed from doc/themes/fullsize/alabaster.png)bin40248 -> 40248 bytes
-rw-r--r--doc/_static/themes/fullsize/bizstyle.png (renamed from doc/themes/fullsize/bizstyle.png)bin75192 -> 75192 bytes
-rw-r--r--doc/_static/themes/fullsize/classic.png (renamed from doc/themes/fullsize/classic.png)bin72597 -> 72597 bytes
-rw-r--r--doc/_static/themes/fullsize/haiku.png (renamed from doc/themes/fullsize/haiku.png)bin84200 -> 84200 bytes
-rw-r--r--doc/_static/themes/fullsize/nature.png (renamed from doc/themes/fullsize/nature.png)bin32266 -> 32266 bytes
-rw-r--r--doc/_static/themes/fullsize/pyramid.png (renamed from doc/themes/fullsize/pyramid.png)bin102717 -> 102717 bytes
-rw-r--r--doc/_static/themes/fullsize/scrolls.png (renamed from doc/themes/fullsize/scrolls.png)bin88111 -> 88111 bytes
-rw-r--r--doc/_static/themes/fullsize/sphinx_rtd_theme.png (renamed from doc/themes/fullsize/sphinx_rtd_theme.png)bin39411 -> 39411 bytes
-rw-r--r--doc/_static/themes/fullsize/sphinxdoc.png (renamed from doc/themes/fullsize/sphinxdoc.png)bin84439 -> 84439 bytes
-rw-r--r--doc/_static/themes/fullsize/traditional.png (renamed from doc/themes/fullsize/traditional.png)bin91744 -> 91744 bytes
-rw-r--r--doc/_static/themes/haiku.png (renamed from doc/themes/haiku.png)bin43184 -> 43184 bytes
-rw-r--r--doc/_static/themes/nature.png (renamed from doc/themes/nature.png)bin28536 -> 28536 bytes
-rw-r--r--doc/_static/themes/pyramid.png (renamed from doc/themes/pyramid.png)bin38805 -> 38805 bytes
-rw-r--r--doc/_static/themes/scrolls.png (renamed from doc/themes/scrolls.png)bin27800 -> 27800 bytes
-rw-r--r--doc/_static/themes/sphinx_rtd_theme.png (renamed from doc/themes/sphinx_rtd_theme.png)bin29138 -> 29138 bytes
-rw-r--r--doc/_static/themes/sphinxdoc.png (renamed from doc/themes/sphinxdoc.png)bin30225 -> 30225 bytes
-rw-r--r--doc/_static/themes/traditional.png (renamed from doc/themes/traditional.png)bin32258 -> 32258 bytes
-rw-r--r--doc/_static/translation.png (renamed from doc/translation.png)bin13791 -> 13791 bytes
-rw-r--r--doc/conf.py16
-rw-r--r--doc/contents.rst8
-rw-r--r--doc/develop.rst4
-rw-r--r--doc/development/tutorials/helloworld.rst162
-rw-r--r--doc/development/tutorials/index.rst11
-rw-r--r--doc/development/tutorials/todo.rst (renamed from doc/extdev/tutorial.rst)124
-rw-r--r--doc/extdev/appapi.rst30
-rw-r--r--doc/extdev/envapi.rst8
-rw-r--r--doc/extdev/index.rst334
-rw-r--r--doc/extdev/logging.rst2
-rw-r--r--doc/extdev/projectapi.rst9
-rw-r--r--doc/faq.rst6
-rw-r--r--doc/intro.rst8
-rw-r--r--doc/latex.rst13
-rw-r--r--doc/theming.rst319
-rw-r--r--doc/usage/advanced/intl.rst (renamed from doc/intl.rst)179
-rw-r--r--doc/usage/advanced/setuptools.rst (renamed from doc/setuptools.rst)0
-rw-r--r--doc/usage/advanced/websupport/api.rst (renamed from doc/web/api.rst)2
-rw-r--r--doc/usage/advanced/websupport/index.rst (renamed from doc/websupport.rst)8
-rw-r--r--doc/usage/advanced/websupport/quickstart.rst (renamed from doc/web/quickstart.rst)37
-rw-r--r--doc/usage/advanced/websupport/searchadapters.rst (renamed from doc/web/searchadapters.rst)12
-rw-r--r--doc/usage/advanced/websupport/storagebackends.rst (renamed from doc/web/storagebackends.rst)4
-rw-r--r--doc/usage/builders/index.rst52
-rw-r--r--doc/usage/configuration.rst303
-rw-r--r--doc/usage/extensions/autodoc.rst24
-rw-r--r--doc/usage/extensions/example_google.py3
-rw-r--r--doc/usage/extensions/example_numpy.py3
-rw-r--r--doc/usage/extensions/inheritance.rst2
-rw-r--r--doc/usage/extensions/math.rst2
-rw-r--r--doc/usage/extensions/napoleon.rst4
-rw-r--r--doc/usage/extensions/viewcode.rst7
-rw-r--r--doc/usage/installation.rst15
-rw-r--r--doc/usage/markdown.rst41
-rw-r--r--doc/usage/quickstart.rst4
-rw-r--r--doc/usage/restructuredtext/directives.rst9
-rw-r--r--doc/usage/theming.rst339
-rw-r--r--setup.cfg1
-rw-r--r--setup.py44
-rw-r--r--sphinx/__init__.py56
-rw-r--r--sphinx/__main__.py3
-rw-r--r--sphinx/addnodes.py94
-rw-r--r--sphinx/apidoc.py41
-rw-r--r--sphinx/application.py279
-rw-r--r--sphinx/builders/__init__.py99
-rw-r--r--sphinx/builders/_epub_base.py134
-rw-r--r--sphinx/builders/applehelp.py43
-rw-r--r--sphinx/builders/changes.py42
-rw-r--r--sphinx/builders/devhelp.py26
-rw-r--r--sphinx/builders/dummy.py11
-rw-r--r--sphinx/builders/epub3.py31
-rw-r--r--sphinx/builders/gettext.py88
-rw-r--r--sphinx/builders/html.py521
-rw-r--r--sphinx/builders/htmlhelp.py65
-rw-r--r--sphinx/builders/latex/__init__.py131
-rw-r--r--sphinx/builders/latex/nodes.py1
-rw-r--r--sphinx/builders/latex/transforms.py140
-rw-r--r--sphinx/builders/linkcheck.py78
-rw-r--r--sphinx/builders/manpage.py46
-rw-r--r--sphinx/builders/qthelp.py90
-rw-r--r--sphinx/builders/texinfo.py106
-rw-r--r--sphinx/builders/text.py25
-rw-r--r--sphinx/builders/websupport.py3
-rw-r--r--sphinx/builders/xml.py26
-rw-r--r--sphinx/cmd/__init__.py1
-rw-r--r--sphinx/cmd/build.py41
-rw-r--r--sphinx/cmd/make_mode.py12
-rw-r--r--sphinx/cmd/quickstart.py104
-rw-r--r--sphinx/cmdline.py7
-rw-r--r--sphinx/config.py251
-rw-r--r--sphinx/deprecation.py28
-rw-r--r--sphinx/directives/__init__.py51
-rw-r--r--sphinx/directives/code.py76
-rw-r--r--sphinx/directives/other.py65
-rw-r--r--sphinx/directives/patches.py41
-rw-r--r--sphinx/domains/__init__.py90
-rw-r--r--sphinx/domains/c.py34
-rw-r--r--sphinx/domains/changeset.py46
-rw-r--r--sphinx/domains/cpp.py1004
-rw-r--r--sphinx/domains/javascript.py31
-rw-r--r--sphinx/domains/math.py30
-rw-r--r--sphinx/domains/python.py143
-rw-r--r--sphinx/domains/rst.py38
-rw-r--r--sphinx/domains/std.py171
-rw-r--r--sphinx/environment/__init__.py289
-rw-r--r--sphinx/environment/adapters/__init__.py1
-rw-r--r--sphinx/environment/adapters/asset.py5
-rw-r--r--sphinx/environment/adapters/indexentries.py29
-rw-r--r--sphinx/environment/adapters/toctree.py55
-rw-r--r--sphinx/environment/collectors/__init__.py19
-rw-r--r--sphinx/environment/collectors/asset.py26
-rw-r--r--sphinx/environment/collectors/dependencies.py12
-rw-r--r--sphinx/environment/collectors/indexentries.py17
-rw-r--r--sphinx/environment/collectors/metadata.py62
-rw-r--r--sphinx/environment/collectors/title.py7
-rw-r--r--sphinx/environment/collectors/toctree.py141
-rw-r--r--sphinx/errors.py7
-rw-r--r--sphinx/events.py22
-rw-r--r--sphinx/ext/__init__.py1
-rw-r--r--sphinx/ext/apidoc.py93
-rw-r--r--sphinx/ext/autodoc/__init__.py454
-rw-r--r--sphinx/ext/autodoc/directive.py33
-rw-r--r--sphinx/ext/autodoc/importer.py103
-rw-r--r--sphinx/ext/autodoc/inspector.py187
-rw-r--r--sphinx/ext/autosectionlabel.py10
-rw-r--r--sphinx/ext/autosummary/__init__.py162
-rw-r--r--sphinx/ext/autosummary/generate.py47
-rw-r--r--sphinx/ext/coverage.py37
-rw-r--r--sphinx/ext/doctest.py125
-rw-r--r--sphinx/ext/extlinks.py10
-rw-r--r--sphinx/ext/githubpages.py3
-rw-r--r--sphinx/ext/graphviz.py122
-rw-r--r--sphinx/ext/ifconfig.py7
-rw-r--r--sphinx/ext/imgconverter.py20
-rw-r--r--sphinx/ext/imgmath.py54
-rw-r--r--sphinx/ext/inheritance_diagram.py88
-rw-r--r--sphinx/ext/intersphinx.py114
-rw-r--r--sphinx/ext/jsmath.py18
-rw-r--r--sphinx/ext/linkcode.py22
-rw-r--r--sphinx/ext/mathbase.py11
-rw-r--r--sphinx/ext/mathjax.py19
-rw-r--r--sphinx/ext/napoleon/__init__.py72
-rw-r--r--sphinx/ext/napoleon/docstring.py254
-rw-r--r--sphinx/ext/napoleon/iterators.py5
-rw-r--r--sphinx/ext/todo.py66
-rw-r--r--sphinx/ext/viewcode.py38
-rw-r--r--sphinx/extension.py11
-rw-r--r--sphinx/highlighting.py46
-rw-r--r--sphinx/io.py171
-rw-r--r--sphinx/jinja2glue.py26
-rw-r--r--sphinx/locale/__init__.py135
-rw-r--r--sphinx/make_mode.py3
-rw-r--r--sphinx/parsers.py59
-rw-r--r--sphinx/project.py97
-rw-r--r--sphinx/pycode/__init__.py33
-rw-r--r--sphinx/pycode/parser.py92
-rw-r--r--sphinx/pygments_styles.py1
-rw-r--r--sphinx/quickstart.py41
-rw-r--r--sphinx/registry.py182
-rw-r--r--sphinx/roles.py62
-rw-r--r--sphinx/search/__init__.py170
-rw-r--r--sphinx/search/da.py7
-rw-r--r--sphinx/search/de.py7
-rw-r--r--sphinx/search/en.py5
-rw-r--r--sphinx/search/es.py7
-rw-r--r--sphinx/search/fi.py7
-rw-r--r--sphinx/search/fr.py7
-rw-r--r--sphinx/search/hu.py7
-rw-r--r--sphinx/search/it.py7
-rw-r--r--sphinx/search/ja.py605
-rw-r--r--sphinx/search/jssplitter.py1
-rw-r--r--sphinx/search/nl.py7
-rw-r--r--sphinx/search/no.py7
-rw-r--r--sphinx/search/pt.py7
-rw-r--r--sphinx/search/ro.py7
-rw-r--r--sphinx/search/ru.py7
-rw-r--r--sphinx/search/sv.py7
-rw-r--r--sphinx/search/tr.py7
-rw-r--r--sphinx/search/zh.py13
-rw-r--r--sphinx/setup_command.py19
-rw-r--r--sphinx/templates/latex/latex.tex_t30
-rw-r--r--sphinx/templates/latex/sphinxmessages.sty_t11
-rw-r--r--sphinx/templates/quickstart/conf.py_t61
-rw-r--r--sphinx/templates/texinfo/Makefile50
-rw-r--r--sphinx/testing/__init__.py1
-rw-r--r--sphinx/testing/fixtures.py9
-rw-r--r--sphinx/testing/path.py43
-rw-r--r--sphinx/testing/util.py74
-rw-r--r--sphinx/texinputs/sphinx.xdy89
-rw-r--r--sphinx/texinputs/sphinxcyrillic.sty53
-rw-r--r--sphinx/texinputs/sphinxhowto.cls13
-rw-r--r--sphinx/texinputs/sphinxmanual.cls15
-rw-r--r--sphinx/themes/agogo/layout.html2
-rw-r--r--sphinx/themes/basic/opensearch.xml2
-rw-r--r--sphinx/themes/basic/searchbox.html2
-rw-r--r--sphinx/themes/basic/static/searchtools.js17
-rw-r--r--sphinx/theming.py63
-rw-r--r--sphinx/transforms/__init__.py115
-rw-r--r--sphinx/transforms/compact_bullet_list.py17
-rw-r--r--sphinx/transforms/i18n.py197
-rw-r--r--sphinx/transforms/post_transforms/__init__.py57
-rw-r--r--sphinx/transforms/post_transforms/code.py16
-rw-r--r--sphinx/transforms/post_transforms/compat.py48
-rw-r--r--sphinx/transforms/post_transforms/images.py49
-rw-r--r--sphinx/transforms/references.py16
-rw-r--r--sphinx/util/__init__.py191
-rw-r--r--sphinx/util/build_phase.py6
-rw-r--r--sphinx/util/compat.py33
-rw-r--r--sphinx/util/console.py5
-rw-r--r--sphinx/util/docfields.py148
-rw-r--r--sphinx/util/docstrings.py5
-rw-r--r--sphinx/util/docutils.py140
-rw-r--r--sphinx/util/fileutil.py19
-rw-r--r--sphinx/util/i18n.py40
-rw-r--r--sphinx/util/images.py35
-rw-r--r--sphinx/util/inspect.py418
-rw-r--r--sphinx/util/inventory.py53
-rw-r--r--sphinx/util/jsdump.py21
-rw-r--r--sphinx/util/jsonimpl.py13
-rw-r--r--sphinx/util/logging.py144
-rw-r--r--sphinx/util/matching.py21
-rw-r--r--sphinx/util/math.py17
-rw-r--r--sphinx/util/nodes.py113
-rw-r--r--sphinx/util/osutil.py166
-rw-r--r--sphinx/util/parallel.py12
-rw-r--r--sphinx/util/png.py5
-rw-r--r--sphinx/util/pycompat.py161
-rw-r--r--sphinx/util/requests.py35
-rw-r--r--sphinx/util/rst.py48
-rw-r--r--sphinx/util/smartypants.py182
-rw-r--r--sphinx/util/stemmer/__init__.py13
-rw-r--r--sphinx/util/stemmer/porter.py16
-rw-r--r--sphinx/util/tags.py15
-rw-r--r--sphinx/util/template.py26
-rw-r--r--sphinx/util/texescape.py65
-rw-r--r--sphinx/util/typing.py20
-rw-r--r--sphinx/util/websupport.py1
-rw-r--r--sphinx/versioning.py29
-rw-r--r--sphinx/websupport/__init__.py28
-rw-r--r--sphinx/websupport/errors.py12
-rw-r--r--sphinx/websupport/search/__init__.py12
-rw-r--r--sphinx/websupport/search/nullsearch.py12
-rw-r--r--sphinx/websupport/search/whooshsearch.py12
-rw-r--r--sphinx/websupport/search/xapiansearch.py12
-rw-r--r--sphinx/websupport/storage/__init__.py12
-rw-r--r--sphinx/websupport/storage/differ.py12
-rw-r--r--sphinx/websupport/storage/sqlalchemy_db.py13
-rw-r--r--sphinx/websupport/storage/sqlalchemystorage.py12
-rw-r--r--sphinx/writers/__init__.py1
-rw-r--r--sphinx/writers/html.py336
-rw-r--r--sphinx/writers/html5.py324
-rw-r--r--sphinx/writers/latex.py997
-rw-r--r--sphinx/writers/manpage.py273
-rw-r--r--sphinx/writers/texinfo.py600
-rw-r--r--sphinx/writers/text.py792
-rw-r--r--sphinx/writers/websupport.py1
-rw-r--r--sphinx/writers/xml.py14
-rw-r--r--tests/conftest.py11
-rw-r--r--tests/py3/test_util_inspect_py3.py26
-rw-r--r--tests/py35/test_autodoc_py35.py348
-rw-r--r--tests/roots/test-api-set-translator/conf.py6
-rwxr-xr-xtests/roots/test-apidoc-toc/mypackage/main.py3
-rw-r--r--tests/roots/test-autosummary/index.rst (renamed from tests/roots/test-autosummary/contents.rst)0
-rw-r--r--tests/roots/test-build-text/conf.py1
-rw-r--r--tests/roots/test-build-text/index.txt (renamed from tests/roots/test-build-text/contents.txt)0
-rw-r--r--tests/roots/test-build-text/table.txt14
-rw-r--r--tests/roots/test-build-text/table_colspan.txt7
-rw-r--r--tests/roots/test-build-text/table_colspan_and_rowspan.txt7
-rw-r--r--tests/roots/test-build-text/table_colspan_left.txt7
-rw-r--r--tests/roots/test-build-text/table_rowspan.txt7
-rw-r--r--tests/roots/test-circular/index.rst (renamed from tests/roots/test-circular/contents.rst)0
-rw-r--r--tests/roots/test-circular/sub.rst2
-rw-r--r--tests/roots/test-correct-year/conf.py2
-rw-r--r--tests/roots/test-correct-year/index.rst (renamed from tests/roots/test-correct-year/contents.rst)0
-rw-r--r--tests/roots/test-directive-only/index.rst (renamed from tests/roots/test-directive-only/contents.rst)0
-rw-r--r--tests/roots/test-docutilsconf/index.txt (renamed from tests/roots/test-docutilsconf/contents.txt)0
-rw-r--r--tests/roots/test-ext-autodoc/index.rst (renamed from tests/roots/test-ext-autodoc/contents.rst)0
-rw-r--r--tests/roots/test-ext-autodoc/target/__init__.py9
-rw-r--r--tests/roots/test-ext-autodoc/target/coroutine.py8
-rw-r--r--tests/roots/test-ext-autodoc/target/enum.py1
-rw-r--r--tests/roots/test-ext-autodoc/target/need_mocks.py (renamed from tests/roots/test-root/autodoc_missing_imports.py)0
-rw-r--r--tests/roots/test-ext-autosummary/index.rst (renamed from tests/roots/test-ext-autosummary/contents.rst)0
-rw-r--r--tests/roots/test-ext-graphviz/index.rst2
-rw-r--r--tests/roots/test-ext-viewcode-find/not_a_package/__init__.py2
-rw-r--r--tests/roots/test-ext-viewcode/spam/__init__.py2
-rw-r--r--tests/roots/test-gettext-template/index.rst (renamed from tests/roots/test-gettext-template/contents.rst)0
-rw-r--r--tests/roots/test-inheritance/index.rst (renamed from tests/roots/test-inheritance/contents.rst)0
-rw-r--r--tests/roots/test-intl/_templates/contents.html (renamed from tests/roots/test-intl/_templates/index.html)0
-rw-r--r--tests/roots/test-intl/conf.py2
-rw-r--r--tests/roots/test-intl/index.po (renamed from tests/roots/test-intl/contents.po)0
-rw-r--r--tests/roots/test-intl/index.txt (renamed from tests/roots/test-intl/contents.txt)2
-rw-r--r--tests/roots/test-intl/role_xref.po8
-rw-r--r--tests/roots/test-intl/role_xref.txt4
-rw-r--r--tests/roots/test-intl/subdir/index.txt (renamed from tests/roots/test-intl/subdir/contents.txt)0
-rw-r--r--tests/roots/test-metadata/conf.py3
-rw-r--r--tests/roots/test-metadata/index.rst (renamed from tests/roots/test-root/metadata.add)15
-rw-r--r--tests/roots/test-numbered-circular/index.rst (renamed from tests/roots/test-numbered-circular/contents.rst)0
-rw-r--r--tests/roots/test-numbered-circular/sub.rst2
-rw-r--r--tests/roots/test-root/autodoc.txt2
-rw-r--r--tests/roots/test-root/autodoc_target.py9
-rw-r--r--tests/roots/test-root/conf.py26
-rw-r--r--tests/roots/test-root/index.txt (renamed from tests/roots/test-root/contents.txt)1
-rw-r--r--tests/roots/test-setup/doc/index.txt (renamed from tests/roots/test-setup/doc/contents.txt)0
-rw-r--r--tests/roots/test-templating/index.txt (renamed from tests/roots/test-templating/contents.txt)0
-rw-r--r--tests/test_api_translator.py1
-rw-r--r--tests/test_application.py6
-rw-r--r--tests/test_autodoc.py251
-rw-r--r--tests/test_build.py20
-rw-r--r--tests/test_build_applehelp.py9
-rw-r--r--tests/test_build_epub.py13
-rw-r--r--tests/test_build_gettext.py4
-rw-r--r--tests/test_build_html.py38
-rw-r--r--tests/test_build_html5.py19
-rw-r--r--tests/test_build_htmlhelp.py49
-rw-r--r--tests/test_build_latex.py90
-rw-r--r--tests/test_build_linkcheck.py2
-rw-r--r--tests/test_build_manpage.py22
-rw-r--r--tests/test_build_qthelp.py1
-rw-r--r--tests/test_build_texinfo.py32
-rw-r--r--tests/test_build_text.py119
-rw-r--r--tests/test_builder.py5
-rw-r--r--tests/test_catalogs.py1
-rw-r--r--tests/test_config.py60
-rw-r--r--tests/test_correct_year.py3
-rw-r--r--tests/test_directive_code.py41
-rw-r--r--tests/test_directive_only.py1
-rw-r--r--tests/test_directive_other.py5
-rw-r--r--tests/test_docutilsconf.py11
-rw-r--r--tests/test_domain_cpp.py18
-rw-r--r--tests/test_domain_js.py69
-rw-r--r--tests/test_domain_py.py75
-rw-r--r--tests/test_domain_rst.py13
-rw-r--r--tests/test_domain_std.py1
-rw-r--r--tests/test_environment.py45
-rw-r--r--tests/test_environment_indexentries.py84
-rw-r--r--tests/test_environment_toctree.py7
-rw-r--r--tests/test_ext_apidoc.py19
-rw-r--r--tests/test_ext_autodoc.py3
-rw-r--r--tests/test_ext_autodoc_importer.py35
-rw-r--r--tests/test_ext_autosectionlabel.py9
-rw-r--r--tests/test_ext_autosummary.py10
-rw-r--r--tests/test_ext_coverage.py1
-rw-r--r--tests/test_ext_doctest.py23
-rw-r--r--tests/test_ext_githubpages.py1
-rw-r--r--tests/test_ext_graphviz.py8
-rw-r--r--tests/test_ext_ifconfig.py1
-rw-r--r--tests/test_ext_imgconverter.py1
-rw-r--r--tests/test_ext_inheritance.py1
-rw-r--r--tests/test_ext_inheritance_diagram.py1
-rw-r--r--tests/test_ext_intersphinx.py14
-rw-r--r--tests/test_ext_math.py25
-rw-r--r--tests/test_ext_napoleon.py3
-rw-r--r--tests/test_ext_napoleon_docstring.py100
-rw-r--r--tests/test_ext_napoleon_iterators.py5
-rw-r--r--tests/test_ext_todo.py1
-rw-r--r--tests/test_ext_viewcode.py9
-rw-r--r--tests/test_highlighting.py1
-rw-r--r--tests/test_intl.py318
-rw-r--r--tests/test_io.py4
-rw-r--r--tests/test_locale.py1
-rw-r--r--tests/test_markup.py30
-rw-r--r--tests/test_metadata.py51
-rw-r--r--tests/test_parser.py65
-rw-r--r--tests/test_project.py83
-rw-r--r--tests/test_pycode.py13
-rw-r--r--tests/test_pycode_parser.py3
-rw-r--r--tests/test_quickstart.py49
-rw-r--r--tests/test_roles.py37
-rw-r--r--tests/test_search.py13
-rw-r--r--tests/test_setup_command.py35
-rw-r--r--tests/test_smartquotes.py19
-rw-r--r--tests/test_templating.py3
-rw-r--r--tests/test_theming.py1
-rw-r--r--tests/test_toctree.py1
-rw-r--r--tests/test_transforms_post_transforms_code.py1
-rw-r--r--tests/test_util.py45
-rw-r--r--tests/test_util_docstrings.py1
-rw-r--r--tests/test_util_docutils.py1
-rw-r--r--tests/test_util_fileutil.py3
-rw-r--r--tests/test_util_i18n.py4
-rw-r--r--tests/test_util_images.py4
-rw-r--r--tests/test_util_inspect.py131
-rw-r--r--tests/test_util_inventory.py16
-rw-r--r--tests/test_util_jsdump.py3
-rw-r--r--tests/test_util_logging.py25
-rw-r--r--tests/test_util_matching.py1
-rw-r--r--tests/test_util_nodes.py40
-rw-r--r--tests/test_util_pycompat.py42
-rw-r--r--tests/test_util_rst.py71
-rw-r--r--tests/test_versioning.py3
-rw-r--r--tests/test_websupport.py36
-rw-r--r--tests/test_writer_latex.py2
-rw-r--r--tests/typing_test_data.py8
-rw-r--r--tox.ini23
-rwxr-xr-xutils/bump_version.py6
-rw-r--r--utils/checks.py16
-rw-r--r--utils/jssplitter_generator.py9
406 files changed, 11101 insertions, 11311 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index a0c948246..9da9bc56d 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -5,8 +5,6 @@ environment:
PYTHONWARNINGS: all
matrix:
- - PYTHON: 27
- TEST_IGNORE: --ignore py35
- PYTHON: 37
- PYTHON: 37-x64
diff --git a/.travis.yml b/.travis.yml
index f156d59d7..4d03e3e13 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,6 +1,6 @@
language: python
sudo: false
-dist: trusty
+dist: xenial
cache: pip
env:
@@ -11,28 +11,17 @@ env:
matrix:
include:
- - python: 'pypy'
- env: TOXENV=pypy
- - python: '2.7'
+ - python: '3.5'
env:
- TOXENV=du13
- - PYTEST_ADDOPTS="--cov ./ --cov-append --cov-config setup.cfg"
- - python: '3.4'
- env: TOXENV=py34
- - python: '3.5'
- env: TOXENV=py35
- python: '3.6'
env:
- TOXENV=py36
- PYTEST_ADDOPTS="--cov ./ --cov-append --cov-config setup.cfg"
- python: '3.7'
env: TOXENV=py37
- dist: xenial
- sudo: true
- python: 'nightly'
env: TOXENV=py38
- dist: xenial
- sudo: true
- python: '3.6'
env: TOXENV=docs
- python: '3.6'
diff --git a/AUTHORS b/AUTHORS
index 1cbb20137..aef4410be 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -38,6 +38,7 @@ Other contributors, listed alphabetically, are:
* Zac Hatfield-Dodds -- doctest reporting improvements
* Doug Hellmann -- graphviz improvements
* Tim Hoffmann -- theme improvements
+* Antti Kaihola -- doctest extension (skipif option)
* Dave Kuhlman -- original LaTeX writer
* Blaise Laflamme -- pyramid theme
* Chris Lamb -- reproducibility fixes
@@ -56,6 +57,7 @@ Other contributors, listed alphabetically, are:
* Ezio Melotti -- collapsible sidebar JavaScript
* Bruce Mitchener -- Minor epub improvement
* Daniel Neuhäuser -- JavaScript domain, Python 3 support (GSOC)
+* Julien Palard -- Colspan and rowspan in text builder
* Christopher Perkins -- autosummary integration
* Benjamin Peterson -- unittests
* \T. Powers -- HTML output improvements
@@ -67,6 +69,7 @@ Other contributors, listed alphabetically, are:
* Antonio Valentino -- qthelp builder, docstring inheritance
* Filip Vavera -- napoleon todo directive
* Pauli Virtanen -- autodoc improvements, autosummary extension
+* Eric N. Vander Weele -- autodoc improvements
* Stefan van der Walt -- autosummary extension
* Thomas Waldmann -- apidoc module fixes
* John Waltman -- Texinfo builder
@@ -78,6 +81,7 @@ Other contributors, listed alphabetically, are:
* Hong Xu -- svg support in imgmath extension and various bug fixes
* Stephen Finucane -- setup command improvements and documentation
* Daniel Pizetta -- inheritance diagram improvements
+* KINEBUCHI Tomohiko -- typing Sphinx as well as docutils
Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
index 9ce5d7074..7f8197c90 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,164 @@
+Release 2.0.0 (in development)
+==============================
+
+Dependencies
+------------
+
+* LaTeX builder now depends on TeX Live 2015 or above.
+* LaTeX builder (with ``'pdflatex'`` :confval:`latex_engine`) will process
+ Unicode Greek letters in text (not in math mark-up) via the text font and
+ will not escape them to math mark-up. See the discussion of the
+ ``'fontenc'`` key of :confval:`latex_elements`; such (optional) support for
+ Greek adds, for example on Ubuntu xenial, the ``texlive-lang-greek`` and (if
+ default font set-up is not modified) ``cm-super(-minimal)`` as additional
+ Sphinx LaTeX requirements.
+* LaTeX builder with :confval:`latex_engine` set to ``'xelatex'`` or to
+ ``'lualatex'`` requires (by default) the ``FreeFont`` fonts,
+ which in Ubuntu xenial are provided by package ``fonts-freefont-otf``, and
+ e.g. in Fedora 29 via package ``texlive-gnu-freefont``.
+* requests 2.5.0 or above
+* The six package is no longer a dependency.
+
+Incompatible changes
+--------------------
+
+* Drop python 2.7 and 3.4 support
+* Drop docutils 0.11 support
+* The default setting for :confval:`master_doc` is changed to ``'index'`` which
+ has been longly used as default of sphinx-quickstart.
+* LaTeX: Move message resources to ``sphinxmessage.sty``
+* LaTeX: Stop using ``\captions<lang>`` macro for some labels
+* LaTeX: for ``'xelatex'`` and ``'lualatex'``, use the ``FreeFont`` OpenType
+ fonts as default choice (refs: #5645)
+* LaTeX: ``'xelatex'`` and ``'lualatex'`` now use ``\small`` in code-blocks
+ (due to ``FreeMono`` character width) like ``'pdflatex'`` already did (due
+ to ``Courier`` character width). You may need to adjust this via
+ :confval:`latex_elements` ``'fvset'`` key, in case of usage of some other
+ OpenType fonts (refs: #5768)
+* LaTeX: Greek letters in text are not escaped to math mode mark-up, and they
+ will use the text font not the math font. The ``LGR`` font encoding must be
+ added to the ``'fontenc'`` key of :confval:`latex_elements` for this to work
+ (only if it is needed by the document, of course).
+* LaTeX: setting the :confval:`language` to ``'en'`` triggered ``Sonny`` option
+ of ``fncychap``, now it is ``Bjarne`` to match case of no language specified.
+ (refs: #5772)
+* #5770: doctest: Follow :confval:`highlight_language` on highlighting doctest
+ block. As a result, they are highlighted as python3 by default.
+* The order of argument for ``HTMLTranslator``, ``HTML5Translator`` and
+ ``ManualPageTranslator`` are changed
+* LaTeX: hard-coded redefinitions of ``\l@section`` and ``\l@subsection``
+ formerly done during loading of ``'manual'`` docclass get executed later, at
+ time of ``\sphinxtableofcontents``. This means that custom user definitions
+ from LaTeX preamble now get overwritten. Use ``\sphinxtableofcontentshook``
+ to insert custom user definitions. See :ref:`latex-macros`.
+
+Deprecated
+----------
+
+* Support for evaluating Python 2 syntax is deprecated. This includes
+ configuration files which should be converted to Python 3.
+* The ``encoding`` argument of ``autodoc.Documenter.get_doc()``,
+ ``autodoc.DocstringSignatureMixin.get_doc()``,
+ ``autodoc.DocstringSignatureMixin._find_signature()``, and
+ ``autodoc.ClassDocumenter.get_doc()`` are deprecated.
+* The ``importer`` argument of ``sphinx.ext.autodoc.importer._MockModule``
+* The ``nodetype`` argument of ``sphinx.search.WordCollector.
+ is_meta_keywords()``
+* The ``suffix`` argument of ``env.doc2path()`` is deprecated.
+* The string style ``base`` argument of ``env.doc2path()`` is deprecated.
+* The fallback to allow omitting the ``filename`` argument from an overridden
+ ``IndexBuilder.feed()`` method is deprecated.
+* ``sphinx.addnodes.abbreviation``
+* ``sphinx.application.Sphinx._setting_up_extension``
+* ``sphinx.cmd.quickstart.term_decode()``
+* ``sphinx.cmd.quickstart.TERM_ENCODING``
+* ``sphinx.config.check_unicode()``
+* ``sphinx.config.string_classes``
+* ``sphinx.domains.cpp.DefinitionError.description``
+* ``sphinx.domains.cpp.NoOldIdError.description``
+* ``sphinx.domains.cpp.UnsupportedMultiCharacterCharLiteral.decoded``
+* ``sphinx.ext.autodoc.importer._MockImporter``
+* ``sphinx.ext.autosummary.Autosummary.warn()``
+* ``sphinx.ext.autosummary.Autosummary.genopt``
+* ``sphinx.ext.autosummary.Autosummary.warnings``
+* ``sphinx.ext.autosummary.Autosummary.result``
+* ``sphinx.ext.doctest.doctest_encode()``
+* ``sphinx.io.SphinxBaseFileInput``
+* ``sphinx.io.SphinxFileInput.supported``
+* ``sphinx.io.SphinxRSTFileInput``
+* ``sphinx.registry.SphinxComponentRegistry.add_source_input()``
+* ``sphinx.testing.util.remove_unicode_literal()``
+* ``sphinx.util.attrdict``
+* ``sphinx.util.force_decode()``
+* ``sphinx.util.get_matching_docs()``
+* ``sphinx.util.inspect.Parameter``
+* ``sphinx.util.osutil.EEXIST``
+* ``sphinx.util.osutil.EINVAL``
+* ``sphinx.util.osutil.ENOENT``
+* ``sphinx.util.osutil.EPIPE``
+* ``sphinx.util.osutil.walk()``
+* ``sphinx.util.PeekableIterator``
+* ``sphinx.util.pycompat.UnicodeMixin``
+* ``sphinx.util.pycompat.u``
+* ``sphinx.writers.latex.LaTeXTranslator._make_visit_admonition()``
+* ``sphinx.writers.latex.LaTeXTranslator.babel_defmacro()``
+* ``sphinx.writers.latex.LaTeXTranslator.collect_footnotes()``
+* ``sphinx.writers.texinfo.TexinfoTranslator._make_visit_admonition()``
+* ``sphinx.writers.text.TextTranslator._make_depart_admonition()``
+* template variables for LaTeX template
+
+ - ``logo``
+ - ``pageautorefname``
+ - ``translatablestrings``
+
+For more details, see :ref:`deprecation APIs list <dev-deprecated-apis>`.
+
+Features added
+--------------
+
+* #1618: The search results preview of generated HTML documentation is
+ reader-friendlier: instead of showing the snippets as raw reStructuredText
+ markup, Sphinx now renders the corresponding HTML. This means the Sphinx
+ extension `Sphinx: pretty search results`__ is no longer necessary. Note that
+ changes to the search function of your custom or 3rd-party HTML template might
+ overwrite this improvement.
+
+ __ https://github.com/sphinx-contrib/sphinx-pretty-searchresults
+
+* #4182: autodoc: Support :confval:`suppress_warnings`
+* #5533: autodoc: :confval:`autodoc_default_options` supports ``member-order``
+* #4018: htmlhelp: Add :confval:`htmlhelp_file_suffix` and
+ :confval:`htmlhelp_link_suffix`
+* #5559: text: Support complex tables (colspan and rowspan)
+* LaTeX: support rendering (not in math, yet) of Greek and Cyrillic Unicode
+ letters in non-Cyrillic document even with ``'pdflatex'`` as
+ :confval:`latex_engine` (refs: #5645)
+* #5660: The ``versionadded``, ``versionchanged`` and ``deprecated`` directives
+ are now generated with their own specific CSS classes
+ (``added``, ``changed`` and ``deprecated``, respectively) in addition to the
+ generic ``versionmodified`` class.
+* #5841: apidoc: Add --extensions option to sphinx-apidoc
+
+Bugs fixed
+----------
+
+* #1682: LaTeX: writer should not translate Greek unicode, but use textgreek
+ package
+* #5247: LaTeX: PDF does not build with default font config for Russian
+ language and ``'xelatex'`` or ``'lualatex'`` as :confval:`latex_engine`
+ (refs: #5251)
+* #5248: LaTeX: Greek letters in section titles disappear from PDF bookmarks
+* #5249: LaTeX: Unicode Greek letters in math directive break PDF build
+ (fix requires extra set-up, see :confval:`latex_elements` ``'textgreek'`` key
+ and/or :confval:`latex_engine` setting)
+* #5772: LaTeX: should the Bjarne style of fncychap be used for English also
+ if passed as language option?
+* #5179: LaTeX: (lualatex only) escaping of ``>`` by ``\textgreater{}`` is not
+ enough as ``\textgreater{}\textgreater{}`` applies TeX-ligature
+
+Testing
+--------
+
Release 1.8.4 (in development)
==============================
@@ -2142,7 +2303,7 @@ Incompatible changes
parsing is attempted to distinguish valid code. To get the old behavior back,
add ``highlight_language = "python"`` to conf.py.
* `Locale Date Markup Language
- <http://unicode.org/reports/tr35/tr35-dates.html#Date_Format_Patterns>`_ like
+ <https://unicode.org/reports/tr35/tr35-dates.html#Date_Format_Patterns>`_ like
``"MMMM dd, YYYY"`` is default format for `today_fmt` and `html_last_updated_fmt`.
However strftime format like ``"%B %d, %Y"`` is also supported for backward
compatibility until Sphinx-1.5. Later format will be disabled from Sphinx-1.5.
diff --git a/EXAMPLES b/EXAMPLES
index 7606bcd5b..2426c46e2 100644
--- a/EXAMPLES
+++ b/EXAMPLES
@@ -12,714 +12,398 @@ interesting examples.
Documentation using the alabaster theme
---------------------------------------
-* `Alabaster`__
-* `Blinker`__
-* `Calibre`__
-* `Click`__ (customized)
-* `coala`__ (customized)
-* `CodePy`__
-* `Fabric`__
-* `Fityk`__
-* `Flask`__
-* `Flask-OpenID`__
-* `Invoke`__
-* `Jinja`__
-* `Lino`__ (customized)
-* `marbl`__
-* `MDAnalysis`__ (customized)
-* `MeshPy`__
-* `PyCUDA`__
-* `PyOpenCL`__
-* `PyLangAcq`__
-* `pytest`__ (customized)
-* `python-apt`__
-* `PyVisfile`__
-* `Requests`__
-* `searx`__
-* `Spyder`__ (customized)
-* `Tablib`__
-* `urllib3`__ (customized)
-* `Werkzeug`__ (customized)
-
-__ https://alabaster.readthedocs.io/
-__ https://pythonhosted.org/blinker/
-__ https://manual.calibre-ebook.com/
-__ http://click.pocoo.org/
-__ https://docs.coala.io/
-__ https://documen.tician.de/codepy/
-__ http://docs.fabfile.org/
-__ http://fityk.nieto.pl/
-__ http://flask.pocoo.org/docs/
-__ https://pythonhosted.org/Flask-OpenID/
-__ http://docs.pyinvoke.org/
-__ http://jinja.pocoo.org/docs/
-__ http://www.lino-framework.org/
-__ https://getmarbl.readthedocs.io/
-__ http://www.mdanalysis.org/docs/
-__ https://documen.tician.de/meshpy/
-__ https://documen.tician.de/pycuda/
-__ https://documen.tician.de/pyopencl/
-__ http://pylangacq.org/
-__ https://docs.pytest.org/
-__ https://apt.alioth.debian.org/python-apt-doc/
-__ https://documen.tician.de/pyvisfile/
-__ http://www.python-requests.org/
-__ https://asciimoo.github.io/searx/
-__ https://docs.spyder-ide.org/
-__ http://docs.python-tablib.org/
-__ https://urllib3.readthedocs.io/
-__ http://werkzeug.pocoo.org/docs/
+* `Alabaster <https://alabaster.readthedocs.io/>`__
+* `Blinker <https://pythonhosted.org/blinker/>`__
+* `Calibre <https://manual.calibre-ebook.com/>`__
+* `Click <http://click.pocoo.org/>`__ (customized)
+* `coala <https://docs.coala.io/>`__ (customized)
+* `CodePy <https://documen.tician.de/codepy/>`__
+* `Fabric <https://docs.fabfile.org/>`__
+* `Fityk <https://fityk.nieto.pl/>`__
+* `Flask <http://flask.pocoo.org/docs/>`__
+* `Flask-OpenID <https://pythonhosted.org/Flask-OpenID/>`__
+* `Invoke <https://docs.pyinvoke.org/>`__
+* `Jinja <http://jinja.pocoo.org/docs/>`__
+* `Lino <http://www.lino-framework.org/>`__ (customized)
+* `marbl <https://getmarbl.readthedocs.io/>`__
+* `MDAnalysis <https://www.mdanalysis.org/docs/>`__ (customized)
+* `MeshPy <https://documen.tician.de/meshpy/>`__
+* `Molecule <https://molecule.readthedocs.io/>`__
+* `PyCUDA <https://documen.tician.de/pycuda/>`__
+* `PyOpenCL <https://documen.tician.de/pyopencl/>`__
+* `PyLangAcq <http://pylangacq.org/>`__
+* `pytest <https://docs.pytest.org/>`__ (customized)
+* `python-apt <https://apt.alioth.debian.org/python-apt-doc/>`__
+* `PyVisfile <https://documen.tician.de/pyvisfile/>`__
+* `Requests <http://www.python-requests.org/>`__
+* `searx <https://asciimoo.github.io/searx/>`__
+* `Spyder <https://docs.spyder-ide.org/>`__ (customized)
+* `Tablib <http://docs.python-tablib.org/>`__
+* `urllib3 <https://urllib3.readthedocs.io/>`__ (customized)
+* `Werkzeug <http://werkzeug.pocoo.org/docs/>`__ (customized)
Documentation using the classic theme
-------------------------------------
-* `Advanced Generic Widgets`__ (customized)
-* `Apache CouchDB`__ (customized)
-* `APSW`__
-* `Arb`__
-* `Bazaar`__ (customized)
-* `Beautiful Soup`__
-* `Blender`__
-* `Bugzilla`__
-* `Buildbot`__
-* `CMake`__ (customized)
-* `Chaco`__ (customized)
-* `Cormoran`__
-* `DEAP`__ (customized)
-* `Director`__
-* `EZ-Draw`__ (customized)
-* `F2py`__
-* `Generic Mapping Tools (GMT)`__ (customized)
-* `Genomedata`__
-* `GetFEM++`__ (customized)
-* `Glasgow Haskell Compiler`__ (customized)
-* `Grok`__ (customized)
-* `GROMACS`__
-* `GSL Shell`__
-* `Hands-on Python Tutorial`__
-* `Kaa`__ (customized)
-* `Leo`__
-* `LEPL`__ (customized)
-* `Mayavi`__ (customized)
-* `MediaGoblin`__ (customized)
-* `mpmath`__
-* `OpenCV`__ (customized)
-* `OpenEXR`__
-* `OpenGDA`__
-* `Peach^3`__ (customized)
-* `Plone`__ (customized)
-* `PyEMD`__
-* `Pyevolve`__
-* `Pygame`__ (customized)
-* `PyMQI`__
-* `PyQt4`__ (customized)
-* `PyQt5`__ (customized)
-* `Python 2`__
-* `Python 3`__ (customized)
-* `Python Packaging Authority`__ (customized)
-* `Ring programming language`__ (customized)
-* `SageMath`__ (customized)
-* `Segway`__
-* `simuPOP`__ (customized)
-* `Sprox`__ (customized)
-* `SymPy`__
-* `TurboGears`__ (customized)
-* `tvtk`__
-* `Varnish`__ (customized, alabaster for index)
-* `Waf`__
-* `wxPython Phoenix`__ (customized)
-* `z3c`__
-* `zc.async`__ (customized)
-* `Zope`__ (customized)
-
-__ http://xoomer.virgilio.it/infinity77/AGW_Docs/
-__ http://docs.couchdb.org/
-__ https://rogerbinns.github.io/apsw/
-__ http://arblib.org/
-__ http://doc.bazaar.canonical.com/
-__ https://www.crummy.com/software/BeautifulSoup/bs4/doc/
-__ https://docs.blender.org/api/current/
-__ https://bugzilla.readthedocs.io/
-__ https://docs.buildbot.net/latest/
-__ https://cmake.org/documentation/
-__ http://docs.enthought.com/chaco/
-__ http://cormoran.nhopkg.org/docs/
-__ https://deap.readthedocs.io/
-__ https://pythonhosted.org/director/
-__ https://pageperso.lif.univ-mrs.fr/~edouard.thiel/ez-draw/doc/en/html/ez-manual.html
-__ http://f2py.sourceforge.net/docs/
-__ http://gmt.soest.hawaii.edu/doc/latest/
-__ https://noble.gs.washington.edu/proj/genomedata/doc/1.3.3/
-__ http://getfem.org/
-__ https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/
-__ http://grok.zope.org/doc/current/
-__ http://manual.gromacs.org/documentation/
-__ http://www.nongnu.org/gsl-shell/
-__ http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/
-__ http://api.freevo.org/kaa-base/
-__ http://leoeditor.com/
-__ http://www.acooke.org/lepl/
-__ http://docs.enthought.com/mayavi/mayavi/
-__ https://mediagoblin.readthedocs.io/
-__ http://mpmath.org/doc/current/
-__ http://docs.opencv.org/
-__ http://excamera.com/articles/26/doc/index.html
-__ http://www.opengda.org/gdadoc/html/
-__ https://peach3.nl/doc/latest/userdoc/
-__ https://docs.plone.org/
-__ https://pyemd.readthedocs.io/
-__ http://pyevolve.sourceforge.net/
-__ https://www.pygame.org/docs/
-__ https://pythonhosted.org/pymqi/
-__ http://pyqt.sourceforge.net/Docs/PyQt4/
-__ http://pyqt.sourceforge.net/Docs/PyQt5/
-__ https://docs.python.org/2/
-__ https://docs.python.org/3/
-__ https://www.pypa.io/
-__ http://ring-lang.sourceforge.net/doc/
-__ https://doc.sagemath.org/
-__ http://noble.gs.washington.edu/proj/segway/doc/1.1.0/segway.html
-__ http://simupop.sourceforge.net/manual_release/build/userGuide.html
-__ http://sprox.org/
-__ http://docs.sympy.org/
-__ https://turbogears.readthedocs.io/
-__ http://docs.enthought.com/mayavi/tvtk/
-__ https://www.varnish-cache.org/docs/
-__ https://waf.io/apidocs/
-__ https://wxpython.org/Phoenix/docs/html/main.html
-__ http://www.ibiblio.org/paulcarduner/z3ctutorial/
-__ https://pythonhosted.org/zc.async/
-__ https://docs.zope.org/zope2/
+* `Advanced Generic Widgets <http://xoomer.virgilio.it/infinity77/AGW_Docs/>`__ (customized)
+* `Apache CouchDB <http://docs.couchdb.org/>`__ (customized)
+* `APSW <https://rogerbinns.github.io/apsw/>`__
+* `Arb <http://arblib.org/>`__
+* `Bazaar <http://doc.bazaar.canonical.com/>`__ (customized)
+* `Beautiful Soup <https://www.crummy.com/software/BeautifulSoup/bs4/doc/>`__
+* `Blender <https://docs.blender.org/api/current/>`__
+* `Bugzilla <https://bugzilla.readthedocs.io/>`__
+* `Buildbot <https://docs.buildbot.net/latest/>`__
+* `CMake <https://cmake.org/documentation/>`__ (customized)
+* `Chaco <https://docs.enthought.com/chaco/>`__ (customized)
+* `CORE <https://downloads.pf.itd.nrl.navy.mil/docs/core/core-html/>`__
+* `CORE Python modules <https://downloads.pf.itd.nrl.navy.mil/docs/core/core-python-html/>`__
+* `Cormoran <http://cormoran.nhopkg.org/docs/>`__
+* `DEAP <https://deap.readthedocs.io/>`__ (customized)
+* `Director <https://pythonhosted.org/director/>`__
+* `EZ-Draw <https://pageperso.lif.univ-mrs.fr/~edouard.thiel/ez-draw/doc/en/html/ez-manual.html>`__ (customized)
+* `F2py <http://f2py.sourceforge.net/docs/>`__
+* `Generic Mapping Tools (GMT) <https://gmt.soest.hawaii.edu/doc/latest/>`__ (customized)
+* `Genomedata <https://noble.gs.washington.edu/proj/genomedata/doc/1.3.3/>`__
+* `GetFEM++ <http://getfem.org/>`__ (customized)
+* `Glasgow Haskell Compiler <https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/>`__ (customized)
+* `Grok <http://grok.zope.org/doc/current/>`__ (customized)
+* `GROMACS <http://manual.gromacs.org/documentation/>`__
+* `GSL Shell <https://www.nongnu.org/gsl-shell/>`__
+* `Hands-on Python Tutorial <https://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/>`__
+* `Kaa <https://api.freevo.org/kaa-base/>`__ (customized)
+* `Leo <https://leoeditor.com/>`__
+* `LEPL <http://www.acooke.org/lepl/>`__ (customized)
+* `Mayavi <https://docs.enthought.com/mayavi/mayavi/>`__ (customized)
+* `MediaGoblin <https://mediagoblin.readthedocs.io/>`__ (customized)
+* `mpmath <http://mpmath.org/doc/current/>`__
+* `OpenCV <https://docs.opencv.org/>`__ (customized)
+* `OpenEXR <http://excamera.com/articles/26/doc/index.html>`__
+* `OpenGDA <http://www.opengda.org/gdadoc/html/>`__
+* `Peach^3 <https://peach3.nl/doc/latest/userdoc/>`__ (customized)
+* `Plone <https://docs.plone.org/>`__ (customized)
+* `PyEMD <https://pyemd.readthedocs.io/>`__
+* `Pyevolve <http://pyevolve.sourceforge.net/>`__
+* `Pygame <https://www.pygame.org/docs/>`__ (customized)
+* `PyMQI <https://pythonhosted.org/pymqi/>`__
+* `PyQt4 <http://pyqt.sourceforge.net/Docs/PyQt4/>`__ (customized)
+* `PyQt5 <http://pyqt.sourceforge.net/Docs/PyQt5/>`__ (customized)
+* `Python 2 <https://docs.python.org/2/>`__
+* `Python 3 <https://docs.python.org/3/>`__ (customized)
+* `Python Packaging Authority <https://www.pypa.io/>`__ (customized)
+* `Ring programming language <http://ring-lang.sourceforge.net/doc/>`__ (customized)
+* `SageMath <https://doc.sagemath.org/>`__ (customized)
+* `Segway <https://noble.gs.washington.edu/proj/segway/doc/1.1.0/segway.html>`__
+* `simuPOP <http://simupop.sourceforge.net/manual_release/build/userGuide.html>`__ (customized)
+* `Sprox <http://sprox.org/>`__ (customized)
+* `SymPy <https://docs.sympy.org/>`__
+* `TurboGears <https://turbogears.readthedocs.io/>`__ (customized)
+* `tvtk <https://docs.enthought.com/mayavi/tvtk/>`__
+* `Varnish <https://www.varnish-cache.org/docs/>`__ (customized, alabaster for index)
+* `Waf <https://waf.io/apidocs/>`__
+* `wxPython Phoenix <https://wxpython.org/Phoenix/docs/html/main.html>`__ (customized)
+* `Yum <http://yum.baseurl.org/api/yum/>`__
+* `z3c <https://www.ibiblio.org/paulcarduner/z3ctutorial/>`__
+* `zc.async <https://pythonhosted.org/zc.async/>`__ (customized)
+* `Zope <https://docs.zope.org/zope2/>`__ (customized)
Documentation using the sphinxdoc theme
---------------------------------------
-* `cartopy`__
-* `Jython`__
-* `Matplotlib`__
-* `MDAnalysis Tutorial`__
-* `NetworkX`__
-* `PyCantonese`__
-* `Pyre`__
-* `pySPACE`__
-* `Pysparse`__
-* `PyTango`__
-* `Python Wild Magic`__ (customized)
-* `Reteisi`__ (customized)
-* `Sqlkit`__ (customized)
-* `Turbulenz`__
-
-__ http://scitools.org.uk/cartopy/docs/latest/
-__ http://www.jython.org/docs/
-__ https://matplotlib.org/
-__ http://www.mdanalysis.org/MDAnalysisTutorial/
-__ https://networkx.github.io/
-__ http://pycantonese.org/
-__ http://docs.danse.us/pyre/sphinx/
-__ https://pyspace.github.io/pyspace/
-__ http://pysparse.sourceforge.net/
-__ http://www.esrf.eu/computing/cs/tango/tango_doc/kernel_doc/pytango/latest/
-__ https://vmlaker.github.io/pythonwildmagic/
-__ http://www.reteisi.org/contents.html
-__ http://sqlkit.argolinux.org/
-__ http://docs.turbulenz.com/
+* `cartopy <https://scitools.org.uk/cartopy/docs/latest/>`__
+* `Jython <http://www.jython.org/docs/>`__
+* `Matplotlib <https://matplotlib.org/>`__
+* `MDAnalysis Tutorial <https://www.mdanalysis.org/MDAnalysisTutorial/>`__
+* `NetworkX <https://networkx.github.io/>`__
+* `PyCantonese <http://pycantonese.org/>`__
+* `Pyre <http://docs.danse.us/pyre/sphinx/>`__
+* `pySPACE <https://pyspace.github.io/pyspace/>`__
+* `Pysparse <http://pysparse.sourceforge.net/>`__
+* `PyTango <https://www.esrf.eu/computing/cs/tango/tango_doc/kernel_doc/pytango/latest/>`__
+* `Python Wild Magic <https://vmlaker.github.io/pythonwildmagic/>`__ (customized)
+* `Reteisi <http://www.reteisi.org/contents.html>`__ (customized)
+* `Sqlkit <http://sqlkit.argolinux.org/>`__ (customized)
+* `Turbulenz <http://docs.turbulenz.com/>`__
Documentation using the nature theme
------------------------------------
-* `Alembic`__
-* `Cython`__
-* `easybuild`__
-* `jsFiddle`__
-* `libLAS`__ (customized)
-* `Lmod`__
-* `MapServer`__ (customized)
-* `Pandas`__
-* `pyglet`__ (customized)
-* `Setuptools`__
-* `Spring Python`__
-* `StatsModels`__ (customized)
-* `Sylli`__
-
-__ http://alembic.zzzcomputing.com/
-__ http://docs.cython.org/
-__ https://easybuild.readthedocs.io/
-__ http://doc.jsfiddle.net/
-__ https://www.liblas.org/
-__ https://lmod.readthedocs.io/
-__ http://mapserver.org/
-__ https://pandas.pydata.org/pandas-docs/stable/
-__ https://pyglet.readthedocs.io/
-__ https://setuptools.readthedocs.io/
-__ https://docs.spring.io/spring-python/1.2.x/sphinx/html/
-__ http://www.statsmodels.org/
-__ http://sylli.sourceforge.net/
+* `Alembic <http://alembic.zzzcomputing.com/>`__
+* `Cython <http://docs.cython.org/>`__
+* `easybuild <https://easybuild.readthedocs.io/>`__
+* `jsFiddle <http://doc.jsfiddle.net/>`__
+* `libLAS <https://www.liblas.org/>`__ (customized)
+* `Lmod <https://lmod.readthedocs.io/>`__
+* `MapServer <https://mapserver.org/>`__ (customized)
+* `Pandas <https://pandas.pydata.org/pandas-docs/stable/>`__
+* `pyglet <https://pyglet.readthedocs.io/>`__ (customized)
+* `Setuptools <https://setuptools.readthedocs.io/>`__
+* `Spring Python <https://docs.spring.io/spring-python/1.2.x/sphinx/html/>`__
+* `StatsModels <https://www.statsmodels.org/>`__ (customized)
+* `Sylli <http://sylli.sourceforge.net/>`__
Documentation using another builtin theme
-----------------------------------------
-* `Breathe`__ (haiku)
-* `MPipe`__ (sphinx13)
-* `NLTK`__ (agogo)
-* `Programmieren mit PyGTK und Glade (German)`__ (agogo, customized)
-* `PyPubSub`__ (bizstyle)
-* `Pylons`__ (pyramid)
-* `Pyramid web framework`__ (pyramid)
-* `Sphinx`__ (sphinx13) :-)
-* `Valence`__ (haiku, customized)
-
-__ https://breathe.readthedocs.io/
-__ https://vmlaker.github.io/mpipe/
-__ http://www.nltk.org/
-__ http://www.florian-diesch.de/doc/python-und-glade/online/
-__ https://pypubsub.readthedocs.io/
-__ http://docs.pylonsproject.org/projects/pylons-webframework/
-__ https://docs.pylonsproject.org/projects/pyramid/
-__ http://www.sphinx-doc.org/
-__ http://docs.valence.desire2learn.com/
+* `Breathe <https://breathe.readthedocs.io/>`__ (haiku)
+* `MPipe <https://vmlaker.github.io/mpipe/>`__ (sphinx13)
+* `NLTK <https://www.nltk.org/>`__ (agogo)
+* `Programmieren mit PyGTK und Glade (German) <https://www.florian-diesch.de/doc/python-und-glade/online/>`__ (agogo, customized)
+* `PyPubSub <https://pypubsub.readthedocs.io/>`__ (bizstyle)
+* `Pylons <https://docs.pylonsproject.org/projects/pylons-webframework/>`__ (pyramid)
+* `Pyramid web framework <https://docs.pylonsproject.org/projects/pyramid/>`__ (pyramid)
+* `Sphinx <http://www.sphinx-doc.org/>`__ (sphinx13) :-)
+* `Valence <https://docs.valence.desire2learn.com/>`__ (haiku, customized)
Documentation using sphinx_rtd_theme
------------------------------------
-* `Annotator`__
-* `Ansible`__ (customized)
-* `Arcade`__
-* `aria2`__
-* `ASE`__
-* `Autofac`__
-* `BigchainDB`__
-* `Blocks`__
-* `bootstrap-datepicker`__
-* `Certbot`__
-* `Chainer`__ (customized)
-* `CherryPy`__
-* `CodeIgniter`__
-* `Conda`__
-* `Corda`__
-* `Dask`__
-* `Databricks`__ (customized)
-* `Dataiku DSS`__
-* `edX`__
-* `Electrum`__
-* `Elemental`__
-* `ESWP3`__
-* `Ethereum Homestead`__
-* `Fidimag`__
-* `Flake8`__
-* `GeoNode`__
-* `Godot`__
-* `Graylog`__
-* `GPAW`__ (customized)
-* `HDF5 for Python (h5py)`__
-* `Hyperledger Fabric`__
-* `Hyperledger Sawtooth`__
-* `IdentityServer`__
-* `Idris`__
-* `javasphinx`__
-* `Julia`__
-* `Jupyter Notebook`__
-* `Lasagne`__
-* `latexindent.pl`__
-* `Linguistica`__
-* `Linux kernel`__
-* `MathJax`__
-* `MDTraj`__ (customized)
-* `MICrobial Community Analysis (micca)`__
-* `MicroPython`__
-* `Minds`__ (customized)
-* `Mink`__
-* `Mockery`__
-* `mod_wsgi`__
-* `MoinMoin`__
-* `Mopidy`__
-* `MyHDL`__
-* `Nextflow`__
-* `NICOS`__ (customized)
-* `Pelican`__
-* `picamera`__
-* `Pillow`__
-* `pip`__
-* `Paver`__
-* `peewee`__
-* `Phinx`__
-* `phpMyAdmin`__
-* `PROS`__ (customized)
-* `Pweave`__
-* `PyPy`__
-* `python-sqlparse`__
-* `PyVISA`__
-* `Read The Docs`__
-* `Free your information from their silos (French)`__ (customized)
-* `Releases Sphinx extension`__
-* `Qtile`__
-* `Quex`__
-* `Satchmo`__
-* `Scapy`__
-* `SimPy`__
-* `SlamData`__
-* `Solidity`__
-* `Sonos Controller (SoCo)`__
-* `Sphinx AutoAPI`__
-* `sphinx-argparse`__
-* `Sphinx-Gallery`__ (customized)
-* `SpotBugs`__
-* `StarUML`__
-* `Sublime Text Unofficial Documentation`__
-* `SunPy`__
-* `Sylius`__
-* `Tango Controls`__ (customized)
-* `Topshelf`__
-* `Theano`__
-* `ThreatConnect`__
-* `Tuleap`__
-* `TYPO3`__ (customized)
-* `uWSGI`__
-* `Wagtail`__
-* `Web Application Attack and Audit Framework (w3af)`__
-* `Weblate`__
-* `x265`__
-* `ZeroNet`__
-
-__ http://docs.annotatorjs.org/
-__ https://docs.ansible.com/
-__ http://arcade.academy/
-__ https://aria2.github.io/manual/en/html/
-__ https://wiki.fysik.dtu.dk/ase/
-__ http://docs.autofac.org/
-__ https://docs.bigchaindb.com/
-__ https://blocks.readthedocs.io/
-__ https://bootstrap-datepicker.readthedocs.io/
-__ https://letsencrypt.readthedocs.io/
-__ https://docs.chainer.org/
-__ http://docs.cherrypy.org/
-__ https://www.codeigniter.com/user_guide/
-__ https://conda.io/docs/
-__ https://docs.corda.net/
-__ https://dask.pydata.org/
-__ https://docs.databricks.com/
-__ https://doc.dataiku.com/
-__ http://docs.edx.org/
-__ http://docs.electrum.org/
-__ http://libelemental.org/documentation/dev/
-__ https://eswp3.readthedocs.io/
-__ http://www.ethdocs.org/
-__ https://fidimag.readthedocs.io/
-__ http://flake8.pycqa.org/
-__ http://docs.geonode.org/
-__ https://godot.readthedocs.io/
-__ http://docs.graylog.org/
-__ https://wiki.fysik.dtu.dk/gpaw/
-__ http://docs.h5py.org/
-__ https://hyperledger-fabric.readthedocs.io/
-__ https://intelledger.github.io/
-__ http://docs.identityserver.io/
-__ http://docs.idris-lang.org/
-__ https://bronto-javasphinx.readthedocs.io/
-__ https://julia.readthedocs.io/
-__ https://jupyter-notebook.readthedocs.io/
-__ https://lasagne.readthedocs.io/
-__ https://latexindentpl.readthedocs.io/
-__ https://linguistica-uchicago.github.io/lxa5/
-__ https://www.kernel.org/doc/html/latest/index.html
-__ https://docs.mathjax.org/
-__ http://mdtraj.org/latest/
-__ http://micca.org/docs/latest/
-__ https://docs.micropython.org/
-__ https://www.minds.org/docs/
-__ http://mink.behat.org/
-__ http://docs.mockery.io/
-__ https://modwsgi.readthedocs.io/
-__ https://moin-20.readthedocs.io/
-__ https://docs.mopidy.com/
-__ http://docs.myhdl.org/
-__ https://www.nextflow.io/docs/latest/index.html
-__ https://forge.frm2.tum.de/nicos/doc/nicos-master/
-__ http://docs.getpelican.com/
-__ https://picamera.readthedocs.io/
-__ https://pillow.readthedocs.io/
-__ https://pip.pypa.io/
-__ https://paver.readthedocs.io/
-__ http://docs.peewee-orm.com/
-__ http://docs.phinx.org/
-__ https://docs.phpmyadmin.net/
-__ https://pros.cs.purdue.edu/v5/
-__ http://mpastell.com/pweave/
-__ http://doc.pypy.org/
-__ https://sqlparse.readthedocs.io/
-__ https://pyvisa.readthedocs.io/
-__ https://docs.readthedocs.io/
-__ http://redaction-technique.org/
-__ https://releases.readthedocs.io/
-__ http://docs.qtile.org/
-__ http://quex.sourceforge.net/doc/html/main.html
-__ http://docs.satchmoproject.com/
-__ https://scapy.readthedocs.io/
-__ http://simpy.readthedocs.io/
-__ http://docs.slamdata.com/
-__ https://solidity.readthedocs.io/
-__ http://docs.python-soco.com/
-__ https://sphinx-autoapi.readthedocs.io/
-__ https://sphinx-argparse.readthedocs.io/
-__ https://sphinx-gallery.readthedocs.io/
-__ https://spotbugs.readthedocs.io/
-__ http://docs.staruml.io/
-__ http://docs.sublimetext.info/
-__ http://docs.sunpy.org/
-__ http://docs.sylius.org/
-__ https://tango-controls.readthedocs.io/
-__ http://docs.topshelf-project.com/
-__ http://www.deeplearning.net/software/theano/
-__ https://docs.threatconnect.com/
-__ https://tuleap.net/doc/en/
-__ https://docs.typo3.org/
-__ https://uwsgi-docs.readthedocs.io/
-__ http://docs.wagtail.io/
-__ http://docs.w3af.org/
-__ https://docs.weblate.org/
-__ https://x265.readthedocs.io/
-__ https://zeronet.readthedocs.io/
+* `Annotator <http://docs.annotatorjs.org/>`__
+* `Ansible <https://docs.ansible.com/>`__ (customized)
+* `Arcade <http://arcade.academy/>`__
+* `aria2 <https://aria2.github.io/manual/en/html/>`__
+* `ASE <https://wiki.fysik.dtu.dk/ase/>`__
+* `Autofac <http://docs.autofac.org/>`__
+* `BigchainDB <https://docs.bigchaindb.com/>`__
+* `Blocks <https://blocks.readthedocs.io/>`__
+* `bootstrap-datepicker <https://bootstrap-datepicker.readthedocs.io/>`__
+* `Certbot <https://letsencrypt.readthedocs.io/>`__
+* `Chainer <https://docs.chainer.org/>`__ (customized)
+* `CherryPy <https://docs.cherrypy.org/>`__
+* `cloud-init <https://cloudinit.readthedocs.io/>`__
+* `CodeIgniter <https://www.codeigniter.com/user_guide/>`__
+* `Conda <https://conda.io/docs/>`__
+* `Corda <https://docs.corda.net/>`__
+* `Dask <https://dask.pydata.org/>`__
+* `Databricks <https://docs.databricks.com/>`__ (customized)
+* `Dataiku DSS <https://doc.dataiku.com/>`__
+* `DNF <https://dnf.readthedocs.io/>`__
+* `edX <https://docs.edx.org/>`__
+* `Electrum <http://docs.electrum.org/>`__
+* `Elemental <http://libelemental.org/documentation/dev/>`__
+* `ESWP3 <https://eswp3.readthedocs.io/>`__
+* `Ethereum Homestead <http://www.ethdocs.org/>`__
+* `Fidimag <https://fidimag.readthedocs.io/>`__
+* `Flake8 <http://flake8.pycqa.org/>`__
+* `FluidDyn <https://fluiddyn.readthedocs.io/>`__
+* `Fluidsim <https://fluidsim.readthedocs.io/>`__
+* `GeoNode <http://docs.geonode.org/>`__
+* `Godot <https://godot.readthedocs.io/>`__
+* `Graylog <http://docs.graylog.org/>`__
+* `GPAW <https://wiki.fysik.dtu.dk/gpaw/>`__ (customized)
+* `HDF5 for Python (h5py) <http://docs.h5py.org/>`__
+* `Hyperledger Fabric <https://hyperledger-fabric.readthedocs.io/>`__
+* `Hyperledger Sawtooth <https://intelledger.github.io/>`__
+* `IdentityServer <http://docs.identityserver.io/>`__
+* `Idris <http://docs.idris-lang.org/>`__
+* `javasphinx <https://bronto-javasphinx.readthedocs.io/>`__
+* `Julia <https://julia.readthedocs.io/>`__
+* `Jupyter Notebook <https://jupyter-notebook.readthedocs.io/>`__
+* `Lasagne <https://lasagne.readthedocs.io/>`__
+* `latexindent.pl <https://latexindentpl.readthedocs.io/>`__
+* `Linguistica <https://linguistica-uchicago.github.io/lxa5/>`__
+* `Linux kernel <https://www.kernel.org/doc/html/latest/index.html>`__
+* `MathJax <https://docs.mathjax.org/>`__
+* `MDTraj <http://mdtraj.org/latest/>`__ (customized)
+* `MICrobial Community Analysis (micca) <http://micca.org/docs/latest/>`__
+* `MicroPython <https://docs.micropython.org/>`__
+* `Minds <https://www.minds.org/docs/>`__ (customized)
+* `Mink <http://mink.behat.org/>`__
+* `Mockery <http://docs.mockery.io/>`__
+* `mod_wsgi <https://modwsgi.readthedocs.io/>`__
+* `MoinMoin <https://moin-20.readthedocs.io/>`__
+* `Mopidy <https://docs.mopidy.com/>`__
+* `MyHDL <http://docs.myhdl.org/>`__
+* `Nextflow <https://www.nextflow.io/docs/latest/index.html>`__
+* `NICOS <https://forge.frm2.tum.de/nicos/doc/nicos-master/>`__ (customized)
+* `Pelican <http://docs.getpelican.com/>`__
+* `picamera <https://picamera.readthedocs.io/>`__
+* `Pillow <https://pillow.readthedocs.io/>`__
+* `pip <https://pip.pypa.io/>`__
+* `Paver <https://paver.readthedocs.io/>`__
+* `peewee <http://docs.peewee-orm.com/>`__
+* `Phinx <http://docs.phinx.org/>`__
+* `phpMyAdmin <https://docs.phpmyadmin.net/>`__
+* `PROS <https://pros.cs.purdue.edu/v5/>`__ (customized)
+* `Pweave <http://mpastell.com/pweave/>`__
+* `PyPy <http://doc.pypy.org/>`__
+* `python-sqlparse <https://sqlparse.readthedocs.io/>`__
+* `PyVISA <https://pyvisa.readthedocs.io/>`__
+* `Read The Docs <https://docs.readthedocs.io/>`__
+* `Free your information from their silos (French) <http://redaction-technique.org/>`__ (customized)
+* `Releases Sphinx extension <https://releases.readthedocs.io/>`__
+* `Qtile <http://docs.qtile.org/>`__
+* `Quex <http://quex.sourceforge.net/doc/html/main.html>`__
+* `Satchmo <http://docs.satchmoproject.com/>`__
+* `Scapy <https://scapy.readthedocs.io/>`__
+* `SimPy <https://simpy.readthedocs.io/>`__
+* `six <https://six.readthedocs.io/>`__
+* `SlamData <https://newdocs.slamdata.com>`__
+* `Solidity <https://solidity.readthedocs.io/>`__
+* `Sonos Controller (SoCo) <http://docs.python-soco.com/>`__
+* `Sphinx AutoAPI <https://sphinx-autoapi.readthedocs.io/>`__
+* `sphinx-argparse <https://sphinx-argparse.readthedocs.io/>`__
+* `Sphinx-Gallery <https://sphinx-gallery.readthedocs.io/>`__ (customized)
+* `SpotBugs <https://spotbugs.readthedocs.io/>`__
+* `StarUML <https://docs.staruml.io/>`__
+* `Sublime Text Unofficial Documentation <http://docs.sublimetext.info/>`__
+* `SunPy <https://docs.sunpy.org/>`__
+* `Sylius <http://docs.sylius.org/>`__
+* `Tango Controls <https://tango-controls.readthedocs.io/>`__ (customized)
+* `Topshelf <http://docs.topshelf-project.com/>`__
+* `Theano <http://www.deeplearning.net/software/theano/>`__
+* `ThreatConnect <https://docs.threatconnect.com/>`__
+* `Tuleap <https://tuleap.net/doc/en/>`__
+* `TYPO3 <https://docs.typo3.org/>`__ (customized)
+* `uWSGI <https://uwsgi-docs.readthedocs.io/>`__
+* `virtualenv <https://virtualenv.readthedocs.io/>`__
+* `Wagtail <https://docs.wagtail.io/>`__
+* `Web Application Attack and Audit Framework (w3af) <http://docs.w3af.org/>`__
+* `Weblate <https://docs.weblate.org/>`__
+* `x265 <https://x265.readthedocs.io/>`__
+* `ZeroNet <https://zeronet.readthedocs.io/>`__
Documentation using sphinx_bootstrap_theme
------------------------------------------
-* `Bootstrap Theme`__
-* `C/C++ Software Development with Eclipse`__
-* `Dataverse`__
-* `e-cidadania`__
-* `Hangfire`__
-* `Hedge`__
-* `ObsPy`__
-* `Open Dylan`__
-* `Pootle`__
-* `PyUblas`__
-* `seaborn`__
-
-__ https://ryan-roemer.github.io/sphinx-bootstrap-theme/
-__ http://eclipsebook.in/
-__ http://guides.dataverse.org/
-__ https://e-cidadania.readthedocs.io/
-__ http://docs.hangfire.io/
-__ https://documen.tician.de/hedge/
-__ https://docs.obspy.org/
-__ https://opendylan.org/documentation/
-__ http://docs.translatehouse.org/projects/pootle/
-__ https://documen.tician.de/pyublas/
-__ https://seaborn.pydata.org/
+* `Bootstrap Theme <https://ryan-roemer.github.io/sphinx-bootstrap-theme/>`__
+* `C/C++ Software Development with Eclipse <https://eclipsebook.in/>`__
+* `Dataverse <http://guides.dataverse.org/>`__
+* `e-cidadania <https://e-cidadania.readthedocs.io/>`__
+* `Hangfire <http://docs.hangfire.io/>`__
+* `Hedge <https://documen.tician.de/hedge/>`__
+* `ObsPy <https://docs.obspy.org/>`__
+* `Open Dylan <https://opendylan.org/documentation/>`__
+* `Pootle <http://docs.translatehouse.org/projects/pootle/>`__
+* `PyUblas <https://documen.tician.de/pyublas/>`__
+* `seaborn <https://seaborn.pydata.org/>`__
Documentation using a custom theme or integrated in a website
-------------------------------------------------------------
-* `Apache Cassandra`__
-* `Astropy`__
-* `Bokeh`__
-* `Boto 3`__
-* `CakePHP`__
-* `CasperJS`__
-* `Ceph`__
-* `Chef`__
-* `CKAN`__
-* `Confluent Platform`__
-* `Django`__
-* `Doctrine`__
-* `Enterprise Toolkit for Acrobat products`__
-* `Gameduino`__
-* `gensim`__
-* `GeoServer`__
-* `gevent`__
-* `GHC - Glasgow Haskell Compiler`__
-* `Guzzle`__
-* `H2O.ai`__
-* `Istihza (Turkish Python documentation project)`__
-* `Kombu`__
-* `Lasso`__
-* `Mako`__
-* `MirrorBrain`__
-* `MongoDB`__
-* `Music21`__
-* `MyHDL`__
-* `nose`__
-* `ns-3`__
-* `NumPy`__
-* `ObjectListView`__
-* `OpenERP`__
-* `OpenCV`__
-* `OpenLayers`__
-* `OpenTURNS`__
-* `Open vSwitch`__
-* `PlatformIO`__
-* `PyEphem`__
-* `Pygments`__
-* `Plone User Manual (German)`__
-* `PSI4`__
-* `PyMOTW`__
-* `python-aspectlib`__ (`sphinx_py3doc_enhanced_theme <https://pypi.org/project/sphinx_py3doc_enhanced_theme/>`__)
-* `QGIS`__
-* `qooxdoo`__
-* `Roundup`__
-* `SaltStack`__
-* `scikit-learn`__
-* `SciPy`__
-* `Scrapy`__
-* `Seaborn`__
-* `Selenium`__
-* `Self`__
-* `Substance D`__
-* `Sulu`__
-* `SQLAlchemy`__
-* `tinyTiM`__
-* `Twisted`__
-* `Ubuntu Packaging Guide`__
-* `WebFaction`__
-* `WTForms`__
-
-__ https://cassandra.apache.org/doc/
-__ http://docs.astropy.org/
-__ https://bokeh.pydata.org/
-__ https://boto3.readthedocs.io/
-__ https://book.cakephp.org/
-__ http://docs.casperjs.org/
-__ http://docs.ceph.com/docs/master/
-__ https://docs.chef.io/
-__ http://docs.ckan.org/
-__ http://docs.confluent.io/
-__ https://docs.djangoproject.com/
-__ http://docs.doctrine-project.org/
-__ https://www.adobe.com/devnet-docs/acrobatetk/
-__ http://excamera.com/sphinx/gameduino/
-__ https://radimrehurek.com/gensim/
-__ http://docs.geoserver.org/
-__ http://www.gevent.org/
-__ http://downloads.haskell.org/~ghc/master/users-guide/
-__ http://docs.guzzlephp.org/en/stable/
-__ http://docs.h2o.ai/
-__ https://belgeler.yazbel.com/python-istihza/
-__ http://docs.kombu.me/
-__ http://lassoguide.com/
-__ http://docs.makotemplates.org/
-__ http://mirrorbrain.org/docs/
-__ https://docs.mongodb.com/
-__ http://web.mit.edu/music21/doc/
-__ http://docs.myhdl.org/en/latest/
-__ https://nose.readthedocs.io/
-__ https://www.nsnam.org/documentation/
-__ https://docs.scipy.org/doc/numpy/reference/
-__ http://objectlistview.sourceforge.net/python/
-__ https://doc.odoo.com/
-__ http://docs.opencv.org/
-__ http://docs.openlayers.org/
-__ http://openturns.github.io/openturns/master/
-__ http://docs.openvswitch.org/
-__ http://docs.platformio.org/
-__ http://rhodesmill.org/pyephem/
-__ http://pygments.org/docs/
-__ https://www.hasecke.com/plone-benutzerhandbuch/4.0/
-__ http://www.psicode.org/psi4manual/master/index.html
-__ https://pymotw.com/2/
-__ https://python-aspectlib.readthedocs.io/
-__ https://qgis.org/en/docs/index.html
-__ http://www.qooxdoo.org/current/
-__ http://www.roundup-tracker.org/
-__ https://docs.saltstack.com/
-__ http://scikit-learn.org/stable/
-__ https://docs.scipy.org/doc/scipy/refrence/
-__ https://doc.scrapy.org/
-__ https://seaborn.pydata.org/
-__ http://docs.seleniumhq.org/docs/
-__ http://www.selflanguage.org/
-__ https://docs.pylonsproject.org/projects/substanced/
-__ http://docs.sulu.io/
-__ https://docs.sqlalchemy.org/
-__ http://tinytim.sourceforge.net/docs/2.0/
-__ http://twistedmatrix.com/documents/current/
-__ http://packaging.ubuntu.com/html/
-__ https://docs.webfaction.com/
-__ https://wtforms.readthedocs.io/
+* `Apache Cassandra <https://cassandra.apache.org/doc/>`__
+* `Astropy <http://docs.astropy.org/>`__
+* `Bokeh <https://bokeh.pydata.org/>`__
+* `Boto 3 <https://boto3.readthedocs.io/>`__
+* `CakePHP <https://book.cakephp.org/>`__
+* `CasperJS <http://docs.casperjs.org/>`__
+* `Ceph <http://docs.ceph.com/docs/master/>`__
+* `Chef <https://docs.chef.io/>`__
+* `CKAN <https://docs.ckan.org/>`__
+* `Confluent Platform <https://docs.confluent.io/>`__
+* `Django <https://docs.djangoproject.com/>`__
+* `Doctrine <https://www.doctrine-project.org/>`__
+* `Enterprise Toolkit for Acrobat products <https://www.adobe.com/devnet-docs/acrobatetk/>`__
+* `Gameduino <http://excamera.com/sphinx/gameduino/>`__
+* `gensim <https://radimrehurek.com/gensim/>`__
+* `GeoServer <http://docs.geoserver.org/>`__
+* `gevent <http://www.gevent.org/>`__
+* `GHC - Glasgow Haskell Compiler <https://downloads.haskell.org/~ghc/master/users-guide/>`__
+* `Guzzle <http://docs.guzzlephp.org/>`__
+* `H2O.ai <http://docs.h2o.ai/>`__
+* `Istihza (Turkish Python documentation project) <https://belgeler.yazbel.com/python-istihza/>`__
+* `Kombu <http://docs.kombu.me/>`__
+* `Lasso <http://lassoguide.com/>`__
+* `Mako <http://docs.makotemplates.org/>`__
+* `MirrorBrain <http://mirrorbrain.org/docs/>`__
+* `MongoDB <https://docs.mongodb.com/>`__
+* `Music21 <https://web.mit.edu/music21/doc/>`__
+* `MyHDL <http://docs.myhdl.org/>`__
+* `nose <https://nose.readthedocs.io/>`__
+* `ns-3 <https://www.nsnam.org/documentation/>`__
+* `NumPy <https://docs.scipy.org/doc/numpy/reference/>`__
+* `ObjectListView <http://objectlistview.sourceforge.net/python/>`__
+* `OpenERP <https://doc.odoo.com/>`__
+* `OpenCV <https://docs.opencv.org/>`__
+* `OpenLayers <http://docs.openlayers.org/>`__
+* `OpenTURNS <https://openturns.github.io/openturns/master/>`__
+* `Open vSwitch <http://docs.openvswitch.org/>`__
+* `PlatformIO <https://docs.platformio.org/>`__
+* `PyEphem <http://rhodesmill.org/pyephem/>`__
+* `Pygments <http://pygments.org/docs/>`__
+* `Plone User Manual (German) <https://www.hasecke.com/plone-benutzerhandbuch/4.0/>`__
+* `PSI4 <http://www.psicode.org/psi4manual/master/index.html>`__
+* `PyMOTW <https://pymotw.com/2/>`__
+* `python-aspectlib <https://python-aspectlib.readthedocs.io/>`__ (`sphinx_py3doc_enhanced_theme <https://pypi.org/project/sphinx_py3doc_enhanced_theme/>`__)
+* `QGIS <https://qgis.org/en/docs/index.html>`__
+* `qooxdoo <https://www.qooxdoo.org/current/>`__
+* `Roundup <http://www.roundup-tracker.org/>`__
+* `SaltStack <https://docs.saltstack.com/>`__
+* `scikit-learn <http://scikit-learn.org/stable/>`__
+* `SciPy <https://docs.scipy.org/doc/scipy/reference/>`__
+* `Scrapy <https://doc.scrapy.org/>`__
+* `Seaborn <https://seaborn.pydata.org/>`__
+* `Selenium <https://docs.seleniumhq.org/docs/>`__
+* `Self <http://www.selflanguage.org/>`__
+* `Substance D <https://docs.pylonsproject.org/projects/substanced/>`__
+* `Sulu <http://docs.sulu.io/>`__
+* `SQLAlchemy <https://docs.sqlalchemy.org/>`__
+* `tinyTiM <http://tinytim.sourceforge.net/docs/2.0/>`__
+* `Twisted <https://twistedmatrix.com/documents/current/>`__
+* `Ubuntu Packaging Guide <http://packaging.ubuntu.com/html/>`__
+* `WebFaction <https://docs.webfaction.com/>`__
+* `WTForms <https://wtforms.readthedocs.io/>`__
Homepages and other non-documentation sites
-------------------------------------------
-* `Arizona State University PHY494/PHY598/CHM598 Simulation approaches to Bio-and Nanophysics`__ (classic)
-* `Benoit Boissinot`__ (classic, customized)
-* `Computer Networks, Parallelization, and Simulation Laboratory (CNPSLab)`__ (sphinx_rtd_theme)
-* `Deep Learning Tutorials`__ (sphinxdoc)
-* `Loyola University Chicago COMP 339-439 Distributed Systems course`__ (sphinx_bootstrap_theme)
-* `Pylearn2`__ (sphinxdoc, customized)
-* `PyXLL`__ (sphinx_bootstrap_theme, customized)
-* `SciPy Cookbook`__ (sphinx_rtd_theme)
-* `The Wine Cellar Book`__ (sphinxdoc)
-* `Thomas Cokelaer's Python, Sphinx and reStructuredText tutorials`__ (standard)
-* `UC Berkeley ME233 Advanced Control Systems II course`__ (sphinxdoc)
-
-__ https://becksteinlab.physics.asu.edu/pages/courses/2013/SimBioNano/
-__ https://bboissin.appspot.com/
-__ https://lab.miletic.net/
-__ http://www.deeplearning.net/tutorial/
-__ http://books.cs.luc.edu/distributedsystems/
-__ http://www.deeplearning.net/software/pylearn2/
-__ https://www.pyxll.com/
-__ https://scipy-cookbook.readthedocs.io/
-__ https://www.thewinecellarbook.com/doc/en/
-__ http://thomas-cokelaer.info/tutorials/
-__ https://berkeley-me233.github.io/
+* `Arizona State University PHY494/PHY598/CHM598 Simulation approaches to Bio-and Nanophysics <https://becksteinlab.physics.asu.edu/pages/courses/2013/SimBioNano/>`__ (classic)
+* `Benoit Boissinot <https://bboissin.appspot.com/>`__ (classic, customized)
+* `Computer Networks, Parallelization, and Simulation Laboratory (CNPSLab) <https://lab.miletic.net/>`__ (sphinx_rtd_theme)
+* `Deep Learning Tutorials <http://www.deeplearning.net/tutorial/>`__ (sphinxdoc)
+* `Eric Holscher <http://ericholscher.com/>`__ (alabaster)
+* `Lei Ma's Statistical Mechanics lecture notes <http://statisticalphysics.openmetric.org/>`__ (sphinx_bootstrap_theme)
+* `Loyola University Chicago COMP 339-439 Distributed Systems course <https://books.cs.luc.edu/distributedsystems/>`__ (sphinx_bootstrap_theme)
+* `Pylearn2 <http://www.deeplearning.net/software/pylearn2/>`__ (sphinxdoc, customized)
+* `PyXLL <https://www.pyxll.com/>`__ (sphinx_bootstrap_theme, customized)
+* `SciPy Cookbook <https://scipy-cookbook.readthedocs.io/>`__ (sphinx_rtd_theme)
+* `The Wine Cellar Book <https://www.thewinecellarbook.com/doc/en/>`__ (sphinxdoc)
+* `Thomas Cokelaer's Python, Sphinx and reStructuredText tutorials <https://thomas-cokelaer.info/tutorials/>`__ (standard)
+* `UC Berkeley ME233 Advanced Control Systems II course <https://berkeley-me233.github.io/>`__ (sphinxdoc)
+* `Željko Svedružić's Biomolecular Structure and Function Laboratory (BioSFLab) <https://www.svedruziclab.com/>`__ (sphinx_bootstrap_theme)
Books produced using Sphinx
---------------------------
-* `"Die Wahrheit des Sehens. Der DEKALOG von Krzysztof Kieślowski"`__
-* `"Expert Python Programming"`__
-* `"Expert Python Programming" (Japanese translation)`__
-* `"The Hitchhiker's Guide to Python"`__
-* `"LassoGuide"`__
-* `"Learning Sphinx" (in Japanese)`__
-* `"Mercurial: the definitive guide (Second edition)"`__
-* `"Pioneers and Prominent Men of Utah"`__
-* `"Pomodoro Technique Illustrated" (Japanese translation)`__
-* `"Python Professional Programming" (in Japanese)`__
-* `"Redmine Primer 5th Edition (in Japanese)"`__
-* `"The repoze.bfg Web Application Framework"`__
-* `"Simple and Steady Way of Learning for Software Engineering" (in Japanese)`__
-* `"Software-Dokumentation mit Sphinx"`__
-* `"Theoretical Physics Reference"`__
-* `"The Varnish Book"`__
-
-__ https://literatur.hasecke.com/post/die-wahrheit-des-sehens-dekalog-kieslowski/
-__ https://www.packtpub.com/application-development/expert-python-programming
-__ https://www.amazon.co.jp/dp/4048686291/
-__ http://docs.python-guide.org/en/latest/
-__ http://www.lassosoft.com/Lasso-Documentation
-__ https://www.oreilly.co.jp/books/9784873116488/
-__ https://book.mercurial-scm.org/
-__ http://pioneers.rstebbing.com/
-__ https://www.amazon.co.jp/dp/4048689525/
-__ http://www.amazon.co.jp/dp/4798032948/
-__ https://www.shuwasystem.co.jp/products/7980html/4825.html
-__ https://www.amazon.com/repoze-bfg-Web-Application-Framework-Version/dp/0615345379
-__ https://www.amazon.co.jp/dp/477414259X/
-__ https://www.amazon.de/dp/1497448689/
-__ http://www.theoretical-physics.net/
-__ https://info.varnish-software.com/the-varnish-book
+* `"The Art of Community" (Japanese translation) <https://www.oreilly.co.jp/books/9784873114958/>`__
+* `"Die Wahrheit des Sehens. Der DEKALOG von Krzysztof Kieślowski" <https://literatur.hasecke.com/post/die-wahrheit-des-sehens-dekalog-kieslowski/>`__
+* `"Expert Python Programming" <https://www.packtpub.com/application-development/expert-python-programming>`__
+* `"Expert Python Programming" (Japanese translation) <https://www.amazon.co.jp/dp/4048686291/>`__
+* `"Expert Python Programming 2nd Edition" (Japanese translation) <https://www.amazon.co.jp/dp/4048930613/>`__
+* `"The Hitchhiker's Guide to Python" <https://docs.python-guide.org/>`__
+* `"LassoGuide" <http://www.lassosoft.com/Lasso-Documentation>`__
+* `"Learning Sphinx" (in Japanese) <https://www.oreilly.co.jp/books/9784873116488/>`__
+* `"Learning System Programming with Go (Japanese)" <https://www.lambdanote.com/products/go>`__
+* `"Mercurial: the definitive guide (Second edition)" <https://book.mercurial-scm.org/>`__
+* `"Mithril -- The fastest clientside MVC (Japanese)" <https://www.oreilly.co.jp/books/9784873117447/>`__
+* `"Pioneers and Prominent Men of Utah" <http://pioneers.rstebbing.com/>`__
+* `"Pomodoro Technique Illustrated" (Japanese translation) <https://www.amazon.co.jp/dp/4048689525/>`__
+* `"Python Professional Programming" (in Japanese) <http://www.amazon.co.jp/dp/4798032948/>`__
+* `"Python Professional Programming 2nd Edition" (in Japanese) <https://www.amazon.co.jp/dp/479804315X/>`__
+* `"Python Professional Programming 3rd Edition" (in Japanese) <https://www.amazon.co.jp/dp/4798053821/>`__
+* `"Real World HTTP -- Learning The Internet and Web Technology via its history and code (Japanese)" <https://www.oreilly.co.jp/books/9784873118048/>`__
+* `"Redmine Primer 5th Edition (in Japanese)" <https://www.shuwasystem.co.jp/products/7980html/4825.html>`__
+* `"The repoze.bfg Web Application Framework" <https://www.amazon.com/repoze-bfg-Web-Application-Framework-Version/dp/0615345379>`__
+* `"The Self-Taught Programmer" (Japanese translation) <https://www.amazon.co.jp/dp/4822292274/>`__
+* `"Simple and Steady Way of Learning for Software Engineering" (in Japanese) <https://www.amazon.co.jp/dp/477414259X/>`__
+* `"Software-Dokumentation mit Sphinx" <https://www.amazon.de/dp/1497448689/>`__
+* `"Theoretical Physics Reference" <https://www.theoretical-physics.net/>`__
+* `"The Varnish Book" <https://info.varnish-software.com/the-varnish-book>`__
Theses produced using Sphinx
----------------------------
* `"A Web-Based System for Comparative Analysis of OpenStreetMap Data by the Use
- of CouchDB"`__
-* `"Content Conditioning and Distribution for Dynamic Virtual Worlds"`__
-* `"The Sphinx Thesis Resource"`__
-
-__ https://www.yumpu.com/et/document/view/11722645/masterthesis-markusmayr-0542042
-__ https://www.cs.princeton.edu/research/techreps/TR-941-12
-__ https://jterrace.github.io/sphinxtr/
+ of CouchDB" <https://www.yumpu.com/et/document/view/11722645/masterthesis-markusmayr-0542042>`__
+* `"Content Conditioning and Distribution for Dynamic Virtual Worlds" <https://www.cs.princeton.edu/research/techreps/TR-941-12>`__
+* `"The Sphinx Thesis Resource" <https://jterrace.github.io/sphinxtr/>`__
Projects integrating Sphinx functionality
-----------------------------------------
-* `Read the Docs`__, a software-as-a-service documentation hosting platform, uses
- Sphinx to automatically build documentation updates that are pushed to GitHub
+* `Read the Docs <https://readthedocs.org/>`__, a software-as-a-service documentation hosting platform, uses
+ Sphinx to automatically build documentation updates that are pushed to GitHub.
-* `Spyder`__, the Scientific Python Development Environment, uses Sphinx in its
+* `Spyder <https://docs.spyder-ide.org/help.html>`__, the Scientific Python Development Environment, uses Sphinx in its
help pane to render rich documentation for functions, classes and methods
- automatically or on-demand
-
- __ https://readthedocs.org/
- __ https://docs.spyder-ide.org/help.html
+ automatically or on-demand.
diff --git a/Makefile b/Makefile
index 1d14ea277..89b5bb5aa 100644
--- a/Makefile
+++ b/Makefile
@@ -57,7 +57,7 @@ style-check:
.PHONY: type-check
type-check:
- mypy sphinx/
+ mypy sphinx
.PHONY: pylint
pylint:
diff --git a/doc/_static/conf.py.txt b/doc/_static/conf.py.txt
index 50c7bb782..5420e2717 100644
--- a/doc/_static/conf.py.txt
+++ b/doc/_static/conf.py.txt
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
# test documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 26 00:00:43 2016.
#
diff --git a/doc/themes/agogo.png b/doc/_static/themes/agogo.png
index 5a09cb96c..5a09cb96c 100644
--- a/doc/themes/agogo.png
+++ b/doc/_static/themes/agogo.png
Binary files differ
diff --git a/doc/themes/alabaster.png b/doc/_static/themes/alabaster.png
index 4a49c1ad0..4a49c1ad0 100644
--- a/doc/themes/alabaster.png
+++ b/doc/_static/themes/alabaster.png
Binary files differ
diff --git a/doc/themes/bizstyle.png b/doc/_static/themes/bizstyle.png
index e19fb6b34..e19fb6b34 100644
--- a/doc/themes/bizstyle.png
+++ b/doc/_static/themes/bizstyle.png
Binary files differ
diff --git a/doc/themes/classic.png b/doc/_static/themes/classic.png
index 3b3c9cbd8..3b3c9cbd8 100644
--- a/doc/themes/classic.png
+++ b/doc/_static/themes/classic.png
Binary files differ
diff --git a/doc/themes/fullsize/agogo.png b/doc/_static/themes/fullsize/agogo.png
index 106a16cea..106a16cea 100644
--- a/doc/themes/fullsize/agogo.png
+++ b/doc/_static/themes/fullsize/agogo.png
Binary files differ
diff --git a/doc/themes/fullsize/alabaster.png b/doc/_static/themes/fullsize/alabaster.png
index 5eca20912..5eca20912 100644
--- a/doc/themes/fullsize/alabaster.png
+++ b/doc/_static/themes/fullsize/alabaster.png
Binary files differ
diff --git a/doc/themes/fullsize/bizstyle.png b/doc/_static/themes/fullsize/bizstyle.png
index 586064765..586064765 100644
--- a/doc/themes/fullsize/bizstyle.png
+++ b/doc/_static/themes/fullsize/bizstyle.png
Binary files differ
diff --git a/doc/themes/fullsize/classic.png b/doc/_static/themes/fullsize/classic.png
index 269dab22f..269dab22f 100644
--- a/doc/themes/fullsize/classic.png
+++ b/doc/_static/themes/fullsize/classic.png
Binary files differ
diff --git a/doc/themes/fullsize/haiku.png b/doc/_static/themes/fullsize/haiku.png
index 707d2bfec..707d2bfec 100644
--- a/doc/themes/fullsize/haiku.png
+++ b/doc/_static/themes/fullsize/haiku.png
Binary files differ
diff --git a/doc/themes/fullsize/nature.png b/doc/_static/themes/fullsize/nature.png
index 00730c0a5..00730c0a5 100644
--- a/doc/themes/fullsize/nature.png
+++ b/doc/_static/themes/fullsize/nature.png
Binary files differ
diff --git a/doc/themes/fullsize/pyramid.png b/doc/_static/themes/fullsize/pyramid.png
index 3b9d04d13..3b9d04d13 100644
--- a/doc/themes/fullsize/pyramid.png
+++ b/doc/_static/themes/fullsize/pyramid.png
Binary files differ
diff --git a/doc/themes/fullsize/scrolls.png b/doc/_static/themes/fullsize/scrolls.png
index 8a1c1faf5..8a1c1faf5 100644
--- a/doc/themes/fullsize/scrolls.png
+++ b/doc/_static/themes/fullsize/scrolls.png
Binary files differ
diff --git a/doc/themes/fullsize/sphinx_rtd_theme.png b/doc/_static/themes/fullsize/sphinx_rtd_theme.png
index 95cff4ccd..95cff4ccd 100644
--- a/doc/themes/fullsize/sphinx_rtd_theme.png
+++ b/doc/_static/themes/fullsize/sphinx_rtd_theme.png
Binary files differ
diff --git a/doc/themes/fullsize/sphinxdoc.png b/doc/_static/themes/fullsize/sphinxdoc.png
index eb498e3e8..eb498e3e8 100644
--- a/doc/themes/fullsize/sphinxdoc.png
+++ b/doc/_static/themes/fullsize/sphinxdoc.png
Binary files differ
diff --git a/doc/themes/fullsize/traditional.png b/doc/_static/themes/fullsize/traditional.png
index 07ad00875..07ad00875 100644
--- a/doc/themes/fullsize/traditional.png
+++ b/doc/_static/themes/fullsize/traditional.png
Binary files differ
diff --git a/doc/themes/haiku.png b/doc/_static/themes/haiku.png
index 4530debb9..4530debb9 100644
--- a/doc/themes/haiku.png
+++ b/doc/_static/themes/haiku.png
Binary files differ
diff --git a/doc/themes/nature.png b/doc/_static/themes/nature.png
index ad39b32b7..ad39b32b7 100644
--- a/doc/themes/nature.png
+++ b/doc/_static/themes/nature.png
Binary files differ
diff --git a/doc/themes/pyramid.png b/doc/_static/themes/pyramid.png
index 72749dd6b..72749dd6b 100644
--- a/doc/themes/pyramid.png
+++ b/doc/_static/themes/pyramid.png
Binary files differ
diff --git a/doc/themes/scrolls.png b/doc/_static/themes/scrolls.png
index 1a117379f..1a117379f 100644
--- a/doc/themes/scrolls.png
+++ b/doc/_static/themes/scrolls.png
Binary files differ
diff --git a/doc/themes/sphinx_rtd_theme.png b/doc/_static/themes/sphinx_rtd_theme.png
index 7c3b7ae05..7c3b7ae05 100644
--- a/doc/themes/sphinx_rtd_theme.png
+++ b/doc/_static/themes/sphinx_rtd_theme.png
Binary files differ
diff --git a/doc/themes/sphinxdoc.png b/doc/_static/themes/sphinxdoc.png
index 587363e61..587363e61 100644
--- a/doc/themes/sphinxdoc.png
+++ b/doc/_static/themes/sphinxdoc.png
Binary files differ
diff --git a/doc/themes/traditional.png b/doc/_static/themes/traditional.png
index 9820fd0ea..9820fd0ea 100644
--- a/doc/themes/traditional.png
+++ b/doc/_static/themes/traditional.png
Binary files differ
diff --git a/doc/translation.png b/doc/_static/translation.png
index 11f3d02cd..11f3d02cd 100644
--- a/doc/translation.png
+++ b/doc/_static/translation.png
Binary files differ
diff --git a/doc/conf.py b/doc/conf.py
index 89a5b0a4d..0be4672be 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-#
# Sphinx documentation build configuration file
import re
@@ -57,10 +55,17 @@ latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
latex_elements = {
+ 'fontenc': r'\usepackage[LGR,X2,T1]{fontenc}',
'fontpkg': r'''
\usepackage[sc]{mathpazo}
\usepackage[scaled]{helvet}
\usepackage{courier}
+\substitutefont{LGR}{\rmdefault}{cmr}
+\substitutefont{LGR}{\sfdefault}{cmss}
+\substitutefont{LGR}{\ttdefault}{cmtt}
+\substitutefont{X2}{\rmdefault}{cmr}
+\substitutefont{X2}{\sfdefault}{cmss}
+\substitutefont{X2}{\ttdefault}{cmtt}
''',
'passoptionstopackages': '\\PassOptionsToPackage{svgnames}{xcolor}',
'preamble': '\\DeclareUnicodeCharacter{229E}{\\ensuremath{\\boxplus}}',
@@ -145,3 +150,10 @@ def setup(app):
names=['param'], can_collapse=True)
app.add_object_type('event', 'event', 'pair: %s; event', parse_event,
doc_field_types=[fdesc])
+
+ # workaround for RTD
+ from sphinx.util import logging
+ logger = logging.getLogger(__name__)
+ app.info = lambda *args, **kwargs: logger.info(*args, **kwargs)
+ app.warn = lambda *args, **kwargs: logger.warning(*args, **kwargs)
+ app.debug = lambda *args, **kwargs: logger.debug(*args, **kwargs)
diff --git a/doc/contents.rst b/doc/contents.rst
index 93f89f388..f3a1cd2f8 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -14,16 +14,18 @@ Sphinx documentation contents
usage/configuration
usage/builders/index
usage/extensions/index
+ usage/theming
+ usage/advanced/intl
+ usage/advanced/setuptools
+ usage/advanced/websupport/index
intro
man/index
- intl
theming
- setuptools
templating
latex
extdev/index
- websupport
+ development/tutorials/index
faq
glossary
diff --git a/doc/develop.rst b/doc/develop.rst
index d2a51b8e2..d061aae61 100644
--- a/doc/develop.rst
+++ b/doc/develop.rst
@@ -100,7 +100,7 @@ This is the current list of contributed extensions in that repository:
- zopeext: provide an ``autointerface`` directive for using `Zope interfaces`_
-See the :ref:`extension tutorial <exttut>` on getting started with writing your
+See the :doc:`extension tutorials <../development/tutorials/index>` on getting started with writing your
own extensions.
@@ -127,7 +127,7 @@ own extensions.
.. _NumPy style: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
.. _hyphenator: https://github.com/mnater/hyphenator
.. _exceltable: https://pythonhosted.org/sphinxcontrib-exceltable/
-.. _YouTube: http://www.youtube.com/
+.. _YouTube: https://www.youtube.com/
.. _ClearQuest: https://www.ibm.com/us-en/marketplace/rational-clearquest
.. _Zope interfaces: https://zopeinterface.readthedocs.io/en/latest/README.html
.. _slideshare: https://www.slideshare.net/
diff --git a/doc/development/tutorials/helloworld.rst b/doc/development/tutorials/helloworld.rst
new file mode 100644
index 000000000..5ce8db66c
--- /dev/null
+++ b/doc/development/tutorials/helloworld.rst
@@ -0,0 +1,162 @@
+Developing a "Hello world" directive
+====================================
+
+The objective of this tutorial is to create a very basic extension that adds a new
+directive that outputs a paragraph containing `hello world`.
+
+Only basic information is provided in this tutorial. For more information,
+refer to the :doc:`other tutorials <index>` that go into more
+detail.
+
+.. warning:: For this extension, you will need some basic understanding of docutils_
+ and Python.
+
+Creating a new extension file
+-----------------------------
+
+Your extension file could be in any folder of your project. In our case,
+let's do the following:
+
+#. Create an :file:`_ext` folder in :file:`source`.
+#. Create a new Python file in the :file:`_ext` folder called
+ :file:`helloworld.py`.
+
+ Here is an example of the folder structure you might obtain:
+
+ .. code-block:: text
+
+ └── source
+    ├── _ext
+ │   └── helloworld.py
+    ├── _static
+    ├── _themes
+    ├── conf.py
+    ├── somefolder
+    ├── somefile.rst
+    └── someotherfile.rst
+
+Writing the extension
+---------------------
+
+Open :file:`helloworld.py` and paste the following code in it:
+
+.. code-block:: python
+
+ from docutils import nodes
+ from docutils.parsers.rst import Directive
+
+
+ class HelloWorld(Directive):
+ def run(self):
+ paragraph_node = nodes.paragraph(text='Hello World!')
+ return [paragraph_node]
+
+
+ def setup(app):
+ app.add_directive("helloworld", HelloWorld)
+
+
+Some essential things are happening in this example, and you will see them
+in all directives:
+
+.. rubric:: Directive declaration
+
+Our new directive is declared in the ``HelloWorld`` class, it extends
+docutils_' ``Directive`` class. All extensions that create directives
+should extend this class.
+
+.. rubric:: ``run`` method
+
+This method is a requirement and it is part of every directive. It contains
+the main logic of the directive and it returns a list of docutils nodes to
+be processed by Sphinx.
+
+.. seealso::
+
+ :doc:`todo`
+
+.. rubric:: docutils nodes
+
+The ``run`` method returns a list of nodes. Nodes are docutils' way of
+representing the content of a document. There are many types of nodes
+available: text, paragraph, reference, table, etc.
+
+.. seealso::
+
+ `The docutils documentation on nodes <docutils nodes>`_
+
+The ``nodes.paragraph`` class creates a new paragraph node. A paragraph
+node typically contains some text that we can set during instantiation using
+the ``text`` parameter.
+
+.. rubric:: ``setup`` function
+
+This function is a requirement. We use it to plug our new directive into
+Sphinx.
+The simplest thing you can do is call the ``app.add_directive`` method.
+
+.. note::
+
+ The first argument is the name of the directive itself as used in an rST file.
+
+ In our case, we would use ``helloworld``:
+
+ .. code-block:: rst
+
+ Some intro text here...
+
+ .. helloworld::
+
+ Some more text here...
+
+
+Updating the conf.py file
+-------------------------
+
+The extension file has to be declared in your :file:`conf.py` file to make
+Sphinx aware of it:
+
+#. Open :file:`conf.py`. It is in the :file:`source` folder by default.
+#. Add ``sys.path.append(os.path.abspath("./_ext"))`` before
+ the ``extensions`` variable declaration (if it exists).
+#. Update or create the ``extensions`` list and add the
+ extension file name to the list:
+
+ .. code-block:: python
+
+ extensions.append('helloworld')
+
+You can now use the extension.
+
+.. admonition:: Example
+
+ .. code-block:: rst
+
+ Some intro text here...
+
+ .. helloworld::
+
+ Some more text here...
+
+ The sample above would generate:
+
+ .. code-block:: text
+
+ Some intro text here...
+
+ Hello World!
+
+ Some more text here...
+
+This is the very basic principle of an extension that creates a new directive.
+
+For a more advanced example, refer to :doc:`todo`.
+
+Further reading
+---------------
+
+You can create your own nodes if needed, refer to the
+:doc:`todo` for more information.
+
+.. _docutils: http://docutils.sourceforge.net/
+.. _`docutils nodes`: http://docutils.sourceforge.net/docs/ref/doctree.html \ No newline at end of file
diff --git a/doc/development/tutorials/index.rst b/doc/development/tutorials/index.rst
new file mode 100644
index 000000000..cb8dce435
--- /dev/null
+++ b/doc/development/tutorials/index.rst
@@ -0,0 +1,11 @@
+Extension tutorials
+===================
+
+Refer to the following tutorials to get started with extension development.
+
+.. toctree::
+ :caption: Directive tutorials
+ :maxdepth: 1
+
+ helloworld
+ todo
diff --git a/doc/extdev/tutorial.rst b/doc/development/tutorials/todo.rst
index 113cea119..e68a39342 100644
--- a/doc/extdev/tutorial.rst
+++ b/doc/development/tutorials/todo.rst
@@ -1,7 +1,5 @@
-.. _exttut:
-
-Tutorial: Writing a simple extension
-====================================
+Developing a "TODO" extension
+=============================
This section is intended as a walkthrough for the creation of custom extensions.
It covers the basics of writing and activating an extension, as well as
@@ -12,112 +10,12 @@ include todo entries in the documentation, and to collect these in a central
place. (A similar "todo" extension is distributed with Sphinx.)
-Important objects
------------------
-
-There are several key objects whose API you will use while writing an
-extension. These are:
-
-**Application**
- The application object (usually called ``app``) is an instance of
- :class:`.Sphinx`. It controls most high-level functionality, such as the
- setup of extensions, event dispatching and producing output (logging).
-
- If you have the environment object, the application is available as
- ``env.app``.
-
-**Environment**
- The build environment object (usually called ``env``) is an instance of
- :class:`.BuildEnvironment`. It is responsible for parsing the source
- documents, stores all metadata about the document collection and is
- serialized to disk after each build.
-
- Its API provides methods to do with access to metadata, resolving references,
- etc. It can also be used by extensions to cache information that should
- persist for incremental rebuilds.
-
- If you have the application or builder object, the environment is available
- as ``app.env`` or ``builder.env``.
-
-**Builder**
- The builder object (usually called ``builder``) is an instance of a specific
- subclass of :class:`.Builder`. Each builder class knows how to convert the
- parsed documents into an output format, or otherwise process them (e.g. check
- external links).
-
- If you have the application object, the builder is available as
- ``app.builder``.
-
-**Config**
- The config object (usually called ``config``) provides the values of
- configuration values set in :file:`conf.py` as attributes. It is an instance
- of :class:`.Config`.
-
- The config is available as ``app.config`` or ``env.config``.
-
-
-Build Phases
-------------
-
-One thing that is vital in order to understand extension mechanisms is the way
-in which a Sphinx project is built: this works in several phases.
-
-**Phase 0: Initialization**
-
- In this phase, almost nothing of interest to us happens. The source
- directory is searched for source files, and extensions are initialized.
- Should a stored build environment exist, it is loaded, otherwise a new one is
- created.
-
-**Phase 1: Reading**
-
- In Phase 1, all source files (and on subsequent builds, those that are new or
- changed) are read and parsed. This is the phase where directives and roles
- are encountered by docutils, and the corresponding code is executed. The
- output of this phase is a *doctree* for each source file; that is a tree of
- docutils nodes. For document elements that aren't fully known until all
- existing files are read, temporary nodes are created.
-
- There are nodes provided by docutils, which are documented `in the docutils
- documentation <http://docutils.sourceforge.net/docs/ref/doctree.html>`__.
- Additional nodes are provided by Sphinx and :ref:`documented here <nodes>`.
-
- During reading, the build environment is updated with all meta- and cross
- reference data of the read documents, such as labels, the names of headings,
- described Python objects and index entries. This will later be used to
- replace the temporary nodes.
-
- The parsed doctrees are stored on the disk, because it is not possible to
- hold all of them in memory.
-
-**Phase 2: Consistency checks**
-
- Some checking is done to ensure no surprises in the built documents.
-
-**Phase 3: Resolving**
-
- Now that the metadata and cross-reference data of all existing documents is
- known, all temporary nodes are replaced by nodes that can be converted into
- output using components called transforms. For example, links are created for
- object references that exist, and simple literal nodes are created for those
- that don't.
-
-**Phase 4: Writing**
-
- This phase converts the resolved doctrees to the desired output format, such
- as HTML or LaTeX. This happens via a so-called docutils writer that visits
- the individual nodes of each doctree and produces some output in the process.
-
-.. note::
-
- Some builders deviate from this general build plan, for example, the builder
- that checks external links does not need anything more than the parsed
- doctrees and therefore does not have phases 2--4.
-
-
Extension Design
----------------
+.. note:: To understand the design of this extension, refer to
+ :ref:`important-objects` and :ref:`build-phases`.
+
We want the extension to add the following to Sphinx:
* A "todo" directive, containing some content that is marked with "TODO", and
@@ -174,12 +72,13 @@ the individual calls do is the following:
If the third argument was ``'html'``, HTML documents would be full rebuild if the
config value changed its value. This is needed for config values that
- influence reading (build phase 1).
+ influence reading (build :ref:`phase 1 <build-phases>`).
* :meth:`~Sphinx.add_node` adds a new *node class* to the build system. It also
can specify visitor functions for each supported output format. These visitor
- functions are needed when the new nodes stay until phase 4 -- since the
- ``todolist`` node is always replaced in phase 3, it doesn't need any.
+ functions are needed when the new nodes stay until :ref:`phase 4 <build-phases>`
+ -- since the ``todolist`` node is always replaced in :ref:`phase 3 <build-phases>`,
+ it doesn't need any.
We need to create the two node classes ``todo`` and ``todolist`` later.
@@ -276,7 +175,7 @@ The ``todo`` directive function looks like this::
return [targetnode, todo_node]
Several important things are covered here. First, as you can see, you can refer
-to the build environment instance using ``self.state.document.settings.env``.
+to the :ref:`build environment instance <important-objects>` using ``self.state.document.settings.env``.
Then, to act as a link target (from the todolist), the todo directive needs to
return a target node in addition to the todo node. The target ID (in HTML, this
@@ -340,7 +239,8 @@ Here we clear out all todos whose docname matches the given one from the
added again during parsing.
The other handler belongs to the :event:`doctree-resolved` event. This event is
-emitted at the end of phase 3 and allows custom resolving to be done::
+emitted at the end of :ref:`phase 3 <build-phases>` and allows custom resolving
+to be done::
def process_todo_nodes(app, doctree, fromdocname):
if not app.config.todo_include_todos:
diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst
index ee612765c..fe64628a4 100644
--- a/doc/extdev/appapi.rst
+++ b/doc/extdev/appapi.rst
@@ -115,37 +115,15 @@ Emitting events
.. automethod:: emit_firstresult(event, \*arguments)
-Producing messages / logging
-----------------------------
-
-The application object also provides support for emitting leveled messages.
-
-.. note::
-
- There is no "error" call: in Sphinx, errors are defined as things that stop
- the build; just raise an exception (:exc:`sphinx.errors.SphinxError` or a
- custom subclass) to do that.
-
-.. deprecated:: 1.6
-
- Please use :ref:`logging-api` instead.
-
-.. automethod:: Sphinx.warn
-
-.. automethod:: Sphinx.info
-
-.. automethod:: Sphinx.verbose
-
-.. automethod:: Sphinx.debug
-
-.. automethod:: Sphinx.debug2
-
-
Sphinx runtime information
--------------------------
The application object also provides runtime information as attributes.
+.. attribute:: Sphinx.project
+
+ Target project. See :class:`.Project`.
+
.. attribute:: Sphinx.srcdir
Source directory.
diff --git a/doc/extdev/envapi.rst b/doc/extdev/envapi.rst
index 442cfde15..1dee6a576 100644
--- a/doc/extdev/envapi.rst
+++ b/doc/extdev/envapi.rst
@@ -15,6 +15,10 @@ Build environment API
Reference to the :class:`.Config` object.
+ .. attribute:: project
+
+ Target project. See :class:`.Project`.
+
.. attribute:: srcdir
Source directory.
@@ -39,10 +43,6 @@ Build environment API
**Utility methods**
- .. automethod:: warn
-
- .. automethod:: warn_node
-
.. automethod:: doc2path
.. automethod:: relfn2path
diff --git a/doc/extdev/index.rst b/doc/extdev/index.rst
index bbfcd4188..da0d0e2d0 100644
--- a/doc/extdev/index.rst
+++ b/doc/extdev/index.rst
@@ -52,6 +52,115 @@ Note that it is still necessary to register the builder using
.. _entry points: https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins
+.. _important-objects:
+
+Important objects
+-----------------
+
+There are several key objects whose API you will use while writing an
+extension. These are:
+
+**Application**
+ The application object (usually called ``app``) is an instance of
+ :class:`.Sphinx`. It controls most high-level functionality, such as the
+ setup of extensions, event dispatching and producing output (logging).
+
+ If you have the environment object, the application is available as
+ ``env.app``.
+
+**Environment**
+ The build environment object (usually called ``env``) is an instance of
+ :class:`.BuildEnvironment`. It is responsible for parsing the source
+ documents, stores all metadata about the document collection and is
+ serialized to disk after each build.
+
+ Its API provides methods to do with access to metadata, resolving references,
+ etc. It can also be used by extensions to cache information that should
+ persist for incremental rebuilds.
+
+ If you have the application or builder object, the environment is available
+ as ``app.env`` or ``builder.env``.
+
+**Builder**
+ The builder object (usually called ``builder``) is an instance of a specific
+ subclass of :class:`.Builder`. Each builder class knows how to convert the
+ parsed documents into an output format, or otherwise process them (e.g. check
+ external links).
+
+ If you have the application object, the builder is available as
+ ``app.builder``.
+
+**Config**
+ The config object (usually called ``config``) provides the values of
+ configuration values set in :file:`conf.py` as attributes. It is an instance
+ of :class:`.Config`.
+
+ The config is available as ``app.config`` or ``env.config``.
+
+To see an example of use of these objects, refer to :doc:`../development/tutorials/index`.
+
+.. _build-phases:
+
+Build Phases
+------------
+
+One thing that is vital in order to understand extension mechanisms is the way
+in which a Sphinx project is built: this works in several phases.
+
+**Phase 0: Initialization**
+
+ In this phase, almost nothing of interest to us happens. The source
+ directory is searched for source files, and extensions are initialized.
+ Should a stored build environment exist, it is loaded, otherwise a new one is
+ created.
+
+**Phase 1: Reading**
+
+ In Phase 1, all source files (and on subsequent builds, those that are new or
+ changed) are read and parsed. This is the phase where directives and roles
+ are encountered by docutils, and the corresponding code is executed. The
+ output of this phase is a *doctree* for each source file; that is a tree of
+ docutils nodes. For document elements that aren't fully known until all
+ existing files are read, temporary nodes are created.
+
+ There are nodes provided by docutils, which are documented `in the docutils
+ documentation <http://docutils.sourceforge.net/docs/ref/doctree.html>`__.
+ Additional nodes are provided by Sphinx and :ref:`documented here <nodes>`.
+
+ During reading, the build environment is updated with all meta- and cross
+ reference data of the read documents, such as labels, the names of headings,
+ described Python objects and index entries. This will later be used to
+ replace the temporary nodes.
+
+ The parsed doctrees are stored on the disk, because it is not possible to
+ hold all of them in memory.
+
+**Phase 2: Consistency checks**
+
+ Some checking is done to ensure no surprises in the built documents.
+
+**Phase 3: Resolving**
+
+ Now that the metadata and cross-reference data of all existing documents is
+ known, all temporary nodes are replaced by nodes that can be converted into
+ output using components called tranform. For example, links are created for
+ object references that exist, and simple literal nodes are created for those
+ that don't.
+
+**Phase 4: Writing**
+
+ This phase converts the resolved doctrees to the desired output format, such
+ as HTML or LaTeX. This happens via a so-called docutils writer that visits
+ the individual nodes of each doctree and produces some output in the process.
+
+.. note::
+
+ Some builders deviate from this general build plan, for example, the builder
+ that checks external links does not need anything more than the parsed
+ doctrees and therefore does not have phases 2--4.
+
+To see an example of application, refer to :doc:`../development/tutorials/todo`.
+
.. _ext-metadata:
Extension metadata
@@ -82,9 +191,10 @@ APIs used for writing extensions
--------------------------------
.. toctree::
+ :maxdepth: 2
- tutorial
appapi
+ projectapi
envapi
builderapi
collectorapi
@@ -96,6 +206,8 @@ APIs used for writing extensions
i18n
utils
+.. _dev-deprecated-apis:
+
Deprecated APIs
---------------
@@ -122,6 +234,226 @@ The following is a list of deprecated interfaces.
- (will be) Removed
- Alternatives
+ * - ``encoding`` argument of ``autodoc.Documenter.get_doc()``,
+ ``autodoc.DocstringSignatureMixin.get_doc()``,
+ ``autodoc.DocstringSignatureMixin._find_signature()``, and
+ ``autodoc.ClassDocumenter.get_doc()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``nodetype`` argument of
+ ``sphinx.search.WordCollector.is_meta_keywords()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``suffix`` argument of ``BuildEnvironment.doc2path()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - string style ``base`` argument of ``BuildEnvironment.doc2path()``
+ - 2.0
+ - 4.0
+ - ``os.path.join()``
+
+ * - ``sphinx.addnodes.abbreviation``
+ - 2.0
+ - 4.0
+ - ``docutils.nodes.abbreviation``
+
+ * - ``sphinx.cmd.quickstart.term_decode()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.cmd.quickstart.TERM_ENCODING``
+ - 2.0
+ - 4.0
+ - ``sys.stdin.encoding``
+
+ * - ``sphinx.config.check_unicode()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.config.string_classes``
+ - 2.0
+ - 4.0
+ - ``[str]``
+
+ * - ``sphinx.domains.cpp.DefinitionError.description``
+ - 2.0
+ - 4.0
+ - ``str(exc)``
+
+ * - ``sphinx.domains.cpp.NoOldIdError.description``
+ - 2.0
+ - 4.0
+ - ``str(exc)``
+
+ * - ``sphinx.domains.cpp.UnsupportedMultiCharacterCharLiteral.decoded``
+ - 2.0
+ - 4.0
+ - ``str(exc)``
+
+ * - ``sphinx.ext.autosummary.Autosummary.warn()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.ext.autosummary.Autosummary.genopt``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.ext.autosummary.Autosummary.warnings``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.ext.autosummary.Autosummary.result``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.ext.doctest.doctest_encode()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.testing.util.remove_unicode_literal()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.util.attrdict``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.util.force_decode()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.util.get_matching_docs()``
+ - 2.0
+ - 4.0
+ - ``sphinx.util.get_matching_files()``
+
+ * - ``sphinx.util.inspect.Parameter``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.util.osutil.EEXIST``
+ - 2.0
+ - 4.0
+ - ``errno.EEXIST`` or ``FileExistsError``
+
+ * - ``sphinx.util.osutil.EINVAL``
+ - 2.0
+ - 4.0
+ - ``errno.EINVAL``
+
+ * - ``sphinx.util.osutil.ENOENT``
+ - 2.0
+ - 4.0
+ - ``errno.ENOENT`` or ``FileNotFoundError``
+
+ * - ``sphinx.util.osutil.EPIPE``
+ - 2.0
+ - 4.0
+     - ``errno.EPIPE`` or ``BrokenPipeError``
+
+ * - ``sphinx.util.osutil.walk()``
+ - 2.0
+ - 4.0
+ - ``os.walk()``
+
+ * - ``sphinx.util.pycompat.UnicodeMixin``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.util.pycompat.u``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.util.PeekableIterator``
+ - 2.0
+ - 4.0
+ - N/A
+
+   * - Omitting the ``filename`` argument in an overridden
+ ``IndexBuilder.feed()`` method.
+ - 2.0
+ - 4.0
+ - ``IndexBuilder.feed(docname, filename, title, doctree)``
+
+ * - ``sphinx.writers.latex.LaTeXTranslator.babel_defmacro()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.application.Sphinx._setting_up_extension``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - The ``importer`` argument of ``sphinx.ext.autodoc.importer._MockModule``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.ext.autodoc.importer._MockImporter``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.io.SphinxBaseFileInput``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.io.SphinxFileInput.supported``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.io.SphinxRSTFileInput``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.registry.SphinxComponentRegistry.add_source_input()``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.writers.latex.LaTeXTranslator._make_visit_admonition()``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.writers.latex.LaTeXTranslator.collect_footnotes()``
+ - 2.0
+ - 4.0
+ - N/A
+
+ * - ``sphinx.writers.texinfo.TexinfoTranslator._make_visit_admonition()``
+ - 2.0
+ - 3.0
+ - N/A
+
+ * - ``sphinx.writers.text.TextTranslator._make_depart_admonition()``
+ - 2.0
+ - 3.0
+ - N/A
+
* - :rst:dir:`highlightlang`
- 1.8
- 4.0
diff --git a/doc/extdev/logging.rst b/doc/extdev/logging.rst
index e930db0c0..b66f11dbb 100644
--- a/doc/extdev/logging.rst
+++ b/doc/extdev/logging.rst
@@ -62,3 +62,5 @@ Logging API
.. autofunction:: pending_logging()
.. autofunction:: pending_warnings()
+
+.. autofunction:: prefixed_warnings()
diff --git a/doc/extdev/projectapi.rst b/doc/extdev/projectapi.rst
new file mode 100644
index 000000000..238aeb4f7
--- /dev/null
+++ b/doc/extdev/projectapi.rst
@@ -0,0 +1,9 @@
+.. _project-api:
+
+Project API
+===========
+
+.. currentmodule:: sphinx.project
+
+.. autoclass:: Project
+ :members:
diff --git a/doc/faq.rst b/doc/faq.rst
index b2d6cc9e6..4b608b780 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -20,7 +20,7 @@ How do I...
the :rst:dir:`toctree` directive where you want to start numbering.
... customize the look of the built HTML files?
- Use themes, see :doc:`theming`.
+ Use themes, see :doc:`/usage/theming`.
... add global substitutions or includes?
Add them in the :confval:`rst_prolog` or :confval:`rst_epilog` config value.
@@ -30,7 +30,7 @@ How do I...
``sidebartoc`` block.
... write my own extension?
- See the :ref:`extension tutorial <exttut>`.
+ See the :doc:`/development/tutorials/index`.
... convert from my existing docs using MoinMoin markup?
The easiest way is to convert to xhtml, then convert `xhtml to reST`_.
@@ -205,7 +205,7 @@ The following list gives some hints for the creation of epub files:
.. _Epubcheck: https://github.com/IDPF/epubcheck
.. _Calibre: https://calibre-ebook.com/
.. _FBreader: https://fbreader.org/
-.. _Bookworm: http://www.oreilly.com/bookworm/index.html
+.. _Bookworm: https://www.oreilly.com/bookworm/index.html
.. _kindlegen: https://www.amazon.com/gp/feature.html?docId=1000765211
.. _texinfo-faq:
diff --git a/doc/intro.rst b/doc/intro.rst
index d44003b71..622d16b83 100644
--- a/doc/intro.rst
+++ b/doc/intro.rst
@@ -55,15 +55,13 @@ See the :ref:`pertinent section in the FAQ list <usingwith>`.
Prerequisites
-------------
-Sphinx needs at least **Python 2.7** or **Python 3.4** to run, as well as the
-docutils_ and Jinja2_ libraries. Sphinx should work with docutils version 0.10
-or some (not broken) SVN trunk snapshot. If you like to have source code
-highlighting support, you must also install the Pygments_ library.
+Sphinx needs at least **Python 3.5** to run, as well as the docutils_ and
+Jinja2_ libraries. Sphinx should work with docutils version 0.12 or some (not
+broken) SVN trunk snapshot.
.. _reStructuredText: http://docutils.sourceforge.net/rst.html
.. _docutils: http://docutils.sourceforge.net/
.. _Jinja2: http://jinja.pocoo.org/
-.. _Pygments: http://pygments.org/
Usage
diff --git a/doc/latex.rst b/doc/latex.rst
index 0e2740d68..7a4d0be03 100644
--- a/doc/latex.rst
+++ b/doc/latex.rst
@@ -365,6 +365,8 @@ Here are some macros from the package file :file:`sphinx.sty` and class files
:file:`sphinxhowto.cls`, :file:`sphinxmanual.cls`, which have public names
thus allowing redefinitions. Check the respective files for the defaults.
+.. _latex-macros:
+
Macros
~~~~~~
@@ -390,11 +392,18 @@ Macros
.. versionadded:: 1.6.3
``\sphinxstylecodecontinued`` and ``\sphinxstylecodecontinues``.
- the table of contents is typeset via ``\sphinxtableofcontents`` which is a
- wrapper (whose definition can be found in :file:`sphinxhowto.cls` or in
- :file:`sphinxmanual.cls`) of standard ``\tableofcontents``.
+ wrapper (defined differently in :file:`sphinxhowto.cls` and in
+ :file:`sphinxmanual.cls`) of standard ``\tableofcontents``. The macro
+ ``\sphinxtableofcontentshook`` is executed during its expansion right before
+ ``\tableofcontents`` itself.
.. versionchanged:: 1.5
formerly, the meaning of ``\tableofcontents`` was modified by Sphinx.
+ .. versionchanged:: 2.0
+ hard-coded redefinitions of ``\l@section`` and ``\l@subsection`` formerly
+ done during loading of ``'manual'`` docclass are now executed later via
+ ``\sphinxtableofcontentshook``. This macro is also executed by the
+ ``'howto'`` docclass, but defaults to empty with it.
- a custom ``\sphinxmaketitle`` is defined in the class files
:file:`sphinxmanual.cls` and :file:`sphinxhowto.cls` and is used as
default setting of ``'maketitle'`` :confval:`latex_elements` key.
diff --git a/doc/theming.rst b/doc/theming.rst
index 3308fe632..4d4af4d90 100644
--- a/doc/theming.rst
+++ b/doc/theming.rst
@@ -5,6 +5,12 @@ HTML theming support
.. versionadded:: 0.6
+.. note::
+
+ This document provides information about creating your own theme. If you
+   simply wish to use a pre-existing HTML theme, refer to
+ :doc:`/usage/theming`.
+
Sphinx supports changing the appearance of its HTML output via *themes*. A
theme is a collection of HTML templates, stylesheet(s) and other static files.
Additionally, it has a configuration file which specifies from which theme to
@@ -15,265 +21,13 @@ Themes are meant to be project-unaware, so they can be used for different
projects without change.
-Using a theme
--------------
-
-Using an existing theme is easy. If the theme is builtin to Sphinx, you only
-need to set the :confval:`html_theme` config value. With the
-:confval:`html_theme_options` config value you can set theme-specific options
-that change the look and feel. For example, you could have the following in
-your :file:`conf.py`::
-
- html_theme = "classic"
- html_theme_options = {
- "rightsidebar": "true",
- "relbarbgcolor": "black"
- }
-
-That would give you the classic theme, but with a sidebar on the right side and
-a black background for the relation bar (the bar with the navigation links at
-the page's top and bottom).
-
-If the theme does not come with Sphinx, it can be in two static forms: either a
-directory (containing :file:`theme.conf` and other needed files), or a zip file
-with the same contents. Either of them must be put where Sphinx can find it;
-for this there is the config value :confval:`html_theme_path`. It gives a list
-of directories, relative to the directory containing :file:`conf.py`, that can
-contain theme directories or zip files. For example, if you have a theme in the
-file :file:`blue.zip`, you can put it right in the directory containing
-:file:`conf.py` and use this configuration::
-
- html_theme = "blue"
- html_theme_path = ["."]
-
-The third form is a python package. If a theme you want to use is distributed
-as a python package, you can use it after installing
-
-.. code-block:: bash
-
- # installing theme package
- $ pip install sphinxjp.themes.dotted
-
- # use it in your conf.py
- html_theme = "dotted"
-
-
-.. _builtin-themes:
-
-Builtin themes
---------------
-
-.. cssclass:: longtable
-
-+--------------------+--------------------+
-| **Theme overview** | |
-+--------------------+--------------------+
-| |alabaster| | |classic| |
-| | |
-| *alabaster* | *classic* |
-+--------------------+--------------------+
-| |sphinxdoc| | |scrolls| |
-| | |
-| *sphinxdoc* | *scrolls* |
-+--------------------+--------------------+
-| |agogo| | |traditional| |
-| | |
-| *agogo* | *traditional* |
-+--------------------+--------------------+
-| |nature| | |haiku| |
-| | |
-| *nature* | *haiku* |
-+--------------------+--------------------+
-| |pyramid| | |bizstyle| |
-| | |
-| *pyramid* | *bizstyle* |
-+--------------------+--------------------+
-
-.. |alabaster| image:: themes/alabaster.png
-.. |classic| image:: themes/classic.png
-.. |sphinxdoc| image:: themes/sphinxdoc.png
-.. |scrolls| image:: themes/scrolls.png
-.. |agogo| image:: themes/agogo.png
-.. |traditional| image:: themes/traditional.png
-.. |nature| image:: themes/nature.png
-.. |haiku| image:: themes/haiku.png
-.. |pyramid| image:: themes/pyramid.png
-.. |bizstyle| image:: themes/bizstyle.png
-
-Sphinx comes with a selection of themes to choose from.
-
-.. cssclass:: clear
-
-These themes are:
-
-* **basic** -- This is a basically unstyled layout used as the base for the
- other themes, and usable as the base for custom themes as well. The HTML
- contains all important elements like sidebar and relation bar. There are
- these options (which are inherited by the other themes):
-
- - **nosidebar** (true or false): Don't include the sidebar. Defaults to
- ``False``.
-
- - **sidebarwidth** (int or str): Width of the sidebar in pixels.
- This can be an int, which is interpreted as pixels or a valid CSS
- dimension string such as '70em' or '50%'. Defaults to 230 pixels.
-
- - **body_min_width** (int or str): Minimal width of the document body.
- This can be an int, which is interpreted as pixels or a valid CSS
- dimension string such as '70em' or '50%'. Use 0 if you don't want
- a width limit. Defaults may depend on the theme (often 450px).
-
- - **body_max_width** (int or str): Maximal width of the document body.
- This can be an int, which is interpreted as pixels or a valid CSS
- dimension string such as '70em' or '50%'. Use 'none' if you don't
- want a width limit. Defaults may depend on the theme (often 800px).
-
-* **alabaster** -- `Alabaster theme`_ is a modified "Kr" Sphinx theme from @kennethreitz
- (especially as used in his Requests project), which was itself originally based on
- @mitsuhiko's theme used for Flask & related projects.
- Check out at its `installation page`_ how to set up properly
- :confval:`html_sidebars` for its use.
-
- .. _Alabaster theme: https://pypi.org/project/alabaster/
- .. _installation page: https://alabaster.readthedocs.io/en/latest/installation.html
-
-* **classic** -- This is the classic theme, which looks like `the Python 2
- documentation <https://docs.python.org/2/>`_. It can be customized via
- these options:
-
- - **rightsidebar** (true or false): Put the sidebar on the right side.
- Defaults to ``False``.
-
- - **stickysidebar** (true or false): Make the sidebar "fixed" so that it
- doesn't scroll out of view for long body content. This may not work well
- with all browsers. Defaults to ``False``.
-
- - **collapsiblesidebar** (true or false): Add an *experimental* JavaScript
- snippet that makes the sidebar collapsible via a button on its side.
- Defaults to ``False``.
-
- - **externalrefs** (true or false): Display external links differently from
- internal links. Defaults to ``False``.
-
- There are also various color and font options that can change the color scheme
- without having to write a custom stylesheet:
-
- - **footerbgcolor** (CSS color): Background color for the footer line.
- - **footertextcolor** (CSS color): Text color for the footer line.
- - **sidebarbgcolor** (CSS color): Background color for the sidebar.
- - **sidebarbtncolor** (CSS color): Background color for the sidebar collapse
- button (used when *collapsiblesidebar* is ``True``).
- - **sidebartextcolor** (CSS color): Text color for the sidebar.
- - **sidebarlinkcolor** (CSS color): Link color for the sidebar.
- - **relbarbgcolor** (CSS color): Background color for the relation bar.
- - **relbartextcolor** (CSS color): Text color for the relation bar.
- - **relbarlinkcolor** (CSS color): Link color for the relation bar.
- - **bgcolor** (CSS color): Body background color.
- - **textcolor** (CSS color): Body text color.
- - **linkcolor** (CSS color): Body link color.
- - **visitedlinkcolor** (CSS color): Body color for visited links.
- - **headbgcolor** (CSS color): Background color for headings.
- - **headtextcolor** (CSS color): Text color for headings.
- - **headlinkcolor** (CSS color): Link color for headings.
- - **codebgcolor** (CSS color): Background color for code blocks.
- - **codetextcolor** (CSS color): Default text color for code blocks, if not
- set differently by the highlighting style.
-
- - **bodyfont** (CSS font-family): Font for normal text.
- - **headfont** (CSS font-family): Font for headings.
-
-* **sphinxdoc** -- The theme originally used by this documentation. It features
- a sidebar on the right side. There are currently no options beyond
- *nosidebar* and *sidebarwidth*.
-
- .. note::
-
- The Sphinx documentation now uses
- `an adjusted version of the sphinxdoc theme
- <https://github.com/sphinx-doc/sphinx/tree/master/doc/_themes/sphinx13>`_.
-
-* **scrolls** -- A more lightweight theme, based on `the Jinja documentation
- <http://jinja.pocoo.org/>`_. The following color options are available:
-
- - **headerbordercolor**
- - **subheadlinecolor**
- - **linkcolor**
- - **visitedlinkcolor**
- - **admonitioncolor**
-
-* **agogo** -- A theme created by Andi Albrecht. The following options are
- supported:
-
- - **bodyfont** (CSS font family): Font for normal text.
- - **headerfont** (CSS font family): Font for headings.
- - **pagewidth** (CSS length): Width of the page content, default 70em.
- - **documentwidth** (CSS length): Width of the document (without sidebar),
- default 50em.
- - **sidebarwidth** (CSS length): Width of the sidebar, default 20em.
- - **bgcolor** (CSS color): Background color.
- - **headerbg** (CSS value for "background"): background for the header area,
- default a grayish gradient.
- - **footerbg** (CSS value for "background"): background for the footer area,
- default a light gray gradient.
- - **linkcolor** (CSS color): Body link color.
- - **headercolor1**, **headercolor2** (CSS color): colors for <h1> and <h2>
- headings.
- - **headerlinkcolor** (CSS color): Color for the backreference link in
- headings.
- - **textalign** (CSS *text-align* value): Text alignment for the body, default
- is ``justify``.
-
-* **nature** -- A greenish theme. There are currently no options beyond
- *nosidebar* and *sidebarwidth*.
-
-* **pyramid** -- A theme from the Pyramid web framework project, designed by
- Blaise Laflamme. There are currently no options beyond *nosidebar* and
- *sidebarwidth*.
-
-* **haiku** -- A theme without sidebar inspired by the `Haiku OS user guide
- <https://www.haiku-os.org/docs/userguide/en/contents.html>`_. The following
- options are supported:
-
- - **full_logo** (true or false, default ``False``): If this is true, the
- header will only show the :confval:`html_logo`. Use this for large logos.
- If this is false, the logo (if present) will be shown floating right, and
- the documentation title will be put in the header.
- - **textcolor**, **headingcolor**, **linkcolor**, **visitedlinkcolor**,
- **hoverlinkcolor** (CSS colors): Colors for various body elements.
-
-* **traditional** -- A theme resembling the old Python documentation. There are
- currently no options beyond *nosidebar* and *sidebarwidth*.
-
-* **epub** -- A theme for the epub builder. This theme tries to save visual
- space which is a sparse resource on ebook readers. The following options
- are supported:
-
- - **relbar1** (true or false, default ``True``): If this is true, the
- `relbar1` block is inserted in the epub output, otherwise it is omitted.
- - **footer** (true or false, default ``True``): If this is true, the
- `footer` block is inserted in the epub output, otherwise it is omitted.
-
-- **bizstyle** -- A simple bluish theme. The following options are supported
- beyond *nosidebar* and *sidebarwidth*:
-
- - **rightsidebar** (true or false): Put the sidebar on the right side.
- Defaults to ``False``.
-
-.. versionadded:: 1.3
- 'alabaster', 'sphinx_rtd_theme' and 'bizstyle' theme.
-
-.. versionchanged:: 1.3
- The 'default' theme has been renamed to 'classic'. 'default' is still
- available, however it will emit a notice that it is an alias for the new
- 'alabaster' theme.
-
Creating themes
---------------
-As said, themes are either a directory or a zipfile (whose name is the theme
-name), containing the following:
+Themes take the form of either a directory or a zipfile (whose name is the
+theme name), containing the following:
-* A :file:`theme.conf` file, see below.
+* A :file:`theme.conf` file.
* HTML templates, if needed.
* A ``static/`` directory containing any static files that will be copied to the
output static directory on build. These can be images, styles, script files.
@@ -295,7 +49,8 @@ Python :mod:`ConfigParser` module) and has the following structure:
* The **inherit** setting gives the name of a "base theme", or ``none``. The
base theme will be used to locate missing templates (most themes will not have
to supply most templates if they use ``basic`` as the base theme), its options
- will be inherited, and all of its static files will be used as well.
+ will be inherited, and all of its static files will be used as well. If you want
+ to also inherit the stylesheet, include it via CSS' ``@import`` in your own.
* The **stylesheet** setting gives the name of a CSS file which will be
referenced in the HTML header. If you need more than one CSS file, either
@@ -318,16 +73,17 @@ Python :mod:`ConfigParser` module) and has the following structure:
.. versionadded:: 1.7
sidebar settings
+
.. _distribute-your-theme:
-Distribute your theme as a python package
+Distribute your theme as a Python package
-----------------------------------------
-As a way to distribute your theme, you can use python package. Python package
+As a way to distribute your theme, you can use Python package. Python package
brings to users easy setting up ways.
-To distribute your theme as a python package, please define an entry point
-called ``sphinx.html_themes`` in your setup.py file, and write a ``setup()``
+To distribute your theme as a Python package, please define an entry point
+called ``sphinx.html_themes`` in your ``setup.py`` file, and write a ``setup()``
function to register your themes using ``add_html_theme()`` API in it::
# 'setup.py'
@@ -347,9 +103,8 @@ function to register your themes using ``add_html_theme()`` API in it::
def setup(app):
app.add_html_theme('name_of_theme', path.abspath(path.dirname(__file__)))
-
-If your theme package contains two or more themes, please call ``add_html_theme()``
-twice or more.
+If your theme package contains two or more themes, please call
+``add_html_theme()`` twice or more.
.. versionadded:: 1.2
'sphinx_themes' entry_points feature.
@@ -360,8 +115,9 @@ twice or more.
.. versionadded:: 1.6
``sphinx.html_themes`` entry_points feature.
+
Templating
-~~~~~~~~~~
+----------
The :doc:`guide to templating <templating>` is helpful if you want to write your
own templates. What is important to keep in mind is the order in which Sphinx
@@ -376,7 +132,6 @@ name as an explicit directory: ``{% extends "basic/layout.html" %}``. From a
user ``templates_path`` template, you can still use the "exclamation mark"
syntax as described in the templating document.
-
Static templates
~~~~~~~~~~~~~~~~
@@ -393,40 +148,6 @@ templating to put the color options into the stylesheet. When a documentation
is built with the classic theme, the output directory will contain a
``_static/classic.css`` file where all template tags have been processed.
-
.. [1] It is not an executable Python file, as opposed to :file:`conf.py`,
because that would pose an unnecessary security risk if themes are
shared.
-
-Third Party Themes
-------------------
-
-.. cssclass:: longtable
-
-+--------------------+--------------------+
-| **Theme overview** | |
-+--------------------+--------------------+
-| |sphinx_rtd_theme| | |
-| | |
-| *sphinx_rtd_theme* | |
-+--------------------+--------------------+
-
-.. |sphinx_rtd_theme| image:: themes/sphinx_rtd_theme.png
-
-* **sphinx_rtd_theme** -- `Read the Docs Sphinx Theme`_.
- This is a mobile-friendly sphinx theme that was made for readthedocs.org.
- View a working demo over on readthedocs.org. You can get install and options
- information at `Read the Docs Sphinx Theme`_ page.
-
- .. _Read the Docs Sphinx Theme: https://pypi.org/project/sphinx_rtd_theme/
-
- .. versionchanged:: 1.4
- **sphinx_rtd_theme** has become optional.
-
-
-Besides this, there are a lot of third party themes. You can find them on
-PyPI__, GitHub__, sphinx-themes.org__ and so on.
-
-.. __: https://pypi.org/search/?q=&o=&c=Framework+%3A%3A+Sphinx+%3A%3A+Theme
-.. __: https://github.com/search?utf8=%E2%9C%93&q=sphinx+theme&type=
-.. __: https://sphinx-themes.org/
diff --git a/doc/intl.rst b/doc/usage/advanced/intl.rst
index 129665dde..0174078eb 100644
--- a/doc/intl.rst
+++ b/doc/usage/advanced/intl.rst
@@ -9,7 +9,7 @@ Complementary to translations provided for Sphinx-generated messages such as
navigation bars, Sphinx provides mechanisms facilitating *document* translations
in itself. See the :ref:`intl-options` for details on configuration.
-.. figure:: translation.png
+.. figure:: /_static/translation.png
:width: 100%
Workflow visualization of translations in Sphinx. (The stick-figure is taken
@@ -46,14 +46,14 @@ They can be delivered to translators which will transform them to ``.po`` files
--- so called **message catalogs** --- containing a mapping from the original
messages to foreign-language strings.
-Gettext compiles them into a binary format known as **binary catalogs** through
-:program:`msgfmt` for efficiency reasons. If you make these files discoverable
-with :confval:`locale_dirs` for your :confval:`language`, Sphinx will pick them
-up automatically.
+*gettext* compiles them into a binary format known as **binary catalogs**
+through :program:`msgfmt` for efficiency reasons. If you make these files
+discoverable with :confval:`locale_dirs` for your :confval:`language`, Sphinx
+will pick them up automatically.
An example: you have a document ``usage.rst`` in your Sphinx project. The
-gettext builder will put its messages into ``usage.pot``. Imagine you have
-Spanish translations [2]_ on your hands in ``usage.po`` --- for your builds to
+*gettext* builder will put its messages into ``usage.pot``. Imagine you have
+Spanish translations [2]_ stored in ``usage.po`` --- for your builds to
be translated you need to follow these instructions:
* Compile your message catalog to a locale directory, say ``locale``, so it
@@ -63,7 +63,8 @@ be translated you need to follow these instructions:
msgfmt "usage.po" -o "locale/es/LC_MESSAGES/usage.mo"
* Set :confval:`locale_dirs` to ``["locale/"]``.
-* Set :confval:`language` to ``es`` (also possible via :option:`-D <sphinx-build -D>`).
+* Set :confval:`language` to ``es`` (also possible via
+ :option:`-D <sphinx-build -D>`).
* Run your desired build.
@@ -71,118 +72,124 @@ Translating with sphinx-intl
----------------------------
Quick guide
-^^^^^^^^^^^
+~~~~~~~~~~~
-`sphinx-intl`_ is a useful tool to work with Sphinx translation flow.
-This section describe an easy way to translate with sphinx-intl.
+`sphinx-intl`_ is a useful tool to work with Sphinx translation flow. This
+section describes an easy way to translate with *sphinx-intl*.
-#. Install `sphinx-intl`_ by :command:`pip install sphinx-intl` or
- :command:`easy_install sphinx-intl`.
+#. Install `sphinx-intl`_.
-#. Add configurations to your `conf.py`::
+ .. code-block:: console
+
+ $ pip install sphinx-intl
+
+#. Add configurations to ``conf.py``.
+
+ ::
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
- This case-study assumes that :confval:`locale_dirs` is set to 'locale/' and
- :confval:`gettext_compact` is set to `False` (the Sphinx document is
+ This case-study assumes that :confval:`locale_dirs` is set to ``locale/`` and
+ :confval:`gettext_compact` is set to ``False`` (the Sphinx document is
already configured as such).
-#. Extract document's translatable messages into pot files:
+#. Extract translatable messages into pot files.
.. code-block:: console
$ make gettext
- As a result, many pot files are generated under ``_build/gettext``
- directory.
+ The generated pot files will be placed in the ``_build/gettext`` directory.
-#. Setup/Update your `locale_dir`:
+#. Generate po files.
+
+ We'll use the pot files generated in the above step.
.. code-block:: console
$ sphinx-intl update -p _build/gettext -l de -l ja
- Done. You got these directories that contain po files:
+ Once completed, the generated po files will be placed in the below
+ directories:
+
+ * ``./locale/de/LC_MESSAGES/``
+ * ``./locale/ja/LC_MESSAGES/``
+
+#. Translate po files.
+
+   As noted above, these are located in the ``./locale/<lang>/LC_MESSAGES``
+ directory. An example of one such file, from Sphinx, ``builders.po``, is
+ given below.
- * `./locale/de/LC_MESSAGES/`
- * `./locale/ja/LC_MESSAGES/`
+ .. code-block:: po
-#. Translate your po files under `./locale/<lang>/LC_MESSAGES/`.
+ # a5600c3d2e3d48fc8c261ea0284db79b
+ #: ../../builders.rst:4
+ msgid "Available builders"
+ msgstr "<FILL HERE BY TARGET LANGUAGE>"
-#. make translated document.
+   In another case, the msgid is multi-line text and contains reStructuredText syntax:
+
+ .. code-block:: po
+
+ # 302558364e1d41c69b3277277e34b184
+ #: ../../builders.rst:9
+ msgid ""
+ "These are the built-in Sphinx builders. More builders can be added by "
+ ":ref:`extensions <extensions>`."
+ msgstr ""
+ "FILL HERE BY TARGET LANGUAGE FILL HERE BY TARGET LANGUAGE FILL HERE "
+ "BY TARGET LANGUAGE :ref:`EXTENSIONS <extensions>` FILL HERE."
+
+ Please be careful not to break reST notation. Most po-editors will help you
+ with that.
+
+#. Build translated document.
You need a :confval:`language` parameter in ``conf.py`` or you may also
- specify the parameter on the command line (for BSD/GNU make):
+ specify the parameter on the command line.
+
+   For BSD/GNU make, run:
.. code-block:: console
$ make -e SPHINXOPTS="-D language='de'" html
- command line (for Windows cmd.exe):
+ For Windows :command:`cmd.exe`, run:
.. code-block:: console
> set SPHINXOPTS=-D language=de
> .\make.bat html
- command line (for PowerShell):
+ For PowerShell, run:
.. code-block:: console
> Set-Item env:SPHINXOPTS "-D language=de"
> .\make.bat html
-
Congratulations! You got the translated documentation in the ``_build/html``
directory.
.. versionadded:: 1.3
- sphinx-build that is invoked by make command will build po files into mo files.
-
- If you are using 1.2.x or earlier, please invoke ``sphinx-intl build`` command
- before make command.
+ :program:`sphinx-build` that is invoked by make command will build po files
+ into mo files.
+ If you are using 1.2.x or earlier, please invoke :command:`sphinx-intl build`
+ command before :command:`make` command.
Translating
-^^^^^^^^^^^
-
-Translate po file under ``./locale/de/LC_MESSAGES`` directory.
-The case of builders.po file for sphinx document:
-
-.. code-block:: po
-
- # a5600c3d2e3d48fc8c261ea0284db79b
- #: ../../builders.rst:4
- msgid "Available builders"
- msgstr "<FILL HERE BY TARGET LANGUAGE>"
-
-Another case, msgid is multi-line text and contains reStructuredText
-syntax:
-
-.. code-block:: po
-
- # 302558364e1d41c69b3277277e34b184
- #: ../../builders.rst:9
- msgid ""
- "These are the built-in Sphinx builders. More builders can be added by "
- ":ref:`extensions <extensions>`."
- msgstr ""
- "FILL HERE BY TARGET LANGUAGE FILL HERE BY TARGET LANGUAGE FILL HERE "
- "BY TARGET LANGUAGE :ref:`EXTENSIONS <extensions>` FILL HERE."
-
-Please be careful not to break reST notation. Most po-editors will help you
-with that.
-
+~~~~~~~~~~~
Update your po files by new pot files
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If a document is updated, it is necessary to generate updated pot files
-and to apply differences to translated po files.
-In order to apply the updating difference of a pot file to po file,
-use the :command:`sphinx-intl update` command.
+If a document is updated, it is necessary to generate updated pot files and to
+apply differences to translated po files. In order to apply the updates from a
+pot file to the po file, use the :command:`sphinx-intl update` command.
.. code-block:: console
@@ -199,7 +206,7 @@ easy to fetch and push translations.
.. TODO: why use transifex?
-#. Install `transifex-client`_
+#. Install `transifex-client`_.
You need :command:`tx` command to upload resources (pot files).
@@ -209,8 +216,7 @@ easy to fetch and push translations.
.. seealso:: `Transifex Client documentation`_
-
-#. Create your transifex_ account and create new project for your document
+#. Create your transifex_ account and create new project for your document.
Currently, transifex does not allow for a translation project to have more
than one version of the document, so you'd better include a version number in
@@ -221,8 +227,7 @@ easy to fetch and push translations.
:Project ID: ``sphinx-document-test_1_0``
:Project URL: ``https://www.transifex.com/projects/p/sphinx-document-test_1_0/``
-
-#. Create config files for tx command
+#. Create config files for :command:`tx` command.
This process will create ``.tx/config`` in the current directory, as well as
a ``~/.transifexrc`` file that includes auth information.
@@ -238,7 +243,7 @@ easy to fetch and push translations.
...
Done.
-#. Upload pot files to transifex service
+#. Upload pot files to transifex service.
Register pot files to ``.tx/config`` file:
@@ -259,15 +264,14 @@ easy to fetch and push translations.
...
Done.
-
-#. Forward the translation on transifex
+#. Forward the translation on transifex.
.. TODO: write this section
+#. Pull translated po files and make translated HTML.
-#. Pull translated po files and make translated html
-
- Get translated catalogs and build mo files (ex. for 'de'):
+ Get translated catalogs and build mo files. For example, to build mo files
+ for German (de):
.. code-block:: console
@@ -278,32 +282,29 @@ easy to fetch and push translations.
...
Done.
- Invoke make html (for BSD/GNU make):
+ Invoke :command:`make html` (for BSD/GNU make):
.. code-block:: console
$ make -e SPHINXOPTS="-D language='de'" html
-
That's all!
-
.. tip:: Translating locally and on Transifex
If you want to push all language's po files, you can be done by using
- :command:`tx push -t` command.
- Watch out! This operation overwrites translations in transifex.
+ :command:`tx push -t` command. Watch out! This operation overwrites
+ translations in transifex.
In other words, if you have updated each in the service and local po files,
it would take much time and effort to integrate them.
-
Contributing to Sphinx reference translation
--------------------------------------------
-The recommended way for new contributors to translate Sphinx reference
-is to join the translation team on Transifex.
+The recommended way for new contributors to translate Sphinx reference is to
+join the translation team on Transifex.
There is `sphinx translation page`_ for Sphinx (master) documentation.
@@ -311,8 +312,7 @@ There is `sphinx translation page`_ for Sphinx (master) documentation.
2. Go to `sphinx translation page`_.
3. Click ``Request language`` and fill form.
4. Wait acceptance by transifex sphinx translation maintainers.
-5. (after acceptance) translate on transifex.
-
+5. (After acceptance) Translate on transifex.
.. rubric:: Footnotes
@@ -321,9 +321,8 @@ There is `sphinx translation page`_ for Sphinx (master) documentation.
for details on that software suite.
.. [2] Because nobody expects the Spanish Inquisition!
-
.. _`transifex-client`: https://pypi.org/project/transifex-client/
.. _`sphinx-intl`: https://pypi.org/project/sphinx-intl/
.. _Transifex: https://www.transifex.com/
.. _`sphinx translation page`: https://www.transifex.com/sphinx-doc/sphinx-doc/
-.. _`Transifex Client documentation`: http://docs.transifex.com/developer/client/
+.. _`Transifex Client documentation`: https://docs.transifex.com/client/introduction/
diff --git a/doc/setuptools.rst b/doc/usage/advanced/setuptools.rst
index 10cc6a77d..10cc6a77d 100644
--- a/doc/setuptools.rst
+++ b/doc/usage/advanced/setuptools.rst
diff --git a/doc/web/api.rst b/doc/usage/advanced/websupport/api.rst
index 81d25b79f..79b51ee6d 100644
--- a/doc/web/api.rst
+++ b/doc/usage/advanced/websupport/api.rst
@@ -65,7 +65,7 @@ The WebSupport Class
Methods
-~~~~~~~
+-------
.. automethod:: sphinxcontrib.websupport.WebSupport.build
diff --git a/doc/websupport.rst b/doc/usage/advanced/websupport/index.rst
index 3ccae2467..081664051 100644
--- a/doc/websupport.rst
+++ b/doc/usage/advanced/websupport/index.rst
@@ -10,7 +10,7 @@ web application. To learn more read the :ref:`websupportquickstart`.
.. toctree::
- web/quickstart
- web/api
- web/searchadapters
- web/storagebackends
+ quickstart
+ api
+ searchadapters
+ storagebackends
diff --git a/doc/web/quickstart.rst b/doc/usage/advanced/websupport/quickstart.rst
index 0ef735ca9..de7692231 100644
--- a/doc/web/quickstart.rst
+++ b/doc/usage/advanced/websupport/quickstart.rst
@@ -4,7 +4,7 @@ Web Support Quick Start
=======================
Building Documentation Data
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
To make use of the web support package in your application you'll need to build
the data it uses. This data includes pickle files representing documents,
@@ -20,11 +20,11 @@ things are in a document. To do this you will need to create an instance of the
support.build()
-This will read reStructuredText sources from `srcdir` and place the necessary
-data in `builddir`. The `builddir` will contain two sub-directories: one named
-"data" that contains all the data needed to display documents, search through
-documents, and add comments to documents. The other directory will be called
-"static" and contains static files that should be served from "/static".
+This will read reStructuredText sources from ``srcdir`` and place the necessary
+data in ``builddir``. The ``builddir`` will contain two sub-directories: one
+named "data" that contains all the data needed to display documents, search
+through documents, and add comments to documents. The other directory will be
+called "static" and contains static files that should be served from "/static".
.. note::
@@ -34,7 +34,7 @@ documents, and add comments to documents. The other directory will be called
Integrating Sphinx Documents Into Your Webapp
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
Now that the data is built, it's time to do something useful with it. Start off
by creating a :class:`~.WebSupport` object for your application::
@@ -96,7 +96,7 @@ integrate with your existing templating system. An example using `Jinja2
Authentication
---------------
+~~~~~~~~~~~~~~
To use certain features such as voting, it must be possible to authenticate
users. The details of the authentication are left to your application. Once a
@@ -146,13 +146,14 @@ add this data to the ``COMMENT_OPTIONS`` that are used in the template.
Performing Searches
-~~~~~~~~~~~~~~~~~~~
+-------------------
To use the search form built-in to the Sphinx sidebar, create a function to
-handle requests to the url 'search' relative to the documentation root. The
+handle requests to the URL 'search' relative to the documentation root. The
user's search query will be in the GET parameters, with the key `q`. Then use
-the :meth:`~sphinxcontrib.websupport.WebSupport.get_search_results` method to retrieve
-search results. In `Flask <http://flask.pocoo.org/>`_ that would be like this::
+the :meth:`~sphinxcontrib.websupport.WebSupport.get_search_results` method to
+retrieve search results. In `Flask <http://flask.pocoo.org/>`_ that would be
+like this::
@app.route('/search')
def search():
@@ -167,7 +168,7 @@ does.
Comments & Proposals
-~~~~~~~~~~~~~~~~~~~~
+--------------------
Now that this is done it's time to define the functions that handle the AJAX
calls from the script. You will need three functions. The first function is
@@ -186,9 +187,9 @@ used to add a new comment, and will call the web support method
username=username, proposal=proposal)
return jsonify(comment=comment)
-You'll notice that both a `parent_id` and `node_id` are sent with the
-request. If the comment is being attached directly to a node, `parent_id`
-will be empty. If the comment is a child of another comment, then `node_id`
+You'll notice that both a ``parent_id`` and ``node_id`` are sent with the
+request. If the comment is being attached directly to a node, ``parent_id``
+will be empty. If the comment is a child of another comment, then ``node_id``
will be empty. Then next function handles the retrieval of comments for a
specific node, and is aptly named
:meth:`~sphinxcontrib.websupport.WebSupport.get_data`::
@@ -217,11 +218,11 @@ and will handle user votes on comments::
Comment Moderation
-~~~~~~~~~~~~~~~~~~
+------------------
By default, all comments added through :meth:`~.WebSupport.add_comment` are
automatically displayed. If you wish to have some form of moderation, you can
-pass the `displayed` keyword argument::
+pass the ``displayed`` keyword argument::
comment = support.add_comment(text, node_id='node_id',
parent_id='parent_id',
diff --git a/doc/web/searchadapters.rst b/doc/usage/advanced/websupport/searchadapters.rst
index e44584797..262d66692 100644
--- a/doc/web/searchadapters.rst
+++ b/doc/usage/advanced/websupport/searchadapters.rst
@@ -26,13 +26,13 @@ documentation of the :class:`BaseSearch` class below.
BaseSearch class is moved to sphinxcontrib.websupport.search from
sphinx.websupport.search.
-BaseSearch Methods
-~~~~~~~~~~~~~~~~~~
+Methods
+-------
- The following methods are defined in the BaseSearch class. Some methods do
- not need to be overridden, but some (:meth:`~BaseSearch.add_document` and
- :meth:`~BaseSearch.handle_query`) must be overridden in your subclass. For a
- working example, look at the built-in adapter for whoosh.
+The following methods are defined in the BaseSearch class. Some methods do not
+need to be overridden, but some (:meth:`~BaseSearch.add_document` and
+:meth:`~BaseSearch.handle_query`) must be overridden in your subclass. For a
+working example, look at the built-in adapter for whoosh.
.. automethod:: BaseSearch.init_indexing
diff --git a/doc/web/storagebackends.rst b/doc/usage/advanced/websupport/storagebackends.rst
index 1690a1420..ccb00b63c 100644
--- a/doc/web/storagebackends.rst
+++ b/doc/usage/advanced/websupport/storagebackends.rst
@@ -27,8 +27,8 @@ documentation of the :class:`StorageBackend` class below.
sphinx.websupport.storage.
-StorageBackend Methods
-~~~~~~~~~~~~~~~~~~~~~~
+Methods
+-------
.. automethod:: StorageBackend.pre_build
diff --git a/doc/usage/builders/index.rst b/doc/usage/builders/index.rst
index bb107162c..b9e107699 100644
--- a/doc/usage/builders/index.rst
+++ b/doc/usage/builders/index.rst
@@ -158,23 +158,41 @@ The builder's "name" must be given to the **-b** command-line option of
chapter :ref:`latex-options` for details.
The produced LaTeX file uses several LaTeX packages that may not be present
- in a "minimal" TeX distribution installation. For example, on Ubuntu, the
- following packages need to be installed for successful PDF builds:
+ in a "minimal" TeX distribution installation.
+
+ On Ubuntu xenial, the following packages need to be installed for
+ successful PDF builds:
* ``texlive-latex-recommended``
* ``texlive-fonts-recommended``
* ``texlive-latex-extra``
- * ``latexmk`` (for ``make latexpdf`` on GNU/Linux and MacOS X)
- * ``latex-xcolor`` (old Ubuntu)
- * ``texlive-luatex``, ``texlive-xetex`` (see :confval:`latex_engine`)
-
- The testing of Sphinx LaTeX is done on Ubuntu trusty with the above
- mentioned packages, which are from a TeXLive 2013 snapshot dated
- February 2014.
+ * ``latexmk`` (this is a Sphinx requirement on GNU/Linux and MacOS X
+ for functioning of ``make latexpdf``)
+
+ Additional packages are needed in some circumstances (see the discussion of
+ the ``'fontpkg'`` key of :confval:`latex_elements` for more information):
+
+ * to support occasional Cyrillic letters or words, and a fortiori if
+ :confval:`language` is set to a Cyrillic language, the package
+ ``texlive-lang-cyrillic`` is required, and, with unmodified ``'fontpkg'``,
+ also ``cm-super`` or ``cm-super-minimal``,
+ * to support occasional Greek letters or words (in text, not in
+ :rst:dir:`math` directive contents), ``texlive-lang-greek`` is required,
+ and, with unmodified ``'fontpkg'``, also ``cm-super`` or
+ ``cm-super-minimal``,
+ * for ``'xelatex'`` or ``'lualatex'`` (see :confval:`latex_engine`),
+ ``texlive-xetex`` resp. ``texlive-luatex``, and, if leaving unchanged
+ ``'fontpkg'``, ``fonts-freefont-otf``.
+
+ The testing of Sphinx LaTeX is done on Ubuntu xenial whose TeX distribution
+ is based on a TeXLive 2015 snapshot dated March 2016.
.. versionchanged:: 1.6
Formerly, testing had been done on Ubuntu precise (TeXLive 2009).
+ .. versionchanged:: 2.0
+ Formerly, testing had been done on Ubuntu trusty (TeXLive 2013).
+
.. note::
Since 1.6, ``make latexpdf`` uses ``latexmk`` (not on Windows). This
@@ -190,20 +208,16 @@ The builder's "name" must be given to the **-b** command-line option of
reduces console output to a minimum.
- Also, if ``latexmk`` version is 4.52b or higher (Jan 17)
- ``LATEXMKOPTS="-xelatex"`` will speed up PDF builds via XeLateX in case
+ Also, if ``latexmk`` is at version 4.52b or higher (January 2017)
+ ``LATEXMKOPTS="-xelatex"`` speeds up PDF builds via XeLateX in case
of numerous graphics inclusions.
- .. code-block:: console
-
- make latexpdf LATEXMKOPTS="-xelatex"
-
- To pass options directly to the ``(pdf|xe|lua)latex`` executable, use
- variable ``LATEXOPTS``.
+ To pass options directly to the ``(pdf|xe|lua)latex`` binary, use
+ variable ``LATEXOPTS``, for example:
.. code-block:: console
- make latexpdf LATEXOPTS="--interaction=nonstopmode"
+ make latexpdf LATEXOPTS="--halt-on-error"
.. autoattribute:: name
@@ -215,7 +229,7 @@ Note that a direct PDF builder is being provided by `rinohtype`_. The builder's
name is ``rinoh``. Refer to the `rinohtype manual`_ for details.
.. _rinohtype: https://github.com/brechtm/rinohtype
-.. _rinohtype manual: http://www.mos6581.org/rinohtype/quickstart.html#sphinx-builder
+.. _rinohtype manual: https://www.mos6581.org/rinohtype/quickstart.html#sphinx-builder
.. module:: sphinx.builders.text
.. class:: TextBuilder
diff --git a/doc/usage/configuration.rst b/doc/usage/configuration.rst
index bef43c81a..756e06cb9 100644
--- a/doc/usage/configuration.rst
+++ b/doc/usage/configuration.rst
@@ -40,9 +40,7 @@ Important points to note:
contain the file name extension.
* Since :file:`conf.py` is read as a Python file, the usual rules apply for
- encodings and Unicode support: declare the encoding using an encoding cookie
- (a comment like ``# -*- coding: utf-8 -*-``) and use Unicode string literals
- when you include non-ASCII characters in configuration values.
+ encodings and Unicode support.
* The contents of the config namespace are pickled (so that Sphinx can find out
when configuration changes), so it may not contain unpickleable values --
@@ -60,6 +58,36 @@ Important points to note:
created *after* the builder is initialized.
+Project information
+-------------------
+
+.. confval:: project
+
+ The documented project's name.
+
+.. confval:: author
+
+ The author name(s) of the document. The default value is ``'unknown'``.
+
+.. confval:: copyright
+
+ A copyright statement in the style ``'2008, Author Name'``.
+
+.. confval:: version
+
+ The major project version, used as the replacement for ``|version|``. For
+ example, for the Python documentation, this may be something like ``2.6``.
+
+.. confval:: release
+
+ The full project version, used as the replacement for ``|release|`` and
+ e.g. in the HTML templates. For example, for the Python documentation, this
+ may be something like ``2.6.0rc1``.
+
+ If you don't need the separation provided between :confval:`version` and
+ :confval:`release`, just set them both to the same value.
+
+
General configuration
---------------------
@@ -150,7 +178,10 @@ General configuration
.. confval:: master_doc
The document name of the "master" document, that is, the document that
- contains the root :rst:dir:`toctree` directive. Default is ``'contents'``.
+ contains the root :rst:dir:`toctree` directive. Default is ``'index'``.
+
+ .. versionchanged:: 2.0
+ The default is changed to ``'index'`` from ``'contents'``.
.. confval:: exclude_patterns
@@ -478,36 +509,6 @@ General configuration
.. versionadded:: 1.5
-
-Project information
--------------------
-
-.. confval:: project
-
- The documented project's name.
-
-.. confval:: author
-
- The author name(s) of the document. The default value is ``'unknown'``.
-
-.. confval:: copyright
-
- A copyright statement in the style ``'2008, Author Name'``.
-
-.. confval:: version
-
- The major project version, used as the replacement for ``|version|``. For
- example, for the Python documentation, this may be something like ``2.6``.
-
-.. confval:: release
-
- The full project version, used as the replacement for ``|release|`` and
- e.g. in the HTML templates. For example, for the Python documentation, this
- may be something like ``2.6.0rc1``.
-
- If you don't need the separation provided between :confval:`version` and
- :confval:`release`, just set them both to the same value.
-
.. confval:: today
today_fmt
@@ -814,7 +815,7 @@ that use Sphinx's HTMLWriter class.
.. confval:: html_theme
The "theme" that the HTML output should use. See the :doc:`section about
- theming </theming>`. The default is ``'alabaster'``.
+ theming </usage/theming>`. The default is ``'alabaster'``.
.. versionadded:: 0.6
@@ -1109,12 +1110,6 @@ that use Sphinx's HTMLWriter class.
If true, the reST sources are included in the HTML build as
:file:`_sources/{name}`. The default is ``True``.
- .. warning::
-
- If this config value is set to ``False``, the JavaScript search function
- will only display the titles of matching documents, and no excerpt from
- the matching contents.
-
.. confval:: html_show_sourcelink
If true (and :confval:`html_copy_source` is true as well), links to the
@@ -1252,7 +1247,7 @@ that use Sphinx's HTMLWriter class.
:'sphinx.search.ja.DefaultSplitter':
TinySegmenter algorithm. This is default splitter.
- :'sphinx.search.ja.MeCabSplitter':
+ :'sphinx.search.ja.MecabSplitter':
MeCab binding. To use this splitter, 'mecab' python binding or dynamic
link library ('libmecab.so' for linux, 'libmecab.dll' for windows) is
required.
@@ -1354,6 +1349,19 @@ Options for HTML help output
Output file base name for HTML help builder. Default is ``'pydoc'``.
+.. confval:: htmlhelp_file_suffix
+
+ This is the file name suffix for generated HTML help files. The
+ default is ``".html"``.
+
+ .. versionadded:: 2.0
+
+.. confval:: htmlhelp_link_suffix
+
+ Suffix for generated links to HTML files. The default is ``".html"``.
+
+ .. versionadded:: 2.0
+
.. _applehelp-options:
@@ -1787,42 +1795,29 @@ information.
* ``'lualatex'`` -- LuaLaTeX
* ``'platex'`` -- pLaTeX (default if :confval:`language` is ``'ja'``)
- PDFLaTeX's support for Unicode characters covers those from the document
- language (the LaTeX ``babel`` and ``inputenc`` packages map them to glyph
- slots in the document font, at various encodings allowing each only 256
- characters; Sphinx uses by default (except for Cyrillic languages) the
- ``times`` package), but stray characters from other scripts or special
- symbols may require adding extra LaTeX packages or macros to the LaTeX
- preamble.
-
- If your project uses such extra Unicode characters, switching the engine to
- XeLaTeX or LuaLaTeX and setting up the document to use an OpenType font
- with wide-enough glyph coverage is often easier than sticking with PDFLaTeX
- and trying to get it to work with the Unicode characters.
-
- The :confval:`latex_elements` ``'fontpkg'`` key allows to set up the
- document fonts, see :ref:`this example <latex-basic>`. Currently, for
- XeLaTeX and LuaLaTeX, Sphinx leaves this key empty and LaTeX then defaults
- to the `Latin Modern`_ font family (from the TeX distribution fonts). This
- font family provides good coverage of Latin scripts (European languages,
- Vietnamese) but Cyrillic requires some other OpenType font; for example
- Computer Modern Unicode (see `babel-russian`_ documentation on how to load
- it in the LaTeX document). In future, it is planned Sphinx will provide
- another default choice of OpenType font than `Latin Modern`_, perhaps
- `Libertinus`_, which is included in recent TeX distributions and supports
- Latin and Cyrillic and also has an accompanying math font.
-
- With XeLaTeX and LuaLaTeX, Sphinx configures the LaTeX document to use
- `polyglossia`_. For some languages the `babel`_ support appears
- preferable; Sphinx uses currently `babel`_ for French and perhaps will also
- for some more languages in future. One can use the
- :confval:`latex_elements` ``'babel'`` key to override Sphinx's default.
-
- .. _`Latin Modern`: http://www.gust.org.pl/projects/e-foundry/latin-modern
- .. _`polyglossia`: https://ctan.org/pkg/polyglossia
- .. _`babel`: https://ctan.org/pkg/babel
- .. _`babel-russian`: https://ctan.org/pkg/babel-russian
- .. _`Libertinus`: https://ctan.org/pkg/libertinus
+ ``'pdflatex'``\ 's support for Unicode characters is limited.
+
+ .. note::
+
+ 2.0 adds to ``'pdflatex'`` support in Latin language document of
+ occasional Cyrillic or Greek letters or words. This is not automatic,
+ see the discussion of the :confval:`latex_elements` ``'fontenc'`` key.
+
+ If your project uses Unicode characters, setting the engine to
+ ``'xelatex'`` or ``'lualatex'`` and making sure to use an OpenType font
+ with wide-enough glyph coverage is often easier than trying to make
+ ``'pdflatex'`` work with the extra Unicode characters. Since Sphinx 2.0
+ the default is the GNU FreeFont which covers well Latin, Cyrillic and Greek.
+
+ Contrarily to :ref:`MathJaX math rendering in HTML output <math-support>`,
+ LaTeX requires some extra configuration to support Unicode literals in
+ :rst:dir:`math`: the only comprehensive solution (as far as we know) is to
+ use ``'xelatex'`` or ``'lualatex'`` *and* to add
+ ``r'\usepackage{unicode-math}'`` (e.g. via the :confval:`latex_elements`
+ ``'preamble'`` key). You may prefer
+ ``r'\usepackage[math-style=literal]{unicode-math}'`` to keep a Unicode
+ literal such as ``α`` (U+03B1) for example as is in output, rather than
+ being rendered as :math:`\alpha`.
.. confval:: latex_documents
@@ -2024,6 +2019,20 @@ information.
``english`` is used if no language.) For Japanese documents, the
default is the empty string.
+ With XeLaTeX and LuaLaTeX, Sphinx configures the LaTeX document to use
+ `polyglossia`_, but one should be aware that current `babel`_ has
+ improved its support for Unicode engines in recent years and for some
+ languages it may make sense to prefer ``babel`` over ``polyglossia``.
+
+ .. hint::
+
+         After modifying a core LaTeX key like this one, clean up the LaTeX
+ build repertory before next PDF build, else left-over auxiliary
+ files are likely to break the build.
+
+ .. _`polyglossia`: https://ctan.org/pkg/polyglossia
+ .. _`babel`: https://ctan.org/pkg/babel
+
.. versionchanged:: 1.5
For :confval:`latex_engine` set to ``'xelatex'``, the default
is ``'\\usepackage{polyglossia}\n\\setmainlanguage{<language>}'``.
@@ -2034,17 +2043,58 @@ information.
``babel``, not ``polyglossia``.
``'fontpkg'``
- Font package inclusion, default ``'\\usepackage{times}'`` (which uses
- Times for text, Helvetica for sans serif and Courier for code-blocks).
+ Font package inclusion, the default is ``'\\usepackage{times}'`` which
+ uses Times for text, Helvetica for sans serif and Courier for monospace.
.. versionchanged:: 1.2
Defaults to ``''`` when the :confval:`language` uses the Cyrillic
script.
- .. versionchanged:: 1.5
- Defaults to ``''`` when :confval:`latex_engine` is ``'xelatex'``.
- .. versionchanged:: 1.6
- Defaults to ``''`` also with ``'lualatex'``.
-
+ .. versionchanged:: 2.0
+ Support for individual Greek and Cyrillic letters:
+
+ - In order to support occasional Cyrillic (физика частиц)
+ or Greek letters (Σωματιδιακή φυσική) in
+ a document whose language is English or a Latin European
+ one, the default set-up is enhanced (only for ``'pdflatex'``
+ engine) to do:
+
+ .. code-block:: latex
+
+ \substitutefont{LGR}{\rmdefault}{cmr}
+ \substitutefont{LGR}{\sfdefault}{cmss}
+ \substitutefont{LGR}{\ttdefault}{cmtt}
+ \substitutefont{X2}{\rmdefault}{cmr}
+ \substitutefont{X2}{\sfdefault}{cmss}
+ \substitutefont{X2}{\ttdefault}{cmtt}
+
+ but this is activated only under the condition that the
+ ``'fontenc'`` key is configured to load the ``LGR`` (Greek)
+ and/or ``X2`` (Cyrillic) pdflatex-font encodings (if the
+ :confval:`language` is set to a Cyrillic language, this
+ ``'fontpkg'`` key must be used as "times" package has no direct
+ support for it; then keep only ``LGR`` lines from the above,
+ if support is needed for Greek in the text).
+
+ The ``\substitutefont`` command is from the eponymous LaTeX
+ package, which is loaded by Sphinx if needed (on Ubuntu xenial it
+ is part of ``texlive-latex-extra`` which is a Sphinx
+ requirement).
+
+ Only if the document actually does contain Unicode Greek letters
+ (in text) or Cyrillic letters, will the above default set-up
+ cause additional requirements for the PDF build. On Ubuntu
+ xenial, ``texlive-lang-greek``, ``texlive-lang-cyrillic``, and
+ (with the above choice of fonts) the ``cm-super`` (or
+ ``cm-super-minimal``) package.
+
+ - For ``'xelatex'`` and ``'lualatex'``, the default is to
+ use the FreeFont family: this OpenType font family
+ supports both Cyrillic and Greek scripts and is available as
+ separate Ubuntu xenial package ``fonts-freefont-otf``, it is not
+ needed to install the big package ``texlive-fonts-extra``.
+
+ - ``'platex'`` (Japanese documents) engine supports individual
+ Cyrillic and Greek letters with no need of extra user set-up.
``'fncychap'``
Inclusion of the "fncychap" package (which makes fancy chapter titles),
default ``'\\usepackage[Bjarne]{fncychap}'`` for English documentation
@@ -2054,6 +2104,8 @@ information.
"fncychap" styles you can try are "Lenny", "Glenn", "Conny", "Rejne" and
"Bjornstrup". You can also set this to ``''`` to disable fncychap.
+ The default is ``''`` for Japanese documents.
+
``'preamble'``
Additional preamble content, default empty. See :doc:`/latex`.
@@ -2121,14 +2173,69 @@ information.
.. versionadded:: 1.2
``'fontenc'``
- "fontenc" package inclusion, default ``'\\usepackage[T1]{fontenc}'``.
+ "fontenc" package inclusion, defaults to
+ ``'\\usepackage[T1]{fontenc}'``.
+
+ If ``'pdflatex'`` is the :confval:`latex_engine`, one can add ``LGR``
+ for support of Greek letters in the document, and also ``X2`` (or
+ ``T2A``) for Cyrillic letters, like this:
+
+ .. code-block:: latex
+
+ r'\usepackage[LGR,X2,T1]{fontenc}'
+
+ .. attention::
+
+ Prior to 2.0, Unicode Greek letters were escaped to use LaTeX math
+ mark-up. This is not the case anymore, and the above must be used
+ (only in case of ``'pdflatex'`` engine) if the source contains such
+ Unicode Greek.
+
+ On Ubuntu xenial, packages ``texlive-lang-greek`` and ``cm-super``
+ (for the latter, only if the ``'fontpkg'`` setting is left to its
+ default) are needed for ``LGR`` to work. In place of ``cm-super``
+ one can install smaller ``cm-super-minimal``, but it requires the
+ LaTeX document to execute ``\usepackage[10pt]{type1ec}`` before
+ loading ``fontenc``. Thus, use this key with this extra at its
+ start if needed.
.. versionchanged:: 1.5
Defaults to ``'\\usepackage{fontspec}'`` when
:confval:`latex_engine` is ``'xelatex'``.
.. versionchanged:: 1.6
- ``'lualatex'`` also uses ``fontspec`` per default.
+ ``'lualatex'`` uses ``fontspec`` per default like ``'xelatex'``.
+ .. versionchanged:: 2.0
+ ``'lualatex'`` executes
+ ``\defaultfontfeatures[\rmfamily,\sffamily]{}`` to disable TeX
+ ligatures.
+ .. versionchanged:: 2.0
+ Detection of ``LGR``, ``T2A``, ``X2`` to trigger support of
+ occasional Greek or Cyrillic (``'pdflatex'`` only, as this support
+ is provided natively by ``'platex'`` and only requires suitable
+ font with ``'xelatex'/'lualatex'``).
+
+ ``'textgreek'``
+ The default (``'pdflatex'`` only) is
+ ``'\\usepackage{textalpha}'``, but only if ``'fontenc'`` was
+ modified by user to include ``LGR`` option. If not, the key
+ value will be forced to be the empty string.
+
+ This is needed for ``pdfLaTeX`` to support Unicode input of Greek
+ letters such as φύσις. Expert users may want to load the ``textalpha``
+ package with its option ``normalize-symbols``.
+
+ .. hint::
+
+ Unicode Greek (but no further Unicode symbols) in :rst:dir:`math`
+ can be supported by ``'pdflatex'`` from setting this key to
+ ``r'\usepackage{textalpha,alphabeta}'``. Then ``:math:`α``` (U+03B1)
+ will render as :math:`\alpha`. For wider Unicode support in math
+ input, see the discussion of :confval:`latex_engine`.
+
+ With ``'platex'`` (Japanese), ``'xelatex'`` or ``'lualatex'``, this
+ key is ignored.
+ .. versionadded:: 2.0
``'geometry'``
"geometry" package inclusion, the default definition is:
@@ -2224,20 +2331,16 @@ information.
index is full of long entries.
``'fvset'``
- Customization of ``fancyvrb`` LaTeX package. Currently, Sphinx uses
- this key to set the fontsize in code-blocks according to the
- :confval:`latex_engine`.
-
- - ``'pdflatex'`` uses ``'fvset': '\\fvset{fontsize=\\small}'``,
- to mitigate the size difference between the default monospaced font
- (Courier) and the default text font (Times). You may need to modify
- this if you use custom fonts.
-
- - ``'xelatex'`` and ``'lualatex'`` use ``'\\fvset{fontsize=auto}'``,
- as there is no size difference between the regular and the
- monospaced fonts used by default by Sphinx with these engines.
+ Customization of ``fancyvrb`` LaTeX package. Sphinx does by default
+ ``'fvset': '\\fvset{fontsize=\\small}'``, to adjust for the large
+ character width of the monospace font, used in code-blocks.
+ You may need to modify this if you use custom fonts.
.. versionadded:: 1.8
+ .. versionchanged:: 2.0
+ Due to new default font choice for ``'xelatex'`` and ``'lualatex'``
+ (FreeFont), Sphinx does ``\\fvset{fontsize=\\small}`` also with these
+ engines (and not ``\\fvset{fontsize=auto}``).
* Keys that are set by other options and therefore should not be overridden
are:
@@ -2245,10 +2348,8 @@ information.
``'docclass'``
``'classoptions'``
``'title'``
- ``'date'``
``'release'``
``'author'``
- ``'logo'``
``'makeindex'``
.. confval:: latex_docclass
diff --git a/doc/usage/extensions/autodoc.rst b/doc/usage/extensions/autodoc.rst
index 064463691..36d497543 100644
--- a/doc/usage/extensions/autodoc.rst
+++ b/doc/usage/extensions/autodoc.rst
@@ -45,6 +45,10 @@ docstrings to correct reStructuredText before :mod:`autodoc` processes them.
.. _NumPy:
https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
+
+Directives
+----------
+
:mod:`autodoc` provides several directives that are versions of the usual
:rst:dir:`py:module`, :rst:dir:`py:class` and so forth. On parsing time, they
import the corresponding module and extract the docstring of the given objects,
@@ -306,6 +310,9 @@ inserting them into the page source under a suitable :rst:dir:`py:module`,
well-behaved decorating functions.
+Configuration
+-------------
+
There are also new config values that you can set:
.. confval:: autoclass_content
@@ -376,9 +383,10 @@ There are also new config values that you can set:
Setting ``None`` is equivalent to giving the option name in the list format
(i.e. it means "yes/true/on").
- The supported options are ``'members'``, ``'undoc-members'``,
- ``'private-members'``, ``'special-members'``, ``'inherited-members'``,
- ``'show-inheritance'``, ``'ignore-module-all'`` and ``'exclude-members'``.
+ The supported options are ``'members'``, ``'member-order'``,
+ ``'undoc-members'``, ``'private-members'``, ``'special-members'``,
+ ``'inherited-members'``, ``'show-inheritance'``, ``'ignore-module-all'`` and
+ ``'exclude-members'``.
.. versionadded:: 1.8
@@ -432,6 +440,16 @@ There are also new config values that you can set:
.. versionadded:: 1.7
+.. confval:: suppress_warnings
+ :noindex:
+
+ :mod:`autodoc` supports suppressing warning messages via
+ :confval:`suppress_warnings`. It allows the following warning types
+ in addition:
+
+ * autodoc
+ * autodoc.import_object
+
Docstring preprocessing
-----------------------
diff --git a/doc/usage/extensions/example_google.py b/doc/usage/extensions/example_google.py
index 4f6abacdf..97ffe8a05 100644
--- a/doc/usage/extensions/example_google.py
+++ b/doc/usage/extensions/example_google.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
@@ -178,7 +177,7 @@ class ExampleError(Exception):
self.code = code
-class ExampleClass(object):
+class ExampleClass:
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
diff --git a/doc/usage/extensions/example_numpy.py b/doc/usage/extensions/example_numpy.py
index dbee080c3..2712447f4 100644
--- a/doc/usage/extensions/example_numpy.py
+++ b/doc/usage/extensions/example_numpy.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""Example NumPy style docstrings.
This module demonstrates documentation as specified by the `NumPy
@@ -223,7 +222,7 @@ class ExampleError(Exception):
self.code = code
-class ExampleClass(object):
+class ExampleClass:
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
diff --git a/doc/usage/extensions/inheritance.rst b/doc/usage/extensions/inheritance.rst
index ef78d04fe..c66f4130f 100644
--- a/doc/usage/extensions/inheritance.rst
+++ b/doc/usage/extensions/inheritance.rst
@@ -54,7 +54,7 @@ It adds this directive:
E D F
"""
- class A(object):
+ class A:
pass
class B(A):
diff --git a/doc/usage/extensions/math.rst b/doc/usage/extensions/math.rst
index 9daa03186..9e62c1425 100644
--- a/doc/usage/extensions/math.rst
+++ b/doc/usage/extensions/math.rst
@@ -183,7 +183,7 @@ Sphinx.
The default is empty (not configured).
-.. _Using in-line configuration options: http://docs.mathjax.org/en/latest/configuration.html#using-in-line-configuration-options
+.. _Using in-line configuration options: https://docs.mathjax.org/en/latest/configuration.html#using-in-line-configuration-options
:mod:`sphinx.ext.jsmath` -- Render math via JavaScript
------------------------------------------------------
diff --git a/doc/usage/extensions/napoleon.rst b/doc/usage/extensions/napoleon.rst
index 160375fe7..0460f1607 100644
--- a/doc/usage/extensions/napoleon.rst
+++ b/doc/usage/extensions/napoleon.rst
@@ -409,10 +409,10 @@ sure that "sphinx.ext.napoleon" is enabled in `conf.py`::
.. attribute:: attr1
- *int*
-
Description of `attr1`
+ :type: int
+
.. confval:: napoleon_use_param
True to use a ``:param:`` role for each function parameter. False to
diff --git a/doc/usage/extensions/viewcode.rst b/doc/usage/extensions/viewcode.rst
index cc7dbb07a..d3c3c44fb 100644
--- a/doc/usage/extensions/viewcode.rst
+++ b/doc/usage/extensions/viewcode.rst
@@ -41,10 +41,9 @@ Configuration
.. confval:: viewcode_follow_imported_members
- If this is ``True``, viewcode extension will follow alias objects that
- imported from another module such as functions, classes and attributes. As
- side effects, this option else they produce nothing. The default is
- ``True``.
+ If this is ``True``, the viewcode extension will emit the
+ :event:`viewcode-follow-imported` event, so that other extensions can
+ resolve the name of the module. The default is ``True``.
.. versionadded:: 1.3
diff --git a/doc/usage/installation.rst b/doc/usage/installation.rst
index 5c0d7de75..f51b3084e 100644
--- a/doc/usage/installation.rst
+++ b/doc/usage/installation.rst
@@ -12,10 +12,9 @@ Installing Sphinx
Overview
--------
-Sphinx is written in `Python`__ and supports both Python 2.7 and Python 3.3+.
-We recommend the latter.
+Sphinx is written in `Python`__ and supports Python 3.5+.
-__ http://docs.python-guide.org/en/latest/
+__ https://docs.python-guide.org/
Linux
@@ -73,7 +72,7 @@ Homebrew
For more information, refer to the `package overview`__.
-__ http://formulae.brew.sh/formula/sphinx-doc
+__ https://formulae.brew.sh/formula/sphinx-doc
MacPorts
~~~~~~~~
@@ -114,16 +113,14 @@ Prompt* (:kbd:`⊞Win-r` and type :command:`cmd`). Once the command prompt is
open, type :command:`python --version` and press Enter. If Python is
available, you will see the version of Python printed to the screen. If you do
not have Python installed, refer to the `Hitchhikers Guide to Python's`__
-Python on Windows installation guides. You can install either `Python 3`__ or
-`Python 2.7`__. Python 3 is recommended.
+Python on Windows installation guides. You must install `Python 3`__.
Once Python is installed, you can install Sphinx using :command:`pip`. Refer
to the :ref:`pip installation instructions <install-pypi>` below for more
information.
-__ http://docs.python-guide.org/en/latest/
-__ http://docs.python-guide.org/en/latest/starting/install3/win/
-__ http://docs.python-guide.org/en/latest/starting/install/win/
+__ https://docs.python-guide.org/
+__ https://docs.python-guide.org/starting/install3/win/
.. _install-pypi:
diff --git a/doc/usage/markdown.rst b/doc/usage/markdown.rst
index f67b94cbd..7593ea6b0 100644
--- a/doc/usage/markdown.rst
+++ b/doc/usage/markdown.rst
@@ -15,33 +15,48 @@ parsing the `CommonMark`__ Markdown flavor.
__ https://daringfireball.net/projects/markdown/
__ https://recommonmark.readthedocs.io/en/latest/index.html
__ https://github.com/rtfd/CommonMark-py
-__ http://commonmark.org/
+__ https://commonmark.org/
Configuration
-------------
To configure your Sphinx project for Markdown support, proceed as follows:
-#. Install *recommonmark*::
+#. Install the Markdown parser *recommonmark* from its source on GitHub::
- pip install recommonmark
+ pip install git+https://github.com/rtfd/recommonmark
-#. Add the Markdown parser to the ``source_parsers`` configuration variable in
- your Sphinx configuration file::
+ .. note::
- source_parsers = {
- '.md': 'recommonmark.parser.CommonMarkParser',
- }
+ The configuration as explained here requires recommonmark version
+ 0.5.0.dev or higher, which is at the time of writing not available on
+ PyPI. If you want to use a released recommonmark version, follow the
+ instructions in the `Sphinx 1.8 documentation`__.
+
+__ https://www.sphinx-doc.org/en/1.8/usage/markdown.html
+
+#. Add *recommonmark* to the
+ :confval:`list of configured extensions <extensions>`::
- You can replace ``.md`` with a filename extension of your choice.
+ extensions = ['recommonmark']
-#. Add the Markdown filename extension to the ``source_suffix`` configuration
- variable::
+ .. versionchanged:: 1.8
+ Version 1.8 deprecates and version 3.0 removes the ``source_parsers``
+ configuration variable that was used by older *recommonmark* versions.
- source_suffix = ['.rst', '.md']
+#. If you want to use Markdown files with extensions other than ``.md``, adjust
+ the :confval:`source_suffix` variable. The following example configures
+ Sphinx to parse all files with the extensions ``.md`` and ``.txt`` as
+ Markdown::
+
+ source_suffix = {
+ '.rst': 'restructuredtext',
+ '.txt': 'markdown',
+ '.md': 'markdown',
+ }
#. You can further configure *recommonmark* to allow custom syntax that
- standard *CommonMark* doesn't support. Read more in the `recommonmark
+ standard *CommonMark* doesn't support. Read more in the `recommonmark
documentation`__.
__ https://recommonmark.readthedocs.io/en/latest/auto_structify.html
diff --git a/doc/usage/quickstart.rst b/doc/usage/quickstart.rst
index 0f9452f05..b3eb7e1dd 100644
--- a/doc/usage/quickstart.rst
+++ b/doc/usage/quickstart.rst
@@ -320,8 +320,8 @@ More topics to be covered
- :doc:`Other extensions </usage/extensions/index>`:
- Static files
-- :doc:`Selecting a theme </theming>`
-- :doc:`/setuptools`
+- :doc:`Selecting a theme </usage/theming>`
+- :doc:`/usage/advanced/setuptools`
- :ref:`Templating <templating>`
- Using extensions
- :ref:`Writing extensions <dev-extensions>`
diff --git a/doc/usage/restructuredtext/directives.rst b/doc/usage/restructuredtext/directives.rst
index e9fe2e23d..35d4ac384 100644
--- a/doc/usage/restructuredtext/directives.rst
+++ b/doc/usage/restructuredtext/directives.rst
@@ -1063,6 +1063,15 @@ or use Python raw strings (``r"raw"``).
.. _AmSMath LaTeX package: https://www.ams.org/publications/authors/tex/amslatex
+.. seealso::
+
+ :ref:`math-support`
+ Rendering options for math with HTML builders.
+
+ :confval:`latex_engine`
+ Explains how to configure LaTeX builder to support Unicode literals in
+ math mark-up.
+
Grammar production displays
---------------------------
diff --git a/doc/usage/theming.rst b/doc/usage/theming.rst
new file mode 100644
index 000000000..e0d779b99
--- /dev/null
+++ b/doc/usage/theming.rst
@@ -0,0 +1,339 @@
+.. highlight:: python
+
+HTML
+====
+
+Sphinx provides a number of builders for HTML and HTML-based formats.
+
+Builders
+--------
+
+.. todo:: Populate when the 'builders' document is split up.
+
+
+Themes
+------
+
+.. versionadded:: 0.6
+
+.. note::
+
+ This section provides information about using pre-existing HTML themes. If
+ you wish to create your own theme, refer to :doc:`/theming`.
+
+Sphinx supports changing the appearance of its HTML output via *themes*. A
+theme is a collection of HTML templates, stylesheet(s) and other static files.
+Additionally, it has a configuration file which specifies from which theme to
+inherit, which highlighting style to use, and what options exist for customizing
+the theme's look and feel.
+
+Themes are meant to be project-unaware, so they can be used for different
+projects without change.
+
+Using a theme
+~~~~~~~~~~~~~
+
+Using a :ref:`theme provided with Sphinx <builtin-themes>` is easy. Since these
+do not need to be installed, you only need to set the :confval:`html_theme`
+config value. For example, to enable the ``classic`` theme, add the following
+to :file:`conf.py`::
+
+ html_theme = "classic"
+
+You can also set theme-specific options using the :confval:`html_theme_options`
+config value. These options are generally used to change the look and feel of
+the theme. For example, to place the sidebar on the right side and a black
+background for the relation bar (the bar with the navigation links at the
+page's top and bottom), add the following :file:`conf.py`::
+
+ html_theme_options = {
+ "rightsidebar": "true",
+ "relbarbgcolor": "black"
+ }
+
+If the theme does not come with Sphinx, it can be in two static forms or as a
+Python package. For the static forms, either a directory (containing
+:file:`theme.conf` and other needed files), or a zip file with the same
+contents is supported. The directory or zipfile must be put where Sphinx can
+find it; for this there is the config value :confval:`html_theme_path`. This
+can be a list of directories, relative to the directory containing
+:file:`conf.py`, that can contain theme directories or zip files. For example,
+if you have a theme in the file :file:`blue.zip`, you can put it right in the
+directory containing :file:`conf.py` and use this configuration::
+
+ html_theme = "blue"
+ html_theme_path = ["."]
+
+The third form is a Python package. If a theme you want to use is distributed
+as a Python package, you can use it after installing
+
+.. code-block:: bash
+
+ # installing theme package
+ $ pip install sphinxjp.themes.dotted
+
+Once installed, this can be used in the same manner as a directory or
+zipfile-based theme::
+
+ html_theme = "dotted"
+
+For more information on the design of themes, including information about
+writing your own themes, refer to :doc:`/theming`.
+
+.. _builtin-themes:
+
+Builtin themes
+~~~~~~~~~~~~~~
+
+.. cssclass:: longtable
+
++--------------------+--------------------+
+| **Theme overview** | |
++--------------------+--------------------+
+| |alabaster| | |classic| |
+| | |
+| *alabaster* | *classic* |
++--------------------+--------------------+
+| |sphinxdoc| | |scrolls| |
+| | |
+| *sphinxdoc* | *scrolls* |
++--------------------+--------------------+
+| |agogo| | |traditional| |
+| | |
+| *agogo* | *traditional* |
++--------------------+--------------------+
+| |nature| | |haiku| |
+| | |
+| *nature* | *haiku* |
++--------------------+--------------------+
+| |pyramid| | |bizstyle| |
+| | |
+| *pyramid* | *bizstyle* |
++--------------------+--------------------+
+
+.. |alabaster| image:: /_static/themes/alabaster.png
+.. |classic| image:: /_static/themes/classic.png
+.. |sphinxdoc| image:: /_static/themes/sphinxdoc.png
+.. |scrolls| image:: /_static/themes/scrolls.png
+.. |agogo| image:: /_static/themes/agogo.png
+.. |traditional| image:: /_static/themes/traditional.png
+.. |nature| image:: /_static/themes/nature.png
+.. |haiku| image:: /_static/themes/haiku.png
+.. |pyramid| image:: /_static/themes/pyramid.png
+.. |bizstyle| image:: /_static/themes/bizstyle.png
+
+Sphinx comes with a selection of themes to choose from.
+
+.. cssclass:: clear
+
+These themes are:
+
+**basic**
+ This is a basically unstyled layout used as the base for the
+ other themes, and usable as the base for custom themes as well. The HTML
+ contains all important elements like sidebar and relation bar. There are
+ these options (which are inherited by the other themes):
+
+ - **nosidebar** (true or false): Don't include the sidebar. Defaults to
+ ``False``.
+
+ - **sidebarwidth** (int or str): Width of the sidebar in pixels.
+ This can be an int, which is interpreted as pixels or a valid CSS
+ dimension string such as '70em' or '50%'. Defaults to 230 pixels.
+
+ - **body_min_width** (int or str): Minimal width of the document body.
+ This can be an int, which is interpreted as pixels or a valid CSS
+ dimension string such as '70em' or '50%'. Use 0 if you don't want
+ a width limit. Defaults may depend on the theme (often 450px).
+
+ - **body_max_width** (int or str): Maximal width of the document body.
+ This can be an int, which is interpreted as pixels or a valid CSS
+ dimension string such as '70em' or '50%'. Use 'none' if you don't
+ want a width limit. Defaults may depend on the theme (often 800px).
+
+**alabaster**
+ `Alabaster theme`_ is a modified "Kr" Sphinx theme from @kennethreitz
+ (especially as used in his Requests project), which was itself originally
+ based on @mitsuhiko's theme used for Flask & related projects. Refer to its
+ `installation page`_ for information on how to configure
+ :confval:`html_sidebars` for its use.
+
+ .. _Alabaster theme: https://pypi.org/project/alabaster/
+ .. _installation page: https://alabaster.readthedocs.io/en/latest/installation.html
+
+**classic**
+ This is the classic theme, which looks like `the Python 2
+ documentation <https://docs.python.org/2/>`_. It can be customized via
+ these options:
+
+ - **rightsidebar** (true or false): Put the sidebar on the right side.
+ Defaults to ``False``.
+
+ - **stickysidebar** (true or false): Make the sidebar "fixed" so that it
+ doesn't scroll out of view for long body content. This may not work well
+ with all browsers. Defaults to ``False``.
+
+ - **collapsiblesidebar** (true or false): Add an *experimental* JavaScript
+ snippet that makes the sidebar collapsible via a button on its side.
+ Defaults to ``False``.
+
+ - **externalrefs** (true or false): Display external links differently from
+ internal links. Defaults to ``False``.
+
+ There are also various color and font options that can change the color scheme
+ without having to write a custom stylesheet:
+
+ - **footerbgcolor** (CSS color): Background color for the footer line.
+ - **footertextcolor** (CSS color): Text color for the footer line.
+ - **sidebarbgcolor** (CSS color): Background color for the sidebar.
+ - **sidebarbtncolor** (CSS color): Background color for the sidebar collapse
+ button (used when *collapsiblesidebar* is ``True``).
+ - **sidebartextcolor** (CSS color): Text color for the sidebar.
+ - **sidebarlinkcolor** (CSS color): Link color for the sidebar.
+ - **relbarbgcolor** (CSS color): Background color for the relation bar.
+ - **relbartextcolor** (CSS color): Text color for the relation bar.
+ - **relbarlinkcolor** (CSS color): Link color for the relation bar.
+ - **bgcolor** (CSS color): Body background color.
+ - **textcolor** (CSS color): Body text color.
+ - **linkcolor** (CSS color): Body link color.
+ - **visitedlinkcolor** (CSS color): Body color for visited links.
+ - **headbgcolor** (CSS color): Background color for headings.
+ - **headtextcolor** (CSS color): Text color for headings.
+ - **headlinkcolor** (CSS color): Link color for headings.
+ - **codebgcolor** (CSS color): Background color for code blocks.
+ - **codetextcolor** (CSS color): Default text color for code blocks, if not
+ set differently by the highlighting style.
+
+ - **bodyfont** (CSS font-family): Font for normal text.
+ - **headfont** (CSS font-family): Font for headings.
+
+**sphinxdoc**
+ The theme originally used by this documentation. It features
+ a sidebar on the right side. There are currently no options beyond
+ *nosidebar* and *sidebarwidth*.
+
+ .. note::
+
+ The Sphinx documentation now uses
+ `an adjusted version of the sphinxdoc theme
+ <https://github.com/sphinx-doc/sphinx/tree/master/doc/_themes/sphinx13>`_.
+
+**scrolls**
+ A more lightweight theme, based on `the Jinja documentation
+ <http://jinja.pocoo.org/>`_. The following color options are available:
+
+ - **headerbordercolor**
+ - **subheadlinecolor**
+ - **linkcolor**
+ - **visitedlinkcolor**
+ - **admonitioncolor**
+
+**agogo**
+ A theme created by Andi Albrecht. The following options are supported:
+
+ - **bodyfont** (CSS font family): Font for normal text.
+ - **headerfont** (CSS font family): Font for headings.
+ - **pagewidth** (CSS length): Width of the page content, default 70em.
+ - **documentwidth** (CSS length): Width of the document (without sidebar),
+ default 50em.
+ - **sidebarwidth** (CSS length): Width of the sidebar, default 20em.
+ - **bgcolor** (CSS color): Background color.
+ - **headerbg** (CSS value for "background"): background for the header area,
+ default a grayish gradient.
+ - **footerbg** (CSS value for "background"): background for the footer area,
+ default a light gray gradient.
+ - **linkcolor** (CSS color): Body link color.
+ - **headercolor1**, **headercolor2** (CSS color): colors for <h1> and <h2>
+ headings.
+ - **headerlinkcolor** (CSS color): Color for the backreference link in
+ headings.
+ - **textalign** (CSS *text-align* value): Text alignment for the body, default
+ is ``justify``.
+
+**nature**
+ A greenish theme. There are currently no options beyond
+ *nosidebar* and *sidebarwidth*.
+
+**pyramid**
+ A theme from the Pyramid web framework project, designed by Blaise Laflamme.
+ There are currently no options beyond *nosidebar* and *sidebarwidth*.
+
+**haiku**
+ A theme without sidebar inspired by the `Haiku OS user guide
+ <https://www.haiku-os.org/docs/userguide/en/contents.html>`_. The following
+ options are supported:
+
+ - **full_logo** (true or false, default ``False``): If this is true, the
+ header will only show the :confval:`html_logo`. Use this for large logos.
+ If this is false, the logo (if present) will be shown floating right, and
+ the documentation title will be put in the header.
+
+ - **textcolor**, **headingcolor**, **linkcolor**, **visitedlinkcolor**,
+ **hoverlinkcolor** (CSS colors): Colors for various body elements.
+
+**traditional**
+ A theme resembling the old Python documentation. There are
+ currently no options beyond *nosidebar* and *sidebarwidth*.
+
+**epub**
+ A theme for the epub builder. This theme tries to save visual
+ space which is a sparse resource on ebook readers. The following options
+ are supported:
+
+ - **relbar1** (true or false, default ``True``): If this is true, the
+ `relbar1` block is inserted in the epub output, otherwise it is omitted.
+
+ - **footer** (true or false, default ``True``): If this is true, the
+ `footer` block is inserted in the epub output, otherwise it is omitted.
+
+**bizstyle**
+ A simple bluish theme. The following options are supported
+ beyond *nosidebar* and *sidebarwidth*:
+
+ - **rightsidebar** (true or false): Put the sidebar on the right side.
+ Defaults to ``False``.
+
+.. versionadded:: 1.3
+ 'alabaster', 'sphinx_rtd_theme' and 'bizstyle' theme.
+
+.. versionchanged:: 1.3
+ The 'default' theme has been renamed to 'classic'. 'default' is still
+ available, however it will emit a notice that it is an alias for the new
+ 'alabaster' theme.
+
+Third Party Themes
+~~~~~~~~~~~~~~~~~~
+
+.. cssclass:: longtable
+
++--------------------+--------------------+
+| **Theme overview** | |
++--------------------+--------------------+
+| |sphinx_rtd_theme| | |
+| | |
+| *sphinx_rtd_theme* | |
++--------------------+--------------------+
+
+.. |sphinx_rtd_theme| image:: /_static/themes/sphinx_rtd_theme.png
+
+There are many third-party themes available. Some of these are general use,
+while others are specific to an individual project. A section of third-party
+themes is listed below. Many more can be found on PyPI__, GitHub__ and
+sphinx-themes.org__.
+
+.. cssclass:: clear
+
+**sphinx_rtd_theme**
+ `Read the Docs Sphinx Theme`_.
+ This is a mobile-friendly sphinx theme that was made for readthedocs.org.
+ View a working demo over on readthedocs.org. Installation and
+ configuration information is available on the `Read the Docs Sphinx Theme`_ page.
+
+ .. _Read the Docs Sphinx Theme: https://pypi.org/project/sphinx_rtd_theme/
+
+ .. versionchanged:: 1.4
+ **sphinx_rtd_theme** has become optional.
+
+.. __: https://pypi.org/search/?q=&o=&c=Framework+%3A%3A+Sphinx+%3A%3A+Theme
+.. __: https://github.com/search?utf8=%E2%9C%93&q=sphinx+theme&type=
+.. __: https://sphinx-themes.org/
diff --git a/setup.cfg b/setup.cfg
index bd52e0cec..ea78b0c3e 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -43,7 +43,6 @@ paths =
.
[mypy]
-python_version = 2.7
show_column_numbers = True
show_error_context = True
ignore_missing_imports = True
diff --git a/setup.py b/setup.py
index b6b3bc259..6e28a10fc 100644
--- a/setup.py
+++ b/setup.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
import os
import sys
from distutils import log
@@ -11,23 +10,21 @@ import sphinx
with open('README.rst') as f:
long_desc = f.read()
-if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 4):
- print('ERROR: Sphinx requires at least Python 2.7 or 3.4 to run.')
+if sys.version_info < (3, 5):
+ print('ERROR: Sphinx requires at least Python 3.5 to run.')
sys.exit(1)
install_requires = [
- 'six>=1.5',
'Jinja2>=2.3',
'Pygments>=2.0',
- 'docutils>=0.11',
+ 'docutils>=0.12',
'snowballstemmer>=1.1',
'babel>=1.3,!=2.0',
'alabaster>=0.7,<0.8',
'imagesize',
- 'requests>=2.0.0',
+ 'requests>=2.5.0',
'setuptools',
'packaging',
- 'sphinxcontrib-websupport',
]
extras_require = {
@@ -35,9 +32,6 @@ extras_require = {
':sys_platform=="win32"': [
'colorama>=0.3.5',
],
- ':python_version<"3.5"': [
- 'typing'
- ],
'websupport': [
'sqlalchemy>=0.9',
'whoosh>=2.0',
@@ -49,13 +43,8 @@ extras_require = {
'html5lib',
'flake8>=3.5.0',
'flake8-import-order',
- ],
- 'test:python_version<"3"': [
- 'enum34',
- ],
- 'test:python_version>="3"': [
- 'mypy',
- 'typed_ast',
+ 'mypy>=0.470',
+ 'docutils-stubs',
],
}
@@ -65,7 +54,7 @@ extras_require = {
cmdclass = {}
-class Tee(object):
+class Tee:
def __init__(self, stream):
self.stream = stream
self.buffer = StringIO()
@@ -142,7 +131,7 @@ else:
domain + '.js'))
for js_file, (locale, po_file) in zip(js_files, po_files):
- with open(po_file, 'r') as infile:
+ with open(po_file) as infile:
catalog = read_po(infile, locale)
if catalog.fuzzy and not self.use_fuzzy:
@@ -162,11 +151,11 @@ else:
with open(js_file, 'wt') as outfile:
outfile.write('Documentation.addTranslations(')
- dump(dict(
- messages=jscatalog,
- plural_expr=catalog.plural_expr,
- locale=str(catalog.locale)
- ), outfile, sort_keys=True)
+ dump({
+ 'messages': jscatalog,
+ 'plural_expr': catalog.plural_expr,
+ 'locale': str(catalog.locale)
+ }, outfile, sort_keys=True)
outfile.write(');')
cmdclass['compile_catalog'] = compile_catalog_plusjs
@@ -195,12 +184,11 @@ setup(
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
- 'Programming Language :: Python :: 2',
- 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Setuptools Plugin',
@@ -235,7 +223,7 @@ setup(
'build_sphinx = sphinx.setup_command:BuildDoc',
],
},
- python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*",
+ python_requires=">=3.5",
install_requires=install_requires,
extras_require=extras_require,
cmdclass=cmdclass,
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index 88dca5ce4..796a755bb 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
Sphinx
~~~~~~
@@ -12,19 +11,14 @@
# Keep this file executable as-is in Python 3!
# (Otherwise getting the version out of it from setup.py is impossible.)
-from __future__ import absolute_import
-
import os
-import sys
import warnings
from os import path
from .deprecation import RemovedInNextVersionWarning
-from .deprecation import RemovedInSphinx20Warning
if False:
# For type annotation
- # note: Don't use typing.TYPE_CHECK here (for py27 and py34).
from typing import Any # NOQA
@@ -36,8 +30,8 @@ if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
-__version__ = '1.8.4+'
-__released__ = '1.8.4' # used when Sphinx builds its own docs
+__version__ = '2.0.0+'
+__released__ = '2.0.0' # used when Sphinx builds its own docs
#: Version info for better programmatic use.
#:
@@ -47,7 +41,7 @@ __released__ = '1.8.4' # used when Sphinx builds its own docs
#:
#: .. versionadded:: 1.2
#: Before version 1.2, check the string ``sphinx.__version__``.
-version_info = (1, 8, 4, 'beta', 0)
+version_info = (2, 0, 0, 'beta', 0)
package_dir = path.abspath(path.dirname(__file__))
@@ -67,47 +61,3 @@ if __version__.endswith('+'):
__display_version__ += '/' + out.decode().strip()
except Exception:
pass
-
-
-def main(argv=sys.argv): # type: ignore
- # type: (List[unicode]) -> int
- from .cmd import build
- warnings.warn(
- '`sphinx.main()` has moved to `sphinx.cmd.build.main()`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- argv = argv[1:] # skip first argument to adjust arguments (refs: #4615)
- return build.main(argv)
-
-
-def build_main(argv=sys.argv):
- """Sphinx build "main" command-line entry."""
- from .cmd import build
- warnings.warn(
- '`sphinx.build_main()` has moved to `sphinx.cmd.build.build_main()`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- return build.build_main(argv[1:]) # skip first argument to adjust arguments (refs: #4615)
-
-
-def make_main(argv=sys.argv):
- """Sphinx build "make mode" entry."""
- from .cmd import build
- warnings.warn(
- '`sphinx.build_main()` has moved to `sphinx.cmd.build.make_main()`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- return build.make_main(argv[1:]) # skip first argument to adjust arguments (refs: #4615)
-
-
-if __name__ == '__main__':
- from .cmd import build
- warnings.warn(
- '`sphinx` has moved to `sphinx.build`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- build.main()
diff --git a/sphinx/__main__.py b/sphinx/__main__.py
index 02bc806e7..4badfcc4c 100644
--- a/sphinx/__main__.py
+++ b/sphinx/__main__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.__main__
~~~~~~~~~~~~~~~
@@ -13,4 +12,4 @@ import sys
from sphinx.cmd.build import main
-sys.exit(main(sys.argv[1:])) # type: ignore
+sys.exit(main(sys.argv[1:]))
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index 03de7cb7e..f77ebda5e 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.addnodes
~~~~~~~~~~~~~~~
@@ -13,14 +12,15 @@ import warnings
from docutils import nodes
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
if False:
# For type annotation
- from typing import List, Sequence # NOQA
+ from typing import Any, Dict, List, Sequence # NOQA
+ from sphinx.application import Sphinx # NOQA
-class translatable(object):
+class translatable(nodes.Node):
"""Node which supports translation.
The translation goes forward with following steps:
@@ -40,12 +40,12 @@ class translatable(object):
raise NotImplementedError
def apply_translated_message(self, original_message, translated_message):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Apply translated message."""
raise NotImplementedError
def extract_original_messages(self):
- # type: () -> Sequence[unicode]
+ # type: () -> Sequence[str]
"""Extract translation messages.
:returns: list of extracted messages or messages generator
@@ -53,7 +53,7 @@ class translatable(object):
raise NotImplementedError
-class not_smartquotable(object):
+class not_smartquotable:
"""A node which does not support smart-quotes."""
support_smartquotes = False
@@ -67,12 +67,12 @@ class toctree(nodes.General, nodes.Element, translatable):
self['rawcaption'] = self['caption']
def apply_translated_message(self, original_message, translated_message):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.get('rawcaption') == original_message:
self['caption'] = translated_message
def extract_original_messages(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
if 'rawcaption' in self:
return [self['rawcaption']]
else:
@@ -106,6 +106,7 @@ class desc_signature_line(nodes.Part, nodes.Inline, nodes.FixedTextElement):
It should only be used in a ``desc_signature`` with ``is_multiline`` set.
Set ``add_permalink = True`` for the line that should get the permalink.
"""
+ sphinx_cpp_tagname = ''
# nodes to use within a desc_signature or desc_signature_line
@@ -125,8 +126,8 @@ class desc_type(nodes.Part, nodes.Inline, nodes.FixedTextElement):
class desc_returns(desc_type):
"""Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
- # type: () -> unicode
- return ' -> ' + nodes.TextElement.astext(self)
+ # type: () -> str
+ return ' -> ' + super().astext()
class desc_name(nodes.Part, nodes.Inline, nodes.FixedTextElement):
@@ -147,8 +148,8 @@ class desc_optional(nodes.Part, nodes.Inline, nodes.FixedTextElement):
child_text_separator = ', '
def astext(self):
- # type: () -> unicode
- return '[' + nodes.TextElement.astext(self) + ']'
+ # type: () -> str
+ return '[' + super().astext() + ']'
class desc_annotation(nodes.Part, nodes.Inline, nodes.FixedTextElement):
@@ -208,7 +209,7 @@ class math(nodes.math):
RemovedInSphinx30Warning, stacklevel=2)
return self.astext()
else:
- return nodes.math.__getitem__(self, key)
+ return super().__getitem__(key)
class math_block(nodes.math_block):
@@ -227,7 +228,7 @@ class math_block(nodes.math_block):
RemovedInSphinx30Warning, stacklevel=2)
return self.astext()
else:
- return nodes.math_block.__getitem__(self, key)
+ return super().__getitem__(key)
class displaymath(math_block):
@@ -307,6 +308,7 @@ class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
"""Node for meta directive -- same as docutils' standard meta node,
but pickleable.
"""
+ rawcontent = None
# inline nodes
@@ -340,15 +342,65 @@ class literal_strong(nodes.strong, not_smartquotable):
"""
-class abbreviation(nodes.Inline, nodes.TextElement):
- """Node for abbreviations with explanations."""
+class abbreviation(nodes.abbreviation):
+ """Node for abbreviations with explanations.
+
+ .. deprecated:: 2.0
+ """
+
+ def __init__(self, rawsource='', text='', *children, **attributes):
+ # type: (str, str, *nodes.Node, **Any) -> None
+        warnings.warn("abbreviation node for Sphinx was replaced by docutils'.",
+ RemovedInSphinx40Warning, stacklevel=2)
+
+ super().__init__(rawsource, text, *children, **attributes)
class manpage(nodes.Inline, nodes.FixedTextElement):
"""Node for references to manpages."""
-# make the new nodes known to docutils; needed because the HTML writer will
-# choke at some point if these are not added
-nodes._add_node_class_names(k for k in globals().keys()
- if k != 'nodes' and k[0] != '_')
+def setup(app):
+ # type: (Sphinx) -> Dict[str, Any]
+ app.add_node(toctree)
+ app.add_node(desc)
+ app.add_node(desc_signature)
+ app.add_node(desc_signature_line)
+ app.add_node(desc_addname)
+ app.add_node(desc_type)
+ app.add_node(desc_returns)
+ app.add_node(desc_name)
+ app.add_node(desc_parameterlist)
+ app.add_node(desc_parameter)
+ app.add_node(desc_optional)
+ app.add_node(desc_annotation)
+ app.add_node(desc_content)
+ app.add_node(versionmodified)
+ app.add_node(seealso)
+ app.add_node(productionlist)
+ app.add_node(production)
+ app.add_node(displaymath)
+ app.add_node(index)
+ app.add_node(centered)
+ app.add_node(acks)
+ app.add_node(hlist)
+ app.add_node(hlistcol)
+ app.add_node(compact_paragraph)
+ app.add_node(glossary)
+ app.add_node(only)
+ app.add_node(start_of_file)
+ app.add_node(highlightlang)
+ app.add_node(tabular_col_spec)
+ app.add_node(meta)
+ app.add_node(pending_xref)
+ app.add_node(number_reference)
+ app.add_node(download_reference)
+ app.add_node(literal_emphasis)
+ app.add_node(literal_strong)
+ app.add_node(manpage)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py
deleted file mode 100644
index 95a1d14f7..000000000
--- a/sphinx/apidoc.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.apidoc
- ~~~~~~~~~~~~~
-
- This file has moved to :py:mod:`sphinx.ext.apidoc`.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-import warnings
-
-from sphinx.deprecation import RemovedInSphinx20Warning
-from sphinx.ext.apidoc import main as _main
-
-if False:
- # For type annotation
- from typing import List # NOQA
- from sphinx.application import Sphinx # NOQA
-
-
-def main(argv=sys.argv):
- # type: (List[str]) -> None
- warnings.warn(
- '`sphinx.apidoc.main()` has moved to `sphinx.ext.apidoc.main()`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- _main(argv[1:]) # skip first argument to adjust arguments (refs: #4615)
-
-
-# So program can be started with "python -m sphinx.apidoc ..."
-if __name__ == "__main__":
- warnings.warn(
- '`sphinx.apidoc` has moved to `sphinx.ext.apidoc`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- main()
diff --git a/sphinx/application.py b/sphinx/application.py
index 35ff80d33..50bb9dfda 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.application
~~~~~~~~~~~~~~~~~~
@@ -10,31 +9,30 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
+import pickle
import sys
import warnings
from collections import deque
from inspect import isclass
+from io import StringIO
from os import path
-from docutils.parsers.rst import Directive, directives, roles
-from six import itervalues
-from six.moves import cPickle as pickle
-from six.moves import cStringIO
+from docutils.parsers.rst import Directive, roles
import sphinx
from sphinx import package_dir, locale
-from sphinx.config import Config, check_unicode
+from sphinx.config import Config
from sphinx.config import CONFIG_FILENAME # NOQA # for compatibility (RemovedInSphinx30)
from sphinx.deprecation import (
- RemovedInSphinx20Warning, RemovedInSphinx30Warning, RemovedInSphinx40Warning
+ RemovedInSphinx30Warning, RemovedInSphinx40Warning
)
from sphinx.environment import BuildEnvironment
from sphinx.errors import ApplicationError, ConfigError, VersionRequirementError
from sphinx.events import EventManager
from sphinx.locale import __
+from sphinx.project import Project
from sphinx.registry import SphinxComponentRegistry
from sphinx.util import docutils
from sphinx.util import import_object
@@ -44,6 +42,7 @@ from sphinx.util.build_phase import BuildPhase
from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import directive_helper
from sphinx.util.i18n import find_catalog_source_files
+from sphinx.util.logging import prefixed_warnings
from sphinx.util.osutil import abspath, ensuredir, relpath
from sphinx.util.tags import Tags
@@ -62,6 +61,7 @@ if False:
from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
builtin_extensions = (
+ 'sphinx.addnodes',
'sphinx.builders.applehelp',
'sphinx.builders.changes',
'sphinx.builders.epub3',
@@ -92,7 +92,6 @@ builtin_extensions = (
'sphinx.directives.other',
'sphinx.directives.patches',
'sphinx.extension',
- 'sphinx.io',
'sphinx.parsers',
'sphinx.registry',
'sphinx.roles',
@@ -111,14 +110,14 @@ builtin_extensions = (
# Strictly, alabaster theme is not a builtin extension,
# but it is loaded automatically to use it as default theme.
'alabaster',
-) # type: Tuple[unicode, ...]
+)
ENV_PICKLE_FILENAME = 'environment.pickle'
logger = logging.getLogger(__name__)
-class Sphinx(object):
+class Sphinx:
"""The main application class and extensibility interface.
:ivar srcdir: Directory containing source.
@@ -131,20 +130,20 @@ class Sphinx(object):
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0, keep_going=False):
- # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, List[unicode], int, int, bool) -> None # NOQA
+ # type: (str, str, str, str, str, Dict, IO, IO, bool, bool, List[str], int, int, bool) -> None # NOQA
self.phase = BuildPhase.INITIALIZATION
self.verbosity = verbosity
- self.extensions = {} # type: Dict[unicode, Extension]
- self._setting_up_extension = ['?'] # type: List[unicode]
+ self.extensions = {} # type: Dict[str, Extension]
self.builder = None # type: Builder
self.env = None # type: BuildEnvironment
+ self.project = None # type: Project
self.registry = SphinxComponentRegistry()
- self.html_themes = {} # type: Dict[unicode, unicode]
+ self.html_themes = {} # type: Dict[str, str]
# validate provided directories
- self.srcdir = abspath(srcdir) # type: unicode
- self.outdir = abspath(outdir) # type: unicode
- self.doctreedir = abspath(doctreedir) # type: unicode
+ self.srcdir = abspath(srcdir)
+ self.outdir = abspath(outdir)
+ self.doctreedir = abspath(doctreedir)
self.confdir = confdir
if self.confdir: # confdir is optional
self.confdir = abspath(self.confdir)
@@ -163,14 +162,14 @@ class Sphinx(object):
self.parallel = parallel
if status is None:
- self._status = cStringIO() # type: IO
+ self._status = StringIO() # type: IO
self.quiet = True
else:
self._status = status
self.quiet = False
if warning is None:
- self._warning = cStringIO() # type: IO
+ self._warning = StringIO() # type: IO
else:
self._warning = warning
self._warncount = 0
@@ -199,7 +198,6 @@ class Sphinx(object):
self.config = Config({}, confoverrides or {})
else:
self.config = Config.read(self.confdir, confoverrides or {}, self.tags)
- check_unicode(self.config)
# initialize some limited config variables before initialize i18n and loading
# extensions
@@ -236,25 +234,23 @@ class Sphinx(object):
# the config file itself can be an extension
if self.config.setup:
- self._setting_up_extension = ['conf.py']
- if callable(self.config.setup):
- self.config.setup(self)
- else:
- raise ConfigError(
- __("'setup' as currently defined in conf.py isn't a Python callable. "
- "Please modify its definition to make it a callable function. This is "
- "needed for conf.py to behave as a Sphinx extension.")
- )
+ prefix = __('while setting up extension %s:') % "conf.py"
+ with prefixed_warnings(prefix):
+ if callable(self.config.setup):
+ self.config.setup(self)
+ else:
+ raise ConfigError(
+ __("'setup' as currently defined in conf.py isn't a Python callable. "
+ "Please modify its definition to make it a callable function. "
+ "This is needed for conf.py to behave as a Sphinx extension.")
+ )
# now that we know all config values, collect them from conf.py
self.config.init_values()
self.emit('config-inited', self.config)
- # check primary_domain if requested
- primary_domain = self.config.primary_domain
- if primary_domain and not self.registry.has_domain(primary_domain):
- logger.warning(__('primary_domain %r not found, ignored.'), primary_domain)
-
+ # create the project
+ self.project = Project(self.srcdir, self.config.source_suffix)
# create the builder
self.builder = self.create_builder(buildername)
# set up the build environment
@@ -277,10 +273,10 @@ class Sphinx(object):
user_locale_dirs, self.config.language, domains=['sphinx'],
charset=self.config.source_encoding):
catinfo.write_mo(self.config.language)
- locale_dirs = [None, path.join(package_dir, 'locale')] + user_locale_dirs # type: ignore # NOQA
+ locale_dirs = [None, path.join(package_dir, 'locale')] + user_locale_dirs
else:
locale_dirs = []
- self.translator, has_translation = locale.init(locale_dirs, self.config.language) # type: ignore # NOQA
+ self.translator, has_translation = locale.init(locale_dirs, self.config.language)
if self.config.language is not None:
if has_translation or self.config.language == 'en':
# "en" never needs to be translated
@@ -307,11 +303,11 @@ class Sphinx(object):
self._init_env(freshenv=True)
def preload_builder(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.registry.preload_builder(self, name)
def create_builder(self, name):
- # type: (unicode) -> Builder
+ # type: (str) -> Builder
if name is None:
logger.info(__('No builder selected, using default: html'))
name = 'html'
@@ -327,7 +323,7 @@ class Sphinx(object):
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
- # type: (bool, List[unicode]) -> None
+ # type: (bool, List[str]) -> None
self.phase = BuildPhase.READING
try:
if force_all:
@@ -369,76 +365,10 @@ class Sphinx(object):
self.emit('build-finished', None)
self.builder.cleanup()
- # ---- logging handling ----------------------------------------------------
- def warn(self, message, location=None, type=None, subtype=None):
- # type: (unicode, unicode, unicode, unicode) -> None
- """Emit a warning.
-
- If *location* is given, it should either be a tuple of (*docname*,
- *lineno*) or a string describing the location of the warning as well as
- possible.
-
- *type* and *subtype* are used to suppress warnings with
- :confval:`suppress_warnings`.
-
- .. deprecated:: 1.6
- Use :mod:`sphinx.util.logging` instead.
- """
- warnings.warn('app.warning() is now deprecated. Use sphinx.util.logging instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- logger.warning(message, type=type, subtype=subtype, location=location)
-
- def info(self, message='', nonl=False):
- # type: (unicode, bool) -> None
- """Emit an informational message.
-
- If *nonl* is true, don't emit a newline at the end (which implies that
- more info output will follow soon.)
-
- .. deprecated:: 1.6
- Use :mod:`sphinx.util.logging` instead.
- """
- warnings.warn('app.info() is now deprecated. Use sphinx.util.logging instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- logger.info(message, nonl=nonl)
-
- def verbose(self, message, *args, **kwargs):
- # type: (unicode, Any, Any) -> None
- """Emit a verbose informational message.
-
- .. deprecated:: 1.6
- Use :mod:`sphinx.util.logging` instead.
- """
- warnings.warn('app.verbose() is now deprecated. Use sphinx.util.logging instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- logger.verbose(message, *args, **kwargs)
-
- def debug(self, message, *args, **kwargs):
- # type: (unicode, Any, Any) -> None
- """Emit a debug-level informational message.
-
- .. deprecated:: 1.6
- Use :mod:`sphinx.util.logging` instead.
- """
- warnings.warn('app.debug() is now deprecated. Use sphinx.util.logging instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- logger.debug(message, *args, **kwargs)
-
- def debug2(self, message, *args, **kwargs):
- # type: (unicode, Any, Any) -> None
- """Emit a lowlevel debug-level informational message.
-
- .. deprecated:: 1.6
- Use :mod:`sphinx.util.logging` instead.
- """
- warnings.warn('app.debug2() is now deprecated. Use debug() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- logger.debug(message, *args, **kwargs)
-
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Import and setup a Sphinx extension module.
Load the extension given by the module *name*. Use this if your
@@ -449,7 +379,7 @@ class Sphinx(object):
self.registry.load_extension(self, extname)
def require_sphinx(self, version):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Check the Sphinx version if requested.
Compare *version* (which must be a ``major.minor`` version string, e.g.
@@ -462,7 +392,7 @@ class Sphinx(object):
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
- # type: (str, unicode) -> Any
+ # type: (str, str) -> Any
"""Import an object from a ``module.name`` string.
.. deprecated:: 1.8
@@ -475,7 +405,7 @@ class Sphinx(object):
# event interface
def connect(self, event, callback):
- # type: (unicode, Callable) -> int
+ # type: (str, Callable) -> int
"""Register *callback* to be called when *event* is emitted.
For details on available core events and the arguments of callback
@@ -495,7 +425,7 @@ class Sphinx(object):
self.events.disconnect(listener_id)
def emit(self, event, *args):
- # type: (unicode, Any) -> List
+ # type: (str, Any) -> List
"""Emit *event* and pass *arguments* to the callback functions.
Return the return values of all callbacks as a list. Do not emit core
@@ -510,7 +440,7 @@ class Sphinx(object):
return self.events.emit(event, self, *args)
def emit_firstresult(self, event, *args):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
"""Emit *event* and pass *arguments* to the callback functions.
Return the result of the first callback that doesn't return ``None``.
@@ -535,7 +465,7 @@ class Sphinx(object):
# TODO(stephenfin): Describe 'types' parameter
def add_config_value(self, name, default, rebuild, types=()):
- # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ # type: (str, Any, Union[bool, str], Any) -> None
"""Register a configuration value.
This is necessary for Sphinx to recognize new values and set default
@@ -568,7 +498,7 @@ class Sphinx(object):
self.config.add(name, default, rebuild, types)
def add_event(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Register an event called *name*.
This is needed to be able to emit it.
@@ -577,7 +507,7 @@ class Sphinx(object):
self.events.add(name)
def set_translator(self, name, translator_class, override=False):
- # type: (unicode, Type[nodes.NodeVisitor], bool) -> None
+ # type: (str, Type[nodes.NodeVisitor], bool) -> None
"""Register or override a Docutils translator class.
This is used to register a custom output translator or to replace a
@@ -591,7 +521,7 @@ class Sphinx(object):
self.registry.add_translator(name, translator_class, override=override)
def add_node(self, node, override=False, **kwds):
- # type: (nodes.Node, bool, Any) -> None
+ # type: (Type[nodes.Element], bool, Any) -> None
"""Register a Docutils node class.
This is necessary for Docutils internals. It may also be used in the
@@ -623,15 +553,14 @@ class Sphinx(object):
"""
logger.debug('[app] adding node: %r', (node, kwds))
if not override and docutils.is_node_registered(node):
- logger.warning(__('while setting up extension %s: node class %r is '
- 'already registered, its visitors will be overridden'),
- self._setting_up_extension, node.__name__,
- type='app', subtype='add_node')
+ logger.warning(__('node class %r is already registered, '
+ 'its visitors will be overridden'),
+ node.__name__, type='app', subtype='add_node')
docutils.register_node(node)
self.registry.add_translation_handlers(node, **kwds)
def add_enumerable_node(self, node, figtype, title_getter=None, override=False, **kwds):
- # type: (nodes.Node, unicode, TitleGetter, bool, Any) -> None
+ # type: (Type[nodes.Element], str, TitleGetter, bool, Any) -> None
"""Register a Docutils node class as a numfig target.
Sphinx numbers the node automatically. And then the users can refer it
@@ -660,14 +589,14 @@ class Sphinx(object):
@property
def enumerable_nodes(self):
- # type: () -> Dict[nodes.Node, Tuple[unicode, TitleGetter]]
+ # type: () -> Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
warnings.warn('app.enumerable_nodes() is deprecated. '
'Use app.get_domain("std").enumerable_nodes instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.registry.enumerable_nodes
def add_directive(self, name, obj, content=None, arguments=None, override=False, **options): # NOQA
- # type: (unicode, Any, bool, Tuple[int, int, bool], bool, Any) -> None
+ # type: (str, Any, bool, Tuple[int, int, bool], bool, Any) -> None
"""Register a Docutils directive.
*name* must be the prospective directive name. There are two possible
@@ -720,20 +649,18 @@ class Sphinx(object):
"""
logger.debug('[app] adding directive: %r',
(name, obj, content, arguments, options))
- if name in directives._directives and not override:
- logger.warning(__('while setting up extension %s: directive %r is '
- 'already registered, it will be overridden'),
- self._setting_up_extension[-1], name,
- type='app', subtype='add_directive')
+ if not override and docutils.is_directive_registered(name):
+ logger.warning(__('directive %r is already registered, it will be overridden'),
+ name, type='app', subtype='add_directive')
if not isclass(obj) or not issubclass(obj, Directive):
directive = directive_helper(obj, content, arguments, **options)
- directives.register_directive(name, directive)
+ docutils.register_directive(name, directive)
else:
- directives.register_directive(name, obj)
+ docutils.register_directive(name, obj)
def add_role(self, name, role, override=False):
- # type: (unicode, Any, bool) -> None
+ # type: (str, Any, bool) -> None
"""Register a Docutils role.
*name* must be the role name that occurs in the source, *role* the role
@@ -745,15 +672,13 @@ class Sphinx(object):
Add *override* keyword.
"""
logger.debug('[app] adding role: %r', (name, role))
- if name in roles._roles and not override:
- logger.warning(__('while setting up extension %s: role %r is '
- 'already registered, it will be overridden'),
- self._setting_up_extension[-1], name,
- type='app', subtype='add_role')
- roles.register_local_role(name, role)
+ if not override and docutils.is_role_registered(name):
+ logger.warning(__('role %r is already registered, it will be overridden'),
+ name, type='app', subtype='add_role')
+ docutils.register_role(name, role)
def add_generic_role(self, name, nodeclass, override=False):
- # type: (unicode, Any, bool) -> None
+ # type: (str, Any, bool) -> None
"""Register a generic Docutils role.
Register a Docutils role that does nothing but wrap its contents in the
@@ -766,13 +691,11 @@ class Sphinx(object):
# Don't use ``roles.register_generic_role`` because it uses
# ``register_canonical_role``.
logger.debug('[app] adding generic role: %r', (name, nodeclass))
- if name in roles._roles and not override:
- logger.warning(__('while setting up extension %s: role %r is '
- 'already registered, it will be overridden'),
- self._setting_up_extension[-1], name,
- type='app', subtype='add_generic_role')
+ if not override and docutils.is_role_registered(name):
+ logger.warning(__('role %r is already registered, it will be overridden'),
+ name, type='app', subtype='add_generic_role')
role = roles.GenericRole(name, nodeclass)
- roles.register_local_role(name, role)
+ docutils.register_role(name, role)
def add_domain(self, domain, override=False):
# type: (Type[Domain], bool) -> None
@@ -806,7 +729,7 @@ class Sphinx(object):
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
- # type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
+ # type: (str, str, Any, bool, Any, bool, Any) -> None
"""Register a Docutils directive in a domain.
Like :meth:`add_directive`, but the directive is added to the domain
@@ -821,7 +744,7 @@ class Sphinx(object):
**option_spec)
def add_role_to_domain(self, domain, name, role, override=False):
- # type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
+ # type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
"""Register a Docutils role in a domain.
Like :meth:`add_role`, but the role is added to the domain named
@@ -834,7 +757,7 @@ class Sphinx(object):
self.registry.add_role_to_domain(domain, name, role, override=override)
def add_index_to_domain(self, domain, index, override=False):
- # type: (unicode, Type[Index], bool) -> None
+ # type: (str, Type[Index], bool) -> None
"""Register a custom index for a domain.
Add a custom *index* class to the domain named *domain*. *index* must
@@ -849,7 +772,7 @@ class Sphinx(object):
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
- # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List, bool) -> None
+ # type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
"""Register a new object type.
This method is a very convenient way to add a new :term:`object` type
@@ -913,24 +836,9 @@ class Sphinx(object):
ref_nodeclass, objname, doc_field_types,
override=override)
- def add_description_unit(self, directivename, rolename, indextemplate='',
- parse_node=None, ref_nodeclass=None, objname='',
- doc_field_types=[]):
- # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None
- """Deprecated alias for :meth:`add_object_type`.
-
- .. deprecated:: 1.6
- Use :meth:`add_object_type` instead.
- """
- warnings.warn('app.add_description_unit() is now deprecated. '
- 'Use app.add_object_type() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- self.add_object_type(directivename, rolename, indextemplate, parse_node,
- ref_nodeclass, objname, doc_field_types)
-
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
- # type: (unicode, unicode, unicode, nodes.Node, unicode, bool) -> None
+ # type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
"""Register a new crossref object type.
This method is very similar to :meth:`add_object_type` except that the
@@ -1009,7 +917,7 @@ class Sphinx(object):
self.registry.add_post_transform(transform)
def add_javascript(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""An alias of :meth:`add_js_file`."""
warnings.warn('The app.add_javascript() is deprecated. '
'Please use app.add_js_file() instead.',
@@ -1017,7 +925,7 @@ class Sphinx(object):
self.add_js_file(filename, **kwargs)
def add_js_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""Register a JavaScript file to include in the HTML output.
Add *filename* to the list of JavaScript files that the default HTML
@@ -1044,7 +952,7 @@ class Sphinx(object):
self.builder.add_js_file(filename, **kwargs) # type: ignore
def add_css_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
"""Register a stylesheet to include in the HTML output.
Add *filename* to the list of CSS files that the default HTML template
@@ -1084,13 +992,13 @@ class Sphinx(object):
self.builder.add_css_file(filename, **kwargs) # type: ignore
def add_stylesheet(self, filename, alternate=False, title=None):
- # type: (unicode, bool, unicode) -> None
+ # type: (str, bool, str) -> None
"""An alias of :meth:`add_css_file`."""
warnings.warn('The app.add_stylesheet() is deprecated. '
'Please use app.add_css_file() instead.',
RemovedInSphinx40Warning, stacklevel=2)
- attributes = {} # type: Dict[unicode, unicode]
+ attributes = {} # type: Dict[str, str]
if alternate:
attributes['rel'] = 'alternate stylesheet'
else:
@@ -1102,7 +1010,7 @@ class Sphinx(object):
self.add_css_file(filename, **attributes)
def add_latex_package(self, packagename, options=None):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
r"""Register a package to include in the LaTeX source code.
Add *packagename* to the list of packages that LaTeX source code will
@@ -1121,7 +1029,7 @@ class Sphinx(object):
self.registry.add_latex_package(packagename, options)
def add_lexer(self, alias, lexer):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
"""Register a new lexer for source code.
Use *lexer*, which must be an instance of a Pygments lexer class, to
@@ -1155,7 +1063,7 @@ class Sphinx(object):
self.add_directive('auto' + cls.objtype, AutodocDirective)
def add_autodoc_attrgetter(self, typ, getter):
- # type: (Type, Callable[[Any, unicode, Any], Any]) -> None
+ # type: (Type, Callable[[Any, str, Any], Any]) -> None
"""Register a new ``getattr``-like function for the autodoc extension.
Add *getter*, which must be a function with an interface compatible to
@@ -1187,7 +1095,7 @@ class Sphinx(object):
languages[cls.lang] = cls
def add_source_suffix(self, suffix, filetype, override=False):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
"""Register a suffix of source files.
Same as :confval:`source_suffix`. The users can override this
@@ -1222,7 +1130,7 @@ class Sphinx(object):
collector().enable(self)
def add_html_theme(self, name, theme_path):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Register a HTML Theme.
The *name* is a name of theme, and *path* is a full path to the theme
@@ -1234,7 +1142,7 @@ class Sphinx(object):
self.html_themes[name] = theme_path
def add_html_math_renderer(self, name, inline_renderers=None, block_renderers=None):
- # type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
+ # type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
"""Register a math renderer for HTML.
The *name* is a name of math renderer. Both *inline_renderers* and
@@ -1249,7 +1157,7 @@ class Sphinx(object):
self.registry.add_html_math_renderer(name, inline_renderers, block_renderers)
def add_message_catalog(self, catalog, locale_dir):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Register a message catalog.
The *catalog* is a name of catalog, and *locale_dir* is a base path
@@ -1263,7 +1171,7 @@ class Sphinx(object):
# ---- other methods -------------------------------------------------
def is_parallel_allowed(self, typ):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check parallel processing is allowed or not.
``typ`` is a type of processing; ``'read'`` or ``'write'``.
@@ -1283,7 +1191,7 @@ class Sphinx(object):
else:
raise ValueError('parallel type %s is not supported' % typ)
- for ext in itervalues(self.extensions):
+ for ext in self.extensions.values():
allowed = getattr(ext, attrname, None)
if allowed is None:
logger.warning(message, ext.name)
@@ -1294,15 +1202,22 @@ class Sphinx(object):
return True
+ @property
+ def _setting_up_extension(self):
+ # type: () -> List[str]
+ warnings.warn('app._setting_up_extension is deprecated.',
+ RemovedInSphinx30Warning)
+ return ['?']
+
-class TemplateBridge(object):
+class TemplateBridge:
"""
This class defines the interface for a "template bridge", that is, a class
that renders templates given a template name and a context.
"""
def init(self, builder, theme=None, dirs=None):
- # type: (Builder, Theme, List[unicode]) -> None
+ # type: (Builder, Theme, List[str]) -> None
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
@@ -1322,14 +1237,14 @@ class TemplateBridge(object):
return 0
def render(self, template, context):
- # type: (unicode, Dict) -> None
+ # type: (str, Dict) -> None
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index a10ca9e0a..12dfb9277 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders
~~~~~~~~~~~~~~~
@@ -9,14 +8,12 @@
:license: BSD, see LICENSE for details.
"""
+import pickle
import time
-import warnings
from os import path
from docutils import nodes
-from six.moves import cPickle as pickle
-from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.environment import CONFIG_OK, CONFIG_CHANGED_REASON
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.errors import SphinxError
@@ -43,7 +40,7 @@ except ImportError:
if False:
# For type annotation
- from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
@@ -54,25 +51,25 @@ if False:
logger = logging.getLogger(__name__)
-class Builder(object):
+class Builder:
"""
Builds target formats from the reST sources.
"""
#: The builder's name, for the -b command line option.
- name = '' # type: unicode
+ name = ''
#: The builder's output format, or '' if no document output is produced.
- format = '' # type: unicode
+ format = ''
#: The message emitted upon successful build completion. This can be a
#: printf-style template string with the following keys: ``outdir``,
#: ``project``
- epilog = '' # type: unicode
+ epilog = ''
#: default translator class for the builder. This can be overridden by
#: :py:meth:`app.set_translator()`.
- default_translator_class = None # type: nodes.NodeVisitor
+ default_translator_class = None # type: Type[nodes.NodeVisitor]
# doctree versioning method
- versioning_method = 'none' # type: unicode
+ versioning_method = 'none'
versioning_compare = False
# allow parallel write_doc() calls
allow_parallel = False
@@ -81,7 +78,7 @@ class Builder(object):
#: The list of MIME types of image formats supported by the builder.
#: Image files are searched in the order in which they appear here.
- supported_image_types = [] # type: List[unicode]
+ supported_image_types = [] # type: List[str]
#: The builder supports remote images or not.
supported_remote_images = False
#: The builder supports data URIs or not.
@@ -97,8 +94,6 @@ class Builder(object):
self.app = app # type: Sphinx
self.env = None # type: BuildEnvironment
- self.warn = app.warn # type: Callable
- self.info = app.info # type: Callable
self.config = app.config # type: Config
self.tags = app.tags # type: Tags
self.tags.add(self.format)
@@ -107,11 +102,11 @@ class Builder(object):
self.tags.add("builder_%s" % self.name)
# images that need to be copied over (source -> dest)
- self.images = {} # type: Dict[unicode, unicode]
+ self.images = {} # type: Dict[str, str]
# basename of images directory
self.imagedir = ""
# relative path to image directory from current docname (used at writing docs)
- self.imgpath = "" # type: unicode
+ self.imgpath = ""
# these get set later
self.parallel_ok = False
@@ -125,7 +120,7 @@ class Builder(object):
self.versioning_compare)
def get_translator_class(self, *args):
- # type: (Any) -> nodes.NodeVisitor
+ # type: (Any) -> Type[nodes.NodeVisitor]
"""Return a class of translator."""
return self.app.registry.get_translator_class(self)
@@ -138,22 +133,6 @@ class Builder(object):
"""
return self.app.registry.create_translator(self, *args)
- @property
- def translator_class(self):
- # type: () -> Callable[[Any], nodes.NodeVisitor]
- """Return a class of translator.
-
- .. deprecated:: 1.6
- """
- translator_class = self.app.registry.get_translator_class(self)
- if translator_class is None and self.default_translator_class is None:
- warnings.warn('builder.translator_class() is now deprecated. '
- 'Please use builder.create_translator() and '
- 'builder.default_translator_class instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- return None
- return self.create_translator
-
# helper methods
def init(self):
# type: () -> None
@@ -173,7 +152,7 @@ class Builder(object):
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return the target URI for a document name.
*typ* can be used to qualify the link characteristic for individual
@@ -182,7 +161,7 @@ class Builder(object):
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
"""Return a relative URI between two source filenames.
May raise environment.NoUri if there's no way to return a sensible URI.
@@ -191,7 +170,7 @@ class Builder(object):
self.get_target_uri(to, typ))
def get_outdated_docs(self):
- # type: () -> Union[unicode, Iterable[unicode]]
+ # type: () -> Union[str, Iterable[str]]
"""Return an iterable of output files that are outdated, or a string
describing what an update build will build.
@@ -202,7 +181,7 @@ class Builder(object):
raise NotImplementedError
def get_asset_paths(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return list of paths for assets (ex. templates, CSS, etc.)."""
return []
@@ -241,12 +220,12 @@ class Builder(object):
# compile po methods
def compile_catalogs(self, catalogs, message):
- # type: (Set[CatalogInfo], unicode) -> None
+ # type: (Set[CatalogInfo], str) -> None
if not self.config.gettext_auto_build:
return
def cat2relpath(cat):
- # type: (CatalogInfo) -> unicode
+ # type: (CatalogInfo) -> str
return relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)
logger.info(bold(__('building [mo]: ')) + message)
@@ -267,9 +246,9 @@ class Builder(object):
self.compile_catalogs(catalogs, message)
def compile_specific_catalogs(self, specified_files):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
def to_domain(fpath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
docname = self.env.path2doc(path.abspath(fpath))
if docname:
return find_catalog(docname, self.config.gettext_compact)
@@ -305,13 +284,13 @@ class Builder(object):
self.build(None, summary=__('all source files'), method='all')
def build_specific(self, filenames):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
"""Only rebuild as much as needed for changes in the *filenames*."""
# bring the filenames to the canonical format, that is,
# relative to the source directory and without source_suffix.
dirlen = len(self.srcdir) + 1
to_write = []
- suffixes = None # type: Tuple[unicode]
+ suffixes = None # type: Tuple[str]
suffixes = tuple(self.config.source_suffix) # type: ignore
for filename in filenames:
filename = path.normpath(path.abspath(filename))
@@ -347,7 +326,7 @@ class Builder(object):
len(to_build))
def build(self, docnames, summary=None, method='update'):
- # type: (Iterable[unicode], unicode, unicode) -> None
+ # type: (Iterable[str], str, str) -> None
"""Main build method.
First updates the environment, and then calls :meth:`write`.
@@ -418,7 +397,7 @@ class Builder(object):
self.finish_tasks.join()
def read(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
@@ -481,7 +460,7 @@ class Builder(object):
return sorted(docnames)
def _read_serial(self, docnames):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
for docname in status_iterator(docnames, 'reading sources... ', "purple",
len(docnames), self.app.verbosity):
# remove all inventory entries for that file
@@ -490,14 +469,14 @@ class Builder(object):
self.read_doc(docname)
def _read_parallel(self, docnames, nproc):
- # type: (List[unicode], int) -> None
+ # type: (List[str], int) -> None
# clear all outdated docs at once
for docname in docnames:
self.app.emit('env-purge-doc', self.env, docname)
self.env.clear_doc(docname)
def read_process(docs):
- # type: (List[unicode]) -> bytes
+ # type: (List[str]) -> bytes
self.env.app = self.app
for docname in docs:
self.read_doc(docname)
@@ -505,7 +484,7 @@ class Builder(object):
return pickle.dumps(self.env, pickle.HIGHEST_PROTOCOL)
def merge(docs, otherenv):
- # type: (List[unicode], bytes) -> None
+ # type: (List[str], bytes) -> None
env = pickle.loads(otherenv)
self.env.merge_info_from(docs, env, self.app)
@@ -521,7 +500,7 @@ class Builder(object):
tasks.join()
def read_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Parse a file and add/update inventory entries for the doctree."""
self.env.prepare_settings(docname)
@@ -547,7 +526,7 @@ class Builder(object):
self.write_doctree(docname, doctree)
def write_doctree(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
"""Write the doctree to a file."""
# make it picklable
doctree.reporter = None
@@ -556,13 +535,13 @@ class Builder(object):
doctree.settings.env = None
doctree.settings.record_dependencies = None
- doctree_filename = self.env.doc2path(docname, self.env.doctreedir, '.doctree')
+ doctree_filename = path.join(self.doctreedir, docname + '.doctree')
ensuredir(path.dirname(doctree_filename))
with open(doctree_filename, 'wb') as f:
pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
def write(self, build_docnames, updated_docnames, method='update'):
- # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
+ # type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None or build_docnames == ['__all__']:
# build_all
build_docnames = self.env.found_docs
@@ -593,7 +572,7 @@ class Builder(object):
self._write_serial(sorted(docnames))
def _write_serial(self, docnames):
- # type: (Sequence[unicode]) -> None
+ # type: (Sequence[str]) -> None
with logging.pending_warnings():
for docname in status_iterator(docnames, __('writing output... '), "darkgreen",
len(docnames), self.app.verbosity):
@@ -604,9 +583,9 @@ class Builder(object):
self.write_doc(docname, doctree)
def _write_parallel(self, docnames, nproc):
- # type: (Sequence[unicode], int) -> None
+ # type: (Sequence[str], int) -> None
def write_process(docs):
- # type: (List[Tuple[unicode, nodes.Node]]) -> None
+ # type: (List[Tuple[str, nodes.document]]) -> None
self.app.phase = BuildPhase.WRITING
for docname, doctree in docs:
self.write_doc(docname, doctree)
@@ -637,17 +616,17 @@ class Builder(object):
tasks.join()
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
"""A place where you can add logic before :meth:`write_doc` is run"""
raise NotImplementedError
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
"""Where you actually write something to the filesystem."""
raise NotImplementedError
def write_doc_serialized(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
"""Handle parts of write_doc that must be called in the main process
if parallel build is active.
"""
@@ -670,7 +649,7 @@ class Builder(object):
pass
def get_builder_config(self, option, default):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Return a builder specific option.
This method allows customization of common builder settings by
diff --git a/sphinx/builders/_epub_base.py b/sphinx/builders/_epub_base.py
index eaab43fda..abd757591 100644
--- a/sphinx/builders/_epub_base.py
+++ b/sphinx/builders/_epub_base.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders._epub_base
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -50,22 +49,22 @@ logger = logging.getLogger(__name__)
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
-COVERPAGE_NAME = u'epub-cover.xhtml'
+COVERPAGE_NAME = 'epub-cover.xhtml'
-TOCTREE_TEMPLATE = u'toctree-l%d'
+TOCTREE_TEMPLATE = 'toctree-l%d'
-LINK_TARGET_TEMPLATE = u' [%(uri)s]'
+LINK_TARGET_TEMPLATE = ' [%(uri)s]'
-FOOTNOTE_LABEL_TEMPLATE = u'#%d'
+FOOTNOTE_LABEL_TEMPLATE = '#%d'
-FOOTNOTES_RUBRIC_NAME = u'Footnotes'
+FOOTNOTES_RUBRIC_NAME = 'Footnotes'
-CSS_LINK_TARGET_CLASS = u'link-target'
+CSS_LINK_TARGET_CLASS = 'link-target'
# XXX These strings should be localized according to epub_language
GUIDE_TITLES = {
- 'toc': u'Table of Contents',
- 'cover': u'Cover'
+ 'toc': 'Table of Contents',
+ 'cover': 'Cover'
}
MEDIA_TYPES = {
@@ -79,7 +78,7 @@ MEDIA_TYPES = {
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
-} # type: Dict[unicode, unicode]
+}
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
@@ -96,7 +95,7 @@ NavPoint = namedtuple('NavPoint', ['navpoint', 'playorder', 'text', 'refuri', 'c
def sphinx_smarty_pants(t, language='en'):
- # type: (unicode, str) -> unicode
+ # type: (str, str) -> str
t = t.replace('&quot;', '"')
t = smartquotes.educateDashesOldSchool(t)
t = smartquotes.educateQuotes(t, language)
@@ -151,26 +150,27 @@ class EpubBuilder(StandaloneHTMLBuilder):
def init(self):
# type: () -> None
- StandaloneHTMLBuilder.init(self)
+ super().init()
# the output files for epub must be .html only
self.out_suffix = '.xhtml'
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
- self.id_cache = {} # type: Dict[unicode, unicode]
+ self.id_cache = {} # type: Dict[str, str]
self.use_index = self.get_builder_config('use_index', 'epub')
+ self.refnodes = [] # type: List[Dict[str, Any]]
def create_build_info(self):
# type: () -> BuildInfo
return BuildInfo(self.config, self.tags, ['html', 'epub'])
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
def make_id(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
# id_cache is intentionally mutable
"""Return a unique id for name."""
id = self.id_cache.get(name)
@@ -180,7 +180,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return id
def esc(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
name = name.replace('&', '&amp;')
@@ -191,7 +191,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return name
def get_refnodes(self, doctree, result):
- # type: (nodes.Node, List[Dict[unicode, Any]]) -> List[Dict[unicode, Any]]
+ # type: (nodes.Node, List[Dict[str, Any]]) -> List[Dict[str, Any]]
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
@@ -209,8 +209,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
'text': ssp(self.esc(doctree.astext()))
})
break
- else:
- for elem in doctree.children:
+ elif isinstance(doctree, nodes.Element):
+ for elem in doctree:
result = self.get_refnodes(elem, result)
return result
@@ -231,7 +231,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes):
- # type: (List[nodes.Node]) -> None
+ # type: (List[Dict[str, Any]]) -> None
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
@@ -254,47 +254,49 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, prefix, fragment):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree):
- # type: (nodes.Node) -> None
+ # type: (nodes.document) -> None
"""Replace colons with hyphens in href and id attributes.
Some readers crash because they interpret the part as a
transport protocol specification.
"""
- for node in tree.traverse(nodes.reference):
- if 'refuri' in node:
- m = self.refuri_re.match(node['refuri'])
+ for reference in tree.traverse(nodes.reference):
+ if 'refuri' in reference:
+ m = self.refuri_re.match(reference['refuri'])
if m:
- node['refuri'] = self.fix_fragment(m.group(1), m.group(2))
- if 'refid' in node:
- node['refid'] = self.fix_fragment('', node['refid'])
- for node in tree.traverse(nodes.target):
- for i, node_id in enumerate(node['ids']):
+ reference['refuri'] = self.fix_fragment(m.group(1), m.group(2))
+ if 'refid' in reference:
+ reference['refid'] = self.fix_fragment('', reference['refid'])
+
+ for target in tree.traverse(nodes.target):
+ for i, node_id in enumerate(target['ids']):
if ':' in node_id:
- node['ids'][i] = self.fix_fragment('', node_id)
+ target['ids'][i] = self.fix_fragment('', node_id)
- next_node = node.next_node(siblings=True)
- if next_node and isinstance(next_node, nodes.Element):
+ next_node = target.next_node(siblings=True) # type: nodes.Node
+ if isinstance(next_node, nodes.Element):
for i, node_id in enumerate(next_node['ids']):
if ':' in node_id:
next_node['ids'][i] = self.fix_fragment('', node_id)
- for node in tree.traverse(addnodes.desc_signature):
- ids = node.attributes['ids']
+
+ for desc_signature in tree.traverse(addnodes.desc_signature):
+ ids = desc_signature.attributes['ids']
newids = []
for id in ids:
newids.append(self.fix_fragment('', id))
- node.attributes['ids'] = newids
+ desc_signature.attributes['ids'] = newids
def add_visible_links(self, tree, show_urls='inline'):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.document, str) -> None
"""Add visible link targets for external links"""
def make_footnote_ref(doc, label):
- # type: (nodes.Node, unicode) -> nodes.footnote_reference
+ # type: (nodes.document, str) -> nodes.footnote_reference
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
@@ -302,7 +304,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote_ref
def make_footnote(doc, label, uri):
- # type: (nodes.Node, unicode, unicode) -> nodes.footnote
+ # type: (nodes.document, str, str) -> nodes.footnote
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
@@ -313,7 +315,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote
def footnote_spot(tree):
- # type: (nodes.Node) -> Tuple[nodes.Node, int]
+ # type: (nodes.document) -> Tuple[nodes.Element, int]
"""Find or create a spot to place footnotes.
The function returns the tuple (parent, index)."""
@@ -326,8 +328,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
fn = fns[-1]
return fn.parent, fn.parent.index(fn) + 1
for node in tree.traverse(nodes.rubric):
- if len(node.children) == 1 and \
- node.children[0].astext() == FOOTNOTES_RUBRIC_NAME:
+ if len(node) == 1 and node.astext() == FOOTNOTES_RUBRIC_NAME:
return node.parent, node.parent.index(node) + 1
doc = tree.traverse(nodes.document)[0]
rub = nodes.rubric()
@@ -363,7 +364,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
fn_idx += 1
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
@@ -371,10 +372,10 @@ class EpubBuilder(StandaloneHTMLBuilder):
"""
self.fix_ids(doctree)
self.add_visible_links(doctree, self.config.epub_show_urls)
- StandaloneHTMLBuilder.write_doc(self, docname, doctree)
+ super().write_doc(docname, doctree)
def fix_genindex(self, tree):
- # type: (nodes.Node) -> None
+ # type: (List[Tuple[str, List[Tuple[str, Any]]]]) -> None
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
@@ -393,7 +394,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
@@ -410,14 +411,14 @@ class EpubBuilder(StandaloneHTMLBuilder):
dest = self.images[src]
try:
img = Image.open(path.join(self.srcdir, src))
- except IOError:
+ except OSError:
if not self.is_vector_graphics(src):
logger.warning(__('cannot read image file %r: copying it instead'),
path.join(self.srcdir, src))
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
- except (IOError, OSError) as err:
+ except OSError as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
continue
@@ -433,7 +434,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
img = img.resize((nw, nh), Image.BICUBIC)
try:
img.save(path.join(self.outdir, self.imagedir, dest))
- except (IOError, OSError) as err:
+ except OSError as err:
logger.warning(__('cannot write image file %r: %s'),
path.join(self.srcdir, src), err)
@@ -446,11 +447,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
if self.config.epub_fix_images or self.config.epub_max_image_width:
if not Image:
logger.warning(__('PIL not found - copying image files'))
- super(EpubBuilder, self).copy_image_files()
+ super().copy_image_files()
else:
self.copy_image_files_pil()
else:
- super(EpubBuilder, self).copy_image_files()
+ super().copy_image_files()
def copy_download_files(self):
# type: () -> None
@@ -458,7 +459,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
@@ -469,18 +470,17 @@ class EpubBuilder(StandaloneHTMLBuilder):
return
self.fix_genindex(addctx['genindexentries'])
addctx['doctype'] = self.doctype
- StandaloneHTMLBuilder.handle_page(self, pagename, addctx, templatename,
- outfilename, event_arg)
+ super().handle_page(pagename, addctx, templatename, outfilename, event_arg)
def build_mimetype(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file mimetype."""
logger.info(__('writing %s file...'), outname)
copy_asset_file(path.join(self.template_dir, 'mimetype'),
path.join(outdir, outname))
def build_container(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file META-INF/container.xml."""
logger.info(__('writing %s file...'), outname)
filename = path.join(outdir, outname)
@@ -488,11 +488,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
copy_asset_file(path.join(self.template_dir, 'container.xml'), filename)
def content_metadata(self):
- # type: () -> Dict[unicode, Any]
+ # type: () -> Dict[str, Any]
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
- metadata = {} # type: Dict[unicode, Any]
+ metadata = {} # type: Dict[str, Any]
metadata['title'] = self.esc(self.config.epub_title)
metadata['author'] = self.esc(self.config.epub_author)
metadata['uid'] = self.esc(self.config.epub_uid)
@@ -508,7 +508,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_content(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
@@ -519,7 +519,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
- self.files = [] # type: List[unicode]
+ self.files = [] # type: List[str]
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
@@ -622,7 +622,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
metadata)
def new_navpoint(self, node, level, incr=True):
- # type: (nodes.Node, int, bool) -> NavPoint
+ # type: (Dict[str, Any], int, bool) -> NavPoint
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
@@ -632,7 +632,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
node['text'], node['refuri'], [])
def build_navpoints(self, nodes):
- # type: (nodes.Node) -> List[NavPoint]
+ # type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
@@ -677,11 +677,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
return navstack[0].children
def toc_metadata(self, level, navpoints):
- # type: (int, List[NavPoint]) -> Dict[unicode, Any]
+ # type: (int, List[NavPoint]) -> Dict[str, Any]
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
- metadata = {} # type: Dict[unicode, Any]
+ metadata = {} # type: Dict[str, Any]
metadata['uid'] = self.config.epub_uid
metadata['title'] = self.esc(self.config.epub_title)
metadata['level'] = level
@@ -689,7 +689,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_toc(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file toc.ncx."""
logger.info(__('writing %s file...'), outname)
@@ -710,7 +710,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_metadata(level, navpoints))
def build_epub(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
@@ -720,7 +720,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
epub_filename = path.join(outdir, outname)
with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub:
epub.write(path.join(outdir, 'mimetype'), 'mimetype', ZIP_STORED)
- for filename in [u'META-INF/container.xml', u'content.opf', u'toc.ncx']:
+ for filename in ['META-INF/container.xml', 'content.opf', 'toc.ncx']:
epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED)
for filename in self.files:
epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED)
diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py
index 79d57210c..6cfced62c 100644
--- a/sphinx/builders/applehelp.py
+++ b/sphinx/builders/applehelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.applehelp
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,9 +7,8 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-import codecs
+import html
import pipes
import plistlib
import shlex
@@ -18,7 +16,6 @@ import subprocess
from os import path, environ
from sphinx.builders.html import StandaloneHTMLBuilder
-from sphinx.config import string_classes
from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
@@ -26,7 +23,6 @@ from sphinx.util.console import bold # type: ignore
from sphinx.util.fileutil import copy_asset
from sphinx.util.matching import Matcher
from sphinx.util.osutil import copyfile, ensuredir, make_filename
-from sphinx.util.pycompat import htmlescape
if False:
# For type annotation
@@ -36,13 +32,6 @@ if False:
logger = logging.getLogger(__name__)
-# Use plistlib.dump in 3.4 and above
-try:
- write_plist = plistlib.dump # type: ignore
-except AttributeError:
- write_plist = plistlib.writePlist
-
-
# False access page (used because helpd expects strict XHTML)
access_page_template = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"\
@@ -95,7 +84,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
def init(self):
# type: () -> None
- super(AppleHelpBuilder, self).init()
+ super().init()
# the output files for HTML help must be .html only
self.out_suffix = '.html'
self.link_suffix = '.html'
@@ -114,7 +103,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
def handle_finish(self):
# type: () -> None
- super(AppleHelpBuilder, self).handle_finish()
+ super().handle_finish()
self.finish_tasks.add_task(self.copy_localized_files)
self.finish_tasks.add_task(self.build_helpbook)
@@ -173,8 +162,8 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
info_plist['HPDBookRemoteURL'] = self.config.applehelp_remote_url
logger.info(bold(__('writing Info.plist... ')), nonl=True)
- with open(path.join(contents_dir, 'Info.plist'), 'wb') as f:
- write_plist(info_plist, f)
+ with open(path.join(contents_dir, 'Info.plist'), 'wb') as fb:
+ plistlib.dump(info_plist, fb)
logger.info(__('done'))
# Copy the icon, if one is supplied
@@ -193,10 +182,10 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
# Build the access page
logger.info(bold(__('building access page...')), nonl=True)
- with codecs.open(path.join(language_dir, '_access.html'), 'w') as f: # type: ignore
- f.write(access_page_template % {
- 'toc': htmlescape(toc, quote=True),
- 'title': htmlescape(self.config.applehelp_title)
+ with open(path.join(language_dir, '_access.html'), 'w') as ft:
+ ft.write(access_page_template % {
+ 'toc': html.escape(toc, quote=True),
+ 'title': html.escape(self.config.applehelp_title)
})
logger.info(__('done'))
@@ -277,23 +266,23 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(AppleHelpBuilder)
app.add_config_value('applehelp_bundle_name',
lambda self: make_filename(self.project), 'applehelp')
- app.add_config_value('applehelp_bundle_id', None, 'applehelp', string_classes)
+ app.add_config_value('applehelp_bundle_id', None, 'applehelp', [str])
app.add_config_value('applehelp_dev_region', 'en-us', 'applehelp')
app.add_config_value('applehelp_bundle_version', '1', 'applehelp')
- app.add_config_value('applehelp_icon', None, 'applehelp', string_classes)
+ app.add_config_value('applehelp_icon', None, 'applehelp', [str])
app.add_config_value('applehelp_kb_product',
lambda self: '%s-%s' % (make_filename(self.project), self.release),
'applehelp')
- app.add_config_value('applehelp_kb_url', None, 'applehelp', string_classes)
- app.add_config_value('applehelp_remote_url', None, 'applehelp', string_classes)
- app.add_config_value('applehelp_index_anchors', False, 'applehelp', string_classes)
- app.add_config_value('applehelp_min_term_length', None, 'applehelp', string_classes)
+ app.add_config_value('applehelp_kb_url', None, 'applehelp', [str])
+ app.add_config_value('applehelp_remote_url', None, 'applehelp', [str])
+ app.add_config_value('applehelp_index_anchors', False, 'applehelp', [str])
+ app.add_config_value('applehelp_min_term_length', None, 'applehelp', [str])
app.add_config_value('applehelp_stopwords',
lambda self: self.language or 'en', 'applehelp')
app.add_config_value('applehelp_locale', lambda self: self.language or 'en', 'applehelp')
diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py
index 3f9bffa0d..b6cfaa60b 100644
--- a/sphinx/builders/changes.py
+++ b/sphinx/builders/changes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.changes
~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,12 +8,10 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
+import html
from os import path
from typing import cast
-from six import iteritems
-
from sphinx import package_dir
from sphinx.builders import Builder
from sphinx.domains.changeset import ChangeSetDomain
@@ -24,7 +21,6 @@ from sphinx.util import logging
from sphinx.util.console import bold # type: ignore
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.osutil import ensuredir, os_path
-from sphinx.util.pycompat import htmlescape
if False:
# For type annotation
@@ -50,22 +46,22 @@ class ChangesBuilder(Builder):
self.templates.init(self, self.theme)
def get_outdated_docs(self):
- # type: () -> unicode
+ # type: () -> str
return self.outdir
typemap = {
'versionadded': 'added',
'versionchanged': 'changed',
'deprecated': 'deprecated',
- } # type: Dict[unicode, unicode]
+ }
def write(self, *ignored):
# type: (Any) -> None
version = self.config.version
domain = cast(ChangeSetDomain, self.env.get_domain('changeset'))
- libchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int]]]
- apichanges = [] # type: List[Tuple[unicode, unicode, int]]
- otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA
+ libchanges = {} # type: Dict[str, List[Tuple[str, str, int]]]
+ apichanges = [] # type: List[Tuple[str, str, int]]
+ otherchanges = {} # type: Dict[Tuple[str, str], List[Tuple[str, str, int]]]
if version not in self.env.versionchanges:
logger.info(bold(__('no changes in version %s.') % version))
return
@@ -109,15 +105,15 @@ class ChangesBuilder(Builder):
'version': version,
'docstitle': self.config.html_title,
'shorttitle': self.config.html_short_title,
- 'libchanges': sorted(iteritems(libchanges)),
+ 'libchanges': sorted(libchanges.items()),
'apichanges': sorted(apichanges),
- 'otherchanges': sorted(iteritems(otherchanges)),
+ 'otherchanges': sorted(otherchanges.items()),
'show_copyright': self.config.html_show_copyright,
'show_sphinx': self.config.html_show_sphinx,
}
- with codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8') as f: # type: ignore # NOQA
+ with open(path.join(self.outdir, 'index.html'), 'w', encoding='utf8') as f:
f.write(self.templates.render('changes/frameset.html', ctx))
- with codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8') as f: # type: ignore # NOQA
+ with open(path.join(self.outdir, 'changes.html'), 'w', encoding='utf8') as f:
f.write(self.templates.render('changes/versionchanges.html', ctx))
hltext = ['.. versionadded:: %s' % version,
@@ -125,8 +121,8 @@ class ChangesBuilder(Builder):
'.. deprecated:: %s' % version]
def hl(no, line):
- # type: (int, unicode) -> unicode
- line = '<a name="L%s"> </a>' % no + htmlescape(line)
+ # type: (int, str) -> str
+ line = '<a name="L%s"> </a>' % no + html.escape(line)
for x in hltext:
if x in line:
line = '<span class="hl">%s</span>' % line
@@ -135,8 +131,8 @@ class ChangesBuilder(Builder):
logger.info(bold(__('copying source files...')))
for docname in self.env.all_docs:
- with codecs.open(self.env.doc2path(docname), 'r', # type: ignore
- self.env.config.source_encoding) as f:
+ with open(self.env.doc2path(docname),
+ encoding=self.env.config.source_encoding) as f:
try:
lines = f.readlines()
except UnicodeDecodeError:
@@ -144,7 +140,7 @@ class ChangesBuilder(Builder):
continue
targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html'
ensuredir(path.dirname(targetfn))
- with codecs.open(targetfn, 'w', 'utf-8') as f: # type: ignore
+ with open(targetfn, 'w', encoding='utf-8') as f:
text = ''.join(hl(i + 1, line) for (i, line) in enumerate(lines))
ctx = {
'filename': self.env.doc2path(docname, None),
@@ -152,15 +148,15 @@ class ChangesBuilder(Builder):
}
f.write(self.templates.render('changes/rstsource.html', ctx))
themectx = dict(('theme_' + key, val) for (key, val) in
- iteritems(self.theme.get_options({})))
+ self.theme.get_options({}).items())
copy_asset_file(path.join(package_dir, 'themes', 'default', 'static', 'default.css_t'),
self.outdir, context=themectx, renderer=self.templates)
copy_asset_file(path.join(package_dir, 'themes', 'basic', 'static', 'basic.css'),
self.outdir)
def hl(self, text, version):
- # type: (unicode, unicode) -> unicode
- text = htmlescape(text)
+ # type: (str, str) -> str
+ text = html.escape(text)
for directive in ['versionchanged', 'versionadded', 'deprecated']:
text = text.replace('.. %s:: %s' % (directive, version),
'<b>.. %s:: %s</b>' % (directive, version))
@@ -172,7 +168,7 @@ class ChangesBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(ChangesBuilder)
return {
diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py
index fc2c0b1c9..78843ea19 100644
--- a/sphinx/builders/devhelp.py
+++ b/sphinx/builders/devhelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.devhelp
~~~~~~~~~~~~~~~~~~~~~~~
@@ -10,11 +9,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import gzip
import re
from os import path
+from typing import Any
from docutils import nodes
@@ -23,6 +22,7 @@ from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.locale import __
from sphinx.util import logging
+from sphinx.util.nodes import NodeMatcher
from sphinx.util.osutil import make_filename
try:
@@ -32,7 +32,7 @@ except ImportError:
if False:
# For type annotation
- from typing import Any, Dict, List # NOQA
+ from typing import Dict, List # NOQA
from sphinx.application import Sphinx # NOQA
@@ -60,7 +60,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
def init(self):
# type: () -> None
- StandaloneHTMLBuilder.init(self)
+ super().init()
self.out_suffix = '.html'
self.link_suffix = '.html'
@@ -69,7 +69,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
self.build_devhelp(self.outdir, self.config.devhelp_basename)
def build_devhelp(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('dumping devhelp index...'))
# Basic info
@@ -87,7 +87,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
self.config.master_doc, self, prune_toctrees=False)
def write_toc(node, parent):
- # type: (nodes.Node, nodes.Node) -> None
+ # type: (nodes.Node, etree.Element) -> None
if isinstance(node, addnodes.compact_paragraph) or \
isinstance(node, nodes.bullet_list):
for subnode in node:
@@ -100,12 +100,8 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
parent.attrib['link'] = node['refuri']
parent.attrib['name'] = node.astext()
- def istoctree(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, addnodes.compact_paragraph) and \
- 'toctree' in node
-
- for node in tocdoc.traverse(istoctree):
+ matcher = NodeMatcher(addnodes.compact_paragraph, toctree=Any)
+ for node in tocdoc.traverse(matcher): # type: addnodes.compact_paragraph
write_toc(node, chapters)
# Index
@@ -113,7 +109,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
index = IndexEntries(self.env).create_index(self)
def write_index(title, refs, subitems):
- # type: (unicode, List[Any], Any) -> None
+ # type: (str, List[Any], Any) -> None
if len(refs) == 0:
pass
elif len(refs) == 1:
@@ -137,12 +133,12 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
# Dump the XML file
xmlfile = path.join(outdir, outname + '.devhelp.gz')
- with gzip.open(xmlfile, 'w') as f: # type: ignore
+ with gzip.open(xmlfile, 'w') as f:
tree.write(f, 'utf-8')
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(DevhelpBuilder)
diff --git a/sphinx/builders/dummy.py b/sphinx/builders/dummy.py
index 805924290..d5ae94a82 100644
--- a/sphinx/builders/dummy.py
+++ b/sphinx/builders/dummy.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.dummy
~~~~~~~~~~~~~~~~~~~~~
@@ -31,19 +30,19 @@ class DummyBuilder(Builder):
pass
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
pass
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
pass
def finish(self):
@@ -52,7 +51,7 @@ class DummyBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(DummyBuilder)
return {
diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py
index c1e6ab98a..88f93e90f 100644
--- a/sphinx/builders/epub3.py
+++ b/sphinx/builders/epub3.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.epub3
~~~~~~~~~~~~~~~~~~~~~
@@ -13,11 +12,9 @@
from collections import namedtuple
from os import path
-from six import string_types
-
from sphinx import package_dir
from sphinx.builders import _epub_base
-from sphinx.config import string_classes, ENUM
+from sphinx.config import ENUM
from sphinx.locale import __
from sphinx.util import logging, xmlname_checker
from sphinx.util.fileutil import copy_asset_file
@@ -26,7 +23,7 @@ from sphinx.util.osutil import make_filename
if False:
# For type annotation
- from typing import Any, Dict, Iterable, List, Tuple # NOQA
+ from typing import Any, Dict, Iterable, List, Set, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
@@ -53,8 +50,8 @@ THEME_WRITING_MODES = {
DOCTYPE = '''<!DOCTYPE html>'''
HTML_TAG = (
- u'<html xmlns="http://www.w3.org/1999/xhtml" '
- u'xmlns:epub="http://www.idpf.org/2007/ops">'
+ '<html xmlns="http://www.w3.org/1999/xhtml" '
+ 'xmlns:epub="http://www.idpf.org/2007/ops">'
)
@@ -131,7 +128,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
"""
writing_mode = self.config.epub_writing_mode
- metadata = super(Epub3Builder, self).content_metadata()
+ metadata = super().content_metadata()
metadata['description'] = self.esc(self.config.epub_description)
metadata['contributor'] = self.esc(self.config.epub_contributor)
metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode)
@@ -142,8 +139,8 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def prepare_writing(self, docnames):
- # type: (Iterable[unicode]) -> None
- super(Epub3Builder, self).prepare_writing(docnames)
+ # type: (Set[str]) -> None
+ super().prepare_writing(docnames)
writing_mode = self.config.epub_writing_mode
self.globalcontext['theme_writing_mode'] = THEME_WRITING_MODES.get(writing_mode)
@@ -152,7 +149,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
self.globalcontext['skip_ua_compatible'] = True
def build_navlist(self, navnodes):
- # type: (List[nodes.Node]) -> List[NavPoint]
+ # type: (List[Dict[str, Any]]) -> List[NavPoint]
"""Create the toc navigation structure.
This method is almost same as build_navpoints method in epub.py.
@@ -206,7 +203,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
return metadata
def build_navigation_doc(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Write the metainfo file nav.xhtml."""
logger.info(__('writing %s file...'), outname)
@@ -232,9 +229,9 @@ class Epub3Builder(_epub_base.EpubBuilder):
def convert_epub_css_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled epub_css_files to tuple styled one."""
- epub_css_files = [] # type: List[Tuple[unicode, Dict]]
+ epub_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.epub_css_files:
- if isinstance(entry, string_types):
+ if isinstance(entry, str):
epub_css_files.append((entry, {}))
else:
try:
@@ -248,7 +245,7 @@ def convert_epub_css_files(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(Epub3Builder)
# config values
@@ -277,8 +274,8 @@ def setup(app):
app.add_config_value('epub_max_image_width', 0, 'env')
app.add_config_value('epub_show_urls', 'inline', 'epub')
app.add_config_value('epub_use_index', lambda self: self.html_use_index, 'epub')
- app.add_config_value('epub_description', 'unknown', 'epub', string_classes)
- app.add_config_value('epub_contributor', 'unknown', 'epub', string_classes)
+ app.add_config_value('epub_description', 'unknown', 'epub')
+ app.add_config_value('epub_contributor', 'unknown', 'epub')
app.add_config_value('epub_writing_mode', 'horizontal', 'epub',
ENUM('horizontal', 'vertical'))
diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py
index 9981012a0..3c84f5179 100644
--- a/sphinx/builders/gettext.py
+++ b/sphinx/builders/gettext.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.gettext
~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,17 +8,14 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import unicode_literals
-
from codecs import open
from collections import defaultdict, OrderedDict
from datetime import datetime, tzinfo, timedelta
+from io import StringIO
from os import path, walk, getenv
from time import time
from uuid import uuid4
-from six import iteritems, StringIO
-
from sphinx.builders import Builder
from sphinx.domains.python import pairindextypes
from sphinx.errors import ThemeError
@@ -33,10 +29,10 @@ from sphinx.util.tags import Tags
if False:
# For type annotation
- from typing import Any, DefaultDict, Dict, Iterable, List, Set, Tuple # NOQA
+ from typing import Any, DefaultDict, Dict, Iterable, List, Set, Tuple, Union # NOQA
from docutils import nodes # NOQA
- from sphinx.util.i18n import CatalogInfo # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.util.i18n import CatalogInfo # NOQA
logger = logging.getLogger(__name__)
@@ -63,18 +59,18 @@ msgstr ""
"""[1:]
-class Catalog(object):
+class Catalog:
"""Catalog of translatable messages."""
def __init__(self):
# type: () -> None
- self.messages = [] # type: List[unicode]
+ self.messages = [] # type: List[str]
# retain insertion order, a la OrderedDict
- self.metadata = OrderedDict() # type: Dict[unicode, List[Tuple[unicode, int, unicode]]] # NOQA
+ self.metadata = OrderedDict() # type: Dict[str, List[Tuple[str, int, str]]]
# msgid -> file, line, uid
def add(self, msg, origin):
- # type: (unicode, MsgOrigin) -> None
+ # type: (str, Union[nodes.Element, MsgOrigin]) -> None
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
@@ -82,16 +78,16 @@ class Catalog(object):
if msg not in self.metadata: # faster lookup in hash
self.messages.append(msg)
self.metadata[msg] = []
- self.metadata[msg].append((origin.source, origin.line, origin.uid))
+ self.metadata[msg].append((origin.source, origin.line, origin.uid)) # type: ignore
-class MsgOrigin(object):
+class MsgOrigin:
"""
Origin holder for Catalog message origin.
"""
def __init__(self, source, line):
- # type: (unicode, int) -> None
+ # type: (str, int) -> None
self.source = source
self.line = line
self.uid = uuid4().hex
@@ -120,32 +116,31 @@ class I18nBuilder(Builder):
def init(self):
# type: () -> None
- Builder.init(self)
+ super().init()
self.env.set_versioning_method(self.versioning_method,
self.env.config.gettext_uuid)
self.tags = I18nTags()
- self.catalogs = defaultdict(Catalog) # type: DefaultDict[unicode, Catalog]
+ self.catalogs = defaultdict(Catalog) # type: DefaultDict[str, Catalog]
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
return
def compile_catalogs(self, catalogs, message):
- # type: (Set[CatalogInfo], unicode) -> None
+ # type: (Set[CatalogInfo], str) -> None
return
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
- catalog = self.catalogs[find_catalog(docname,
- self.config.gettext_compact)]
+ # type: (str, nodes.document) -> None
+ catalog = self.catalogs[find_catalog(docname, self.config.gettext_compact)]
for node, msg in extract_messages(doctree):
catalog.add(msg, node)
@@ -178,7 +173,7 @@ class LocalTimeZone(tzinfo):
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
- super(LocalTimeZone, self).__init__(*args, **kw) # type: ignore
+ super().__init__(*args, **kw) # type: ignore
self.tzdelta = tzdelta
def utcoffset(self, dt):
@@ -194,11 +189,11 @@ ltz = LocalTimeZone()
def should_write(filepath, new_content):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
if not path.exists(filepath):
return True
try:
- with open(filepath, 'r', encoding='utf-8') as oldpot: # type: ignore
+ with open(filepath, encoding='utf-8') as oldpot:
old_content = oldpot.read()
old_header_index = old_content.index('"POT-Creation-Date:')
new_header_index = new_content.index('"POT-Creation-Date:')
@@ -221,12 +216,12 @@ class MessageCatalogBuilder(I18nBuilder):
def init(self):
# type: () -> None
- I18nBuilder.init(self)
+ super().init()
self.create_template_bridge()
self.templates.init(self)
def _collect_templates(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
@@ -246,10 +241,10 @@ class MessageCatalogBuilder(I18nBuilder):
extract_translations = self.templates.environment.extract_translations
- for template in status_iterator(files, __('reading templates... '), "purple", # type: ignore # NOQA
+ for template in status_iterator(files, __('reading templates... '), "purple",
len(files), self.app.verbosity):
try:
- with open(template, 'r', encoding='utf-8') as f: # type: ignore
+ with open(template, encoding='utf-8') as f:
context = f.read()
for line, meth, msg in extract_translations(context):
origin = MsgOrigin(template, line)
@@ -258,21 +253,21 @@ class MessageCatalogBuilder(I18nBuilder):
raise ThemeError('%s: %r' % (template, exc))
def build(self, docnames, summary=None, method='update'):
- # type: (Iterable[unicode], unicode, unicode) -> None
+ # type: (Iterable[str], str, str) -> None
self._extract_from_template()
- I18nBuilder.build(self, docnames, summary, method)
+ super().build(docnames, summary, method)
def finish(self):
# type: () -> None
- I18nBuilder.finish(self)
- data = dict(
- version = self.config.version,
- copyright = self.config.copyright,
- project = self.config.project,
- ctime = datetime.fromtimestamp(
+ super().finish()
+ data = {
+ 'version': self.config.version,
+ 'copyright': self.config.copyright,
+ 'project': self.config.project,
+ 'ctime': datetime.fromtimestamp(
timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
- )
- for textdomain, catalog in status_iterator(iteritems(self.catalogs), # type: ignore
+ }
+ for textdomain, catalog in status_iterator(self.catalogs.items(),
__("writing message catalogs... "),
"darkgreen", len(self.catalogs),
self.app.verbosity,
@@ -282,36 +277,35 @@ class MessageCatalogBuilder(I18nBuilder):
pofn = path.join(self.outdir, textdomain + '.pot')
output = StringIO()
- output.write(POHEADER % data) # type: ignore
+ output.write(POHEADER % data)
for message in catalog.messages:
positions = catalog.metadata[message]
if self.config.gettext_location:
# generate "#: file1:line1\n#: file2:line2 ..."
- output.write("#: %s\n" % "\n#: ".join( # type: ignore
+ output.write("#: %s\n" % "\n#: ".join(
"%s:%s" % (canon_path(relpath(source, self.outdir)), line)
for source, line, _ in positions))
if self.config.gettext_uuid:
# generate "# uuid1\n# uuid2\n ..."
- output.write("# %s\n" % "\n# ".join( # type: ignore
- uid for _, _, uid in positions))
+ output.write("# %s\n" % "\n# ".join(uid for _, _, uid in positions))
# message contains *one* line of text ready for translation
message = message.replace('\\', r'\\'). \
replace('"', r'\"'). \
replace('\n', '\\n"\n"')
- output.write('msgid "%s"\nmsgstr ""\n\n' % message) # type: ignore
+ output.write('msgid "%s"\nmsgstr ""\n\n' % message)
content = output.getvalue()
if should_write(pofn, content):
- with open(pofn, 'w', encoding='utf-8') as pofile: # type: ignore
+ with open(pofn, 'w', encoding='utf-8') as pofile:
pofile.write(content)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext')
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 5060d1f02..b8b03c072 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.html
~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,8 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
+import html
+import pickle
import posixpath
import re
import sys
@@ -25,14 +25,11 @@ from docutils.frontend import OptionParser
from docutils.io import DocTreeInput, StringOutput
from docutils.readers.doctree import Reader as DoctreeReader
from docutils.utils import relative_path
-from six import iteritems, text_type, string_types
-from six.moves import cPickle as pickle
from sphinx import package_dir, __display_version__
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.builders import Builder
-from sphinx.config import string_classes
-from sphinx.deprecation import RemovedInSphinx20Warning, RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.environment.adapters.toctree import TocTree
@@ -51,15 +48,14 @@ from sphinx.util.matching import patmatch, Matcher, DOTFILES
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, copyfile
-from sphinx.util.pycompat import htmlescape
from sphinx.writers.html import HTMLWriter, HTMLTranslator
if False:
# For type annotation
- from typing import Any, Dict, IO, Iterable, Iterator, List, Type, Tuple, Union # NOQA
+ from typing import Any, Dict, IO, Iterable, Iterator, List, Set, Type, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
- from sphinx.domains import Domain, Index # NOQA
+ from sphinx.domains import Domain, Index, IndexEntry # NOQA
from sphinx.util.tags import Tags # NOQA
# Experimental HTML5 Writer
@@ -79,7 +75,7 @@ return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
@@ -89,69 +85,22 @@ def get_stable_hash(obj):
return get_stable_hash(list(obj.items()))
elif isinstance(obj, (list, tuple)):
obj = sorted(get_stable_hash(o) for o in obj)
- return md5(text_type(obj).encode('utf8')).hexdigest()
+ return md5(str(obj).encode()).hexdigest()
-class CSSContainer(list):
- """The container for stylesheets.
-
- To support the extensions which access the container directly, this wraps
- the entry with Stylesheet class.
- """
- def append(self, obj):
- # type: (Union[unicode, Stylesheet]) -> None
- if isinstance(obj, Stylesheet):
- super(CSSContainer, self).append(obj)
- else:
- super(CSSContainer, self).append(Stylesheet(obj))
-
- def insert(self, index, obj):
- # type: (int, Union[unicode, Stylesheet]) -> None
- warnings.warn('builder.css_files is deprecated. '
- 'Please use app.add_stylesheet() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- if isinstance(obj, Stylesheet):
- super(CSSContainer, self).insert(index, obj)
- else:
- super(CSSContainer, self).insert(index, Stylesheet(obj))
-
- def extend(self, other): # type: ignore
- # type: (List[Union[unicode, Stylesheet]]) -> None
- warnings.warn('builder.css_files is deprecated. '
- 'Please use app.add_stylesheet() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- for item in other:
- self.append(item)
-
- def __iadd__(self, other): # type: ignore
- # type: (List[Union[unicode, Stylesheet]]) -> CSSContainer
- warnings.warn('builder.css_files is deprecated. '
- 'Please use app.add_stylesheet() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- for item in other:
- self.append(item)
- return self
-
- def __add__(self, other):
- # type: (List[Union[unicode, Stylesheet]]) -> CSSContainer
- ret = CSSContainer(self)
- ret += other
- return ret
-
-
-class Stylesheet(text_type):
+class Stylesheet(str):
"""A metadata of stylesheet.
To keep compatibility with old themes, an instance of stylesheet behaves as
its filename (str).
"""
- attributes = None # type: Dict[unicode, unicode]
- filename = None # type: unicode
+ attributes = None # type: Dict[str, str]
+ filename = None # type: str
def __new__(cls, filename, *args, **attributes):
- # type: (unicode, unicode, unicode) -> None
- self = text_type.__new__(cls, filename) # type: ignore
+ # type: (str, str, str) -> None
+ self = str.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
self.attributes.setdefault('rel', 'stylesheet')
@@ -166,14 +115,14 @@ class Stylesheet(text_type):
class JSContainer(list):
"""The container for JavaScript scripts."""
def insert(self, index, obj):
- # type: (int, unicode) -> None
+ # type: (int, str) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
- super(JSContainer, self).insert(index, obj)
+ super().insert(index, obj)
def extend(self, other): # type: ignore
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -181,7 +130,7 @@ class JSContainer(list):
self.append(item)
def __iadd__(self, other): # type: ignore
- # type: (List[unicode]) -> JSContainer
+ # type: (List[str]) -> JSContainer
warnings.warn('builder.script_files is deprecated. '
'Please use app.add_js_file() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -190,25 +139,25 @@ class JSContainer(list):
return self
def __add__(self, other):
- # type: (List[unicode]) -> JSContainer
+ # type: (List[str]) -> JSContainer
ret = JSContainer(self)
ret += other
return ret
-class JavaScript(text_type):
+class JavaScript(str):
"""A metadata of javascript file.
To keep compatibility with old themes, an instance of javascript behaves as
its filename (str).
"""
- attributes = None # type: Dict[unicode, unicode]
- filename = None # type: unicode
+ attributes = None # type: Dict[str, str]
+ filename = None # type: str
def __new__(cls, filename, **attributes):
- # type: (unicode, **unicode) -> None
- self = text_type.__new__(cls, filename) # type: ignore
+ # type: (str, **str) -> None
+ self = str.__new__(cls, filename) # type: ignore
self.filename = filename
self.attributes = attributes
self.attributes.setdefault('type', 'text/javascript')
@@ -216,7 +165,7 @@ class JavaScript(text_type):
return self
-class BuildInfo(object):
+class BuildInfo:
"""buildinfo file manipulator.
HTMLBuilder and its family are storing their own envdata to ``.buildinfo``.
@@ -240,9 +189,9 @@ class BuildInfo(object):
raise ValueError(__('build info file is broken: %r') % exc)
def __init__(self, config=None, tags=None, config_categories=[]):
- # type: (Config, Tags, List[unicode]) -> None
- self.config_hash = u''
- self.tags_hash = u''
+ # type: (Config, Tags, List[str]) -> None
+ self.config_hash = ''
+ self.tags_hash = ''
if config:
values = dict((c.name, c.value) for c in config.filter(config_categories))
@@ -256,10 +205,6 @@ class BuildInfo(object):
return (self.config_hash == other.config_hash and
self.tags_hash == other.tags_hash)
- def __ne__(self, other): # type: ignore
- # type: (BuildInfo) -> bool
- return not (self == other) # for py27
-
def dump(self, f):
# type: (IO) -> None
f.write('# Sphinx build info version 1\n'
@@ -300,18 +245,18 @@ class StandaloneHTMLBuilder(Builder):
# use html5 translator by default
default_html5_translator = False
- imgpath = None # type: unicode
- domain_indices = [] # type: List[Tuple[unicode, Type[Index], List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool]] # NOQA
+ imgpath = None # type: str
+ domain_indices = [] # type: List[Tuple[str, Type[Index], List[Tuple[str, List[IndexEntry]]], bool]] # NOQA
# cached publisher object for snippets
_publisher = None
def __init__(self, app):
# type: (Sphinx) -> None
- super(StandaloneHTMLBuilder, self).__init__(app)
+ super().__init__(app)
# CSS files
- self.css_files = CSSContainer() # type: List[Dict[unicode, unicode]]
+ self.css_files = [] # type: List[Dict[str, str]]
# JS files
self.script_files = JSContainer() # type: List[JavaScript]
@@ -322,35 +267,38 @@ class StandaloneHTMLBuilder(Builder):
# basename of images directory
self.imagedir = '_images'
# section numbers for headings in the currently visited document
- self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
+ self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
# currently written docname
- self.current_docname = None # type: unicode
+ self.current_docname = None # type: str
self.init_templates()
self.init_highlighter()
self.init_css_files()
self.init_js_files()
- if self.config.html_file_suffix is not None:
- self.out_suffix = self.config.html_file_suffix
- if self.config.html_link_suffix is not None:
- self.link_suffix = self.config.html_link_suffix
+ html_file_suffix = self.get_builder_config('file_suffix', 'html')
+ if html_file_suffix is not None:
+ self.out_suffix = html_file_suffix
+
+ html_link_suffix = self.get_builder_config('link_suffix', 'html')
+ if html_link_suffix is not None:
+ self.link_suffix = html_link_suffix
else:
self.link_suffix = self.out_suffix
self.use_index = self.get_builder_config('use_index', 'html')
if self.config.html_experimental_html5_writer and not html5_ready:
- self.app.warn(('html_experimental_html5_writer is set, but current version '
- 'is old. Docutils\' version should be 0.13 or newer, but %s.') %
- docutils.__version__)
+ logger.warning(__('html_experimental_html5_writer is set, but current version '
+ 'is old. Docutils\' version should be 0.13 or newer, but %s.'),
+ docutils.__version__)
def create_build_info(self):
# type: () -> BuildInfo
return BuildInfo(self.config, self.tags, ['html'])
def _get_translations_js(self):
- # type: () -> unicode
+ # type: () -> str
candidates = [path.join(dir, self.config.language,
'LC_MESSAGES', 'sphinx.js')
for dir in self.config.locale_dirs] + \
@@ -365,7 +313,7 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
@@ -397,7 +345,7 @@ class StandaloneHTMLBuilder(Builder):
self.add_css_file(filename, **attrs)
def add_css_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
if '://' not in filename:
filename = posixpath.join('_static', filename)
@@ -420,15 +368,15 @@ class StandaloneHTMLBuilder(Builder):
self.add_js_file('translations.js')
def add_js_file(self, filename, **kwargs):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
if filename and '://' not in filename:
filename = posixpath.join('_static', filename)
self.script_files.append(JavaScript(filename, **kwargs))
@property
- def default_translator_class(self):
- # type: () -> nodes.NodeVisitor
+ def default_translator_class(self): # type: ignore
+ # type: () -> Type[nodes.NodeVisitor]
use_html5_writer = self.config.html_experimental_html5_writer
if use_html5_writer is None:
use_html5_writer = self.default_html5_translator
@@ -440,7 +388,7 @@ class StandaloneHTMLBuilder(Builder):
@property
def math_renderer_name(self):
- # type: () -> unicode
+ # type: () -> str
name = self.get_builder_config('math_renderer', 'html')
if name is not None:
# use given name
@@ -460,18 +408,17 @@ class StandaloneHTMLBuilder(Builder):
return None
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
try:
with open(path.join(self.outdir, '.buildinfo')) as fp:
buildinfo = BuildInfo.load(fp)
if self.build_info != buildinfo:
- for docname in self.env.found_docs:
- yield docname
+ yield from self.env.found_docs
return
except ValueError as exc:
logger.warning(__('Failed to read build info file: %r'), exc)
- except IOError:
+ except OSError:
# ignore errors on reading
pass
@@ -493,20 +440,20 @@ class StandaloneHTMLBuilder(Builder):
template_mtime)
if srcmtime > targetmtime:
yield docname
- except EnvironmentError:
+ except OSError:
# source doesn't exist anymore
pass
def get_asset_paths(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
return self.config.html_extra_path + self.config.html_static_path
def render_partial(self, node):
- # type: (nodes.Nodes) -> Dict[unicode, unicode]
+ # type: (nodes.Node) -> Dict[str, str]
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
- doc = new_document(b'<partial node>')
+ doc = new_document('<partial node>')
doc.append(node)
if self._publisher is None:
@@ -528,7 +475,7 @@ class StandaloneHTMLBuilder(Builder):
return pub.writer.parts
def prepare_writing(self, docnames):
- # type: (Iterable[unicode]) -> nodes.Node
+ # type: (Set[str]) -> None
# create the search indexer
self.indexer = None
if self.search:
@@ -545,7 +492,7 @@ class StandaloneHTMLBuilder(Builder):
self.docsettings = OptionParser(
defaults=self.env.settings,
components=(self.docwriter,),
- read_config_files=True).get_default_values()
+ read_config_files=True).get_default_values() # type: Any
self.docsettings.compact_lists = bool(self.config.html_compact_lists)
# determine the additional indices to include
@@ -557,7 +504,7 @@ class StandaloneHTMLBuilder(Builder):
domain = None # type: Domain
domain = self.env.domains[domain_name]
for indexcls in domain.indices:
- indexname = '%s-%s' % (domain.name, indexcls.name) # type: unicode
+ indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
@@ -581,12 +528,12 @@ class StandaloneHTMLBuilder(Builder):
favicon = self.config.html_favicon and \
path.basename(self.config.html_favicon) or ''
- if not isinstance(self.config.html_use_opensearch, string_types):
+ if not isinstance(self.config.html_use_opensearch, str):
logger.warning(__('html_use_opensearch config value must now be a string'))
self.relations = self.env.collect_relations()
- rellinks = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
+ rellinks = [] # type: List[Tuple[str, str, str, str]]
if self.use_index:
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, content, collapse in self.domain_indices:
@@ -602,43 +549,43 @@ class StandaloneHTMLBuilder(Builder):
else:
stylename = 'default.css'
- self.globalcontext = dict(
- embedded = self.embedded,
- project = self.config.project,
- release = return_codes_re.sub('', self.config.release),
- version = self.config.version,
- last_updated = self.last_updated,
- copyright = self.config.copyright,
- master_doc = self.config.master_doc,
- use_opensearch = self.config.html_use_opensearch,
- docstitle = self.config.html_title,
- shorttitle = self.config.html_short_title,
- show_copyright = self.config.html_show_copyright,
- show_sphinx = self.config.html_show_sphinx,
- has_source = self.config.html_copy_source,
- show_source = self.config.html_show_sourcelink,
- sourcelink_suffix = self.config.html_sourcelink_suffix,
- file_suffix = self.out_suffix,
- script_files = self.script_files,
- language = self.config.language,
- css_files = self.css_files,
- sphinx_version = __display_version__,
- style = stylename,
- rellinks = rellinks,
- builder = self.name,
- parents = [],
- logo = logo,
- favicon = favicon,
- html5_doctype = self.config.html_experimental_html5_writer and html5_ready,
- ) # type: Dict[unicode, Any]
+ self.globalcontext = {
+ 'embedded': self.embedded,
+ 'project': self.config.project,
+ 'release': return_codes_re.sub('', self.config.release),
+ 'version': self.config.version,
+ 'last_updated': self.last_updated,
+ 'copyright': self.config.copyright,
+ 'master_doc': self.config.master_doc,
+ 'use_opensearch': self.config.html_use_opensearch,
+ 'docstitle': self.config.html_title,
+ 'shorttitle': self.config.html_short_title,
+ 'show_copyright': self.config.html_show_copyright,
+ 'show_sphinx': self.config.html_show_sphinx,
+ 'has_source': self.config.html_copy_source,
+ 'show_source': self.config.html_show_sourcelink,
+ 'sourcelink_suffix': self.config.html_sourcelink_suffix,
+ 'file_suffix': self.out_suffix,
+ 'script_files': self.script_files,
+ 'language': self.config.language,
+ 'css_files': self.css_files,
+ 'sphinx_version': __display_version__,
+ 'style': stylename,
+ 'rellinks': rellinks,
+ 'builder': self.name,
+ 'parents': [],
+ 'logo': logo,
+ 'favicon': favicon,
+ 'html5_doctype': self.config.html_experimental_html5_writer and html5_ready,
+ }
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
- iteritems(self.theme.get_options(self.theme_options)))
+ self.theme.get_options(self.theme_options).items())
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
- # type: (unicode, unicode, Dict) -> Dict[unicode, Any]
+ # type: (str, str, str) -> Dict[str, Any]
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
@@ -681,8 +628,8 @@ class StandaloneHTMLBuilder(Builder):
parents.reverse()
# title rendered as HTML
- title = self.env.longtitles.get(docname)
- title = title and self.render_partial(title)['title'] or ''
+ title_node = self.env.longtitles.get(docname)
+ title = title_node and self.render_partial(title_node)['title'] or ''
# Suffix for the document
source_suffix = path.splitext(self.env.doc2path(docname))[1]
@@ -702,31 +649,31 @@ class StandaloneHTMLBuilder(Builder):
self_toc = TocTree(self.env).get_toc_for(docname, self)
toc = self.render_partial(self_toc)['fragment']
- return dict(
- parents = parents,
- prev = prev,
- next = next,
- title = title,
- meta = meta,
- body = body,
- metatags = metatags,
- rellinks = rellinks,
- sourcename = sourcename,
- toc = toc,
+ return {
+ 'parents': parents,
+ 'prev': prev,
+ 'next': next,
+ 'title': title,
+ 'meta': meta,
+ 'body': body,
+ 'metatags': metatags,
+ 'rellinks': rellinks,
+ 'sourcename': sourcename,
+ 'toc': toc,
# only display a TOC if there's more than one item to show
- display_toc = (self.env.toc_num_entries[docname] > 1),
- page_source_suffix = source_suffix,
- )
+ 'display_toc': (self.env.toc_num_entries[docname] > 1),
+ 'page_source_suffix': source_suffix,
+ }
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
- self.fignumbers = self.env.toc_fignumbers.get(docname, {}) # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]] # NOQA
+ self.fignumbers = self.env.toc_fignumbers.get(docname, {})
self.imgpath = relative_uri(self.get_target_uri(docname), '_images')
- self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads') # type: unicode
+ self.dlpath = relative_uri(self.get_target_uri(docname), '_downloads')
self.current_docname = docname
self.docwriter.write(doctree, destination)
self.docwriter.assemble_parts()
@@ -737,11 +684,11 @@ class StandaloneHTMLBuilder(Builder):
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
- title = self.env.longtitles.get(docname)
- title = title and self.render_partial(title)['title'] or ''
+ title_node = self.env.longtitles.get(docname)
+ title = title_node and self.render_partial(title_node)['title'] or ''
self.index_page(docname, doctree, title)
def finish(self):
@@ -807,11 +754,11 @@ class StandaloneHTMLBuilder(Builder):
indexcounts.append(sum(1 + len(subitems)
for _, (_, subitems, _) in entries))
- genindexcontext = dict(
- genindexentries = genindex,
- genindexcounts = indexcounts,
- split_index = self.config.html_split_index,
- )
+ genindexcontext = {
+ 'genindexentries': genindex,
+ 'genindexcounts': indexcounts,
+ 'split_index': self.config.html_split_index,
+ }
logger.info(' genindex', nonl=1)
if self.config.html_split_index:
@@ -830,11 +777,11 @@ class StandaloneHTMLBuilder(Builder):
def write_domain_indices(self):
# type: () -> None
for indexname, indexcls, content, collapse in self.domain_indices:
- indexcontext = dict(
- indextitle = indexcls.localname,
- content = content,
- collapse_index = collapse,
- )
+ indexcontext = {
+ 'indextitle': indexcls.localname,
+ 'content': content,
+ 'collapse_index': collapse,
+ }
logger.info(' ' + indexname, nonl=1)
self.handle_page(indexname, indexcontext, 'domainindex.html')
@@ -857,7 +804,7 @@ class StandaloneHTMLBuilder(Builder):
def copy_download_files(self):
# type: () -> None
def to_relpath(f):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return relative_path(self.srcdir, f)
# copy downloadable files
if self.env.dlfiles:
@@ -869,7 +816,7 @@ class StandaloneHTMLBuilder(Builder):
dest = path.join(self.outdir, '_downloads', self.env.dlfiles[src][1])
ensuredir(path.dirname(dest))
copyfile(path.join(self.srcdir, src), dest)
- except EnvironmentError as err:
+ except OSError as err:
logger.warning(__('cannot copy downloadable file %r: %s'),
path.join(self.srcdir, src), err)
@@ -881,7 +828,7 @@ class StandaloneHTMLBuilder(Builder):
ensuredir(path.join(self.outdir, '_static'))
# first, create pygments style file
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
- f.write(self.highlighter.get_stylesheet()) # type: ignore
+ f.write(self.highlighter.get_stylesheet())
# then, copy translations JavaScript file
if self.config.language is not None:
jsfile = self._get_translations_js()
@@ -935,9 +882,7 @@ class StandaloneHTMLBuilder(Builder):
copyfile(path.join(self.confdir, self.config.html_favicon),
icontarget)
logger.info('done')
- except EnvironmentError as err:
- # TODO: In py3, EnvironmentError (and IOError) was merged into OSError.
- # So it should be replaced by IOError on dropping py2 support
+ except OSError as err:
logger.warning(__('cannot copy static file %r'), err)
def copy_extra_files(self):
@@ -955,7 +900,7 @@ class StandaloneHTMLBuilder(Builder):
copy_asset(entry, self.outdir, excluded)
logger.info(__('done'))
- except EnvironmentError as err:
+ except OSError as err:
logger.warning(__('cannot copy extra file %r'), err)
def write_buildinfo(self):
@@ -963,7 +908,7 @@ class StandaloneHTMLBuilder(Builder):
try:
with open(path.join(self.outdir, '.buildinfo'), 'w') as fp:
self.build_info.dump(fp)
- except IOError as exc:
+ except OSError as exc:
logger.warning(__('Failed to write build info file: %r'), exc)
def cleanup(self):
@@ -999,17 +944,17 @@ class StandaloneHTMLBuilder(Builder):
reference.append(node)
def load_indexer(self, docnames):
- # type: (Iterable[unicode]) -> None
+ # type: (Iterable[str]) -> None
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
if self.indexer_dumps_unicode:
- f = codecs.open(searchindexfn, 'r', encoding='utf-8') # type: ignore
+ with open(searchindexfn, encoding='utf-8') as ft:
+ self.indexer.load(ft, self.indexer_format)
else:
- f = open(searchindexfn, 'rb') # type: ignore
- with f:
- self.indexer.load(f, self.indexer_format)
- except (IOError, OSError, ValueError):
+ with open(searchindexfn, 'rb') as fb:
+ self.indexer.load(fb, self.indexer_format)
+ except (OSError, ValueError):
if keep:
logger.warning(__('search index couldn\'t be loaded, but not all '
'documents will be built: the index will be '
@@ -1018,7 +963,7 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
- # type: (unicode, nodes.Node, unicode) -> None
+ # type: (str, nodes.document, str) -> None
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
@@ -1027,22 +972,28 @@ class StandaloneHTMLBuilder(Builder):
except TypeError:
# fallback for old search-adapters
self.indexer.feed(pagename, title, doctree) # type: ignore
+ indexer_name = self.indexer.__class__.__name__
+ warnings.warn(
+ 'The %s.feed() method signature is deprecated. Update to '
+ '%s.feed(docname, filename, title, doctree).' % (
+ indexer_name, indexer_name),
+ RemovedInSphinx40Warning)
def _get_local_toctree(self, docname, collapse=True, **kwds):
- # type: (unicode, bool, Any) -> unicode
+ # type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
- # type: (unicode, Dict) -> None
+ # type: (str, Dict) -> None
def has_wildcard(pattern):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
@@ -1068,7 +1019,7 @@ class StandaloneHTMLBuilder(Builder):
# user sidebar settings
html_sidebars = self.get_builder_config('sidebars', 'html')
- for pattern, patsidebars in iteritems(html_sidebars):
+ for pattern, patsidebars in html_sidebars.items():
if patmatch(pagename, pattern):
if matched:
if has_wildcard(pattern):
@@ -1086,14 +1037,6 @@ class StandaloneHTMLBuilder(Builder):
if sidebars is None:
# keep defaults
pass
- elif isinstance(sidebars, string_types):
- # 0.x compatible mode: insert custom sidebar before searchbox
- customsidebar = sidebars
- sidebars = None
- warnings.warn('Now html_sidebars only allows list of sidebar '
- 'templates as a value. Support for a string value '
- 'will be removed at Sphinx-2.0.',
- RemovedInSphinx20Warning, stacklevel=2)
ctx['sidebars'] = sidebars
ctx['customsidebar'] = customsidebar
@@ -1101,12 +1044,12 @@ class StandaloneHTMLBuilder(Builder):
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
ctx = self.globalcontext.copy()
# current_page_name is backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
@@ -1123,7 +1066,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pageurl'] = None
def pathto(otheruri, resource=False, baseuri=default_baseuri):
- # type: (unicode, bool, unicode) -> unicode
+ # type: (str, bool, str) -> str
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
@@ -1136,18 +1079,18 @@ class StandaloneHTMLBuilder(Builder):
ctx['pathto'] = pathto
def css_tag(css):
- # type: (Stylesheet) -> unicode
+ # type: (Stylesheet) -> str
attrs = []
for key in sorted(css.attributes):
value = css.attributes[key]
if value is not None:
- attrs.append('%s="%s"' % (key, htmlescape(value, True)))
+ attrs.append('%s="%s"' % (key, html.escape(value, True)))
attrs.append('href="%s"' % pathto(css.filename, resource=True))
return '<link %s />' % ' '.join(attrs)
ctx['css_tag'] = css_tag
def hasdoc(name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if name in self.env.all_docs:
return True
elif name == 'search' and self.search:
@@ -1158,12 +1101,12 @@ class StandaloneHTMLBuilder(Builder):
ctx['hasdoc'] = hasdoc
def warn(*args, **kwargs):
- # type: (Any, Any) -> unicode
+ # type: (Any, Any) -> str
"""Simple warn() wrapper for themes."""
warnings.warn('The template function warn() was deprecated. '
'Use warning() instead.',
RemovedInSphinx30Warning, stacklevel=2)
- self.warn(*args, **kwargs)
+ logger.warning(*args, **kwargs)
return '' # return empty string
ctx['warn'] = warn
@@ -1193,9 +1136,10 @@ class StandaloneHTMLBuilder(Builder):
# outfilename's path is in general different from self.outdir
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', ctx['encoding'], 'xmlcharrefreplace') as f: # type: ignore # NOQA
+ with open(outfilename, 'w', encoding=ctx['encoding'],
+ errors='xmlcharrefreplace') as f:
f.write(output)
- except (IOError, OSError) as err:
+ except OSError as err:
logger.warning(__("error writing file %s: %s"), outfilename, err)
if self.copysource and ctx.get('sourcename'):
# copy the source file for the "show source" link
@@ -1205,7 +1149,7 @@ class StandaloneHTMLBuilder(Builder):
copyfile(self.env.doc2path(pagename), source_name)
def update_page_context(self, pagename, templatename, ctx, event_arg):
- # type: (unicode, unicode, Dict, Any) -> None
+ # type: (str, str, Dict, Any) -> None
pass
def handle_finish(self):
@@ -1230,11 +1174,11 @@ class StandaloneHTMLBuilder(Builder):
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
if self.indexer_dumps_unicode:
- f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') # type: ignore
+ with open(searchindexfn + '.tmp', 'w', encoding='utf-8') as ft:
+ self.indexer.dump(ft, self.indexer_format)
else:
- f = open(searchindexfn + '.tmp', 'wb') # type: ignore
- with f:
- self.indexer.dump(f, self.indexer_format)
+ with open(searchindexfn + '.tmp', 'wb') as fb:
+ self.indexer.dump(fb, self.indexer_format)
movefile(searchindexfn + '.tmp', searchindexfn)
logger.info(__('done'))
@@ -1248,7 +1192,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
name = 'dirhtml'
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -1256,7 +1200,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def get_outfilename(self, pagename):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if pagename == 'index' or pagename.endswith(SEP + 'index'):
outfilename = path.join(self.outdir, os_path(pagename) +
self.out_suffix)
@@ -1267,8 +1211,8 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return outfilename
def prepare_writing(self, docnames):
- # type: (Iterable[unicode]) -> None
- StandaloneHTMLBuilder.prepare_writing(self, docnames)
+ # type: (Set[str]) -> None
+ super().prepare_writing(docnames)
self.globalcontext['no_search_suffix'] = True
@@ -1283,11 +1227,11 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
copysource = False
def get_outdated_docs(self): # type: ignore
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents'
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname in self.env.all_docs:
# all references are on the same page...
return self.config.master_doc + self.out_suffix + \
@@ -1297,7 +1241,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return docname + self.out_suffix
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source
return self.get_target_uri(to, typ)
@@ -1317,7 +1261,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
refnode['refuri'] = fname + refuri[hashindex:]
def _get_local_toctree(self, docname, collapse=True, **kwds):
- # type: (unicode, bool, Any) -> unicode
+ # type: (str, bool, Any) -> str
if 'includehidden' not in kwds:
kwds['includehidden'] = False
toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
@@ -1326,7 +1270,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return self.render_partial(toctree)['fragment']
def assemble_doctree(self):
- # type: () -> nodes.Node
+ # type: () -> nodes.document
master = self.config.master_doc
tree = self.env.get_doctree(master)
tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
@@ -1336,7 +1280,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return tree
def assemble_toc_secnumbers(self):
- # type: () -> Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ # type: () -> Dict[str, Dict[str, Tuple[int, ...]]]
# Assemble toc_secnumbers to resolve section numbers on SingleHTML.
# Merge all secnumbers to single secnumber.
#
@@ -1346,16 +1290,16 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_secnumber().
- new_secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
- for docname, secnums in iteritems(self.env.toc_secnumbers):
- for id, secnum in iteritems(secnums):
+ new_secnumbers = {} # type: Dict[str, Tuple[int, ...]]
+ for docname, secnums in self.env.toc_secnumbers.items():
+ for id, secnum in secnums.items():
alias = "%s/%s" % (docname, id)
new_secnumbers[alias] = secnum
return {self.config.master_doc: new_secnumbers}
def assemble_toc_fignumbers(self):
- # type: () -> Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
+ # type: () -> Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
# Merge all fignumbers to single fignumber.
#
@@ -1365,51 +1309,50 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_fignumber().
- new_fignumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
- # {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}}
- for docname, fignumlist in iteritems(self.env.toc_fignumbers):
- for figtype, fignums in iteritems(fignumlist):
+ new_fignumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
+ # {'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, 'bar': {'figure': {'id1': (3,)}}}
+ for docname, fignumlist in self.env.toc_fignumbers.items():
+ for figtype, fignums in fignumlist.items():
alias = "%s/%s" % (docname, figtype)
new_fignumbers.setdefault(alias, {})
- for id, fignum in iteritems(fignums):
+ for id, fignum in fignums.items():
new_fignumbers[alias][id] = fignum
return {self.config.master_doc: new_fignumbers}
def get_doc_context(self, docname, body, metatags):
- # type: (unicode, unicode, Dict) -> Dict
+ # type: (str, str, str) -> Dict
# no relation links...
- toc = TocTree(self.env).get_toctree_for(self.config.master_doc,
- self, False)
+ toctree = TocTree(self.env).get_toctree_for(self.config.master_doc, self, False)
# if there is no toctree, toc is None
- if toc:
- self.fix_refuris(toc)
- toc = self.render_partial(toc)['fragment']
+ if toctree:
+ self.fix_refuris(toctree)
+ toc = self.render_partial(toctree)['fragment']
display_toc = True
else:
toc = ''
display_toc = False
- return dict(
- parents = [],
- prev = None,
- next = None,
- docstitle = None,
- title = self.config.html_title,
- meta = None,
- body = body,
- metatags = metatags,
- rellinks = [],
- sourcename = '',
- toc = toc,
- display_toc = display_toc,
- )
+ return {
+ 'parents': [],
+ 'prev': None,
+ 'next': None,
+ 'docstitle': None,
+ 'title': self.config.html_title,
+ 'meta': None,
+ 'body': body,
+ 'metatags': metatags,
+ 'rellinks': [],
+ 'sourcename': '',
+ 'toc': toc,
+ 'display_toc': display_toc,
+ }
def write(self, *ignored):
# type: (Any) -> None
docnames = self.env.all_docs
logger.info(bold(__('preparing documents... ')), nonl=True)
- self.prepare_writing(docnames)
+ self.prepare_writing(docnames) # type: ignore
logger.info(__('done'))
logger.info(bold(__('assembling single document... ')), nonl=True)
@@ -1460,7 +1403,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
additional_dump_args = () # type: Tuple
#: the filename for the global context file
- globalcontext_filename = None # type: unicode
+ globalcontext_filename = None # type: str
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
@@ -1479,7 +1422,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
self.use_index = self.get_builder_config('use_index', 'html')
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -1487,17 +1430,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def dump_context(self, context, filename):
- # type: (Dict, unicode) -> None
+ # type: (Dict, str) -> None
if self.implementation_dumps_unicode:
- f = codecs.open(filename, 'w', encoding='utf-8') # type: ignore
+ with open(filename, 'w', encoding='utf-8') as ft:
+ self.implementation.dump(context, ft, *self.additional_dump_args)
else:
- f = open(filename, 'wb') # type: ignore
- with f:
- self.implementation.dump(context, f, *self.additional_dump_args)
+ with open(filename, 'wb') as fb:
+ self.implementation.dump(context, fb, *self.additional_dump_args)
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
- # type: (unicode, Dict, unicode, unicode, Any) -> None
+ # type: (str, Dict, str, str, Any) -> None
ctx['current_page_name'] = pagename
self.add_sidebars(pagename, ctx)
@@ -1532,7 +1475,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
self.dump_context(self.globalcontext, outfilename)
# super here to dump the search index
- StandaloneHTMLBuilder.handle_finish(self)
+ super().handle_finish()
# copy the environment file from the doctree dir to the output dir
# as needed by the web app
@@ -1580,17 +1523,13 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
globalcontext_filename = 'globalcontext.json'
searchindex_filename = 'searchindex.json'
- def init(self):
- # type: () -> None
- SerializingHTMLBuilder.init(self)
-
def convert_html_css_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_css_files to tuple styled one."""
- html_css_files = [] # type: List[Tuple[unicode, Dict]]
+ html_css_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_css_files:
- if isinstance(entry, string_types):
+ if isinstance(entry, str):
html_css_files.append((entry, {}))
else:
try:
@@ -1606,9 +1545,9 @@ def convert_html_css_files(app, config):
def convert_html_js_files(app, config):
# type: (Sphinx, Config) -> None
"""This converts string styled html_js_files to tuple styled one."""
- html_js_files = [] # type: List[Tuple[unicode, Dict]]
+ html_js_files = [] # type: List[Tuple[str, Dict]]
for entry in config.html_js_files:
- if isinstance(entry, string_types):
+ if isinstance(entry, str):
html_js_files.append((entry, {}))
else:
try:
@@ -1622,7 +1561,7 @@ def convert_html_js_files(app, config):
def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
- # type: (Sphinx, unicode, unicode, Dict, nodes.Node) -> None
+ # type: (Sphinx, str, str, Dict, nodes.Node) -> None
"""Set up js_tag() template helper.
.. note:: This set up function is added to keep compatibility with webhelper.
@@ -1630,9 +1569,9 @@ def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
pathto = context.get('pathto')
def js_tag(js):
- # type: (JavaScript) -> unicode
+ # type: (JavaScript) -> str
attrs = []
- body = '' # type: unicode
+ body = ''
if isinstance(js, JavaScript):
for key in sorted(js.attributes):
value = js.attributes[key]
@@ -1640,7 +1579,7 @@ def setup_js_tag_helper(app, pagename, templatexname, context, doctree):
if key == 'body':
body = value
else:
- attrs.append('%s="%s"' % (key, htmlescape(value, True)))
+ attrs.append('%s="%s"' % (key, html.escape(value, True)))
if js.filename:
attrs.append('src="%s"' % pathto(js.filename, resource=True))
else:
@@ -1666,7 +1605,7 @@ def validate_math_renderer(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
# builders
app.add_builder(StandaloneHTMLBuilder)
app.add_builder(DirectoryHTMLBuilder)
@@ -1680,35 +1619,35 @@ def setup(app):
app.add_config_value('html_theme_options', {}, 'html')
app.add_config_value('html_title',
lambda self: _('%s %s documentation') % (self.project, self.release),
- 'html', string_classes)
+ 'html', [str])
app.add_config_value('html_short_title', lambda self: self.html_title, 'html')
- app.add_config_value('html_style', None, 'html', string_classes)
- app.add_config_value('html_logo', None, 'html', string_classes)
- app.add_config_value('html_favicon', None, 'html', string_classes)
+ app.add_config_value('html_style', None, 'html', [str])
+ app.add_config_value('html_logo', None, 'html', [str])
+ app.add_config_value('html_favicon', None, 'html', [str])
app.add_config_value('html_css_files', [], 'html')
app.add_config_value('html_js_files', [], 'html')
app.add_config_value('html_static_path', [], 'html')
app.add_config_value('html_extra_path', [], 'html')
- app.add_config_value('html_last_updated_fmt', None, 'html', string_classes)
+ app.add_config_value('html_last_updated_fmt', None, 'html', [str])
app.add_config_value('html_sidebars', {}, 'html')
app.add_config_value('html_additional_pages', {}, 'html')
app.add_config_value('html_domain_indices', True, 'html', [list])
- app.add_config_value('html_add_permalinks', u'\u00B6', 'html')
+ app.add_config_value('html_add_permalinks', '¶', 'html')
app.add_config_value('html_use_index', True, 'html')
app.add_config_value('html_split_index', False, 'html')
app.add_config_value('html_copy_source', True, 'html')
app.add_config_value('html_show_sourcelink', True, 'html')
app.add_config_value('html_sourcelink_suffix', '.txt', 'html')
app.add_config_value('html_use_opensearch', '', 'html')
- app.add_config_value('html_file_suffix', None, 'html', string_classes)
- app.add_config_value('html_link_suffix', None, 'html', string_classes)
+ app.add_config_value('html_file_suffix', None, 'html', [str])
+ app.add_config_value('html_link_suffix', None, 'html', [str])
app.add_config_value('html_show_copyright', True, 'html')
app.add_config_value('html_show_sphinx', True, 'html')
app.add_config_value('html_context', {}, 'html')
app.add_config_value('html_output_encoding', 'utf-8', 'html')
app.add_config_value('html_compact_lists', True, 'html')
app.add_config_value('html_secnumber_suffix', '. ', 'html')
- app.add_config_value('html_search_language', None, 'html', string_classes)
+ app.add_config_value('html_search_language', None, 'html', [str])
app.add_config_value('html_search_options', {}, 'html')
app.add_config_value('html_search_scorer', '', None)
app.add_config_value('html_scaled_image_link', True, 'html')
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index dfb58ede6..641761b69 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.htmlhelp
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,27 +8,26 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-import codecs
+import html
import os
from os import path
from docutils import nodes
-from six import PY3
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.locale import __
from sphinx.util import logging
-from sphinx.util.osutil import make_filename
-from sphinx.util.pycompat import htmlescape
+from sphinx.util.nodes import NodeMatcher
+from sphinx.util.osutil import make_filename_from_project
if False:
# For type annotation
from typing import Any, Dict, IO, List, Match, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
logger = logging.getLogger(__name__)
@@ -134,8 +132,9 @@ that the their then there these they this to
was will with
""".split()
-# The following list includes only languages supported by Sphinx.
-# See http://msdn.microsoft.com/en-us/library/ms930130.aspx for more.
+# The following list includes only languages supported by Sphinx. See
+# https://docs.microsoft.com/en-us/previous-versions/windows/embedded/ms930130(v=msdn.10)
+# for more.
chm_locales = {
# lang: LCID, encoding
'ca': (0x403, 'cp1252'),
@@ -170,8 +169,8 @@ chm_locales = {
}
-def chm_htmlescape(s, quote=None):
- # type: (unicode, bool) -> unicode
+def chm_htmlescape(s, quote=True):
+ # type: (str, bool) -> str
"""
chm_htmlescape() is a wrapper of html.escape().
.hhc/.hhk files don't recognize hex escaping, we need convert
@@ -179,10 +178,7 @@ def chm_htmlescape(s, quote=None):
html.escape() may generates a hex escaping ``&#x27;`` for single
quote ``'``, this wrapper fixes this.
"""
- if quote is None:
- quote = PY3 # True for py3, False for py2 (for compatibility)
-
- s = htmlescape(s, quote)
+ s = html.escape(s, quote)
s = s.replace('&#x27;', '&#39;') # re-escape as decimal
return s
@@ -213,23 +209,23 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def init(self):
# type: () -> None
- StandaloneHTMLBuilder.init(self)
- # the output files for HTML help must be .html only
+ # the output files for HTML help is .html by default
self.out_suffix = '.html'
self.link_suffix = '.html'
+ super().init()
# determine the correct locale setting
locale = chm_locales.get(self.config.language)
if locale is not None:
self.lcid, self.encoding = locale
def open_file(self, outdir, basename, mode='w'):
- # type: (unicode, unicode, unicode) -> IO
+ # type: (str, str, str) -> IO
# open a file with the correct encoding for the selected language
- return codecs.open(path.join(outdir, basename), mode, # type: ignore
- self.encoding, 'xmlcharrefreplace')
+ return open(path.join(outdir, basename), mode, encoding=self.encoding,
+ errors='xmlcharrefreplace')
def update_page_context(self, pagename, templatename, ctx, event_arg):
- # type: (unicode, unicode, Dict, unicode) -> None
+ # type: (str, str, Dict, str) -> None
ctx['encoding'] = self.encoding
def handle_finish(self):
@@ -237,16 +233,16 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.build_hhx(self.outdir, self.config.htmlhelp_basename)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
for node in doctree.traverse(nodes.reference):
# add ``target=_blank`` attributes to external links
if node.get('internal') is None and 'refuri' in node:
node['target'] = '_blank'
- StandaloneHTMLBuilder.write_doc(self, docname, doctree)
+ super().write_doc(docname, doctree)
def build_hhx(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('dumping stopword list...'))
with self.open_file(outdir, outname + '.stp') as f:
for word in sorted(stopwords):
@@ -309,11 +305,8 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
for subnode in node:
write_toc(subnode, ullevel)
- def istoctree(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, addnodes.compact_paragraph) and \
- 'toctree' in node
- for node in tocdoc.traverse(istoctree):
+ matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
+ for node in tocdoc.traverse(matcher): # type: addnodes.compact_paragraph
write_toc(node)
f.write(contents_footer)
@@ -323,9 +316,9 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
f.write('<UL>\n')
def write_index(title, refs, subitems):
- # type: (unicode, List[Tuple[unicode, unicode]], List[Tuple[unicode, List[Tuple[unicode, unicode]]]]) -> None # NOQA
+ # type: (str, List[Tuple[str, str]], List[Tuple[str, List[Tuple[str, str]]]]) -> None # NOQA
def write_param(name, value):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
item = ' <param name="%s" value="%s">\n' % (name, value)
f.write(item)
title = chm_htmlescape(title, True)
@@ -352,12 +345,20 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
f.write('</UL>\n')
+def default_htmlhelp_basename(config):
+ # type: (Config) -> str
+ """Better default htmlhelp_basename setting."""
+ return make_filename_from_project(config.project) + 'doc'
+
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(HTMLHelpBuilder)
- app.add_config_value('htmlhelp_basename', lambda self: make_filename(self.project), None)
+ app.add_config_value('htmlhelp_basename', default_htmlhelp_basename, None)
+ app.add_config_value('htmlhelp_file_suffix', None, 'html', [str])
+ app.add_config_value('htmlhelp_link_suffix', None, 'html', [str])
return {
'version': 'builtin',
diff --git a/sphinx/builders/latex/__init__.py b/sphinx/builders/latex/__init__.py
index bbbcce0dc..2d423a1dd 100644
--- a/sphinx/builders/latex/__init__.py
+++ b/sphinx/builders/latex/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.latex
~~~~~~~~~~~~~~~~~~~~~
@@ -13,7 +12,6 @@ import os
from os import path
from docutils.frontend import OptionParser
-from six import text_type
from sphinx import package_dir, addnodes, highlighting
from sphinx.builders import Builder
@@ -22,19 +20,23 @@ from sphinx.builders.latex.transforms import (
FootnoteDocnameUpdater, LaTeXFootnoteTransform, LiteralBlockTransform,
ShowUrlsTransform, DocumentTargetTransform,
)
-from sphinx.config import string_classes, ENUM
+from sphinx.config import ENUM
from sphinx.environment import NoUri
from sphinx.environment.adapters.asset import ImageAdapter
-from sphinx.errors import SphinxError, ConfigError
+from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.transforms import SphinxTransformer
from sphinx.util import texescape, logging, status_iterator
from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.util.docutils import SphinxFileOutput, new_document
from sphinx.util.fileutil import copy_asset_file
+from sphinx.util.i18n import format_date
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, make_filename
-from sphinx.writers.latex import DEFAULT_SETTINGS, LaTeXWriter, LaTeXTranslator
+from sphinx.util.template import LaTeXRenderer
+from sphinx.writers.latex import (
+ ADDITIONAL_SETTINGS, DEFAULT_SETTINGS, LaTeXWriter, LaTeXTranslator
+)
if False:
# For type annotation
@@ -99,11 +101,11 @@ XINDY_LANG_OPTIONS = {
'el': '-L greek -C utf8 ',
# FIXME, not compatible with [:2] slice but does Sphinx support Greek ?
'el-polyton': '-L greek -C polytonic-utf8 ',
-} # type: Dict[unicode, unicode]
+}
XINDY_CYRILLIC_SCRIPTS = [
'be', 'bg', 'mk', 'mn', 'ru', 'sr', 'sh', 'uk',
-] # type: List[unicode]
+]
logger = logging.getLogger(__name__)
@@ -126,24 +128,27 @@ class LaTeXBuilder(Builder):
def init(self):
# type: () -> None
- self.docnames = [] # type: Iterable[unicode]
- self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
+ self.context = {} # type: Dict[str, Any]
+ self.docnames = [] # type: Iterable[str]
+ self.document_data = [] # type: List[Tuple[str, str, str, str, str, bool]]
self.usepackages = self.app.registry.latex_packages
texescape.init()
+ self.init_context()
+
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@@ -155,7 +160,7 @@ class LaTeXBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
- self.titles = [] # type: List[Tuple[unicode, unicode]]
+ self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@@ -167,6 +172,43 @@ class LaTeXBuilder(Builder):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
+ def init_context(self):
+ # type: () -> None
+ self.context = DEFAULT_SETTINGS.copy()
+
+ # Add special settings for latex_engine
+ self.context.update(ADDITIONAL_SETTINGS.get(self.config.latex_engine, {}))
+
+ # for xelatex+French, don't use polyglossia by default
+ if self.config.latex_engine == 'xelatex':
+ if self.config.language:
+ if self.config.language[:2] == 'fr':
+ self.context['polyglossia'] = ''
+ self.context['babel'] = r'\usepackage{babel}'
+
+ # Apply extension settings to context
+ self.context['packages'] = self.usepackages
+
+ # Apply user settings to context
+ self.context.update(self.config.latex_elements)
+ self.context['release'] = self.config.release
+ self.context['use_xindy'] = self.config.latex_use_xindy
+
+ if self.config.today:
+ self.context['date'] = self.config.today
+ else:
+ self.context['date'] = format_date(self.config.today_fmt or _('%b %d, %Y'),
+ language=self.config.language)
+
+ if self.config.latex_logo:
+ self.context['logofilename'] = path.basename(self.config.latex_logo)
+
+ # for compatibilities
+ self.context['indexname'] = _('Index')
+ if self.config.release:
+ # Show the release label only if release value exists
+ self.context['releasename'] = _('Release')
+
def write_stylesheet(self):
# type: () -> None
highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style)
@@ -175,7 +217,7 @@ class LaTeXBuilder(Builder):
f.write('\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n')
f.write('\\ProvidesPackage{sphinxhighlight}'
'[2016/05/29 stylesheet for highlighting with pygments]\n\n')
- f.write(highlighter.get_stylesheet()) # type: ignore
+ f.write(highlighter.get_stylesheet())
def write(self, *ignored):
# type: (Any) -> None
@@ -183,7 +225,7 @@ class LaTeXBuilder(Builder):
docsettings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
- read_config_files=True).get_default_values()
+ read_config_files=True).get_default_values() # type: Any
self.init_document_data()
self.write_stylesheet()
@@ -210,18 +252,21 @@ class LaTeXBuilder(Builder):
doctree['tocdepth'] = tocdepth
self.apply_transforms(doctree)
self.post_process_images(doctree)
+ self.update_doc_context(title, author)
+
logger.info(__("writing... "), nonl=1)
+ docsettings.author = author
+ docsettings.title = title
+ docsettings.contentsname = self.get_contentsname(docname)
+ docsettings.docname = docname
+ docsettings.docclass = docclass
+
doctree.settings = docsettings
- doctree.settings.author = author
- doctree.settings.title = title
- doctree.settings.contentsname = self.get_contentsname(docname)
- doctree.settings.docname = docname
- doctree.settings.docclass = docclass
docwriter.write(doctree, destination)
logger.info("done")
def get_contentsname(self, indexfile):
- # type: (unicode) -> unicode
+ # type: (str) -> str
tree = self.env.get_doctree(indexfile)
contentsname = None
for toctree in tree.traverse(addnodes.toctree):
@@ -231,8 +276,13 @@ class LaTeXBuilder(Builder):
return contentsname
+ def update_doc_context(self, title, author):
+ # type: (str, str) -> None
+ self.context['title'] = title
+ self.context['author'] = author
+
def assemble_doctree(self, indexfile, toctree_only, appendices):
- # type: (unicode, bool, List[unicode]) -> nodes.Node
+ # type: (str, bool, List[str]) -> nodes.document
from docutils import nodes # NOQA
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
@@ -243,8 +293,8 @@ class LaTeXBuilder(Builder):
# fresh document
new_tree = new_document('<latex output>')
new_sect = nodes.section()
- new_sect += nodes.title(u'<Set title in conf.py>',
- u'<Set title in conf.py>')
+ new_sect += nodes.title('<Set title in conf.py>',
+ '<Set title in conf.py>')
new_tree += new_sect
for node in tree.traverse(addnodes.toctree):
new_sect += node
@@ -264,7 +314,7 @@ class LaTeXBuilder(Builder):
for pendingnode in largetree.traverse(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
- newnodes = [nodes.emphasis(sectname, sectname)]
+ newnodes = [nodes.emphasis(sectname, sectname)] # type: List[nodes.Node]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
@@ -290,6 +340,7 @@ class LaTeXBuilder(Builder):
def finish(self):
# type: () -> None
self.copy_image_files()
+ self.write_message_catalog()
# copy TeX support files from texinputs
# configure usage of xindy (impacts Makefile and latexmkrc)
@@ -353,26 +404,14 @@ class LaTeXBuilder(Builder):
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
+ def write_message_catalog(self):
+ # type: () -> None
+ filename = path.join(package_dir, 'templates', 'latex', 'sphinxmessages.sty_t')
+ copy_asset_file(filename, self.outdir, context={}, renderer=LaTeXRenderer())
+
def validate_config_values(app, config):
# type: (Sphinx, Config) -> None
- for document in config.latex_documents:
- try:
- text_type(document[2])
- except UnicodeDecodeError:
- raise ConfigError(
- __('Invalid latex_documents.title found (might contain non-ASCII chars. '
- 'Please use u"..." notation instead): %r') % (document,)
- )
-
- try:
- text_type(document[3])
- except UnicodeDecodeError:
- raise ConfigError(
- __('Invalid latex_documents.author found (might contain non-ASCII chars. '
- 'Please use u"..." notation instead): %r') % (document,)
- )
-
for key in list(config.latex_elements):
if key not in DEFAULT_SETTINGS:
msg = __("Unknown configure key: latex_elements[%r]. ignored.")
@@ -381,7 +420,7 @@ def validate_config_values(app, config):
def default_latex_engine(config):
- # type: (Config) -> unicode
+ # type: (Config) -> str
""" Better default latex_engine settings for specific languages. """
if config.language == 'ja':
return 'platex'
@@ -390,7 +429,7 @@ def default_latex_engine(config):
def default_latex_docclass(config):
- # type: (Config) -> Dict[unicode, unicode]
+ # type: (Config) -> Dict[str, str]
""" Better default latex_docclass settings for specific languages. """
if config.language == 'ja':
return {'manual': 'jsbook',
@@ -406,7 +445,7 @@ def default_latex_use_xindy(config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(LaTeXBuilder)
app.add_post_transform(CitationReferenceTransform)
app.add_post_transform(MathReferenceTransform)
@@ -419,7 +458,7 @@ def setup(app):
lambda self: [(self.master_doc, make_filename(self.project) + '.tex',
self.project, '', 'manual')],
None)
- app.add_config_value('latex_logo', None, None, string_classes)
+ app.add_config_value('latex_logo', None, None, [str])
app.add_config_value('latex_appendices', [], None)
app.add_config_value('latex_use_latex_multicolumn', False, None)
app.add_config_value('latex_use_xindy', default_latex_use_xindy, None)
diff --git a/sphinx/builders/latex/nodes.py b/sphinx/builders/latex/nodes.py
index 2dfba1b57..f0dd0767a 100644
--- a/sphinx/builders/latex/nodes.py
+++ b/sphinx/builders/latex/nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.latex.nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/builders/latex/transforms.py b/sphinx/builders/latex/transforms.py
index 3b2de2c1f..c27724c41 100644
--- a/sphinx/builders/latex/transforms.py
+++ b/sphinx/builders/latex/transforms.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.latex.transforms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from sphinx import addnodes
@@ -16,10 +17,11 @@ from sphinx.builders.latex.nodes import (
captioned_literal_block, footnotemark, footnotetext, math_reference, thebibliography
)
from sphinx.transforms import SphinxTransform
+from sphinx.util.nodes import NodeMatcher
if False:
# For type annotation
- from typing import Dict, List, Set, Tuple, Union # NOQA
+ from typing import Any, Dict, List, Set, Tuple, Union # NOQA
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
@@ -29,8 +31,10 @@ class FootnoteDocnameUpdater(SphinxTransform):
default_priority = 700
TARGET_NODES = (nodes.footnote, nodes.footnote_reference)
- def apply(self):
- for node in self.document.traverse(lambda n: isinstance(n, self.TARGET_NODES)):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ matcher = NodeMatcher(*self.TARGET_NODES)
+ for node in self.document.traverse(matcher): # type: nodes.Element
node['docname'] = self.env.docname
@@ -46,23 +50,24 @@ class ShowUrlsTransform(SphinxTransform):
# references are expanded to footnotes (or not)
expanded = False
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
try:
# replace id_prefix temporarily
- id_prefix = self.document.settings.id_prefix
- self.document.settings.id_prefix = 'show_urls'
+ settings = self.document.settings # type: Any
+ id_prefix = settings.id_prefix
+ settings.id_prefix = 'show_urls'
self.expand_show_urls()
if self.expanded:
self.renumber_footnotes()
finally:
# restore id_prefix
- self.document.settings.id_prefix = id_prefix
+ settings.id_prefix = id_prefix
def expand_show_urls(self):
# type: () -> None
- show_urls = self.document.settings.env.config.latex_show_urls
+ show_urls = self.config.latex_show_urls
if show_urls is False or show_urls == 'no':
return
@@ -85,7 +90,7 @@ class ShowUrlsTransform(SphinxTransform):
node.parent.insert(index + 1, textnode)
def get_docname_for_node(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
while node:
if isinstance(node, nodes.document):
return self.env.path2doc(node['source'])
@@ -97,17 +102,17 @@ class ShowUrlsTransform(SphinxTransform):
return None # never reached here. only for type hinting
def create_footnote(self, uri, docname):
- # type: (unicode, unicode) -> Tuple[nodes.footnote, nodes.footnote_ref]
- label = nodes.label('', '#')
- para = nodes.paragraph()
- para.append(nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True))
- footnote = nodes.footnote(uri, label, para, auto=1, docname=docname)
+ # type: (str, str) -> Tuple[nodes.footnote, nodes.footnote_reference]
+ reference = nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True)
+ footnote = nodes.footnote(uri, auto=1, docname=docname)
footnote['names'].append('#')
+ footnote += nodes.label('', '#')
+ footnote += nodes.paragraph('', '', reference)
self.document.note_autofootnote(footnote)
- label = nodes.Text('#')
- footnote_ref = nodes.footnote_reference('[#]_', label, auto=1,
+ footnote_ref = nodes.footnote_reference('[#]_', auto=1,
refid=footnote['ids'][0], docname=docname)
+ footnote_ref += nodes.Text('#')
self.document.note_autofootnote_ref(footnote_ref)
footnote.add_backref(footnote_ref['ids'][0])
@@ -127,10 +132,10 @@ class ShowUrlsTransform(SphinxTransform):
break
# assign new footnote number
- old_label = footnote[0].astext()
- footnote[0].replace_self(nodes.label('', str(num)))
+ old_label = cast(nodes.label, footnote[0])
+ old_label.replace_self(nodes.label('', str(num)))
if old_label in footnote['names']:
- footnote['names'].remove(old_label)
+ footnote['names'].remove(old_label.astext())
footnote['names'].append(str(num))
# update footnote_references by new footnote number
@@ -147,9 +152,9 @@ class FootnoteCollector(nodes.NodeVisitor):
def __init__(self, document):
# type: (nodes.document) -> None
self.auto_footnotes = [] # type: List[nodes.footnote]
- self.used_footnote_numbers = set() # type: Set[unicode]
+ self.used_footnote_numbers = set() # type: Set[str]
self.footnote_refs = [] # type: List[nodes.footnote_reference]
- nodes.NodeVisitor.__init__(self, document)
+ super().__init__(document)
def unknown_visit(self, node):
# type: (nodes.Node) -> None
@@ -341,7 +346,8 @@ class LaTeXFootnoteTransform(SphinxTransform):
default_priority = 600
- def apply(self):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
footnotes = list(self.document.traverse(nodes.footnote))
for node in footnotes:
node.parent.remove(node)
@@ -353,12 +359,12 @@ class LaTeXFootnoteTransform(SphinxTransform):
class LaTeXFootnoteVisitor(nodes.NodeVisitor):
def __init__(self, document, footnotes):
# type: (nodes.document, List[nodes.footnote]) -> None
- self.appeared = set() # type: Set[Tuple[unicode, nodes.footnote]]
+ self.appeared = set() # type: Set[Tuple[str, str]]
self.footnotes = footnotes # type: List[nodes.footnote]
- self.pendings = [] # type: List[nodes.Node]
- self.table_footnotes = [] # type: List[nodes.Node]
- self.restricted = None # type: nodes.Node
- nodes.NodeVisitor.__init__(self, document)
+ self.pendings = [] # type: List[nodes.footnote]
+ self.table_footnotes = [] # type: List[nodes.footnote]
+ self.restricted = None # type: nodes.Element
+ super().__init__(document)
def unknown_visit(self, node):
# type: (nodes.Node) -> None
@@ -369,12 +375,12 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
pass
def restrict(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.restricted is None:
self.restricted = node
def unrestrict(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.restricted == node:
self.restricted = None
pos = node.parent.index(node)
@@ -384,36 +390,36 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
self.pendings = []
def visit_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.figure) -> None
self.restrict(node)
def depart_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.figure) -> None
self.unrestrict(node)
def visit_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.term) -> None
self.restrict(node)
def depart_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.term) -> None
self.unrestrict(node)
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.caption) -> None
self.restrict(node)
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.caption) -> None
self.unrestrict(node)
def visit_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.title) -> None
if isinstance(node.parent, (nodes.section, nodes.table)):
self.restrict(node)
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.title) -> None
if isinstance(node.parent, nodes.section):
self.unrestrict(node)
elif isinstance(node.parent, nodes.table):
@@ -422,17 +428,17 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
self.unrestrict(node)
def visit_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.thead) -> None
self.restrict(node)
def depart_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.thead) -> None
self.table_footnotes += self.pendings
self.pendings = []
self.unrestrict(node)
def depart_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.table) -> None
tbody = list(node.traverse(nodes.tbody))[0]
for footnote in reversed(self.table_footnotes):
fntext = footnotetext('', *footnote.children)
@@ -441,15 +447,15 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
self.table_footnotes = []
def visit_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.footnote) -> None
self.restrict(node)
def depart_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.footnote) -> None
self.unrestrict(node)
def visit_footnote_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.footnote_reference) -> None
number = node.astext().strip()
docname = node['docname']
if self.restricted:
@@ -471,7 +477,7 @@ class LaTeXFootnoteVisitor(nodes.NodeVisitor):
raise nodes.SkipNode
def get_footnote_by_reference(self, node):
- # type: (nodes.Node) -> nodes.Node
+ # type: (nodes.footnote_reference) -> nodes.footnote
docname = node['docname']
for footnote in self.footnotes:
if docname == footnote['docname'] and footnote['ids'][0] == node['refid']:
@@ -512,8 +518,8 @@ class BibliographyTransform(SphinxTransform):
"""
default_priority = 750
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
citations = thebibliography()
for node in self.document.traverse(nodes.citation):
node.parent.remove(node)
@@ -531,19 +537,19 @@ class CitationReferenceTransform(SphinxTransform):
"""
default_priority = 5 # before ReferencesResolver
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if self.app.builder.name != 'latex':
return
+ matcher = NodeMatcher(addnodes.pending_xref, refdomain='std', reftype='citation')
citations = self.env.get_domain('std').data['citations']
- for node in self.document.traverse(addnodes.pending_xref):
- if node['refdomain'] == 'std' and node['reftype'] == 'citation':
- docname, labelid, _ = citations.get(node['reftarget'], ('', '', 0))
- if docname:
- citation_ref = nodes.citation_reference('', *node.children,
- docname=docname, refname=labelid)
- node.replace_self(citation_ref)
+ for node in self.document.traverse(matcher): # type: addnodes.pending_xref
+ docname, labelid, _ = citations.get(node['reftarget'], ('', '', 0))
+ if docname:
+ citation_ref = nodes.citation_reference('', '', *node.children,
+ docname=docname, refname=labelid)
+ node.replace_self(citation_ref)
class MathReferenceTransform(SphinxTransform):
@@ -554,8 +560,8 @@ class MathReferenceTransform(SphinxTransform):
"""
default_priority = 5 # before ReferencesResolver
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if self.app.builder.name != 'latex':
return
@@ -572,23 +578,23 @@ class LiteralBlockTransform(SphinxTransform):
"""Replace container nodes for literal_block by captioned_literal_block."""
default_priority = 400
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if self.app.builder.name != 'latex':
return
- for node in self.document.traverse(nodes.container):
- if node.get('literal_block') is True:
- newnode = captioned_literal_block('', *node.children, **node.attributes)
- node.replace_self(newnode)
+ matcher = NodeMatcher(nodes.container, literal_block=True)
+ for node in self.document.traverse(matcher): # type: nodes.container
+ newnode = captioned_literal_block('', *node.children, **node.attributes)
+ node.replace_self(newnode)
class DocumentTargetTransform(SphinxTransform):
"""Add :doc label to the first section of each document."""
default_priority = 400
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if self.app.builder.name != 'latex':
return
diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py
index c3fd1e88a..a94bd9442 100644
--- a/sphinx/builders/linkcheck.py
+++ b/sphinx/builders/linkcheck.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.linkcheck
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,26 +8,16 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
+import queue
import re
import socket
import threading
+from html.parser import HTMLParser
from os import path
+from urllib.parse import unquote
from docutils import nodes
from requests.exceptions import HTTPError
-from six.moves import queue, html_parser
-from six.moves.urllib.parse import unquote
-
-# 2015-06-25 barry@python.org. This exception was deprecated in Python 3.3 and
-# removed in Python 3.5, however for backward compatibility reasons, we're not
-# going to just remove it. If it doesn't exist, define an exception that will
-# never be caught but leaves the code in check_anchor() intact.
-try:
- from six.moves.html_parser import HTMLParseError # type: ignore
-except ImportError:
- class HTMLParseError(Exception): # type: ignore
- pass
from sphinx.builders import Builder
from sphinx.locale import __
@@ -36,6 +25,7 @@ from sphinx.util import encode_uri, requests, logging
from sphinx.util.console import ( # type: ignore
purple, red, darkgreen, darkgray, darkred, turquoise
)
+from sphinx.util.nodes import traverse_parent
from sphinx.util.requests import is_ssl_error
if False:
@@ -48,12 +38,12 @@ if False:
logger = logging.getLogger(__name__)
-class AnchorCheckParser(html_parser.HTMLParser):
+class AnchorCheckParser(HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
- # type: (unicode) -> None
- html_parser.HTMLParser.__init__(self)
+ # type: (str) -> None
+ super().__init__()
self.search_anchor = search_anchor
self.found = False
@@ -67,23 +57,18 @@ class AnchorCheckParser(html_parser.HTMLParser):
def check_anchor(response, anchor):
- # type: (Response, unicode) -> bool
+ # type: (Response, str) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
parser = AnchorCheckParser(anchor)
- try:
- # Read file in chunks. If we find a matching anchor, we break
- # the loop early in hopes not to have to download the whole thing.
- for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
- parser.feed(chunk)
- if parser.found:
- break
- parser.close()
- except HTMLParseError:
- # HTMLParser is usually pretty good with sloppy HTML, but it tends to
- # choke on EOF. But we're done then anyway.
- pass
+ # Read file in chunks. If we find a matching anchor, we break
+ # the loop early in hopes not to have to download the whole thing.
+ for chunk in response.iter_content(chunk_size=4096, decode_unicode=True):
+ parser.feed(chunk)
+ if parser.found:
+ break
+ parser.close()
return parser.found
@@ -100,9 +85,9 @@ class CheckExternalLinksBuilder(Builder):
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
- self.good = set() # type: Set[unicode]
- self.broken = {} # type: Dict[unicode, unicode]
- self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
+ self.good = set() # type: Set[str]
+ self.broken = {} # type: Dict[str, str]
+ self.redirected = {} # type: Dict[str, Tuple[str, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
@@ -130,7 +115,7 @@ class CheckExternalLinksBuilder(Builder):
kwargs['timeout'] = self.app.config.linkcheck_timeout
def check_uri():
- # type: () -> Tuple[unicode, unicode, int]
+ # type: () -> Tuple[str, str, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
@@ -194,7 +179,7 @@ class CheckExternalLinksBuilder(Builder):
return 'redirected', new_url, 0
def check():
- # type: () -> Tuple[unicode, unicode, int]
+ # type: () -> Tuple[str, str, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
@@ -233,7 +218,7 @@ class CheckExternalLinksBuilder(Builder):
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
- # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
+ # type: (Tuple[str, str, int, str, str, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
@@ -271,19 +256,19 @@ class CheckExternalLinksBuilder(Builder):
logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def prepare_writing(self, docnames):
- # type: (nodes.Node) -> None
+ # type: (Set[str]) -> None
return
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
@@ -291,11 +276,10 @@ class CheckExternalLinksBuilder(Builder):
continue
uri = node['refuri']
lineno = None
- while lineno is None:
- node = node.parent
- if node is None:
+ for parent in traverse_parent(node):
+ if parent.line:
+ lineno = parent.line
break
- lineno = node.line
self.wqueue.put((uri, docname, lineno), False)
n += 1
done = 0
@@ -307,8 +291,8 @@ class CheckExternalLinksBuilder(Builder):
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
- # type: (unicode, unicode, int, unicode) -> None
- with codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') as output: # type: ignore # NOQA
+ # type: (str, str, int, str) -> None
+ with open(path.join(self.outdir, 'output.txt'), 'a', encoding='utf-8') as output:
output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
line, what, uri))
@@ -319,7 +303,7 @@ class CheckExternalLinksBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py
index 25287f2be..a277d4604 100644
--- a/sphinx/builders/manpage.py
+++ b/sphinx/builders/manpage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.manpage
~~~~~~~~~~~~~~~~~~~~~~~
@@ -13,7 +12,6 @@ from os import path
from docutils.frontend import OptionParser
from docutils.io import FileOutput
-from six import string_types
from sphinx import addnodes
from sphinx.builders import Builder
@@ -22,13 +20,14 @@ from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.util.nodes import inline_all_toctrees
-from sphinx.util.osutil import make_filename
+from sphinx.util.osutil import make_filename_from_project
from sphinx.writers.manpage import ManualPageWriter, ManualPageTranslator
if False:
# For type annotation
- from typing import Any, Dict, List, Set, Union # NOQA
+ from typing import Any, Dict, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
logger = logging.getLogger(__name__)
@@ -43,7 +42,7 @@ class ManualPageBuilder(Builder):
epilog = __('The manual pages are in %(outdir)s.')
default_translator_class = ManualPageTranslator
- supported_image_types = [] # type: List[unicode]
+ supported_image_types = [] # type: List[str]
def init(self):
# type: () -> None
@@ -52,11 +51,11 @@ class ManualPageBuilder(Builder):
'will be written'))
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all manpages' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if typ == 'token':
return ''
raise NoUri
@@ -67,7 +66,7 @@ class ManualPageBuilder(Builder):
docsettings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
- read_config_files=True).get_default_values()
+ read_config_files=True).get_default_values() # type: Any
logger.info(bold(__('writing... ')), nonl=True)
@@ -77,12 +76,17 @@ class ManualPageBuilder(Builder):
logger.warning(__('"man_pages" config value references unknown '
'document %s'), docname)
continue
- if isinstance(authors, string_types):
+ if isinstance(authors, str):
if authors:
authors = [authors]
else:
authors = []
+ docsettings.title = name
+ docsettings.subtitle = description
+ docsettings.authors = authors
+ docsettings.section = section
+
targetname = '%s.%s' % (name, section)
logger.info(darkgreen(targetname) + ' { ', nonl=True)
destination = FileOutput(
@@ -90,21 +94,16 @@ class ManualPageBuilder(Builder):
encoding='utf-8')
tree = self.env.get_doctree(docname)
- docnames = set() # type: Set[unicode]
+ docnames = set() # type: Set[str]
largetree = inline_all_toctrees(self, docnames, docname, tree,
darkgreen, [docname])
+ largetree.settings = docsettings
logger.info('} ', nonl=True)
self.env.resolve_references(largetree, docname, self)
# remove pending_xref nodes
for pendingnode in largetree.traverse(addnodes.pending_xref):
pendingnode.replace_self(pendingnode.children)
- largetree.settings = docsettings
- largetree.settings.title = name
- largetree.settings.subtitle = description
- largetree.settings.authors = authors
- largetree.settings.section = section
-
docwriter.write(largetree, destination)
logger.info('')
@@ -113,14 +112,19 @@ class ManualPageBuilder(Builder):
pass
+def default_man_pages(config):
+ # type: (Config) -> List[Tuple[str, str, str, List[str], int]]
+ """ Better default man_pages settings. """
+ filename = make_filename_from_project(config.project)
+ return [(config.master_doc, filename, '%s %s' % (config.project, config.release),
+ [config.author], 1)]
+
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(ManualPageBuilder)
- app.add_config_value('man_pages',
- lambda self: [(self.master_doc, make_filename(self.project).lower(),
- '%s %s' % (self.project, self.release), [], 1)],
- None)
+ app.add_config_value('man_pages', default_man_pages, None)
app.add_config_value('man_show_urls', False, None)
return {
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index 341f1812e..fbf4e2665 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.qthelp
~~~~~~~~~~~~~~~~~~~~~~
@@ -9,24 +8,23 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
+import html
import os
import posixpath
import re
from os import path
+from typing import Iterable, cast
from docutils import nodes
-from six import text_type
from sphinx import addnodes
from sphinx import package_dir
from sphinx.builders.html import StandaloneHTMLBuilder
-from sphinx.config import string_classes
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.locale import __
-from sphinx.util import force_decode, logging
+from sphinx.util import logging
+from sphinx.util.nodes import NodeMatcher
from sphinx.util.osutil import make_filename
-from sphinx.util.pycompat import htmlescape
from sphinx.util.template import SphinxRenderer
if False:
@@ -46,7 +44,7 @@ section_template = '<section title="%(title)s" ref="%(ref)s"/>'
def render_file(filename, **kwargs):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
pathname = os.path.join(package_dir, 'templates', 'qthelp', filename)
return SphinxRenderer.render_from_file(pathname, kwargs)
@@ -80,14 +78,14 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
def init(self):
# type: () -> None
- StandaloneHTMLBuilder.init(self)
+ super().init()
# the output files for HTML help must be .html only
self.out_suffix = '.html'
self.link_suffix = '.html'
# self.config.html_style = 'traditional.css'
def get_theme_config(self):
- # type: () -> Tuple[unicode, Dict]
+ # type: () -> Tuple[str, Dict]
return self.config.qthelp_theme, self.config.qthelp_theme_options
def handle_finish(self):
@@ -95,34 +93,23 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.info(__('writing project file...'))
# sections
tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
prune_toctrees=False)
- def istoctree(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, addnodes.compact_paragraph) and \
- 'toctree' in node
sections = []
- for node in tocdoc.traverse(istoctree):
+ matcher = NodeMatcher(addnodes.compact_paragraph, toctree=True)
+ for node in tocdoc.traverse(matcher): # type: addnodes.compact_paragraph
sections.extend(self.write_toc(node))
for indexname, indexcls, content, collapse in self.domain_indices:
item = section_template % {'title': indexcls.localname,
'ref': '%s.html' % indexname}
sections.append(' ' * 4 * 4 + item)
- # sections may be unicode strings or byte strings, we have to make sure
- # they are all unicode strings before joining them
- new_sections = []
- for section in sections:
- if not isinstance(section, text_type):
- new_sections.append(force_decode(section, None))
- else:
- new_sections.append(section)
- sections = u'\n'.join(new_sections) # type: ignore
+ sections = '\n'.join(sections) # type: ignore
# keywords
keywords = []
@@ -130,7 +117,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
for (key, group) in index:
for title, (refs, subitems, key_) in group:
keywords.extend(self.build_keywords(title, refs, subitems))
- keywords = u'\n'.join(keywords) # type: ignore
+ keywords = '\n'.join(keywords) # type: ignore
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
@@ -145,7 +132,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
nspace = nspace.lower()
# write the project file
- with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f: # type: ignore # NOQA
+ with open(path.join(outdir, outname + '.qhp'), 'w', encoding='utf-8') as f:
body = render_file('project.qhp', outname=outname,
title=self.config.html_title, version=self.config.version,
project=self.config.project, namespace=nspace,
@@ -159,7 +146,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
logger.info(__('writing collection project file...'))
- with codecs.open(path.join(outdir, outname + '.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA
+ with open(path.join(outdir, outname + '.qhcp'), 'w', encoding='utf-8') as f:
body = render_file('project.qhcp', outname=outname,
title=self.config.html_short_title,
homepage=homepage, startpage=startpage)
@@ -171,37 +158,40 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return False
if len(node.children) != 2:
return False
- if not isinstance(node.children[0], addnodes.compact_paragraph):
+ if not isinstance(node[0], addnodes.compact_paragraph):
return False
- if not isinstance(node.children[0][0], nodes.reference):
+ if not isinstance(node[0][0], nodes.reference):
return False
- if not isinstance(node.children[1], nodes.bullet_list):
+ if not isinstance(node[1], nodes.bullet_list):
return False
return True
def write_toc(self, node, indentlevel=4):
- # type: (nodes.Node, int) -> List[unicode]
- # XXX this should return a Unicode string, not a bytestring
- parts = [] # type: List[unicode]
- if self.isdocnode(node):
- refnode = node.children[0][0]
- link = refnode['refuri']
- title = htmlescape(refnode.astext()).replace('"', '&quot;')
+ # type: (nodes.Node, int) -> List[str]
+ parts = [] # type: List[str]
+ if isinstance(node, nodes.list_item) and self.isdocnode(node):
+ compact_paragraph = cast(addnodes.compact_paragraph, node[0])
+ reference = cast(nodes.reference, compact_paragraph[0])
+ link = reference['refuri']
+ title = html.escape(reference.astext()).replace('"', '&quot;')
item = '<section title="%(title)s" ref="%(ref)s">' % \
{'title': title, 'ref': link}
parts.append(' ' * 4 * indentlevel + item)
- for subnode in node.children[1]:
- parts.extend(self.write_toc(subnode, indentlevel + 1))
+
+ bullet_list = cast(nodes.bullet_list, node[1])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
+ for list_item in list_items:
+ parts.extend(self.write_toc(list_item, indentlevel + 1))
parts.append(' ' * 4 * indentlevel + '</section>')
elif isinstance(node, nodes.list_item):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
elif isinstance(node, nodes.reference):
link = node['refuri']
- title = htmlescape(node.astext()).replace('"', '&quot;')
+ title = html.escape(node.astext()).replace('"', '&quot;')
item = section_template % {'title': title, 'ref': link}
- item = u' ' * 4 * indentlevel + item
- parts.append(item.encode('ascii', 'xmlcharrefreplace'))
+ item = ' ' * 4 * indentlevel + item
+ parts.append(item.encode('ascii', 'xmlcharrefreplace').decode())
elif isinstance(node, nodes.bullet_list):
for subnode in node:
parts.extend(self.write_toc(subnode, indentlevel))
@@ -212,7 +202,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return parts
def keyword_item(self, name, ref):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
matchobj = _idpattern.match(name)
if matchobj:
groupdict = matchobj.groupdict()
@@ -225,8 +215,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
else:
id = None
- nameattr = htmlescape(name, quote=True)
- refattr = htmlescape(ref[1], quote=True)
+ nameattr = html.escape(name, quote=True)
+ refattr = html.escape(ref[1], quote=True)
if id:
item = ' ' * 12 + '<keyword name="%s" id="%s" ref="%s"/>' % (nameattr, id, refattr)
else:
@@ -235,8 +225,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return item
def build_keywords(self, title, refs, subitems):
- # type: (unicode, List[Any], Any) -> List[unicode]
- keywords = [] # type: List[unicode]
+ # type: (str, List[Any], Any) -> List[str]
+ keywords = [] # type: List[str]
# if len(refs) == 0: # XXX
# write_param('See Also', title)
@@ -258,7 +248,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return keywords
def get_project_files(self, outdir):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
@@ -276,12 +266,12 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(QtHelpBuilder)
app.add_config_value('qthelp_basename', lambda self: make_filename(self.project), None)
- app.add_config_value('qthelp_namespace', None, 'html', string_classes)
+ app.add_config_value('qthelp_namespace', None, 'html', [str])
app.add_config_value('qthelp_theme', 'nonav', 'html')
app.add_config_value('qthelp_theme_options', {}, 'html')
diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py
index 2a022158f..967082c24 100644
--- a/sphinx/builders/texinfo.py
+++ b/sphinx/builders/texinfo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.texinfo
~~~~~~~~~~~~~~~~~~~~~~~
@@ -17,6 +16,7 @@ from docutils.frontend import OptionParser
from docutils.io import FileOutput
from sphinx import addnodes
+from sphinx import package_dir
from sphinx.builders import Builder
from sphinx.environment import NoUri
from sphinx.environment.adapters.asset import ImageAdapter
@@ -27,69 +27,18 @@ from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.util.docutils import new_document
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.nodes import inline_all_toctrees
-from sphinx.util.osutil import SEP, make_filename
+from sphinx.util.osutil import SEP, make_filename_from_project
from sphinx.writers.texinfo import TexinfoWriter, TexinfoTranslator
if False:
# For type annotation
from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
logger = logging.getLogger(__name__)
-
-TEXINFO_MAKEFILE = '''\
-# Makefile for Sphinx Texinfo output
-
-infodir ?= /usr/share/info
-
-MAKEINFO = makeinfo --no-split
-MAKEINFO_html = makeinfo --no-split --html
-MAKEINFO_plaintext = makeinfo --no-split --plaintext
-TEXI2PDF = texi2pdf --batch --expand
-INSTALL_INFO = install-info
-
-ALLDOCS = $(basename $(wildcard *.texi))
-
-all: info
-info: $(addsuffix .info,$(ALLDOCS))
-plaintext: $(addsuffix .txt,$(ALLDOCS))
-html: $(addsuffix .html,$(ALLDOCS))
-pdf: $(addsuffix .pdf,$(ALLDOCS))
-
-install-info: info
-\tfor f in *.info; do \\
-\t cp -t $(infodir) "$$f" && \\
-\t $(INSTALL_INFO) --info-dir=$(infodir) "$$f" ; \\
-\tdone
-
-uninstall-info: info
-\tfor f in *.info; do \\
-\t rm -f "$(infodir)/$$f" ; \\
-\t $(INSTALL_INFO) --delete --info-dir=$(infodir) "$$f" ; \\
-\tdone
-
-%.info: %.texi
-\t$(MAKEINFO) -o '$@' '$<'
-
-%.txt: %.texi
-\t$(MAKEINFO_plaintext) -o '$@' '$<'
-
-%.html: %.texi
-\t$(MAKEINFO_html) -o '$@' '$<'
-
-%.pdf: %.texi
-\t-$(TEXI2PDF) '$<'
-\t-$(TEXI2PDF) '$<'
-\t-$(TEXI2PDF) '$<'
-
-clean:
-\trm -f *.info *.pdf *.txt *.html
-\trm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ky *.pg
-\trm -f *.vr *.tp *.fn *.fns *.def *.defs *.cp *.cps *.ge *.ges *.mo
-
-.PHONY: all info plaintext html pdf install-info uninstall-info clean
-'''
+template_dir = os.path.join(package_dir, 'templates', 'texinfo')
class TexinfoBuilder(Builder):
@@ -110,22 +59,22 @@ class TexinfoBuilder(Builder):
def init(self):
# type: () -> None
- self.docnames = [] # type: Iterable[unicode]
- self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
+ self.docnames = [] # type: Iterable[str]
+ self.document_data = [] # type: List[Tuple[str, str, str, str, str, str, str, bool]]
def get_outdated_docs(self):
- # type: () -> Union[unicode, List[unicode]]
+ # type: () -> Union[str, List[str]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
# ignore source path
return self.get_target_uri(to, typ)
@@ -137,7 +86,7 @@ class TexinfoBuilder(Builder):
'will be written'))
return
# assign subdirs to titles
- self.titles = [] # type: List[Tuple[unicode, unicode]]
+ self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
@@ -155,7 +104,7 @@ class TexinfoBuilder(Builder):
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
- direntry = description = category = '' # type: unicode
+ direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
@@ -174,7 +123,7 @@ class TexinfoBuilder(Builder):
settings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
- read_config_files=True).get_default_values()
+ read_config_files=True).get_default_values() # type: Any
settings.author = author
settings.title = title
settings.texinfo_filename = targetname[:-5] + '.info'
@@ -188,7 +137,7 @@ class TexinfoBuilder(Builder):
logger.info(__("done"))
def assemble_doctree(self, indexfile, toctree_only, appendices):
- # type: (unicode, bool, List[unicode]) -> nodes.Node
+ # type: (str, bool, List[str]) -> nodes.document
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
@@ -198,8 +147,8 @@ class TexinfoBuilder(Builder):
# fresh document
new_tree = new_document('<texinfo output>')
new_sect = nodes.section()
- new_sect += nodes.title(u'<Set title in conf.py>',
- u'<Set title in conf.py>')
+ new_sect += nodes.title('<Set title in conf.py>',
+ '<Set title in conf.py>')
new_tree += new_sect
for node in tree.traverse(addnodes.toctree):
new_sect += node
@@ -218,7 +167,7 @@ class TexinfoBuilder(Builder):
for pendingnode in largetree.traverse(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
- newnodes = [nodes.emphasis(sectname, sectname)]
+ newnodes = [nodes.emphasis(sectname, sectname)] # type: List[nodes.Node]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
@@ -239,9 +188,8 @@ class TexinfoBuilder(Builder):
fn = path.join(self.outdir, 'Makefile')
logger.info(fn, nonl=1)
try:
- with open(fn, 'w') as mkfile:
- mkfile.write(TEXINFO_MAKEFILE)
- except (IOError, OSError) as err:
+ copy_asset_file(os.path.join(template_dir, 'Makefile'), fn)
+ except OSError as err:
logger.warning(__("error writing file %s: %s"), fn, err)
logger.info(__(' done'))
@@ -261,17 +209,19 @@ class TexinfoBuilder(Builder):
path.join(self.srcdir, src), err)
+def default_texinfo_documents(config):
+ # type: (Config) -> List[Tuple[str, str, str, str, str, str, str]]
+ """ Better default texinfo_documents settings. """
+ filename = make_filename_from_project(config.project)
+ return [(config.master_doc, filename, config.project, config.author, filename,
+ 'One line description of project', 'Miscellaneous')]
+
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(TexinfoBuilder)
- app.add_config_value('texinfo_documents',
- lambda self: [(self.master_doc, make_filename(self.project).lower(),
- self.project, '', make_filename(self.project),
- 'The %s reference manual.' %
- make_filename(self.project),
- 'Python')],
- None)
+ app.add_config_value('texinfo_documents', default_texinfo_documents, None)
app.add_config_value('texinfo_appendices', [], None)
app.add_config_value('texinfo_elements', {}, None)
app.add_config_value('texinfo_domain_indices', True, None, [list])
diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py
index 81209d165..1cbaa3d3f 100644
--- a/sphinx/builders/text.py
+++ b/sphinx/builders/text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.text
~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
from os import path
from docutils.io import StringOutput
@@ -38,21 +36,20 @@ class TextBuilder(Builder):
allow_parallel = True
default_translator_class = TextTranslator
- current_docname = None # type: unicode
+ current_docname = None # type: str
def init(self):
# type: () -> None
# section numbers for headings in the currently visited document
- self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
+ self.secnumbers = {} # type: Dict[str, Tuple[int, ...]]
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
- targetname = self.env.doc2path(docname, self.outdir,
- self.out_suffix)
+ targetname = path.join(self.outdir, docname + self.out_suffix)
try:
targetmtime = path.getmtime(targetname)
except Exception:
@@ -61,20 +58,20 @@ class TextBuilder(Builder):
srcmtime = path.getmtime(self.env.doc2path(docname))
if srcmtime > targetmtime:
yield docname
- except EnvironmentError:
+ except OSError:
# source doesn't exist anymore
pass
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self.writer = TextWriter(self)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
self.current_docname = docname
self.secnumbers = self.env.toc_secnumbers.get(docname, {})
destination = StringOutput(encoding='utf-8')
@@ -82,9 +79,9 @@ class TextBuilder(Builder):
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore
+ with open(outfilename, 'w', encoding='utf-8') as f:
f.write(self.writer.output)
- except (IOError, OSError) as err:
+ except OSError as err:
logger.warning(__("error writing file %s: %s"), outfilename, err)
def finish(self):
@@ -93,7 +90,7 @@ class TextBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(TextBuilder)
app.add_config_value('text_sectionchars', '*=-~"+`', 'env')
diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py
index 1fe9e2001..e99e6ccbf 100644
--- a/sphinx/builders/websupport.py
+++ b/sphinx/builders/websupport.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.websupport
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -16,7 +15,7 @@ if False:
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
try:
from sphinxcontrib.websupport.builder import WebSupportBuilder
app.add_builder(WebSupportBuilder)
diff --git a/sphinx/builders/xml.py b/sphinx/builders/xml.py
index 6198532c9..6c6d5a9d8 100644
--- a/sphinx/builders/xml.py
+++ b/sphinx/builders/xml.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.builders.xml
~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
from os import path
from docutils import nodes
@@ -24,7 +22,8 @@ from sphinx.writers.xml import XMLWriter, PseudoXMLWriter
if False:
# For type annotation
- from typing import Any, Dict, Iterator, Set # NOQA
+ from typing import Any, Dict, Iterator, Set, Type # NOQA
+ from docutils.writers.xml import BaseXMLWriter # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
@@ -41,7 +40,7 @@ class XMLBuilder(Builder):
out_suffix = '.xml'
allow_parallel = True
- _writer_class = XMLWriter
+ _writer_class = XMLWriter # type: Type[BaseXMLWriter]
default_translator_class = XMLTranslator
def init(self):
@@ -49,13 +48,12 @@ class XMLBuilder(Builder):
pass
def get_outdated_docs(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
continue
- targetname = self.env.doc2path(docname, self.outdir,
- self.out_suffix)
+ targetname = path.join(self.outdir, docname + self.out_suffix)
try:
targetmtime = path.getmtime(targetname)
except Exception:
@@ -64,20 +62,20 @@ class XMLBuilder(Builder):
srcmtime = path.getmtime(self.env.doc2path(docname))
if srcmtime > targetmtime:
yield docname
- except EnvironmentError:
+ except OSError:
# source doesn't exist anymore
pass
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return docname
def prepare_writing(self, docnames):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self.writer = self._writer_class(self)
def write_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
# work around multiple string % tuple issues in docutils;
# replace tuples in attribute values with lists
doctree = doctree.deepcopy()
@@ -95,9 +93,9 @@ class XMLBuilder(Builder):
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore
+ with open(outfilename, 'w', encoding='utf-8') as f:
f.write(self.writer.output)
- except (IOError, OSError) as err:
+ except OSError as err:
logger.warning(__("error writing file %s: %s"), outfilename, err)
def finish(self):
@@ -119,7 +117,7 @@ class PseudoXMLBuilder(XMLBuilder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(XMLBuilder)
app.add_builder(PseudoXMLBuilder)
diff --git a/sphinx/cmd/__init__.py b/sphinx/cmd/__init__.py
index a559306d6..349e7863d 100644
--- a/sphinx/cmd/__init__.py
+++ b/sphinx/cmd/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.cmd
~~~~~~~~~~
diff --git a/sphinx/cmd/build.py b/sphinx/cmd/build.py
index 16aa41742..15c6827ff 100644
--- a/sphinx/cmd/build.py
+++ b/sphinx/cmd/build.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.cmd.build
~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import argparse
import locale
@@ -18,7 +16,6 @@ import sys
import traceback
from docutils.utils import SystemMessage
-from six import text_type, binary_type
import sphinx.locale
from sphinx import __display_version__, package_dir
@@ -55,17 +52,17 @@ def handle_exception(app, args, exception, stderr=sys.stderr):
print(terminal_safe(exception.args[0]), file=stderr)
elif isinstance(exception, SphinxError):
print(red('%s:' % exception.category), file=stderr)
- print(terminal_safe(text_type(exception)), file=stderr)
+ print(terminal_safe(str(exception)), file=stderr)
elif isinstance(exception, UnicodeError):
print(red(__('Encoding error:')), file=stderr)
- print(terminal_safe(text_type(exception)), file=stderr)
+ print(terminal_safe(str(exception)), file=stderr)
tbpath = save_traceback(app)
print(red(__('The full traceback has been saved in %s, if you want '
'to report the issue to the developers.') % tbpath),
file=stderr)
elif isinstance(exception, RuntimeError) and 'recursion depth' in str(exception):
print(red(__('Recursion error:')), file=stderr)
- print(terminal_safe(text_type(exception)), file=stderr)
+ print(terminal_safe(str(exception)), file=stderr)
print(file=stderr)
print(__('This can happen with very large or deeply nested source '
'files. You can carefully increase the default Python '
@@ -199,15 +196,15 @@ files can be built by specifying individual filenames.
return parser
-def make_main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def make_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
"""Sphinx build "make mode" entry."""
from sphinx.cmd import make_mode
return make_mode.run_make_mode(argv[1:])
-def build_main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def build_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
"""Sphinx build "main" command-line entry."""
parser = get_parser()
@@ -230,13 +227,6 @@ def build_main(argv=sys.argv[1:]): # type: ignore
if missing_files:
parser.error(__('cannot find files %r') % missing_files)
- # likely encoding used for command-line arguments
- try:
- locale = __import__('locale') # due to submodule of the same name
- likely_encoding = locale.getpreferredencoding()
- except Exception:
- likely_encoding = None
-
if args.force_all and filenames:
parser.error(__('cannot combine -a option and filenames'))
@@ -268,11 +258,6 @@ def build_main(argv=sys.argv[1:]): # type: ignore
key, val = val.split('=', 1)
except ValueError:
parser.error(__('-D option argument must be in the form name=value'))
- if likely_encoding and isinstance(val, binary_type):
- try:
- val = val.decode(likely_encoding)
- except UnicodeError:
- pass
confoverrides[key] = val
for val in args.htmldefine:
@@ -283,11 +268,7 @@ def build_main(argv=sys.argv[1:]): # type: ignore
try:
val = int(val)
except ValueError:
- if likely_encoding and isinstance(val, binary_type):
- try:
- val = val.decode(likely_encoding)
- except UnicodeError:
- pass
+ pass
confoverrides['html_context.%s' % key] = val
if args.nitpicky:
@@ -308,8 +289,8 @@ def build_main(argv=sys.argv[1:]): # type: ignore
return 2
-def main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
sphinx.locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console(os.path.join(package_dir, 'locale'), 'sphinx')
@@ -320,4 +301,4 @@ def main(argv=sys.argv[1:]): # type: ignore
if __name__ == '__main__':
- sys.exit(main(sys.argv[1:])) # type: ignore
+ sys.exit(main(sys.argv[1:]))
diff --git a/sphinx/cmd/make_mode.py b/sphinx/cmd/make_mode.py
index cf8673623..6baf0de17 100644
--- a/sphinx/cmd/make_mode.py
+++ b/sphinx/cmd/make_mode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.cmd.make_mode
~~~~~~~~~~~~~~~~~~~~
@@ -14,7 +13,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import subprocess
@@ -59,17 +57,17 @@ BUILDERS = [
]
-class Make(object):
+class Make:
def __init__(self, srcdir, builddir, opts):
- # type: (unicode, unicode, List[unicode]) -> None
+ # type: (str, str, List[str]) -> None
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make') # refer $MAKE to determine make command
def builddir_join(self, *comps):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return path.join(self.builddir, *comps)
def build_clean(self):
@@ -146,7 +144,7 @@ class Make(object):
return 0
def run_generic_build(self, builder, doctreedir=None):
- # type: (unicode, unicode) -> int
+ # type: (str, str) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
@@ -163,7 +161,7 @@ class Make(object):
def run_make_mode(args):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
diff --git a/sphinx/cmd/quickstart.py b/sphinx/cmd/quickstart.py
index 5494423e4..d3e41aa54 100644
--- a/sphinx/cmd/quickstart.py
+++ b/sphinx/cmd/quickstart.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.cmd.quickstart
~~~~~~~~~~~~~~~~~~~~~
@@ -8,8 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-from __future__ import print_function
import argparse
import locale
@@ -17,9 +14,10 @@ import os
import re
import sys
import time
+import warnings
from collections import OrderedDict
-from io import open
from os import path
+from urllib.parse import quote
# try to import readline, unix specific enhancement
try:
@@ -34,12 +32,10 @@ except ImportError:
USE_LIBEDIT = False
from docutils.utils import column_width
-from six import PY2, PY3, text_type, binary_type
-from six.moves import input
-from six.moves.urllib.parse import quote as urlquote
import sphinx.locale
from sphinx import __display_version__, package_dir
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.locale import __
from sphinx.util import texescape
from sphinx.util.console import ( # type: ignore
@@ -52,7 +48,7 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, List, Pattern, Union # NOQA
-TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
+TERM_ENCODING = getattr(sys.stdin, 'encoding', None) # RemovedInSphinx40Warning
EXTENSIONS = OrderedDict([
('autodoc', __('automatically insert docstrings from modules')),
@@ -91,7 +87,7 @@ else:
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if sys.platform == 'win32':
# Important: On windows, readline is not enabled by default. In these
# environment, escape sequences have been broken. To avoid the
@@ -107,29 +103,29 @@ class ValidationError(Exception):
def is_path(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
x = path.expanduser(x)
- if path.exists(x) and not path.isdir(x):
+ if not path.isdir(x):
raise ValidationError(__("Please enter a valid path name."))
return x
def allow_empty(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return x
def nonempty(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if not x:
raise ValidationError(__("Please enter some text."))
return x
def choice(*l):
- # type: (unicode) -> Callable[[unicode], unicode]
+ # type: (str) -> Callable[[str], str]
def val(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if x not in l:
raise ValidationError(__('Please enter one of %s.') % ', '.join(l))
return x
@@ -137,14 +133,14 @@ def choice(*l):
def boolean(x):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError(__("Please enter either 'y' or 'n'."))
return x.upper() in ('Y', 'YES')
def suffix(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError(__("Please enter a file suffix, "
"e.g. '.rst' or '.txt'."))
@@ -152,13 +148,16 @@ def suffix(x):
def ok(x):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return x
def term_decode(text):
- # type: (Union[bytes,unicode]) -> unicode
- if isinstance(text, text_type):
+ # type: (Union[bytes,str]) -> str
+ warnings.warn('term_decode() is deprecated.',
+ RemovedInSphinx40Warning, stacklevel=2)
+
+ if isinstance(text, str):
return text
# Use the known encoding, if possible
@@ -173,32 +172,18 @@ def term_decode(text):
'and terminal encoding unknown -- assuming '
'UTF-8 or Latin-1.')))
try:
- return text.decode('utf-8')
+ return text.decode()
except UnicodeDecodeError:
return text.decode('latin1')
def do_prompt(text, default=None, validator=nonempty):
- # type: (unicode, unicode, Callable[[unicode], Any]) -> Union[unicode, bool]
+ # type: (str, str, Callable[[str], Any]) -> Union[str, bool]
while True:
if default is not None:
- prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
+ prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
else:
prompt = PROMPT_PREFIX + text + ': '
- if PY2:
- # for Python 2.x, try to get a Unicode string out of it
- if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
- != prompt:
- if TERM_ENCODING:
- prompt = prompt.encode(TERM_ENCODING)
- else:
- print(turquoise(__('* Note: non-ASCII default value provided '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.')))
- try:
- prompt = prompt.encode('utf-8')
- except UnicodeEncodeError:
- prompt = prompt.encode('latin1')
if USE_LIBEDIT:
# Note: libedit has a problem for combination of ``input()`` and escape
# sequence (see #5335). To avoid the problem, all prompts are not colored
@@ -209,7 +194,6 @@ def do_prompt(text, default=None, validator=nonempty):
x = term_input(prompt).strip()
if default and not x:
x = default
- x = term_decode(x)
try:
x = validator(x)
except ValidationError as err:
@@ -220,27 +204,26 @@ def do_prompt(text, default=None, validator=nonempty):
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
- # type: (unicode, Pattern) -> unicode
+ # type: (str, Pattern) -> str
# remove Unicode literal prefixes
- if PY3:
- return rex.sub('\\1', source)
- else:
- return source
+ warnings.warn('convert_python_source() is deprecated.',
+ RemovedInSphinx40Warning)
+ return rex.sub('\\1', source)
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
- # type: (unicode) -> None
+ # type: (str) -> None
self.templatedir = templatedir or ''
- super(QuickstartRenderer, self).__init__()
+ super().__init__()
def render(self, template_name, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
else:
- return super(QuickstartRenderer, self).render(template_name, context)
+ return super().render(template_name, context)
def ask_user(d):
@@ -388,7 +371,7 @@ directly.'''))
def generate(d, overwrite=True, silent=False, templatedir=None):
- # type: (Dict, bool, bool, unicode) -> None
+ # type: (Dict, bool, bool, str) -> None
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
@@ -399,18 +382,17 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
if 'mastertocmaxdepth' not in d:
d['mastertocmaxdepth'] = 2
- d['PY3'] = PY3
+ d['PY3'] = True
d['project_fn'] = make_filename(d['project'])
- d['project_url'] = urlquote(d['project'].encode('idna'))
+ d['project_url'] = quote(d['project'].encode('idna'))
d['project_manpage'] = d['project_fn'].lower()
d['now'] = time.asctime()
d['project_underline'] = column_width(d['project']) * '='
d.setdefault('extensions', [])
d['copyright'] = time.strftime('%Y') + ', ' + d['author']
- d['author_texescaped'] = text_type(d['author']).\
- translate(texescape.tex_escape_map)
+ d['author_texescaped'] = d['author'].translate(texescape.tex_escape_map)
d['project_doc'] = d['project'] + ' Documentation'
- d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
+ d['project_doc_texescaped'] = (d['project'] + ' Documentation').\
translate(texescape.tex_escape_map)
# escape backslashes and single quotes in strings that are put into
@@ -420,8 +402,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
'version', 'release', 'master'):
d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
- if not path.isdir(d['path']):
- ensuredir(d['path'])
+ ensuredir(d['path'])
srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
@@ -441,7 +422,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
ensuredir(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
if overwrite or not path.isfile(fpath):
if 'quiet' not in d:
print(__('Creating file %s.') % fpath)
@@ -455,7 +436,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
if not conf_path or not path.isfile(conf_path):
conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
with open(conf_path) as f:
- conf_text = convert_python_source(f.read())
+ conf_text = f.read()
write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
@@ -474,13 +455,13 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
# use binary mode, to avoid writing \r\n on Windows
write_file(path.join(d['path'], 'Makefile'),
- template.render(makefile_template, d), u'\n')
+ template.render(makefile_template, d), '\n')
if d['batchfile'] is True:
d['rsrcdir'] = d['sep'] and 'source' or '.'
d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
write_file(path.join(d['path'], 'make.bat'),
- template.render(batchfile_template, d), u'\r\n')
+ template.render(batchfile_template, d), '\r\n')
if silent:
return
@@ -659,11 +640,6 @@ def main(argv=sys.argv[1:]):
print('[Interrupted.]')
return 130 # 128 + SIGINT
- # decode values in d if value is a Python string literal
- for key, value in d.items():
- if isinstance(value, binary_type):
- d[key] = term_decode(value)
-
# handle use of CSV-style extension values
d.setdefault('extensions', [])
for ext in d['extensions'][:]:
diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py
index 252f95bbc..1b2033ed6 100644
--- a/sphinx/cmdline.py
+++ b/sphinx/cmdline.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.cmdline
~~~~~~~~~~~~~~
@@ -8,8 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-from __future__ import print_function
import sys
import warnings
@@ -45,8 +42,8 @@ def get_parser():
return build.get_parser()
-def main(argv=sys.argv[1:]): # type: ignore
- # type: (List[unicode]) -> int
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
warnings.warn('sphinx.cmdline module is deprecated. Use sphinx.cmd.build instead.',
RemovedInSphinx30Warning, stacklevel=2)
return build.main(argv)
diff --git a/sphinx/config.py b/sphinx/config.py
index e456d00b6..338de770c 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.config
~~~~~~~~~~~~~
@@ -17,11 +16,7 @@ from collections import OrderedDict
from os import path, getenv
from typing import Any, NamedTuple, Union
-from six import (
- PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types, class_types
-)
-
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.errors import ConfigError, ExtensionError
from sphinx.locale import _, __
from sphinx.util import logging
@@ -31,22 +26,20 @@ from sphinx.util.pycompat import execfile_, NoneType
if False:
# For type annotation
- from typing import Any, Callable, Dict, Generator, Iterator, List, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, Generator, Iterator, List, Set, Tuple, Union # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
from sphinx.util.tags import Tags # NOQA
logger = logging.getLogger(__name__)
CONFIG_FILENAME = 'conf.py'
-UNSERIALIZABLE_TYPES = class_types + (types.ModuleType, types.FunctionType)
+UNSERIALIZABLE_TYPES = (type, types.ModuleType, types.FunctionType)
copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])')
-if PY3:
- unicode = str # special alias for static typing...
-
ConfigValue = NamedTuple('ConfigValue', [('name', str),
('value', Any),
- ('rebuild', Union[bool, unicode])])
+ ('rebuild', Union[bool, str])])
def is_serializable(obj):
@@ -55,7 +48,7 @@ def is_serializable(obj):
if isinstance(obj, UNSERIALIZABLE_TYPES):
return False
elif isinstance(obj, dict):
- for key, value in iteritems(obj):
+ for key, value in obj.items():
if not is_serializable(key) or not is_serializable(value):
return False
elif isinstance(obj, (list, tuple, set)):
@@ -64,30 +57,29 @@ def is_serializable(obj):
return True
-class ENUM(object):
+class ENUM:
"""represents the config value should be a one of candidates.
Example:
app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))
"""
def __init__(self, *candidates):
- # type: (unicode) -> None
+ # type: (str) -> None
self.candidates = candidates
def match(self, value):
- # type: (Union[unicode,List,Tuple]) -> bool
+ # type: (Union[str, List, Tuple]) -> bool
if isinstance(value, (list, tuple)):
return all(item in self.candidates for item in value)
else:
return value in self.candidates
-string_classes = [text_type] # type: List
-if PY2:
- string_classes.append(binary_type) # => [str, unicode]
+# RemovedInSphinx40Warning
+string_classes = [str] # type: List
-class Config(object):
+class Config:
"""Configuration file abstraction.
The config object makes the values of all config values available as
@@ -104,63 +96,63 @@ class Config(object):
# If you add a value here, don't forget to include it in the
# quickstart.py file template as well as in the docs!
- config_values = dict(
+ config_values = {
# general options
- project = ('Python', 'env', []),
- author = ('unknown', 'env', []),
- copyright = ('', 'html', []),
- version = ('', 'env', []),
- release = ('', 'env', []),
- today = ('', 'env', []),
+ 'project': ('Python', 'env', []),
+ 'author': ('unknown', 'env', []),
+ 'copyright': ('', 'html', []),
+ 'version': ('', 'env', []),
+ 'release': ('', 'env', []),
+ 'today': ('', 'env', []),
# the real default is locale-dependent
- today_fmt = (None, 'env', string_classes),
-
- language = (None, 'env', string_classes),
- locale_dirs = (['locales'], 'env', []),
- figure_language_filename = (u'{root}.{language}{ext}', 'env', [str]),
-
- master_doc = ('contents', 'env', []),
- source_suffix = ({'.rst': 'restructuredtext'}, 'env', Any),
- source_encoding = ('utf-8-sig', 'env', []),
- source_parsers = ({}, 'env', []),
- exclude_patterns = ([], 'env', []),
- default_role = (None, 'env', string_classes),
- add_function_parentheses = (True, 'env', []),
- add_module_names = (True, 'env', []),
- trim_footnote_reference_space = (False, 'env', []),
- show_authors = (False, 'env', []),
- pygments_style = (None, 'html', string_classes),
- highlight_language = ('default', 'env', []),
- highlight_options = ({}, 'env', []),
- templates_path = ([], 'html', []),
- template_bridge = (None, 'html', string_classes),
- keep_warnings = (False, 'env', []),
- suppress_warnings = ([], 'env', []),
- modindex_common_prefix = ([], 'html', []),
- rst_epilog = (None, 'env', string_classes),
- rst_prolog = (None, 'env', string_classes),
- trim_doctest_flags = (True, 'env', []),
- primary_domain = ('py', 'env', [NoneType]), # type: ignore
- needs_sphinx = (None, None, string_classes),
- needs_extensions = ({}, None, []),
- manpages_url = (None, 'env', []),
- nitpicky = (False, None, []),
- nitpick_ignore = ([], None, []),
- numfig = (False, 'env', []),
- numfig_secnum_depth = (1, 'env', []),
- numfig_format = ({}, 'env', []), # will be initialized in init_numfig_format()
-
- math_number_all = (False, 'env', []),
- math_eqref_format = (None, 'env', string_classes),
- math_numfig = (True, 'env', []),
- tls_verify = (True, 'env', []),
- tls_cacerts = (None, 'env', []),
- smartquotes = (True, 'env', []),
- smartquotes_action = ('qDe', 'env', []),
- smartquotes_excludes = ({'languages': ['ja'],
- 'builders': ['man', 'text']},
- 'env', []),
- ) # type: Dict[unicode, Tuple]
+ 'today_fmt': (None, 'env', [str]),
+
+ 'language': (None, 'env', [str]),
+ 'locale_dirs': (['locales'], 'env', []),
+ 'figure_language_filename': ('{root}.{language}{ext}', 'env', [str]),
+
+ 'master_doc': ('index', 'env', []),
+ 'source_suffix': ({'.rst': 'restructuredtext'}, 'env', Any),
+ 'source_encoding': ('utf-8-sig', 'env', []),
+ 'source_parsers': ({}, 'env', []),
+ 'exclude_patterns': ([], 'env', []),
+ 'default_role': (None, 'env', [str]),
+ 'add_function_parentheses': (True, 'env', []),
+ 'add_module_names': (True, 'env', []),
+ 'trim_footnote_reference_space': (False, 'env', []),
+ 'show_authors': (False, 'env', []),
+ 'pygments_style': (None, 'html', [str]),
+ 'highlight_language': ('default', 'env', []),
+ 'highlight_options': ({}, 'env', []),
+ 'templates_path': ([], 'html', []),
+ 'template_bridge': (None, 'html', [str]),
+ 'keep_warnings': (False, 'env', []),
+ 'suppress_warnings': ([], 'env', []),
+ 'modindex_common_prefix': ([], 'html', []),
+ 'rst_epilog': (None, 'env', [str]),
+ 'rst_prolog': (None, 'env', [str]),
+ 'trim_doctest_flags': (True, 'env', []),
+ 'primary_domain': ('py', 'env', [NoneType]), # type: ignore
+ 'needs_sphinx': (None, None, [str]),
+ 'needs_extensions': ({}, None, []),
+ 'manpages_url': (None, 'env', []),
+ 'nitpicky': (False, None, []),
+ 'nitpick_ignore': ([], None, []),
+ 'numfig': (False, 'env', []),
+ 'numfig_secnum_depth': (1, 'env', []),
+ 'numfig_format': ({}, 'env', []), # will be initialized in init_numfig_format()
+
+ 'math_number_all': (False, 'env', []),
+ 'math_eqref_format': (None, 'env', [str]),
+ 'math_numfig': (True, 'env', []),
+ 'tls_verify': (True, 'env', []),
+ 'tls_cacerts': (None, 'env', []),
+ 'smartquotes': (True, 'env', []),
+ 'smartquotes_action': ('qDe', 'env', []),
+ 'smartquotes_excludes': ({'languages': ['ja'],
+ 'builders': ['man', 'text']},
+ 'env', []),
+ } # type: Dict[str, Tuple]
def __init__(self, *args):
# type: (Any) -> None
@@ -171,7 +163,7 @@ class Config(object):
RemovedInSphinx30Warning, stacklevel=2)
dirname, filename, overrides, tags = args
if dirname is None:
- config = {} # type: Dict[unicode, Any]
+ config = {} # type: Dict[str, Any]
else:
config = eval_config_file(path.join(dirname, filename), tags)
else:
@@ -189,15 +181,15 @@ class Config(object):
self.setup = config.get('setup', None) # type: Callable
if 'extensions' in overrides:
- if isinstance(overrides['extensions'], string_types):
+ if isinstance(overrides['extensions'], str):
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
- self.extensions = config.get('extensions', []) # type: List[unicode]
+ self.extensions = config.get('extensions', []) # type: List[str]
@classmethod
def read(cls, confdir, overrides=None, tags=None):
- # type: (unicode, Dict, Tags) -> Config
+ # type: (str, Dict, Tags) -> Config
"""Create a Config object from configuration file."""
filename = path.join(confdir, CONFIG_FILENAME)
namespace = eval_config_file(filename, tags)
@@ -216,8 +208,8 @@ class Config(object):
check_unicode(self)
def convert_overrides(self, name, value):
- # type: (unicode, Any) -> Any
- if not isinstance(value, string_types):
+ # type: (str, Any) -> Any
+ if not isinstance(value, str):
return value
else:
defvalue = self.values[name][0]
@@ -229,7 +221,7 @@ class Config(object):
(name, name + '.key=value'))
elif isinstance(defvalue, list):
return value.split(',')
- elif isinstance(defvalue, integer_types):
+ elif isinstance(defvalue, int):
try:
return int(value)
except ValueError:
@@ -237,7 +229,7 @@ class Config(object):
(value, name))
elif hasattr(defvalue, '__call__'):
return value
- elif defvalue is not None and not isinstance(defvalue, string_types):
+ elif defvalue is not None and not isinstance(defvalue, str):
raise ValueError(__('cannot override config setting %r with unsupported '
'type, ignoring') % name)
else:
@@ -261,7 +253,7 @@ class Config(object):
def init_values(self):
# type: () -> None
config = self._raw_config
- for valname, value in iteritems(self.overrides):
+ for valname, value in self.overrides.items():
try:
if '.' in valname:
realvalname, key = valname.split('.', 1)
@@ -271,7 +263,7 @@ class Config(object):
logger.warning(__('unknown config value %r in override, ignoring'),
valname)
continue
- if isinstance(value, string_types):
+ if isinstance(value, str):
config[valname] = self.convert_overrides(valname, value)
else:
config[valname] = value
@@ -279,10 +271,10 @@ class Config(object):
logger.warning("%s", exc)
for name in config:
if name in self.values:
- self.__dict__[name] = config[name] # type: ignore
+ self.__dict__[name] = config[name]
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
@@ -293,36 +285,36 @@ class Config(object):
return default
def __getitem__(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return getattr(self, name)
def __setitem__(self, name, value):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
setattr(self, name, value)
def __delitem__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
delattr(self, name)
def __contains__(self, name):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return name in self.values
def __iter__(self):
# type: () -> Generator[ConfigValue, None, None]
- for name, value in iteritems(self.values):
- yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore
+ for name, value in self.values.items():
+ yield ConfigValue(name, getattr(self, name), value[1])
def add(self, name, default, rebuild, types):
- # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ # type: (str, Any, Union[bool, str], Any) -> None
if name in self.values:
raise ExtensionError(__('Config value %r already present') % name)
else:
self.values[name] = (default, rebuild, types)
def filter(self, rebuild):
- # type: (Union[unicode, List[unicode]]) -> Iterator[ConfigValue]
- if isinstance(rebuild, string_types):
+ # type: (Union[str, List[str]]) -> Iterator[ConfigValue]
+ if isinstance(rebuild, str):
rebuild = [rebuild]
return (value for value in self if value.rebuild in rebuild)
@@ -331,7 +323,7 @@ class Config(object):
"""Obtains serializable data for pickling."""
# remove potentially pickling-problematic values from config
__dict__ = {}
- for key, value in iteritems(self.__dict__):
+ for key, value in self.__dict__.items():
if key.startswith('_') or not is_serializable(value):
pass
else:
@@ -339,7 +331,7 @@ class Config(object):
# create a picklable copy of values list
__dict__['values'] = {}
- for key, value in iteritems(self.values): # type: ignore
+ for key, value in self.values.items():
real_value = getattr(self, key)
if not is_serializable(real_value):
# omit unserializable value
@@ -356,9 +348,9 @@ class Config(object):
def eval_config_file(filename, tags):
- # type: (unicode, Tags) -> Dict[unicode, Any]
+ # type: (str, Tags) -> Dict[str, Any]
"""Evaluate a config file."""
- namespace = {} # type: Dict[unicode, Any]
+ namespace = {} # type: Dict[str, Any]
namespace['__file__'] = filename
namespace['tags'] = tags
@@ -367,9 +359,7 @@ def eval_config_file(filename, tags):
try:
execfile_(filename, namespace)
except SyntaxError as err:
- msg = __("There is a syntax error in your configuration file: %s")
- if PY3:
- msg += __("\nDid you change the syntax from 2.x to 3.x?")
+ msg = __("There is a syntax error in your configuration file: %s\n")
raise ConfigError(msg % err)
except SystemExit:
msg = __("The configuration file (or one of the modules it imports) "
@@ -390,7 +380,7 @@ def convert_source_suffix(app, config):
* new style: a dict which maps from fileext to filetype
"""
source_suffix = config.source_suffix
- if isinstance(source_suffix, string_types):
+ if isinstance(source_suffix, str):
# if str, considers as default filetype (None)
#
# The default filetype is determined on later step.
@@ -403,8 +393,8 @@ def convert_source_suffix(app, config):
# if dict, convert it to OrderedDict
config.source_suffix = OrderedDict(config.source_suffix) # type: ignore
else:
- logger.warning(__("The config value `source_suffix' expected to "
- "a string, list of strings or dictionary. "
+ logger.warning(__("The config value `source_suffix' expects "
+ "a string, list of strings, or dictionary. "
"But `%r' is given." % source_suffix))
@@ -471,11 +461,18 @@ def check_confval_types(app, config):
continue # at least we share a non-trivial base class
if annotations:
- msg = __("The config value `{name}' has type `{current.__name__}', "
- "expected to {permitted}.")
+ msg = __("The config value `{name}' has type `{current.__name__}'; "
+ "expected {permitted}.")
+ wrapped_annotations = ["`{}'".format(c.__name__) for c in annotations]
+ if len(wrapped_annotations) > 2:
+ permitted = "{}, or {}".format(
+ ", ".join(wrapped_annotations[:-1]),
+ wrapped_annotations[-1])
+ else:
+ permitted = " or ".join(wrapped_annotations)
logger.warning(msg.format(name=confval.name,
current=type(confval.value),
- permitted=str([c.__name__ for c in annotations])))
+ permitted=permitted))
else:
msg = __("The config value `{name}' has type `{current.__name__}', "
"defaults to `{default.__name__}'.")
@@ -489,21 +486,49 @@ def check_unicode(config):
"""check all string values for non-ASCII characters in bytestrings,
since that can result in UnicodeErrors all over the place
"""
+ warnings.warn('sphinx.config.check_unicode() is deprecated.',
+ RemovedInSphinx40Warning)
+
nonascii_re = re.compile(br'[\x80-\xff]')
- for name, value in iteritems(config._raw_config):
- if isinstance(value, binary_type) and nonascii_re.search(value):
+ for name, value in config._raw_config.items():
+ if isinstance(value, bytes) and nonascii_re.search(value):
logger.warning(__('the config value %r is set to a string with non-ASCII '
'characters; this can lead to Unicode errors occurring. '
- 'Please use Unicode strings, e.g. %r.'), name, u'Content')
+ 'Please use Unicode strings, e.g. %r.'), name, 'Content')
+
+
+def check_primary_domain(app, config):
+ # type: (Sphinx, Config) -> None
+ primary_domain = config.primary_domain
+ if primary_domain and not app.registry.has_domain(primary_domain):
+ logger.warning(__('primary_domain %r not found, ignored.'), primary_domain)
+ config.primary_domain = None # type: ignore
+
+
+def check_master_doc(app, env, added, changed, removed):
+ # type: (Sphinx, BuildEnvironment, Set[str], Set[str], Set[str]) -> Set[str]
+ """Adjust master_doc to 'contents' to support an old project which does not have
+    a master_doc setting.
+ """
+ if (app.config.master_doc == 'index' and
+ 'index' not in app.project.docnames and
+ 'contents' in app.project.docnames):
+ logger.warning(__('Since v2.0, Sphinx uses "index" as master_doc by default. '
+ 'Please add "master_doc = \'contents\'" to your conf.py.'))
+ app.config.master_doc = "contents" # type: ignore
+
+ return changed
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', convert_source_suffix)
app.connect('config-inited', init_numfig_format)
app.connect('config-inited', correct_copyright_year)
app.connect('config-inited', check_confval_types)
+ app.connect('config-inited', check_primary_domain)
+ app.connect('env-get-outdated', check_master_doc)
return {
'version': 'builtin',
diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py
index b3219a0b2..4882f5443 100644
--- a/sphinx/deprecation.py
+++ b/sphinx/deprecation.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.deprecation
~~~~~~~~~~~~~~~~~~
@@ -13,14 +12,9 @@ import warnings
if False:
# For type annotation
- # note: Don't use typing.TYPE_CHECK here (for py27 and py34).
from typing import Any, Dict, Type # NOQA
-class RemovedInSphinx20Warning(DeprecationWarning):
- pass
-
-
class RemovedInSphinx30Warning(PendingDeprecationWarning):
pass
@@ -29,7 +23,7 @@ class RemovedInSphinx40Warning(PendingDeprecationWarning):
pass
-RemovedInNextVersionWarning = RemovedInSphinx20Warning
+RemovedInNextVersionWarning = RemovedInSphinx30Warning
class DeprecatedDict(dict):
@@ -39,29 +33,29 @@ class DeprecatedDict(dict):
# type: (Dict, str, Type[Warning]) -> None
self.message = message
self.warning = warning
- super(DeprecatedDict, self).__init__(data)
+ super().__init__(data)
def __setitem__(self, key, value):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
- super(DeprecatedDict, self).__setitem__(key, value)
+ super().__setitem__(key, value)
def setdefault(self, key, default=None):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
- return super(DeprecatedDict, self).setdefault(key, default)
+ return super().setdefault(key, default)
def __getitem__(self, key):
- # type: (unicode) -> None
+ # type: (str) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
- return super(DeprecatedDict, self).__getitem__(key)
+ return super().__getitem__(key)
def get(self, key, default=None):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
- return super(DeprecatedDict, self).get(key, default)
+ return super().get(key, default)
def update(self, other=None): # type: ignore
# type: (Dict) -> None
warnings.warn(self.message, self.warning, stacklevel=2)
- super(DeprecatedDict, self).update(other)
+ super().update(other)
diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py
index 1177a258e..5102e7e2b 100644
--- a/sphinx/directives/__init__.py
+++ b/sphinx/directives/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.directives
~~~~~~~~~~~~~~~~~
@@ -10,11 +9,13 @@
"""
import re
+from typing import List, cast
from docutils import nodes
from docutils.parsers.rst import directives, roles
from sphinx import addnodes
+from sphinx.util import docutils
from sphinx.util.docfields import DocFieldTransformer
from sphinx.util.docutils import SphinxDirective
@@ -32,10 +33,12 @@ from sphinx.directives.patches import ( # noqa
if False:
# For type annotation
- from typing import Any, Dict, List # NOQA
+ from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.docfields import Field # NOQA
+ from sphinx.util.typing import DirectiveOption # NOQA
# RE to strip backslash escapes
@@ -56,16 +59,16 @@ class ObjectDescription(SphinxDirective):
final_argument_whitespace = True
option_spec = {
'noindex': directives.flag,
- }
+ } # type: Dict[str, DirectiveOption]
# types of doc fields that this directive handles, see sphinx.util.docfields
- doc_field_types = [] # type: List[Any]
- domain = None # type: unicode
- objtype = None # type: unicode
+ doc_field_types = [] # type: List[Field]
+ domain = None # type: str
+ objtype = None # type: str
indexnode = None # type: addnodes.index
def get_signatures(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""
Retrieve the signatures to document from the directive arguments. By
default, signatures are given as arguments, one per line.
@@ -77,7 +80,7 @@ class ObjectDescription(SphinxDirective):
return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines]
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Any
+ # type: (str, addnodes.desc_signature) -> Any
"""
Parse the signature *sig* into individual nodes and append them to
*signode*. If ValueError is raised, parsing is aborted and the whole
@@ -90,7 +93,7 @@ class ObjectDescription(SphinxDirective):
raise ValueError
def add_target_and_index(self, name, sig, signode):
- # type: (Any, unicode, addnodes.desc_signature) -> None
+ # type: (Any, str, addnodes.desc_signature) -> None
"""
Add cross-reference IDs and entries to self.indexnode, if applicable.
@@ -146,7 +149,7 @@ class ObjectDescription(SphinxDirective):
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
- self.names = [] # type: List[unicode]
+ self.names = [] # type: List[str]
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
@@ -199,22 +202,22 @@ class DefaultRole(SphinxDirective):
def run(self):
# type: () -> List[nodes.Node]
if not self.arguments:
- if '' in roles._roles:
- # restore the "default" default role
- del roles._roles['']
+ docutils.unregister_role('')
return []
role_name = self.arguments[0]
role, messages = roles.role(role_name, self.state_machine.language,
self.lineno, self.state.reporter)
- if role is None:
- error = self.state.reporter.error(
- 'Unknown interpreted text role "%s".' % role_name,
- nodes.literal_block(self.block_text, self.block_text),
- line=self.lineno)
- return messages + [error]
- roles._roles[''] = role
- self.env.temp_data['default_role'] = role_name
- return messages
+ if role:
+ docutils.register_role('', role)
+ self.env.temp_data['default_role'] = role_name
+ else:
+ literal_block = nodes.literal_block(self.block_text, self.block_text)
+ reporter = self.state.reporter
+ error = reporter.error('Unknown interpreted text role "%s".' % role_name,
+ literal_block, line=self.lineno)
+ messages += [error]
+
+ return cast(List[nodes.Node], messages)
class DefaultDomain(SphinxDirective):
@@ -233,7 +236,7 @@ class DefaultDomain(SphinxDirective):
domain_name = self.arguments[0].lower()
# if domain_name not in env.domains:
# # try searching by label
- # for domain in itervalues(env.domains):
+ # for domain in env.domains.values():
# if domain.label.lower() == domain_name:
# domain_name = domain.name
# break
@@ -242,7 +245,7 @@ class DefaultDomain(SphinxDirective):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('default-role', DefaultRole)
directives.register_directive('default-domain', DefaultDomain)
directives.register_directive('describe', ObjectDescription)
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 30ded6dd4..c2acf8cca 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.directives.code
~~~~~~~~~~~~~~~~~~~~~~
@@ -7,15 +6,13 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import sys
import warnings
from difflib import unified_diff
from docutils import nodes
from docutils.parsers.rst import directives
-from docutils.statemachine import ViewList
-from six import text_type
+from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx40Warning
@@ -63,11 +60,11 @@ class HighlightLang(Highlight):
warnings.warn('highlightlang directive is deprecated. '
'Please use highlight directive instead.',
RemovedInSphinx40Warning, stacklevel=2)
- return Highlight.run(self)
+ return super().run()
def dedent_lines(lines, dedent, location=None):
- # type: (List[unicode], int, Any) -> List[unicode]
+ # type: (List[str], int, Tuple[str, int]) -> List[str]
if not dedent:
return lines
@@ -85,22 +82,25 @@ def dedent_lines(lines, dedent, location=None):
def container_wrapper(directive, literal_node, caption):
- # type: (SphinxDirective, nodes.Node, unicode) -> nodes.container
+ # type: (SphinxDirective, nodes.Node, str) -> nodes.container
container_node = nodes.container('', literal_block=True,
classes=['literal-block-wrapper'])
parsed = nodes.Element()
- directive.state.nested_parse(ViewList([caption], source=''),
+ directive.state.nested_parse(StringList([caption], source=''),
directive.content_offset, parsed)
if isinstance(parsed[0], nodes.system_message):
msg = __('Invalid caption: %s' % parsed[0].astext())
raise ValueError(msg)
- caption_node = nodes.caption(parsed[0].rawsource, '',
- *parsed[0].children)
- caption_node.source = literal_node.source
- caption_node.line = literal_node.line
- container_node += caption_node
- container_node += literal_node
- return container_node
+ elif isinstance(parsed[0], nodes.Element):
+ caption_node = nodes.caption(parsed[0].rawsource, '',
+ *parsed[0].children)
+ caption_node.source = literal_node.source
+ caption_node.line = literal_node.line
+ container_node += caption_node
+ container_node += literal_node
+ return container_node
+ else:
+ raise RuntimeError # never reached
class CodeBlock(SphinxDirective):
@@ -126,7 +126,7 @@ class CodeBlock(SphinxDirective):
def run(self):
# type: () -> List[nodes.Node]
document = self.state.document
- code = u'\n'.join(self.content)
+ code = '\n'.join(self.content)
location = self.state_machine.get_source_and_line(self.lineno)
linespec = self.options.get('emphasize-lines')
@@ -141,7 +141,7 @@ class CodeBlock(SphinxDirective):
hl_lines = [x + 1 for x in hl_lines if x < nlines]
except ValueError as err:
- return [document.reporter.warning(str(err), line=self.lineno)]
+ return [document.reporter.warning(err, line=self.lineno)]
else:
hl_lines = None
@@ -151,7 +151,7 @@ class CodeBlock(SphinxDirective):
lines = dedent_lines(lines, self.options['dedent'], location=location)
code = '\n'.join(lines)
- literal = nodes.literal_block(code, code)
+ literal = nodes.literal_block(code, code) # type: nodes.Element
literal['language'] = self.arguments[0]
literal['linenos'] = 'linenos' in self.options or \
'lineno-start' in self.options
@@ -168,7 +168,7 @@ class CodeBlock(SphinxDirective):
try:
literal = container_wrapper(self, literal, caption)
except ValueError as exc:
- return [document.reporter.warning(text_type(exc), line=self.lineno)]
+ return [document.reporter.warning(exc, line=self.lineno)]
# literal will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
@@ -177,7 +177,7 @@ class CodeBlock(SphinxDirective):
return [literal]
-class LiteralIncludeReader(object):
+class LiteralIncludeReader:
INVALID_OPTIONS_PAIR = [
('lineno-match', 'lineno-start'),
('lineno-match', 'append'),
@@ -195,7 +195,7 @@ class LiteralIncludeReader(object):
]
def __init__(self, filename, options, config):
- # type: (unicode, Dict, Config) -> None
+ # type: (str, Dict, Config) -> None
self.filename = filename
self.options = options
self.encoding = options.get('encoding', config.source_encoding)
@@ -211,23 +211,23 @@ class LiteralIncludeReader(object):
(option1, option2))
def read_file(self, filename, location=None):
- # type: (unicode, Any) -> List[unicode]
+ # type: (str, Tuple[str, int]) -> List[str]
try:
- with codecs.open(filename, 'r', self.encoding, errors='strict') as f: # type: ignore # NOQA
- text = f.read() # type: unicode
+ with open(filename, encoding=self.encoding, errors='strict') as f:
+ text = f.read()
if 'tab-width' in self.options:
text = text.expandtabs(self.options['tab-width'])
return text.splitlines(True)
- except (IOError, OSError):
- raise IOError(__('Include file %r not found or reading it failed') % filename)
+ except OSError:
+ raise OSError(__('Include file %r not found or reading it failed') % filename)
except UnicodeError:
raise UnicodeError(__('Encoding %r used for reading included file %r seems to '
'be wrong, try giving an :encoding: option') %
(self.encoding, filename))
def read(self, location=None):
- # type: (Any) -> Tuple[unicode, int]
+ # type: (Tuple[str, int]) -> Tuple[str, int]
if 'diff' in self.options:
lines = self.show_diff()
else:
@@ -245,7 +245,7 @@ class LiteralIncludeReader(object):
return ''.join(lines), len(lines)
def show_diff(self, location=None):
- # type: (Any) -> List[unicode]
+ # type: (Tuple[str, int]) -> List[str]
new_lines = self.read_file(self.filename)
old_filename = self.options.get('diff')
old_lines = self.read_file(old_filename)
@@ -253,7 +253,7 @@ class LiteralIncludeReader(object):
return list(diff)
def pyobject_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
pyobject = self.options.get('pyobject')
if pyobject:
from sphinx.pycode import ModuleAnalyzer
@@ -272,7 +272,7 @@ class LiteralIncludeReader(object):
return lines
def lines_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
linespec = self.options.get('lines')
if linespec:
linelist = parselinenos(linespec, len(lines))
@@ -297,7 +297,7 @@ class LiteralIncludeReader(object):
return lines
def start_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
if 'start-at' in self.options:
start = self.options.get('start-at')
inclusive = False
@@ -329,7 +329,7 @@ class LiteralIncludeReader(object):
return lines
def end_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
if 'end-at' in self.options:
end = self.options.get('end-at')
inclusive = True
@@ -357,7 +357,7 @@ class LiteralIncludeReader(object):
return lines
def prepend_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
prepend = self.options.get('prepend')
if prepend:
lines.insert(0, prepend + '\n')
@@ -365,7 +365,7 @@ class LiteralIncludeReader(object):
return lines
def append_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
append = self.options.get('append')
if append:
lines.append(append + '\n')
@@ -373,7 +373,7 @@ class LiteralIncludeReader(object):
return lines
def dedent_filter(self, lines, location=None):
- # type: (List[unicode], Any) -> List[unicode]
+ # type: (List[str], Tuple[str, int]) -> List[str]
if 'dedent' in self.options:
return dedent_lines(lines, self.options.get('dedent'), location=location)
else:
@@ -433,7 +433,7 @@ class LiteralInclude(SphinxDirective):
reader = LiteralIncludeReader(filename, self.options, self.config)
text, lines = reader.read(location=location)
- retnode = nodes.literal_block(text, text, source=filename)
+ retnode = nodes.literal_block(text, text, source=filename) # type: nodes.Element
set_source_info(self, retnode)
if self.options.get('diff'): # if diff is set, set udiff
retnode['language'] = 'udiff'
@@ -463,11 +463,11 @@ class LiteralInclude(SphinxDirective):
return [retnode]
except Exception as exc:
- return [document.reporter.warning(text_type(exc), line=self.lineno)]
+ return [document.reporter.warning(exc, line=self.lineno)]
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', HighlightLang)
directives.register_directive('code-block', CodeBlock)
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index 3776536b8..d2e7f2ddd 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.directives.other
~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,13 +7,13 @@
"""
import re
+from typing import cast
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
from docutils.parsers.rst.directives.misc import Class
from docutils.parsers.rst.directives.misc import Include as BaseInclude
-from six.moves import range
from sphinx import addnodes
from sphinx.domains.changeset import VersionChange # NOQA # for compatibility
@@ -35,7 +34,7 @@ glob_re = re.compile(r'.*[*?\[].*')
def int_or_nothing(argument):
- # type: (unicode) -> int
+ # type: (str) -> int
if not argument:
return 999
return int(argument)
@@ -166,7 +165,7 @@ class Author(SphinxDirective):
# type: () -> List[nodes.Node]
if not self.config.show_authors:
return []
- para = nodes.paragraph(translatable=False)
+ para = nodes.paragraph(translatable=False) # type: nodes.Element
emph = nodes.emphasis()
para += emph
if self.name == 'sectionauthor':
@@ -178,10 +177,12 @@ class Author(SphinxDirective):
else:
text = _('Author: ')
emph += nodes.Text(text, text)
- inodes, messages = self.state.inline_text(self.arguments[0],
- self.lineno)
+ inodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
emph.extend(inodes)
- return [para] + messages
+
+ ret = [para] # type: List[nodes.Node]
+ ret += messages
+ return ret
class Index(SphinxDirective):
@@ -248,11 +249,13 @@ class Centered(SphinxDirective):
# type: () -> List[nodes.Node]
if not self.arguments:
return []
- subnode = addnodes.centered()
- inodes, messages = self.state.inline_text(self.arguments[0],
- self.lineno)
+ subnode = addnodes.centered() # type: nodes.Element
+ inodes, messages = self.state.inline_text(self.arguments[0], self.lineno)
subnode.extend(inodes)
- return [subnode] + messages
+
+ ret = [subnode] # type: List[nodes.Node]
+ ret += messages
+ return ret
class Acks(SphinxDirective):
@@ -272,8 +275,8 @@ class Acks(SphinxDirective):
self.state.nested_parse(self.content, self.content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0],
nodes.bullet_list):
- return [self.state.document.reporter.warning(
- '.. acks content is not a list', line=self.lineno)]
+ reporter = self.state.document.reporter
+ return [reporter.warning('.. acks content is not a list', line=self.lineno)]
return [node]
@@ -297,8 +300,8 @@ class HList(SphinxDirective):
self.state.nested_parse(self.content, self.content_offset, node)
if len(node.children) != 1 or not isinstance(node.children[0],
nodes.bullet_list):
- return [self.state.document.reporter.warning(
- '.. hlist content is not a list', line=self.lineno)]
+ reporter = self.state.document.reporter
+ return [reporter.warning('.. hlist content is not a list', line=self.lineno)]
fulllist = node.children[0]
# create a hlist node where the items are distributed
npercol, nmore = divmod(len(fulllist), ncolumns)
@@ -306,11 +309,10 @@ class HList(SphinxDirective):
newnode = addnodes.hlist()
for column in range(ncolumns):
endindex = index + (column < nmore and (npercol + 1) or npercol)
- col = addnodes.hlistcol()
- col += nodes.bullet_list()
- col[0] += fulllist.children[index:endindex]
+ bullet_list = nodes.bullet_list()
+ bullet_list += fulllist.children[index:endindex]
+ newnode += addnodes.hlistcol('', bullet_list)
index = endindex
- newnode += col
return [newnode]
@@ -333,14 +335,15 @@ class Only(SphinxDirective):
# Same as util.nested_parse_with_titles but try to handle nested
# sections which should be raised higher up the doctree.
- surrounding_title_styles = self.state.memo.title_styles
- surrounding_section_level = self.state.memo.section_level
- self.state.memo.title_styles = []
- self.state.memo.section_level = 0
+ memo = self.state.memo # type: Any
+ surrounding_title_styles = memo.title_styles
+ surrounding_section_level = memo.section_level
+ memo.title_styles = []
+ memo.section_level = 0
try:
self.state.nested_parse(self.content, self.content_offset,
- node, match_titles=1)
- title_styles = self.state.memo.title_styles
+ node, match_titles=True)
+ title_styles = memo.title_styles
if (not surrounding_title_styles or
not title_styles or
title_styles[0] not in surrounding_title_styles or
@@ -361,15 +364,15 @@ class Only(SphinxDirective):
# Use these depths to determine where the nested sections should
# be placed in the doctree.
n_sects_to_raise = current_depth - nested_depth + 1
- parent = self.state.parent
+ parent = cast(nodes.Element, self.state.parent)
for i in range(n_sects_to_raise):
if parent.parent:
parent = parent.parent
parent.append(node)
return []
finally:
- self.state.memo.title_styles = surrounding_title_styles
- self.state.memo.section_level = surrounding_section_level
+ memo.title_styles = surrounding_title_styles
+ memo.section_level = surrounding_section_level
class Include(BaseInclude, SphinxDirective):
@@ -383,15 +386,15 @@ class Include(BaseInclude, SphinxDirective):
if self.arguments[0].startswith('<') and \
self.arguments[0].endswith('>'):
# docutils "standard" includes, do not do path processing
- return BaseInclude.run(self)
+ return super().run()
rel_filename, filename = self.env.relfn2path(self.arguments[0])
self.arguments[0] = filename
self.env.note_included(filename)
- return BaseInclude.run(self)
+ return super().run()
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
directives.register_directive('toctree', TocTree)
directives.register_directive('sectionauthor', Author)
directives.register_directive('moduleauthor', Author)
diff --git a/sphinx/directives/patches.py b/sphinx/directives/patches.py
index 1c90d5313..ecee2b10c 100644
--- a/sphinx/directives/patches.py
+++ b/sphinx/directives/patches.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.directives.patches
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -7,6 +6,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from docutils.nodes import make_id
from docutils.parsers.rst import directives
@@ -30,18 +31,21 @@ class Figure(images.Figure):
def run(self):
# type: () -> List[nodes.Node]
name = self.options.pop('name', None)
- result = images.Figure.run(self)
+ result = super().run()
if len(result) == 2 or isinstance(result[0], nodes.system_message):
return result
- (figure_node,) = result
+ assert len(result) == 1
+ figure_node = cast(nodes.figure, result[0])
if name:
+ # set ``name`` to figure_node if given
self.options['name'] = name
self.add_name(figure_node)
- # fill lineno using image node
+ # copy lineno from image node
if figure_node.line is None and len(figure_node) == 2:
- figure_node.line = figure_node[1].line
+ caption = cast(nodes.caption, figure_node[1])
+ figure_node.line = caption.line
return [figure_node]
@@ -49,17 +53,17 @@ class Figure(images.Figure):
class Meta(html.Meta, SphinxDirective):
def run(self):
# type: () -> List[nodes.Node]
- result = html.Meta.run(self)
+ result = super().run()
for node in result:
if (isinstance(node, nodes.pending) and
isinstance(node.details['nodes'][0], html.MetaBody.meta)):
meta = node.details['nodes'][0]
meta.source = self.env.doc2path(self.env.docname)
meta.line = self.lineno
- meta.rawcontent = meta['content']
+ meta.rawcontent = meta['content'] # type: ignore
# docutils' meta nodes aren't picklable because the class is nested
- meta.__class__ = addnodes.meta
+ meta.__class__ = addnodes.meta # type: ignore
return result
@@ -70,8 +74,8 @@ class RSTTable(tables.RSTTable):
Only for docutils-0.13 or older version."""
def make_title(self):
- # type: () -> Tuple[nodes.Node, unicode]
- title, message = tables.RSTTable.make_title(self)
+ # type: () -> Tuple[nodes.title, List[nodes.system_message]]
+ title, message = super().make_title()
if title:
set_source_info(self, title)
@@ -84,8 +88,8 @@ class CSVTable(tables.CSVTable):
Only for docutils-0.13 or older version."""
def make_title(self):
- # type: () -> Tuple[nodes.Node, unicode]
- title, message = tables.CSVTable.make_title(self)
+ # type: () -> Tuple[nodes.title, List[nodes.system_message]]
+ title, message = super().make_title()
if title:
set_source_info(self, title)
@@ -98,8 +102,8 @@ class ListTable(tables.ListTable):
Only for docutils-0.13 or older version."""
def make_title(self):
- # type: () -> Tuple[nodes.Node, unicode]
- title, message = tables.ListTable.make_title(self)
+ # type: () -> Tuple[nodes.title, List[nodes.system_message]]
+ title, message = super().make_title()
if title:
set_source_info(self, title)
@@ -107,7 +111,6 @@ class ListTable(tables.ListTable):
class MathDirective(SphinxDirective):
-
has_content = True
required_arguments = 0
optional_arguments = 1
@@ -124,18 +127,18 @@ class MathDirective(SphinxDirective):
if self.arguments and self.arguments[0]:
latex = self.arguments[0] + '\n\n' + latex
node = nodes.math_block(latex, latex,
- docname=self.state.document.settings.env.docname,
+ docname=self.env.docname,
number=self.options.get('name'),
label=self.options.get('label'),
nowrap='nowrap' in self.options)
- ret = [node]
+ ret = [node] # type: List[nodes.Node]
set_source_info(self, node)
self.add_target(ret)
return ret
def add_target(self, ret):
# type: (List[nodes.Node]) -> None
- node = ret[0]
+ node = cast(nodes.math_block, ret[0])
# assign label automatically if math_number_all enabled
if node['label'] == '' or (self.config.math_number_all and not node['label']):
@@ -158,7 +161,7 @@ class MathDirective(SphinxDirective):
self.state.document.note_explicit_target(target)
ret.insert(0, target)
except UserWarning as exc:
- self.state_machine.reporter.warning(exc.args[0], line=self.lineno)
+ self.state_machine.reporter.warning(exc, line=self.lineno)
def setup(app):
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index 41db13cb6..34e2e1224 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains
~~~~~~~~~~~~~~
@@ -11,8 +10,7 @@
"""
import copy
-
-from six import iteritems
+from typing import NamedTuple
from sphinx.errors import SphinxError
from sphinx.locale import _
@@ -22,13 +20,14 @@ if False:
from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, Union # NOQA
from docutils import nodes # NOQA
from docutils.parsers.rst.states import Inliner # NOQA
+ from sphinx import addnodes # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.roles import XRefRole # NOQA
from sphinx.util.typing import RoleFunction # NOQA
-class ObjType(object):
+class ObjType:
"""
An ObjType is the description for a type of object that a domain can
document. In the object_types attribute of Domain subclasses, object type
@@ -48,14 +47,23 @@ class ObjType(object):
}
def __init__(self, lname, *roles, **attrs):
- # type: (unicode, Any, Any) -> None
- self.lname = lname # type: unicode
+ # type: (str, Any, Any) -> None
+ self.lname = lname
self.roles = roles # type: Tuple
self.attrs = self.known_attrs.copy() # type: Dict
self.attrs.update(attrs)
-class Index(object):
+IndexEntry = NamedTuple('IndexEntry', [('name', str),
+ ('subtype', int),
+ ('docname', str),
+ ('anchor', str),
+ ('extra', str),
+ ('qualifier', str),
+ ('descr', str)])
+
+
+class Index:
"""
An Index is the description for a domain-specific index. To add an index to
a domain, subclass Index, overriding the three name attributes:
@@ -70,9 +78,9 @@ class Index(object):
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
- name = None # type: unicode
- localname = None # type: unicode
- shortname = None # type: unicode
+ name = None # type: str
+ localname = None # type: str
+ shortname = None # type: str
def __init__(self, domain):
# type: (Domain) -> None
@@ -82,7 +90,7 @@ class Index(object):
self.domain = domain
def generate(self, docnames=None):
- # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA
+ # type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
@@ -113,7 +121,7 @@ class Index(object):
raise NotImplementedError
-class Domain(object):
+class Domain:
"""
A Domain is meant to be a group of "object" description directives for
objects of a similar nature, and corresponding roles to create references to
@@ -132,7 +140,7 @@ class Domain(object):
build process starts, every active domain is instantiated and given the
environment object; the `domaindata` dict must then either be nonexistent or
a dictionary whose 'version' key is equal to the domain class'
- :attr:`data_version` attribute. Otherwise, `IOError` is raised and the
+ :attr:`data_version` attribute. Otherwise, `OSError` is raised and the
pickled environment is discarded.
"""
@@ -141,17 +149,17 @@ class Domain(object):
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
- object_types = {} # type: Dict[unicode, ObjType]
+ object_types = {} # type: Dict[str, ObjType]
#: directive name -> directive class
- directives = {} # type: Dict[unicode, Any]
+ directives = {} # type: Dict[str, Any]
#: role name -> role callable
- roles = {} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
+ roles = {} # type: Dict[str, Union[RoleFunction, XRefRole]]
#: a list of Index subclasses
indices = [] # type: List[Type[Index]]
#: role name -> a warning message if reference is missing
- dangling_warnings = {} # type: Dict[unicode, unicode]
+ dangling_warnings = {} # type: Dict[str, str]
#: node_class -> (enum_node_type, title_getter)
- enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, Callable]]
+ enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
#: data value for a fresh environment
initial_data = {} # type: Dict
@@ -163,10 +171,10 @@ class Domain(object):
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env # type: BuildEnvironment
- self._role_cache = {} # type: Dict[unicode, Callable]
- self._directive_cache = {} # type: Dict[unicode, Callable]
- self._role2type = {} # type: Dict[unicode, List[unicode]]
- self._type2role = {} # type: Dict[unicode, unicode]
+ self._role_cache = {} # type: Dict[str, Callable]
+ self._directive_cache = {} # type: Dict[str, Callable]
+ self._role2type = {} # type: Dict[str, List[str]]
+ self._type2role = {} # type: Dict[str, str]
# convert class variables to instance one (to enhance through API)
self.object_types = dict(self.object_types)
@@ -182,16 +190,16 @@ class Domain(object):
else:
self.data = env.domaindata[self.name]
if self.data['version'] != self.data_version:
- raise IOError('data of %r domain out of date' % self.label)
- for name, obj in iteritems(self.object_types):
+ raise OSError('data of %r domain out of date' % self.label)
+ for name, obj in self.object_types.items():
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self._type2role[name] = obj.roles[0] if obj.roles else ''
- self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
- self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
+ self.objtypes_for_role = self._role2type.get # type: Callable[[str], List[str]]
+ self.role_for_objtype = self._type2role.get # type: Callable[[str], str]
def add_object_type(self, name, objtype):
- # type: (unicode, ObjType) -> None
+ # type: (str, ObjType) -> None
"""Add an object type."""
self.object_types[name] = objtype
if objtype.roles:
@@ -203,7 +211,7 @@ class Domain(object):
self._role2type.setdefault(role, []).append(name)
def role(self, name):
- # type: (unicode) -> Callable
+ # type: (str) -> RoleFunction
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
@@ -214,14 +222,14 @@ class Domain(object):
fullname = '%s:%s' % (self.name, name)
def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> nodes.Node # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
- # type: (unicode) -> Callable
+ # type: (str) -> Callable
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
@@ -236,19 +244,19 @@ class Domain(object):
def run(self):
# type: () -> List[nodes.Node]
self.name = fullname
- return BaseDirective.run(self)
+ return super().run()
self._directive_cache[name] = DirectiveAdapter
return DirectiveAdapter
# methods that should be overwritten
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Remove traces of a document in the domain-specific inventories."""
pass
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
"""Merge in data regarding *docnames* from a different domaindata
inventory (coming from a subprocess in parallel builds).
"""
@@ -257,7 +265,7 @@ class Domain(object):
self.__class__)
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
"""Process a document after it is read by the environment."""
pass
@@ -267,7 +275,7 @@ class Domain(object):
pass
def process_field_xref(self, pnode):
- # type: (nodes.Node) -> None
+ # type: (addnodes.pending_xref) -> None
"""Process a pending xref created in a doc field.
For example, attach information about the current scope.
"""
@@ -275,7 +283,7 @@ class Domain(object):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
@@ -292,7 +300,7 @@ class Domain(object):
pass
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
"""Resolve the pending_xref *node* with the given *target*.
The reference comes from an "any" or similar role, which means that we
@@ -309,7 +317,7 @@ class Domain(object):
raise NotImplementedError
def get_objects(self):
- # type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterable[Tuple[str, str, str, str, str, int]]
"""Return an iterable of "object descriptions", which are tuples with
five items:
@@ -329,19 +337,19 @@ class Domain(object):
return []
def get_type_name(self, type, primary=False):
- # type: (ObjType, bool) -> unicode
+ # type: (ObjType, bool) -> str
"""Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
def get_enumerable_node_type(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get type of enumerable nodes (experimental)."""
enum_node_type, _ = self.enumerable_nodes.get(node.__class__, (None, None))
return enum_node_type
def get_full_qualified_name(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
"""Return full qualified name for given node."""
return None
diff --git a/sphinx/domains/c.py b/sphinx/domains/c.py
index 1104475f2..c6bed5139 100644
--- a/sphinx/domains/c.py
+++ b/sphinx/domains/c.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.c
~~~~~~~~~~~~~~~~
@@ -81,7 +80,7 @@ class CObject(ObjectDescription):
))
def _parse_type(self, node, ctype):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
# add cross-ref nodes for all words
for part in [_f for _f in wsplit_re.split(ctype) if _f]:
tnode = nodes.Text(part, part)
@@ -96,7 +95,7 @@ class CObject(ObjectDescription):
node += tnode
def _parse_arglist(self, arglist):
- # type: (unicode) -> Iterator[unicode]
+ # type: (str) -> Iterator[str]
while True:
m = c_funcptr_arg_sig_re.match(arglist)
if m:
@@ -115,7 +114,7 @@ class CObject(ObjectDescription):
break
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
"""Transform a C signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
m = c_funcptr_sig_re.match(sig)
@@ -125,8 +124,9 @@ class CObject(ObjectDescription):
raise ValueError('no match')
rettype, name, arglist, const = m.groups()
- signode += addnodes.desc_type('', '')
- self._parse_type(signode[-1], rettype)
+ desc_type = addnodes.desc_type('', '')
+ signode += desc_type
+ self._parse_type(desc_type, rettype)
try:
classname, funcname = name.split('::', 1)
classname += '::'
@@ -173,7 +173,7 @@ class CObject(ObjectDescription):
ctype, argname = arg.rsplit(' ', 1)
self._parse_type(param, ctype)
# separate by non-breaking space in the output
- param += nodes.emphasis(' ' + argname, u'\xa0' + argname)
+ param += nodes.emphasis(' ' + argname, '\xa0' + argname)
except ValueError:
# no argument name given, only the type
self._parse_type(param, arg)
@@ -184,7 +184,7 @@ class CObject(ObjectDescription):
return fullname
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if self.objtype == 'function':
return _('%s (C function)') % name
elif self.objtype == 'member':
@@ -199,7 +199,7 @@ class CObject(ObjectDescription):
return ''
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
@@ -237,7 +237,7 @@ class CObject(ObjectDescription):
class CXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
@@ -278,16 +278,16 @@ class CDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, Any]]]
+ } # type: Dict[str, Dict[str, Tuple[str, Any]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -295,7 +295,7 @@ class CDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
# becase TypedField can generate xrefs
@@ -309,7 +309,7 @@ class CDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
if target not in self.data['objects']:
@@ -320,13 +320,13 @@ class CDomain(Domain):
contnode, target))]
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield (refname, refname, type, docname, 'c.' + refname, 1)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(CDomain)
return {
diff --git a/sphinx/domains/changeset.py b/sphinx/domains/changeset.py
index ed878b1d3..4a515d2cc 100644
--- a/sphinx/domains/changeset.py
+++ b/sphinx/domains/changeset.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.changeset
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,10 +8,9 @@
:license: BSD, see LICENSE for details.
"""
-from typing import NamedTuple
+from typing import NamedTuple, cast
from docutils import nodes
-from six import iteritems
from sphinx import addnodes
from sphinx import locale
@@ -34,7 +32,13 @@ versionlabels = {
'versionadded': _('New in version %s'),
'versionchanged': _('Changed in version %s'),
'deprecated': _('Deprecated since version %s'),
-} # type: Dict[unicode, unicode]
+}
+
+versionlabel_classes = {
+ 'versionadded': 'added',
+ 'versionchanged': 'changed',
+ 'deprecated': 'deprecated',
+}
locale.versionlabels = DeprecatedDict(
versionlabels,
@@ -80,6 +84,7 @@ class VersionChange(SphinxDirective):
messages = []
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
+ classes = ['versionmodified', versionlabel_classes[self.name]]
if len(node):
if isinstance(node[0], nodes.paragraph) and node[0].rawsource:
content = nodes.inline(node[0].rawsource, translatable=True)
@@ -87,17 +92,22 @@ class VersionChange(SphinxDirective):
content.line = node[0].line
content += node[0].children
node[0].replace_self(nodes.paragraph('', '', content, translatable=False))
- node[0].insert(0, nodes.inline('', '%s: ' % text,
- classes=['versionmodified']))
+
+ para = cast(nodes.paragraph, node[0])
+ para.insert(0, nodes.inline('', '%s: ' % text, classes=classes))
else:
para = nodes.paragraph('', '',
nodes.inline('', '%s.' % text,
- classes=['versionmodified']),
+ classes=classes),
translatable=False)
node.append(para)
- self.env.get_domain('changeset').note_changeset(node) # type: ignore
- return [node] + messages
+ domain = cast(ChangeSetDomain, self.env.get_domain('changeset'))
+ domain.note_changeset(node)
+
+ ret = [node] # type: List[nodes.Node]
+ ret += messages
+ return ret
class ChangeSetDomain(Domain):
@@ -111,41 +121,41 @@ class ChangeSetDomain(Domain):
} # type: Dict
def clear_doc(self, docname):
- # type: (unicode) -> None
- for version, changes in iteritems(self.data['changes']):
+ # type: (str) -> None
+ for version, changes in self.data['changes'].items():
for changeset in changes[:]:
if changeset.docname == docname:
changes.remove(changeset)
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX duplicates?
- for version, otherchanges in iteritems(otherdata['changes']):
+ for version, otherchanges in otherdata['changes'].items():
changes = self.data['changes'].setdefault(version, [])
for changeset in otherchanges:
if changeset.docname in docnames:
changes.append(changeset)
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
pass # nothing to do here. All changesets are registered on calling directive.
def note_changeset(self, node):
- # type: (nodes.Node) -> None
+ # type: (addnodes.versionmodified) -> None
version = node['version']
module = self.env.ref_context.get('py:module')
objname = self.env.temp_data.get('object')
- changeset = ChangeSet(node['type'], self.env.docname, node.line, # type: ignore
+ changeset = ChangeSet(node['type'], self.env.docname, node.line,
module, objname, node.astext())
self.data['changes'].setdefault(version, []).append(changeset)
def get_changesets_for(self, version):
- # type: (unicode) -> List[ChangeSet]
+ # type: (str) -> List[ChangeSet]
return self.data['changes'].get(version, [])
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(ChangeSetDomain)
app.add_directive('deprecated', VersionChange)
app.add_directive('versionadded', VersionChange)
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 62767c15e..51ebe35f1 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.cpp
~~~~~~~~~~~~~~~~~~
@@ -10,13 +9,14 @@
"""
import re
+import warnings
from copy import deepcopy
from docutils import nodes, utils
from docutils.parsers.rst import directives
-from six import iteritems, text_type
from sphinx import addnodes
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
from sphinx.environment import NoUri
@@ -26,7 +26,6 @@ from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
-from sphinx.util.pycompat import UnicodeMixin
if False:
@@ -72,7 +71,7 @@ logger = logging.getLogger(__name__)
Grammar
----------------------------------------------------------------------------
- See http://www.nongnu.org/hcb/ for the grammar,
+ See https://www.nongnu.org/hcb/ for the grammar,
and https://github.com/cplusplus/draft/blob/master/source/grammar.tex,
and https://github.com/cplusplus/concepts-ts
for the newest grammar.
@@ -347,7 +346,7 @@ _fold_operator_re = re.compile(r'''(?x)
| !=
| [<>=/*%+|&^~-]=?
''')
-# see http://en.cppreference.com/w/cpp/keyword
+# see https://en.cppreference.com/w/cpp/keyword
_keywords = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t', 'class',
@@ -382,7 +381,7 @@ _id_fundamental_v1 = {
'signed long': 'l',
'unsigned long': 'L',
'bool': 'b'
-} # type: Dict[unicode, unicode]
+}
_id_shorthands_v1 = {
'std::string': 'ss',
'std::ostream': 'os',
@@ -390,7 +389,7 @@ _id_shorthands_v1 = {
'std::iostream': 'ios',
'std::vector': 'v',
'std::map': 'm'
-} # type: Dict[unicode, unicode]
+}
_id_operator_v1 = {
'new': 'new-operator',
'new[]': 'new-array-operator',
@@ -439,7 +438,7 @@ _id_operator_v1 = {
'->': 'pointer-operator',
'()': 'call-operator',
'[]': 'subscript-operator'
-} # type: Dict[unicode, unicode]
+}
# ------------------------------------------------------------------------------
# Id v > 1 constants
@@ -484,7 +483,7 @@ _id_fundamental_v2 = {
'auto': 'Da',
'decltype(auto)': 'Dc',
'std::nullptr_t': 'Dn'
-} # type: Dict[unicode, unicode]
+}
_id_operator_v2 = {
'new': 'nw',
'new[]': 'na',
@@ -535,7 +534,7 @@ _id_operator_v2 = {
'()': 'cl',
'[]': 'ix',
'.*': 'ds' # this one is not overloadable, but we need it for expressions
-} # type: Dict[unicode, unicode]
+}
_id_operator_unary_v2 = {
'++': 'pp_',
'--': 'mm_',
@@ -549,7 +548,7 @@ _id_operator_unary_v2 = {
_id_char_from_prefix = {
None: 'c', 'u8': 'c',
'u': 'Ds', 'U': 'Di', 'L': 'w'
-} # type: Dict[unicode, unicode]
+} # type: Dict[Any, str]
# these are ordered by preceedence
_expression_bin_ops = [
['||'],
@@ -575,28 +574,28 @@ _id_explicit_cast = {
}
-class NoOldIdError(UnicodeMixin, Exception):
+class NoOldIdError(Exception):
# Used to avoid implementing unneeded id generation for old id schmes.
- def __init__(self, description=""):
- # type: (unicode) -> None
- self.description = description
-
- def __unicode__(self):
- # type: () -> unicode
- return self.description
-
+ @property
+ def description(self):
+ # type: () -> str
+ warnings.warn('%s.description is deprecated. '
+ 'Coerce the instance to a string instead.' % self.__class__.__name__,
+ RemovedInSphinx40Warning, stacklevel=2)
+ return str(self)
-class DefinitionError(UnicodeMixin, Exception):
- def __init__(self, description):
- # type: (unicode) -> None
- self.description = description
- def __unicode__(self):
- # type: () -> unicode
- return self.description
+class DefinitionError(Exception):
+ @property
+ def description(self):
+ # type: () -> str
+ warnings.warn('%s.description is deprecated. '
+ 'Coerce the instance to a string instead.' % self.__class__.__name__,
+ RemovedInSphinx40Warning, stacklevel=2)
+ return str(self)
-class _DuplicateSymbolError(UnicodeMixin, Exception):
+class _DuplicateSymbolError(Exception):
def __init__(self, symbol, declaration):
# type: (Symbol, Any) -> None
assert symbol
@@ -604,28 +603,24 @@ class _DuplicateSymbolError(UnicodeMixin, Exception):
self.symbol = symbol
self.declaration = declaration
- def __unicode__(self):
- # type: () -> unicode
+ def __str__(self):
+ # type: () -> str
return "Internal C++ duplicate symbol error:\n%s" % self.symbol.dump(0)
-class ASTBase(UnicodeMixin):
+class ASTBase:
def __eq__(self, other):
# type: (Any) -> bool
if type(self) is not type(other):
return False
try:
- for key, value in iteritems(self.__dict__):
+ for key, value in self.__dict__.items():
if value != getattr(other, key):
return False
except AttributeError:
return False
return True
- def __ne__(self, other):
- # type: (Any) -> bool
- return not self.__eq__(other)
-
__hash__ = None # type: Callable[[], int]
def clone(self):
@@ -634,15 +629,15 @@ class ASTBase(UnicodeMixin):
return deepcopy(self)
def _stringify(self, transform):
- # type: (Callable[[Any], unicode]) -> unicode
+ # type: (Callable[[Any], str]) -> str
raise NotImplementedError(repr(self))
- def __unicode__(self):
- # type: () -> unicode
- return self._stringify(lambda ast: text_type(ast))
+ def __str__(self):
+ # type: () -> str
+ return self._stringify(lambda ast: str(ast))
def get_display_string(self):
- # type: () -> unicode
+ # type: () -> str
return self._stringify(lambda ast: ast.get_display_string())
def __repr__(self):
@@ -651,7 +646,7 @@ class ASTBase(UnicodeMixin):
def _verify_description_mode(mode):
- # type: (unicode) -> None
+ # type: (str) -> None
if mode not in ('lastIsName', 'noneIsName', 'markType', 'param'):
raise Exception("Description mode '%s' is invalid." % mode)
@@ -662,7 +657,7 @@ def _verify_description_mode(mode):
class ASTCPPAttribute(ASTBase):
def __init__(self, arg):
- # type: (unicode) -> None
+ # type: (str) -> None
self.arg = arg
def _stringify(self, transform):
@@ -670,18 +665,19 @@ class ASTCPPAttribute(ASTBase):
def describe_signature(self, signode):
# type: (addnodes.desc_signature) -> None
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTGnuAttribute(ASTBase):
def __init__(self, name, args):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
self.name = name
self.args = args
def _stringify(self, transform):
- res = [self.name] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [self.name]
if self.args:
res.append('(')
res.append(transform(self.args))
@@ -695,7 +691,8 @@ class ASTGnuAttributeList(ASTBase):
self.attrs = attrs
def _stringify(self, transform):
- res = ['__attribute__(('] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['__attribute__((']
first = True
for attr in self.attrs:
if not first:
@@ -707,7 +704,7 @@ class ASTGnuAttributeList(ASTBase):
def describe_signature(self, signode):
# type: (addnodes.desc_signature) -> None
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
@@ -715,10 +712,11 @@ class ASTIdAttribute(ASTBase):
"""For simple attributes defined by the user."""
def __init__(self, id):
- # type: (unicode) -> None
+ # type: (str) -> None
self.id = id
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.id
def describe_signature(self, signode):
@@ -730,16 +728,17 @@ class ASTParenAttribute(ASTBase):
"""For paren attributes defined by the user."""
def __init__(self, id, arg):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.id = id
self.arg = arg
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.id + '(' + self.arg + ')'
def describe_signature(self, signode):
# type: (addnodes.desc_signature) -> None
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
@@ -749,9 +748,11 @@ class ASTParenAttribute(ASTBase):
class ASTPointerLiteral(ASTBase):
def _stringify(self, transform):
- return u'nullptr'
+ # type: (Callable[[Any], str]) -> str
+ return 'nullptr'
def get_id(self, version):
+ # type: (int) -> str
return 'LDnE'
def describe_signature(self, signode, mode, env, symbol):
@@ -763,45 +764,54 @@ class ASTBooleanLiteral(ASTBase):
self.value = value
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
if self.value:
- return u'true'
+ return 'true'
else:
- return u'false'
+ return 'false'
def get_id(self, version):
+ # type: (int) -> str
if self.value:
return 'L1E'
else:
return 'L0E'
def describe_signature(self, signode, mode, env, symbol):
- signode.append(nodes.Text(text_type(self)))
+ signode.append(nodes.Text(str(self)))
class ASTNumberLiteral(ASTBase):
def __init__(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.data = data
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.data
def get_id(self, version):
+ # type: (int) -> str
return "L%sE" % self.data
def describe_signature(self, signode, mode, env, symbol):
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
-class UnsupportedMultiCharacterCharLiteral(UnicodeMixin, Exception):
- def __init__(self, decoded):
- self.decoded = decoded
+class UnsupportedMultiCharacterCharLiteral(Exception):
+ @property
+ def decoded(self):
+ # type: () -> str
+ warnings.warn('%s.decoded is deprecated. '
+ 'Coerce the instance to a string instead.' % self.__class__.__name__,
+ RemovedInSphinx40Warning, stacklevel=2)
+ return str(self)
class ASTCharLiteral(ASTBase):
def __init__(self, prefix, data):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.prefix = prefix # may be None when no prefix
self.data = data
assert prefix in _id_char_from_prefix
@@ -813,41 +823,47 @@ class ASTCharLiteral(ASTBase):
raise UnsupportedMultiCharacterCharLiteral(decoded)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
if self.prefix is None:
return "'" + self.data + "'"
else:
return self.prefix + "'" + self.data + "'"
def get_id(self, version):
+ # type: (int) -> str
return self.type + str(self.value)
def describe_signature(self, signode, mode, env, symbol):
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTStringLiteral(ASTBase):
def __init__(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.data = data
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.data
def get_id(self, version):
+ # type: (int) -> str
# note: the length is not really correct with escaping
return "LA%d_KcE" % (len(self.data) - 2)
def describe_signature(self, signode, mode, env, symbol):
- txt = text_type(self)
+ txt = str(self)
signode.append(nodes.Text(txt, txt))
class ASTThisLiteral(ASTBase):
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "this"
def get_id(self, version):
+ # type: (int) -> str
return "fpT"
def describe_signature(self, signode, mode, env, symbol):
@@ -859,9 +875,11 @@ class ASTParenExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return '(' + transform(self.expr) + ')'
def get_id(self, version):
+ # type: (int) -> str
return self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -878,25 +896,27 @@ class ASTFoldExpr(ASTBase):
self.rightExpr = rightExpr
def _stringify(self, transform):
- res = [u'(']
+ # type: (Callable[[Any], str]) -> str
+ res = ['(']
if self.leftExpr:
res.append(transform(self.leftExpr))
- res.append(u' ')
+ res.append(' ')
res.append(transform(self.op))
- res.append(u' ')
- res.append(u'...')
+ res.append(' ')
+ res.append('...')
if self.rightExpr:
- res.append(u' ')
+ res.append(' ')
res.append(transform(self.op))
- res.append(u' ')
+ res.append(' ')
res.append(transform(self.rightExpr))
- res.append(u')')
- return u''.join(res)
+ res.append(')')
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
assert version >= 3
if version == 3:
- return text_type(self)
+ return str(self)
# TODO: find the right mangling scheme
assert False
@@ -924,6 +944,7 @@ class ASTBinOpExpr(ASTBase):
self.ops = ops
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
@@ -931,16 +952,17 @@ class ASTBinOpExpr(ASTBase):
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
assert version >= 2
res = []
for i in range(len(self.ops)):
res.append(_id_operator_v2[self.ops[i]])
res.append(self.exprs[i].get_id(version))
res.append(self.exprs[-1].get_id(version))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
self.exprs[0].describe_signature(signode, mode, env, symbol)
@@ -959,6 +981,7 @@ class ASTAssignmentExpr(ASTBase):
self.ops = ops
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.exprs[0]))
for i in range(1, len(self.exprs)):
@@ -966,15 +989,16 @@ class ASTAssignmentExpr(ASTBase):
res.append(self.ops[i - 1])
res.append(' ')
res.append(transform(self.exprs[i]))
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
res = []
for i in range(len(self.ops)):
res.append(_id_operator_v2[self.ops[i]])
res.append(self.exprs[i].get_id(version))
res.append(self.exprs[-1].get_id(version))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
self.exprs[0].describe_signature(signode, mode, env, symbol)
@@ -991,13 +1015,15 @@ class ASTCastExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
- res = [u'(']
+ # type: (Callable[[Any], str]) -> str
+ res = ['(']
res.append(transform(self.typ))
- res.append(u')')
+ res.append(')')
res.append(transform(self.expr))
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
return 'cv' + self.typ.get_id(version) + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1013,9 +1039,11 @@ class ASTUnaryOpExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.op) + transform(self.expr)
def get_id(self, version):
+ # type: (int) -> str
return _id_operator_unary_v2[self.op] + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1028,9 +1056,11 @@ class ASTSizeofParamPack(ASTBase):
self.identifier = identifier
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "sizeof...(" + transform(self.identifier) + ")"
def get_id(self, version):
+ # type: (int) -> str
return 'sZ' + self.identifier.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1045,9 +1075,11 @@ class ASTSizeofType(ASTBase):
self.typ = typ
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "sizeof(" + transform(self.typ) + ")"
def get_id(self, version):
+ # type: (int) -> str
return 'st' + self.typ.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1061,9 +1093,11 @@ class ASTSizeofExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "sizeof " + transform(self.expr)
def get_id(self, version):
+ # type: (int) -> str
return 'sz' + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1076,9 +1110,11 @@ class ASTAlignofExpr(ASTBase):
self.typ = typ
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "alignof(" + transform(self.typ) + ")"
def get_id(self, version):
+ # type: (int) -> str
return 'at' + self.typ.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1092,9 +1128,11 @@ class ASTNoexceptExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return "noexcept(" + transform(self.expr) + ")"
def get_id(self, version):
+ # type: (int) -> str
return 'nx' + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1105,7 +1143,7 @@ class ASTNoexceptExpr(ASTBase):
class ASTNewExpr(ASTBase):
def __init__(self, rooted, isNewTypeId, typ, initList, initType):
- # type: (bool, bool, ASTType, List[Any], unicode) -> None
+ # type: (bool, bool, ASTType, List[Any], str) -> None
self.rooted = rooted
self.isNewTypeId = isNewTypeId
self.typ = typ
@@ -1115,6 +1153,7 @@ class ASTNewExpr(ASTBase):
assert self.initType in ')}'
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
if self.rooted:
res.append('::')
@@ -1134,9 +1173,10 @@ class ASTNewExpr(ASTBase):
first = False
res.append(transform(e))
res.append(self.initType)
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
# the array part will be in the type mangling, so na is not used
res = ['nw']
# TODO: placement
@@ -1152,7 +1192,7 @@ class ASTNewExpr(ASTBase):
assert False
else:
res.append('E')
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
if self.rooted:
@@ -1184,6 +1224,7 @@ class ASTDeleteExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
if self.rooted:
res.append('::')
@@ -1191,9 +1232,10 @@ class ASTDeleteExpr(ASTBase):
if self.array:
res.append('[] ')
res.append(transform(self.expr))
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
if self.array:
id = "da"
else:
@@ -1217,15 +1259,17 @@ class ASTExplicitCast(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = [self.cast]
res.append('<')
res.append(transform(self.typ))
res.append('>(')
res.append(transform(self.expr))
res.append(')')
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
return (_id_explicit_cast[self.cast] +
self.typ.get_id(version) +
self.expr.get_id(version))
@@ -1246,9 +1290,11 @@ class ASTTypeId(ASTBase):
self.isType = isType
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return 'typeid(' + transform(self.typeOrExpr) + ')'
def get_id(self, version):
+ # type: (int) -> str
prefix = 'ti' if self.isType else 'te'
return prefix + self.typeOrExpr.get_id(version)
@@ -1264,22 +1310,24 @@ class ASTPostfixCallExpr(ASTBase):
self.exprs = exprs
def _stringify(self, transform):
- res = [u'(']
+ # type: (Callable[[Any], str]) -> str
+ res = ['(']
first = True
for e in self.exprs:
if not first:
- res.append(u', ')
+ res.append(', ')
first = False
res.append(transform(e))
- res.append(u')')
- return u''.join(res)
+ res.append(')')
+ return ''.join(res)
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
res = ['cl', idPrefix]
for e in self.exprs:
res.append(e.get_id(version))
res.append('E')
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
signode.append(nodes.Text('('))
@@ -1297,9 +1345,11 @@ class ASTPostfixArray(ASTBase):
self.expr = expr
def _stringify(self, transform):
- return u'[' + transform(self.expr) + ']'
+ # type: (Callable[[Any], str]) -> str
+ return '[' + transform(self.expr) + ']'
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
return 'ix' + idPrefix + self.expr.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1310,9 +1360,11 @@ class ASTPostfixArray(ASTBase):
class ASTPostfixInc(ASTBase):
def _stringify(self, transform):
- return u'++'
+ # type: (Callable[[Any], str]) -> str
+ return '++'
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
return 'pp' + idPrefix
def describe_signature(self, signode, mode, env, symbol):
@@ -1321,9 +1373,11 @@ class ASTPostfixInc(ASTBase):
class ASTPostfixDec(ASTBase):
def _stringify(self, transform):
- return u'--'
+ # type: (Callable[[Any], str]) -> str
+ return '--'
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
return 'mm' + idPrefix
def describe_signature(self, signode, mode, env, symbol):
@@ -1335,9 +1389,11 @@ class ASTPostfixMember(ASTBase):
self.name = name
def _stringify(self, transform):
- return u'.' + transform(self.name)
+ # type: (Callable[[Any], str]) -> str
+ return '.' + transform(self.name)
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
return 'dt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1350,9 +1406,11 @@ class ASTPostfixMemberOfPointer(ASTBase):
self.name = name
def _stringify(self, transform):
- return u'->' + transform(self.name)
+ # type: (Callable[[Any], str]) -> str
+ return '->' + transform(self.name)
def get_id(self, idPrefix, version):
+ # type: (str, int) -> str
return 'pt' + idPrefix + self.name.get_id(version)
def describe_signature(self, signode, mode, env, symbol):
@@ -1367,12 +1425,14 @@ class ASTPostfixExpr(ASTBase):
self.postFixes = postFixes
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = [transform(self.prefix)]
for p in self.postFixes:
res.append(transform(p))
- return u''.join(res)
+ return ''.join(res)
def get_id(self, version):
+ # type: (int) -> str
id = self.prefix.get_id(version)
for p in self.postFixes:
id = p.get_id(id, version)
@@ -1389,9 +1449,11 @@ class ASTPackExpansionExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.expr) + '...'
def get_id(self, version):
+ # type: (int) -> str
id = self.expr.get_id(version)
return 'sp' + id
@@ -1405,10 +1467,12 @@ class ASTFallbackExpr(ASTBase):
self.expr = expr
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.expr
def get_id(self, version):
- return text_type(self.expr)
+ # type: (int) -> str
+ return str(self.expr)
def describe_signature(self, signode, mode, env, symbol):
signode += nodes.Text(self.expr)
@@ -1420,7 +1484,7 @@ class ASTFallbackExpr(ASTBase):
class ASTIdentifier(ASTBase):
def __init__(self, identifier):
- # type: (unicode) -> None
+ # type: (str) -> None
assert identifier is not None
assert len(identifier) != 0
self.identifier = identifier
@@ -1429,7 +1493,7 @@ class ASTIdentifier(ASTBase):
return self.identifier[0] == '@'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if self.is_anon() and version < 3:
raise NoOldIdError()
if version == 1:
@@ -1444,22 +1508,22 @@ class ASTIdentifier(ASTBase):
return 'D0'
else:
if self.is_anon():
- return u'Ut%d_%s' % (len(self.identifier) - 1, self.identifier[1:])
+ return 'Ut%d_%s' % (len(self.identifier) - 1, self.identifier[1:])
else:
- return text_type(len(self.identifier)) + self.identifier
+ return str(len(self.identifier)) + self.identifier
- # and this is where we finally make a difference between __unicode__ and the display string
+ # and this is where we finally make a difference between __str__ and the display string
- def __unicode__(self):
- # type: () -> unicode
+ def __str__(self):
+ # type: () -> str
return self.identifier
def get_display_string(self):
- # type: () -> unicode
- return u"[anonymous]" if self.is_anon() else self.identifier
+ # type: () -> str
+ return "[anonymous]" if self.is_anon() else self.identifier
def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
- # type: (Any, unicode, BuildEnvironment, unicode, unicode, Symbol) -> None
+ # type: (Any, str, BuildEnvironment, str, str, Symbol) -> None
_verify_description_mode(mode)
if mode == 'markType':
targetText = prefix + self.identifier + templateArgs
@@ -1490,7 +1554,7 @@ class ASTIdentifier(ASTBase):
class ASTTemplateKeyParamPackIdDefault(ASTBase):
def __init__(self, key, identifier, parameterPack, default):
- # type: (unicode, ASTIdentifier, bool, ASTType) -> None
+ # type: (str, ASTIdentifier, bool, ASTType) -> None
assert key
if parameterPack:
assert default is None
@@ -1504,7 +1568,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return self.identifier
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
res = []
@@ -1515,7 +1579,8 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def _stringify(self, transform):
- res = [self.key] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [self.key]
if self.parameterPack:
if self.identifier:
res.append(' ')
@@ -1530,7 +1595,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode += nodes.Text(self.key)
if self.parameterPack:
if self.identifier:
@@ -1559,6 +1624,7 @@ class ASTTemplateParamType(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.data.parameterPack
def get_identifier(self):
@@ -1566,7 +1632,7 @@ class ASTTemplateParamType(ASTBase):
return self.data.get_identifier()
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
@@ -1576,10 +1642,11 @@ class ASTTemplateParamType(ASTBase):
return self.data.get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.data)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.data.describe_signature(signode, mode, env, symbol)
@@ -1597,10 +1664,11 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.type.isPack
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
assert version >= 2
if symbol:
@@ -1610,6 +1678,7 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
return self.type.get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = transform(self.type)
if self.init:
res += " = "
@@ -1617,7 +1686,7 @@ class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
return res
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
signode += nodes.Text(" = ")
@@ -1640,6 +1709,7 @@ class ASTTemplateParamTemplateType(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.data.parameterPack
def get_identifier(self):
@@ -1647,7 +1717,7 @@ class ASTTemplateParamTemplateType(ASTBase):
return self.data.get_identifier()
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1657,10 +1727,11 @@ class ASTTemplateParamTemplateType(ASTBase):
return self.nestedParams.get_id(version) + self.data.get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.nestedParams) + transform(self.data)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.nestedParams.describe_signature(signode, 'noneIsName', env, symbol)
signode += nodes.Text(' ')
self.data.describe_signature(signode, mode, env, symbol)
@@ -1680,6 +1751,7 @@ class ASTTemplateParamNonType(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.param.isPack
def get_identifier(self):
@@ -1694,7 +1766,7 @@ class ASTTemplateParamNonType(ASTBase):
return None
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1704,10 +1776,11 @@ class ASTTemplateParamNonType(ASTBase):
return '_' + self.param.get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.param)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.param.describe_signature(signode, mode, env, symbol)
@@ -1719,7 +1792,7 @@ class ASTTemplateParams(ASTBase):
self.isNested = False # whether it's a template template param
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
res = []
res.append("I")
@@ -1729,14 +1802,15 @@ class ASTTemplateParams(ASTBase):
return ''.join(res)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
- res.append(u"template<")
- res.append(u", ".join(transform(a) for a in self.params))
- res.append(u"> ")
+ res.append("template<")
+ res.append(", ".join(transform(a) for a in self.params))
+ res.append("> ")
return ''.join(res)
def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
# 'lineSpec' is defaulted becuase of template template parameters
def makeLine(parentNode=parentNode):
signode = addnodes.desc_signature_line()
@@ -1775,6 +1849,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.parameterPack
def get_identifier(self):
@@ -1782,7 +1857,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
return self.identifier
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
@@ -1795,24 +1870,25 @@ class ASTTemplateIntroductionParameter(ASTBase):
return '0' # we need to put something
def get_id_as_arg(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# used for the implicit requires clause
res = self.identifier.get_id(version)
if self.parameterPack:
- return u'sp' + res
+ return 'sp' + res
else:
return res
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.parameterPack:
res.append('...')
res.append(transform(self.identifier))
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
if self.parameterPack:
signode += nodes.Text('...')
self.identifier.describe_signature(signode, mode, env, '', '', symbol)
@@ -1826,7 +1902,7 @@ class ASTTemplateIntroduction(ASTBase):
self.params = params
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# first do the same as a normal template parameter list
res = []
@@ -1845,6 +1921,7 @@ class ASTTemplateIntroduction(ASTBase):
return ''.join(res)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.concept))
res.append('{')
@@ -1853,7 +1930,7 @@ class ASTTemplateIntroduction(ASTBase):
return ''.join(res)
def describe_signature(self, parentNode, mode, env, symbol, lineSpec):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
# Note: 'lineSpec' has no effect on template introductions.
signode = addnodes.desc_signature_line()
parentNode += signode
@@ -1876,22 +1953,23 @@ class ASTTemplateDeclarationPrefix(ASTBase):
self.templates = templates
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
assert version >= 2
# this is not part of a normal name mangling system
res = []
for t in self.templates:
res.append(t.get_id(version))
- return u''.join(res)
+ return ''.join(res)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
for t in self.templates:
res.append(transform(t))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol, lineSpec):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol, bool) -> None
_verify_description_mode(mode)
for t in self.templates:
t.describe_signature(signode, 'lastIsName', env, symbol, lineSpec)
@@ -1909,13 +1987,13 @@ class ASTOperator(ASTBase):
return True
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
raise NotImplementedError()
def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
- # type: (addnodes.desc_signature, unicode, Any, unicode, unicode, Symbol) -> None
+ # type: (addnodes.desc_signature, str, Any, str, str, Symbol) -> None
_verify_description_mode(mode)
- identifier = text_type(self)
+ identifier = str(self)
if mode == 'lastIsName':
signode += addnodes.desc_name(identifier, identifier)
else:
@@ -1924,11 +2002,11 @@ class ASTOperator(ASTBase):
class ASTOperatorBuildIn(ASTOperator):
def __init__(self, op):
- # type: (unicode) -> None
+ # type: (str) -> None
self.op = op
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
ids = _id_operator_v1
else:
@@ -1939,10 +2017,11 @@ class ASTOperatorBuildIn(ASTOperator):
return ids[self.op]
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
if self.op in ('new', 'new[]', 'delete', 'delete[]'):
- return u'operator ' + self.op
+ return 'operator ' + self.op
else:
- return u'operator' + self.op
+ return 'operator' + self.op
class ASTOperatorType(ASTOperator):
@@ -1951,18 +2030,19 @@ class ASTOperatorType(ASTOperator):
self.type = type
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- return u'castto-%s-operator' % self.type.get_id(version)
+ return 'castto-%s-operator' % self.type.get_id(version)
else:
- return u'cv' + self.type.get_id(version)
+ return 'cv' + self.type.get_id(version)
def _stringify(self, transform):
- return u''.join(['operator ', transform(self.type)])
+ # type: (Callable[[Any], str]) -> str
+ return ''.join(['operator ', transform(self.type)])
def get_name_no_template(self):
- # type: () -> unicode
- return text_type(self)
+ # type: () -> str
+ return str(self)
class ASTOperatorLiteral(ASTOperator):
@@ -1971,14 +2051,15 @@ class ASTOperatorLiteral(ASTOperator):
self.identifier = identifier
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
- return u'li' + self.identifier.get_id(version)
+ return 'li' + self.identifier.get_id(version)
def _stringify(self, transform):
- return u'operator""' + transform(self.identifier)
+ # type: (Callable[[Any], str]) -> str
+ return 'operator""' + transform(self.identifier)
##############################################################################################
@@ -1990,18 +2071,19 @@ class ASTTemplateArgConstant(ASTBase):
self.value = value
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.value)
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- return text_type(self).replace(u' ', u'-')
+ return str(self).replace(' ', '-')
if version == 2:
- return u'X' + text_type(self) + u'E'
- return u'X' + self.value.get_id(version) + u'E'
+ return 'X' + str(self) + 'E'
+ return 'X' + self.value.get_id(version) + 'E'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.value.describe_signature(signode, mode, env, symbol)
@@ -2013,27 +2095,28 @@ class ASTTemplateArgs(ASTBase):
self.args = args
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- res = [] # type: List[unicode]
+ res = []
res.append(':')
- res.append(u'.'.join(a.get_id(version) for a in self.args))
+ res.append('.'.join(a.get_id(version) for a in self.args))
res.append(':')
- return u''.join(res)
+ return ''.join(res)
res = []
res.append('I')
for a in self.args:
res.append(a.get_id(version))
res.append('E')
- return u''.join(res)
+ return ''.join(res)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = ', '.join(transform(a) for a in self.args)
return '<' + res + '>'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('<')
first = True
@@ -2056,21 +2139,22 @@ class ASTNestedNameElement(ASTBase):
return False
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
res = self.identOrOp.get_id(version)
if self.templateArgs:
res += self.templateArgs.get_id(version)
return res
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = transform(self.identOrOp)
if self.templateArgs:
res += transform(self.templateArgs)
return res
def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
- tArgs = text_type(self.templateArgs) if self.templateArgs is not None else ''
+ # type: (addnodes.desc_signature, str, BuildEnvironment, str, Symbol) -> None
+ tArgs = str(self.templateArgs) if self.templateArgs is not None else ''
self.identOrOp.describe_signature(signode, mode, env, prefix, tArgs, symbol)
if self.templateArgs is not None:
self.templateArgs.describe_signature(signode, mode, env, symbol)
@@ -2101,14 +2185,15 @@ class ASTNestedName(ASTBase):
return count
def get_id(self, version, modifiers=''):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
if version == 1:
- tt = text_type(self)
+ tt = str(self)
if tt in _id_shorthands_v1:
return _id_shorthands_v1[tt]
else:
- return u'::'.join(n.get_id(version) for n in self.names)
- res = [] # type: List[unicode]
+ return '::'.join(n.get_id(version) for n in self.names)
+
+ res = []
if len(self.names) > 1 or len(modifiers) > 0:
res.append('N')
res.append(modifiers)
@@ -2116,10 +2201,11 @@ class ASTNestedName(ASTBase):
res.append(n.get_id(version))
if len(self.names) > 1 or len(modifiers) > 0:
res.append('E')
- return u''.join(res)
+ return ''.join(res)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.rooted:
res.append('')
for i in range(len(self.names)):
@@ -2132,13 +2218,13 @@ class ASTNestedName(ASTBase):
return '::'.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'noneIsName':
- signode += nodes.Text(text_type(self))
+ signode += nodes.Text(str(self))
elif mode == 'param':
- name = text_type(self)
+ name = str(self)
signode += nodes.emphasis(name, name)
elif mode == 'markType' or mode == 'lastIsName':
# Each element should be a pending xref targeting the complete
@@ -2151,8 +2237,8 @@ class ASTNestedName(ASTBase):
if symbol.declaration.templatePrefix is not None:
templateParams = symbol.declaration.templatePrefix.templates
iTemplateParams = 0
- templateParamsPrefix = u''
- prefix = '' # type: unicode
+ templateParamsPrefix = ''
+ prefix = ''
first = True
names = self.names[:-1] if mode == 'lastIsName' else self.names
# If lastIsName, then wrap all of the prefix in a desc_addname,
@@ -2161,7 +2247,7 @@ class ASTNestedName(ASTBase):
# so it can remove it in inner declarations.
dest = signode
if mode == 'lastIsName':
- dest = addnodes.desc_addname()
+ dest = addnodes.desc_addname() # type: ignore
for i in range(len(names)):
nne = names[i]
template = self.templates[i]
@@ -2171,10 +2257,10 @@ class ASTNestedName(ASTBase):
if template:
dest += nodes.Text("template ")
first = False
- txt_nne = text_type(nne)
+ txt_nne = str(nne)
if txt_nne != '':
if nne.templateArgs and iTemplateParams < len(templateParams):
- templateParamsPrefix += text_type(templateParams[iTemplateParams])
+ templateParamsPrefix += str(templateParams[iTemplateParams])
iTemplateParams += 1
nne.describe_signature(dest, 'markType',
env, templateParamsPrefix + prefix, symbol)
@@ -2192,14 +2278,15 @@ class ASTNestedName(ASTBase):
class ASTTrailingTypeSpecFundamental(ASTBase):
def __init__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.name = name
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return self.name
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = []
for a in self.name.split(' '):
@@ -2207,7 +2294,7 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
res.append(_id_fundamental_v1[a])
else:
res.append(a)
- return u'-'.join(res)
+ return '-'.join(res)
if self.name not in _id_fundamental_v2:
raise Exception(
@@ -2217,13 +2304,13 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
return _id_fundamental_v2[self.name]
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
- signode += nodes.Text(text_type(self.name))
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
+ signode += nodes.Text(str(self.name))
class ASTTrailingTypeSpecName(ASTBase):
def __init__(self, prefix, nestedName):
- # type: (unicode, Any) -> None
+ # type: (str, Any) -> None
self.prefix = prefix
self.nestedName = nestedName
@@ -2233,19 +2320,20 @@ class ASTTrailingTypeSpecName(ASTBase):
return self.nestedName
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.nestedName.get_id(version)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.prefix:
res.append(self.prefix)
res.append(' ')
res.append(transform(self.nestedName))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
if self.prefix:
signode += addnodes.desc_annotation(self.prefix, self.prefix)
signode += nodes.Text(' ')
@@ -2254,16 +2342,18 @@ class ASTTrailingTypeSpecName(ASTBase):
class ASTTrailingTypeSpecDecltypeAuto(ASTBase):
def _stringify(self, transform):
- return u'decltype(auto)'
+ # type: (Callable[[Any], str]) -> str
+ return 'decltype(auto)'
def get_id(self, version):
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
return 'Dc'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
- signode.append(nodes.Text(text_type(self)))
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
+ signode.append(nodes.Text(str(self)))
class ASTTrailingTypeSpecDecltype(ASTBase):
@@ -2271,14 +2361,17 @@ class ASTTrailingTypeSpecDecltype(ASTBase):
self.expr = expr
def _stringify(self, transform):
- return u'decltype(' + transform(self.expr) + ')'
+ # type: (Callable[[Any], str]) -> str
+ return 'decltype(' + transform(self.expr) + ')'
def get_id(self, version):
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
return 'DT' + self.expr.get_id(version) + "E"
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
signode.append(nodes.Text('decltype('))
self.expr.describe_signature(signode, mode, env, symbol)
signode.append(nodes.Text(')'))
@@ -2291,7 +2384,7 @@ class ASTFunctionParameter(ASTBase):
self.ellipsis = ellipsis
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -2303,13 +2396,14 @@ class ASTFunctionParameter(ASTBase):
return self.arg.get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
if self.ellipsis:
return '...'
else:
return transform(self.arg)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
@@ -2320,7 +2414,7 @@ class ASTFunctionParameter(ASTBase):
class ASTParametersQualifiers(ASTBase):
def __init__(self, args, volatile, const, refQual, exceptionSpec, override,
final, initializer):
- # type: (List[Any], bool, bool, unicode, unicode, bool, bool, unicode) -> None
+ # type: (List[Any], bool, bool, str, str, bool, bool, str) -> None
self.args = args
self.volatile = volatile
self.const = const
@@ -2336,7 +2430,7 @@ class ASTParametersQualifiers(ASTBase):
return self.args
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
res = []
if self.volatile:
res.append('V')
@@ -2349,29 +2443,30 @@ class ASTParametersQualifiers(ASTBase):
res.append('O')
elif self.refQual == '&':
res.append('R')
- return u''.join(res)
+ return ''.join(res)
def get_param_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
if len(self.args) == 0:
return ''
else:
- return u'__' + u'.'.join(a.get_id(version) for a in self.args)
+ return '__' + '.'.join(a.get_id(version) for a in self.args)
if len(self.args) == 0:
return 'v'
else:
- return u''.join(a.get_id(version) for a in self.args)
+ return ''.join(a.get_id(version) for a in self.args)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
res.append('(')
first = True
for a in self.args:
if not first:
res.append(', ')
first = False
- res.append(text_type(a))
+ res.append(str(a))
res.append(')')
if self.volatile:
res.append(' volatile')
@@ -2382,7 +2477,7 @@ class ASTParametersQualifiers(ASTBase):
res.append(self.refQual)
if self.exceptionSpec:
res.append(' ')
- res.append(text_type(self.exceptionSpec))
+ res.append(str(self.exceptionSpec))
if self.final:
res.append(' final')
if self.override:
@@ -2390,10 +2485,10 @@ class ASTParametersQualifiers(ASTBase):
if self.initializer:
res.append(' = ')
res.append(self.initializer)
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
@@ -2419,19 +2514,19 @@ class ASTParametersQualifiers(ASTBase):
if self.refQual:
_add_text(signode, self.refQual)
if self.exceptionSpec:
- _add_anno(signode, text_type(self.exceptionSpec))
+ _add_anno(signode, str(self.exceptionSpec))
if self.final:
_add_anno(signode, 'final')
if self.override:
_add_anno(signode, 'override')
if self.initializer:
- _add_text(signode, '= ' + text_type(self.initializer))
+ _add_text(signode, '= ' + str(self.initializer))
class ASTDeclSpecsSimple(ASTBase):
def __init__(self, storage, threadLocal, inline, virtual, explicit,
constexpr, volatile, const, friend, attrs):
- # type: (unicode, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None
+ # type: (str, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
@@ -2459,7 +2554,8 @@ class ASTDeclSpecsSimple(ASTBase):
self.attrs + other.attrs)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [] # type: List[str]
res.extend(transform(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
@@ -2479,7 +2575,7 @@ class ASTDeclSpecsSimple(ASTBase):
res.append('volatile')
if self.const:
res.append('const')
- return u' '.join(res)
+ return ' '.join(res)
def describe_signature(self, modifiers):
# type: (List[nodes.Node]) -> None
@@ -2527,7 +2623,7 @@ class ASTDeclSpecs(ASTBase):
return self.trailingTypeSpec.name
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = []
res.append(self.trailingTypeSpec.get_id(version))
@@ -2535,17 +2631,18 @@ class ASTDeclSpecs(ASTBase):
res.append('V')
if self.allSpecs.const:
res.append('C')
- return u''.join(res)
+ return ''.join(res)
res = []
if self.leftSpecs.volatile or self.rightSpecs.volatile:
res.append('V')
if self.leftSpecs.const or self.rightSpecs.volatile:
res.append('K')
res.append(self.trailingTypeSpec.get_id(version))
- return u''.join(res)
+ return ''.join(res)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = [] # type: List[str]
l = transform(self.leftSpecs)
if len(l) > 0:
if len(res) > 0:
@@ -2555,7 +2652,7 @@ class ASTDeclSpecs(ASTBase):
if len(res) > 0:
res.append(" ")
res.append(transform(self.trailingTypeSpec))
- r = text_type(self.rightSpecs)
+ r = str(self.rightSpecs)
if len(r) > 0:
if len(res) > 0:
res.append(" ")
@@ -2563,7 +2660,7 @@ class ASTDeclSpecs(ASTBase):
return "".join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
modifiers = [] # type: List[nodes.Node]
@@ -2594,24 +2691,25 @@ class ASTArray(ASTBase):
self.size = size
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
if self.size:
- return u'[' + transform(self.size) + ']'
+ return '[' + transform(self.size) + ']'
else:
- return u'[]'
+ return '[]'
def get_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- return u'A'
+ return 'A'
if version == 2:
if self.size:
- return u'A' + text_type(self.size) + u'_'
+ return 'A' + str(self.size) + '_'
else:
- return u'A_'
+ return 'A_'
if self.size:
- return u'A' + self.size.get_id(version) + u'_'
+ return 'A' + self.size.get_id(version) + '_'
else:
- return u'A_'
+ return 'A_'
def describe_signature(self, signode, mode, env, symbol):
_verify_description_mode(mode)
@@ -2646,7 +2744,8 @@ class ASTDeclaratorPtr(ASTBase):
return True
def _stringify(self, transform):
- res = ['*'] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['*']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and (self.volatile or self.const):
@@ -2661,18 +2760,18 @@ class ASTDeclaratorPtr(ASTBase):
if self.next.require_space_after_declSpecs:
res.append(' ')
res.append(transform(self.next))
- return u''.join(res)
+ return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
res = ['P']
if self.volatile:
@@ -2680,7 +2779,7 @@ class ASTDeclaratorPtr(ASTBase):
if self.const:
res.append('C')
res.append(self.next.get_ptr_suffix_id(version))
- return u''.join(res)
+ return ''.join(res)
res = [self.next.get_ptr_suffix_id(version)]
res.append('P')
@@ -2688,25 +2787,25 @@ class ASTDeclaratorPtr(ASTBase):
res.append('V')
if self.const:
res.append('C')
- return u''.join(res)
+ return ''.join(res)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
# ReturnType *next, so we are part of the return type of 'next
- res = ['P'] # type: List[unicode]
+ res = ['P']
if self.volatile:
res.append('V')
if self.const:
res.append('C')
res.append(returnTypeId)
- return self.next.get_type_id(version, returnTypeId=u''.join(res))
+ return self.next.get_type_id(version, returnTypeId=''.join(res))
def is_function_type(self):
# type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("*")
for a in self.attrs:
@@ -2742,6 +2841,7 @@ class ASTDeclaratorRef(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return True
@property
@@ -2754,41 +2854,42 @@ class ASTDeclaratorRef(ASTBase):
return self.next.require_space_after_declSpecs()
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = ['&']
for a in self.attrs:
res.append(transform(a))
if len(self.attrs) > 0 and self.next.require_space_after_declSpecs:
res.append(' ')
res.append(transform(self.next))
- return u''.join(res)
+ return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
- return u'R' + self.next.get_ptr_suffix_id(version)
+ return 'R' + self.next.get_ptr_suffix_id(version)
else:
- return self.next.get_ptr_suffix_id(version) + u'R'
+ return self.next.get_ptr_suffix_id(version) + 'R'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType &next, so we are part of the return type of 'next
- return self.next.get_type_id(version, returnTypeId=u'R' + returnTypeId)
+ return self.next.get_type_id(version, returnTypeId='R' + returnTypeId)
def is_function_type(self):
# type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("&")
for a in self.attrs:
@@ -2819,38 +2920,39 @@ class ASTDeclaratorParamPack(ASTBase):
return False
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = transform(self.next)
if self.next.name:
res = ' ' + res
return '...' + res
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
return 'Dp' + self.next.get_ptr_suffix_id(version)
else:
- return self.next.get_ptr_suffix_id(version) + u'Dp'
+ return self.next.get_ptr_suffix_id(version) + 'Dp'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType... next, so we are part of the return type of 'next
- return self.next.get_type_id(version, returnTypeId=u'Dp' + returnTypeId)
+ return self.next.get_type_id(version, returnTypeId='Dp' + returnTypeId)
def is_function_type(self):
# type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("...")
if self.next.name:
@@ -2883,6 +2985,7 @@ class ASTDeclaratorMemPtr(ASTBase):
return True
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.className))
res.append('::*')
@@ -2896,32 +2999,32 @@ class ASTDeclaratorMemPtr(ASTBase):
return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
return self.next.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
return self.next.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError()
else:
raise NotImplementedError()
- return self.next.get_ptr_suffix_id(version) + u'Dp'
+ return self.next.get_ptr_suffix_id(version) + 'Dp'
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType name::* next, so we are part of the return type of next
- nextReturnTypeId = '' # type: unicode
+ nextReturnTypeId = ''
if self.volatile:
nextReturnTypeId += 'V'
if self.const:
@@ -2936,7 +3039,7 @@ class ASTDeclaratorMemPtr(ASTBase):
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.className.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('::*')
@@ -2979,22 +3082,23 @@ class ASTDeclaratorParen(ASTBase):
return True
def _stringify(self, transform):
- res = ['('] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = ['(']
res.append(transform(self.inner))
res.append(')')
res.append(transform(self.next))
return ''.join(res)
def get_modifiers_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
return self.inner.get_modifiers_id(version)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
return self.inner.get_param_id(version)
def get_ptr_suffix_id(self, version):
- # type: (int) -> unicode
+ # type: (int) -> str
if version == 1:
raise NoOldIdError() # TODO: was this implemented before?
return self.next.get_ptr_suffix_id(version) + \
@@ -3004,7 +3108,7 @@ class ASTDeclaratorParen(ASTBase):
self.next.get_ptr_suffix_id(version)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
# ReturnType (inner)next, so 'inner' returns everything outside
nextId = self.next.get_type_id(version, returnTypeId)
@@ -3015,7 +3119,7 @@ class ASTDeclaratorParen(ASTBase):
return self.inner.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('(')
self.inner.describe_signature(signode, mode, env, symbol)
@@ -3037,6 +3141,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return False
@property
@@ -3045,26 +3150,25 @@ class ASTDeclaratorNameParamQual(ASTBase):
return self.paramQual.function_params
def get_modifiers_id(self, version): # only the modifiers for a function, e.g.,
- # type: (int) -> unicode
+ # type: (int) -> str
# cv-qualifiers
if self.paramQual:
return self.paramQual.get_modifiers_id(version)
- raise Exception(
- "This should only be called on a function: %s" % text_type(self))
+ raise Exception("This should only be called on a function: %s" % self)
def get_param_id(self, version): # only the parameters (if any)
- # type: (int) -> unicode
+ # type: (int) -> str
if self.paramQual:
return self.paramQual.get_param_id(version)
else:
return ''
def get_ptr_suffix_id(self, version): # only the array specifiers
- # type: (int) -> unicode
- return u''.join(a.get_id(version) for a in self.arrayOps)
+ # type: (int) -> str
+ return ''.join(a.get_id(version) for a in self.arrayOps)
def get_type_id(self, version, returnTypeId):
- # type: (int, unicode) -> unicode
+ # type: (int, str) -> str
assert version >= 2
res = []
# TOOD: can we actually have both array ops and paramQual?
@@ -3077,7 +3181,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
res.append('E')
else:
res.append(returnTypeId)
- return u''.join(res)
+ return ''.join(res)
# ------------------------------------------------------------------------
@@ -3090,6 +3194,7 @@ class ASTDeclaratorNameParamQual(ASTBase):
return self.paramQual is not None
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
if self.declId:
res.append(transform(self.declId))
@@ -3097,10 +3202,10 @@ class ASTDeclaratorNameParamQual(ASTBase):
res.append(transform(op))
if self.paramQual:
res.append(transform(self.paramQual))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
@@ -3115,10 +3220,11 @@ class ASTInitializer(ASTBase):
self.value = value
def _stringify(self, transform):
- return u' = ' + transform(self.value)
+ # type: (Callable[[Any], str]) -> str
+ return ' = ' + transform(self.value)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode.append(nodes.Text(' = '))
self.value.describe_signature(signode, 'markType', env, symbol)
@@ -3139,6 +3245,7 @@ class ASTType(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.decl.isPack
@property
@@ -3147,7 +3254,7 @@ class ASTType(ASTBase):
return self.decl.function_params
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
res = []
if objectType: # needs the name
@@ -3170,7 +3277,7 @@ class ASTType(ASTBase):
res.append(self.declSpecs.get_id(version))
res.append(self.decl.get_ptr_suffix_id(version))
res.append(self.decl.get_param_id(version))
- return u''.join(res)
+ return ''.join(res)
# other versions
res = []
if objectType: # needs the name
@@ -3189,30 +3296,31 @@ class ASTType(ASTBase):
returnTypeId = self.declSpecs.get_id(version)
typeId = self.decl.get_type_id(version, returnTypeId)
res.append(typeId)
- return u''.join(res)
+ return ''.join(res)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
declSpecs = transform(self.declSpecs)
res.append(declSpecs)
if self.decl.require_space_after_declSpecs() and len(declSpecs) > 0:
- res.append(u' ')
+ res.append(' ')
res.append(transform(self.decl))
- return u''.join(res)
+ return ''.join(res)
def get_type_declaration_prefix(self):
- # type: () -> unicode
+ # type: () -> str
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
- len(text_type(self.declSpecs)) > 0):
+ len(str(self.declSpecs)) > 0):
signode += nodes.Text(' ')
# for parameters that don't really declare new names we get 'markType',
# this should not be propagated, but be 'noneIsName'.
@@ -3234,26 +3342,28 @@ class ASTTypeWithInit(ASTBase):
@property
def isPack(self):
+ # type: () -> bool
return self.type.isPack
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if objectType != 'member':
return self.type.get_id(version, objectType)
if version == 1:
- return symbol.get_full_nested_name().get_id(version) + u'__' \
- + self.type.get_id(version)
+ return (symbol.get_full_nested_name().get_id(version) + '__' +
+ self.type.get_id(version))
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.type))
if self.init:
res.append(transform(self.init))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol)
if self.init:
@@ -3267,25 +3377,26 @@ class ASTTypeUsing(ASTBase):
self.type = type
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.type:
res.append(' = ')
res.append(transform(self.type))
- return u''.join(res)
+ return ''.join(res)
def get_type_declaration_prefix(self):
- # type: () -> unicode
+ # type: () -> str
return 'using'
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.type:
@@ -3305,19 +3416,20 @@ class ASTConcept(ASTBase):
return self.nestedName
def get_id(self, version, objectType=None, symbol=None):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = transform(self.nestedName)
if self.initializer:
res += transform(self.initializer)
return res
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
self.nestedName.describe_signature(signode, mode, env, symbol)
if self.initializer:
self.initializer.describe_signature(signode, mode, env, symbol)
@@ -3325,14 +3437,15 @@ class ASTConcept(ASTBase):
class ASTBaseClass(ASTBase):
def __init__(self, name, visibility, virtual, pack):
- # type: (Any, unicode, bool, bool) -> None
+ # type: (Any, str, bool, bool) -> None
self.name = name
self.visibility = visibility
self.virtual = virtual
self.pack = pack
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.visibility != 'private':
res.append(self.visibility)
res.append(' ')
@@ -3341,10 +3454,10 @@ class ASTBaseClass(ASTBase):
res.append(transform(self.name))
if self.pack:
res.append('...')
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.visibility != 'private':
signode += addnodes.desc_annotation(self.visibility,
@@ -3366,10 +3479,11 @@ class ASTClass(ASTBase):
self.bases = bases
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.final:
@@ -3382,10 +3496,10 @@ class ASTClass(ASTBase):
res.append(', ')
first = False
res.append(transform(b))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.final:
@@ -3405,35 +3519,37 @@ class ASTUnion(ASTBase):
self.name = name
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
return transform(self.name)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
class ASTEnum(ASTBase):
def __init__(self, name, scoped, underlyingType):
- # type: (Any, unicode, Any) -> None
+ # type: (Any, str, Any) -> None
self.name = name
self.scoped = scoped
self.underlyingType = underlyingType
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.scoped:
res.append(self.scoped)
res.append(' ')
@@ -3441,10 +3557,10 @@ class ASTEnum(ASTBase):
if self.underlyingType:
res.append(' : ')
res.append(transform(self.underlyingType))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# self.scoped has been done by the CPPEnumObject
self.name.describe_signature(signode, mode, env, symbol=symbol)
@@ -3461,20 +3577,21 @@ class ASTEnumerator(ASTBase):
self.init = init
def get_id(self, version, objectType, symbol):
- # type: (int, unicode, Symbol) -> unicode
+ # type: (int, str, Symbol) -> str
if version == 1:
raise NoOldIdError()
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform):
+ # type: (Callable[[Any], str]) -> str
res = []
res.append(transform(self.name))
if self.init:
res.append(transform(self.init))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol)
if self.init:
@@ -3483,7 +3600,7 @@ class ASTEnumerator(ASTBase):
class ASTDeclaration(ASTBase):
def __init__(self, objectType, visibility, templatePrefix, declaration):
- # type: (unicode, unicode, Any, Any) -> None
+ # type: (str, str, Any, Any) -> None
self.objectType = objectType
self.visibility = visibility
self.templatePrefix = templatePrefix
@@ -3516,7 +3633,7 @@ class ASTDeclaration(ASTBase):
return self.declaration.function_params
def get_id(self, version, prefixed=True):
- # type: (int, bool) -> unicode
+ # type: (int, bool) -> str
if version == 1:
if self.templatePrefix:
raise NoOldIdError()
@@ -3533,24 +3650,25 @@ class ASTDeclaration(ASTBase):
if self.templatePrefix:
res.append(self.templatePrefix.get_id(version))
res.append(self.declaration.get_id(version, self.objectType, self.symbol))
- return u''.join(res)
+ return ''.join(res)
def get_newest_id(self):
- # type: () -> unicode
+ # type: () -> str
return self.get_id(_max_id, True)
def _stringify(self, transform):
- res = [] # type: List[unicode]
+ # type: (Callable[[Any], str]) -> str
+ res = []
if self.visibility and self.visibility != "public":
res.append(self.visibility)
- res.append(u' ')
+ res.append(' ')
if self.templatePrefix:
res.append(transform(self.templatePrefix))
res.append(transform(self.declaration))
- return u''.join(res)
+ return ''.join(res)
def describe_signature(self, signode, mode, env, options):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, Dict) -> None
+ # type: (addnodes.desc_signature, str, BuildEnvironment, Dict) -> None
_verify_description_mode(mode)
assert self.symbol
# The caller of the domain added a desc_signature node.
@@ -3603,7 +3721,7 @@ class ASTNamespace(ASTBase):
self.templatePrefix = templatePrefix
-class SymbolLookupResult(object):
+class SymbolLookupResult:
def __init__(self, symbols, parentSymbol, identOrOp, templateParams, templateArgs):
# type: (Iterator[Symbol], Symbol, Union[ASTIdentifier, ASTOperator], Any, ASTTemplateArgs) -> None # NOQA
self.symbols = symbols
@@ -3613,7 +3731,7 @@ class SymbolLookupResult(object):
self.templateArgs = templateArgs
-class Symbol(object):
+class Symbol:
debug_lookup = False
debug_show_tree = False
@@ -3634,7 +3752,7 @@ class Symbol(object):
if key == "children":
assert False
else:
- return object.__setattr__(self, key, value)
+ return super().__setattr__(key, value)
def __init__(self,
parent, # type: Symbol
@@ -3642,7 +3760,7 @@ class Symbol(object):
templateParams, # type: Any
templateArgs, # type: Any
declaration, # type: ASTDeclaration
- docname # type: unicode
+ docname # type: str
):
# type: (...) -> None
self.parent = parent
@@ -3667,7 +3785,7 @@ class Symbol(object):
self._add_template_and_function_params()
def _fill_empty(self, declaration, docname):
- # type: (ASTDeclaration, unicode) -> None
+ # type: (ASTDeclaration, str) -> None
self._assert_invariants()
assert not self.declaration
assert not self.docname
@@ -3719,7 +3837,7 @@ class Symbol(object):
self.parent = None
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
newChildren = []
for sChild in self._children:
sChild.clear_doc(docname)
@@ -3742,9 +3860,8 @@ class Symbol(object):
yield c
if not c.identOrOp.is_anon():
continue
- # TODO: change to 'yield from' when Python 2 support is dropped
- for nested in c.children_recurse_anon:
- yield nested
+
+ yield from c.children_recurse_anon
def get_lookup_key(self):
# type: () -> List[Tuple[ASTNestedNameElement, Any]]
@@ -3811,8 +3928,8 @@ class Symbol(object):
param = templateParams.params[i]
arg = templateArgs.args[i]
# TODO: doing this by string manipulation is probably not the most efficient
- paramName = text_type(param.name)
- argTxt = text_type(arg)
+ paramName = str(param.name)
+ argTxt = str(arg)
isArgPackExpansion = argTxt.endswith('...')
if param.isPack != isArgPackExpansion:
return True
@@ -3840,13 +3957,13 @@ class Symbol(object):
return False
if templateParams:
# TODO: do better comparison
- if text_type(s.templateParams) != text_type(templateParams):
+ if str(s.templateParams) != str(templateParams):
return False
if (s.templateArgs is None) != (templateArgs is None):
return False
if s.templateArgs:
# TODO: do better comparison
- if text_type(s.templateArgs) != text_type(templateArgs):
+ if str(s.templateArgs) != str(templateArgs):
return False
return True
if matchSelf and matches(self):
@@ -3863,7 +3980,7 @@ class Symbol(object):
onMissingQualifiedSymbol,
# type: Callable[[Symbol, Union[ASTIdentifier, ASTOperator], Any, ASTTemplateArgs], Symbol] # NOQA
strictTemplateParamArgLists, # type: bool
- ancestorLookupType, # type: unicode
+ ancestorLookupType, # type: str
templateShorthand, # type: bool
matchSelf, # type: bool
recurseInAnon, # type: bool
@@ -3964,7 +4081,7 @@ class Symbol(object):
identOrOp, templateParams, templateArgs)
def _add_symbols(self, nestedName, templateDecls, declaration, docname):
- # type: (ASTNestedName, List[Any], ASTDeclaration, unicode) -> Symbol
+ # type: (ASTNestedName, List[Any], ASTDeclaration, str) -> Symbol
# Used for adding a whole path of symbols, where the last may or may not
# be an actual declaration.
@@ -4116,7 +4233,7 @@ class Symbol(object):
return symbol
def merge_with(self, other, docnames, env):
- # type: (Symbol, List[unicode], BuildEnvironment) -> None
+ # type: (Symbol, List[str], BuildEnvironment) -> None
assert other is not None
for otherChild in other._children:
ourChild = self._find_first_named_symbol(
@@ -4135,7 +4252,7 @@ class Symbol(object):
if not ourChild.declaration:
ourChild._fill_empty(otherChild.declaration, otherChild.docname)
elif ourChild.docname != otherChild.docname:
- name = text_type(ourChild.declaration)
+ name = str(ourChild.declaration)
msg = __("Duplicate declaration, also defined in '%s'.\n"
"Declaration is '%s'.")
msg = msg % (ourChild.docname, name)
@@ -4157,7 +4274,7 @@ class Symbol(object):
declaration=None, docname=None)
def add_declaration(self, declaration, docname):
- # type: (ASTDeclaration, unicode) -> Symbol
+ # type: (ASTDeclaration, str) -> Symbol
assert declaration
assert docname
nestedName = declaration.name
@@ -4195,7 +4312,7 @@ class Symbol(object):
def find_name(self, nestedName, templateDecls, typ, templateShorthand,
matchSelf, recurseInAnon):
- # type: (ASTNestedName, List[Any], unicode, bool, bool, bool) -> Symbol
+ # type: (ASTNestedName, List[Any], str, bool, bool, bool) -> Symbol
# templateShorthand: missing template parameter lists for templates is ok
def onMissingQualifiedSymbol(parentSymbol, identOrOp, templateParams, templateArgs):
@@ -4233,7 +4350,7 @@ class Symbol(object):
def find_declaration(self, declaration, typ, templateShorthand,
matchSelf, recurseInAnon):
- # type: (ASTDeclaration, unicode, bool, bool, bool) -> Symbol
+ # type: (ASTDeclaration, str, bool, bool, bool) -> Symbol
# templateShorthand: missing template parameter lists for templates is ok
nestedName = declaration.name
if declaration.templatePrefix:
@@ -4277,26 +4394,26 @@ class Symbol(object):
return None
def to_string(self, indent):
- # type: (int) -> unicode
- res = ['\t' * indent] # type: List[unicode]
+ # type: (int) -> str
+ res = ['\t' * indent]
if not self.parent:
res.append('::')
else:
if self.templateParams:
- res.append(text_type(self.templateParams))
+ res.append(str(self.templateParams))
res.append('\n')
res.append('\t' * indent)
if self.identOrOp:
- res.append(text_type(self.identOrOp))
+ res.append(str(self.identOrOp))
else:
- res.append(text_type(self.declaration))
+ res.append(str(self.declaration))
if self.templateArgs:
- res.append(text_type(self.templateArgs))
+ res.append(str(self.templateArgs))
if self.declaration:
res.append(": ")
if self.isRedeclaration:
res.append('!!duplicate!! ')
- res.append(text_type(self.declaration))
+ res.append(str(self.declaration))
if self.docname:
res.append('\t(')
res.append(self.docname)
@@ -4305,16 +4422,16 @@ class Symbol(object):
return ''.join(res)
def dump(self, indent):
- # type: (int) -> unicode
+ # type: (int) -> str
res = [self.to_string(indent)]
for c in self._children:
res.append(c.dump(indent + 1))
return ''.join(res)
-class DefinitionParser(object):
+class DefinitionParser:
# those without signedness and size modifiers
- # see http://en.cppreference.com/w/cpp/language/types
+ # see https://en.cppreference.com/w/cpp/language/types
_simple_fundemental_types = (
'void', 'bool', 'char', 'wchar_t', 'char16_t', 'char32_t', 'int',
'float', 'double', 'auto'
@@ -4337,36 +4454,36 @@ class DefinitionParser(object):
self.config = config
def _make_multi_error(self, errors, header):
- # type: (List[Any], unicode) -> DefinitionError
+ # type: (List[Any], str) -> DefinitionError
if len(errors) == 1:
if len(header) > 0:
- return DefinitionError(header + '\n' + errors[0][0].description)
+ return DefinitionError(header + '\n' + str(errors[0][0]))
else:
- return DefinitionError(errors[0][0].description)
+ return DefinitionError(str(errors[0][0]))
result = [header, '\n']
for e in errors:
if len(e[1]) > 0:
ident = ' '
result.append(e[1])
result.append(':\n')
- for line in e[0].description.split('\n'):
+ for line in str(e[0]).split('\n'):
if len(line) == 0:
continue
result.append(ident)
result.append(line)
result.append('\n')
else:
- result.append(e[0].description)
+ result.append(str(e[0]))
return DefinitionError(''.join(result))
def status(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
# for debugging
indicator = '-' * self.pos + '^'
print("%s\n%s\n%s" % (msg, self.definition, indicator))
def fail(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
errors = []
indicator = '-' * self.pos + '^'
exMain = DefinitionError(
@@ -4379,7 +4496,7 @@ class DefinitionParser(object):
raise self._make_multi_error(errors, '')
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.warnEnv:
self.warnEnv.warn(msg)
else:
@@ -4400,7 +4517,7 @@ class DefinitionParser(object):
self.pos, self.last_match = self._previous_state
def skip_string(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
@@ -4408,7 +4525,7 @@ class DefinitionParser(object):
return False
def skip_word(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self):
@@ -4416,14 +4533,14 @@ class DefinitionParser(object):
return self.match(_whitespace_re)
def skip_word_and_ws(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if self.skip_word(word):
self.skip_ws()
return True
return False
def skip_string_and_ws(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
if self.skip_string(string):
self.skip_ws()
return True
@@ -4436,7 +4553,7 @@ class DefinitionParser(object):
@property
def current_char(self):
- # type: () -> unicode
+ # type: () -> str
try:
return self.definition[self.pos]
except IndexError:
@@ -4444,14 +4561,14 @@ class DefinitionParser(object):
@property
def matched_text(self):
- # type: () -> unicode
+ # type: () -> str
if self.last_match is not None:
return self.last_match.group()
else:
return None
def read_rest(self):
- # type: () -> unicode
+ # type: () -> str
rv = self.definition[self.pos:]
self.pos = self.end
return rv
@@ -4482,11 +4599,11 @@ class DefinitionParser(object):
return self.definition[startPos:self.pos]
def _parse_balanced_token_seq(self, end):
- # type: (List[unicode]) -> unicode
+ # type: (List[str]) -> str
# TODO: add handling of string literals and similar
- brackets = {'(': ')', '[': ']', '{': '}'} # type: Dict[unicode, unicode]
+ brackets = {'(': ')', '[': ']', '{': '}'}
startPos = self.pos
- symbols = [] # type: List[unicode]
+ symbols = [] # type: List[str]
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
@@ -4652,7 +4769,7 @@ class DefinitionParser(object):
return self._parse_nested_name()
def _parse_expression_list_or_braced_init_list(self):
- # type: () -> Tuple[List[Any], unicode]
+ # type: () -> Tuple[List[Any], str]
self.skip_ws()
if self.skip_string_and_ws('('):
close = ')'
@@ -5064,7 +5181,7 @@ class DefinitionParser(object):
if not allow or not self.allowFallbackExpressionParsing:
raise
self.warn("Parsing of expression failed. Using fallback parser."
- " Error was:\n%s" % e.description)
+ " Error was:\n%s" % e)
self.pos = prevPos
# and then the fallback scanning
assert end is not None
@@ -5074,8 +5191,8 @@ class DefinitionParser(object):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
- brackets = {'(': ')', '[': ']', '<': '>'} # type: Dict[unicode, unicode]
- symbols = [] # type: List[unicode]
+ brackets = {'(': ')', '[': ']', '<': '>'}
+ symbols = [] # type: List[str]
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
@@ -5248,7 +5365,7 @@ class DefinitionParser(object):
elif self.skip_word_and_ws('double'):
elements.append('double')
if len(elements) > 0:
- return ASTTrailingTypeSpecFundamental(u' '.join(elements))
+ return ASTTrailingTypeSpecFundamental(' '.join(elements))
# decltype
self.skip_ws()
@@ -5277,7 +5394,7 @@ class DefinitionParser(object):
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters_and_qualifiers(self, paramMode):
- # type: (unicode) -> ASTParametersQualifiers
+ # type: (str) -> ASTParametersQualifiers
if paramMode == 'new':
return None
self.skip_ws()
@@ -5362,14 +5479,14 @@ class DefinitionParser(object):
if not initializer:
self.fail(
'Expected "%s" in initializer-specifier.'
- % u'" or "'.join(valid))
+ % '" or "'.join(valid))
return ASTParametersQualifiers(
args, volatile, const, refQual, exceptionSpec, override, final,
initializer)
def _parse_decl_specs_simple(self, outer, typed):
- # type: (unicode, bool) -> ASTDeclSpecsSimple
+ # type: (str, bool) -> ASTDeclSpecsSimple
"""Just parse the simple ones."""
storage = None
threadLocal = None
@@ -5444,7 +5561,7 @@ class DefinitionParser(object):
friend, attrs)
def _parse_decl_specs(self, outer, typed=True):
- # type: (unicode, bool) -> ASTDeclSpecs
+ # type: (str, bool) -> ASTDeclSpecs
if outer:
if outer not in ('type', 'member', 'function', 'templateParam'):
raise Exception('Internal error, unknown outer "%s".' % outer)
@@ -5472,7 +5589,7 @@ class DefinitionParser(object):
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_param_qual(self, named, paramMode, typed):
- # type: (Union[bool, unicode], unicode, bool) -> ASTDeclaratorNameParamQual
+ # type: (Union[bool, str], str, bool) -> ASTDeclaratorNameParamQual
# now we should parse the name, and then suffixes
if named == 'maybe':
pos = self.pos
@@ -5519,7 +5636,7 @@ class DefinitionParser(object):
paramQual=paramQual)
def _parse_declarator(self, named, paramMode, typed=True):
- # type: (Union[bool, unicode], unicode, bool) -> Any
+ # type: (Union[bool, str], str, bool) -> Any
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function', 'operatorCast', 'new'):
raise Exception(
@@ -5631,7 +5748,7 @@ class DefinitionParser(object):
raise self._make_multi_error(prevErrors, header)
def _parse_initializer(self, outer=None, allowFallback=True):
- # type: (unicode, bool) -> ASTInitializer
+ # type: (str, bool) -> ASTInitializer
self.skip_ws()
# TODO: support paren and brace initialization for memberObject
if not self.skip_string('='):
@@ -5658,7 +5775,7 @@ class DefinitionParser(object):
return ASTInitializer(value)
def _parse_type(self, named, outer=None):
- # type: (Union[bool, unicode], unicode) -> ASTType
+ # type: (Union[bool, str], str) -> ASTType
"""
named=False|'maybe'|True: 'maybe' is e.g., for function objects which
doesn't need to name the arguments
@@ -5741,7 +5858,7 @@ class DefinitionParser(object):
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named, outer):
- # type: (Union[bool, unicode], unicode) -> Any
+ # type: (Union[bool, str], str) -> Any
if outer:
assert outer in ('type', 'member', 'function', 'templateParam')
type = self._parse_type(outer=outer, named=named)
@@ -5810,7 +5927,7 @@ class DefinitionParser(object):
if self.skip_string(':'):
while 1:
self.skip_ws()
- visibility = 'private' # type: unicode
+ visibility = 'private'
virtual = False
pack = False
if self.skip_word_and_ws('virtual'):
@@ -5838,7 +5955,7 @@ class DefinitionParser(object):
def _parse_enum(self):
# type: () -> ASTEnum
- scoped = None # type: unicode # is set by CPPEnumObject
+ scoped = None # is set by CPPEnumObject
self.skip_ws()
name = self._parse_nested_name()
self.skip_ws()
@@ -5973,8 +6090,8 @@ class DefinitionParser(object):
return ASTTemplateIntroduction(concept, params)
def _parse_template_declaration_prefix(self, objectType):
- # type: (unicode) -> ASTTemplateDeclarationPrefix
- templates = [] # type: List
+ # type: (str) -> ASTTemplateDeclarationPrefix
+ templates = [] # type: List[str]
while 1:
self.skip_ws()
# the saved position is only used to provide a better error message
@@ -6029,11 +6146,11 @@ class DefinitionParser(object):
msg = "Too many template argument lists compared to parameter" \
" lists. Argument lists: %d, Parameter lists: %d," \
" Extra empty parameters lists prepended: %d." \
- % (numArgs, numParams, numExtra) # type: unicode
+ % (numArgs, numParams, numExtra)
msg += " Declaration:\n\t"
if templatePrefix:
- msg += "%s\n\t" % text_type(templatePrefix)
- msg += text_type(nestedName)
+ msg += "%s\n\t" % templatePrefix
+ msg += str(nestedName)
self.warn(msg)
newTemplates = []
@@ -6045,7 +6162,7 @@ class DefinitionParser(object):
return templatePrefix
def parse_declaration(self, objectType):
- # type: (unicode) -> ASTDeclaration
+ # type: (str) -> ASTDeclaration
if objectType not in ('type', 'concept', 'member',
'function', 'class', 'union', 'enum', 'enumerator'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
@@ -6188,7 +6305,7 @@ class CPPObject(ObjectDescription):
option_spec['tparam-line-spec'] = directives.flag
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (Union[str, Exception]) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def _add_enumerator_to_parent(self, ast):
@@ -6234,7 +6351,7 @@ class CPPObject(ObjectDescription):
docname=self.env.docname)
def add_target_and_index(self, ast, sig, signode):
- # type: (Any, unicode, addnodes.desc_signature) -> None
+ # type: (Any, str, addnodes.desc_signature) -> None
# general note: name must be lstrip(':')'ed, to remove "::"
ids = []
for i in range(1, _max_id + 1):
@@ -6249,7 +6366,7 @@ class CPPObject(ObjectDescription):
assert newestId # shouldn't be None
if not re.compile(r'^[a-zA-Z0-9_]*$').match(newestId):
self.warn('Index id generation for C++ object "%s" failed, please '
- 'report as bug (id=%s).' % (text_type(ast), newestId))
+ 'report as bug (id=%s).' % (ast, newestId))
name = ast.symbol.get_full_nested_name().get_display_string().lstrip(':')
# Add index entry, but not if it's a declaration inside a concept
@@ -6293,6 +6410,10 @@ class CPPObject(ObjectDescription):
signode['first'] = (not self.names) # hmm, what is this about?
self.state.document.note_explicit_target(signode)
+ def get_index_text(self, name):
+ # type: (str) -> str
+ raise NotImplementedError()
+
def parse_definition(self, parser):
# type: (Any) -> Any
raise NotImplementedError()
@@ -6323,15 +6444,15 @@ class CPPObject(ObjectDescription):
parentDecl = parentSymbol.declaration
if parentDecl is not None and parentDecl.objectType == 'function':
self.warn("C++ declarations inside functions are not supported." +
- " Parent function is " + text_type(parentSymbol.get_full_nested_name()))
+ " Parent function is " + str(parentSymbol.get_full_nested_name()))
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
env.temp_data['cpp:last_symbol'] = symbol
return []
- return ObjectDescription.run(self)
+ return super().run()
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Any
+ # type: (str, addnodes.desc_signature) -> Any
parentSymbol = self.env.temp_data['cpp:parent_symbol']
parser = DefinitionParser(sig, self, self.env.config)
@@ -6339,7 +6460,7 @@ class CPPObject(ObjectDescription):
ast = self.parse_definition(parser)
parser.assert_end()
except DefinitionError as e:
- self.warn(e.description)
+ self.warn(e)
# It is easier to assume some phony name than handling the error in
# the possibly inner declarations.
name = _make_phony_error_name()
@@ -6383,7 +6504,7 @@ class CPPObject(ObjectDescription):
class CPPTypeObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ type)') % name
def parse_definition(self, parser):
@@ -6393,7 +6514,7 @@ class CPPTypeObject(CPPObject):
class CPPConceptObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ concept)') % name
def parse_definition(self, parser):
@@ -6403,7 +6524,7 @@ class CPPConceptObject(CPPObject):
class CPPMemberObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ member)') % name
def parse_definition(self, parser):
@@ -6413,7 +6534,7 @@ class CPPMemberObject(CPPObject):
class CPPFunctionObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ function)') % name
def parse_definition(self, parser):
@@ -6423,7 +6544,7 @@ class CPPFunctionObject(CPPObject):
class CPPClassObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ class)') % name
def parse_definition(self, parser):
@@ -6433,7 +6554,7 @@ class CPPClassObject(CPPObject):
class CPPUnionObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ union)') % name
def parse_definition(self, parser):
@@ -6443,7 +6564,7 @@ class CPPUnionObject(CPPObject):
class CPPEnumObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ enum)') % name
def parse_definition(self, parser):
@@ -6463,7 +6584,7 @@ class CPPEnumObject(CPPObject):
class CPPEnumeratorObject(CPPObject):
def get_index_text(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return _('%s (C++ enumerator)') % name
def parse_definition(self, parser):
@@ -6484,7 +6605,7 @@ class CPPNamespaceObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (Union[str, Exception]) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6499,7 +6620,7 @@ class CPPNamespaceObject(SphinxDirective):
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
- self.warn(e.description)
+ self.warn(e)
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
symbol = rootSymbol.add_name(ast.nestedName, ast.templatePrefix)
@@ -6518,7 +6639,7 @@ class CPPNamespacePushObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (Union[str, Exception]) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6530,7 +6651,7 @@ class CPPNamespacePushObject(SphinxDirective):
ast = parser.parse_namespace_object()
parser.assert_end()
except DefinitionError as e:
- self.warn(e.description)
+ self.warn(e)
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
oldParent = self.env.temp_data.get('cpp:parent_symbol', None)
@@ -6553,7 +6674,7 @@ class CPPNamespacePopObject(SphinxDirective):
option_spec = {} # type: Dict
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (Union[str, Exception]) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
@@ -6576,7 +6697,7 @@ class CPPNamespacePopObject(SphinxDirective):
class CPPXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode.attributes.update(env.ref_context)
if not has_explicit_title:
@@ -6604,7 +6725,7 @@ class CPPXRefRole(XRefRole):
return title, target
-class CPPExprRole(object):
+class CPPExprRole:
def __init__(self, asCode):
if asCode:
# render the expression as inline code
@@ -6616,7 +6737,7 @@ class CPPExprRole(object):
self.node_type = nodes.inline
def __call__(self, typ, rawtext, text, lineno, inliner, options={}, content=[]):
- class Warner(object):
+ class Warner:
def warn(self, msg):
inliner.reporter.warning(msg, line=lineno)
text = utils.unescape(text).replace('\n', ' ')
@@ -6627,8 +6748,7 @@ class CPPExprRole(object):
try:
ast = parser.parse_expression()
except DefinitionError as ex:
- Warner().warn('Unparseable C++ expression: %r\n%s'
- % (text, text_type(ex.description)))
+ Warner().warn('Unparseable C++ expression: %r\n%s' % (text, ex))
# see below
return [self.node_type(text, text, classes=classes)], []
parentSymbol = env.temp_data.get('cpp:parent_symbol', None)
@@ -6692,7 +6812,7 @@ class CPPDomain(Domain):
}
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
if Symbol.debug_show_tree:
print("clear_doc:", docname)
print("\tbefore:")
@@ -6712,18 +6832,18 @@ class CPPDomain(Domain):
del self.data['names'][name]
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
if Symbol.debug_show_tree:
print("process_doc:", docname)
print(self.data['root_symbol'].dump(0))
print("process_doc end:", docname)
def process_field_xref(self, pnode):
- # type: (nodes.Node) -> None
+ # type: (addnodes.pending_xref) -> None
pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
if Symbol.debug_show_tree:
print("merge_domaindata:")
print("\tself:")
@@ -6749,9 +6869,8 @@ class CPPDomain(Domain):
def _resolve_xref_inner(self, env, fromdocname, builder, typ,
target, node, contnode, emitWarnings=True):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node, bool) -> nodes.Node # NOQA
-
- class Warner(object):
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element, bool) -> Tuple[nodes.Element, str] # NOQA
+ class Warner:
def warn(self, msg):
if emitWarnings:
logger.warning(msg, location=node)
@@ -6777,8 +6896,7 @@ class CPPDomain(Domain):
# strange, that we don't get the error now, use the original
return target, e
t, ex = findWarning(e)
- warner.warn('Unparseable C++ cross-reference: %r\n%s'
- % (t, text_type(ex.description)))
+ warner.warn('Unparseable C++ cross-reference: %r\n%s' % (t, ex))
return None, None
parentKey = node.get("cpp:parent_key", None)
rootSymbol = self.data['root_symbol']
@@ -6809,7 +6927,7 @@ class CPPDomain(Domain):
templateShorthand=True,
matchSelf=True, recurseInAnon=True)
if s is None or s.declaration is None:
- txtName = text_type(name)
+ txtName = str(name)
if txtName.startswith('std::') or txtName == 'std':
raise NoUri()
return None, None
@@ -6847,7 +6965,7 @@ class CPPDomain(Domain):
# the non-identifier refs are cross-references, which should be processed:
# - fix parenthesis due to operator() and add_function_parentheses
if typ != "identifier":
- title = contnode.pop(0).astext()
+ title = contnode.pop(0).astext() # type: ignore
# If it's operator(), we need to add '()' if explicit function parens
# are requested. Then the Sphinx machinery will add another pair.
# Also, if it's an 'any' ref that resolves to a function, we need to add
@@ -6887,34 +7005,32 @@ class CPPDomain(Domain):
declaration.get_newest_id(), contnode, displayName
), declaration.objectType
- def resolve_xref(self, env, fromdocname, builder,
- typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
- def resolve_any_xref(self, env, fromdocname, builder, target,
- node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
- node, objtype = self._resolve_xref_inner(env, fromdocname, builder,
- 'any', target, node, contnode,
- emitWarnings=False)
- if node:
+ def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
+ retnode, objtype = self._resolve_xref_inner(env, fromdocname, builder,
+ 'any', target, node, contnode,
+ emitWarnings=False)
+ if retnode:
if objtype == 'templateParam':
- return [('cpp:templateParam', node)]
+ return [('cpp:templateParam', retnode)]
else:
- return [('cpp:' + self.role_for_objtype(objtype), node)]
+ return [('cpp:' + self.role_for_objtype(objtype), retnode)]
return []
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
continue
assert symbol.docname
fullNestedName = symbol.get_full_nested_name()
- name = text_type(fullNestedName).lstrip(':')
+ name = str(fullNestedName).lstrip(':')
dispname = fullNestedName.get_display_string().lstrip(':')
objectType = symbol.declaration.objectType
docname = symbol.docname
@@ -6922,7 +7038,7 @@ class CPPDomain(Domain):
yield (name, dispname, objectType, docname, newestId, 1)
def get_full_qualified_name(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
target = node.get('reftarget', None)
if target is None:
return None
@@ -6933,11 +7049,11 @@ class CPPDomain(Domain):
rootSymbol = self.data['root_symbol']
parentSymbol = rootSymbol.direct_lookup(parentKey)
parentName = parentSymbol.get_full_nested_name()
- return '::'.join([text_type(parentName), target])
+ return '::'.join([str(parentName), target])
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(CPPDomain)
app.add_config_value("cpp_index_common_prefix", [], 'env')
app.add_config_value("cpp_id_attributes", [], 'env')
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index 511a058c8..adc75ab76 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.javascript
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -40,14 +39,14 @@ class JSObject(ObjectDescription):
has_arguments = False
#: what is displayed right before the documentation entry
- display_prefix = None # type: unicode
+ display_prefix = None # type: str
#: If ``allow_nesting`` is ``True``, the object prefixes will be accumulated
#: based on directive nesting
allow_nesting = False
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Breaks down construct signatures
Parses out prefix and argument list from construct definition. The
@@ -101,7 +100,7 @@ class JSObject(ObjectDescription):
return fullname, prefix
def add_target_and_index(self, name_obj, sig, signode):
- # type: (Tuple[unicode, unicode], unicode, addnodes.desc_signature) -> None
+ # type: (Tuple[str, str], str, addnodes.desc_signature) -> None
mod_name = self.env.ref_context.get('js:module')
fullname = (mod_name and mod_name + '.' or '') + name_obj[0]
if fullname not in self.state.document.ids:
@@ -125,7 +124,7 @@ class JSObject(ObjectDescription):
'', None))
def get_index_text(self, objectname, name_obj):
- # type: (unicode, Tuple[unicode, unicode]) -> unicode
+ # type: (str, Tuple[str, str]) -> str
name, obj = name_obj
if self.objtype == 'function':
if not obj:
@@ -253,7 +252,7 @@ class JSModule(SphinxDirective):
mod_name = self.arguments[0].strip()
self.env.ref_context['js:module'] = mod_name
noindex = 'noindex' in self.options
- ret = []
+ ret = [] # type: List[nodes.Node]
if not noindex:
self.env.domaindata['js']['modules'][mod_name] = self.env.docname
# Make a duplicate entry in 'objects' to facilitate searching for
@@ -272,7 +271,7 @@ class JSModule(SphinxDirective):
class JSXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
# basically what sphinx.domains.python.PyXRefRole does
refnode['js:object'] = env.ref_context.get('js:object')
refnode['js:module'] = env.ref_context.get('js:module')
@@ -322,10 +321,10 @@ class JavaScriptDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # mod_name -> docname
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, unicode]]]
+ } # type: Dict[str, Dict[str, Tuple[str, str]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (pkg_docname, _l) in list(self.data['objects'].items()):
if pkg_docname == docname:
del self.data['objects'][fullname]
@@ -334,7 +333,7 @@ class JavaScriptDomain(Domain):
del self.data['modules'][mod_name]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -344,7 +343,7 @@ class JavaScriptDomain(Domain):
self.data['modules'][mod_name] = pkg_docname
def find_obj(self, env, mod_name, prefix, name, typ, searchorder=0):
- # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> Tuple[unicode, Tuple[unicode, unicode]] # NOQA
+ # type: (BuildEnvironment, str, str, str, str, int) -> Tuple[str, Tuple[str, str]]
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
@@ -370,7 +369,7 @@ class JavaScriptDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
searchorder = node.hasattr('refspecific') and 1 or 0
@@ -382,7 +381,7 @@ class JavaScriptDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target, node,
contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
mod_name = node.get('js:module')
prefix = node.get('js:object')
name, obj = self.find_obj(env, mod_name, prefix, target, None, 1)
@@ -393,13 +392,13 @@ class JavaScriptDomain(Domain):
name.replace('$', '_S_'), contnode, name))]
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield refname, refname, type, docname, \
refname.replace('$', '_S_'), 1
def get_full_qualified_name(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
modname = node.get('js:module')
prefix = node.get('js:object')
target = node.get('reftarget')
@@ -410,7 +409,7 @@ class JavaScriptDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(JavaScriptDomain)
return {
diff --git a/sphinx/domains/math.py b/sphinx/domains/math.py
index d56f45e0e..0b5d1f6dc 100644
--- a/sphinx/domains/math.py
+++ b/sphinx/domains/math.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.math
~~~~~~~~~~~~~~~~~~~
@@ -21,18 +20,18 @@ from sphinx.util.nodes import make_refnode
if False:
# For type annotation
- from typing import Any, Callable, Dict, Iterable, List, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, Union # NOQA
+ from sphinx import addnodes # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
- from sphinx.util.typing import RoleFunction # NOQA
logger = logging.getLogger(__name__)
class MathReferenceRole(XRefRole):
def result_nodes(self, document, env, node, is_ref):
- # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (nodes.document, BuildEnvironment, nodes.Element, bool) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
node['refdomain'] = 'math'
return [node], []
@@ -45,27 +44,28 @@ class MathDomain(Domain):
initial_data = {
'objects': {}, # labelid -> (docname, eqno)
'has_equations': {}, # docname -> bool
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, int]]]
+ } # type: Dict[str, Dict[str, Tuple[str, int]]]
dangling_warnings = {
'eq': 'equation not found: %(target)s',
}
enumerable_nodes = { # node_class -> (figtype, title_getter)
displaymath: ('displaymath', None),
nodes.math_block: ('displaymath', None),
- } # type: Dict[nodes.Node, Tuple[unicode, Callable]]
+ }
roles = {
'numref': MathReferenceRole(),
}
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
def math_node(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, (nodes.math, nodes.math_block))
self.data['has_equations'][docname] = any(document.traverse(math_node))
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for equation_id, (doc, eqno) in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][equation_id]
@@ -73,7 +73,7 @@ class MathDomain(Domain):
self.data['has_equations'].pop(docname, None)
def merge_domaindata(self, docnames, otherdata):
- # type: (Iterable[unicode], Dict) -> None
+ # type: (Iterable[str], Dict) -> None
for labelid, (doc, eqno) in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][labelid] = (doc, eqno)
@@ -82,7 +82,7 @@ class MathDomain(Domain):
self.data['has_equations'][docname] = otherdata['has_equations'][docname]
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
assert typ in ('eq', 'numref')
docname, number = self.data['objects'].get(target, (None, None))
if docname:
@@ -107,19 +107,19 @@ class MathDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
refnode = self.resolve_xref(env, fromdocname, builder, 'eq', target, node, contnode)
if refnode is None:
return []
else:
- return [refnode]
+ return [('eq', refnode)]
def get_objects(self):
# type: () -> List
return []
def add_equation(self, env, docname, labelid):
- # type: (BuildEnvironment, unicode, unicode) -> int
+ # type: (BuildEnvironment, str, str) -> int
equations = self.data['objects']
if labelid in equations:
path = env.doc2path(equations[labelid][0])
@@ -131,7 +131,7 @@ class MathDomain(Domain):
return eqno
def get_next_equation_number(self, docname):
- # type: (unicode) -> int
+ # type: (str) -> int
targets = [eq for eq in self.data['objects'].values() if eq[0] == docname]
return len(targets) + 1
@@ -141,7 +141,7 @@ class MathDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(MathDomain)
app.add_role('eq', MathReferenceRole(warn_dangling=True))
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index 427c86aa9..150067e99 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.python
~~~~~~~~~~~~~~~~~~~~~
@@ -13,12 +12,11 @@ import re
from docutils import nodes
from docutils.parsers.rst import directives
-from six import iteritems
from sphinx import addnodes, locale
from sphinx.deprecation import DeprecatedDict, RemovedInSphinx30Warning
from sphinx.directives import ObjectDescription
-from sphinx.domains import Domain, ObjType, Index
+from sphinx.domains import Domain, ObjType, Index, IndexEntry
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import logging
@@ -28,10 +26,11 @@ from sphinx.util.nodes import make_refnode
if False:
# For type annotation
- from typing import Any, Dict, Iterable, Iterator, List, Tuple, Union # NOQA
+ from typing import Any, Dict, Iterable, Iterator, List, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.typing import TextlikeNode # NOQA
logger = logging.getLogger(__name__)
@@ -54,7 +53,7 @@ pairindextypes = {
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
-} # Dict[unicode, unicode]
+}
locale.pairindextypes = DeprecatedDict(
pairindextypes,
@@ -65,7 +64,7 @@ locale.pairindextypes = DeprecatedDict(
def _pseudo_parse_arglist(signode, arglist):
- # type: (addnodes.desc_signature, unicode) -> None
+ # type: (addnodes.desc_signature, str) -> None
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
@@ -73,7 +72,7 @@ def _pseudo_parse_arglist(signode, arglist):
string literal (e.g. default argument value).
"""
paramlist = addnodes.desc_parameterlist()
- stack = [paramlist]
+ stack = [paramlist] # type: List[nodes.Element]
try:
for argument in arglist.split(','):
argument = argument.strip()
@@ -106,26 +105,27 @@ def _pseudo_parse_arglist(signode, arglist):
# if there are too few or too many elements on the stack, just give up
# and treat the whole argument list as one argument, discarding the
# already partially populated paramlist node
- signode += addnodes.desc_parameterlist()
- signode[-1] += addnodes.desc_parameter(arglist, arglist)
+ paramlist = addnodes.desc_parameterlist()
+ paramlist += addnodes.desc_parameter(arglist, arglist)
+ signode += paramlist
else:
signode += paramlist
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
-class PyXrefMixin(object):
+class PyXrefMixin:
def make_xref(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
- innernode=nodes.emphasis, # type: nodes.Node
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
+ innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
):
# type: (...) -> nodes.Node
- result = super(PyXrefMixin, self).make_xref(rolename, domain, target, # type: ignore
- innernode, contnode, env)
+ result = super().make_xref(rolename, domain, target, # type: ignore
+ innernode, contnode, env)
result['refspecific'] = True
if target.startswith(('.', '~')):
prefix, result['reftarget'] = target[0], target[1:]
@@ -139,10 +139,10 @@ class PyXrefMixin(object):
return result
def make_xrefs(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
- innernode=nodes.emphasis, # type: nodes.Node
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
+ innernode=nodes.emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
):
@@ -151,7 +151,7 @@ class PyXrefMixin(object):
delims_re = re.compile(delims)
sub_targets = re.split(delims, target)
- split_contnode = bool(contnode and contnode.astext() == target)
+ split_contnode = bool(contnode and contnode.astext() == target) # type: ignore
results = []
for sub_target in filter(None, sub_targets):
@@ -170,13 +170,12 @@ class PyXrefMixin(object):
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
- # type: (unicode, unicode, unicode, nodes.Node, nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
+ # type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
- return super(PyField, self).make_xref(rolename, domain, target,
- innernode, contnode, env)
+ return super().make_xref(rolename, domain, target, innernode, contnode, env)
class PyGroupedField(PyXrefMixin, GroupedField):
@@ -186,13 +185,12 @@ class PyGroupedField(PyXrefMixin, GroupedField):
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename, domain, target,
innernode=nodes.emphasis, contnode=None, env=None):
- # type: (unicode, unicode, unicode, nodes.Node, nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
+ # type: (str, str, str, Type[TextlikeNode], nodes.Node, BuildEnvironment) -> nodes.Node # NOQA
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
- return super(PyTypedField, self).make_xref(rolename, domain, target,
- innernode, contnode, env)
+ return super().make_xref(rolename, domain, target, innernode, contnode, env)
class PyObject(ObjectDescription):
@@ -230,7 +228,7 @@ class PyObject(ObjectDescription):
allow_nesting = False
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""May return a prefix to put before the object name in the
signature.
"""
@@ -244,7 +242,7 @@ class PyObject(ObjectDescription):
return False
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
@@ -324,12 +322,12 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
modname = self.options.get(
'module', self.env.ref_context.get('py:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
@@ -425,7 +423,7 @@ class PyModulelevel(PyObject):
return self.objtype == 'function'
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'function':
if not modname:
return _('%s() (built-in function)') % name_cls[0]
@@ -446,11 +444,11 @@ class PyClasslike(PyObject):
allow_nesting = True
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.objtype + ' '
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
@@ -471,7 +469,7 @@ class PyClassmember(PyObject):
return self.objtype.endswith('method')
def get_signature_prefix(self, sig):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if self.objtype == 'staticmethod':
return 'static '
elif self.objtype == 'classmethod':
@@ -479,7 +477,7 @@ class PyClassmember(PyObject):
return ''
def get_index_text(self, modname, name_cls):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype == 'method':
@@ -536,13 +534,13 @@ class PyClassmember(PyObject):
return ''
-class PyDecoratorMixin(object):
+class PyDecoratorMixin:
"""
Mixin for decorator directives.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
- ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) # type: ignore
+ # type: (str, addnodes.desc_signature) -> Tuple[str, str]
+ ret = super().handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
@@ -559,7 +557,7 @@ class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
# type: () -> List[nodes.Node]
# a decorator function is a function after all
self.name = 'py:function'
- return PyModulelevel.run(self)
+ return super().run()
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
@@ -569,7 +567,7 @@ class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
def run(self):
# type: () -> List[nodes.Node]
self.name = 'py:method'
- return PyClassmember.run(self)
+ return super().run()
class PyModule(SphinxDirective):
@@ -593,7 +591,7 @@ class PyModule(SphinxDirective):
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
self.env.ref_context['py:module'] = modname
- ret = []
+ ret = [] # type: List[nodes.Node]
if not noindex:
self.env.domaindata['py']['modules'][modname] = (self.env.docname,
self.options.get('synopsis', ''),
@@ -639,7 +637,7 @@ class PyCurrentModule(SphinxDirective):
class PyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
@@ -670,14 +668,14 @@ class PythonModuleIndex(Index):
shortname = _('modules')
def generate(self, docnames=None):
- # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA
- content = {} # type: Dict[unicode, List]
+ # type: (Iterable[str]) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]
+ content = {} # type: Dict[str, List[IndexEntry]]
# list of prefixes to ignore
- ignores = None # type: List[unicode]
+ ignores = None # type: List[str]
ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
- modules = sorted(iteritems(self.domain.data['modules']),
+ modules = sorted(self.domain.data['modules'].items(),
key=lambda x: x[0].lower())
# sort out collapsable modules
prev_modname = ''
@@ -706,19 +704,22 @@ class PythonModuleIndex(Index):
if prev_modname == package:
# first submodule - make parent a group head
if entries:
- entries[-1][1] = 1
+ last = entries[-1]
+ entries[-1] = IndexEntry(last[0], 1, last[2], last[3],
+ last[4], last[5], last[6])
+ entries.append(IndexEntry(stripped + package, 1, '', '', '', '', ''))
elif not prev_modname.startswith(package):
# submodule without parent in list, add dummy entry
- entries.append([stripped + package, 1, '', '', '', '', ''])
+ entries.append(IndexEntry(stripped + package, 1, '', '', '', '', ''))
subtype = 2
else:
num_toplevels += 1
subtype = 0
qualifier = deprecated and _('Deprecated') or ''
- entries.append([stripped + modname, subtype, docname,
- 'module-' + stripped + modname, platforms,
- qualifier, synopsis])
+ entries.append(IndexEntry(stripped + modname, subtype, docname,
+ 'module-' + stripped + modname, platforms,
+ qualifier, synopsis))
prev_modname = modname
# apply heuristics when to collapse modindex at page load:
@@ -727,7 +728,7 @@ class PythonModuleIndex(Index):
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
- sorted_content = sorted(iteritems(content))
+ sorted_content = sorted(content.items())
return sorted_content, collapse
@@ -746,7 +747,7 @@ class PythonDomain(Domain):
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
- } # type: Dict[unicode, ObjType]
+ } # type: Dict[str, ObjType]
directives = {
'function': PyModulelevel,
@@ -776,13 +777,13 @@ class PythonDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
- } # type: Dict[unicode, Dict[unicode, Tuple[Any]]]
+ } # type: Dict[str, Dict[str, Tuple[Any]]]
indices = [
PythonModuleIndex,
]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
@@ -791,7 +792,7 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates?
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -801,7 +802,7 @@ class PythonDomain(Domain):
self.data['modules'][modname] = data
def find_obj(self, env, modname, classname, name, type, searchmode=0):
- # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> List[Tuple[unicode, Any]] # NOQA
+ # type: (BuildEnvironment, str, str, str, str, int) -> List[Tuple[str, Any]]
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
@@ -813,7 +814,7 @@ class PythonDomain(Domain):
return []
objects = self.data['objects']
- matches = [] # type: List[Tuple[unicode, Any]]
+ matches = [] # type: List[Tuple[str, Any]]
newname = None
if searchmode == 1:
@@ -866,7 +867,7 @@ class PythonDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
type, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = node.hasattr('refspecific') and 1 or 0
@@ -881,18 +882,16 @@ class PythonDomain(Domain):
name, obj = matches[0]
if obj[1] == 'module':
- return self._make_module_refnode(builder, fromdocname, name,
- contnode)
+ return self._make_module_refnode(builder, fromdocname, name, contnode)
else:
- return make_refnode(builder, fromdocname, obj[0], name,
- contnode, name)
+ return make_refnode(builder, fromdocname, obj[0], name, contnode, name)
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
- results = [] # type: List[Tuple[unicode, nodes.Node]]
+ results = [] # type: List[Tuple[str, nodes.Element]]
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
@@ -908,7 +907,7 @@ class PythonDomain(Domain):
return results
def _make_module_refnode(self, builder, fromdocname, name, contnode):
- # type: (Builder, unicode, unicode, nodes.Node) -> nodes.Node
+ # type: (Builder, str, str, nodes.Node) -> nodes.Element
# get additional info for modules
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
@@ -922,15 +921,15 @@ class PythonDomain(Domain):
'module-' + name, contnode, title)
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
- for modname, info in iteritems(self.data['modules']):
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
+ for modname, info in self.data['modules'].items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
- for refname, (docname, type) in iteritems(self.data['objects']):
+ for refname, (docname, type) in self.data['objects'].items():
if type != 'module': # modules are already handled
yield (refname, refname, type, docname, refname, 1)
def get_full_qualified_name(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
@@ -941,7 +940,7 @@ class PythonDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(PythonDomain)
return {
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index 1a5c3ba0c..aeabbd576 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.rst
~~~~~~~~~~~~~~~~~~
@@ -11,8 +10,6 @@
import re
-from six import iteritems
-
from sphinx import addnodes
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType
@@ -38,7 +35,7 @@ class ReSTMarkup(ObjectDescription):
"""
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
@@ -60,7 +57,7 @@ class ReSTMarkup(ObjectDescription):
targetname, '', None))
def get_index_text(self, objectname, name):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.objtype == 'directive':
return _('%s (directive)') % name
elif self.objtype == 'role':
@@ -69,7 +66,7 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
- # type: (unicode) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Parse a directive signature.
Returns (directive, arguments) string tuple. If no arguments are given,
@@ -91,7 +88,7 @@ class ReSTDirective(ReSTMarkup):
Description of a reST directive.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
name, args = parse_directive(sig)
desc_name = '.. %s::' % name
signode += addnodes.desc_name(desc_name, desc_name)
@@ -105,7 +102,7 @@ class ReSTRole(ReSTMarkup):
Description of a reST role.
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
@@ -129,24 +126,23 @@ class ReSTDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- } # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]]
+ } # type: Dict[str, Dict[str, Tuple[str, ObjType]]]
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for (typ, name), doc in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][typ, name]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX check duplicates
for (typ, name), doc in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][typ, name] = doc
- def resolve_xref(self, env, fromdocname, builder, typ, target, node,
- contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
@@ -155,12 +151,12 @@ class ReSTDomain(Domain):
objects[objtype, target],
objtype + '-' + target,
contnode, target + ' ' + objtype)
+ return None
- def resolve_any_xref(self, env, fromdocname, builder, target,
- node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA
+ def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
objects = self.data['objects']
- results = []
+ results = [] # type: List[Tuple[str, nodes.Element]]
for objtype in self.object_types:
if (objtype, target) in self.data['objects']:
results.append(('rst:' + self.role_for_objtype(objtype),
@@ -171,13 +167,13 @@ class ReSTDomain(Domain):
return results
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
- for (typ, name), docname in iteritems(self.data['objects']):
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
+ for (typ, name), docname in self.data['objects'].items():
yield name, name, typ, docname, typ + '-' + name, 1
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(ReSTDomain)
return {
diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py
index 7f06acd1e..12a29de7d 100644
--- a/sphinx/domains/std.py
+++ b/sphinx/domains/std.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.domains.std
~~~~~~~~~~~~~~~~~~
@@ -13,11 +12,11 @@ import re
import unicodedata
import warnings
from copy import copy
+from typing import cast
from docutils import nodes
from docutils.parsers.rst import directives
-from docutils.statemachine import ViewList
-from six import iteritems
+from docutils.statemachine import StringList
from sphinx import addnodes
from sphinx.deprecation import RemovedInSphinx30Warning
@@ -31,7 +30,7 @@ from sphinx.util.nodes import clean_astext, make_refnode
if False:
# For type annotation
- from typing import Any, Callable, Dict, Iterator, List, Tuple, Type, Union # NOQA
+ from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Type, Union # NOQA
from docutils.parsers.rst import Directive # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
@@ -51,11 +50,11 @@ class GenericObject(ObjectDescription):
"""
A generic x-ref directive registered with Sphinx.add_object_type().
"""
- indextemplate = '' # type: unicode
- parse_node = None # type: Callable[[GenericObject, BuildEnvironment, unicode, addnodes.desc_signature], unicode] # NOQA
+ indextemplate = ''
+ parse_node = None # type: Callable[[GenericObject, BuildEnvironment, str, addnodes.desc_signature], str] # NOQA
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
@@ -66,7 +65,7 @@ class GenericObject(ObjectDescription):
return name
def add_target_and_index(self, name, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
@@ -80,8 +79,9 @@ class GenericObject(ObjectDescription):
indexentry = self.indextemplate % (name,)
self.indexnode['entries'].append((indextype, indexentry,
targetname, '', None))
- self.env.domaindata['std']['objects'][self.objtype, name] = \
- self.env.docname, targetname
+
+ std = cast(StandardDomain, self.env.get_domain('std'))
+ std.add_object(self.objtype, name, self.env.docname, targetname)
class EnvVar(GenericObject):
@@ -94,7 +94,7 @@ class EnvVarXRefRole(XRefRole):
"""
def result_nodes(self, document, env, node, is_ref):
- # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (nodes.document, BuildEnvironment, nodes.Element, bool) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
if not is_ref:
return [node], []
varname = node['reftarget']
@@ -128,7 +128,7 @@ class Target(SphinxDirective):
targetname = '%s-%s' % (self.name, fullname)
node = nodes.target('', '', ids=[targetname])
self.state.document.note_explicit_target(node)
- ret = [node]
+ ret = [node] # type: List[nodes.Node]
if self.indextemplate:
indexentry = self.indextemplate % (fullname,)
indextype = 'single'
@@ -142,8 +142,10 @@ class Target(SphinxDirective):
name = self.name
if ':' in self.name:
_, name = self.name.split(':', 1)
- self.env.domaindata['std']['objects'][name, fullname] = \
- self.env.docname, targetname
+
+ std = cast(StandardDomain, self.env.get_domain('std'))
+ std.add_object(name, fullname, self.env.docname, targetname)
+
return ret
@@ -153,10 +155,10 @@ class Cmdoption(ObjectDescription):
"""
def handle_signature(self, sig, signode):
- # type: (unicode, addnodes.desc_signature) -> unicode
+ # type: (str, addnodes.desc_signature) -> str
"""Transform an option description into RST nodes."""
count = 0
- firstname = '' # type: unicode
+ firstname = ''
for potential_option in sig.split(', '):
potential_option = potential_option.strip()
m = option_desc_re.match(potential_option)
@@ -182,7 +184,7 @@ class Cmdoption(ObjectDescription):
return firstname
def add_target_and_index(self, firstname, sig, signode):
- # type: (unicode, unicode, addnodes.desc_signature) -> None
+ # type: (str, str, addnodes.desc_signature) -> None
currprogram = self.env.ref_context.get('std:program')
for optname in signode.get('allnames', []):
targetname = optname.replace('/', '-')
@@ -193,10 +195,12 @@ class Cmdoption(ObjectDescription):
targetname = 'cmdoption' + targetname
signode['names'].append(targetname)
+ domain = cast(StandardDomain, self.env.get_domain('std'))
self.state.document.note_explicit_target(signode)
for optname in signode.get('allnames', []):
- self.env.domaindata['std']['progoptions'][currprogram, optname] = \
- self.env.docname, signode['ids'][0]
+ domain.add_program_option(currprogram, optname,
+ self.env.docname, signode['ids'][0])
+
# create only one index entry for the whole option
if optname == firstname:
self.indexnode['entries'].append(
@@ -228,20 +232,20 @@ class Program(SphinxDirective):
class OptionXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
refnode['std:program'] = env.ref_context.get('std:program')
return title, target
def split_term_classifiers(line):
- # type: (unicode) -> List[Union[unicode, None]]
+ # type: (str) -> List[Union[str, None]]
# split line into a term and classifiers. if no classifier, None is used..
parts = re.split(' +: +', line) + [None]
return parts
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
- # type: (BuildEnvironment, List[nodes.Node], unicode, unicode, int, unicode) -> nodes.term
+ # type: (BuildEnvironment, Iterable[nodes.Node], str, str, int, str) -> nodes.term
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
@@ -249,15 +253,15 @@ def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
term.line = lineno
gloss_entries = env.temp_data.setdefault('gloss_entries', set())
- objects = env.domaindata['std']['objects']
-
termtext = term.astext()
if new_id is None:
new_id = nodes.make_id('term-' + termtext)
if new_id in gloss_entries:
new_id = 'term-' + str(len(gloss_entries))
gloss_entries.add(new_id)
- objects['term', termtext.lower()] = env.docname, new_id
+
+ std = cast(StandardDomain, env.get_domain('std'))
+ std.add_object('term', termtext.lower(), env.docname, new_id)
# add an index entry too
indexnode = addnodes.index()
@@ -295,10 +299,10 @@ class Glossary(SphinxDirective):
# be* a definition list.
# first, collect single entries
- entries = [] # type: List[Tuple[List[Tuple[unicode, unicode, int]], ViewList]]
+ entries = [] # type: List[Tuple[List[Tuple[str, str, int]], StringList]]
in_definition = True
was_empty = True
- messages = []
+ messages = [] # type: List[nodes.Node]
for line, (source, lineno) in zip(self.content, self.content.items):
# empty line -> add to last definition
if not line:
@@ -317,7 +321,7 @@ class Glossary(SphinxDirective):
messages.append(self.state.reporter.system_message(
2, 'glossary term must be preceded by empty line',
source=source, line=lineno))
- entries.append(([(line, source, lineno)], ViewList()))
+ entries.append(([(line, source, lineno)], StringList()))
in_definition = False
# second term and following
else:
@@ -347,9 +351,9 @@ class Glossary(SphinxDirective):
# now, parse all the entries into a big definition list
items = []
for terms, definition in entries:
- termtexts = []
- termnodes = []
- system_messages = [] # type: List[unicode]
+ termtexts = [] # type: List[str]
+ termnodes = [] # type: List[nodes.Node]
+ system_messages = [] # type: List[nodes.Node]
for line, source, lineno in terms:
parts = split_term_classifiers(line)
# parse the term with inline markup
@@ -385,8 +389,8 @@ class Glossary(SphinxDirective):
def token_xrefs(text):
- # type: (unicode) -> List[nodes.Node]
- retnodes = []
+ # type: (str) -> List[nodes.Node]
+ retnodes = [] # type: List[nodes.Node]
pos = 0
for m in token_re.finditer(text):
if m.start() > pos:
@@ -415,9 +419,8 @@ class ProductionList(SphinxDirective):
def run(self):
# type: () -> List[nodes.Node]
- objects = self.env.domaindata['std']['objects']
- node = addnodes.productionlist()
- messages = [] # type: List[nodes.Node]
+ domain = cast(StandardDomain, self.env.get_domain('std'))
+ node = addnodes.productionlist() # type: nodes.Element
i = 0
for rule in self.arguments[0].split('\n'):
@@ -436,10 +439,10 @@ class ProductionList(SphinxDirective):
if idname not in self.state.document.ids:
subnode['ids'].append(idname)
self.state.document.note_implicit_target(subnode, subnode)
- objects['token', subnode['tokenname']] = self.env.docname, idname
+ domain.add_object('token', subnode['tokenname'], self.env.docname, idname)
subnode.extend(token_xrefs(tokens))
node.append(subnode)
- return [node] + messages
+ return [node]
class StandardDomain(Domain):
@@ -459,7 +462,7 @@ class StandardDomain(Domain):
'envvar': ObjType(_('environment variable'), 'envvar'),
'cmdoption': ObjType(_('program option'), 'option'),
'doc': ObjType(_('document'), 'doc', searchprio=-1)
- } # type: Dict[unicode, ObjType]
+ } # type: Dict[str, ObjType]
directives = {
'program': Program,
@@ -468,7 +471,7 @@ class StandardDomain(Domain):
'envvar': EnvVar,
'glossary': Glossary,
'productionlist': ProductionList,
- } # type: Dict[unicode, Type[Directive]]
+ } # type: Dict[str, Type[Directive]]
roles = {
'option': OptionXRefRole(warn_dangling=True),
'envvar': EnvVarXRefRole(),
@@ -487,7 +490,7 @@ class StandardDomain(Domain):
'keyword': XRefRole(warn_dangling=True),
# links to documents
'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
- } # type: Dict[unicode, Union[RoleFunction, XRefRole]]
+ } # type: Dict[str, Union[RoleFunction, XRefRole]]
initial_data = {
'progoptions': {}, # (program, name) -> docname, labelid
@@ -521,19 +524,19 @@ class StandardDomain(Domain):
nodes.figure: ('figure', None),
nodes.table: ('table', None),
nodes.container: ('code-block', None),
- } # type: Dict[nodes.Node, Tuple[unicode, Callable]]
+ } # type: Dict[Type[nodes.Node], Tuple[str, Callable]]
def __init__(self, env):
# type: (BuildEnvironment) -> None
- super(StandardDomain, self).__init__(env)
+ super().__init__(env)
# set up enumerable nodes
self.enumerable_nodes = copy(self.enumerable_nodes) # create a copy for this instance
- for node, settings in iteritems(env.app.registry.enumerable_nodes):
+ for node, settings in env.app.registry.enumerable_nodes.items():
self.enumerable_nodes[node] = settings
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for key, (fn, _l) in list(self.data['progoptions'].items()):
if fn == docname:
del self.data['progoptions'][key]
@@ -556,7 +559,7 @@ class StandardDomain(Domain):
del self.data['anonlabels'][key]
def merge_domaindata(self, docnames, otherdata):
- # type: (List[unicode], Dict) -> None
+ # type: (List[str], Dict) -> None
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
@@ -580,16 +583,16 @@ class StandardDomain(Domain):
self.data['anonlabels'][key] = data
def process_doc(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
self.note_citations(env, docname, document)
self.note_citation_refs(env, docname, document)
self.note_labels(env, docname, document)
def note_citations(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(nodes.citation):
node['docname'] = docname
- label = node[0].astext()
+ label = cast(nodes.label, node[0]).astext()
if label in self.data['citations']:
path = env.doc2path(self.data['citations'][label][0])
logger.warning(__('duplicate citation %s, other instance in %s'), label, path,
@@ -597,7 +600,7 @@ class StandardDomain(Domain):
self.data['citations'][label] = (docname, node['ids'][0], node.line)
def note_citation_refs(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
for node in document.traverse(addnodes.pending_xref):
if node['refdomain'] == 'std' and node['reftype'] == 'citation':
label = node['reftarget']
@@ -605,16 +608,17 @@ class StandardDomain(Domain):
citation_refs.append(docname)
def note_labels(self, env, docname, document):
- # type: (BuildEnvironment, unicode, nodes.Node) -> None
+ # type: (BuildEnvironment, str, nodes.document) -> None
labels, anonlabels = self.data['labels'], self.data['anonlabels']
- for name, explicit in iteritems(document.nametypes):
+ for name, explicit in document.nametypes.items():
if not explicit:
continue
labelid = document.nameids[name]
if labelid is None:
continue
node = document.ids[labelid]
- if node.tagname == 'target' and 'refid' in node: # indirect hyperlink targets
+ if isinstance(node, nodes.target) and 'refid' in node:
+ # indirect hyperlink targets
node = document.ids.get(node['refid'])
labelid = node['names'][0]
if (node.tagname == 'footnote' or
@@ -629,7 +633,8 @@ class StandardDomain(Domain):
location=node)
anonlabels[name] = docname, labelid
if node.tagname in ('section', 'rubric'):
- sectname = clean_astext(node[0]) # node[0] == title node
+ title = cast(nodes.title, node[0])
+ sectname = clean_astext(title)
elif self.is_enumerable_node(node):
sectname = self.get_numfig_title(node)
if not sectname:
@@ -645,9 +650,17 @@ class StandardDomain(Domain):
continue
labels[name] = docname, labelid, sectname
+ def add_object(self, objtype, name, docname, labelid):
+ # type: (str, str, str, str) -> None
+ self.data['objects'][objtype, name] = (docname, labelid)
+
+ def add_program_option(self, program, name, docname, labelid):
+ # type: (str, str, str, str) -> None
+ self.data['progoptions'][program, name] = (docname, labelid)
+
def check_consistency(self):
# type: () -> None
- for name, (docname, labelid, lineno) in iteritems(self.data['citations']):
+ for name, (docname, labelid, lineno) in self.data['citations'].items():
if name not in self.data['citation_refs']:
logger.warning(__('Citation [%s] is not referenced.'), name,
type='ref', subtype='citation',
@@ -655,7 +668,7 @@ class StandardDomain(Domain):
def build_reference_node(self, fromdocname, builder, docname, labelid,
sectname, rolename, **options):
- # type: (unicode, Builder, unicode, unicode, unicode, unicode, Any) -> nodes.Node
+ # type: (str, Builder, str, str, str, str, Any) -> nodes.Element
nodeclass = options.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **options)
innernode = nodes.inline(sectname, sectname)
@@ -679,7 +692,7 @@ class StandardDomain(Domain):
return newnode
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
@@ -698,7 +711,7 @@ class StandardDomain(Domain):
return resolver(env, fromdocname, builder, typ, target, node, contnode)
def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if node['refexplicit']:
# reference to anonymous label; the reference uses
# the supplied link caption
@@ -716,7 +729,7 @@ class StandardDomain(Domain):
docname, labelid, sectname, 'ref')
def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
if target in self.data['labels']:
docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
else:
@@ -777,7 +790,7 @@ class StandardDomain(Domain):
title=title)
def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# keywords are oddballs: they are referenced by named labels
docname, labelid, _ = self.data['labels'].get(target, ('', '', ''))
if not docname:
@@ -786,7 +799,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
# directly reference to document by source name; can be absolute or relative
refdoc = node.get('refdoc', fromdocname)
docname = docname_join(refdoc, node['reftarget'])
@@ -802,7 +815,7 @@ class StandardDomain(Domain):
return make_refnode(builder, fromdocname, docname, None, innernode)
def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
progname = node.get('std:program')
target = target.strip()
docname, labelid = self.data['progoptions'].get((progname, target), ('', ''))
@@ -824,7 +837,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
from sphinx.environment import NoUri
docname, labelid, lineno = self.data['citations'].get(target, ('', '', 0))
@@ -847,7 +860,7 @@ class StandardDomain(Domain):
raise
def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # type: (BuildEnvironment, str, Builder, str, str, addnodes.pending_xref, nodes.Element) -> nodes.Element # NOQA
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.data['objects']:
@@ -861,8 +874,8 @@ class StandardDomain(Domain):
labelid, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
- results = [] # type: List[Tuple[unicode, nodes.Node]]
+ # type: (BuildEnvironment, str, Builder, str, addnodes.pending_xref, nodes.Element) -> List[Tuple[str, nodes.Element]] # NOQA
+ results = [] # type: List[Tuple[str, nodes.Element]]
ltarget = target.lower() # :ref: lowercases its target automatically
for role in ('ref', 'option'): # do not try "keyword"
res = self.resolve_xref(env, fromdocname, builder, role,
@@ -883,29 +896,29 @@ class StandardDomain(Domain):
return results
def get_objects(self):
- # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
+ # type: () -> Iterator[Tuple[str, str, str, str, str, int]]
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
- for (prog, option), info in iteritems(self.data['progoptions']):
+ for (prog, option), info in self.data['progoptions'].items():
if prog:
fullname = ".".join([prog, option])
yield (fullname, fullname, 'cmdoption', info[0], info[1], 1)
else:
yield (option, option, 'cmdoption', info[0], info[1], 1)
- for (type, name), info in iteritems(self.data['objects']):
+ for (type, name), info in self.data['objects'].items():
yield (name, name, type, info[0], info[1],
self.object_types[type].attrs['searchprio'])
- for name, info in iteritems(self.data['labels']):
+ for name, info in self.data['labels'].items():
yield (name, info[2], 'label', info[0], info[1], -1)
# add anonymous-only labels as well
non_anon_labels = set(self.data['labels'])
- for name, info in iteritems(self.data['anonlabels']):
+ for name, info in self.data['anonlabels'].items():
if name not in non_anon_labels:
yield (name, name, 'label', info[0], info[1], -1)
def get_type_name(self, type, primary=False):
- # type: (ObjType, bool) -> unicode
+ # type: (ObjType, bool) -> str
# never prepend "Default"
return type.lname
@@ -914,7 +927,7 @@ class StandardDomain(Domain):
return node.__class__ in self.enumerable_nodes
def get_numfig_title(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get the title of enumerable nodes to refer them using its title"""
if self.is_enumerable_node(node):
_, title_getter = self.enumerable_nodes.get(node.__class__, (None, None))
@@ -928,10 +941,10 @@ class StandardDomain(Domain):
return None
def get_enumerable_node_type(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get type of enumerable nodes."""
def has_child(node, cls):
- # type: (nodes.Node, Type) -> bool
+ # type: (nodes.Element, Type) -> bool
return any(isinstance(child, cls) for child in node)
if isinstance(node, nodes.section):
@@ -946,7 +959,7 @@ class StandardDomain(Domain):
return figtype
def get_figtype(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
"""Get figure type of nodes.
.. deprecated:: 1.8
@@ -957,7 +970,7 @@ class StandardDomain(Domain):
return self.get_enumerable_node_type(node)
def get_fignumber(self, env, builder, figtype, docname, target_node):
- # type: (BuildEnvironment, Builder, unicode, unicode, nodes.Node) -> Tuple[int, ...]
+ # type: (BuildEnvironment, Builder, str, str, nodes.Element) -> Tuple[int, ...]
if figtype == 'section':
if builder.name == 'latex':
return tuple()
@@ -980,7 +993,7 @@ class StandardDomain(Domain):
raise ValueError
def get_full_qualified_name(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
if node.get('reftype') == 'option':
progname = node.get('std:program')
command = ws_re.split(node.get('reftarget'))
@@ -996,7 +1009,7 @@ class StandardDomain(Domain):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_domain(StandardDomain)
return {
diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py
index 058cbc8e9..d31624485 100644
--- a/sphinx/environment/__init__.py
+++ b/sphinx/environment/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment
~~~~~~~~~~~~~~~~~~
@@ -10,31 +9,25 @@
"""
import os
-import re
+import pickle
import sys
import warnings
from collections import defaultdict
from copy import copy
+from io import BytesIO
from os import path
-from docutils.utils import get_source_line
-from six import BytesIO, next
-from six.moves import cPickle as pickle
-
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx20Warning, RemovedInSphinx30Warning
-from sphinx.environment.adapters.indexentries import IndexEntries
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.environment.adapters.toctree import TocTree
from sphinx.errors import SphinxError, BuildEnvironmentError, DocumentError, ExtensionError
from sphinx.locale import __
from sphinx.transforms import SphinxTransformer
-from sphinx.util import get_matching_docs, DownloadFiles, FilenameUniqDict
+from sphinx.util import DownloadFiles, FilenameUniqDict
from sphinx.util import logging
from sphinx.util.docutils import LoggingReporter
from sphinx.util.i18n import find_catalog_files
-from sphinx.util.matching import compile_matchers
from sphinx.util.nodes import is_translatable
-from sphinx.util.osutil import SEP, relpath
from sphinx.util.websupport import is_commentable
if False:
@@ -45,6 +38,7 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
from sphinx.domains import Domain # NOQA
+ from sphinx.project import Project # NOQA
logger = logging.getLogger(__name__)
@@ -61,13 +55,11 @@ default_settings = {
'halt_level': 5,
'file_insertion_enabled': True,
'smartquotes_locales': [],
-}
+} # type: Dict[str, Any]
# This is increased every time an environment attribute is added
# or changed to properly invalidate pickle files.
-#
-# NOTE: increase base version by 2 to have distinct numbers for Py2 and 3
-ENV_VERSION = 54 + (sys.version_info[0] - 2)
+ENV_VERSION = 56
# config status
CONFIG_OK = 1
@@ -86,7 +78,7 @@ versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
-} # type: Dict[unicode, Union[bool, Callable]]
+}
class NoUri(Exception):
@@ -94,25 +86,26 @@ class NoUri(Exception):
pass
-class BuildEnvironment(object):
+class BuildEnvironment:
"""
The environment in which the ReST files are translated.
Stores an inventory of cross-file targets and provides doctree
transformations to resolve links to them.
"""
- domains = None # type: Dict[unicode, Domain]
+ domains = None # type: Dict[str, Domain]
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
def __init__(self, app=None):
# type: (Sphinx) -> None
self.app = None # type: Sphinx
- self.doctreedir = None # type: unicode
- self.srcdir = None # type: unicode
+ self.doctreedir = None # type: str
+ self.srcdir = None # type: str
self.config = None # type: Config
self.config_status = None # type: int
- self.version = None # type: Dict[unicode, unicode]
+ self.project = None # type: Project
+ self.version = None # type: Dict[str, str]
# the method of doctree versioning; see set_versioning_method
self.versioning_condition = None # type: Union[bool, Callable]
@@ -125,68 +118,63 @@ class BuildEnvironment(object):
self.settings = default_settings.copy()
self.settings['env'] = self
- # the function to write warning messages with
- self._warnfunc = None # type: Callable
-
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
- self.found_docs = set() # type: Set[unicode]
- # contains all existing docnames
- self.all_docs = {} # type: Dict[unicode, float]
+ self.all_docs = {} # type: Dict[str, float]
# docname -> mtime at the time of reading
# contains all read docnames
- self.dependencies = defaultdict(set) # type: Dict[unicode, Set[unicode]]
+ self.dependencies = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of dependent file
# names, relative to documentation root
- self.included = defaultdict(set) # type: Dict[unicode, Set[unicode]]
+ self.included = defaultdict(set) # type: Dict[str, Set[str]]
# docname -> set of included file
# docnames included from other documents
- self.reread_always = set() # type: Set[unicode]
+ self.reread_always = set() # type: Set[str]
# docnames to re-read unconditionally on
# next build
# File metadata
- self.metadata = defaultdict(dict) # type: Dict[unicode, Dict[unicode, Any]]
+ self.metadata = defaultdict(dict) # type: Dict[str, Dict[str, Any]]
# docname -> dict of metadata items
# TOC inventory
- self.titles = {} # type: Dict[unicode, nodes.Node]
+ self.titles = {} # type: Dict[str, nodes.title]
# docname -> title node
- self.longtitles = {} # type: Dict[unicode, nodes.Node]
+ self.longtitles = {} # type: Dict[str, nodes.title]
# docname -> title node; only different if
# set differently with title directive
- self.tocs = {} # type: Dict[unicode, nodes.Node]
+ self.tocs = {} # type: Dict[str, nodes.bullet_list]
# docname -> table of contents nodetree
- self.toc_num_entries = {} # type: Dict[unicode, int]
+ self.toc_num_entries = {} # type: Dict[str, int]
# docname -> number of real entries
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
- self.toc_secnumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ self.toc_secnumbers = {} # type: Dict[str, Dict[str, Tuple[int, ...]]]
# docname -> dict of sectionid -> number
- self.toc_fignumbers = {} # type: Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
+ self.toc_fignumbers = {} # type: Dict[str, Dict[str, Dict[str, Tuple[int, ...]]]]
# docname -> dict of figtype ->
# dict of figureid -> number
- self.toctree_includes = {} # type: Dict[unicode, List[unicode]]
+ self.toctree_includes = {} # type: Dict[str, List[str]]
# docname -> list of toctree includefiles
- self.files_to_rebuild = {} # type: Dict[unicode, Set[unicode]]
+ self.files_to_rebuild = {} # type: Dict[str, Set[str]]
# docname -> set of files
# (containing its TOCs) to rebuild too
- self.glob_toctrees = set() # type: Set[unicode]
+ self.glob_toctrees = set() # type: Set[str]
# docnames that have :glob: toctrees
- self.numbered_toctrees = set() # type: Set[unicode]
+ self.numbered_toctrees = set() # type: Set[str]
# docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
- self.domaindata = {} # type: Dict[unicode, Dict]
+ self.domaindata = {} # type: Dict[str, Dict]
# domainname -> domain-specific dict
# Other inventories
- self.indexentries = {} # type: Dict[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]] # NOQA
+ self.indexentries = {} # type: Dict[str, List[Tuple[str, str, str, str, str]]]
# docname -> list of
- # (type, unicode, target, aliasname)
+ # (type, str, target, aliasname)
# these map absolute path -> (docnames, unique filename)
self.images = FilenameUniqDict() # type: FilenameUniqDict
@@ -194,14 +182,14 @@ class BuildEnvironment(object):
# filename -> (set of docnames, destination)
# the original URI for images
- self.original_image_uri = {} # type: Dict[unicode, unicode]
+ self.original_image_uri = {} # type: Dict[str, str]
# temporary data storage while reading a document
- self.temp_data = {} # type: Dict[unicode, Any]
+ self.temp_data = {} # type: Dict[str, Any]
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
- self.ref_context = {} # type: Dict[unicode, Any]
+ self.ref_context = {} # type: Dict[str, Any]
# set up environment
if app:
@@ -226,9 +214,13 @@ class BuildEnvironment(object):
elif self.srcdir and self.srcdir != app.srcdir:
raise BuildEnvironmentError(__('source directory has changed'))
+ if self.project:
+ app.project.restore(self.project)
+
self.app = app
self.doctreedir = app.doctreedir
self.srcdir = app.srcdir
+ self.project = app.project
self.version = app.registry.get_envversion(app)
# initialize domains
@@ -273,13 +265,8 @@ class BuildEnvironment(object):
# Allow to disable by 3rd party extension (workaround)
self.settings.setdefault('smart_quotes', True)
- def set_warnfunc(self, func):
- # type: (Callable) -> None
- warnings.warn('env.set_warnfunc() is now deprecated. Use sphinx.util.logging instead.',
- RemovedInSphinx20Warning, stacklevel=2)
-
def set_versioning_method(self, method, compare):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
@@ -297,23 +284,8 @@ class BuildEnvironment(object):
self.versioning_condition = condition
self.versioning_compare = compare
- def warn(self, docname, msg, lineno=None, **kwargs):
- # type: (unicode, unicode, int, Any) -> None
- """Emit a warning.
-
- This differs from using ``app.warn()`` in that the warning may not
- be emitted instantly, but collected for emitting all warnings after
- the update of the environment.
- """
- self.app.warn(msg, location=(docname, lineno), **kwargs) # type: ignore
-
- def warn_node(self, msg, node, **kwargs):
- # type: (unicode, nodes.Node, Any) -> None
- """Like :meth:`warn`, but with source information taken from *node*."""
- self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)
-
def clear_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
@@ -324,7 +296,7 @@ class BuildEnvironment(object):
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
- # type: (List[unicode], BuildEnvironment, Sphinx) -> None
+ # type: (List[str], BuildEnvironment, Sphinx) -> None
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
@@ -346,20 +318,15 @@ class BuildEnvironment(object):
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
- # type: (unicode) -> Optional[unicode]
+ # type: (str) -> Optional[str]
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
"""
- if filename.startswith(self.srcdir):
- filename = relpath(filename, self.srcdir)
- for suffix in self.config.source_suffix:
- if filename.endswith(suffix):
- return filename[:-len(suffix)]
- return None
+ return self.project.path2doc(filename)
def doc2path(self, docname, base=True, suffix=None):
- # type: (unicode, Union[bool, unicode], unicode) -> unicode
+ # type: (str, Union[bool, str], str) -> str
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
@@ -367,24 +334,23 @@ class BuildEnvironment(object):
If *base* is a path string, return absolute path under that.
If *suffix* is not None, add it instead of config.source_suffix.
"""
- docname = docname.replace(SEP, path.sep)
- if suffix is None:
- # Use first candidate if there is not a file for any suffix
- suffix = next(iter(self.config.source_suffix))
- for candidate_suffix in self.config.source_suffix:
- if path.isfile(path.join(self.srcdir, docname) +
- candidate_suffix):
- suffix = candidate_suffix
- break
- if base is True:
- return path.join(self.srcdir, docname) + suffix
- elif base is None:
- return docname + suffix
- else:
- return path.join(base, docname) + suffix # type: ignore
+ if suffix:
+ warnings.warn('The suffix argument for doc2path() is deprecated.',
+ RemovedInSphinx40Warning)
+ if base not in (True, None):
+ warnings.warn('The string style base argument for doc2path() is deprecated.',
+ RemovedInSphinx40Warning)
+
+ pathname = self.project.doc2path(docname, base is True)
+ if suffix:
+ filename, _ = path.splitext(pathname)
+ pathname = filename + suffix
+ if base and base is not True:
+ pathname = path.join(base, pathname) # type: ignore
+ return pathname
def relfn2path(self, filename, docname=None):
- # type: (unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (str, str) -> Tuple[str, str]
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
@@ -406,7 +372,13 @@ class BuildEnvironment(object):
# the source directory is a bytestring with non-ASCII characters;
# let's try to encode the rel_fn in the file system encoding
enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
- return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn))
+ return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn)) # type: ignore
+
+ @property
+ def found_docs(self):
+ # type: () -> Set[str]
+ """contains all existing docnames."""
+ return self.project.docnames
def find_files(self, config, builder):
# type: (Config, Builder) -> None
@@ -414,19 +386,10 @@ class BuildEnvironment(object):
self.found_docs.
"""
try:
- matchers = compile_matchers(
- config.exclude_patterns[:] +
- config.templates_path +
- builder.get_asset_paths() +
- ['**/_sources', '.#*', '**/.#*', '*.lproj/**']
- )
- self.found_docs = set()
- for docname in get_matching_docs(self.srcdir, config.source_suffix, # type: ignore
- exclude_matchers=matchers):
- if os.access(self.doc2path(docname), os.R_OK):
- self.found_docs.add(docname)
- else:
- logger.warning(__("document not readable. Ignored."), location=docname)
+ exclude_paths = (self.config.exclude_patterns +
+ self.config.templates_path +
+ builder.get_asset_paths())
+ self.project.discover(exclude_paths)
# Current implementation is applying translated messages in the reading
# phase.Therefore, in order to apply the updated message catalog, it is
@@ -445,17 +408,17 @@ class BuildEnvironment(object):
self.config.gettext_compact)
for filename in catalog_files:
self.dependencies[docname].add(filename)
- except EnvironmentError as exc:
+ except OSError as exc:
raise DocumentError(__('Failed to scan documents in %s: %r') % (self.srcdir, exc))
def get_outdated_files(self, config_changed):
- # type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]]
+ # type: (bool) -> Tuple[Set[str], Set[str], Set[str]]
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
- added = set() # type: Set[unicode]
- changed = set() # type: Set[unicode]
+ added = set() # type: Set[str]
+ changed = set() # type: Set[str]
if config_changed:
# config values affect e.g. substitutions
@@ -466,8 +429,8 @@ class BuildEnvironment(object):
added.add(docname)
continue
# if the doctree file is not there, rebuild
- if not path.isfile(self.doc2path(docname, self.doctreedir,
- '.doctree')):
+ filename = path.join(self.doctreedir, docname + '.doctree')
+ if not path.isfile(filename):
changed.add(docname)
continue
# check the "reread always" list
@@ -492,7 +455,7 @@ class BuildEnvironment(object):
if depmtime > mtime:
changed.add(docname)
break
- except EnvironmentError:
+ except OSError:
# give it another chance
changed.add(docname)
break
@@ -500,8 +463,8 @@ class BuildEnvironment(object):
return added, changed, removed
def check_dependents(self, app, already):
- # type: (Sphinx, Set[unicode]) -> Iterator[unicode]
- to_rewrite = [] # type: List[unicode]
+ # type: (Sphinx, Set[str]) -> Iterator[str]
+ to_rewrite = [] # type: List[str]
for docnames in app.emit('env-get-updated', self):
to_rewrite.extend(docnames)
for docname in set(to_rewrite):
@@ -511,7 +474,7 @@ class BuildEnvironment(object):
# --------- SINGLE FILE READING --------------------------------------------
def prepare_settings(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Prepare to set up environment for reading."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
@@ -523,12 +486,12 @@ class BuildEnvironment(object):
@property
def docname(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
def new_serialno(self, category=''):
- # type: (unicode) -> int
+ # type: (str) -> int
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
@@ -539,7 +502,7 @@ class BuildEnvironment(object):
return cur
def note_dependency(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
@@ -549,7 +512,7 @@ class BuildEnvironment(object):
self.dependencies[self.docname].add(filename)
def note_included(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add *filename* as a included from other document.
This means the document is not orphaned.
@@ -565,34 +528,8 @@ class BuildEnvironment(object):
"""
self.reread_always.add(self.docname)
- def note_toctree(self, docname, toctreenode):
- # type: (unicode, addnodes.toctree) -> None
- """Note a TOC tree directive in a document and gather information about
- file relations from it.
- """
- warnings.warn('env.note_toctree() is deprecated. '
- 'Use sphinx.environment.adapters.toctree.TocTree instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- TocTree(self).note(docname, toctreenode)
-
- def get_toc_for(self, docname, builder):
- # type: (unicode, Builder) -> Dict[unicode, nodes.Node]
- """Return a TOC nodetree -- for use on the same page only!"""
- warnings.warn('env.get_toc_for() is deprecated. '
- 'Use sphinx.environment.adapters.toctre.TocTree instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- return TocTree(self).get_toc_for(docname, builder)
-
- def get_toctree_for(self, docname, builder, collapse, **kwds):
- # type: (unicode, Builder, bool, Any) -> addnodes.toctree
- """Return the global TOC nodetree."""
- warnings.warn('env.get_toctree_for() is deprecated. '
- 'Use sphinx.environment.adapters.toctre.TocTree instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- return TocTree(self).get_toctree_for(docname, builder, collapse, **kwds)
-
def get_domain(self, domainname):
- # type: (unicode) -> Domain
+ # type: (str) -> Domain
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
@@ -605,10 +542,10 @@ class BuildEnvironment(object):
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
- # type: (unicode) -> nodes.Node
+ # type: (str) -> nodes.document
"""Read the doctree for a file from the pickle and return it."""
- doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
- with open(doctree_filename, 'rb') as f:
+ filename = path.join(self.doctreedir, docname + '.doctree')
+ with open(filename, 'rb') as f:
doctree = pickle.load(f)
doctree.settings.env = self
doctree.reporter = LoggingReporter(self.doc2path(docname))
@@ -616,7 +553,7 @@ class BuildEnvironment(object):
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
- # type: (unicode, Builder, nodes.Node, bool, bool) -> nodes.Node
+ # type: (str, Builder, nodes.document, bool, bool) -> nodes.document
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
@@ -640,7 +577,7 @@ class BuildEnvironment(object):
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
+ # type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -657,11 +594,11 @@ class BuildEnvironment(object):
includehidden)
def resolve_references(self, doctree, fromdocname, builder):
- # type: (nodes.Node, unicode, Builder) -> None
+ # type: (nodes.document, str, Builder) -> None
self.apply_post_transforms(doctree, fromdocname)
def apply_post_transforms(self, doctree, docname):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.document, str) -> None
"""Apply all post-transforms."""
try:
# set env.docname during applying post-transforms
@@ -678,22 +615,12 @@ class BuildEnvironment(object):
# allow custom references to be resolved
self.app.emit('doctree-resolved', doctree, docname)
- def create_index(self, builder, group_entries=True,
- _fixre=re.compile(r'(.*) ([(][^()]*[)])')):
- # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA
- warnings.warn('env.create_index() is deprecated. '
- 'Use sphinx.environment.adapters.indexentreis.IndexEntries instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- return IndexEntries(self).create_index(builder,
- group_entries=group_entries,
- _fixre=_fixre)
-
def collect_relations(self):
- # type: () -> Dict[unicode, List[unicode]]
+ # type: () -> Dict[str, List[str]]
traversed = set()
def traverse_toctree(parent, docname):
- # type: (unicode, unicode) -> Iterator[Tuple[unicode, unicode]]
+ # type: (str, str) -> Iterator[Tuple[str, str]]
if parent == docname:
logger.warning(__('self referenced toctree found. Ignored.'), location=docname)
return
@@ -747,31 +674,31 @@ class BuildEnvironment(object):
# --------- METHODS FOR COMPATIBILITY --------------------------------------
def update(self, config, srcdir, doctreedir):
- # type: (Config, unicode, unicode) -> List[unicode]
+ # type: (Config, str, str) -> List[str]
warnings.warn('env.update() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder.read()
def _read_serial(self, docnames, app):
- # type: (List[unicode], Sphinx) -> None
+ # type: (List[str], Sphinx) -> None
warnings.warn('env._read_serial() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_serial(docnames)
def _read_parallel(self, docnames, app, nproc):
- # type: (List[unicode], Sphinx, int) -> None
+ # type: (List[str], Sphinx, int) -> None
warnings.warn('env._read_parallel() is deprecated. Please use builder.read() instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.app.builder._read_parallel(docnames, nproc)
def read_doc(self, docname, app=None):
- # type: (unicode, Sphinx) -> None
+ # type: (str, Sphinx) -> None
warnings.warn('env.read_doc() is deprecated. Please use builder.read_doc() instead.',
RemovedInSphinx30Warning, stacklevel=2)
self.app.builder.read_doc(docname)
def write_doctree(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.document) -> None
warnings.warn('env.write_doctree() is deprecated. '
'Please use builder.write_doctree() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -779,7 +706,7 @@ class BuildEnvironment(object):
@property
def _nitpick_ignore(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('env._nitpick_ignore is deprecated. '
'Please use config.nitpick_ignore instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -796,7 +723,7 @@ class BuildEnvironment(object):
except Exception as exc:
# This can happen for example when the pickle is from a
# different version of Sphinx.
- raise IOError(exc)
+ raise OSError(exc)
if app:
env.app = app
env.config.values = app.config.values
@@ -804,7 +731,7 @@ class BuildEnvironment(object):
@classmethod
def loads(cls, string, app=None):
- # type: (unicode, Sphinx) -> BuildEnvironment
+ # type: (bytes, Sphinx) -> BuildEnvironment
warnings.warn('BuildEnvironment.loads() is deprecated. '
'Please use pickle.loads() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -813,7 +740,7 @@ class BuildEnvironment(object):
@classmethod
def frompickle(cls, filename, app):
- # type: (unicode, Sphinx) -> BuildEnvironment
+ # type: (str, Sphinx) -> BuildEnvironment
warnings.warn('BuildEnvironment.frompickle() is deprecated. '
'Please use pickle.load() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -830,7 +757,7 @@ class BuildEnvironment(object):
@classmethod
def dumps(cls, env):
- # type: (BuildEnvironment) -> unicode
+ # type: (BuildEnvironment) -> bytes
warnings.warn('BuildEnvironment.dumps() is deprecated. '
'Please use pickle.dumps() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -839,7 +766,7 @@ class BuildEnvironment(object):
return io.getvalue()
def topickle(self, filename):
- # type: (unicode) -> None
+ # type: (str) -> None
warnings.warn('env.topickle() is deprecated. '
'Please use pickle.dump() instead.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -848,14 +775,14 @@ class BuildEnvironment(object):
@property
def versionchanges(self):
- # type: () -> Dict[unicode, List[Tuple[unicode, unicode, int, unicode, unicode, unicode]]] # NOQA
+ # type: () -> Dict[str, List[Tuple[str, str, int, str, str, str]]]
warnings.warn('env.versionchanges() is deprecated. '
'Please use ChangeSetDomain instead.',
RemovedInSphinx30Warning, stacklevel=2)
return self.domaindata['changeset']['changes']
def note_versionchange(self, type, version, node, lineno):
- # type: (unicode, unicode, nodes.Node, int) -> None
+ # type: (str, str, addnodes.versionmodified, int) -> None
warnings.warn('env.note_versionchange() is deprecated. '
'Please use ChangeSetDomain.note_changeset() instead.',
RemovedInSphinx30Warning, stacklevel=2)
diff --git a/sphinx/environment/adapters/__init__.py b/sphinx/environment/adapters/__init__.py
index f945c4250..5e2fa3b2f 100644
--- a/sphinx/environment/adapters/__init__.py
+++ b/sphinx/environment/adapters/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.adapters
~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/environment/adapters/asset.py b/sphinx/environment/adapters/asset.py
index 91f2cf8eb..c20002638 100644
--- a/sphinx/environment/adapters/asset.py
+++ b/sphinx/environment/adapters/asset.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.adapters.asset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -14,13 +13,13 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
-class ImageAdapter(object):
+class ImageAdapter:
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env
def get_original_image_uri(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Get the original image URI."""
while name in self.env.original_image_uri:
name = self.env.original_image_uri[name]
diff --git a/sphinx/environment/adapters/indexentries.py b/sphinx/environment/adapters/indexentries.py
index 7c31fc3d5..4ca1c71f6 100644
--- a/sphinx/environment/adapters/indexentries.py
+++ b/sphinx/environment/adapters/indexentries.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.adapters.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -13,8 +12,6 @@ import re
import unicodedata
from itertools import groupby
-from six import text_type, iteritems
-
from sphinx.locale import _, __
from sphinx.util import split_into, logging
@@ -27,25 +24,25 @@ if False:
logger = logging.getLogger(__name__)
-class IndexEntries(object):
+class IndexEntries:
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
- # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, Any]]]] # NOQA
+ # type: (Builder, bool, Pattern) -> List[Tuple[str, List[Tuple[str, Any]]]]
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
- new = {} # type: Dict[unicode, List]
+ new = {} # type: Dict[str, List]
def add_entry(word, subword, main, link=True, dic=new, key=None):
- # type: (unicode, unicode, unicode, bool, Dict, unicode) -> None
+ # type: (str, str, str, bool, Dict, str) -> None
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
- word = text_type(word)
+ word = str(word)
entry = dic.get(word)
if not entry:
dic[word] = entry = [[], {}, key]
@@ -60,7 +57,7 @@ class IndexEntries(object):
# maintain links in sorted/deterministic order
bisect.insort(entry[0], (main, uri))
- for fn, entries in iteritems(self.env.indexentries):
+ for fn, entries in self.env.indexentries.items():
# new entry types must be listed in directives/other.py!
for type, value, tid, main, index_key in entries:
try:
@@ -96,13 +93,13 @@ class IndexEntries(object):
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry):
- # type: (Tuple[unicode, List]) -> Tuple[unicode, unicode]
+ # type: (Tuple[str, List]) -> Tuple[str, str]
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
key = category_key
lckey = unicodedata.normalize('NFD', key.lower())
- if lckey.startswith(u'\N{RIGHT-TO-LEFT MARK}'):
+ if lckey.startswith('\N{RIGHT-TO-LEFT MARK}'):
lckey = lckey[1:]
if lckey[0:1].isalpha() or lckey.startswith('_'):
lckey = chr(127) + lckey
@@ -119,8 +116,8 @@ class IndexEntries(object):
# func()
# (in module foo)
# (in module bar)
- oldkey = '' # type: unicode
- oldsubitems = None # type: Dict[unicode, List]
+ oldkey = ''
+ oldsubitems = None # type: Dict[str, List]
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
@@ -143,13 +140,13 @@ class IndexEntries(object):
# group the entries by letter
def keyfunc2(item):
- # type: (Tuple[unicode, List]) -> unicode
+ # type: (Tuple[str, List]) -> str
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
- v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1]))
+ v[1] = sorted((si, se) for (si, (se, void, void)) in v[1].items())
if v[2] is None:
# now calculate the key
- if k.startswith(u'\N{RIGHT-TO-LEFT MARK}'):
+ if k.startswith('\N{RIGHT-TO-LEFT MARK}'):
k = k[1:]
letter = unicodedata.normalize('NFD', k[0])[0].upper()
if letter.isalpha() or letter == '_':
diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py
index 565396ec4..76e8a7778 100644
--- a/sphinx/environment/adapters/toctree.py
+++ b/sphinx/environment/adapters/toctree.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.adapters.toctree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,8 +8,9 @@
:license: BSD, see LICENSE for details.
"""
+from typing import Iterable, cast
+
from docutils import nodes
-from six import iteritems
from sphinx import addnodes
from sphinx.locale import __
@@ -27,13 +27,13 @@ if False:
logger = logging.getLogger(__name__)
-class TocTree(object):
+class TocTree:
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env
def note(self, docname, toctreenode):
- # type: (unicode, addnodes.toctree) -> None
+ # type: (str, addnodes.toctree) -> None
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
@@ -50,7 +50,7 @@ class TocTree(object):
def resolve(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
- # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
+ # type: (str, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Element
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -87,7 +87,7 @@ class TocTree(object):
excluded = Matcher(self.env.config.exclude_patterns)
def _toctree_add_classes(node, depth):
- # type: (nodes.Node, int) -> None
+ # type: (nodes.Element, int) -> None
"""Add 'toctree-l%d' and 'current' classes to the toctree."""
for subnode in node.children:
if isinstance(subnode, (addnodes.compact_paragraph,
@@ -105,7 +105,7 @@ class TocTree(object):
if not subnode['anchorname']:
# give the whole branch a 'current' class
# (useful for styling it differently)
- branchnode = subnode
+ branchnode = subnode # type: nodes.Element
while branchnode:
branchnode['classes'].append('current')
branchnode = branchnode.parent
@@ -118,10 +118,10 @@ class TocTree(object):
subnode = subnode.parent
def _entries_from_toctree(toctreenode, parents, separate=False, subtree=False):
- # type: (addnodes.toctree, List[nodes.Node], bool, bool) -> List[nodes.Node]
+ # type: (addnodes.toctree, List[str], bool, bool) -> List[nodes.Element]
"""Return TOC entries for a toctree node."""
refs = [(e[0], e[1]) for e in toctreenode['entries']]
- entries = []
+ entries = [] # type: List[nodes.Element]
for (title, ref) in refs:
try:
refdoc = None
@@ -184,14 +184,20 @@ class TocTree(object):
# if titles_only is given, only keep the main title and
# sub-toctrees
if titles_only:
+ # children of toc are:
+ # - list_item + compact_paragraph + (reference and subtoc)
+ # - only + subtoc
+ # - toctree
+ children = cast(Iterable[nodes.Element], toc)
+
# delete everything but the toplevel title(s)
# and toctrees
- for toplevel in toc:
+ for toplevel in children:
# nodes with length 1 don't have any children anyway
if len(toplevel) > 1:
subtrees = toplevel.traverse(addnodes.toctree)
if subtrees:
- toplevel[1][:] = subtrees
+ toplevel[1][:] = subtrees # type: ignore
else:
toplevel.pop(1)
# resolve all sub-toctrees
@@ -199,16 +205,17 @@ class TocTree(object):
if not (subtocnode.get('hidden', False) and
not includehidden):
i = subtocnode.parent.index(subtocnode) + 1
- for item in _entries_from_toctree(
+ for entry in _entries_from_toctree(
subtocnode, [refdoc] + parents,
subtree=True):
- subtocnode.parent.insert(i, item)
+ subtocnode.parent.insert(i, entry)
i += 1
subtocnode.parent.remove(subtocnode)
if separate:
entries.append(toc)
else:
- entries.extend(toc.children)
+ children = cast(Iterable[nodes.Element], toc)
+ entries.extend(children)
if not subtree and not separate:
ret = nodes.bullet_list()
ret += entries
@@ -237,8 +244,8 @@ class TocTree(object):
caption_node.rawsource = toctree['rawcaption']
if hasattr(toctree, 'uid'):
# move uid to caption_node to translate it
- caption_node.uid = toctree.uid
- del toctree.uid
+ caption_node.uid = toctree.uid # type: ignore
+ del toctree.uid # type: ignore
newnode += caption_node
newnode.extend(tocentries)
newnode['toctree'] = True
@@ -247,7 +254,7 @@ class TocTree(object):
_toctree_add_classes(newnode, 1)
self._toctree_prune(newnode, 1, prune and maxdepth or 0, collapse)
- if len(newnode[-1]) == 0: # No titles found
+ if isinstance(newnode[-1], nodes.Element) and len(newnode[-1]) == 0: # No titles found
return None
# set the target paths in the toctrees (they are not known at TOC
@@ -259,12 +266,12 @@ class TocTree(object):
return newnode
def get_toctree_ancestors(self, docname):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
parent = {}
- for p, children in iteritems(self.env.toctree_includes):
+ for p, children in self.env.toctree_includes.items():
for child in children:
parent[child] = p
- ancestors = [] # type: List[unicode]
+ ancestors = [] # type: List[str]
d = docname
while d in parent and d not in ancestors:
ancestors.append(d)
@@ -272,7 +279,7 @@ class TocTree(object):
return ancestors
def _toctree_prune(self, node, depth, maxdepth, collapse=False):
- # type: (nodes.Node, int, int, bool) -> None
+ # type: (nodes.Element, int, int, bool) -> None
"""Utility: Cut a TOC at a specified depth."""
for subnode in node.children[:]:
if isinstance(subnode, (addnodes.compact_paragraph,
@@ -294,7 +301,7 @@ class TocTree(object):
self._toctree_prune(subnode, depth + 1, maxdepth, collapse)
def get_toc_for(self, docname, builder):
- # type: (unicode, Builder) -> Dict[unicode, nodes.Node]
+ # type: (str, Builder) -> nodes.Node
"""Return a TOC nodetree -- for use on the same page only!"""
tocdepth = self.env.metadata[docname].get('tocdepth', 0)
try:
@@ -310,10 +317,10 @@ class TocTree(object):
return toc
def get_toctree_for(self, docname, builder, collapse, **kwds):
- # type: (unicode, Builder, bool, Any) -> nodes.Node
+ # type: (str, Builder, bool, Any) -> nodes.Element
"""Return the global TOC nodetree."""
doctree = self.env.get_doctree(self.env.config.master_doc)
- toctrees = []
+ toctrees = [] # type: List[nodes.Element]
if 'includehidden' not in kwds:
kwds['includehidden'] = True
if 'maxdepth' not in kwds:
diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py
index 9d9f5347c..ea6e34ce7 100644
--- a/sphinx/environment/collectors/__init__.py
+++ b/sphinx/environment/collectors/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,8 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-from six import itervalues
-
if False:
# For type annotation
from typing import Dict, List, Set # NOQA
@@ -19,7 +16,7 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
-class EnvironmentCollector(object):
+class EnvironmentCollector:
"""An EnvironmentCollector is a specific data collector from each document.
It gathers data and stores :py:class:`BuildEnvironment
@@ -28,7 +25,7 @@ class EnvironmentCollector(object):
entries and toctrees, etc.
"""
- listener_ids = None # type: Dict[unicode, int]
+ listener_ids = None # type: Dict[str, int]
def enable(self, app):
# type: (Sphinx) -> None
@@ -44,32 +41,32 @@ class EnvironmentCollector(object):
def disable(self, app):
# type: (Sphinx) -> None
assert self.listener_ids is not None
- for listener_id in itervalues(self.listener_ids):
+ for listener_id in self.listener_ids.values():
app.disconnect(listener_id)
self.listener_ids = None
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
"""Remove specified data of a document.
This method is called on the removal of the document."""
raise NotImplementedError
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
"""Merge in specified data regarding docnames from a different `BuildEnvironment`
object which coming from a subprocess in parallel builds."""
raise NotImplementedError
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Process a document and gather specific data from it.
This method is called after the document is read."""
raise NotImplementedError
def get_updated_docs(self, app, env):
- # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ # type: (Sphinx, BuildEnvironment) -> List[str]
"""Return a list of docnames to re-read.
This methods is called after reading the whole of documents (experimental).
@@ -77,7 +74,7 @@ class EnvironmentCollector(object):
return []
def get_outdated_docs(self, app, env, added, changed, removed):
- # type: (Sphinx, BuildEnvironment, unicode, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA
+ # type: (Sphinx, BuildEnvironment, str, Set[str], Set[str], Set[str]) -> List[str]
"""Return a list of docnames to re-read.
This methods is called before reading the documents.
diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py
index 725431dfa..1e7756914 100644
--- a/sphinx/environment/collectors/asset.py
+++ b/sphinx/environment/collectors/asset.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.asset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -15,7 +14,6 @@ from os import path
from docutils import nodes
from docutils.utils import relative_path
-from six import iteritems, itervalues
from sphinx import addnodes
from sphinx.environment.collectors import EnvironmentCollector
@@ -38,15 +36,15 @@ class ImageCollector(EnvironmentCollector):
"""Image files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.images.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.images.merge_other(docnames, other.images)
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Process and rewrite image URIs."""
docname = app.env.docname
@@ -55,7 +53,7 @@ class ImageCollector(EnvironmentCollector):
# choose the best image from these candidates. The special key * is
# set if there is only single candidate to be used by a writer.
# The special key ? is set for nonlocal URIs.
- candidates = {} # type: Dict[unicode, unicode]
+ candidates = {} # type: Dict[str, str]
node['candidates'] = candidates
imguri = node['uri']
if imguri.startswith('data:'):
@@ -87,7 +85,7 @@ class ImageCollector(EnvironmentCollector):
# map image paths to unique image names (so that they can be put
# into a single directory)
- for imgpath in itervalues(candidates):
+ for imgpath in candidates.values():
app.env.dependencies[docname].add(imgpath)
if not os.access(path.join(app.srcdir, imgpath), os.R_OK):
logger.warning(__('image file not readable: %s') % imgpath,
@@ -96,8 +94,8 @@ class ImageCollector(EnvironmentCollector):
app.env.images.add_file(docname, imgpath)
def collect_candidates(self, env, imgpath, candidates, node):
- # type: (BuildEnvironment, unicode, Dict[unicode, unicode], nodes.Node) -> None
- globbed = {} # type: Dict[unicode, List[unicode]]
+ # type: (BuildEnvironment, str, Dict[str, str], nodes.Node) -> None
+ globbed = {} # type: Dict[str, List[str]]
for filename in glob(imgpath):
new_imgpath = relative_path(path.join(env.srcdir, 'dummy'),
filename)
@@ -105,10 +103,10 @@ class ImageCollector(EnvironmentCollector):
mimetype = guess_mimetype(filename)
if mimetype not in candidates:
globbed.setdefault(mimetype, []).append(new_imgpath)
- except (OSError, IOError) as err:
+ except OSError as err:
logger.warning(__('image file %s not readable: %s') % (filename, err),
location=node, type='image', subtype='not_readable')
- for key, files in iteritems(globbed):
+ for key, files in globbed.items():
candidates[key] = sorted(files, key=len)[0] # select by similarity
@@ -116,15 +114,15 @@ class DownloadFileCollector(EnvironmentCollector):
"""Download files collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.dlfiles.purge_doc(docname)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
env.dlfiles.merge_other(docnames, other.dlfiles)
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Process downloadable file paths. """
for node in doctree.traverse(addnodes.download_reference):
targetname = node['reftarget']
diff --git a/sphinx/environment/collectors/dependencies.py b/sphinx/environment/collectors/dependencies.py
index de0b7c080..db599c37f 100644
--- a/sphinx/environment/collectors/dependencies.py
+++ b/sphinx/environment/collectors/dependencies.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,12 +8,13 @@
:license: BSD, see LICENSE for details.
"""
+import os
from os import path
from docutils.utils import relative_path
from sphinx.environment.collectors import EnvironmentCollector
-from sphinx.util.osutil import getcwd, fs_encoding
+from sphinx.util.osutil import fs_encoding
if False:
# For type annotation
@@ -28,19 +28,19 @@ class DependenciesCollector(EnvironmentCollector):
"""dependencies collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.dependencies.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
if docname in other.dependencies:
env.dependencies[docname] = other.dependencies[docname]
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Process docutils-generated dependency info."""
- cwd = getcwd()
+ cwd = os.getcwd()
frompath = path.join(path.normpath(app.srcdir), 'dummy')
deps = doctree.settings.record_dependencies
if not deps:
diff --git a/sphinx/environment/collectors/indexentries.py b/sphinx/environment/collectors/indexentries.py
index a9ba897d0..27d2c771b 100644
--- a/sphinx/environment/collectors/indexentries.py
+++ b/sphinx/environment/collectors/indexentries.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,8 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-from six import text_type
-
from sphinx import addnodes
from sphinx.environment.collectors import EnvironmentCollector
from sphinx.util import split_index_msg, logging
@@ -29,16 +26,16 @@ class IndexEntriesCollector(EnvironmentCollector):
name = 'indices'
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.indexentries.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.indexentries[docname] = other.indexentries[docname]
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
docname = app.env.docname
entries = app.env.indexentries[docname] = []
for node in doctree.traverse(addnodes.index):
@@ -46,15 +43,11 @@ class IndexEntriesCollector(EnvironmentCollector):
for entry in node['entries']:
split_index_msg(entry[0], entry[1])
except ValueError as exc:
- logger.warning(text_type(exc), location=node)
+ logger.warning(str(exc), location=node)
node.parent.remove(node)
else:
for entry in node['entries']:
- if len(entry) == 5:
- # Since 1.4: new index structure including index_key (5th column)
- entries.append(entry)
- else:
- entries.append(entry + (None,))
+ entries.append(entry)
def setup(app):
diff --git a/sphinx/environment/collectors/metadata.py b/sphinx/environment/collectors/metadata.py
index 7d54d2fe6..7031f50b9 100644
--- a/sphinx/environment/collectors/metadata.py
+++ b/sphinx/environment/collectors/metadata.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.metadata
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import List, cast
+
from docutils import nodes
from sphinx.environment.collectors import EnvironmentCollector
@@ -25,47 +26,46 @@ class MetadataCollector(EnvironmentCollector):
"""metadata collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.metadata.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.metadata[docname] = other.metadata[docname]
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Process the docinfo part of the doctree as metadata.
Keep processing minimal -- just return what docutils says.
"""
- md = app.env.metadata[app.env.docname]
- try:
- docinfo = doctree[0]
- except IndexError:
- # probably an empty document
- return
- if docinfo.__class__ is not nodes.docinfo:
- # nothing to see here
- return
- for node in docinfo:
- # nodes are multiply inherited...
- if isinstance(node, nodes.authors):
- md['authors'] = [author.astext() for author in node]
- elif isinstance(node, nodes.TextElement): # e.g. author
- md[node.__class__.__name__] = node.astext()
- else:
- name, body = node
- md[name.astext()] = body.astext()
- for name, value in md.items():
- if name in ('tocdepth',):
- try:
- value = int(value)
- except ValueError:
- value = 0
- md[name] = value
-
- del doctree[0]
+ if len(doctree) > 0 and isinstance(doctree[0], nodes.docinfo):
+ md = app.env.metadata[app.env.docname]
+ for node in doctree[0]:
+ # nodes are multiply inherited...
+ if isinstance(node, nodes.authors):
+ authors = cast(List[nodes.author], node)
+ md['authors'] = [author.astext() for author in authors]
+ elif isinstance(node, nodes.field):
+ assert len(node) == 2
+ field_name = cast(nodes.field_name, node[0])
+ field_body = cast(nodes.field_body, node[1])
+ md[field_name.astext()] = field_body.astext()
+ elif isinstance(node, nodes.TextElement):
+ # other children must be TextElement
+ # see: http://docutils.sourceforge.net/docs/ref/doctree.html#bibliographic-elements # NOQA
+ md[node.__class__.__name__] = node.astext()
+
+ for name, value in md.items():
+ if name in ('tocdepth',):
+ try:
+ value = int(value)
+ except ValueError:
+ value = 0
+ md[name] = value
+
+ doctree.pop(0)
def setup(app):
diff --git a/sphinx/environment/collectors/title.py b/sphinx/environment/collectors/title.py
index eb23b975f..9fcb98237 100644
--- a/sphinx/environment/collectors/title.py
+++ b/sphinx/environment/collectors/title.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.title
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -26,18 +25,18 @@ class TitleCollector(EnvironmentCollector):
"""title collector for sphinx.environment."""
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.titles.pop(docname, None)
env.longtitles.pop(docname, None)
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.titles[docname] = other.titles[docname]
env.longtitles[docname] = other.longtitles[docname]
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Add a title node to the document (just copy the first section title),
and store that title in the environment.
"""
diff --git a/sphinx/environment/collectors/toctree.py b/sphinx/environment/collectors/toctree.py
index fe67bbcba..b3a0df82f 100644
--- a/sphinx/environment/collectors/toctree.py
+++ b/sphinx/environment/collectors/toctree.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.environment.collectors.toctree
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,8 +8,9 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
-from six import iteritems
from sphinx import addnodes
from sphinx.environment.adapters.toctree import TocTree
@@ -21,17 +21,19 @@ from sphinx.util import url_re, logging
if False:
# For type annotation
- from typing import Any, Dict, List, Set, Tuple # NOQA
+ from typing import Any, Dict, List, Set, Tuple, Type, TypeVar # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ N = TypeVar('N')
+
logger = logging.getLogger(__name__)
class TocTreeCollector(EnvironmentCollector):
def clear_doc(self, app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
env.tocs.pop(docname, None)
env.toc_secnumbers.pop(docname, None)
env.toc_fignumbers.pop(docname, None)
@@ -46,7 +48,7 @@ class TocTreeCollector(EnvironmentCollector):
del env.files_to_rebuild[subfn]
def merge_other(self, app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Set[str], BuildEnvironment) -> None
for docname in docnames:
env.tocs[docname] = other.tocs[docname]
env.toc_num_entries[docname] = other.toc_num_entries[docname]
@@ -61,71 +63,72 @@ class TocTreeCollector(EnvironmentCollector):
env.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames))
def process_doc(self, app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Build a TOC from the doctree and store it in the inventory."""
docname = app.env.docname
numentries = [0] # nonlocal again...
def traverse_in_section(node, cls):
- # type: (nodes.Node, Any) -> List[nodes.Node]
+ # type: (nodes.Element, Type[N]) -> List[N]
"""Like traverse(), but stay within the same section."""
- result = []
+ result = [] # type: List[N]
if isinstance(node, cls):
result.append(node)
for child in node.children:
if isinstance(child, nodes.section):
continue
- result.extend(traverse_in_section(child, cls))
+ elif isinstance(child, nodes.Element):
+ result.extend(traverse_in_section(child, cls))
return result
def build_toc(node, depth=1):
- # type: (nodes.Node, int) -> List[nodes.Node]
- entries = []
+ # type: (nodes.Element, int) -> nodes.bullet_list
+ entries = [] # type: List[nodes.Element]
for sectionnode in node:
# find all toctree nodes in this section and add them
# to the toc (just copying the toctree node which is then
# resolved in self.get_and_resolve_doctree)
- if isinstance(sectionnode, addnodes.only):
+ if isinstance(sectionnode, nodes.section):
+ title = sectionnode[0]
+ # copy the contents of the section title, but without references
+ # and unnecessary stuff
+ visitor = SphinxContentsFilter(doctree)
+ title.walkabout(visitor)
+ nodetext = visitor.get_entry_text()
+ if not numentries[0]:
+ # for the very first toc entry, don't add an anchor
+ # as it is the file's title anyway
+ anchorname = ''
+ else:
+ anchorname = '#' + sectionnode['ids'][0]
+ numentries[0] += 1
+ # make these nodes:
+ # list_item -> compact_paragraph -> reference
+ reference = nodes.reference(
+ '', '', internal=True, refuri=docname,
+ anchorname=anchorname, *nodetext)
+ para = addnodes.compact_paragraph('', '', reference)
+ item = nodes.list_item('', para) # type: nodes.Element
+ sub_item = build_toc(sectionnode, depth + 1)
+ if sub_item:
+ item += sub_item
+ entries.append(item)
+ elif isinstance(sectionnode, addnodes.only):
onlynode = addnodes.only(expr=sectionnode['expr'])
blist = build_toc(sectionnode, depth)
if blist:
- onlynode += blist.children # type: ignore
+ onlynode += blist.children
entries.append(onlynode)
- continue
- if not isinstance(sectionnode, nodes.section):
+ elif isinstance(sectionnode, nodes.Element):
for toctreenode in traverse_in_section(sectionnode,
addnodes.toctree):
item = toctreenode.copy()
entries.append(item)
# important: do the inventory stuff
TocTree(app.env).note(docname, toctreenode)
- continue
- title = sectionnode[0]
- # copy the contents of the section title, but without references
- # and unnecessary stuff
- visitor = SphinxContentsFilter(doctree)
- title.walkabout(visitor)
- nodetext = visitor.get_entry_text()
- if not numentries[0]:
- # for the very first toc entry, don't add an anchor
- # as it is the file's title anyway
- anchorname = ''
- else:
- anchorname = '#' + sectionnode['ids'][0]
- numentries[0] += 1
- # make these nodes:
- # list_item -> compact_paragraph -> reference
- reference = nodes.reference(
- '', '', internal=True, refuri=docname,
- anchorname=anchorname, *nodetext)
- para = addnodes.compact_paragraph('', '', reference)
- item = nodes.list_item('', para)
- sub_item = build_toc(sectionnode, depth + 1)
- item += sub_item
- entries.append(item)
if entries:
return nodes.bullet_list('', *entries)
- return []
+ return None
toc = build_toc(doctree)
if toc:
app.env.tocs[docname] = toc
@@ -134,21 +137,21 @@ class TocTreeCollector(EnvironmentCollector):
app.env.toc_num_entries[docname] = numentries[0]
def get_updated_docs(self, app, env):
- # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ # type: (Sphinx, BuildEnvironment) -> List[str]
return self.assign_section_numbers(env) + self.assign_figure_numbers(env)
def assign_section_numbers(self, env):
- # type: (BuildEnvironment) -> List[unicode]
+ # type: (BuildEnvironment) -> List[str]
"""Assign a section number to each heading under a numbered toctree."""
# a list of all docnames whose section numbers changed
rewrite_needed = []
- assigned = set() # type: Set[unicode]
+ assigned = set() # type: Set[str]
old_secnumbers = env.toc_secnumbers
env.toc_secnumbers = {}
def _walk_toc(node, secnums, depth, titlenode=None):
- # type: (nodes.Node, Dict, int, nodes.Node) -> None
+ # type: (nodes.Element, Dict, int, nodes.title) -> None
# titlenode is the title of the document, it will get assigned a
# secnumber too, so that it shows up in next/prev/parent rellinks
for subnode in node.children:
@@ -168,13 +171,14 @@ class TocTreeCollector(EnvironmentCollector):
titlenode = None
elif isinstance(subnode, addnodes.compact_paragraph):
numstack[-1] += 1
+ reference = cast(nodes.reference, subnode[0])
if depth > 0:
number = list(numstack)
- secnums[subnode[0]['anchorname']] = tuple(numstack)
+ secnums[reference['anchorname']] = tuple(numstack)
else:
number = None
- secnums[subnode[0]['anchorname']] = None
- subnode[0]['secnumber'] = number
+ secnums[reference['anchorname']] = None
+ reference['secnumber'] = number
if titlenode:
titlenode['secnumber'] = number
titlenode = None
@@ -182,7 +186,7 @@ class TocTreeCollector(EnvironmentCollector):
_walk_toctree(subnode, depth)
def _walk_toctree(toctreenode, depth):
- # type: (nodes.Node, int) -> None
+ # type: (addnodes.toctree, int) -> None
if depth == 0:
return
for (title, ref) in toctreenode['entries']:
@@ -194,10 +198,10 @@ class TocTreeCollector(EnvironmentCollector):
'(nested numbered toctree?)'), ref,
location=toctreenode, type='toc', subtype='secnum')
elif ref in env.tocs:
- secnums = env.toc_secnumbers[ref] = {}
+ secnums = {} # type: Dict[str, Tuple[int, ...]]
+ env.toc_secnumbers[ref] = secnums
assigned.add(ref)
- _walk_toc(env.tocs[ref], secnums, depth,
- env.titles.get(ref))
+ _walk_toc(env.tocs[ref], secnums, depth, env.titles.get(ref))
if secnums != old_secnumbers.get(ref):
rewrite_needed.append(ref)
@@ -214,18 +218,18 @@ class TocTreeCollector(EnvironmentCollector):
return rewrite_needed
def assign_figure_numbers(self, env):
- # type: (BuildEnvironment) -> List[unicode]
+ # type: (BuildEnvironment) -> List[str]
"""Assign a figure number to each figure under a numbered toctree."""
rewrite_needed = []
- assigned = set() # type: Set[unicode]
+ assigned = set() # type: Set[str]
old_fignumbers = env.toc_fignumbers
env.toc_fignumbers = {}
- fignum_counter = {} # type: Dict[unicode, Dict[Tuple[int, ...], int]]
+ fignum_counter = {} # type: Dict[str, Dict[Tuple[int, ...], int]]
def get_figtype(node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Node) -> str
for domain in env.domains.values():
figtype = domain.get_enumerable_node_type(node)
if figtype:
@@ -234,7 +238,7 @@ class TocTreeCollector(EnvironmentCollector):
return None
def get_section_number(docname, section):
- # type: (unicode, nodes.Node) -> Tuple[int, ...]
+ # type: (str, nodes.section) -> Tuple[int, ...]
anchorname = '#' + section['ids'][0]
secnumbers = env.toc_secnumbers.get(docname, {})
if anchorname in secnumbers:
@@ -245,7 +249,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum or tuple()
def get_next_fignumber(figtype, secnum):
- # type: (unicode, Tuple[int, ...]) -> Tuple[int, ...]
+ # type: (str, Tuple[int, ...]) -> Tuple[int, ...]
counter = fignum_counter.setdefault(figtype, {})
secnum = secnum[:env.config.numfig_secnum_depth]
@@ -253,7 +257,7 @@ class TocTreeCollector(EnvironmentCollector):
return secnum + (counter[secnum],)
def register_fignumber(docname, secnum, figtype, fignode):
- # type: (unicode, Tuple[int], unicode, nodes.Node) -> None
+ # type: (str, Tuple[int, ...], str, nodes.Element) -> None
env.toc_fignumbers.setdefault(docname, {})
fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {})
figure_id = fignode['ids'][0]
@@ -261,7 +265,7 @@ class TocTreeCollector(EnvironmentCollector):
fignumbers[figure_id] = get_next_fignumber(figtype, secnum)
def _walk_doctree(docname, doctree, secnum):
- # type: (unicode, nodes.Node, Tuple[int, ...]) -> None
+ # type: (str, nodes.Element, Tuple[int, ...]) -> None
for subnode in doctree.children:
if isinstance(subnode, nodes.section):
next_secnum = get_section_number(docname, subnode)
@@ -269,7 +273,6 @@ class TocTreeCollector(EnvironmentCollector):
_walk_doctree(docname, subnode, next_secnum)
else:
_walk_doctree(docname, subnode, secnum)
- continue
elif isinstance(subnode, addnodes.toctree):
for title, subdocname in subnode['entries']:
if url_re.match(subdocname) or subdocname == 'self':
@@ -277,25 +280,23 @@ class TocTreeCollector(EnvironmentCollector):
continue
_walk_doc(subdocname, secnum)
+ elif isinstance(subnode, nodes.Element):
+ figtype = get_figtype(subnode)
+ if figtype and subnode['ids']:
+ register_fignumber(docname, secnum, figtype, subnode)
- continue
-
- figtype = get_figtype(subnode)
- if figtype and subnode['ids']:
- register_fignumber(docname, secnum, figtype, subnode)
-
- _walk_doctree(docname, subnode, secnum)
+ _walk_doctree(docname, subnode, secnum)
def _walk_doc(docname, secnum):
- # type: (unicode, Tuple[int]) -> None
+ # type: (str, Tuple[int, ...]) -> None
if docname not in assigned:
assigned.add(docname)
doctree = env.get_doctree(docname)
_walk_doctree(docname, doctree, secnum)
if env.config.numfig:
- _walk_doc(env.config.master_doc, tuple()) # type: ignore
- for docname, fignums in iteritems(env.toc_fignumbers):
+ _walk_doc(env.config.master_doc, tuple())
+ for docname, fignums in env.toc_fignumbers.items():
if fignums != old_fignumbers.get(docname):
rewrite_needed.append(docname)
diff --git a/sphinx/errors.py b/sphinx/errors.py
index 005605f1c..f2cbb843c 100644
--- a/sphinx/errors.py
+++ b/sphinx/errors.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.errors
~~~~~~~~~~~~~
@@ -53,8 +52,8 @@ class ExtensionError(SphinxError):
category = 'Extension error'
def __init__(self, message, orig_exc=None):
- # type: (unicode, Exception) -> None
- SphinxError.__init__(self, message)
+ # type: (str, Exception) -> None
+ super().__init__(message)
self.message = message
self.orig_exc = orig_exc
@@ -67,7 +66,7 @@ class ExtensionError(SphinxError):
def __str__(self):
# type: () -> str
- parent_str = SphinxError.__str__(self)
+ parent_str = super().__str__()
if self.orig_exc:
return '%s (exception: %s)' % (parent_str, self.orig_exc)
return parent_str
diff --git a/sphinx/events.py b/sphinx/events.py
index fb62d1776..c6aa396a3 100644
--- a/sphinx/events.py
+++ b/sphinx/events.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.events
~~~~~~~~~~~~~
@@ -10,12 +9,9 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
from collections import OrderedDict, defaultdict
-from six import itervalues
-
from sphinx.errors import ExtensionError
from sphinx.locale import __
@@ -42,24 +38,24 @@ core_events = {
'html-collect-pages': 'builder',
'html-page-context': 'pagename, context, doctree or None',
'build-finished': 'exception',
-} # type: Dict[unicode, unicode]
+}
-class EventManager(object):
+class EventManager:
def __init__(self):
# type: () -> None
self.events = core_events.copy()
- self.listeners = defaultdict(OrderedDict) # type: Dict[unicode, Dict[int, Callable]]
+ self.listeners = defaultdict(OrderedDict) # type: Dict[str, Dict[int, Callable]]
self.next_listener_id = 0
def add(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
if name in self.events:
raise ExtensionError(__('Event %r already present') % name)
self.events[name] = ''
def connect(self, name, callback):
- # type: (unicode, Callable) -> int
+ # type: (str, Callable) -> int
if name not in self.events:
raise ExtensionError(__('Unknown event name: %s') % name)
@@ -70,18 +66,18 @@ class EventManager(object):
def disconnect(self, listener_id):
# type: (int) -> None
- for event in itervalues(self.listeners):
+ for event in self.listeners.values():
event.pop(listener_id, None)
def emit(self, name, *args):
- # type: (unicode, Any) -> List
+ # type: (str, Any) -> List
results = []
- for callback in itervalues(self.listeners[name]):
+ for callback in self.listeners[name].values():
results.append(callback(*args))
return results
def emit_firstresult(self, name, *args):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
for result in self.emit(name, *args):
if result is not None:
return result
diff --git a/sphinx/ext/__init__.py b/sphinx/ext/__init__.py
index 440c01a15..04773d04d 100644
--- a/sphinx/ext/__init__.py
+++ b/sphinx/ext/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext
~~~~~~~~~~
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
index 0241d6239..6fe4921ab 100644
--- a/sphinx/ext/apidoc.py
+++ b/sphinx/ext/apidoc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.apidoc
~~~~~~~~~~~~~~~~~
@@ -15,8 +14,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import argparse
import glob
import locale
@@ -25,14 +22,12 @@ import sys
from fnmatch import fnmatch
from os import path
-from six import binary_type
-
import sphinx.locale
from sphinx import __display_version__, package_dir
from sphinx.cmd.quickstart import EXTENSIONS
from sphinx.locale import __
from sphinx.util import rst
-from sphinx.util.osutil import FileAvoidWrite, ensuredir, walk
+from sphinx.util.osutil import FileAvoidWrite, ensuredir
if False:
# For type annotation
@@ -54,7 +49,7 @@ PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
@@ -67,7 +62,7 @@ def makename(package, module):
def write_file(name, text, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
@@ -82,7 +77,7 @@ def write_file(name, text, opts):
def format_heading(level, text, escape=True):
- # type: (int, unicode, bool) -> unicode
+ # type: (int, str, bool) -> str
"""Create a heading of <level> [1, 2 or 3 supported]."""
if escape:
text = rst.escape(text)
@@ -91,7 +86,7 @@ def format_heading(level, text, escape=True):
def format_directive(module, package=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
@@ -100,7 +95,7 @@ def format_directive(module, package=None):
def create_module_file(package, module, opts):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
@@ -112,7 +107,7 @@ def create_module_file(package, module, opts):
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace, excludes=[]): # NOQA
- # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool, List[unicode]) -> None # NOQA
+ # type: (str, str, str, List[str], Any, List[str], bool, List[str]) -> None
"""Build the text of the file and write the file."""
text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
% makename(master_package, subroot))
@@ -172,14 +167,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_
def create_modules_toc_file(modules, opts, name='modules'):
- # type: (List[unicode], Any, unicode) -> None
+ # type: (List[str], Any, str) -> None
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header, escape=False)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
@@ -191,7 +186,7 @@ def create_modules_toc_file(modules, opts, name='modules'):
def shall_skip(module, opts, excludes=[]):
- # type: (unicode, Any, List[unicode]) -> bool
+ # type: (str, Any, List[str]) -> bool
"""Check if we want to skip this module."""
# skip if the file doesn't exist and not using implicit namespaces
if not opts.implicit_namespaces and not path.exists(module):
@@ -218,7 +213,7 @@ def shall_skip(module, opts, excludes=[]):
def recurse_tree(rootpath, excludes, opts):
- # type: (unicode, List[unicode], Any) -> List[unicode]
+ # type: (str, List[str], Any) -> List[str]
"""
Look for every file in the directory tree and create the corresponding
ReST files.
@@ -235,7 +230,7 @@ def recurse_tree(rootpath, excludes, opts):
root_package = None
toplevels = []
- for root, subs, files in walk(rootpath, followlinks=followlinks):
+ for root, subs, files in os.walk(rootpath, followlinks=followlinks):
# document only Python module files (that aren't excluded)
py_files = sorted(f for f in files
if path.splitext(f)[1] in PY_SUFFIXES and
@@ -253,7 +248,7 @@ def recurse_tree(rootpath, excludes, opts):
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
- exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
+ exclude_prefixes = ('.',) # type: Tuple[str, ...]
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
@@ -283,7 +278,7 @@ def recurse_tree(rootpath, excludes, opts):
def is_excluded(root, excludes):
- # type: (unicode, List[unicode]) -> bool
+ # type: (str, List[str]) -> bool
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
@@ -376,6 +371,8 @@ Note: By default this script will not overwrite already created files."""))
'defaults to --doc-version'))
group = parser.add_argument_group(__('extension options'))
+ group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
+ action='append', help=__('enable arbitrary extensions'))
for ext in EXTENSIONS:
group.add_argument('--ext-%s' % ext, action='append_const',
const='sphinx.ext.%s' % ext, dest='extensions',
@@ -412,46 +409,42 @@ def main(argv=sys.argv[1:]):
if args.full:
from sphinx.cmd import quickstart as qs
modules.sort()
- prev_module = '' # type: unicode
+ prev_module = ''
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
- d = dict(
- path = args.destdir,
- sep = False,
- dot = '_',
- project = args.header,
- author = args.author or 'Author',
- version = args.version or '',
- release = args.release or args.version or '',
- suffix = '.' + args.suffix,
- master = 'index',
- epub = True,
- extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
- 'sphinx.ext.todo'],
- makefile = True,
- batchfile = True,
- make_mode = True,
- mastertocmaxdepth = args.maxdepth,
- mastertoctree = text,
- language = 'en',
- module_path = rootpath,
- append_syspath = args.append_syspath,
- )
+ d = {
+ 'path': args.destdir,
+ 'sep': False,
+ 'dot': '_',
+ 'project': args.header,
+ 'author': args.author or 'Author',
+ 'version': args.version or '',
+ 'release': args.release or args.version or '',
+ 'suffix': '.' + args.suffix,
+ 'master': 'index',
+ 'epub': True,
+ 'extensions': ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
+ 'sphinx.ext.todo'],
+ 'makefile': True,
+ 'batchfile': True,
+ 'make_mode': True,
+ 'mastertocmaxdepth': args.maxdepth,
+ 'mastertoctree': text,
+ 'language': 'en',
+ 'module_path': rootpath,
+ 'append_syspath': args.append_syspath,
+ }
if args.extensions:
d['extensions'].extend(args.extensions)
- if isinstance(args.header, binary_type):
- d['project'] = d['project'].decode('utf-8')
- if isinstance(args.author, binary_type):
- d['author'] = d['author'].decode('utf-8')
- if isinstance(args.version, binary_type):
- d['version'] = d['version'].decode('utf-8')
- if isinstance(args.release, binary_type):
- d['release'] = d['release'].decode('utf-8')
+ for ext in d['extensions'][:]:
+ if ',' in ext:
+ d['extensions'].remove(ext)
+ d['extensions'].extend(ext.split(','))
if not args.dryrun:
qs.generate(d, silent=True, overwrite=args.force)
diff --git a/sphinx/ext/autodoc/__init__.py b/sphinx/ext/autodoc/__init__.py
index 1b1f7a24b..ebd3a37b7 100644
--- a/sphinx/ext/autodoc/__init__.py
+++ b/sphinx/ext/autodoc/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc
~~~~~~~~~~~~~~~~~~
@@ -13,23 +12,19 @@
import inspect
import re
-import sys
import warnings
from typing import Any
-from docutils.statemachine import ViewList
-from six import iteritems, itervalues, text_type, class_types, string_types
+from docutils.statemachine import StringList
import sphinx
-from sphinx.deprecation import RemovedInSphinx20Warning, RemovedInSphinx30Warning
-from sphinx.errors import ExtensionError
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.ext.autodoc.importer import mock, import_object, get_object_members
from sphinx.ext.autodoc.importer import _MockImporter # to keep compatibility # NOQA
-from sphinx.ext.autodoc.inspector import format_annotation, formatargspec # to keep compatibility # NOQA
from sphinx.locale import _, __
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.util import logging
-from sphinx.util import rpartition, force_decode
+from sphinx.util import rpartition
from sphinx.util.docstrings import prepare_docstring
from sphinx.util.inspect import Signature, isdescriptor, safe_getmembers, \
safe_getattr, object_description, is_builtin_class_method, \
@@ -75,7 +70,7 @@ INSTANCEATTR = object()
def members_option(arg):
- # type: (Any) -> Union[object, List[unicode]]
+ # type: (Any) -> Union[object, List[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -83,7 +78,7 @@ def members_option(arg):
def members_set_option(arg):
- # type: (Any) -> Union[object, Set[unicode]]
+ # type: (Any) -> Union[object, Set[str]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -124,62 +119,10 @@ def merge_special_members_option(options):
options['members'] = options['special-members']
-class AutodocReporter(object):
- """
- A reporter replacement that assigns the correct source name
- and line number to a system message, as recorded in a ViewList.
- """
- def __init__(self, viewlist, reporter):
- # type: (ViewList, Reporter) -> None
- warnings.warn('AutodocReporter is now deprecated. '
- 'Use sphinx.util.docutils.switch_source_input() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- self.viewlist = viewlist
- self.reporter = reporter
-
- def __getattr__(self, name):
- # type: (unicode) -> Any
- return getattr(self.reporter, name)
-
- def system_message(self, level, message, *children, **kwargs):
- # type: (int, unicode, Any, Any) -> nodes.system_message
- if 'line' in kwargs and 'source' not in kwargs:
- try:
- source, line = self.viewlist.items[kwargs['line']]
- except IndexError:
- pass
- else:
- kwargs['source'] = source
- kwargs['line'] = line
- return self.reporter.system_message(level, message,
- *children, **kwargs)
-
- def debug(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- if self.reporter.debug_flag:
- return self.system_message(0, *args, **kwargs)
-
- def info(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(1, *args, **kwargs)
-
- def warning(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(2, *args, **kwargs)
-
- def error(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(3, *args, **kwargs)
-
- def severe(self, *args, **kwargs):
- # type: (Any, Any) -> nodes.system_message
- return self.system_message(4, *args, **kwargs)
-
-
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
- # type: (int, int, unicode) -> Callable
+ # type: (int, int, str) -> Callable
"""Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -192,7 +135,7 @@ def cut_lines(pre, post=0, what=None):
This can (and should) be used in place of :confval:`automodule_skip_lines`.
"""
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
del lines[:pre]
@@ -208,7 +151,7 @@ def cut_lines(pre, post=0, what=None):
def between(marker, what=None, keepempty=False, exclude=False):
- # type: (unicode, Sequence[unicode], bool, bool) -> Callable
+ # type: (str, Sequence[str], bool, bool) -> Callable
"""Return a listener that either keeps, or if *exclude* is True excludes,
lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
@@ -220,7 +163,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
marker_re = re.compile(marker)
def process(app, what_, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
if what and what_ not in what:
return
deleted = 0
@@ -248,14 +191,14 @@ def between(marker, what=None, keepempty=False, exclude=False):
class Options(dict):
"""A dict/attribute hybrid that returns None on nonexisting keys."""
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
try:
return self[name.replace('_', '-')]
except KeyError:
return None
-class Documenter(object):
+class Documenter:
"""
A Documenter knows how to autodocument a single object type. When
registered with the AutoDirective, it will be used to document objects
@@ -274,7 +217,7 @@ class Documenter(object):
#: generated directive name
objtype = 'object'
#: indentation by which to indent the directive content
- content_indent = u' '
+ content_indent = ' '
#: priority if multiple documenters return True from can_document_member
priority = 0
#: order if autodoc_member_order is set to 'groupwise'
@@ -282,21 +225,21 @@ class Documenter(object):
#: true if the generated content may contain titles
titles_allowed = False
- option_spec = {'noindex': bool_option} # type: Dict[unicode, Callable]
+ option_spec = {'noindex': bool_option} # type: Dict[str, Callable]
def get_attr(self, obj, name, *defargs):
- # type: (Any, unicode, Any) -> Any
+ # type: (Any, str, Any) -> Any
"""getattr() override for types such as Zope interfaces."""
return autodoc_attrgetter(self.env.app, obj, name, *defargs)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""Called to see if a member can be documented by this documenter."""
raise NotImplementedError('must be implemented in subclasses')
- def __init__(self, directive, name, indent=u''):
- # type: (DocumenterBridge, unicode, unicode) -> None
+ def __init__(self, directive, name, indent=''):
+ # type: (DocumenterBridge, str, str) -> None
self.directive = directive
self.env = directive.env # type: BuildEnvironment
self.options = directive.genopt
@@ -306,33 +249,33 @@ class Documenter(object):
# qualified name (all set after resolve_name succeeds)
self.modname = None # type: str
self.module = None # type: ModuleType
- self.objpath = None # type: List[unicode]
- self.fullname = None # type: unicode
+ self.objpath = None # type: List[str]
+ self.fullname = None # type: str
# extra signature items (arguments and return annotation,
# also set after resolve_name succeeds)
- self.args = None # type: unicode
- self.retann = None # type: unicode
+ self.args = None # type: str
+ self.retann = None # type: str
# the object to document (set after import_object succeeds)
self.object = None # type: Any
- self.object_name = None # type: unicode
+ self.object_name = None # type: str
# the parent/owner of the object to document
self.parent = None # type: Any
# the module analyzer to get at attribute docs, or None
- self.analyzer = None # type: Any
+ self.analyzer = None # type: ModuleAnalyzer
@property
def documenters(self):
- # type: () -> Dict[unicode, Type[Documenter]]
+ # type: () -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
return get_documenters(self.env.app)
def add_line(self, line, source, *lineno):
- # type: (unicode, unicode, int) -> None
+ # type: (str, str, int) -> None
"""Append one line of generated reST to the output."""
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
"""Resolve the module and name of the object to document given by the
arguments and the current module/class.
@@ -356,7 +299,8 @@ class Documenter(object):
explicit_modname, path, base, args, retann = \
py_ext_sig_re.match(self.name).groups()
except AttributeError:
- logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name))
+ logger.warning(__('invalid signature for auto%s (%r)') % (self.objtype, self.name),
+ type='autodoc')
return False
# support explicit module and class name separation via ::
@@ -367,7 +311,7 @@ class Documenter(object):
modname = None
parents = []
- self.modname, self.objpath = self.resolve_name(modname, parents, path, base) # type: ignore # NOQA
+ self.modname, self.objpath = self.resolve_name(modname, parents, path, base)
if not self.modname:
return False
@@ -393,7 +337,7 @@ class Documenter(object):
self.module, self.parent, self.object_name, self.object = ret
return True
except ImportError as exc:
- logger.warning(exc.args[0])
+ logger.warning(exc.args[0], type='autodoc', subtype='import_object')
self.env.note_reread()
return False
@@ -422,7 +366,7 @@ class Documenter(object):
return True
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the argument signature of *self.object*.
Should return None if the object does not have a signature.
@@ -430,7 +374,7 @@ class Documenter(object):
return None
def format_name(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
@@ -442,21 +386,21 @@ class Documenter(object):
return '.'.join(self.objpath) or self.modname
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
"""Format the signature (arguments and return annotation) of the object.
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
# signature given explicitly
- args = "(%s)" % self.args # type: unicode
+ args = "(%s)" % self.args
else:
# try to introspect the signature
try:
args = self.format_args()
except Exception as err:
logger.warning(__('error while formatting arguments for %s: %s') %
- (self.fullname, err))
+ (self.fullname, err), type='autodoc')
args = None
retann = self.retann
@@ -473,38 +417,36 @@ class Documenter(object):
return ''
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', self.objtype)
name = self.format_name()
sourcename = self.get_sourcename()
- self.add_line(u'.. %s:%s:: %s%s' % (domain, directive, name, sig),
+ self.add_line('.. %s:%s:: %s%s' % (domain, directive, name, sig),
sourcename)
if self.options.noindex:
- self.add_line(u' :noindex:', sourcename)
+ self.add_line(' :noindex:', sourcename)
if self.objpath:
# Be explicit about the module, this is necessary since .. class::
# etc. don't support a prepended module name
- self.add_line(u' :module: %s' % self.modname, sourcename)
+ self.add_line(' :module: %s' % self.modname, sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
"""Decode and return lines of the docstring(s) for the object."""
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
docstring = getdoc(self.object, self.get_attr,
self.env.config.autodoc_inherit_docstrings)
- # make sure we have Unicode docstrings, then sanitize and split
- # into lines
- if isinstance(docstring, text_type):
+ if docstring:
return [prepare_docstring(docstring, ignore)]
- elif isinstance(docstring, str): # this will not trigger on Py3
- return [prepare_docstring(force_decode(docstring, encoding),
- ignore)]
- # ... else it is something strange, let's ignore it
return []
def process_doc(self, docstrings):
- # type: (List[List[unicode]]) -> Iterator[unicode]
+ # type: (List[List[str]]) -> Iterator[str]
"""Let the user process the docstrings before adding them."""
for docstringlines in docstrings:
if self.env.app:
@@ -512,20 +454,13 @@ class Documenter(object):
self.env.app.emit('autodoc-process-docstring',
self.objtype, self.fullname, self.object,
self.options, docstringlines)
- for line in docstringlines:
- yield line
+ yield from docstringlines
def get_sourcename(self):
- # type: () -> unicode
+ # type: () -> str
if self.analyzer:
- # prevent encoding errors when the file name is non-ASCII
- if not isinstance(self.analyzer.srcname, text_type):
- filename = text_type(self.analyzer.srcname,
- sys.getfilesystemencoding(), 'replace')
- else:
- filename = self.analyzer.srcname
- return u'%s:docstring of %s' % (filename, self.fullname)
- return u'docstring of %s' % self.fullname
+ return '%s:docstring of %s' % (self.analyzer.srcname, self.fullname)
+ return 'docstring of %s' % self.fullname
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -544,8 +479,7 @@ class Documenter(object):
# add content from docstrings
if not no_docstring:
- encoding = self.analyzer and self.analyzer.encoding
- docstrings = self.get_doc(encoding)
+ docstrings = self.get_doc()
if not docstrings:
# append at least a dummy docstring, so that the event
# autodoc-process-docstring is fired and can add some
@@ -560,7 +494,7 @@ class Documenter(object):
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, Any]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, Any]]]
"""Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
@@ -578,16 +512,16 @@ class Documenter(object):
selected.append((name, members[name].value))
else:
logger.warning(__('missing attribute %s in object %s') %
- (name, self.fullname))
+ (name, self.fullname), type='autodoc')
return False, sorted(selected)
elif self.options.inherited_members:
- return False, sorted((m.name, m.value) for m in itervalues(members))
+ return False, sorted((m.name, m.value) for m in members.values())
else:
- return False, sorted((m.name, m.value) for m in itervalues(members)
+ return False, sorted((m.name, m.value) for m in members.values()
if m.directly_defined)
def filter_members(self, members, want_all):
- # type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]]
+ # type: (List[Tuple[str, Any]], bool) -> List[Tuple[str, Any, bool]]
"""Filter the given member list.
Members are skipped if
@@ -667,7 +601,7 @@ class Documenter(object):
except Exception as exc:
logger.warning(__('autodoc: failed to determine %r to be documented.'
'the following exception was raised:\n%s'),
- member, exc)
+ member, exc, type='autodoc')
keep = False
if keep:
@@ -705,7 +639,7 @@ class Documenter(object):
# document non-skipped members
memberdocumenters = [] # type: List[Tuple[Documenter, bool]]
for (mname, member, isattr) in self.filter_members(members, want_all):
- classes = [cls for cls in itervalues(self.documenters)
+ classes = [cls for cls in self.documenters.values()
if cls.can_document_member(member, mname, isattr, self)]
if not classes:
# don't know how to document this member
@@ -760,7 +694,7 @@ class Documenter(object):
__('don\'t know which module to import for autodocumenting '
'%r (try placing a "module" or "currentmodule" directive '
'in the document, or giving an explicit module name)') %
- self.name)
+ self.name, type='autodoc')
return
# now, import the module and get object to document
@@ -800,14 +734,14 @@ class Documenter(object):
# make sure that the result starts with an empty line. This is
# necessary for some situations where another directive preprocesses
# reST and no starting newline is present
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
# format the object's signature, if any
sig = self.format_signature()
# generate the directive header and options, if applicable
self.add_directive_header(sig)
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
# e.g. the module directive doesn't have content
self.indent += self.content_indent
@@ -824,7 +758,7 @@ class ModuleDocumenter(Documenter):
Specialized Documenter subclass for modules.
"""
objtype = 'module'
- content_indent = u''
+ content_indent = ''
titles_allowed = True
option_spec = {
@@ -835,51 +769,51 @@ class ModuleDocumenter(Documenter):
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
'imported-members': bool_option, 'ignore-module-all': bool_option
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
- super(ModuleDocumenter, self).__init__(*args)
+ super().__init__(*args)
merge_special_members_option(self.options)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
# don't document submodules automatically
return False
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is not None:
- logger.warning(__('"::" in automodule name doesn\'t make sense'))
+ logger.warning(__('"::" in automodule name doesn\'t make sense'),
+ type='autodoc')
return (path or '') + base, []
def parse_name(self):
# type: () -> bool
- ret = Documenter.parse_name(self)
+ ret = super().parse_name()
if self.args or self.retann:
logger.warning(__('signature arguments or return annotation '
- 'given for automodule %s') % self.fullname)
+ 'given for automodule %s') % self.fullname,
+ type='autodoc')
return ret
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
Documenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
# add some module-specific options
if self.options.synopsis:
- self.add_line(
- u' :synopsis: ' + self.options.synopsis, sourcename)
+ self.add_line(' :synopsis: ' + self.options.synopsis, sourcename)
if self.options.platform:
- self.add_line(
- u' :platform: ' + self.options.platform, sourcename)
+ self.add_line(' :platform: ' + self.options.platform, sourcename)
if self.options.deprecated:
- self.add_line(u' :deprecated:', sourcename)
+ self.add_line(' :deprecated:', sourcename)
def get_object_members(self, want_all):
- # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
+ # type: (bool) -> Tuple[bool, List[Tuple[str, object]]]
if want_all:
if (self.options.ignore_module_all or not
hasattr(self.object, '__all__')):
@@ -890,11 +824,13 @@ class ModuleDocumenter(Documenter):
memberlist = self.object.__all__
# Sometimes __all__ is broken...
if not isinstance(memberlist, (list, tuple)) or not \
- all(isinstance(entry, string_types) for entry in memberlist):
+ all(isinstance(entry, str) for entry in memberlist):
logger.warning(
__('__all__ should be a list of strings, not %r '
'(in module %s) -- ignoring __all__') %
- (memberlist, self.fullname))
+ (memberlist, self.fullname),
+ type='autodoc'
+ )
# fall back to all members
return True, safe_getmembers(self.object)
else:
@@ -907,7 +843,9 @@ class ModuleDocumenter(Documenter):
logger.warning(
__('missing attribute mentioned in :members: or __all__: '
'module %s, attribute %s') %
- (safe_getattr(self.object, '__name__', '???'), mname))
+ (safe_getattr(self.object, '__name__', '???'), mname),
+ type='autodoc'
+ )
return False, ret
@@ -917,7 +855,7 @@ class ModuleLevelDocumenter(Documenter):
classes, data/constants).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
modname = path.rstrip('.')
@@ -938,7 +876,7 @@ class ClassLevelDocumenter(Documenter):
attributes).
"""
def resolve_name(self, modname, parents, path, base):
- # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
+ # type: (str, Any, str, Any) -> Tuple[str, List[str]]
if modname is None:
if path:
mod_cls = path.rstrip('.')
@@ -954,7 +892,7 @@ class ClassLevelDocumenter(Documenter):
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
- modname, cls = rpartition(mod_cls, '.') # type: ignore
+ modname, cls = rpartition(mod_cls, '.')
parents = [cls]
# if the module name is still missing, get it like above
if not modname:
@@ -965,15 +903,19 @@ class ClassLevelDocumenter(Documenter):
return modname, parents + [base]
-class DocstringSignatureMixin(object):
+class DocstringSignatureMixin:
"""
Mixin for FunctionDocumenter and MethodDocumenter to provide the
feature of reading the signature from the docstring.
"""
def _find_signature(self, encoding=None):
- # type: (unicode) -> Tuple[str, str]
- docstrings = self.get_doc(encoding)
+ # type: (str) -> Tuple[str, str]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s._find_signature() is "
+ "deprecated." % self.__class__.__name__,
+ RemovedInSphinx40Warning)
+ docstrings = self.get_doc()
self._new_docstrings = docstrings[:]
result = None
for i, doclines in enumerate(docstrings):
@@ -998,24 +940,28 @@ class DocstringSignatureMixin(object):
result = args, retann
# don't look any further
break
- return result # type: ignore
+ return result
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
- return Documenter.get_doc(self, encoding, ignore) # type: ignore
+ return super().get_doc(None, ignore) # type: ignore
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
result = self._find_signature()
if result is not None:
self.args, self.retann = result
- return Documenter.format_signature(self) # type: ignore
+ return super().format_signature() # type: ignore
class DocstringStripSignatureMixin(DocstringSignatureMixin):
@@ -1024,7 +970,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
feature of stripping any function signature from the docstring.
"""
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
@@ -1034,7 +980,7 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
# DocstringSignatureMixin.format_signature.
# Documenter.format_signature use self.args value to format.
_args, self.retann = result
- return Documenter.format_signature(self) # type: ignore
+ return super().format_signature()
class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore
@@ -1046,11 +992,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isfunction(member) or isbuiltin(member)
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# cannot introspect arguments of a C function or method
return None
@@ -1098,21 +1044,21 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
'show-inheritance': bool_option, 'member-order': identity,
'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
def __init__(self, *args):
# type: (Any) -> None
- super(ClassDocumenter, self).__init__(*args)
+ super().__init__(*args)
merge_special_members_option(self.options)
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
- return isinstance(member, class_types)
+ # type: (Any, str, bool, Any) -> bool
+ return isinstance(member, type)
def import_object(self):
# type: () -> Any
- ret = ModuleLevelDocumenter.import_object(self)
+ ret = super().import_object()
# if the class is documented under another name, document it
# as data/attribute
if ret:
@@ -1123,7 +1069,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
@@ -1140,32 +1086,36 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
return None
def format_signature(self):
- # type: () -> unicode
+ # type: () -> str
if self.doc_as_attr:
return ''
- return DocstringSignatureMixin.format_signature(self)
+ return super().format_signature()
def add_directive_header(self, sig):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.doc_as_attr:
self.directivetype = 'attribute'
- Documenter.add_directive_header(self, sig)
+ super().add_directive_header(sig)
# add inheritance info, if wanted
if not self.doc_as_attr and self.options.show_inheritance:
sourcename = self.get_sourcename()
- self.add_line(u'', sourcename)
+ self.add_line('', sourcename)
if hasattr(self.object, '__bases__') and len(self.object.__bases__):
bases = [b.__module__ in ('__builtin__', 'builtins') and
- u':class:`%s`' % b.__name__ or
- u':class:`%s.%s`' % (b.__module__, b.__name__)
+ ':class:`%s`' % b.__name__ or
+ ':class:`%s.%s`' % (b.__module__, b.__name__)
for b in self.object.__bases__]
- self.add_line(u' ' + _(u'Bases: %s') % ', '.join(bases),
+ self.add_line(' ' + _('Bases: %s') % ', '.join(bases),
sourcename)
def get_doc(self, encoding=None, ignore=1):
- # type: (unicode, int) -> List[List[unicode]]
+ # type: (str, int) -> List[List[str]]
+ if encoding is not None:
+ warnings.warn("The 'encoding' argument to autodoc.%s.get_doc() is deprecated."
+ % self.__class__.__name__,
+ RemovedInSphinx40Warning)
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
@@ -1201,14 +1151,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
docstrings = [initdocstring]
else:
docstrings.append(initdocstring)
- doc = []
- for docstring in docstrings:
- if isinstance(docstring, text_type):
- doc.append(prepare_docstring(docstring, ignore))
- elif isinstance(docstring, str): # this will not trigger on Py3
- doc.append(prepare_docstring(force_decode(docstring, encoding),
- ignore))
- return doc
+ return [prepare_docstring(docstring, ignore) for docstring in docstrings]
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -1220,19 +1163,17 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
module = safe_getattr(self.object, '__module__', None)
parentmodule = safe_getattr(self.parent, '__module__', None)
if module and module != parentmodule:
- classname = str(module) + u'.' + str(classname)
- content = ViewList(
- [_('alias of :class:`%s`') % classname], source='')
- ModuleLevelDocumenter.add_content(self, content,
- no_docstring=True)
+ classname = str(module) + '.' + str(classname)
+ content = StringList([_('alias of :class:`%s`') % classname], source='')
+ super().add_content(content, no_docstring=True)
else:
- ModuleLevelDocumenter.add_content(self, more_content)
+ super().add_content(more_content)
def document_members(self, all_members=False):
# type: (bool) -> None
if self.doc_as_attr:
return
- ModuleLevelDocumenter.document_members(self, all_members)
+ super().document_members(all_members)
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
@@ -1242,9 +1183,9 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
# If a class gets imported into the module real_modname
# the analyzer won't find the source of the class, if
# it looks in real_modname.
- return super(ClassDocumenter, self).generate(more_content=more_content,
- check_module=check_module,
- all_members=all_members)
+ return super().generate(more_content=more_content,
+ check_module=check_module,
+ all_members=all_members)
class ExceptionDocumenter(ClassDocumenter):
@@ -1259,9 +1200,8 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
- return isinstance(member, class_types) and \
- issubclass(member, BaseException) # type: ignore
+ # type: (Any, str, bool, Any) -> bool
+ return isinstance(member, type) and issubclass(member, BaseException)
class DataDocumenter(ModuleLevelDocumenter):
@@ -1276,12 +1216,12 @@ class DataDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return isinstance(parent, ModuleDocumenter) and isattr
def add_directive_header(self, sig):
- # type: (unicode) -> None
- ModuleLevelDocumenter.add_directive_header(self, sig)
+ # type: (str) -> None
+ super().add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
try:
@@ -1289,11 +1229,11 @@ class DataDocumenter(ModuleLevelDocumenter):
except ValueError:
pass
else:
- self.add_line(u' :annotation: = ' + objrepr, sourcename)
+ self.add_line(' :annotation: = ' + objrepr, sourcename)
elif self.options.annotation is SUPPRESS:
pass
else:
- self.add_line(u' :annotation: %s' % self.options.annotation,
+ self.add_line(' :annotation: %s' % self.options.annotation,
sourcename)
def document_members(self, all_members=False):
@@ -1316,13 +1256,13 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
def import_object(self):
# type: () -> Any
- ret = ClassLevelDocumenter.import_object(self)
+ ret = super().import_object()
if not ret:
return ret
@@ -1344,7 +1284,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
return ret
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
if isbuiltin(self.object) or inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
return None
@@ -1381,7 +1321,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
non_attr_types = (type, MethodDescriptorType)
isdatadesc = isdescriptor(member) and not \
cls.is_function_or_method(member) and not \
@@ -1392,7 +1332,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
# exported anywhere by Python
return isdatadesc or (not isinstance(parent, ModuleDocumenter) and
not inspect.isroutine(member) and
- not isinstance(member, class_types))
+ not isinstance(member, type))
def document_members(self, all_members=False):
# type: (bool) -> None
@@ -1400,7 +1340,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
def import_object(self):
# type: () -> Any
- ret = ClassLevelDocumenter.import_object(self)
+ ret = super().import_object()
if isenumattribute(self.object):
self.object = self.object.value
if isdescriptor(self.object) and \
@@ -1417,8 +1357,8 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
or self.modname
def add_directive_header(self, sig):
- # type: (unicode) -> None
- ClassLevelDocumenter.add_directive_header(self, sig)
+ # type: (str) -> None
+ super().add_directive_header(sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
if not self._datadescriptor:
@@ -1427,12 +1367,11 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
except ValueError:
pass
else:
- self.add_line(u' :annotation: = ' + objrepr, sourcename)
+ self.add_line(' :annotation: = ' + objrepr, sourcename)
elif self.options.annotation is SUPPRESS:
pass
else:
- self.add_line(u' :annotation: %s' % self.options.annotation,
- sourcename)
+ self.add_line(' :annotation: %s' % self.options.annotation, sourcename)
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
@@ -1440,7 +1379,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
# if it's not a data descriptor, its docstring is very probably the
# wrong thing to display
no_docstring = True
- ClassLevelDocumenter.add_content(self, more_content, no_docstring)
+ super().add_content(more_content, no_docstring)
class InstanceAttributeDocumenter(AttributeDocumenter):
@@ -1457,7 +1396,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
- # type: (Any, unicode, bool, Any) -> bool
+ # type: (Any, str, bool, Any) -> bool
"""This documents only INSTANCEATTR members."""
return isattr and (member is INSTANCEATTR)
@@ -1472,90 +1411,19 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
def add_content(self, more_content, no_docstring=False):
# type: (Any, bool) -> None
"""Never try to get a docstring from the object."""
- AttributeDocumenter.add_content(self, more_content, no_docstring=True)
-
-
-class DeprecatedDict(dict):
- def __init__(self, message):
- # type: (str) -> None
- self.message = message
- super(DeprecatedDict, self).__init__()
-
- def __setitem__(self, key, value):
- # type: (unicode, Any) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).__setitem__(key, value)
-
- def setdefault(self, key, default=None):
- # type: (unicode, Any) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).setdefault(key, default)
-
- def update(self, other=None): # type: ignore
- # type: (Dict) -> None
- warnings.warn(self.message, RemovedInSphinx20Warning, stacklevel=2)
- super(DeprecatedDict, self).update(other)
-
-
-class AutodocRegistry(object):
- """
- A registry of Documenters and attrgetters.
-
- Note: When importing an object, all items along the import chain are
- accessed using the descendant's *_special_attrgetters*, thus this
- dictionary should include all necessary functions for accessing
- attributes of the parents.
- """
- # a registry of objtype -> documenter class (Deprecated)
- _registry = DeprecatedDict(
- 'AutoDirective._registry has been deprecated. '
- 'Please use app.add_autodocumenter() instead.'
- ) # type: Dict[unicode, Type[Documenter]]
-
- # a registry of type -> getattr function
- _special_attrgetters = DeprecatedDict(
- 'AutoDirective._special_attrgetters has been deprecated. '
- 'Please use app.add_autodoc_attrgetter() instead.'
- ) # type: Dict[Type, Callable]
-
-
-AutoDirective = AutodocRegistry # for backward compatibility
-
-
-def add_documenter(cls):
- # type: (Type[Documenter]) -> None
- """Register a new Documenter."""
- warnings.warn('sphinx.ext.autodoc.add_documenter() has been deprecated. '
- 'Please use app.add_autodocumenter() instead.',
- RemovedInSphinx20Warning, stacklevel=2)
-
- if not issubclass(cls, Documenter):
- raise ExtensionError('autodoc documenter %r must be a subclass '
- 'of Documenter' % cls)
- # actually, it should be possible to override Documenters
- # if cls.objtype in AutoDirective._registry:
- # raise ExtensionError('autodoc documenter for %r is already '
- # 'registered' % cls.objtype)
- AutoDirective._registry[cls.objtype] = cls
+ super().add_content(more_content, no_docstring=True)
def get_documenters(app):
- # type: (Sphinx) -> Dict[unicode, Type[Documenter]]
+ # type: (Sphinx) -> Dict[str, Type[Documenter]]
"""Returns registered Documenter classes"""
- classes = dict(AutoDirective._registry) # registered directly
- if app:
- classes.update(app.registry.documenters) # registered by API
- return classes
+ return app.registry.documenters
def autodoc_attrgetter(app, obj, name, *defargs):
- # type: (Sphinx, Any, unicode, Any) -> Any
+ # type: (Sphinx, Any, str, Any) -> Any
"""Alternative getattr() for types"""
- candidates = dict(AutoDirective._special_attrgetters)
- if app:
- candidates.update(app.registry.autodoc_attrgettrs)
-
- for typ, func in iteritems(candidates):
+ for typ, func in app.registry.autodoc_attrgettrs.items():
if isinstance(obj, typ):
return func(obj, name, *defargs)
@@ -1577,17 +1445,17 @@ def merge_autodoc_default_flags(app, config):
RemovedInSphinx30Warning, stacklevel=2)
for option in config.autodoc_default_flags:
- if isinstance(option, string_types):
+ if isinstance(option, str):
config.autodoc_default_options[option] = None
else:
logger.warning(
__("Ignoring invalid option in autodoc_default_flags: %r"),
- option
+ option, type='autodoc'
)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_autodocumenter(ModuleDocumenter)
app.add_autodocumenter(ClassDocumenter)
app.add_autodocumenter(ExceptionDocumenter)
diff --git a/sphinx/ext/autodoc/directive.py b/sphinx/ext/autodoc/directive.py
index 3a3434fc8..57ba12017 100644
--- a/sphinx/ext/autodoc/directive.py
+++ b/sphinx/ext/autodoc/directive.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc.directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,7 +7,7 @@
"""
from docutils import nodes
-from docutils.statemachine import ViewList
+from docutils.statemachine import StringList
from docutils.utils import assemble_option_dict
from sphinx.ext.autodoc import Options, get_documenters
@@ -18,8 +17,9 @@ from sphinx.util.nodes import nested_parse_with_titles
if False:
# For type annotation
- from typing import Any, Dict, List, Set, Type # NOQA
- from docutils.statemachine import State, StateMachine, StringList # NOQA
+ from typing import Any, Callable, Dict, List, Set, Type # NOQA
+ from docutils.parsers.rst.state import RSTState # NOQA
+ from docutils.statemachine import StateMachine, StringList # NOQA
from docutils.utils import Reporter # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
@@ -31,18 +31,23 @@ logger = logging.getLogger(__name__)
# common option names for autodoc directives
AUTODOC_DEFAULT_OPTIONS = ['members', 'undoc-members', 'inherited-members',
'show-inheritance', 'private-members', 'special-members',
- 'ignore-module-all', 'exclude-members']
+ 'ignore-module-all', 'exclude-members', 'member-order']
-class DummyOptionSpec(object):
+class DummyOptionSpec(dict):
"""An option_spec allows any options."""
+ def __bool__(self):
+ # type: () -> bool
+ """Behaves like some options are defined."""
+ return True
+
def __getitem__(self, key):
- # type: (Any) -> Any
+ # type: (str) -> Callable[[str], str]
return lambda x: x
-class DocumenterBridge(object):
+class DocumenterBridge:
"""A parameters container for Documenters."""
def __init__(self, env, reporter, options, lineno):
@@ -51,11 +56,11 @@ class DocumenterBridge(object):
self.reporter = reporter
self.genopt = options
self.lineno = lineno
- self.filename_set = set() # type: Set[unicode]
- self.result = ViewList()
+ self.filename_set = set() # type: Set[str]
+ self.result = StringList()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.warning(msg, location=(self.env.docname, self.lineno))
@@ -74,11 +79,11 @@ def process_documenter_options(documenter, config, options):
def parse_generated_content(state, content, documenter):
- # type: (State, StringList, Documenter) -> List[nodes.Node]
+ # type: (RSTState, StringList, Documenter) -> List[nodes.Node]
"""Parse a generated content by Documenter."""
with switch_source_input(state, content):
if documenter.titles_allowed:
- node = nodes.section()
+ node = nodes.section() # type: nodes.Element
# necessary so that the child nodes get the right source/line set
node.document = state.document
nested_parse_with_titles(state, content, node)
@@ -107,7 +112,7 @@ class AutodocDirective(SphinxDirective):
reporter = self.state.document.reporter
try:
- source, lineno = reporter.get_source_and_line(self.lineno)
+ source, lineno = reporter.get_source_and_line(self.lineno) # type: ignore
except AttributeError:
source, lineno = (None, None)
logger.debug('[autodoc] %s:%s: input:\n%s', source, lineno, self.block_text)
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
index a2280e82b..be971adbb 100644
--- a/sphinx/ext/autodoc/importer.py
+++ b/sphinx/ext/autodoc/importer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autodoc.importer
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -10,25 +9,27 @@
"""
import contextlib
+import os
import sys
import traceback
import warnings
from collections import namedtuple
+from importlib.abc import Loader, MetaPathFinder
+from importlib.machinery import ModuleSpec
from types import FunctionType, MethodType, ModuleType
-from six import PY2, iteritems
-
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.util import logging
from sphinx.util.inspect import isenumclass, safe_getattr
if False:
# For type annotation
- from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Tuple # NOQA
+ from typing import Any, Callable, Dict, Generator, Iterator, List, Optional, Sequence, Tuple, Union # NOQA
logger = logging.getLogger(__name__)
-class _MockObject(object):
+class _MockObject:
"""Used by autodoc_mock_imports."""
def __new__(cls, *args, **kwargs):
@@ -77,15 +78,18 @@ class _MockObject(object):
class _MockModule(ModuleType):
"""Used by autodoc_mock_imports."""
- __file__ = '/dev/null'
+ __file__ = os.devnull
- def __init__(self, name, loader):
+ def __init__(self, name, loader=None):
# type: (str, _MockImporter) -> None
- self.__name__ = self.__package__ = name
- self.__loader__ = loader
+ super().__init__(name)
self.__all__ = [] # type: List[str]
self.__path__ = [] # type: List[str]
+ if loader is not None:
+ warnings.warn('The loader argument for _MockModule is deprecated.',
+ RemovedInSphinx30Warning)
+
def __getattr__(self, name):
# type: (str) -> _MockObject
o = _MockObject()
@@ -93,7 +97,7 @@ class _MockModule(ModuleType):
return o
-class _MockImporter(object):
+class _MockImporter(MetaPathFinder):
def __init__(self, names):
# type: (List[str]) -> None
self.names = names
@@ -101,6 +105,9 @@ class _MockImporter(object):
# enable hook by adding itself to meta_path
sys.meta_path.insert(0, self)
+ warnings.warn('_MockImporter is now deprecated.',
+ RemovedInSphinx30Warning)
+
def disable(self):
# type: () -> None
# remove `self` from `sys.meta_path` to disable import hook
@@ -112,7 +119,7 @@ class _MockImporter(object):
del sys.modules[m]
def find_module(self, name, path=None):
- # type: (str, str) -> Any
+ # type: (str, Sequence[Union[bytes, str]]) -> Any
# check if name is (or is a descendant of) one of our base_packages
for n in self.names:
if n == name or name.startswith(n + '.'):
@@ -132,14 +139,66 @@ class _MockImporter(object):
return module
+class MockLoader(Loader):
+ """A loader for mocking."""
+ def __init__(self, finder):
+ # type: (MockFinder) -> None
+ super().__init__()
+ self.finder = finder
+
+ def create_module(self, spec):
+ # type: (ModuleSpec) -> ModuleType
+ logger.debug('[autodoc] adding a mock module as %s!', spec.name)
+ self.finder.mocked_modules.append(spec.name)
+ return _MockModule(spec.name)
+
+ def exec_module(self, module):
+ # type: (ModuleType) -> None
+ pass # nothing to do
+
+
+class MockFinder(MetaPathFinder):
+ """A finder for mocking."""
+
+ def __init__(self, modnames):
+ # type: (List[str]) -> None
+ super().__init__()
+ self.modnames = modnames
+ self.loader = MockLoader(self)
+ self.mocked_modules = [] # type: List[str]
+
+ def find_spec(self, fullname, path, target=None):
+ # type: (str, Sequence[Union[bytes, str]], ModuleType) -> ModuleSpec
+ for modname in self.modnames:
+ # check if fullname is (or is a descendant of) one of our targets
+ if modname == fullname or fullname.startswith(modname + '.'):
+ return ModuleSpec(fullname, self.loader)
+
+ return None
+
+ def invalidate_caches(self):
+ # type: () -> None
+ """Invalidate mocked modules on sys.modules."""
+ for modname in self.mocked_modules:
+ sys.modules.pop(modname, None)
+
+
@contextlib.contextmanager
-def mock(names):
- # type: (List[str]) -> Generator
+def mock(modnames):
+ # type: (List[str]) -> Generator[None, None, None]
+ """Insert mock modules during context::
+
+ with mock(['target.module.name']):
+ # mock modules are enabled here
+ ...
+ """
try:
- importer = _MockImporter(names)
+ finder = MockFinder(modnames)
+ sys.meta_path.insert(0, finder)
yield
finally:
- importer.disable()
+ sys.meta_path.remove(finder)
+ finder.invalidate_caches()
def import_module(modname, warningiserror=False):
@@ -160,7 +219,7 @@ def import_module(modname, warningiserror=False):
def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warningiserror=False):
- # type: (str, List[unicode], str, Callable[[Any, unicode], Any], bool) -> Any
+ # type: (str, List[str], str, Callable[[Any, str], Any], bool) -> Any
if objpath:
logger.debug('[autodoc] from %s import %s', modname, '.'.join(objpath))
else:
@@ -219,8 +278,6 @@ def import_object(modname, objpath, objtype='', attrgetter=safe_getattr, warning
else:
errmsg += '; the following exception was raised:\n%s' % traceback.format_exc()
- if PY2:
- errmsg = errmsg.decode('utf-8') # type: ignore
logger.debug(errmsg)
raise ImportError(errmsg)
@@ -229,17 +286,11 @@ Attribute = namedtuple('Attribute', ['name', 'directly_defined', 'value'])
def get_object_members(subject, objpath, attrgetter, analyzer=None):
- # type: (Any, List[unicode], Callable, Any) -> Dict[str, Attribute] # NOQA
+ # type: (Any, List[str], Callable, Any) -> Dict[str, Attribute] # NOQA
"""Get members and attributes of target object."""
# the members directly defined in the class
obj_dict = attrgetter(subject, '__dict__', {})
- # Py34 doesn't have enum members in __dict__.
- if sys.version_info[:2] == (3, 4) and isenumclass(subject):
- obj_dict = dict(obj_dict)
- for name, value in subject.__members__.items():
- obj_dict[name] = value
-
members = {} # type: Dict[str, Attribute]
# enum members
@@ -249,7 +300,7 @@ def get_object_members(subject, objpath, attrgetter, analyzer=None):
members[name] = Attribute(name, True, value)
superclass = subject.__mro__[1]
- for name, value in iteritems(obj_dict):
+ for name, value in obj_dict.items():
if name not in superclass.__dict__:
members[name] = Attribute(name, True, value)
diff --git a/sphinx/ext/autodoc/inspector.py b/sphinx/ext/autodoc/inspector.py
deleted file mode 100644
index 52a79fed5..000000000
--- a/sphinx/ext/autodoc/inspector.py
+++ /dev/null
@@ -1,187 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.ext.autodoc.inspector
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Inspect utilities for autodoc
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import typing
-import warnings
-
-from six import StringIO, string_types
-
-from sphinx.deprecation import RemovedInSphinx20Warning
-from sphinx.util.inspect import object_description
-
-if False:
- # For type annotation
- from typing import Any, Callable, Dict, Tuple # NOQA
-
-
-def format_annotation(annotation):
- # type: (Any) -> str
- """Return formatted representation of a type annotation.
-
- Show qualified names for types and additional details for types from
- the ``typing`` module.
-
- Displaying complex types from ``typing`` relies on its private API.
- """
- warnings.warn('format_annotation() is now deprecated. '
- 'Please use sphinx.util.inspect.Signature instead.',
- RemovedInSphinx20Warning, stacklevel=2)
- if isinstance(annotation, typing.TypeVar): # type: ignore
- return annotation.__name__
- if annotation == Ellipsis:
- return '...'
- if not isinstance(annotation, type):
- return repr(annotation)
-
- qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
- if annotation else repr(annotation))
-
- if annotation.__module__ == 'builtins':
- return annotation.__qualname__ # type: ignore
- else:
- if hasattr(typing, 'GenericMeta') and \
- isinstance(annotation, typing.GenericMeta):
- # In Python 3.5.2+, all arguments are stored in __args__,
- # whereas __parameters__ only contains generic parameters.
- #
- # Prior to Python 3.5.2, __args__ is not available, and all
- # arguments are in __parameters__.
- params = None
- if hasattr(annotation, '__args__'):
- if annotation.__args__ is None or len(annotation.__args__) <= 2: # type: ignore # NOQA
- params = annotation.__args__ # type: ignore
- else: # typing.Callable
- args = ', '.join(format_annotation(a) for a in annotation.__args__[:-1]) # type: ignore # NOQA
- result = format_annotation(annotation.__args__[-1]) # type: ignore
- return '%s[[%s], %s]' % (qualified_name, args, result)
- elif hasattr(annotation, '__parameters__'):
- params = annotation.__parameters__ # type: ignore
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif (hasattr(typing, 'UnionMeta') and
- isinstance(annotation, typing.UnionMeta) and # type: ignore
- hasattr(annotation, '__union_params__')):
- params = annotation.__union_params__
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif (hasattr(typing, 'CallableMeta') and
- isinstance(annotation, typing.CallableMeta) and # type: ignore
- getattr(annotation, '__args__', None) is not None and
- hasattr(annotation, '__result__')):
- # Skipped in the case of plain typing.Callable
- args = annotation.__args__
- if args is None:
- return qualified_name
- elif args is Ellipsis:
- args_str = '...'
- else:
- formatted_args = (format_annotation(a) for a in args)
- args_str = '[%s]' % ', '.join(formatted_args)
- return '%s[%s, %s]' % (qualified_name,
- args_str,
- format_annotation(annotation.__result__))
- elif (hasattr(typing, 'TupleMeta') and
- isinstance(annotation, typing.TupleMeta) and # type: ignore
- hasattr(annotation, '__tuple_params__') and
- hasattr(annotation, '__tuple_use_ellipsis__')):
- params = annotation.__tuple_params__
- if params is not None:
- param_strings = [format_annotation(p) for p in params]
- if annotation.__tuple_use_ellipsis__:
- param_strings.append('...')
- return '%s[%s]' % (qualified_name,
- ', '.join(param_strings))
- return qualified_name
-
-
-def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
- kwonlyargs=(), kwonlydefaults={}, annotations={}):
- # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
- """Return a string representation of an ``inspect.FullArgSpec`` tuple.
-
- An enhanced version of ``inspect.formatargspec()`` that handles typing
- annotations better.
- """
- warnings.warn('formatargspec() is now deprecated. '
- 'Please use sphinx.util.inspect.Signature instead.',
- RemovedInSphinx20Warning, stacklevel=2)
-
- def format_arg_with_annotation(name):
- # type: (str) -> str
- if name in annotations:
- return '%s: %s' % (name, format_annotation(get_annotation(name)))
- return name
-
- def get_annotation(name):
- # type: (str) -> str
- value = annotations[name]
- if isinstance(value, string_types):
- return introspected_hints.get(name, value)
- else:
- return value
-
- try:
- introspected_hints = (typing.get_type_hints(function) # type: ignore
- if typing and hasattr(function, '__code__') else {})
- except Exception:
- introspected_hints = {}
-
- fd = StringIO()
- fd.write('(')
-
- formatted = []
- defaults_start = len(args) - len(defaults) if defaults else len(args)
-
- for i, arg in enumerate(args):
- arg_fd = StringIO()
- if isinstance(arg, list):
- # support tupled arguments list (only for py2): def foo((x, y))
- arg_fd.write('(')
- arg_fd.write(format_arg_with_annotation(arg[0]))
- for param in arg[1:]:
- arg_fd.write(', ')
- arg_fd.write(format_arg_with_annotation(param))
- arg_fd.write(')')
- else:
- arg_fd.write(format_arg_with_annotation(arg))
- if defaults and i >= defaults_start:
- arg_fd.write(' = ' if arg in annotations else '=')
- arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varargs:
- formatted.append('*' + format_arg_with_annotation(varargs))
-
- if kwonlyargs:
- if not varargs:
- formatted.append('*')
-
- for kwarg in kwonlyargs:
- arg_fd = StringIO()
- arg_fd.write(format_arg_with_annotation(kwarg))
- if kwonlydefaults and kwarg in kwonlydefaults:
- arg_fd.write(' = ' if kwarg in annotations else '=')
- arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varkw:
- formatted.append('**' + format_arg_with_annotation(varkw))
-
- fd.write(', '.join(formatted))
- fd.write(')')
-
- if 'return' in annotations:
- fd.write(' -> ')
- fd.write(format_annotation(get_annotation('return')))
-
- return fd.getvalue()
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
index 5713828a4..1014afda8 100644
--- a/sphinx/ext/autosectionlabel.py
+++ b/sphinx/ext/autosectionlabel.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosectionlabel
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from sphinx.locale import __
@@ -36,12 +37,13 @@ def register_sections_as_label(app, document):
for node in document.traverse(nodes.section):
labelid = node['ids'][0]
docname = app.env.docname
- ref_name = getattr(node[0], 'rawsource', node[0].astext())
+ title = cast(nodes.title, node[0])
+ ref_name = getattr(title, 'rawsource', title.astext())
if app.config.autosectionlabel_prefix_document:
name = nodes.fully_normalize_name(docname + ':' + ref_name)
else:
name = nodes.fully_normalize_name(ref_name)
- sectname = clean_astext(node[0])
+ sectname = clean_astext(title)
if name in labels:
logger.warning(__('duplicate label %s, other instance in %s'),
@@ -53,7 +55,7 @@ def register_sections_as_label(app, document):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('autosectionlabel_prefix_document', False, 'env')
app.connect('doctree-read', register_sections_as_label)
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index 8ffa8fcdc..45e66e911 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary
~~~~~~~~~~~~~~~~~~~~~~
@@ -60,17 +59,16 @@ import re
import sys
import warnings
from types import ModuleType
+from typing import List, cast
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.states import RSTStateMachine, state_classes
-from docutils.statemachine import ViewList
-from six import string_types
-from six import text_type
+from docutils.statemachine import StringList
import sphinx
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx20Warning
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.environment.adapters.toctree import TocTree
from sphinx.ext.autodoc import get_documenters
from sphinx.ext.autodoc.directive import DocumenterBridge, Options
@@ -85,11 +83,12 @@ from sphinx.util.matching import Matcher
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple, Type, Union # NOQA
- from docutils.utils import Inliner # NOQA
+ from typing import Any, Dict, Tuple, Type # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -105,7 +104,7 @@ class autosummary_toc(nodes.comment):
def process_autosummary_toc(app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
@@ -113,7 +112,7 @@ def process_autosummary_toc(app, doctree):
crawled = {}
def crawl_toc(node, depth=1):
- # type: (nodes.Node, int) -> None
+ # type: (nodes.Element, int) -> None
crawled[node] = True
for j, subnode in enumerate(node):
try:
@@ -148,17 +147,19 @@ class autosummary_table(nodes.comment):
def autosummary_table_visit_html(self, node):
- # type: (nodes.NodeVisitor, autosummary_table) -> None
+ # type: (HTMLTranslator, autosummary_table) -> None
"""Make the first column of the table non-breaking."""
try:
- tbody = node[0][0][-1]
- for row in tbody:
- col1_entry = row[0]
- par = col1_entry[0]
+ table = cast(nodes.table, node[0])
+ tgroup = cast(nodes.tgroup, table[0])
+ tbody = cast(nodes.tbody, tgroup[-1])
+ rows = cast(List[nodes.row], tbody)
+ for row in rows:
+ col1_entry = cast(nodes.entry, row[0])
+ par = cast(nodes.paragraph, col1_entry[0])
for j, subnode in enumerate(list(par)):
if isinstance(subnode, nodes.Text):
- new_text = text_type(subnode.astext())
- new_text = new_text.replace(u" ", u"\u00a0")
+ new_text = subnode.astext().replace(" ", "\u00a0")
par[j] = nodes.Text(new_text)
except IndexError:
pass
@@ -173,11 +174,11 @@ _app = None # type: Sphinx
class FakeDirective(DocumenterBridge):
def __init__(self):
# type: () -> None
- super(FakeDirective, self).__init__({}, None, Options(), 0) # type: ignore
+ super().__init__({}, None, Options(), 0) # type: ignore
-def get_documenter(*args):
- # type: (Any) -> Type[Documenter]
+def get_documenter(app, obj, parent):
+ # type: (Sphinx, Any, Any) -> Type[Documenter]
"""Get an autodoc.Documenter class suitable for documenting the given
object.
@@ -186,16 +187,6 @@ def get_documenter(*args):
belongs to.
"""
from sphinx.ext.autodoc import DataDocumenter, ModuleDocumenter
- if len(args) == 3:
- # new style arguments: (app, obj, parent)
- app, obj, parent = args
- else:
- # old style arguments: (obj, parent)
- app = _app
- obj, parent = args
- warnings.warn('the interface of get_documenter() has been changed. '
- 'Please give application object as first argument.',
- RemovedInSphinx20Warning, stacklevel=2)
if inspect.ismodule(obj):
# ModuleDocumenter.can_document_member always returns False
@@ -241,16 +232,10 @@ class Autosummary(SphinxDirective):
'template': directives.unchanged,
}
- def warn(self, msg):
- # type: (unicode) -> None
- self.warnings.append(self.state.document.reporter.warning(
- msg, line=self.lineno))
-
def run(self):
# type: () -> List[nodes.Node]
- self.genopt = Options()
- self.warnings = [] # type: List[nodes.Node]
- self.result = ViewList()
+ self.bridge = DocumenterBridge(self.env, self.state.document.reporter,
+ Options(), self.lineno)
names = [x.strip().split()[0] for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])]
@@ -281,19 +266,18 @@ class Autosummary(SphinxDirective):
tocnode['maxdepth'] = -1
tocnode['glob'] = None
- tocnode = autosummary_toc('', '', tocnode)
- nodes.append(tocnode)
+ nodes.append(autosummary_toc('', '', tocnode))
- return self.warnings + nodes
+ return nodes
def get_items(self, names):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str, str]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
prefixes = get_import_prefixes_from_env(self.env)
- items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
+ items = [] # type: List[Tuple[str, str, str, str]]
max_item_chars = 50
@@ -310,7 +294,7 @@ class Autosummary(SphinxDirective):
items.append((name, '', '', name))
continue
- self.result = ViewList() # initialize for each documenter
+ self.bridge.result = StringList() # initialize for each documenter
full_name = real_name
if not isinstance(obj, ModuleType):
# give explicitly separated module name, so that members
@@ -318,7 +302,8 @@ class Autosummary(SphinxDirective):
full_name = modname + '::' + full_name[len(modname) + 1:]
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
- documenter = get_documenter(self.env.app, obj, parent)(self, full_name)
+ doccls = get_documenter(self.env.app, obj, parent)
+ documenter = doccls(self.bridge, full_name)
if not documenter.parse_name():
self.warn('failed to parse name %s' % real_name)
items.append((display_name, '', '', real_name))
@@ -354,14 +339,14 @@ class Autosummary(SphinxDirective):
# -- Grab the summary
documenter.add_content(None)
- summary = extract_summary(self.result.data[:], self.state.document)
+ summary = extract_summary(self.bridge.result.data[:], self.state.document)
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items):
- # type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[Union[addnodes.tabular_col_spec, autosummary_table]] # NOQA
+ # type: (List[Tuple[str, str, str, str]]) -> List[nodes.Node]
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
@@ -380,12 +365,12 @@ class Autosummary(SphinxDirective):
group.append(body)
def append_row(*column_texts):
- # type: (unicode) -> None
+ # type: (str) -> None
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
node = nodes.paragraph('')
- vl = ViewList()
+ vl = StringList()
vl.append(text, '%s:%d:<autosummary>' % (source, line))
with switch_source_input(self.state, vl):
self.state.nested_parse(vl, 0, node)
@@ -400,7 +385,7 @@ class Autosummary(SphinxDirective):
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
- col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
+ col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig))
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
@@ -408,15 +393,42 @@ class Autosummary(SphinxDirective):
return [table_spec, table]
+ def warn(self, msg):
+ # type: (str) -> None
+ warnings.warn('Autosummary.warn() is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ logger.warning(msg)
+
+ @property
+ def genopt(self):
+ # type: () -> Options
+ warnings.warn('Autosummary.genopt is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return self.bridge.genopt
+
+ @property
+ def warnings(self):
+ # type: () -> List[nodes.Node]
+ warnings.warn('Autosummary.warnings is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return []
+
+ @property
+ def result(self):
+ # type: () -> StringList
+ warnings.warn('Autosummary.result is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return self.bridge.result
+
def strip_arg_typehint(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Strip a type hint from argument definition."""
return s.split(':')[0].strip()
def mangle_signature(sig, max_chars=30):
- # type: (unicode, int) -> unicode
+ # type: (str, int) -> str
"""Reformat a function signature to a more compact form."""
# Strip return type annotation
s = re.sub(r"\)\s*->\s.*$", ")", sig)
@@ -430,8 +442,8 @@ def mangle_signature(sig, max_chars=30):
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
- args = [] # type: List[unicode]
- opts = [] # type: List[unicode]
+ args = [] # type: List[str]
+ opts = [] # type: List[str]
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
@@ -460,11 +472,11 @@ def mangle_signature(sig, max_chars=30):
sig += "[, %s]" % limited_join(", ", opts,
max_chars=max_chars - len(sig) - 4 - 2)
- return u"(%s)" % sig
+ return "(%s)" % sig
def extract_summary(doc, document):
- # type: (List[unicode], Any) -> unicode
+ # type: (List[str], Any) -> str
"""Extract summary from docstring."""
# Skip a blank lines at the top
@@ -513,7 +525,7 @@ def extract_summary(doc, document):
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
- # type: (unicode, List[unicode], int, unicode) -> unicode
+ # type: (str, List[str], int, str) -> str
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
@@ -562,7 +574,7 @@ def get_import_prefixes_from_env(env):
def import_by_name(name, prefixes=[None]):
- # type: (unicode, List) -> Tuple[unicode, Any, Any, unicode]
+ # type: (str, List) -> Tuple[str, Any, Any, str]
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
@@ -581,7 +593,7 @@ def import_by_name(name, prefixes=[None]):
def _import_by_name(name):
- # type: (str) -> Tuple[Any, Any, unicode]
+ # type: (str) -> Tuple[Any, Any, str]
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
@@ -625,40 +637,42 @@ def _import_by_name(name):
# -- :autolink: (smart default role) -------------------------------------------
def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
- r = None # type: Tuple[List[nodes.Node], List[nodes.Node]]
- r = env.get_domain('py').role('obj')(
- 'obj', rawtext, etext, lineno, inliner, options, content)
- pnode = r[0][0]
+ pyobj_role = env.get_domain('py').role('obj')
+ objects, msg = pyobj_role('obj', rawtext, etext, lineno, inliner, options, content)
+ if msg != []:
+ return objects, msg
+ assert len(objects) == 1
+ pending_xref = cast(addnodes.pending_xref, objects[0])
prefixes = get_import_prefixes_from_env(env)
try:
- name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
+ name, obj, parent, modname = import_by_name(pending_xref['reftarget'], prefixes)
except ImportError:
- content_node = pnode[0]
- r[0][0] = nodes.emphasis(rawtext, content_node[0].astext(),
- classes=content_node['classes'])
- return r
+ literal = cast(nodes.literal, pending_xref[0])
+ objects[0] = nodes.emphasis(rawtext, literal.astext(), classes=literal['classes'])
+
+ return objects, msg
def get_rst_suffix(app):
- # type: (Sphinx) -> unicode
+ # type: (Sphinx) -> str
def get_supported_format(suffix):
- # type: (unicode) -> Tuple[unicode]
+ # type: (str) -> Tuple[str, ...]
parser_class = app.registry.get_source_parsers().get(suffix)
if parser_class is None:
return ('restructuredtext',)
- if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser') # type: ignore
+ if isinstance(parser_class, str):
+ parser_class = import_object(parser_class, 'source parser')
return parser_class.supported
- suffix = None # type: unicode
+ suffix = None # type: str
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
@@ -697,7 +711,7 @@ def process_generate_options(app):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index dbe997c01..f8274379e 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.autosummary.generate
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -17,10 +16,8 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import argparse
-import codecs
import locale
import os
import pydoc
@@ -43,15 +40,14 @@ from sphinx.util.rst import escape as rst_escape
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Type # NOQA
- from jinja2 import BaseLoader # NOQA
+ from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx import addnodes # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
-class DummyApplication(object):
+class DummyApplication:
"""Dummy Application class for sphinx-autogen command."""
def __init__(self):
@@ -76,17 +72,17 @@ def setup_documenters(app):
def _simple_info(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg)
def _simple_warn(msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print('WARNING: ' + msg, file=sys.stderr)
def _underline(title, line='='):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if '\n' in title:
raise ValueError('Can only underline single lines')
return title + '\n' + line * len(title)
@@ -98,7 +94,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
base_path=None, builder=None, template_dir=None,
imported_members=False, app=None):
- # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool, Any) -> None # NOQA
+ # type: (List[str], str, str, Callable, Callable, str, Builder, str, bool, Any) -> None
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
@@ -113,11 +109,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
- template_dirs = None # type: List[unicode]
+ template_dirs = None # type: List[str]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
- template_loader = None # type: BaseLoader
+ template_loader = None # type: Union[BuiltinTemplateLoader, FileSystemLoader]
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
@@ -176,8 +172,8 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
template = template_env.get_template('autosummary/base.rst')
def get_members(obj, typ, include_public=[], imported=True):
- # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
- items = [] # type: List[unicode]
+ # type: (Any, str, List[str], bool) -> Tuple[List[str], List[str]]
+ items = [] # type: List[str]
for name in dir(obj):
try:
value = safe_getattr(obj, name)
@@ -192,7 +188,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
if x in include_public or not x.startswith('_')]
return public, items
- ns = {} # type: Dict[unicode, Any]
+ ns = {} # type: Dict[str, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
@@ -229,7 +225,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
- f.write(rendered) # type: ignore
+ f.write(rendered)
# descend recursively to new files
if new_files:
@@ -242,22 +238,21 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
- # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str]) -> List[Tuple[str, str, str]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
for filename in filenames:
- with codecs.open(filename, 'r', encoding='utf-8', # type: ignore
- errors='ignore') as f:
+ with open(filename, encoding='utf-8', errors='ignore') as f:
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
- # type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (str, Any, str) -> List[Tuple[str, str, str]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
@@ -265,7 +260,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
try:
real_name, obj, parent, modname = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
- return find_autosummary_in_lines(lines, module=name, filename=filename) # type: ignore
+ return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportError as e:
@@ -277,7 +272,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
def find_autosummary_in_lines(lines, module=None, filename=None):
- # type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
+ # type: (List[str], Any, str) -> List[Tuple[str, str, str]]
"""Find out what items appear in autosummary:: directives in the
given lines.
@@ -297,13 +292,13 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
- documented = [] # type: List[Tuple[unicode, unicode, unicode]]
+ documented = [] # type: List[Tuple[str, str, str]]
- toctree = None # type: unicode
+ toctree = None # type: str
template = None
current_module = module
in_autosummary = False
- base_indent = "" # type: unicode
+ base_indent = ""
for line in lines:
if in_autosummary:
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index d6d865665..dde28bb8d 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.coverage
~~~~~~~~~~~~~~~~~~~
@@ -12,12 +11,10 @@
import glob
import inspect
+import pickle
import re
from os import path
-from six import iteritems
-from six.moves import cPickle as pickle
-
import sphinx
from sphinx.builders import Builder
from sphinx.locale import __
@@ -34,13 +31,13 @@ logger = logging.getLogger(__name__)
# utility
def write_header(f, text, char='-'):
- # type:(IO, unicode, unicode) -> None
+ # type:(IO, str, str) -> None
f.write(text + '\n')
f.write(char * len(text) + '\n')
def compile_regex_list(name, exps):
- # type: (unicode, unicode) -> List[Pattern]
+ # type: (str, str) -> List[Pattern]
lst = []
for exp in exps:
try:
@@ -60,20 +57,20 @@ class CoverageBuilder(Builder):
def init(self):
# type: () -> None
- self.c_sourcefiles = [] # type: List[unicode]
+ self.c_sourcefiles = [] # type: List[str]
for pattern in self.config.coverage_c_path:
pattern = path.join(self.srcdir, pattern)
self.c_sourcefiles.extend(glob.glob(pattern))
- self.c_regexes = [] # type: List[Tuple[unicode, Pattern]]
+ self.c_regexes = [] # type: List[Tuple[str, Pattern]]
for (name, exp) in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
logger.warning(__('invalid regex %r in coverage_c_regexes'), exp)
- self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]]
- for (name, exps) in iteritems(self.config.coverage_ignore_c_items):
+ self.c_ignorexps = {} # type: Dict[str, List[Pattern]]
+ for (name, exps) in self.config.coverage_ignore_c_items.items():
self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
exps)
self.mod_ignorexps = compile_regex_list('coverage_ignore_modules',
@@ -84,16 +81,16 @@ class CoverageBuilder(Builder):
self.config.coverage_ignore_functions)
def get_outdated_docs(self):
- # type: () -> unicode
+ # type: () -> str
return 'coverage overview'
def write(self, *ignored):
# type: (Any) -> None
- self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.py_undoc = {} # type: Dict[str, Dict[str, Any]]
self.build_py_coverage()
self.write_py_coverage()
- self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]]
+ self.c_undoc = {} # type: Dict[str, Set[Tuple[str, str]]]
self.build_c_coverage()
self.write_c_coverage()
@@ -102,8 +99,8 @@ class CoverageBuilder(Builder):
# Fetch all the info from the header files
c_objects = self.env.domaindata['c']['objects']
for filename in self.c_sourcefiles:
- undoc = set() # type: Set[Tuple[unicode, unicode]]
- with open(filename, 'r') as f:
+ undoc = set() # type: Set[Tuple[str, str]]
+ with open(filename) as f:
for line in f:
for key, regex in self.c_regexes:
match = regex.match(line)
@@ -127,7 +124,7 @@ class CoverageBuilder(Builder):
write_header(op, 'Undocumented C API elements', '=')
op.write('\n')
- for filename, undoc in iteritems(self.c_undoc):
+ for filename, undoc in self.c_undoc.items():
write_header(op, filename)
for typ, name in sorted(undoc):
op.write(' * %-50s [%9s]\n' % (name, typ))
@@ -157,7 +154,7 @@ class CoverageBuilder(Builder):
continue
funcs = []
- classes = {} # type: Dict[unicode, List[unicode]]
+ classes = {} # type: Dict[str, List[str]]
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
@@ -194,7 +191,7 @@ class CoverageBuilder(Builder):
classes[name] = []
continue
- attrs = [] # type: List[unicode]
+ attrs = [] # type: List[str]
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
@@ -247,7 +244,7 @@ class CoverageBuilder(Builder):
if undoc['classes']:
op.write('Classes:\n')
for name, methods in sorted(
- iteritems(undoc['classes'])):
+ undoc['classes'].items()):
if not methods:
op.write(' * %s\n' % name)
else:
@@ -268,7 +265,7 @@ class CoverageBuilder(Builder):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_ignore_modules', [], False)
app.add_config_value('coverage_ignore_functions', [], False)
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index c35b3d284..f873cb7b5 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.doctest
~~~~~~~~~~~~~~~~~~
@@ -9,33 +8,33 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-import codecs
import doctest
import re
import sys
import time
+import warnings
+from io import StringIO
from os import path
from docutils import nodes
from docutils.parsers.rst import directives
from packaging.specifiers import SpecifierSet, InvalidSpecifier
from packaging.version import Version
-from six import itervalues, StringIO, binary_type, text_type, PY2
import sphinx
from sphinx.builders import Builder
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.locale import __
-from sphinx.util import force_decode, logging
+from sphinx.util import logging
from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import set_source_info
-from sphinx.util.osutil import fs_encoding, relpath
+from sphinx.util.osutil import relpath
if False:
# For type annotation
- from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple # NOQA
+ from typing import Any, Callable, Dict, IO, Iterable, List, Optional, Sequence, Set, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
logger = logging.getLogger(__name__)
@@ -43,22 +42,16 @@ logger = logging.getLogger(__name__)
blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE)
doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
-if PY2:
- def doctest_encode(text, encoding):
- # type: (str, unicode) -> unicode
- if isinstance(text, text_type):
- text = text.encode(encoding)
- if text.startswith(codecs.BOM_UTF8):
- text = text[len(codecs.BOM_UTF8):]
- return text
-else:
- def doctest_encode(text, encoding):
- # type: (unicode, unicode) -> unicode
- return text
+
+def doctest_encode(text, encoding):
+ # type: (str, str) -> str
+ warnings.warn('doctest_encode() is deprecated.',
+ RemovedInSphinx40Warning)
+ return text
def is_allowed_version(spec, version):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Check `spec` satisfies `version` or not.
This obeys PEP-440 specifiers:
@@ -113,7 +106,7 @@ class TestDirective(SphinxDirective):
if not test:
test = code
code = doctestopt_re.sub('', code)
- nodetype = nodes.literal_block
+ nodetype = nodes.literal_block # type: Type[nodes.TextElement]
if self.name in ('testsetup', 'testcleanup') or 'hide' in self.options:
nodetype = nodes.comment
if self.arguments:
@@ -126,9 +119,15 @@ class TestDirective(SphinxDirective):
# only save if it differs from code
node['test'] = test
if self.name == 'doctest':
- node['language'] = 'pycon'
+ if self.config.highlight_language in ('py', 'python'):
+ node['language'] = 'pycon'
+ else:
+ node['language'] = 'pycon3' # default
elif self.name == 'testcode':
- node['language'] = 'python'
+ if self.config.highlight_language in ('py', 'python'):
+ node['language'] = 'python'
+ else:
+ node['language'] = 'python3' # default
elif self.name == 'testoutput':
# don't try to highlight output
node['language'] = 'none'
@@ -203,9 +202,9 @@ parser = doctest.DocTestParser()
# helper classes
-class TestGroup(object):
+class TestGroup:
def __init__(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
self.name = name
self.setup = [] # type: List[TestCode]
self.tests = [] # type: List[List[TestCode]]
@@ -230,23 +229,23 @@ class TestGroup(object):
else:
raise RuntimeError(__('invalid TestCode type'))
- def __repr__(self): # type: ignore
- # type: () -> unicode
+ def __repr__(self):
+ # type: () -> str
return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % (
self.name, self.setup, self.cleanup, self.tests)
-class TestCode(object):
+class TestCode:
def __init__(self, code, type, filename, lineno, options=None):
- # type: (unicode, unicode, Optional[str], int, Optional[Dict]) -> None
+ # type: (str, str, Optional[str], int, Optional[Dict]) -> None
self.code = code
self.type = type
self.filename = filename
self.lineno = lineno
self.options = options or {}
- def __repr__(self): # type: ignore
- # type: () -> unicode
+ def __repr__(self):
+ # type: () -> str
return 'TestCode(%r, %r, filename=%r, lineno=%r, options=%r)' % (
self.code, self.type, self.filename, self.lineno, self.options)
@@ -258,7 +257,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
old_stdout = sys.stdout
sys.stdout = string_io
try:
- res = doctest.DocTestRunner.summarize(self, verbose)
+ res = super().summarize(verbose)
finally:
sys.stdout = old_stdout
out(string_io.getvalue())
@@ -266,7 +265,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
def _DocTestRunner__patched_linecache_getlines(self, filename,
module_globals=None):
- # type: (unicode, Any) -> Any
+ # type: (str, Any) -> Any
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename) # type: ignore
if m and m.group('name') == self.test.name:
@@ -317,41 +316,37 @@ class DocTestBuilder(Builder):
date = time.strftime('%Y-%m-%d %H:%M:%S')
- self.outfile = None # type: IO
- self.outfile = codecs.open(path.join(self.outdir, 'output.txt'), # type: ignore
- 'w', encoding='utf-8')
+ self.outfile = open(path.join(self.outdir, 'output.txt'), 'w', encoding='utf-8')
self.outfile.write(('Results of doctest builder run on %s\n'
'==================================%s\n') %
(date, '=' * len(date)))
def _out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.app.quiet or self.app.warningiserror:
logger.warning(text)
else:
logger.info(text, nonl=True)
- if isinstance(text, binary_type):
- text = force_decode(text, None)
self.outfile.write(text)
def get_target_uri(self, docname, typ=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return ''
def get_outdated_docs(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self.env.found_docs
def finish(self):
# type: () -> None
# write executive summary
def s(v):
- # type: (int) -> unicode
+ # type: (int) -> str
return v != 1 and 's' or ''
repl = (self.total_tries, s(self.total_tries),
self.total_failures, s(self.total_failures),
@@ -371,7 +366,7 @@ Doctest summary
self.app.statuscode = 1
def write(self, build_docnames, updated_docnames, method='update'):
- # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
+ # type: (Iterable[str], Sequence[str], str) -> None
if build_docnames is None:
build_docnames = sorted(self.env.all_docs)
@@ -382,7 +377,7 @@ Doctest summary
self.test_doc(docname, doctree)
def get_filename_for_node(self, node, docname):
- # type: (nodes.Node, unicode) -> str
+ # type: (nodes.Node, str) -> str
"""Try to get the file which actually contains the doctest, not the
filename of the document it's included in."""
try:
@@ -390,8 +385,6 @@ Doctest summary
.rsplit(':docstring of ', maxsplit=1)[0]
except Exception:
filename = self.env.doc2path(docname, base=None)
- if PY2:
- return filename.encode(fs_encoding)
return filename
@staticmethod
@@ -412,8 +405,8 @@ Doctest summary
return None
def test_doc(self, docname, doctree):
- # type: (unicode, nodes.Node) -> None
- groups = {} # type: Dict[unicode, TestGroup]
+ # type: (str, nodes.Node) -> None
+ groups = {} # type: Dict[str, TestGroup]
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False,
optionflags=self.opt)
@@ -436,7 +429,8 @@ Doctest summary
# type: (nodes.Node) -> bool
return isinstance(node, (nodes.literal_block, nodes.comment)) \
and 'testnodetype' in node
- for node in doctree.traverse(condition):
+
+ for node in doctree.traverse(condition): # type: nodes.Element
source = node['test'] if 'test' in node else node.astext()
filename = self.get_filename_for_node(node, docname)
line_number = self.get_line_number(node)
@@ -456,24 +450,24 @@ Doctest summary
groups[groupname] = TestGroup(groupname)
groups[groupname].add_code(code)
for code in add_to_all_groups:
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code)
if self.config.doctest_global_setup:
code = TestCode(self.config.doctest_global_setup,
'testsetup', filename=None, lineno=0)
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code, prepend=True)
if self.config.doctest_global_cleanup:
code = TestCode(self.config.doctest_global_cleanup,
'testcleanup', filename=None, lineno=0)
- for group in itervalues(groups):
+ for group in groups.values():
group.add_code(code)
if not groups:
return
self._out('\nDocument: %s\n----------%s\n' %
(docname, '-' * len(docname)))
- for group in itervalues(groups):
+ for group in groups.values():
self.test_group(group)
# Separately count results from setup code
res_f, res_t = self.setup_runner.summarize(self._out, verbose=False)
@@ -490,7 +484,7 @@ Doctest summary
self.cleanup_tries += res_t
def compile(self, code, name, type, flags, dont_inherit):
- # type: (unicode, unicode, unicode, Any, bool) -> Any
+ # type: (str, str, str, Any, bool) -> Any
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group):
@@ -501,9 +495,8 @@ Doctest summary
# type: (Any, List[TestCode], Any) -> bool
examples = []
for testcode in testcodes:
- examples.append(doctest.Example( # type: ignore
- doctest_encode(testcode.code, self.env.config.source_encoding), '', # type: ignore # NOQA
- lineno=testcode.lineno))
+ example = doctest.Example(testcode.code, '', lineno=testcode.lineno)
+ examples.append(example)
if not examples:
return True
# simulate a doctest with the code
@@ -528,9 +521,8 @@ Doctest summary
if len(code) == 1:
# ordinary doctests (code/output interleaved)
try:
- test = parser.get_doctest( # type: ignore
- doctest_encode(code[0].code, self.env.config.source_encoding), {}, # type: ignore # NOQA
- group.name, code[0].filename, code[0].lineno)
+ test = parser.get_doctest(code[0].code, {}, group.name, # type: ignore
+ code[0].filename, code[0].lineno)
except Exception:
logger.warning(__('ignoring invalid doctest code: %r'), code[0].code,
location=(code[0].filename, code[0].lineno))
@@ -555,12 +547,9 @@ Doctest summary
exc_msg = m.group('msg')
else:
exc_msg = None
- example = doctest.Example( # type: ignore
- doctest_encode(code[0].code, self.env.config.source_encoding), output, # type: ignore # NOQA
- exc_msg=exc_msg,
- lineno=code[0].lineno,
- options=options)
- test = doctest.DocTest([example], {}, group.name, # type: ignore
+ example = doctest.Example(code[0].code, output, exc_msg=exc_msg,
+ lineno=code[0].lineno, options=options)
+ test = doctest.DocTest([example], {}, group.name,
code[0].filename, code[0].lineno, None)
self.type = 'exec' # multiple statements again
# DocTest.__init__ copies the globs namespace, which we don't want
@@ -573,7 +562,7 @@ Doctest summary
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)
diff --git a/sphinx/ext/extlinks.py b/sphinx/ext/extlinks.py
index 29bfe928f..d284c6e82 100644
--- a/sphinx/ext/extlinks.py
+++ b/sphinx/ext/extlinks.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.extlinks
~~~~~~~~~~~~~~~~~~~
@@ -25,7 +24,6 @@
"""
from docutils import nodes, utils
-from six import iteritems
import sphinx
from sphinx.util.nodes import split_explicit_title
@@ -39,9 +37,9 @@ if False:
def make_link_role(base_url, prefix):
- # type: (unicode, unicode) -> RoleFunction
+ # type: (str, str) -> RoleFunction
def role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
has_explicit_title, title, part = split_explicit_title(text)
try:
@@ -64,12 +62,12 @@ def make_link_role(base_url, prefix):
def setup_link_roles(app):
# type: (Sphinx) -> None
- for name, (base_url, prefix) in iteritems(app.config.extlinks):
+ for name, (base_url, prefix) in app.config.extlinks.items():
app.add_role(name, make_link_role(base_url, prefix))
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('extlinks', {}, 'env')
app.connect('builder-inited', setup_link_roles)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/githubpages.py b/sphinx/ext/githubpages.py
index bd7061fb7..1b1a604eb 100644
--- a/sphinx/ext/githubpages.py
+++ b/sphinx/ext/githubpages.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.githubpages
~~~~~~~~~~~~~~~~~~~~~~
@@ -28,6 +27,6 @@ def create_nojekyll(app, env):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('env-updated', create_nojekyll)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index c9b1541e7..dcd035713 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.graphviz
~~~~~~~~~~~~~~~~~~~
@@ -10,7 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import posixpath
import re
from hashlib import sha1
@@ -19,8 +17,6 @@ from subprocess import Popen, PIPE
from docutils import nodes
from docutils.parsers.rst import directives
-from docutils.statemachine import ViewList
-from six import text_type
import sphinx
from sphinx.errors import SphinxError
@@ -29,13 +25,20 @@ from sphinx.util import logging
from sphinx.util.docutils import SphinxDirective
from sphinx.util.fileutil import copy_asset
from sphinx.util.i18n import search_image_for_language
-from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
+from sphinx.util.nodes import set_source_info
+from sphinx.util.osutil import ensuredir
if False:
# For type annotation
from docutils.parsers.rst import Directive # NOQA
from typing import Any, Dict, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.util.docutils import SphinxTranslator # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
+ from sphinx.writers.manpage import ManualPageTranslator # NOQA
+ from sphinx.writers.texinfo import TexinfoTranslator # NOQA
+ from sphinx.writers.text import TextTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -44,22 +47,22 @@ class GraphvizError(SphinxError):
category = 'Graphviz error'
-class ClickableMapDefinition(object):
+class ClickableMapDefinition:
"""A manipulator for clickable map file of graphviz."""
maptag_re = re.compile('<map id="(.*?)"')
href_re = re.compile('href=".*?"')
def __init__(self, filename, content, dot=''):
- # type: (unicode, unicode, unicode) -> None
- self.id = None # type: unicode
+ # type: (str, str, str) -> None
+ self.id = None # type: str
self.filename = filename
self.content = content.splitlines()
- self.clickable = [] # type: List[unicode]
+ self.clickable = [] # type: List[str]
self.parse(dot=dot)
def parse(self, dot=None):
- # type: (unicode) -> None
+ # type: (str) -> None
matched = self.maptag_re.match(self.content[0])
if not matched:
raise GraphvizError('Invalid clickable map file found: %s' % self.filename)
@@ -68,7 +71,7 @@ class ClickableMapDefinition(object):
if self.id == '%3':
# graphviz generates wrong ID if graph name not specified
# https://gitlab.com/graphviz/graphviz/issues/1327
- hashed = sha1(dot.encode('utf-8')).hexdigest()
+ hashed = sha1(dot.encode()).hexdigest()
self.id = 'grapviz%s' % hashed[-10:]
self.content[0] = self.content[0].replace('%3', self.id)
@@ -77,7 +80,7 @@ class ClickableMapDefinition(object):
self.clickable.append(line)
def generate_clickable_map(self):
- # type: () -> unicode
+ # type: () -> str
"""Generate clickable map tags if clickable item exists.
If not exists, this only returns empty string.
@@ -93,24 +96,21 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
def figure_wrapper(directive, node, caption):
- # type: (Directive, nodes.Node, unicode) -> nodes.figure
+ # type: (Directive, graphviz, str) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
- parsed = nodes.Element()
- directive.state.nested_parse(ViewList([caption], source=''),
- directive.content_offset, parsed)
- caption_node = nodes.caption(parsed[0].rawsource, '',
- *parsed[0].children)
- caption_node.source = parsed[0].source
- caption_node.line = parsed[0].line
+ inodes, messages = directive.state.inline_text(caption, directive.lineno)
+ caption_node = nodes.caption(caption, '', *inodes)
+ caption_node.extend(messages)
+ set_source_info(directive, caption_node)
figure_node += caption_node
return figure_node
def align_spec(argument):
- # type: (Any) -> bool
+ # type: (Any) -> str
return directives.choice(argument, ('left', 'center', 'right'))
@@ -142,9 +142,9 @@ class Graphviz(SphinxDirective):
rel_filename, filename = self.env.relfn2path(argument)
self.env.note_dependency(rel_filename)
try:
- with codecs.open(filename, 'r', 'utf-8') as fp: # type: ignore
+ with open(filename, encoding='utf-8') as fp:
dotcode = fp.read()
- except (IOError, OSError):
+ except OSError:
return [document.reporter.warning(
__('External Graphviz file %r not found or reading '
'it failed') % filename, line=self.lineno)]
@@ -165,12 +165,13 @@ class Graphviz(SphinxDirective):
if 'align' in self.options:
node['align'] = self.options['align']
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
-
- self.add_name(node)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
class GraphvizSimple(SphinxDirective):
@@ -204,20 +205,21 @@ class GraphvizSimple(SphinxDirective):
if 'align' in self.options:
node['align'] = self.options['align']
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
-
- self.add_name(node)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
def render_dot(self, code, options, format, prefix='graphviz'):
- # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (SphinxTranslator, str, Dict, str, str) -> Tuple[str, str]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
- str(self.builder.config.graphviz_dot_args)).encode('utf-8')
+ str(self.builder.config.graphviz_dot_args)).encode()
fname = '%s-%s.%s' % (prefix, sha1(hashkey).hexdigest(), format)
relfn = posixpath.join(self.builder.imgpath, fname)
@@ -227,15 +229,11 @@ def render_dot(self, code, options, format, prefix='graphviz'):
return relfn, outfn
if (hasattr(self.builder, '_graphviz_warned_dot') and
- self.builder._graphviz_warned_dot.get(graphviz_dot)):
+ self.builder._graphviz_warned_dot.get(graphviz_dot)): # type: ignore # NOQA
return None, None
ensuredir(path.dirname(outfn))
- # graphviz expects UTF-8 by default
- if isinstance(code, text_type):
- code = code.encode('utf-8')
-
dot_args = [graphviz_dot]
dot_args.extend(self.builder.config.graphviz_dot_args)
dot_args.extend(['-T' + format, '-o' + outfn])
@@ -247,22 +245,18 @@ def render_dot(self, code, options, format, prefix='graphviz'):
dot_args.extend(['-Tcmapx', '-o%s.map' % outfn])
try:
p = Popen(dot_args, stdout=PIPE, stdin=PIPE, stderr=PIPE, cwd=cwd)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ except FileNotFoundError:
logger.warning(__('dot command %r cannot be run (needed for graphviz '
'output), check the graphviz_dot setting'), graphviz_dot)
if not hasattr(self.builder, '_graphviz_warned_dot'):
- self.builder._graphviz_warned_dot = {}
- self.builder._graphviz_warned_dot[graphviz_dot] = True
+ self.builder._graphviz_warned_dot = {} # type: ignore
+ self.builder._graphviz_warned_dot[graphviz_dot] = True # type: ignore
return None, None
try:
# Graphviz may close standard input when an error occurs,
# resulting in a broken pipe on communicate()
- stdout, stderr = p.communicate(code)
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
+ stdout, stderr = p.communicate(code.encode())
+ except BrokenPipeError:
# in this case, read the standard output and standard error streams
# directly, to get the error message(s)
stdout, stderr = p.stdout.read(), p.stderr.read()
@@ -278,7 +272,7 @@ def render_dot(self, code, options, format, prefix='graphviz'):
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (HTMLTranslator, graphviz, str, Dict, str, str, str) -> Tuple[str, str]
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
@@ -286,7 +280,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
"'svg', but is %r") % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
if imgcls:
@@ -309,7 +303,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
self.body.append('<p class="warning">%s</p>' % alt)
self.body.append('</object></div>\n')
else:
- with codecs.open(outfn + '.map', 'r', encoding='utf-8') as mapfile: # type: ignore
+ with open(outfn + '.map', encoding='utf-8') as mapfile:
imgmap = ClickableMapDefinition(outfn + '.map', mapfile.read(), dot=code)
if imgmap.clickable:
# has a map
@@ -331,16 +325,16 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
def html_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (HTMLTranslator, graphviz) -> None
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
+ # type: (LaTeXTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
is_inline = self.is_inline(node)
@@ -369,16 +363,16 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'):
def latex_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (LaTeXTranslator, graphviz) -> None
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
- # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
+ # type: (TexinfoTranslator, graphviz, str, Dict, str) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
- logger.warning(__('dot code %r: %s'), code, text_type(exc))
+ logger.warning(__('dot code %r: %s'), code, exc)
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
@@ -386,12 +380,12 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
def texinfo_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (TexinfoTranslator, graphviz) -> None
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (TextTranslator, graphviz) -> None
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
@@ -400,7 +394,7 @@ def text_visit_graphviz(self, node):
def man_visit_graphviz(self, node):
- # type: (nodes.NodeVisitor, graphviz) -> None
+ # type: (ManualPageTranslator, graphviz) -> None
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
@@ -417,7 +411,7 @@ def on_build_finished(app, exc):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
index f22a37e92..a4bd03add 100644
--- a/sphinx/ext/ifconfig.py
+++ b/sphinx/ext/ifconfig.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.ifconfig
~~~~~~~~~~~~~~~~~~~
@@ -51,12 +50,12 @@ class IfConfig(SphinxDirective):
set_source_info(self, node)
node['expr'] = self.arguments[0]
self.state.nested_parse(self.content, self.content_offset,
- node, match_titles=1)
+ node, match_titles=True)
return [node]
def process_ifconfig_nodes(app, doctree, docname):
- # type: (Sphinx, nodes.Node, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
ns = dict((confval.name, confval.value) for confval in app.config)
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
@@ -79,7 +78,7 @@ def process_ifconfig_nodes(app, doctree, docname):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)
diff --git a/sphinx/ext/imgconverter.py b/sphinx/ext/imgconverter.py
index fe086b1fe..9e628b13a 100644
--- a/sphinx/ext/imgconverter.py
+++ b/sphinx/ext/imgconverter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.imgconverter
~~~~~~~~~~~~~~~~~~~~~~~
@@ -15,7 +14,6 @@ from sphinx.errors import ExtensionError
from sphinx.locale import __
from sphinx.transforms.post_transforms.images import ImageConverter
from sphinx.util import logging
-from sphinx.util.osutil import ENOENT, EPIPE, EINVAL
if False:
# For type annotation
@@ -40,7 +38,7 @@ class ImagemagickConverter(ImageConverter):
args = [self.config.image_converter, '-version']
logger.debug('Invoking %r ...', args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except (OSError, IOError):
+ except OSError:
logger.warning(__('convert command %r cannot be run.'
'check the image_converter setting'),
self.config.image_converter)
@@ -48,9 +46,7 @@ class ImagemagickConverter(ImageConverter):
try:
stdout, stderr = p.communicate()
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
+ except BrokenPipeError:
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
@@ -63,7 +59,7 @@ class ImagemagickConverter(ImageConverter):
return True
def convert(self, _from, _to):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Converts the image to expected one."""
try:
if _from.lower().endswith('.gif'):
@@ -75,9 +71,7 @@ class ImagemagickConverter(ImageConverter):
[_from, _to])
logger.debug('Invoking %r ...', args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ except FileNotFoundError:
logger.warning(__('convert command %r cannot be run.'
'check the image_converter setting'),
self.config.image_converter)
@@ -85,9 +79,7 @@ class ImagemagickConverter(ImageConverter):
try:
stdout, stderr = p.communicate()
- except (OSError, IOError) as err:
- if err.errno not in (EPIPE, EINVAL):
- raise
+ except BrokenPipeError:
stdout, stderr = p.stdout.read(), p.stderr.read()
p.wait()
if p.returncode != 0:
@@ -99,7 +91,7 @@ class ImagemagickConverter(ImageConverter):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImagemagickConverter)
app.add_config_value('image_converter', 'convert', 'env')
app.add_config_value('image_converter_args', [], 'env')
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index 18f04a095..377da12f1 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.imgmath
~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import posixpath
import re
import shutil
@@ -19,24 +17,24 @@ from os import path
from subprocess import Popen, PIPE
from docutils import nodes
-from six import text_type
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _, __
from sphinx.util import logging
from sphinx.util.math import get_node_equation_number, wrap_displaymath
-from sphinx.util.osutil import ensuredir, ENOENT, cd
+from sphinx.util.osutil import ensuredir, cd
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.pycompat import sys_encoding
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
+ from typing import Any, Dict, List, Tuple, Union # NOQA
from sphinx.addnodes import displaymath # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -45,12 +43,12 @@ class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, bytes, bytes) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
msg += '\n[stdout]\n' + stdout.decode(sys_encoding, 'replace')
- SphinxError.__init__(self, msg)
+ super().__init__(msg)
class InvokeError(SphinxError):
@@ -90,7 +88,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def generate_latex_macro(math, config):
- # type: (unicode, Config) -> unicode
+ # type: (str, Config) -> str
"""Generate LaTeX macro."""
fontsize = config.imgmath_font_size
baselineskip = int(round(fontsize * 1.2))
@@ -105,7 +103,7 @@ def generate_latex_macro(math, config):
def ensure_tempdir(builder):
- # type: (Builder) -> unicode
+ # type: (Builder) -> str
"""Create temporary directory.
use only one tempdir per build -- the use of a directory is cleaner
@@ -119,11 +117,11 @@ def ensure_tempdir(builder):
def compile_math(latex, builder):
- # type: (unicode, Builder) -> unicode
+ # type: (str, Builder) -> str
"""Compile LaTeX macros for math to DVI."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.tex')
- with codecs.open(filename, 'w', 'utf-8') as f: # type: ignore
+ with open(filename, 'w', encoding='utf-8') as f:
f.write(latex)
# build latex command; old versions of latex don't have the
@@ -137,9 +135,7 @@ def compile_math(latex, builder):
with cd(tempdir):
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ except FileNotFoundError:
logger.warning(__('LaTeX command %r cannot be run (needed for math '
'display), check the imgmath_latex setting'),
builder.config.imgmath_latex)
@@ -153,13 +149,11 @@ def compile_math(latex, builder):
def convert_dvi_to_image(command, name):
- # type: (List[unicode], unicode) -> Tuple[unicode, unicode]
+ # type: (List[str], str) -> Tuple[bytes, bytes]
"""Convert DVI file to specific image format."""
try:
p = Popen(command, stdout=PIPE, stderr=PIPE)
- except OSError as err:
- if err.errno != ENOENT: # No such file or directory
- raise
+ except FileNotFoundError:
logger.warning(__('%s command %r cannot be run (needed for math '
'display), check the imgmath_%s setting'),
name, command[0], name)
@@ -173,7 +167,7 @@ def convert_dvi_to_image(command, name):
def convert_dvi_to_png(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to PNG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.png')
@@ -200,7 +194,7 @@ def convert_dvi_to_png(dvipath, builder):
def convert_dvi_to_svg(dvipath, builder):
- # type: (unicode, Builder) -> Tuple[unicode, int]
+ # type: (str, Builder) -> Tuple[str, int]
"""Convert DVI file to SVG image."""
tempdir = ensure_tempdir(builder)
filename = path.join(tempdir, 'math.svg')
@@ -215,7 +209,7 @@ def convert_dvi_to_svg(dvipath, builder):
def render_math(self, math):
- # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int]
+ # type: (HTMLTranslator, str) -> Tuple[str, int]
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
@@ -235,7 +229,7 @@ def render_math(self, math):
latex = generate_latex_macro(math, self.builder.config)
- filename = "%s.%s" % (sha1(latex.encode('utf-8')).hexdigest(), image_format)
+ filename = "%s.%s" % (sha1(latex.encode()).hexdigest(), image_format)
relfn = posixpath.join(self.builder.imgpath, 'math', filename)
outfn = path.join(self.builder.outdir, self.builder.imagedir, 'math', filename)
if path.isfile(outfn):
@@ -251,7 +245,7 @@ def render_math(self, math):
try:
dvipath = compile_math(latex, self.builder)
except InvokeError:
- self.builder._imgmath_warned_latex = True
+ self.builder._imgmath_warned_latex = True # type: ignore
return None, None
# .dvi -> .png/.svg
@@ -261,7 +255,7 @@ def render_math(self, math):
elif image_format == 'svg':
imgpath, depth = convert_dvi_to_svg(dvipath, self.builder)
except InvokeError:
- self.builder._imgmath_warned_image_translator = True
+ self.builder._imgmath_warned_image_translator = True # type: ignore
return None, None
# Move generated image on tempdir to build dir
@@ -284,18 +278,18 @@ def cleanup_tempdir(app, exc):
def get_tooltip(self, node):
- # type: (nodes.NodeVisitor, nodes.math) -> unicode
+ # type: (HTMLTranslator, Union[nodes.math, nodes.math_block]) -> str
if self.builder.config.imgmath_add_tooltips:
return ' alt="%s"' % self.encode(node.astext()).strip()
return ''
def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.math) -> None
+ # type: (HTMLTranslator, nodes.math) -> None
try:
fname, depth = render_math(self, '$' + node.astext() + '$')
except MathExtError as exc:
- msg = text_type(exc)
+ msg = str(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node.astext())
sm.walkabout(self)
@@ -314,7 +308,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.math_block) -> None
+ # type: (HTMLTranslator, nodes.math_block) -> None
if node['nowrap']:
latex = node.astext()
else:
@@ -322,7 +316,7 @@ def html_visit_displaymath(self, node):
try:
fname, depth = render_math(self, latex)
except MathExtError as exc:
- msg = text_type(exc)
+ msg = str(exc)
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node.astext())
sm.walkabout(self)
@@ -346,7 +340,7 @@ def html_visit_displaymath(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('imgmath',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index 750fcd4f7..e56e17cbe 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
r"""
sphinx.ext.inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -36,21 +35,22 @@ r"""
:license: BSD, see LICENSE for details.
"""
+import builtins
import inspect
import re
import sys
from hashlib import md5
+from typing import Iterable, cast
from docutils import nodes
from docutils.parsers.rst import directives
-from six import text_type
-from six.moves import builtins
import sphinx
-from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
- render_dot_texinfo, figure_wrapper
-from sphinx.pycode import ModuleAnalyzer
-from sphinx.util import force_decode
+from sphinx import addnodes
+from sphinx.ext.graphviz import (
+ graphviz, figure_wrapper,
+ render_dot_html, render_dot_latex, render_dot_texinfo
+)
from sphinx.util.docutils import SphinxDirective
if False:
@@ -58,6 +58,9 @@ if False:
from typing import Any, Dict, List, Tuple, Dict, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
+ from sphinx.writers.texinfo import TexinfoTranslator # NOQA
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
@@ -66,7 +69,7 @@ module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
def try_import(objname):
- # type: (unicode) -> Any
+ # type: (str) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
@@ -75,8 +78,8 @@ def try_import(objname):
"""
try:
__import__(objname)
- return sys.modules.get(objname) # type: ignore
- except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
+ return sys.modules.get(objname)
+ except ImportError:
matched = module_sig_re.match(objname)
if not matched:
@@ -88,13 +91,13 @@ def try_import(objname):
return None
try:
__import__(modname)
- return getattr(sys.modules.get(modname), attrname, None) # type: ignore
- except (ImportError, ValueError): # ValueError,py27 -> ImportError,py3
+ return getattr(sys.modules.get(modname), attrname, None)
+ except ImportError:
return None
def import_classes(name, currmodule):
- # type: (unicode, unicode) -> Any
+ # type: (str, str) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
@@ -129,7 +132,7 @@ class InheritanceException(Exception):
pass
-class InheritanceGraph(object):
+class InheritanceGraph:
"""
Given a list of classes, determines the set of classes that they inherit
from all the way to the root "object", and then is able to generate a
@@ -137,7 +140,7 @@ class InheritanceGraph(object):
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0, aliases=None, top_classes=[]):
- # type: (unicode, str, bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> None # NOQA
+ # type: (List[str], str, bool, bool, int, Optional[Dict[str, str]], List[Any]) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@@ -152,7 +155,7 @@ class InheritanceGraph(object):
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
- # type: (unicode, str) -> List[Any]
+ # type: (List[str], str) -> List[Any]
"""Import a list of classes."""
classes = [] # type: List[Any]
for name in class_names:
@@ -160,7 +163,7 @@ class InheritanceGraph(object):
return classes
def _class_info(self, classes, show_builtins, private_bases, parts, aliases, top_classes):
- # type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]], List[Any]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
+ # type: (List[Any], bool, bool, int, Optional[Dict[str, str]], List[Any]) -> List[Tuple[str, str, List[str], str]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@@ -187,16 +190,13 @@ class InheritanceGraph(object):
tooltip = None
try:
if cls.__doc__:
- enc = ModuleAnalyzer.for_module(cls.__module__).encoding
doc = cls.__doc__.strip().split("\n")[0]
- if not isinstance(doc, text_type):
- doc = force_decode(doc, enc)
if doc:
tooltip = '"%s"' % doc.replace('"', '\\"')
except Exception: # might raise AttributeError for strange classes
pass
- baselist = [] # type: List[unicode]
+ baselist = [] # type: List[str]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
if fullname in top_classes:
@@ -217,7 +217,7 @@ class InheritanceGraph(object):
return list(all_classes.values())
def class_name(self, cls, parts=0, aliases=None):
- # type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
+ # type: (Any, int, Optional[Dict[str, str]]) -> str
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@@ -238,7 +238,7 @@ class InheritanceGraph(object):
return result
def get_all_class_names(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Get all of the class names involved in the graph."""
return [fullname for (_, fullname, _, _) in self.class_info]
@@ -261,16 +261,16 @@ class InheritanceGraph(object):
}
def _format_node_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
- # type: (Dict) -> unicode
+ # type: (Dict) -> str
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
- # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
+ # type: (str, Dict, BuildEnvironment, Dict, Dict, Dict) -> str
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
@@ -292,7 +292,7 @@ class InheritanceGraph(object):
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
@@ -316,7 +316,7 @@ class InheritanceGraph(object):
return ''.join(res)
-class inheritance_diagram(nodes.General, nodes.Element):
+class inheritance_diagram(graphviz):
"""
A docutils node to use as a placeholder for the inheritance diagram.
"""
@@ -362,36 +362,37 @@ class InheritanceDiagram(SphinxDirective):
aliases=self.config.inheritance_alias,
top_classes=node['top-classes'])
except InheritanceException as err:
- return [node.document.reporter.warning(err.args[0],
- line=self.lineno)]
+ return [node.document.reporter.warning(err, line=self.lineno)]
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
- refnodes, x = class_role(
+ refnodes, x = class_role( # type: ignore
'class', ':class:`%s`' % name, name, 0, self.state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
- # wrap the result in figure node
- caption = self.options.get('caption')
- if caption:
- node = figure_wrapper(self, node, caption)
- return [node]
+ if 'caption' not in self.options:
+ self.add_name(node)
+ return [node]
+ else:
+ figure = figure_wrapper(self, node, self.options['caption'])
+ self.add_name(figure)
+ return [figure]
def get_graph_hash(node):
- # type: (inheritance_diagram) -> unicode
- encoded = (node['content'] + str(node['parts'])).encode('utf-8')
+ # type: (inheritance_diagram) -> str
+ encoded = (node['content'] + str(node['parts'])).encode()
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (HTMLTranslator, inheritance_diagram) -> None
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
@@ -405,7 +406,8 @@ def html_visit_inheritance_diagram(self, node):
graphviz_output_format = self.builder.env.config.graphviz_output_format.upper()
current_filename = self.builder.current_docname + self.builder.out_suffix
urls = {}
- for child in node:
+ pending_xrefs = cast(Iterable[addnodes.pending_xref], node)
+ for child in pending_xrefs:
if child.get('refuri') is not None:
if graphviz_output_format == 'SVG':
urls[child['reftitle']] = "../" + child.get('refuri')
@@ -424,7 +426,7 @@ def html_visit_inheritance_diagram(self, node):
def latex_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (LaTeXTranslator, inheritance_diagram) -> None
"""
Output the graph for LaTeX. This will insert a PDF.
"""
@@ -440,7 +442,7 @@ def latex_visit_inheritance_diagram(self, node):
def texinfo_visit_inheritance_diagram(self, node):
- # type: (nodes.NodeVisitor, inheritance_diagram) -> None
+ # type: (TexinfoTranslator, inheritance_diagram) -> None
"""
Output the graph for Texinfo. This will insert a PNG.
"""
@@ -461,7 +463,7 @@ def skip(self, node):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index 8d6a3e164..fc2a525f7 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.intersphinx
~~~~~~~~~~~~~~~~~~~~~~
@@ -24,23 +23,18 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
import functools
import posixpath
import sys
import time
-import warnings
from os import path
+from urllib.parse import urlsplit, urlunsplit
from docutils import nodes
from docutils.utils import relative_path
-from six import PY3, iteritems, string_types
-from six.moves.urllib.parse import urlsplit, urlunsplit
import sphinx
from sphinx.builders.html import INVENTORY_FILENAME
-from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.locale import _, __
from sphinx.util import requests, logging
from sphinx.util.inventory import InventoryFile
@@ -51,16 +45,12 @@ if False:
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
-
- if PY3:
- unicode = str
-
- Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]]
+ from sphinx.util.typing import Inventory # NOQA
logger = logging.getLogger(__name__)
-class InventoryAdapter(object):
+class InventoryAdapter:
"""Inventory adapter for environment"""
def __init__(self, env):
@@ -74,7 +64,7 @@ class InventoryAdapter(object):
@property
def cache(self):
- # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]]
+ # type: () -> Dict[str, Tuple[str, int, Inventory]]
return self.env.intersphinx_cache # type: ignore
@property
@@ -84,7 +74,7 @@ class InventoryAdapter(object):
@property
def named_inventory(self):
- # type: () -> Dict[unicode, Inventory]
+ # type: () -> Dict[str, Inventory]
return self.env.intersphinx_named_inventory # type: ignore
def clear(self):
@@ -94,7 +84,7 @@ class InventoryAdapter(object):
def _strip_basic_auth(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
@@ -116,7 +106,7 @@ def _strip_basic_auth(url):
def _read_from_url(url, config=None):
- # type: (unicode, Config) -> IO
+ # type: (str, Config) -> IO
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
@@ -142,7 +132,7 @@ def _read_from_url(url, config=None):
def _get_safe_url(url):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
@@ -168,7 +158,7 @@ def _get_safe_url(url):
def fetch_inventory(app, uri, inv):
- # type: (Sphinx, unicode, Any) -> Any
+ # type: (Sphinx, str, Any) -> Any
"""Fetch, parse and return an intersphinx inventory file."""
# both *uri* (base URI of the links to generate) and *inv* (actual
# location of the inventory file) can be local or remote URIs
@@ -214,28 +204,7 @@ def load_mappings(app):
cache_time = now - app.config.intersphinx_cache_limit * 86400
inventories = InventoryAdapter(app.builder.env)
update = False
- for key, value in iteritems(app.config.intersphinx_mapping):
- name = None # type: unicode
- uri = None # type: unicode
- inv = None # type: Union[unicode, Tuple[unicode, ...]]
-
- if isinstance(value, (list, tuple)):
- # new format
- name, (uri, inv) = key, value
- if not isinstance(name, string_types):
- logger.warning(__('intersphinx identifier %r is not string. Ignored'), name)
- continue
- else:
- # old format, no name
- name, uri, inv = None, key, value
- # we can safely assume that the uri<->inv mapping is not changed
- # during partial rebuilds since a changed intersphinx_mapping
- # setting will cause a full environment reread
- if not isinstance(inv, tuple):
- invs = (inv, )
- else:
- invs = inv # type: ignore
-
+ for key, (name, (uri, invs)) in app.config.intersphinx_mapping.items():
failures = []
for inv in invs:
if not inv:
@@ -244,7 +213,7 @@ def load_mappings(app):
# files; remote ones only if the cache time is expired
if '://' not in inv or uri not in inventories.cache \
or inventories.cache[uri][1] < cache_time:
- safe_inv_url = _get_safe_url(inv) # type: ignore
+ safe_inv_url = _get_safe_url(inv)
logger.info('loading intersphinx inventory from %s...', safe_inv_url)
try:
invdata = fetch_inventory(app, uri, inv)
@@ -286,16 +255,16 @@ def load_mappings(app):
for name, _x, invdata in named_vals + unnamed_vals:
if name:
inventories.named_inventory[name] = invdata
- for type, objects in iteritems(invdata):
+ for type, objects in invdata.items():
inventories.main_inventory.setdefault(type, {}).update(objects)
def missing_reference(app, env, node, contnode):
- # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None
+ # type: (Sphinx, BuildEnvironment, nodes.Element, nodes.TextElement) -> nodes.reference
"""Attempt to resolve a missing reference via intersphinx references."""
target = node['reftarget']
inventories = InventoryAdapter(env)
- objtypes = None # type: List[unicode]
+ objtypes = None # type: List[str]
if node['reftype'] == 'any':
# we search anything!
objtypes = ['%s:%s' % (domain.name, objtype)
@@ -306,10 +275,10 @@ def missing_reference(app, env, node, contnode):
domain = node.get('refdomain')
if not domain:
# only objects in domains are in the inventory
- return
+ return None
objtypes = env.get_domain(domain).objtypes_for_role(node['reftype'])
if not objtypes:
- return
+ return None
objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes]
if 'std:cmdoption' in objtypes:
# until Sphinx-1.6, cmdoptions are stored as std:option
@@ -365,14 +334,42 @@ def missing_reference(app, env, node, contnode):
if len(contnode) and isinstance(contnode[0], nodes.Text):
contnode[0] = nodes.Text(newtarget, contnode[0].rawsource)
+ return None
+
+
+def normalize_intersphinx_mapping(app, config):
+ # type: (Sphinx, Config) -> None
+ for key, value in config.intersphinx_mapping.copy().items():
+ try:
+ if isinstance(value, (list, tuple)):
+ # new format
+ name, (uri, inv) = key, value
+ if not isinstance(name, str):
+ logger.warning(__('intersphinx identifier %r is not string. Ignored'),
+ name)
+ config.intersphinx_mapping.pop(key)
+ continue
+ else:
+ # old format, no name
+ name, uri, inv = None, key, value
+
+ if not isinstance(inv, tuple):
+ config.intersphinx_mapping[key] = (name, (uri, (inv,)))
+ else:
+ config.intersphinx_mapping[key] = (name, (uri, inv))
+ except Exception as exc:
+ logger.warning(__('Fail to read intersphinx_mapping[%s], Ignored: %r'), key, exc)
+ config.intersphinx_mapping.pop(key)
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('intersphinx_mapping', {}, True)
app.add_config_value('intersphinx_cache_limit', 5, False)
app.add_config_value('intersphinx_timeout', None, False)
- app.connect('missing-reference', missing_reference)
+ app.connect('config-inited', normalize_intersphinx_mapping)
app.connect('builder-inited', load_mappings)
+ app.connect('missing-reference', missing_reference)
return {
'version': sphinx.__display_version__,
'env_version': 1,
@@ -380,17 +377,8 @@ def setup(app):
}
-def debug(argv):
- # type: (List[unicode]) -> None
- """Debug functionality to print out an inventory"""
- warnings.warn('sphinx.ext.intersphinx.debug() is deprecated. '
- 'Please use inspect_main() instead',
- RemovedInSphinx20Warning, stacklevel=2)
- inspect_main(argv[1:])
-
-
def inspect_main(argv):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
"""Debug functionality to print out an inventory"""
if len(argv) < 1:
print("Print out an inventory file.\n"
@@ -398,16 +386,16 @@ def inspect_main(argv):
file=sys.stderr)
sys.exit(1)
- class MockConfig(object):
+ class MockConfig:
intersphinx_timeout = None # type: int
tls_verify = False
- class MockApp(object):
+ class MockApp:
srcdir = ''
config = MockConfig()
def warn(self, msg):
- # type: (unicode) -> None
+ # type: (str) -> None
print(msg, file=sys.stderr)
try:
@@ -429,4 +417,4 @@ if __name__ == '__main__':
import logging # type: ignore
logging.basicConfig() # type: ignore
- inspect_main(argv=sys.argv[1:]) # type: ignore
+ inspect_main(argv=sys.argv[1:])
diff --git a/sphinx/ext/jsmath.py b/sphinx/ext/jsmath.py
index 3babd408e..0b7c63464 100644
--- a/sphinx/ext/jsmath.py
+++ b/sphinx/ext/jsmath.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.jsmath
~~~~~~~~~~~~~~~~~
@@ -10,9 +9,13 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
import sphinx
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.domains.math import MathDomain
from sphinx.errors import ExtensionError
from sphinx.locale import _
from sphinx.util.math import get_node_equation_number
@@ -22,17 +25,18 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math) -> None
self.body.append(self.starttag(node, 'span', '', CLASS='math notranslate nohighlight'))
self.body.append(self.encode(node.astext()) + '</span>')
raise nodes.SkipNode
def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math_block) -> None
if node['nowrap']:
self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
self.body.append(self.encode(node.astext()))
@@ -67,13 +71,15 @@ def install_jsmath(app, env):
raise ExtensionError('jsmath_path config value must be set for the '
'jsmath extension to work')
- if env.get_domain('math').has_equations(): # type: ignore
+ builder = cast(StandaloneHTMLBuilder, app.builder)
+ domain = cast(MathDomain, env.get_domain('math'))
+ if domain.has_equations():
# Enable jsmath only if equations exists
- app.builder.add_js_file(app.config.jsmath_path) # type: ignore
+ builder.add_js_file(app.config.jsmath_path)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('jsmath',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py
index af45f32fa..67eb503f3 100644
--- a/sphinx/ext/linkcode.py
+++ b/sphinx/ext/linkcode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.linkcode
~~~~~~~~~~~~~~~~~~~
@@ -35,16 +34,16 @@ def doctree_read(app, doctree):
raise LinkcodeError(
"Function `linkcode_resolve` is not given in conf.py")
- domain_keys = dict(
- py=['module', 'fullname'],
- c=['names'],
- cpp=['names'],
- js=['object', 'fullname'],
- )
+ domain_keys = {
+ 'py': ['module', 'fullname'],
+ 'c': ['names'],
+ 'cpp': ['names'],
+ 'js': ['object', 'fullname'],
+ }
for objnode in doctree.traverse(addnodes.desc):
domain = objnode.get('domain')
- uris = set() # type: Set[unicode]
+ uris = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -70,15 +69,14 @@ def doctree_read(app, doctree):
continue
uris.add(uri)
+ inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
onlynode = addnodes.only(expr='html')
- onlynode += nodes.reference('', '', internal=False, refuri=uri)
- onlynode[0] += nodes.inline('', _('[source]'),
- classes=['viewcode-link'])
+ onlynode += nodes.reference('', '', inline, internal=False, refuri=uri)
signode += onlynode
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('doctree-read', doctree_read)
app.add_config_value('linkcode_resolve', None, '')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py
index 076edaf37..2c37ba65a 100644
--- a/sphinx/ext/mathbase.py
+++ b/sphinx/ext/mathbase.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.mathbase
~~~~~~~~~~~~~~~~~~~
@@ -24,8 +23,8 @@ from sphinx.domains.math import MathReferenceRole as EqXRefRole # NOQA # to ke
if False:
# For type annotation
from typing import Any, Callable, List, Tuple # NOQA
- from docutils.writers.html4css1 import Writer # NOQA
from sphinx.application import Sphinx # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
class MathDirective(MathDirectiveBase):
@@ -33,7 +32,7 @@ class MathDirective(MathDirectiveBase):
warnings.warn('sphinx.ext.mathbase.MathDirective is moved to '
'sphinx.directives.patches package.',
RemovedInSphinx30Warning, stacklevel=2)
- return super(MathDirective, self).run()
+ return super().run()
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
@@ -44,7 +43,7 @@ def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
def get_node_equation_number(writer, node):
- # type: (Writer, nodes.Node) -> unicode
+ # type: (HTMLTranslator, nodes.math_block) -> str
warnings.warn('sphinx.ext.mathbase.get_node_equation_number() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -53,7 +52,7 @@ def get_node_equation_number(writer, node):
def wrap_displaymath(text, label, numbering):
- # type: (unicode, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
warnings.warn('sphinx.ext.mathbase.wrap_displaymath() is moved to '
'sphinx.util.math package.',
RemovedInSphinx30Warning, stacklevel=2)
@@ -62,7 +61,7 @@ def wrap_displaymath(text, label, numbering):
def is_in_section_title(node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.Element) -> bool
"""Determine whether the node is in a section title"""
from sphinx.util.nodes import traverse_parent
diff --git a/sphinx/ext/mathjax.py b/sphinx/ext/mathjax.py
index 2a9f37699..da306386b 100644
--- a/sphinx/ext/mathjax.py
+++ b/sphinx/ext/mathjax.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.mathjax
~~~~~~~~~~~~~~~~~~
@@ -12,10 +11,13 @@
"""
import json
+from typing import cast
from docutils import nodes
import sphinx
+from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.domains.math import MathDomain
from sphinx.errors import ExtensionError
from sphinx.locale import _
from sphinx.util.math import get_node_equation_number
@@ -25,10 +27,11 @@ if False:
from typing import Any, Dict # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
def html_visit_math(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math) -> None
self.body.append(self.starttag(node, 'span', '', CLASS='math notranslate nohighlight'))
self.body.append(self.builder.config.mathjax_inline[0] +
self.encode(node.astext()) +
@@ -37,7 +40,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
+ # type: (HTMLTranslator, nodes.math_block) -> None
self.body.append(self.starttag(node, 'div', CLASS='math notranslate nohighlight'))
if node['nowrap']:
self.body.append(self.encode(node.astext()))
@@ -77,20 +80,22 @@ def install_mathjax(app, env):
raise ExtensionError('mathjax_path config value must be set for the '
'mathjax extension to work')
- if env.get_domain('math').has_equations(): # type: ignore
+ builder = cast(StandaloneHTMLBuilder, app.builder)
+ domain = cast(MathDomain, env.get_domain('math'))
+ if domain.has_equations():
# Enable mathjax only if equations exists
options = {'async': 'async'}
if app.config.mathjax_options:
options.update(app.config.mathjax_options)
- app.builder.add_js_file(app.config.mathjax_path, **options) # type: ignore
+ builder.add_js_file(app.config.mathjax_path, **options)
if app.config.mathjax_config:
body = "MathJax.Hub.Config(%s)" % json.dumps(app.config.mathjax_config)
- app.builder.add_js_file(None, type="text/x-mathjax-config", body=body) # type: ignore # NOQA
+ builder.add_js_file(None, type="text/x-mathjax-config", body=body)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_html_math_renderer('mathjax',
(html_visit_math, None),
(html_visit_displaymath, None))
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
index b968f5948..d4a21ef2e 100644
--- a/sphinx/ext/napoleon/__init__.py
+++ b/sphinx/ext/napoleon/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon
~~~~~~~~~~~~~~~~~~~
@@ -9,11 +8,7 @@
:license: BSD, see LICENSE for details.
"""
-import sys
-
-from six import PY2, iteritems
-
-import sphinx
+from sphinx import __display_version__ as __version__
from sphinx.application import Sphinx
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
@@ -22,7 +17,7 @@ if False:
from typing import Any, Dict, List # NOQA
-class Config(object):
+class Config:
"""Sphinx napoleon extension settings in `conf.py`.
Listed below are all the settings used by napoleon and their default
@@ -176,10 +171,10 @@ class Config(object):
.. attribute:: attr1
- *int*
-
Description of `attr1`
+ :type: int
+
napoleon_use_param : :obj:`bool` (Defaults to True)
True to use a ``:param:`` role for each function parameter. False to
use a single ``:parameters:`` role for all the parameters.
@@ -274,14 +269,14 @@ class Config(object):
def __init__(self, **settings):
# type: (Any) -> None
- for name, (default, rebuild) in iteritems(self._config_values):
+ for name, (default, rebuild) in self._config_values.items():
setattr(self, name, default)
- for name, value in iteritems(settings):
+ for name, value in settings.items():
setattr(self, name, value)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
@@ -304,7 +299,8 @@ def setup(app):
"""
if not isinstance(app, Sphinx):
- return # probably called by tests
+ # probably called by tests
+ return {'version': __version__, 'parallel_read_safe': True}
_patch_python_domain()
@@ -312,9 +308,9 @@ def setup(app):
app.connect('autodoc-process-docstring', _process_docstring)
app.connect('autodoc-skip-member', _skip_member)
- for name, (default, rebuild) in iteritems(Config._config_values):
+ for name, (default, rebuild) in Config._config_values.items():
app.add_config_value(name, default, rebuild)
- return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
+ return {'version': __version__, 'parallel_read_safe': True}
def _patch_python_domain():
@@ -338,7 +334,7 @@ def _patch_python_domain():
def _process_docstring(app, what, name, obj, options, lines):
- # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
+ # type: (Sphinx, str, str, Any, Any, List[str]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
@@ -388,7 +384,7 @@ def _process_docstring(app, what, name, obj, options, lines):
def _skip_member(app, what, name, obj, skip, options):
- # type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool
+ # type: (Sphinx, str, str, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
@@ -435,34 +431,26 @@ def _skip_member(app, what, name, obj, skip, options):
if name != '__weakref__' and has_doc and is_member:
cls_is_owner = False
if what == 'class' or what == 'exception':
- if PY2:
- cls = getattr(obj, 'im_class', getattr(obj, '__objclass__',
- None))
- cls_is_owner = (cls and hasattr(cls, name) and
- name in cls.__dict__)
- elif sys.version_info >= (3, 3):
- qualname = getattr(obj, '__qualname__', '')
- cls_path, _, _ = qualname.rpartition('.')
- if cls_path:
- try:
- if '.' in cls_path:
- import importlib
- import functools
-
- mod = importlib.import_module(obj.__module__)
- mod_path = cls_path.split('.')
- cls = functools.reduce(getattr, mod_path, mod)
- else:
- cls = obj.__globals__[cls_path]
- except Exception:
- cls_is_owner = False
+ qualname = getattr(obj, '__qualname__', '')
+ cls_path, _, _ = qualname.rpartition('.')
+ if cls_path:
+ try:
+ if '.' in cls_path:
+ import importlib
+ import functools
+
+ mod = importlib.import_module(obj.__module__)
+ mod_path = cls_path.split('.')
+ cls = functools.reduce(getattr, mod_path, mod)
else:
- cls_is_owner = (cls and hasattr(cls, name) and
- name in cls.__dict__)
- else:
+ cls = obj.__globals__[cls_path]
+ except Exception:
cls_is_owner = False
+ else:
+ cls_is_owner = (cls and hasattr(cls, name) and # type: ignore
+ name in cls.__dict__)
else:
- cls_is_owner = True
+ cls_is_owner = False
if what == 'module' or cls_is_owner:
is_init = (name == '__init__')
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index 2e5380472..55b607faa 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.docstring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -11,21 +10,16 @@
:license: BSD, see LICENSE for details.
"""
-import collections
import inspect
import re
from functools import partial
-from six import string_types, u
-from six.moves import range
-
from sphinx.ext.napoleon.iterators import modify_iter
from sphinx.locale import _
-from sphinx.util.pycompat import UnicodeMixin
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, List, Tuple, Type, Union # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config as SphinxConfig # NOQA
@@ -43,7 +37,7 @@ _enumerated_list_regex = re.compile(
r'(?(paren)\)|\.)(\s+\S|\s*$)')
-class GoogleDocstring(UnicodeMixin):
+class GoogleDocstring:
"""Convert Google style docstrings to reStructuredText.
Parameters
@@ -105,9 +99,13 @@ class GoogleDocstring(UnicodeMixin):
<BLANKLINE>
"""
+
+ _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
+ r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
+
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._config = config
self._app = app
@@ -120,7 +118,7 @@ class GoogleDocstring(UnicodeMixin):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
- elif isinstance(obj, collections.Callable): # type: ignore
+ elif callable(obj):
what = 'function'
else:
what = 'object'
@@ -129,15 +127,16 @@ class GoogleDocstring(UnicodeMixin):
self._name = name
self._obj = obj
self._opt = options
- if isinstance(docstring, string_types):
- docstring = docstring.splitlines()
- self._lines = docstring
- self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
- self._parsed_lines = [] # type: List[unicode]
+ if isinstance(docstring, str):
+ lines = docstring.splitlines()
+ else:
+ lines = docstring
+ self._line_iter = modify_iter(lines, modifier=lambda s: s.rstrip())
+ self._parsed_lines = [] # type: List[str]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
- self._directive_sections = [] # type: List[unicode]
+ self._directive_sections = [] # type: List[str]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
@@ -170,14 +169,14 @@ class GoogleDocstring(UnicodeMixin):
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
- } # type: Dict[unicode, Callable]
+ } # type: Dict[str, Callable]
self._load_custom_sections()
self._parse()
- def __unicode__(self):
- # type: () -> unicode
+ def __str__(self):
+ # type: () -> str
"""Return the parsed docstring in reStructuredText format.
Returns
@@ -186,10 +185,10 @@ class GoogleDocstring(UnicodeMixin):
Unicode version of the docstring.
"""
- return u('\n').join(self.lines())
+ return '\n'.join(self.lines())
def lines(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
@@ -201,7 +200,7 @@ class GoogleDocstring(UnicodeMixin):
return self._parsed_lines
def _consume_indented_block(self, indent=1):
- # type: (int) -> List[unicode]
+ # type: (int) -> List[str]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
@@ -211,7 +210,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_contiguous(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
@@ -220,7 +219,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_empty(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
@@ -229,11 +228,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
before, colon, after = self._partition_field_on_colon(line)
- _name, _type, _desc = before, '', after # type: unicode, unicode, unicode
+ _name, _type, _desc = before, '', after
if parse_type:
match = _google_typed_arg_regex.match(before)
@@ -251,7 +250,7 @@ class GoogleDocstring(UnicodeMixin):
return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: (bool, bool) -> List[Tuple[str, str, List[str]]]
self._consume_empty()
fields = []
while not self._is_section_break():
@@ -261,21 +260,22 @@ class GoogleDocstring(UnicodeMixin):
return fields
def _consume_inline_attribute(self):
- # type: () -> Tuple[unicode, List[unicode]]
+ # type: () -> Tuple[str, List[str]]
line = next(self._line_iter)
_type, colon, _desc = self._partition_field_on_colon(line)
- if not colon:
+ if not colon or not _desc:
_type, _desc = _desc, _type
+ _desc += colon
_descs = [_desc] + self._dedent(self._consume_to_end())
_descs = self.__class__(_descs, self._config).lines()
return _type, _descs
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
- _name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
+ _name, _type, _desc = '', '', lines
if colon:
if after:
@@ -291,12 +291,12 @@ class GoogleDocstring(UnicodeMixin):
return []
def _consume_usage_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
@@ -304,14 +304,14 @@ class GoogleDocstring(UnicodeMixin):
return section
def _consume_to_end(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
lines = []
while self._line_iter.has_next():
lines.append(next(self._line_iter))
return lines
def _consume_to_next_section(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
self._consume_empty()
lines = []
while not self._is_section_break():
@@ -319,7 +319,7 @@ class GoogleDocstring(UnicodeMixin):
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
- # type: (List[unicode], bool) -> List[unicode]
+ # type: (List[str], bool) -> List[str]
if full:
return [line.lstrip() for line in lines]
else:
@@ -327,7 +327,7 @@ class GoogleDocstring(UnicodeMixin):
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
@@ -336,32 +336,32 @@ class GoogleDocstring(UnicodeMixin):
return name
def _fix_field_desc(self, desc):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if self._is_list(desc):
- desc = [u''] + desc
+ desc = [''] + desc
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
- desc = [u''] + desc
+ desc = [''] + desc
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _format_admonition(self, admonition, lines):
- # type: (unicode, List[unicode]) -> List[unicode]
+ # type: (str, List[str]) -> List[str]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
- return [u'.. %s::' % admonition, u''] + lines + [u'']
+ return ['.. %s::' % admonition, ''] + lines + ['']
else:
- return [u'.. %s::' % admonition, u'']
+ return ['.. %s::' % admonition, '']
def _format_block(self, prefix, lines, padding=None):
- # type: (unicode, List[unicode], unicode) -> List[unicode]
+ # type: (str, List[str], str) -> List[str]
if lines:
if padding is None:
padding = ' ' * len(prefix)
@@ -379,7 +379,7 @@ class GoogleDocstring(UnicodeMixin):
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
- # type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
+ # type: (List[Tuple[str, str, List[str]]], str, str) -> List[str]
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
@@ -395,14 +395,14 @@ class GoogleDocstring(UnicodeMixin):
return lines + ['']
def _format_field(self, _name, _type, _desc):
- # type: (unicode, unicode, List[unicode]) -> List[unicode]
+ # type: (str, str, List[str]) -> List[str]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
- field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
+ field = '**%s** (%s)%s' % (_name, _type, separator)
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
@@ -425,11 +425,11 @@ class GoogleDocstring(UnicodeMixin):
return [field]
def _format_fields(self, field_type, fields):
- # type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
+ # type: (str, List[Tuple[str, str, List[str]]]) -> List[str]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
@@ -454,21 +454,21 @@ class GoogleDocstring(UnicodeMixin):
return 0
def _get_indent(self, line):
- # type: (unicode) -> int
+ # type: (str) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
- # type: (List[unicode]) -> int
+ # type: (List[str]) -> int
min_indent = None
for line in lines:
if line:
@@ -480,11 +480,11 @@ class GoogleDocstring(UnicodeMixin):
return min_indent or 0
def _indent(self, lines, n=4):
- # type: (List[unicode], int) -> List[unicode]
+ # type: (List[str], int) -> List[str]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
- # type: (unicode, int) -> bool
+ # type: (str, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
@@ -493,7 +493,7 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_list(self, lines):
- # type: (List[unicode]) -> bool
+ # type: (List[str]) -> bool
if not lines:
return False
if _bullet_list_regex.match(lines[0]):
@@ -539,7 +539,7 @@ class GoogleDocstring(UnicodeMixin):
if self._config.napoleon_custom_sections is not None:
for entry in self._config.napoleon_custom_sections:
- if isinstance(entry, string_types):
+ if isinstance(entry, str):
# if entry is just a label, add to sections list,
# using generic section logic.
self._sections[entry.lower()] = self._parse_custom_generic_section
@@ -558,7 +558,7 @@ class GoogleDocstring(UnicodeMixin):
if self._name and (self._what == 'attribute' or self._what == 'data'):
# Implicit stop using StopIteration no longer allowed in
# Python 3.7; see PEP 479
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
try:
res = self._parse_attribute_docstring()
except StopIteration:
@@ -587,12 +587,12 @@ class GoogleDocstring(UnicodeMixin):
self._parsed_lines.extend(lines)
def _parse_admonition(self, admonition, section):
- # type (unicode, unicode) -> List[unicode]
+ # type (str, str) -> List[str]
lines = self._consume_to_next_section()
return self._format_admonition(admonition, lines)
def _parse_attribute_docstring(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
_type, _desc = self._consume_inline_attribute()
lines = self._format_field('', '', _desc)
if _type:
@@ -600,11 +600,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_attributes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
- field = ':ivar %s: ' % _name # type: unicode
+ _name = self._qualify_name(_name, self._obj)
+ field = ':ivar %s: ' % _name
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
@@ -621,11 +622,11 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_examples_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
labels = {
'example': _('Example'),
'examples': _('Examples'),
- } # type: Dict[unicode, unicode]
+ }
use_admonition = self._config.napoleon_use_admonition_for_examples
label = labels.get(section.lower(), section)
return self._parse_generic_section(label, use_admonition)
@@ -635,19 +636,19 @@ class GoogleDocstring(UnicodeMixin):
return self._parse_generic_section(section, False)
def _parse_usage_section(self, section):
- # type: (unicode) -> List[unicode]
- header = ['.. rubric:: Usage:', ''] # type: List[unicode]
- block = ['.. code-block:: python', ''] # type: List[unicode]
+ # type: (str) -> List[str]
+ header = ['.. rubric:: Usage:', '']
+ block = ['.. code-block:: python', '']
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
- # type: (unicode, bool) -> List[unicode]
+ # type: (str, bool) -> List[str]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
- header = '.. admonition:: %s' % section # type: unicode
+ header = '.. admonition:: %s' % section
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
@@ -657,7 +658,7 @@ class GoogleDocstring(UnicodeMixin):
return [header, '']
def _parse_keyword_arguments_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
@@ -668,26 +669,26 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Keyword Arguments'), fields)
def _parse_methods_section(self, section):
- # type: (unicode) -> List[unicode]
- lines = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ lines = [] # type: List[str]
for _name, _type, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
- lines.extend([u''] + self._indent(_desc, 3))
+ lines.extend([''] + self._indent(_desc, 3))
lines.append('')
return lines
def _parse_notes_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section(_('Notes'), use_admonition)
def _parse_other_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Other Parameters'), self._consume_fields())
def _parse_parameters_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
@@ -695,51 +696,28 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields(_('Parameters'), fields)
def _parse_raises_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_fields(parse_type=False, prefer_type=True)
- field_type = ':raises:'
- padding = ' ' * len(field_type)
- multi = len(fields) > 1
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
+ m = self._name_rgx.match(_type).groupdict()
+ if m['role']:
+ _type = m['name']
+ _type = ' ' + _type if _type else ''
_desc = self._strip_empty(_desc)
- has_desc = any(_desc)
- separator = has_desc and ' -- ' or ''
- if _type:
- has_refs = '`' in _type or ':' in _type
- has_space = any(c in ' \t\n\v\f ' for c in _type)
-
- if not has_refs and not has_space:
- _type = ':exc:`%s`%s' % (_type, separator)
- elif has_desc and has_space:
- _type = '*%s*%s' % (_type, separator)
- else:
- _type = '%s%s' % (_type, separator)
-
- if has_desc:
- field = [_type + _desc[0]] + _desc[1:]
- else:
- field = [_type]
- else:
- field = _desc
- if multi:
- if lines:
- lines.extend(self._format_block(padding + ' * ', field))
- else:
- lines.extend(self._format_block(field_type + ' * ', field))
- else:
- lines.extend(self._format_block(field_type + ' ', field))
- if lines and lines[-1]:
+ _descs = ' ' + '\n '.join(_desc) if any(_desc) else ''
+ lines.append(':raises%s:%s' % (_type, _descs))
+ if lines:
lines.append('')
return lines
def _parse_references_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section(_('References'), use_admonition)
def _parse_returns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
@@ -747,7 +725,7 @@ class GoogleDocstring(UnicodeMixin):
else:
use_rtype = self._config.napoleon_use_rtype
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
@@ -768,23 +746,23 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_see_also_section(self, section):
- # type (unicode) -> List[unicode]
+ # type (str) -> List[str]
return self._parse_admonition('seealso', section)
def _parse_warns_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self._format_fields(_('Warns'), self._consume_fields())
def _parse_yields_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
fields = self._consume_returns_section()
return self._format_fields(_('Yields'), fields)
def _partition_field_on_colon(self, line):
- # type: (unicode) -> Tuple[unicode, unicode, unicode]
+ # type: (str) -> Tuple[str, str, str]
before_colon = []
after_colon = []
- colon = '' # type: unicode
+ colon = ''
found_colon = False
for i, source in enumerate(_xref_regex.split(line)):
if found_colon:
@@ -803,8 +781,20 @@ class GoogleDocstring(UnicodeMixin):
colon,
"".join(after_colon).strip())
+ def _qualify_name(self, attr_name, klass):
+ # type: (str, Type) -> str
+ if klass and '.' not in attr_name:
+ if attr_name.startswith('~'):
+ attr_name = attr_name[1:]
+ try:
+ q = klass.__qualname__
+ except AttributeError:
+ q = klass.__name__
+ return '~%s.%s' % (q, attr_name)
+ return attr_name
+
def _strip_empty(self, lines):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
if lines:
start = -1
for i, line in enumerate(lines):
@@ -919,13 +909,12 @@ class NumpyDocstring(GoogleDocstring):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
- # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
+ # type: (Union[str, List[str]], SphinxConfig, Sphinx, str, str, Any, Any) -> None
self._directive_sections = ['.. index::']
- super(NumpyDocstring, self).__init__(docstring, config, app, what,
- name, obj, options)
+ super().__init__(docstring, config, app, what, name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
- # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ # type: (bool, bool) -> Tuple[str, str, List[str]]
line = next(self._line_iter)
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
@@ -942,11 +931,11 @@ class NumpyDocstring(GoogleDocstring):
return _name, _type, _desc
def _consume_returns_section(self):
- # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
+ # type: () -> List[Tuple[str, str, List[str]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
- # type: () -> unicode
+ # type: () -> str
section = next(self._line_iter)
if not _directive_regex.match(section):
# Consume the header underline
@@ -967,7 +956,7 @@ class NumpyDocstring(GoogleDocstring):
# type: () -> bool
section, underline = self._line_iter.peek(2)
section = section.lower()
- if section in self._sections and isinstance(underline, string_types):
+ if section in self._sections and isinstance(underline, str):
return bool(_numpy_section_regex.match(underline))
elif self._directive_sections:
if _directive_regex.match(section):
@@ -976,11 +965,8 @@ class NumpyDocstring(GoogleDocstring):
return True
return False
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
-
def _parse_see_also_section(self, section):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
@@ -988,7 +974,7 @@ class NumpyDocstring(GoogleDocstring):
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
@@ -1003,7 +989,7 @@ class NumpyDocstring(GoogleDocstring):
items = []
def parse_item_name(text):
- # type: (unicode) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Match ':role:`name`' or 'name'"""
m = self._name_rgx.match(text)
if m:
@@ -1015,7 +1001,7 @@ class NumpyDocstring(GoogleDocstring):
raise ValueError("%s is not a item name" % text)
def push_item(name, rest):
- # type: (unicode, List[unicode]) -> None
+ # type: (str, List[str]) -> None
if not name:
return
name, role = parse_item_name(name)
@@ -1023,7 +1009,7 @@ class NumpyDocstring(GoogleDocstring):
del rest[:]
current_func = None
- rest = [] # type: List[unicode]
+ rest = [] # type: List[str]
for line in content:
if not line.strip():
@@ -1069,12 +1055,12 @@ class NumpyDocstring(GoogleDocstring):
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
- } # type: Dict[unicode, unicode]
+ }
if self._what is None:
- func_role = 'obj' # type: unicode
+ func_role = 'obj'
else:
func_role = roles.get(self._what, '')
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
last_had_desc = True
for func, desc, role in items:
if role:
diff --git a/sphinx/ext/napoleon/iterators.py b/sphinx/ext/napoleon/iterators.py
index 8926c865c..eda3e1193 100644
--- a/sphinx/ext/napoleon/iterators.py
+++ b/sphinx/ext/napoleon/iterators.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.napoleon.iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any, Iterable # NOQA
-class peek_iter(object):
+class peek_iter:
"""An iterator object that supports peeking ahead.
Parameters
@@ -232,7 +231,7 @@ class modify_iter(peek_iter):
if not callable(self.modifier):
raise TypeError('modify_iter(o, modifier): '
'modifier must be callable')
- super(modify_iter, self).__init__(*args)
+ super().__init__(*args)
def _fillcache(self, n):
# type: (int) -> None
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
index 76e40c766..1d8f9cb60 100644
--- a/sphinx/ext/todo.py
+++ b/sphinx/ext/todo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
@@ -12,6 +11,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from docutils.parsers.rst import directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
@@ -26,9 +27,11 @@ from sphinx.util.texescape import tex_escape_map
if False:
# For type annotation
- from typing import Any, Dict, Iterable, List # NOQA
+ from typing import Any, Dict, Iterable, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.writers.html import HTMLTranslator # NOQA
+ from sphinx.writers.latex import LaTeXTranslator # NOQA
logger = logging.getLogger(__name__)
@@ -60,22 +63,24 @@ class Todo(BaseAdmonition, SphinxDirective):
if not self.options.get('class'):
self.options['class'] = ['admonition-todo']
- (todo,) = super(Todo, self).run()
+ (todo,) = super().run() # type: Tuple[nodes.Node]
if isinstance(todo, nodes.system_message):
return [todo]
-
- todo.insert(0, nodes.title(text=_('Todo')))
- set_source_info(self, todo)
-
- targetid = 'index-%s' % self.env.new_serialno('index')
- # Stash the target to be retrieved later in latex_visit_todo_node.
- todo['targetref'] = '%s:%s' % (self.env.docname, targetid)
- targetnode = nodes.target('', '', ids=[targetid])
- return [targetnode, todo]
+ elif isinstance(todo, todo_node):
+ todo.insert(0, nodes.title(text=_('Todo')))
+ set_source_info(self, todo)
+
+ targetid = 'index-%s' % self.env.new_serialno('index')
+ # Stash the target to be retrieved later in latex_visit_todo_node.
+ todo['targetref'] = '%s:%s' % (self.env.docname, targetid)
+ targetnode = nodes.target('', '', ids=[targetid])
+ return [targetnode, todo]
+ else:
+ raise RuntimeError # never reached here
def process_todos(app, doctree):
- # type: (Sphinx, nodes.Node) -> None
+ # type: (Sphinx, nodes.document) -> None
# collect all todos in the environment
# this is not done in the directive itself because it some transformations
# must have already been run, e.g. substitutions
@@ -102,7 +107,8 @@ def process_todos(app, doctree):
})
if env.config.todo_emit_warnings:
- logger.warning(__("TODO entry found: %s"), node[1].astext(),
+ label = cast(nodes.Element, node[1])
+ logger.warning(__("TODO entry found: %s"), label.astext(),
location=node)
@@ -118,14 +124,15 @@ class TodoList(SphinxDirective):
option_spec = {} # type: Dict
def run(self):
- # type: () -> List[todolist]
+ # type: () -> List[nodes.Node]
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
def process_todo_nodes(app, doctree, fromdocname):
- # type: (Sphinx, nodes.Node, unicode) -> None
+ # type: (Sphinx, nodes.document, str) -> None
+ node = None # type: nodes.Element
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
@@ -139,7 +146,7 @@ def process_todo_nodes(app, doctree, fromdocname):
for node in doctree.traverse(todolist):
if node.get('ids'):
- content = [nodes.target()]
+ content = [nodes.target()] # type: List[nodes.Element]
else:
content = []
@@ -195,7 +202,7 @@ def process_todo_nodes(app, doctree, fromdocname):
def purge_todos(app, env, docname):
- # type: (Sphinx, BuildEnvironment, unicode) -> None
+ # type: (Sphinx, BuildEnvironment, str) -> None
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
@@ -203,7 +210,7 @@ def purge_todos(app, env, docname):
def merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
@@ -212,35 +219,36 @@ def merge_info(app, env, docnames, other):
def visit_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (HTMLTranslator, todo_node) -> None
self.visit_admonition(node)
- # self.visit_admonition(node, 'todo')
def depart_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (HTMLTranslator, todo_node) -> None
self.depart_admonition(node)
def latex_visit_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
- title = node.pop(0).astext().translate(tex_escape_map)
- self.body.append(u'\n\\begin{sphinxadmonition}{note}{')
+ # type: (LaTeXTranslator, todo_node) -> None
+ self.body.append('\n\\begin{sphinxadmonition}{note}{')
# If this is the original todo node, emit a label that will be referenced by
# a hyperref in the todolist.
target = node.get('targetref')
if target is not None:
- self.body.append(u'\\label{%s}' % target)
- self.body.append('%s:}' % title)
+ self.body.append('\\label{%s}' % target)
+
+ title_node = cast(nodes.title, node[0])
+ self.body.append('%s:}' % title_node.astext().translate(tex_escape_map))
+ node.pop(0)
def latex_depart_todo_node(self, node):
- # type: (nodes.NodeVisitor, todo_node) -> None
+ # type: (LaTeXTranslator, todo_node) -> None
self.body.append('\\end{sphinxadmonition}\n')
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 425482c3a..7c92a02fa 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.ext.viewcode
~~~~~~~~~~~~~~~~~~~
@@ -13,7 +12,6 @@ import traceback
import warnings
from docutils import nodes
-from six import iteritems, text_type
import sphinx
from sphinx import addnodes
@@ -34,7 +32,7 @@ logger = logging.getLogger(__name__)
def _get_full_modname(app, modname, attribute):
- # type: (Sphinx, str, unicode) -> unicode
+ # type: (Sphinx, str, str) -> str
try:
return get_full_modname(modname, attribute)
except AttributeError:
@@ -75,12 +73,8 @@ def doctree_read(app, doctree):
env._viewcode_modules[modname] = False # type: ignore
return
- if not isinstance(analyzer.code, text_type):
- code = analyzer.code.decode(analyzer.encoding)
- else:
- code = analyzer.code
-
analyzer.find_tags()
+ code = analyzer.code
tags = analyzer.tags
else:
code, tags = code_tags
@@ -96,7 +90,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
- names = set() # type: Set[unicode]
+ names = set() # type: Set[str]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -120,18 +114,16 @@ def doctree_read(app, doctree):
continue
names.add(fullname)
pagename = '_modules/' + modname.replace('.', '/')
+ inline = nodes.inline('', _('[source]'), classes=['viewcode-link'])
onlynode = addnodes.only(expr='html')
- onlynode += addnodes.pending_xref(
- '', reftype='viewcode', refdomain='std', refexplicit=False,
- reftarget=pagename, refid=fullname,
- refdoc=env.docname)
- onlynode[0] += nodes.inline('', _('[source]'),
- classes=['viewcode-link'])
+ onlynode += addnodes.pending_xref('', inline, reftype='viewcode', refdomain='std',
+ refexplicit=False, reftarget=pagename,
+ refid=fullname, refdoc=env.docname)
signode += onlynode
def env_merge_info(app, env, docnames, other):
- # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
+ # type: (Sphinx, BuildEnvironment, Iterable[str], BuildEnvironment) -> None
if not hasattr(other, '_viewcode_modules'):
return
# create a _viewcode_modules dict on the main environment
@@ -142,15 +134,17 @@ def env_merge_info(app, env, docnames, other):
def missing_reference(app, env, node, contnode):
- # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> nodes.Node
+ # type: (Sphinx, BuildEnvironment, nodes.Element, nodes.Node) -> nodes.Node
# resolve our "viewcode" reference nodes -- they need special treatment
if node['reftype'] == 'viewcode':
return make_refnode(app.builder, node['refdoc'], node['reftarget'],
node['refid'], contnode)
+ return None
+
def collect_pages(app):
- # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
+ # type: (Sphinx) -> Iterator[Tuple[str, Dict[str, Any], str]]
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
return
@@ -163,7 +157,7 @@ def collect_pages(app):
# len(env._viewcode_modules), nonl=1)
for modname, entry in status_iterator(
- sorted(iteritems(env._viewcode_modules)), # type: ignore
+ sorted(env._viewcode_modules.items()), # type: ignore
'highlighting module code... ', "blue",
len(env._viewcode_modules), # type: ignore
app.verbosity, lambda x: x[0]):
@@ -188,7 +182,7 @@ def collect_pages(app):
# the collected tags (HACK: this only works if the tag boundaries are
# properly nested!)
maxindex = len(lines) - 1
- for name, docname in iteritems(used):
+ for name, docname in used.items():
type, start, end = tags[name]
backlink = urito(pagename, docname) + '#' + refname + '.' + name
lines[start] = (
@@ -215,7 +209,7 @@ def collect_pages(app):
'title': modname,
'body': (_('<h1>Source code for %s</h1>') % modname +
'\n'.join(lines)),
- } # type: Dict[unicode, Any]
+ }
yield (pagename, context, 'page.html')
if not modnames:
@@ -256,7 +250,7 @@ def migrate_viewcode_import(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_config_value('viewcode_import', None, False)
app.add_config_value('viewcode_enable_epub', False, False)
app.add_config_value('viewcode_follow_imported_members', True, False)
diff --git a/sphinx/extension.py b/sphinx/extension.py
index 732ea327c..8302594d9 100644
--- a/sphinx/extension.py
+++ b/sphinx/extension.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.extension
~~~~~~~~~~~~~~~~
@@ -9,8 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-from six import iteritems
-
from sphinx.errors import VersionRequirementError
from sphinx.locale import __
from sphinx.util import logging
@@ -24,9 +21,9 @@ if False:
logger = logging.getLogger(__name__)
-class Extension(object):
+class Extension:
def __init__(self, name, module, **kwargs):
- # type: (unicode, Any, Any) -> None
+ # type: (str, Any, Any) -> None
self.name = name
self.module = module
self.metadata = kwargs
@@ -49,7 +46,7 @@ def verify_needs_extensions(app, config):
if config.needs_extensions is None:
return
- for extname, reqversion in iteritems(config.needs_extensions):
+ for extname, reqversion in config.needs_extensions.items():
extension = app.extensions.get(extname)
if extension is None:
logger.warning(__('The %s extension is required by needs_extensions settings, '
@@ -64,7 +61,7 @@ def verify_needs_extensions(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', verify_needs_extensions)
return {
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index f52e99019..5fac63963 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.highlighting
~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,7 @@
:license: BSD, see LICENSE for details.
"""
+import html
import warnings
from pygments import highlight
@@ -20,14 +20,12 @@ from pygments.lexers import PythonLexer, Python3Lexer, PythonConsoleLexer, \
CLexer, TextLexer, RstLexer
from pygments.styles import get_style_by_name
from pygments.util import ClassNotFound
-from six import text_type
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.ext import doctest
from sphinx.locale import __
from sphinx.pygments_styles import SphinxStyle, NoneStyle
from sphinx.util import logging
-from sphinx.util.pycompat import htmlescape
from sphinx.util.texescape import tex_hl_escape_map_new
if False:
@@ -38,22 +36,22 @@ if False:
logger = logging.getLogger(__name__)
-lexers = dict(
- none = TextLexer(stripnl=False),
- python = PythonLexer(stripnl=False),
- python3 = Python3Lexer(stripnl=False),
- pycon = PythonConsoleLexer(stripnl=False),
- pycon3 = PythonConsoleLexer(python3=True, stripnl=False),
- rest = RstLexer(stripnl=False),
- c = CLexer(stripnl=False),
-) # type: Dict[unicode, Lexer]
+lexers = {
+ 'none': TextLexer(stripnl=False),
+ 'python': PythonLexer(stripnl=False),
+ 'python3': Python3Lexer(stripnl=False),
+ 'pycon': PythonConsoleLexer(stripnl=False),
+ 'pycon3': PythonConsoleLexer(python3=True, stripnl=False),
+ 'rest': RstLexer(stripnl=False),
+ 'c': CLexer(stripnl=False),
+} # type: Dict[str, Lexer]
for _lexer in lexers.values():
_lexer.add_filter('raiseonerror')
-escape_hl_chars = {ord(u'\\'): u'\\PYGZbs{}',
- ord(u'{'): u'\\PYGZob{}',
- ord(u'}'): u'\\PYGZcb{}'}
+escape_hl_chars = {ord('\\'): '\\PYGZbs{}',
+ ord('{'): '\\PYGZob{}',
+ ord('}'): '\\PYGZcb{}'}
# used if Pygments is available
# use textcomp quote to get a true single quote
@@ -62,14 +60,14 @@ _LATEX_ADD_STYLES = r'''
'''
-class PygmentsBridge(object):
+class PygmentsBridge:
# Set these attributes if you want to have different Pygments formatters
# than the default ones.
html_formatter = HtmlFormatter
latex_formatter = LatexFormatter
def __init__(self, dest='html', stylename='sphinx', trim_doctest_flags=None):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
self.dest = dest
if stylename is None or stylename == 'sphinx':
style = SphinxStyle
@@ -81,7 +79,7 @@ class PygmentsBridge(object):
stylename)
else:
style = get_style_by_name(stylename)
- self.formatter_args = {'style': style} # type: Dict[unicode, Any]
+ self.formatter_args = {'style': style} # type: Dict[str, Any]
if dest == 'html':
self.formatter = self.html_formatter
else:
@@ -95,15 +93,15 @@ class PygmentsBridge(object):
def get_formatter(self, **kwargs):
# type: (Any) -> Formatter
- kwargs.update(self.formatter_args) # type: ignore
+ kwargs.update(self.formatter_args)
return self.formatter(**kwargs)
def unhighlighted(self, source):
- # type: (unicode) -> unicode
+ # type: (str) -> str
warnings.warn('PygmentsBridge.unhighlighted() is now deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
if self.dest == 'html':
- return '<pre>' + htmlescape(source) + '</pre>\n'
+ return '<pre>' + html.escape(source) + '</pre>\n'
else:
# first, escape highlighting characters like Pygments does
source = source.translate(escape_hl_chars)
@@ -113,8 +111,8 @@ class PygmentsBridge(object):
source + '\\end{Verbatim}\n'
def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs):
- # type: (unicode, unicode, Any, Any, bool, Any) -> unicode
- if not isinstance(source, text_type):
+ # type: (str, str, Any, Any, bool, Any) -> str
+ if not isinstance(source, str):
source = source.decode()
# find out which lexer to use
@@ -173,7 +171,7 @@ class PygmentsBridge(object):
return hlsource.translate(tex_hl_escape_map_new)
def get_stylesheet(self):
- # type: () -> unicode
+ # type: () -> str
formatter = self.get_formatter()
if self.dest == 'html':
return formatter.get_style_defs('.highlight')
diff --git a/sphinx/io.py b/sphinx/io.py
index 4da4e95a9..ee4891a2b 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.io
~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
import codecs
-import re
import warnings
from docutils.core import Publisher
@@ -18,11 +16,9 @@ from docutils.parsers.rst import Parser as RSTParser
from docutils.readers import standalone
from docutils.statemachine import StringList, string2lines
from docutils.writers import UnfilteredWriter
-from six import text_type, iteritems
from typing import Any, Union # NOQA
from sphinx.deprecation import RemovedInSphinx30Warning
-from sphinx.locale import __
from sphinx.transforms import (
ApplySourceWorkaround, ExtraTranslatableNodes, CitationReferences,
DefaultSubstitutions, MoveModuleTargets, HandleCodeBlocks, SortIds,
@@ -36,13 +32,16 @@ from sphinx.transforms.i18n import (
)
from sphinx.transforms.references import SphinxDomains, SubstitutionDefinitionsRemover
from sphinx.util import logging
+from sphinx.util import UnicodeDecodeErrorHandler
from sphinx.util.docutils import LoggingReporter
+from sphinx.util.rst import append_epilog, docinfo_re, prepend_prolog
from sphinx.versioning import UIDTransform
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple, Union # NOQA
+ from typing import Any, Dict, List, Tuple, Type, Union # NOQA
from docutils import nodes # NOQA
+ from docutils.frontend import Values # NOQA
from docutils.io import Input # NOQA
from docutils.parsers import Parser # NOQA
from docutils.transforms import Transform # NOQA
@@ -50,8 +49,6 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
-docinfo_re = re.compile(':\\w+:.*?')
-
logger = logging.getLogger(__name__)
@@ -63,21 +60,24 @@ class SphinxBaseReader(standalone.Reader):
This replaces reporter by Sphinx's on generating document.
"""
+ transforms = [] # type: List[Type[Transform]]
+
def __init__(self, app, *args, **kwargs):
# type: (Sphinx, Any, Any) -> None
+ self.app = app
self.env = app.env
- standalone.Reader.__init__(self, *args, **kwargs)
+ super().__init__(*args, **kwargs)
def get_transforms(self):
- # type: () -> List[Transform]
- return standalone.Reader.get_transforms(self) + self.transforms
+ # type: () -> List[Type[Transform]]
+ return super().get_transforms() + self.transforms
def new_document(self):
# type: () -> nodes.document
"""Creates a new document object which having a special reporter object good
for logging.
"""
- document = standalone.Reader.new_document(self)
+ document = super().new_document()
# substitute transformer
document.transformer = SphinxTransformer(document)
@@ -100,13 +100,32 @@ class SphinxStandaloneReader(SphinxBaseReader):
RemoveTranslatableInline, FilterSystemMessages, RefOnlyBulletListTransform,
UnreferencedFootnotesDetector, SphinxSmartQuotes, ManpageLink,
SphinxDomains, SubstitutionDefinitionsRemover, DoctreeReadEvent,
- UIDTransform,
- ] # type: List[Transform]
+ UIDTransform]
def __init__(self, app, *args, **kwargs):
# type: (Sphinx, Any, Any) -> None
self.transforms = self.transforms + app.registry.get_transforms()
- SphinxBaseReader.__init__(self, app, *args, **kwargs)
+ super().__init__(app, *args, **kwargs)
+
+ def read(self, source, parser, settings):
+ # type: (Input, Parser, Values) -> nodes.document
+ self.source = source
+ if not self.parser:
+ self.parser = parser
+ self.settings = settings
+ self.input = self.read_source()
+ self.parse()
+ return self.document
+
+ def read_source(self):
+ # type: () -> str
+ """Read content from source and do post-process."""
+ content = self.source.read()
+
+ # emit "source-read" event
+ arg = [content]
+ self.app.emit('source-read', self.env.docname, arg)
+ return arg[0]
class SphinxI18nReader(SphinxBaseReader):
@@ -158,8 +177,7 @@ def SphinxDummySourceClass(source, *args, **kwargs):
class SphinxBaseFileInput(FileInput):
"""A base class of SphinxFileInput.
- It supports to replace unknown Unicode characters to '?'. And it also emits
- Sphinx events :event:`source-read` on reading.
+ It supports to replace unknown Unicode characters to '?'.
"""
def __init__(self, app, env, *args, **kwds):
@@ -167,50 +185,25 @@ class SphinxBaseFileInput(FileInput):
self.app = app
self.env = env
- # set up error handler
- codecs.register_error('sphinx', self.warn_and_replace) # type: ignore
+ warnings.warn('%s is deprecated.' % self.__class__.__name__,
+ RemovedInSphinx30Warning, stacklevel=2)
kwds['error_handler'] = 'sphinx' # py3: handle error on open.
- FileInput.__init__(self, *args, **kwds)
-
- def decode(self, data):
- # type: (Union[unicode, bytes]) -> unicode
- if isinstance(data, text_type): # py3: `data` already decoded.
- return data
- return data.decode(self.encoding, 'sphinx') # py2: decoding
-
- def read(self):
- # type: () -> unicode
- """Reads the contents from file.
-
- After reading, it emits Sphinx event ``source-read``.
- """
- data = FileInput.read(self)
-
- # emit source-read event
- arg = [data]
- self.app.emit('source-read', self.env.docname, arg)
- return arg[0]
+ super().__init__(*args, **kwds)
def warn_and_replace(self, error):
# type: (Any) -> Tuple
- """Custom decoding error handler that warns and replaces."""
- linestart = error.object.rfind(b'\n', 0, error.start)
- lineend = error.object.find(b'\n', error.start)
- if lineend == -1:
- lineend = len(error.object)
- lineno = error.object.count(b'\n', 0, error.start) + 1
- logger.warning(__('undecodable source characters, replacing with "?": %r'),
- (error.object[linestart + 1:error.start] + b'>>>' +
- error.object[error.start:error.end] + b'<<<' +
- error.object[error.end:lineend]),
- location=(self.env.docname, lineno))
- return (u'?', error.end)
-
-
-class SphinxFileInput(SphinxBaseFileInput):
+ return UnicodeDecodeErrorHandler(self.env.docname)(error)
+
+
+class SphinxFileInput(FileInput):
"""A basic FileInput for Sphinx."""
- supported = ('*',) # special source input
+ supported = ('*',) # RemovedInSphinx30Warning
+
+ def __init__(self, *args, **kwargs):
+ # type: (Any, Any) -> None
+ kwargs['error_handler'] = 'sphinx'
+ super(SphinxFileInput, self).__init__(*args, **kwargs)
class SphinxRSTFileInput(SphinxBaseFileInput):
@@ -230,7 +223,7 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
supported = ('restructuredtext',)
def prepend_prolog(self, text, prolog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
docinfo = self.count_docinfo_lines(text)
if docinfo:
# insert a blank line after docinfo
@@ -244,24 +237,22 @@ class SphinxRSTFileInput(SphinxBaseFileInput):
text.insert(docinfo + lineno + 1, '', '<generated>', 0)
def append_epilog(self, text, epilog):
- # type: (StringList, unicode) -> None
+ # type: (StringList, str) -> None
# append a blank line and rst_epilog
text.append('', '<generated>', 0)
for lineno, line in enumerate(epilog.splitlines()):
text.append(line, '<rst_epilog>', lineno)
- def read(self):
+ def read(self): # type: ignore
# type: () -> StringList
- inputstring = SphinxBaseFileInput.read(self)
+ inputstring = super().read()
lines = string2lines(inputstring, convert_whitespace=True)
content = StringList()
for lineno, line in enumerate(lines):
content.append(line, self.source_path, lineno)
- if self.env.config.rst_prolog:
- self.prepend_prolog(content, self.env.config.rst_prolog)
- if self.env.config.rst_epilog:
- self.append_epilog(content, self.env.config.rst_epilog)
+ prepend_prolog(content, self.env.config.rst_prolog)
+ append_epilog(content, self.env.config.rst_epilog)
return content
@@ -281,8 +272,8 @@ class FiletypeNotFoundError(Exception):
def get_filetype(source_suffix, filename):
- # type: (Dict[unicode, unicode], unicode) -> unicode
- for suffix, filetype in iteritems(source_suffix):
+ # type: (Dict[str, str], str) -> str
+ for suffix, filetype in source_suffix.items():
if filename.endswith(suffix):
# If default filetype (None), considered as restructuredtext.
return filetype or 'restructuredtext'
@@ -291,13 +282,14 @@ def get_filetype(source_suffix, filename):
def read_doc(app, env, filename):
- # type: (Sphinx, BuildEnvironment, unicode) -> nodes.document
+ # type: (Sphinx, BuildEnvironment, str) -> nodes.document
"""Parse a document and convert to doctree."""
- filetype = get_filetype(app.config.source_suffix, filename)
- input_class = app.registry.get_source_input(filetype)
+ # set up error_handler for the target document
+ error_handler = UnicodeDecodeErrorHandler(env.docname)
+ codecs.register_error('sphinx', error_handler) # type: ignore
+
reader = SphinxStandaloneReader(app)
- source = input_class(app, env, source=None, source_path=filename,
- encoding=env.config.source_encoding)
+ filetype = get_filetype(app.config.source_suffix, filename)
parser = app.registry.create_source_parser(app, filetype)
if parser.__class__.__name__ == 'CommonMarkParser' and parser.settings_spec == ():
# a workaround for recommonmark
@@ -307,25 +299,26 @@ def read_doc(app, env, filename):
# CommonMarkParser.
parser.settings_spec = RSTParser.settings_spec
- pub = Publisher(reader=reader,
- parser=parser,
- writer=SphinxDummyWriter(),
- source_class=SphinxDummySourceClass,
- destination=NullOutput())
- pub.set_components(None, 'restructuredtext', None)
- pub.process_programmatic_settings(None, env.settings, None)
- pub.set_source(source, filename)
+ input_class = app.registry.get_source_input(filetype)
+ if input_class:
+ # Sphinx-1.8 style
+ source = input_class(app, env, source=None, source_path=filename, # type: ignore
+ encoding=env.config.source_encoding)
+ pub = Publisher(reader=reader, # type: ignore
+ parser=parser,
+ writer=SphinxDummyWriter(),
+ source_class=SphinxDummySourceClass,
+ destination=NullOutput())
+ pub.process_programmatic_settings(None, env.settings, None)
+ pub.set_source(source, filename)
+ else:
+ # Sphinx-2.0 style
+ pub = Publisher(reader=reader,
+ parser=parser,
+ writer=SphinxDummyWriter(),
+ source_class=SphinxFileInput)
+ pub.process_programmatic_settings(None, env.settings, None)
+ pub.set_source(source_path=filename)
+
pub.publish()
return pub.document
-
-
-def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
- app.registry.add_source_input(SphinxFileInput)
- app.registry.add_source_input(SphinxRSTFileInput)
-
- return {
- 'version': 'builtin',
- 'parallel_read_safe': True,
- 'parallel_write_safe': True,
- }
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index f19454c27..b491ce44e 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.jinja2glue
~~~~~~~~~~~~~~~~~
@@ -17,7 +16,6 @@ from jinja2 import FileSystemLoader, BaseLoader, TemplateNotFound, \
contextfunction
from jinja2.sandbox import SandboxedEnvironment
from jinja2.utils import open_if_exists
-from six import string_types
from sphinx.application import TemplateBridge
from sphinx.util import logging
@@ -32,14 +30,14 @@ if False:
def _tobool(val):
- # type: (unicode) -> bool
- if isinstance(val, string_types):
+ # type: (str) -> bool
+ if isinstance(val, str):
return val.lower() in ('true', '1', 'yes', 'on')
return bool(val)
def _toint(val):
- # type: (unicode) -> int
+ # type: (str) -> int
try:
return int(val)
except ValueError:
@@ -47,7 +45,7 @@ def _toint(val):
def _todim(val):
- # type: (Union[int, unicode]) -> unicode
+ # type: (Union[int, str]) -> str
"""
Make val a css dimension. In particular the following transformations
are performed:
@@ -88,7 +86,7 @@ def _slice_index(values, slices):
def accesskey(context, key):
- # type: (Any, unicode) -> unicode
+ # type: (Any, str) -> str
"""Helper to output each access key only once."""
if '_accesskeys' not in context:
context.vars['_accesskeys'] = {}
@@ -98,7 +96,7 @@ def accesskey(context, key):
return ''
-class idgen(object):
+class idgen:
def __init__(self):
# type: () -> None
self.id = 0
@@ -116,7 +114,7 @@ class idgen(object):
@contextfunction
def warning(context, message, *args, **kwargs):
- # type: (Dict, unicode, Any, Any) -> unicode
+ # type: (Dict, str, Any, Any) -> str
if 'pagename' in context:
filename = context.get('pagename') + context.get('file_suffix', '')
message = 'in rendering %s: %s' % (filename, message)
@@ -132,7 +130,7 @@ class SphinxFileSystemLoader(FileSystemLoader):
"""
def get_source(self, environment, template):
- # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
+ # type: (Environment, str) -> Tuple[str, str, Callable]
for searchpath in self.searchpath:
filename = path.join(searchpath, template)
f = open_if_exists(filename)
@@ -161,7 +159,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# TemplateBridge interface
def init(self, builder, theme=None, dirs=None):
- # type: (Builder, Theme, List[unicode]) -> None
+ # type: (Builder, Theme, List[str]) -> None
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
@@ -205,11 +203,11 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.environment.install_gettext_translations(builder.app.translator) # type: ignore # NOQA
def render(self, template, context): # type: ignore
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.environment.get_template(template).render(context)
def render_string(self, source, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.environment.from_string(source).render(context)
def newest_template_mtime(self):
@@ -219,7 +217,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# Loader interface
def get_source(self, environment, template):
- # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
+ # type: (Environment, str) -> Tuple[str, str, Callable]
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index 62f955535..e955e34f5 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.locale
~~~~~~~~~~~~~
@@ -12,12 +11,9 @@
import gettext
import locale
import warnings
-from collections import defaultdict
+from collections import UserString, defaultdict
from gettext import NullTranslations
-from six import text_type
-from six.moves import UserString
-
from sphinx.deprecation import RemovedInSphinx30Warning
if False:
@@ -25,7 +21,7 @@ if False:
from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Union # NOQA
-class _TranslationProxy(UserString, object):
+class _TranslationProxy(UserString):
"""
Class for proxy strings from gettext translations. This is a helper for the
lazy_* functions from this module.
@@ -36,36 +32,35 @@ class _TranslationProxy(UserString, object):
This inherits from UserString because some docutils versions use UserString
for their Text nodes, which then checks its argument for being either a
basestring or UserString, otherwise calls str() -- not unicode() -- on it.
- This also inherits from object to make the __new__ method work.
"""
__slots__ = ('_func', '_args')
def __new__(cls, func, *args):
- # type: (Callable, unicode) -> object
+ # type: (Callable, str) -> object
if not args:
# not called with "function" and "arguments", but a plain string
- return text_type(func)
+ return str(func)
return object.__new__(cls)
def __getnewargs__(self):
- # type: () -> Tuple
+ # type: () -> Tuple[str]
return (self._func,) + self._args # type: ignore
def __init__(self, func, *args):
- # type: (Callable, unicode) -> None
+ # type: (Callable, str) -> None
self._func = func
self._args = args
@property
def data(self): # type: ignore
- # type: () -> unicode
+ # type: () -> str
return self._func(*self._args)
# replace function from UserString; it instantiates a self.__class__
# for the encoding result
def encode(self, encoding=None, errors=None): # type: ignore
- # type: (unicode, unicode) -> str
+ # type: (str, str) -> bytes
if encoding:
if errors:
return self.data.encode(encoding, errors)
@@ -74,101 +69,52 @@ class _TranslationProxy(UserString, object):
else:
return self.data.encode()
- def __contains__(self, key):
- # type: (Any) -> bool
- return key in self.data
-
- def __bool__(self):
- # type: () -> bool
- return bool(self.data)
- __nonzero__ = __bool__ # for python2 compatibility
-
def __dir__(self):
# type: () -> List[str]
- return dir(text_type)
-
- def __iter__(self):
- # type: () -> Iterator
- return iter(self.data)
-
- def __len__(self):
- # type: () -> int
- return len(self.data)
+ return dir(str)
def __str__(self):
# type: () -> str
return str(self.data)
- def __unicode__(self):
- # type: () -> unicode
- return text_type(self.data)
-
def __add__(self, other): # type: ignore
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.data + other
- def __radd__(self, other): # type: ignore
- # type: (unicode) -> unicode
+ def __radd__(self, other):
+ # type: (str) -> str
return other + self.data
def __mod__(self, other): # type: ignore
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.data % other
def __rmod__(self, other):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return other % self.data
def __mul__(self, other): # type: ignore
- # type: (Any) -> unicode
+ # type: (Any) -> str
return self.data * other
- def __rmul__(self, other): # type: ignore
- # type: (Any) -> unicode
+ def __rmul__(self, other):
+ # type: (Any) -> str
return other * self.data
- def __lt__(self, other):
- # type: (unicode) -> bool
- return self.data < other
-
- def __le__(self, other):
- # type: (unicode) -> bool
- return self.data <= other
-
- def __eq__(self, other):
- # type: (Any) -> bool
- return self.data == other
-
- def __ne__(self, other):
- # type: (Any) -> bool
- return self.data != other
-
- def __gt__(self, other):
- # type: (unicode) -> bool
- return self.data > other
-
- def __ge__(self, other):
- # type: (unicode) -> bool
- return self.data >= other
-
def __getattr__(self, name):
- # type: (unicode) -> Any
+ # type: (str) -> Any
if name == '__members__':
return self.__dir__()
return getattr(self.data, name)
def __getstate__(self):
- # type: () -> Tuple[Callable, Tuple[unicode, ...]]
+ # type: () -> Tuple[Callable, Tuple[str, ...]]
return self._func, self._args
def __setstate__(self, tup):
- # type: (Tuple[Callable, Tuple[unicode]]) -> None
+ # type: (Tuple[Callable, Tuple[str]]) -> None
self._func, self._args = tup
- def __getitem__(self, key): # type: ignore
- # type: (Any) -> unicode
- return self.data[key]
-
def __copy__(self):
# type: () -> _TranslationProxy
return self
@@ -176,13 +122,13 @@ class _TranslationProxy(UserString, object):
def __repr__(self):
# type: () -> str
try:
- return 'i' + repr(text_type(self.data))
+ return 'i' + repr(str(self.data))
except Exception:
return '<%s broken>' % self.__class__.__name__
def mygettext(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Used instead of _ when creating TranslationProxies, because _ is
not bound yet at that time.
"""
@@ -192,7 +138,7 @@ def mygettext(string):
def lazy_gettext(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""A lazy version of `gettext`."""
# if isinstance(string, _TranslationProxy):
# return string
@@ -201,11 +147,11 @@ def lazy_gettext(string):
return _TranslationProxy(mygettext, string) # type: ignore
-translators = defaultdict(NullTranslations) # type: Dict[Tuple[unicode, unicode], NullTranslations] # NOQA
+translators = defaultdict(NullTranslations) # type: Dict[Tuple[str, str], NullTranslations]
def init(locale_dirs, language, catalog='sphinx', namespace='general'):
- # type: (List[unicode], unicode, unicode, unicode) -> Tuple[NullTranslations, bool]
+ # type: (List[str], str, str, str) -> Tuple[NullTranslations, bool]
"""Look for message catalogs in `locale_dirs` and *ensure* that there is at
least a NullTranslations catalog set in `translators`. If called multiple
times or if several ``.mo`` files are found, their contents are merged
@@ -228,8 +174,7 @@ def init(locale_dirs, language, catalog='sphinx', namespace='general'):
# loading
for dir_ in locale_dirs:
try:
- trans = gettext.translation(catalog, localedir=dir_, # type: ignore
- languages=languages)
+ trans = gettext.translation(catalog, localedir=dir_, languages=languages)
if translator is None:
translator = trans
else:
@@ -242,8 +187,6 @@ def init(locale_dirs, language, catalog='sphinx', namespace='general'):
translator = NullTranslations()
has_translation = False
translators[(namespace, catalog)] = translator
- if hasattr(translator, 'ugettext'):
- translator.gettext = translator.ugettext # type: ignore
return translator, has_translation
@@ -269,7 +212,7 @@ def setlocale(category, value=None):
def init_console(locale_dir, catalog):
- # type: (unicode, unicode) -> Tuple[NullTranslations, bool]
+ # type: (str, str) -> Tuple[NullTranslations, bool]
"""Initialize locale for console.
.. versionadded:: 1.8
@@ -285,22 +228,22 @@ def init_console(locale_dir, catalog):
def get_translator(catalog='sphinx', namespace='general'):
- # type: (unicode, unicode) -> NullTranslations
+ # type: (str, str) -> NullTranslations
return translators[(namespace, catalog)]
def is_translator_registered(catalog='sphinx', namespace='general'):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
return (namespace, catalog) in translators
def _lazy_translate(catalog, namespace, message):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
"""Used instead of _ when creating TranslationProxy, because _ is
not bound yet at that time.
"""
translator = get_translator(catalog, namespace)
- return translator.gettext(message) # type: ignore
+ return translator.gettext(message)
def get_translation(catalog, namespace='general'):
@@ -328,16 +271,16 @@ def get_translation(catalog, namespace='general'):
.. versionadded:: 1.8
"""
def gettext(message, *args):
- # type: (unicode, *Any) -> unicode
+ # type: (str, *Any) -> str
if not is_translator_registered(catalog, namespace):
# not initialized yet
return _TranslationProxy(_lazy_translate, catalog, namespace, message) # type: ignore # NOQA
else:
translator = get_translator(catalog, namespace)
if len(args) <= 1:
- return translator.gettext(message) # type: ignore
+ return translator.gettext(message)
else: # support pluralization
- return translator.ngettext(message, args[0], args[1]) # type: ignore
+ return translator.ngettext(message, args[0], args[1])
return gettext
@@ -369,10 +312,10 @@ admonitionlabels = {
'seealso': _('See also'),
'tip': _('Tip'),
'warning': _('Warning'),
-} # type: Dict[unicode, unicode]
+}
-# Moved to sphinx.directives.other (will be overridden later)
-versionlabels = {} # type: Dict[unicode, unicode]
+# Moved to sphinx.directives.other (will be overriden later)
+versionlabels = {} # type: Dict[str, str]
-# Moved to sphinx.domains.python (will be overridden later)
-pairindextypes = {} # type: Dict[unicode, unicode]
+# Moved to sphinx.domains.python (will be overriden later)
+pairindextypes = {} # type: Dict[str, str]
diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py
index ffc609a03..9e4c0113c 100644
--- a/sphinx/make_mode.py
+++ b/sphinx/make_mode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.make_mode
~~~~~~~~~~~~~~~~
@@ -29,7 +28,7 @@ class Make(make_mode.Make):
warnings.warn('sphinx.make_mode.Make is deprecated. '
'Please use sphinx.cmd.make_mode.Make instead.',
RemovedInSphinx30Warning, stacklevel=2)
- super(Make, self).__init__(*args)
+ super().__init__(*args)
def run_make_mode(args):
diff --git a/sphinx/parsers.py b/sphinx/parsers.py
index 780b2e970..438f18257 100644
--- a/sphinx/parsers.py
+++ b/sphinx/parsers.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.parsers
~~~~~~~~~~~~~~
@@ -15,9 +14,11 @@ from docutils.parsers.rst import states
from docutils.statemachine import StringList
from docutils.transforms.universal import SmartQuotes
+from sphinx.util.rst import append_epilog, prepend_prolog
+
if False:
# For type annotation
- from typing import Any, Dict, List, Type # NOQA
+ from typing import Any, Dict, List, Type, Union # NOQA
from docutils import nodes # NOQA
from docutils.transforms import Transform # NOQA
from sphinx.application import Sphinx # NOQA
@@ -55,45 +56,53 @@ class Parser(docutils.parsers.Parser):
self.app = app
self.config = app.config
self.env = app.env
- self.warn = app.warn
- self.info = app.info
-class RSTParser(docutils.parsers.rst.Parser):
+class RSTParser(docutils.parsers.rst.Parser, Parser):
"""A reST parser for Sphinx."""
def get_transforms(self):
# type: () -> List[Type[Transform]]
"""Sphinx's reST parser replaces a transform class for smart-quotes by own's
- refs: sphinx.io.SphinxStandaloneReader"""
- transforms = docutils.parsers.rst.Parser.get_transforms(self)
+ refs: sphinx.io.SphinxStandaloneReader
+ """
+ transforms = super().get_transforms()
transforms.remove(SmartQuotes)
return transforms
def parse(self, inputstring, document):
- # type: (Any, nodes.document) -> None
- """Parse text and generate a document tree.
-
- This accepts StringList as an inputstring parameter.
- It enables to handle mixed contents (cf. :confval:`rst_prolog`) correctly.
- """
- if isinstance(inputstring, StringList):
- self.setup_parse(inputstring, document)
- self.statemachine = states.RSTStateMachine(
- state_classes=self.state_classes,
- initial_state=self.initial_state,
- debug=document.reporter.debug_flag)
- # Give inputstring directly to statemachine.
- self.statemachine.run(inputstring, document, inliner=self.inliner)
- self.finish_parse()
+ # type: (Union[str, StringList], nodes.document) -> None
+ """Parse text and generate a document tree."""
+ self.setup_parse(inputstring, document) # type: ignore
+ self.statemachine = states.RSTStateMachine(
+ state_classes=self.state_classes,
+ initial_state=self.initial_state,
+ debug=document.reporter.debug_flag)
+
+ # preprocess inputstring
+ if isinstance(inputstring, str):
+ lines = docutils.statemachine.string2lines(
+ inputstring, tab_width=document.settings.tab_width,
+ convert_whitespace=True)
+
+ inputlines = StringList(lines, document.current_source)
else:
- # otherwise, inputstring might be a string. It will be handled by superclass.
- docutils.parsers.rst.Parser.parse(self, inputstring, document)
+ inputlines = inputstring
+
+ self.decorate(inputlines)
+ self.statemachine.run(inputlines, document, inliner=self.inliner)
+ self.finish_parse()
+
+ def decorate(self, content):
+ # type: (StringList) -> None
+ """Preprocess reST content before parsing."""
+ prepend_prolog(content, self.config.rst_prolog)
+ append_epilog(content, self.config.rst_epilog)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_source_parser(RSTParser)
return {
diff --git a/sphinx/project.py b/sphinx/project.py
new file mode 100644
index 000000000..8b8aa5794
--- /dev/null
+++ b/sphinx/project.py
@@ -0,0 +1,97 @@
+"""
+ sphinx.project
+ ~~~~~~~~~~~~~~
+
+ Utility function and classes for Sphinx projects.
+
+ :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+from typing import TYPE_CHECKING
+
+from sphinx.locale import __
+from sphinx.util import get_matching_files
+from sphinx.util import logging
+from sphinx.util.matching import compile_matchers
+from sphinx.util.osutil import SEP, relpath
+
+if TYPE_CHECKING:
+ from typing import Dict, List, Set # NOQA
+
+logger = logging.getLogger(__name__)
+EXCLUDE_PATHS = ['**/_sources', '.#*', '**/.#*', '*.lproj/**']
+
+
+class Project(object):
+ """A project is source code set of Sphinx document."""
+
+ def __init__(self, srcdir, source_suffix):
+ # type: (str, Dict[str, str]) -> None
+ #: Source directory.
+ self.srcdir = srcdir
+
+ #: source_suffix. Same as :confval:`source_suffix`.
+ self.source_suffix = source_suffix
+
+ #: The names of documents belonging to this project.
+ self.docnames = set() # type: Set[str]
+
+ def restore(self, other):
+ # type: (Project) -> None
+ """Take over a result of last build."""
+ self.docnames = other.docnames
+
+ def discover(self, exclude_paths=[]):
+ # type: (List[str]) -> Set[str]
+ """Find all document files in the source directory and put them in
+ :attr:`docnames`.
+ """
+ self.docnames = set()
+ excludes = compile_matchers(exclude_paths + EXCLUDE_PATHS)
+ for filename in get_matching_files(self.srcdir, excludes): # type: ignore
+ docname = self.path2doc(filename)
+ if docname:
+ if os.access(os.path.join(self.srcdir, filename), os.R_OK):
+ self.docnames.add(docname)
+ else:
+ logger.warning(__("document not readable. Ignored."), location=docname)
+
+ return self.docnames
+
+ def path2doc(self, filename):
+ # type: (str) -> str
+ """Return the docname for the filename if the file is document.
+
+ *filename* should be absolute or relative to the source directory.
+ """
+ if filename.startswith(self.srcdir):
+ filename = relpath(filename, self.srcdir)
+ for suffix in self.source_suffix:
+ if filename.endswith(suffix):
+ return filename[:-len(suffix)]
+
+ # the file does not have docname
+ return None
+
+ def doc2path(self, docname, basedir=True):
+ # type: (str, bool) -> str
+ """Return the filename for the document name.
+
+ If *basedir* is True, return as an absolute path.
+ Else, return as a relative path to the source directory.
+ """
+ docname = docname.replace(SEP, os.path.sep)
+ basename = os.path.join(self.srcdir, docname)
+ for suffix in self.source_suffix:
+ if os.path.isfile(basename + suffix):
+ break
+ else:
+ # document does not exist
+ suffix = list(self.source_suffix)[0]
+
+ if basedir:
+ return basename + suffix
+ else:
+ return docname + suffix
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 8561169b5..e97dfeb21 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.pycode
~~~~~~~~~~~~~
@@ -8,13 +7,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import re
+from io import StringIO
from zipfile import ZipFile
-from six import iteritems, BytesIO, StringIO
-
from sphinx.errors import PycodeError
from sphinx.pycode.parser import Parser
from sphinx.util import get_module_source, detect_encoding
@@ -24,20 +21,18 @@ if False:
from typing import Any, Dict, IO, List, Tuple # NOQA
-class ModuleAnalyzer(object):
+class ModuleAnalyzer:
# cache for analyzer objects -- caches both by module and file name
- cache = {} # type: Dict[Tuple[unicode, unicode], Any]
+ cache = {} # type: Dict[Tuple[str, str], Any]
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
- # type: (unicode, unicode, unicode) -> ModuleAnalyzer
- if isinstance(string, bytes):
- return cls(BytesIO(string), modname, srcname)
+ # type: (str, str, str) -> ModuleAnalyzer
return cls(StringIO(string), modname, srcname, decoded=True)
@classmethod
def for_file(cls, filename, modname):
- # type: (unicode, unicode) -> ModuleAnalyzer
+ # type: (str, str) -> ModuleAnalyzer
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
@@ -53,11 +48,11 @@ class ModuleAnalyzer(object):
@classmethod
def for_egg(cls, filename, modname):
- # type: (unicode, unicode) -> ModuleAnalyzer
+ # type: (str, str) -> ModuleAnalyzer
eggpath, relpath = re.split('(?<=\\.egg)/', filename)
try:
with ZipFile(eggpath) as egg:
- code = egg.read(relpath).decode('utf-8')
+ code = egg.read(relpath).decode()
return cls.for_string(code, modname, filename)
except Exception as exc:
raise PycodeError('error opening %r' % filename, exc)
@@ -84,7 +79,7 @@ class ModuleAnalyzer(object):
return obj
def __init__(self, source, modname, srcname, decoded=False):
- # type: (IO, unicode, unicode, bool) -> None
+ # type: (IO, str, str, bool) -> None
self.modname = modname # name of the module
self.srcname = srcname # name of the source file
@@ -99,9 +94,9 @@ class ModuleAnalyzer(object):
self.code = source.read()
# will be filled by parse()
- self.attr_docs = None # type: Dict[Tuple[unicode, unicode], List[unicode]]
- self.tagorder = None # type: Dict[unicode, int]
- self.tags = None # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.attr_docs = None # type: Dict[Tuple[str, str], List[str]]
+ self.tagorder = None # type: Dict[str, int]
+ self.tags = None # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
@@ -111,7 +106,7 @@ class ModuleAnalyzer(object):
parser.parse()
self.attr_docs = {}
- for (scope, comment) in iteritems(parser.comments):
+ for (scope, comment) in parser.comments.items():
if comment:
self.attr_docs[scope] = comment.splitlines() + ['']
else:
@@ -123,7 +118,7 @@ class ModuleAnalyzer(object):
raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
def find_attr_docs(self):
- # type: () -> Dict[Tuple[unicode, unicode], List[unicode]]
+ # type: () -> Dict[Tuple[str, str], List[str]]
"""Find class and module-level attributes and their documentation."""
if self.attr_docs is None:
self.parse()
@@ -131,7 +126,7 @@ class ModuleAnalyzer(object):
return self.attr_docs
def find_tags(self):
- # type: () -> Dict[unicode, Tuple[unicode, int, int]]
+ # type: () -> Dict[str, Tuple[str, int, int]]
"""Find class, function and method definitions and their location."""
if self.tags is None:
self.parse()
diff --git a/sphinx/pycode/parser.py b/sphinx/pycode/parser.py
index 9d464a253..8aa2815d0 100644
--- a/sphinx/pycode/parser.py
+++ b/sphinx/pycode/parser.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.pycode.parser
~~~~~~~~~~~~~~~~~~~~
@@ -17,15 +16,13 @@ import tokenize
from token import NAME, NEWLINE, INDENT, DEDENT, NUMBER, OP, STRING
from tokenize import COMMENT, NL
-from six import PY2, text_type
-
if False:
# For type annotation
from typing import Any, Dict, IO, List, Tuple # NOQA
-comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$')
-indent_re = re.compile(u'^\\s*$')
-emptyline_re = re.compile(u'^\\s*(#.*)?$')
+comment_re = re.compile('^\\s*#: ?(.*)\r?\n?$')
+indent_re = re.compile('^\\s*$')
+emptyline_re = re.compile('^\\s*(#.*)?$')
if sys.version_info >= (3, 6):
@@ -35,7 +32,7 @@ else:
def filter_whitespace(code):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return code.replace('\f', ' ') # replace FF (form feed) with whitespace
@@ -49,7 +46,7 @@ def get_assign_targets(node):
def get_lvar_names(node, self=None):
- # type: (ast.AST, ast.expr) -> List[unicode]
+ # type: (ast.AST, ast.arg) -> List[str]
"""Convert assignment-AST to variable names.
This raises `TypeError` if the assignment does not create new variable::
@@ -59,10 +56,7 @@ def get_lvar_names(node, self=None):
# => TypeError
"""
if self:
- if PY2:
- self_id = self.id # type: ignore
- else:
- self_id = self.arg
+ self_id = self.arg
node_name = node.__class__.__name__
if node_name in ('Index', 'Num', 'Slice', 'Str', 'Subscript'):
@@ -95,23 +89,23 @@ def get_lvar_names(node, self=None):
def dedent_docstring(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Remove common leading indentation from docstring."""
def dummy():
# type: () -> None
# dummy function to mock `inspect.getdoc`.
pass
- dummy.__doc__ = s # type: ignore
+ dummy.__doc__ = s
docstring = inspect.getdoc(dummy)
return docstring.lstrip("\r\n").rstrip("\r\n")
-class Token(object):
+class Token:
"""Better token wrapper for tokenize module."""
def __init__(self, kind, value, start, end, source):
- # type: (int, Any, Tuple[int, int], Tuple[int, int], unicode) -> None # NOQA
+ # type: (int, Any, Tuple[int, int], Tuple[int, int], str) -> None
self.kind = kind
self.value = value
self.start = start
@@ -131,10 +125,6 @@ class Token(object):
else:
raise ValueError('Unknown value: %r' % other)
- def __ne__(self, other):
- # type: (Any) -> bool
- return not (self == other)
-
def match(self, *conditions):
# type: (Any) -> bool
return any(self == candidate for candidate in conditions)
@@ -145,17 +135,17 @@ class Token(object):
self.value.strip())
-class TokenProcessor(object):
+class TokenProcessor:
def __init__(self, buffers):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
lines = iter(buffers)
self.buffers = buffers
- self.tokens = tokenize.generate_tokens(lambda: next(lines)) # type: ignore # NOQA
+ self.tokens = tokenize.generate_tokens(lambda: next(lines))
self.current = None # type: Token
self.previous = None # type: Token
def get_line(self, lineno):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
@@ -202,9 +192,9 @@ class AfterCommentParser(TokenProcessor):
"""
def __init__(self, lines):
- # type: (List[unicode]) -> None
- super(AfterCommentParser, self).__init__(lines)
- self.comment = None # type: unicode
+ # type: (List[str]) -> None
+ super().__init__(lines)
+ self.comment = None # type: str
def fetch_rvalue(self):
# type: () -> List[Token]
@@ -246,20 +236,20 @@ class VariableCommentPicker(ast.NodeVisitor):
"""Python source code parser to pick up variable comments."""
def __init__(self, buffers, encoding):
- # type: (List[unicode], unicode) -> None
+ # type: (List[str], str) -> None
self.counter = itertools.count()
self.buffers = buffers
self.encoding = encoding
- self.context = [] # type: List[unicode]
- self.current_classes = [] # type: List[unicode]
+ self.context = [] # type: List[str]
+ self.current_classes = [] # type: List[str]
self.current_function = None # type: ast.FunctionDef
- self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
+ self.comments = {} # type: Dict[Tuple[str, str], str]
self.previous = None # type: ast.AST
- self.deforders = {} # type: Dict[unicode, int]
- super(VariableCommentPicker, self).__init__()
+ self.deforders = {} # type: Dict[str, int]
+ super().__init__()
def add_entry(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@@ -272,7 +262,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.deforders[".".join(definition)] = next(self.counter)
def add_variable_comment(self, name, comment):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.current_function:
if self.current_classes and self.context[-1] == "__init__":
# store variable comments inside __init__ method of classes
@@ -285,7 +275,7 @@ class VariableCommentPicker(ast.NodeVisitor):
self.comments[(context, name)] = comment
def get_self(self):
- # type: () -> ast.expr
+ # type: () -> ast.arg
"""Returns the name of first argument if in function."""
if self.current_function and self.current_function.args.args:
return self.current_function.args.args[0]
@@ -293,14 +283,14 @@ class VariableCommentPicker(ast.NodeVisitor):
return None
def get_line(self, lineno):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Returns specified line."""
return self.buffers[lineno - 1]
def visit(self, node):
# type: (ast.AST) -> None
"""Updates self.previous to ."""
- super(VariableCommentPicker, self).visit(node)
+ super().visit(node)
self.previous = node
def visit_Assign(self, node):
@@ -357,7 +347,7 @@ class VariableCommentPicker(ast.NodeVisitor):
targets = get_assign_targets(self.previous)
varnames = get_lvar_names(targets[0], self.get_self())
for varname in varnames:
- if isinstance(node.value.s, text_type):
+ if isinstance(node.value.s, str):
docstring = node.value.s
else:
docstring = node.value.s.decode(self.encoding or 'utf-8')
@@ -394,15 +384,15 @@ class VariableCommentPicker(ast.NodeVisitor):
class DefinitionFinder(TokenProcessor):
def __init__(self, lines):
- # type: (List[unicode]) -> None
- super(DefinitionFinder, self).__init__(lines)
+ # type: (List[str]) -> None
+ super().__init__(lines)
self.decorator = None # type: Token
- self.context = [] # type: List[unicode]
+ self.context = [] # type: List[str]
self.indents = [] # type: List
- self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def add_definition(self, name, entry):
- # type: (unicode, Tuple[unicode, int, int]) -> None
+ # type: (str, Tuple[str, int, int]) -> None
if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
# ignore definition of inner function
pass
@@ -431,7 +421,7 @@ class DefinitionFinder(TokenProcessor):
self.finalize_block()
def parse_definition(self, typ):
- # type: (unicode) -> None
+ # type: (str) -> None
name = self.fetch_token()
self.context.append(name.value)
funcname = '.'.join(self.context)
@@ -464,19 +454,19 @@ class DefinitionFinder(TokenProcessor):
self.context.pop()
-class Parser(object):
+class Parser:
"""Python source code parser to pick up variable comments.
This is a better wrapper for ``VariableCommentPicker``.
"""
def __init__(self, code, encoding='utf-8'):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self.code = filter_whitespace(code)
self.encoding = encoding
- self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
- self.deforders = {} # type: Dict[unicode, int]
- self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+ self.comments = {} # type: Dict[Tuple[str, str], str]
+ self.deforders = {} # type: Dict[str, int]
+ self.definitions = {} # type: Dict[str, Tuple[str, int, int]]
def parse(self):
# type: () -> None
@@ -487,7 +477,7 @@ class Parser(object):
def parse_comments(self):
# type: () -> None
"""Parse the code and pick up comments."""
- tree = ast.parse(self.code.encode('utf-8'))
+ tree = ast.parse(self.code.encode())
picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
picker.visit(tree)
self.comments = picker.comments
diff --git a/sphinx/pygments_styles.py b/sphinx/pygments_styles.py
index d29d825d5..796ac3843 100644
--- a/sphinx/pygments_styles.py
+++ b/sphinx/pygments_styles.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.pygments_styles
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
deleted file mode 100644
index 8cad0640b..000000000
--- a/sphinx/quickstart.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.quickstart
- ~~~~~~~~~~~~~~~~~
-
- This file has moved to :py:mod:`sphinx.cmd.quickstart`.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import warnings
-
-from sphinx.cmd.quickstart import main as _main
-from sphinx.deprecation import RemovedInSphinx20Warning
-
-if False:
- # For type annotation
- from typing import Any # NOQA
-
-
-def main(*args, **kwargs):
- # type: (Any, Any) -> None
- warnings.warn(
- '`sphinx.quickstart.main()` has moved to `sphinx.cmd.quickstart.'
- 'main()`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- args = args[1:] # skip first argument to adjust arguments (refs: #4615)
- _main(*args, **kwargs)
-
-
-# So program can be started with "python -m sphinx.quickstart ..."
-if __name__ == "__main__":
- warnings.warn(
- '`sphinx.quickstart` has moved to `sphinx.cmd.quickstart`.',
- RemovedInSphinx20Warning,
- stacklevel=2,
- )
- main()
diff --git a/sphinx/registry.py b/sphinx/registry.py
index 4fd808f04..311270ccb 100644
--- a/sphinx/registry.py
+++ b/sphinx/registry.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.registry
~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import traceback
import warnings
@@ -17,7 +15,6 @@ from types import MethodType
from docutils.parsers.rst import Directive
from pkg_resources import iter_entry_points
-from six import iteritems, itervalues
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.domains import ObjType
@@ -29,6 +26,7 @@ from sphinx.parsers import Parser as SphinxParser
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docutils import directive_helper
+from sphinx.util.logging import prefixed_warnings
if False:
# For type annotation
@@ -43,6 +41,7 @@ if False:
from sphinx.domains import Domain, Index # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.ext.autodoc import Documenter # NOQA
+ from sphinx.io import SphinxFileInput # NOQA
from sphinx.util.typing import RoleFunction, TitleGetter # NOQA
logger = logging.getLogger(__name__)
@@ -51,76 +50,76 @@ logger = logging.getLogger(__name__)
# Values are Sphinx version that merge the extension.
EXTENSION_BLACKLIST = {
"sphinxjp.themecore": "1.2"
-} # type: Dict[unicode, unicode]
+}
-class SphinxComponentRegistry(object):
+class SphinxComponentRegistry:
def __init__(self):
# type: () -> None
#: special attrgetter for autodoc; class object -> attrgetter
- self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, unicode, Any], Any]]
+ self.autodoc_attrgettrs = {} # type: Dict[Type, Callable[[Any, str, Any], Any]]
#: builders; a dict of builder name -> bulider class
- self.builders = {} # type: Dict[unicode, Type[Builder]]
+ self.builders = {} # type: Dict[str, Type[Builder]]
#: autodoc documenters; a dict of documenter name -> documenter class
- self.documenters = {} # type: Dict[unicode, Type[Documenter]]
+ self.documenters = {} # type: Dict[str, Type[Documenter]]
#: css_files; a list of tuple of filename and attributes
- self.css_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
+ self.css_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: domains; a dict of domain name -> domain class
- self.domains = {} # type: Dict[unicode, Type[Domain]]
+ self.domains = {} # type: Dict[str, Type[Domain]]
#: additional directives for domains
#: a dict of domain name -> dict of directive name -> directive
- self.domain_directives = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.domain_directives = {} # type: Dict[str, Dict[str, Any]]
#: additional indices for domains
#: a dict of domain name -> list of index class
- self.domain_indices = {} # type: Dict[unicode, List[Type[Index]]]
+ self.domain_indices = {} # type: Dict[str, List[Type[Index]]]
#: additional object types for domains
#: a dict of domain name -> dict of objtype name -> objtype
- self.domain_object_types = {} # type: Dict[unicode, Dict[unicode, ObjType]]
+ self.domain_object_types = {} # type: Dict[str, Dict[str, ObjType]]
#: additional roles for domains
#: a dict of domain name -> dict of role name -> role impl.
- self.domain_roles = {} # type: Dict[unicode, Dict[unicode, Union[RoleFunction, XRefRole]]] # NOQA
+ self.domain_roles = {} # type: Dict[str, Dict[str, Union[RoleFunction, XRefRole]]] # NOQA
#: additional enumerable nodes
#: a dict of node class -> tuple of figtype and title_getter function
- self.enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, TitleGetter]]
+ self.enumerable_nodes = {} # type: Dict[Type[nodes.Node], Tuple[str, TitleGetter]]
#: HTML inline and block math renderers
#: a dict of name -> tuple of visit function and depart function
- self.html_inline_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
- self.html_block_math_renderers = {} # type: Dict[unicode, Tuple[Callable, Callable]] # NOQA
+ self.html_inline_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
+ self.html_block_math_renderers = {} # type: Dict[str, Tuple[Callable, Callable]]
#: js_files; list of JS paths or URLs
- self.js_files = [] # type: List[Tuple[unicode, Dict[unicode, unicode]]]
+ self.js_files = [] # type: List[Tuple[str, Dict[str, str]]]
#: LaTeX packages; list of package names and its options
- self.latex_packages = [] # type: List[Tuple[unicode, unicode]]
+ self.latex_packages = [] # type: List[Tuple[str, str]]
#: post transforms; list of transforms
self.post_transforms = [] # type: List[Type[Transform]]
#: source paresrs; file type -> parser class
- self.source_parsers = {} # type: Dict[unicode, Type[Parser]]
+ self.source_parsers = {} # type: Dict[str, Type[Parser]]
#: source inputs; file type -> input class
- self.source_inputs = {} # type: Dict[unicode, Input]
+ self.source_inputs = {} # type: Dict[str, Type[Input]]
#: source suffix: suffix -> file type
- self.source_suffix = {} # type: Dict[unicode, unicode]
+ self.source_suffix = {} # type: Dict[str, str]
#: custom translators; builder name -> translator class
- self.translators = {} # type: Dict[unicode, nodes.NodeVisitor]
+ self.translators = {} # type: Dict[str, Type[nodes.NodeVisitor]]
#: custom handlers for translators
#: a dict of builder name -> dict of node name -> visitor and departure functions
- self.translation_handlers = {} # type: Dict[unicode, Dict[unicode, Tuple[Callable, Callable]]] # NOQA
+ self.translation_handlers = {} # type: Dict[str, Dict[str, Tuple[Callable, Callable]]]
#: additional transforms; list of transforms
self.transforms = [] # type: List[Type[Transform]]
@@ -136,7 +135,7 @@ class SphinxComponentRegistry(object):
self.builders[builder.name] = builder
def preload_builder(self, app, name):
- # type: (Sphinx, unicode) -> None
+ # type: (Sphinx, str) -> None
if name is None:
return
@@ -151,7 +150,7 @@ class SphinxComponentRegistry(object):
self.load_extension(app, entry_point.module_name)
def create_builder(self, app, name):
- # type: (Sphinx, unicode) -> Builder
+ # type: (Sphinx, str) -> Builder
if name not in self.builders:
raise SphinxError(__('Builder name %s not registered') % name)
@@ -165,19 +164,19 @@ class SphinxComponentRegistry(object):
self.domains[domain.name] = domain
def has_domain(self, domain):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return domain in self.domains
def create_domains(self, env):
# type: (BuildEnvironment) -> Iterator[Domain]
- for DomainClass in itervalues(self.domains):
+ for DomainClass in self.domains.values():
domain = DomainClass(env)
# transplant components added by extensions
domain.directives.update(self.domain_directives.get(domain.name, {}))
domain.roles.update(self.domain_roles.get(domain.name, {}))
domain.indices.extend(self.domain_indices.get(domain.name, []))
- for name, objtype in iteritems(self.domain_object_types.get(domain.name, {})):
+ for name, objtype in self.domain_object_types.get(domain.name, {}).items():
domain.add_object_type(name, objtype)
yield domain
@@ -191,7 +190,7 @@ class SphinxComponentRegistry(object):
def add_directive_to_domain(self, domain, name, obj, has_content=None, argument_spec=None,
override=False, **option_spec):
- # type: (unicode, unicode, Any, bool, Any, bool, Any) -> None
+ # type: (str, str, Any, bool, Any, bool, Any) -> None
logger.debug('[app] adding directive to domain: %r',
(domain, name, obj, has_content, argument_spec, option_spec))
if domain not in self.domains:
@@ -207,7 +206,7 @@ class SphinxComponentRegistry(object):
directives[name] = obj
def add_role_to_domain(self, domain, name, role, override=False):
- # type: (unicode, unicode, Union[RoleFunction, XRefRole], bool) -> None
+ # type: (str, str, Union[RoleFunction, XRefRole], bool) -> None
logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@@ -218,7 +217,7 @@ class SphinxComponentRegistry(object):
roles[name] = role
def add_index_to_domain(self, domain, index, override=False):
- # type: (unicode, Type[Index], bool) -> None
+ # type: (str, Type[Index], bool) -> None
logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
@@ -231,13 +230,13 @@ class SphinxComponentRegistry(object):
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[], override=False):
- # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List, bool) -> None
+ # type: (str, str, str, Callable, Type[nodes.TextElement], str, List, bool) -> None
logger.debug('[app] adding object type: %r',
(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types))
# create a subclass of GenericObject as the new directive
- directive = type(directivename, # type: ignore
+ directive = type(directivename,
(GenericObject, object),
{'indextemplate': indextemplate,
'parse_node': staticmethod(parse_node),
@@ -254,12 +253,12 @@ class SphinxComponentRegistry(object):
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname='', override=False):
- # type: (unicode, unicode, unicode, nodes.Node, unicode, bool) -> None
+ # type: (str, str, str, Type[nodes.TextElement], str, bool) -> None
logger.debug('[app] adding crossref type: %r',
(directivename, rolename, indextemplate, ref_nodeclass, objname))
# create a subclass of Target as the new directive
- directive = type(directivename, # type: ignore
+ directive = type(directivename,
(Target, object),
{'indextemplate': indextemplate})
@@ -273,7 +272,7 @@ class SphinxComponentRegistry(object):
object_types[directivename] = ObjType(objname or directivename, rolename)
def add_source_suffix(self, suffix, filetype, override=False):
- # type: (unicode, unicode, bool) -> None
+ # type: (str, str, bool) -> None
logger.debug('[app] adding source_suffix: %r, %r', suffix, filetype)
if suffix in self.source_suffix and not override:
raise ExtensionError(__('source_suffix %r is already registered') % suffix)
@@ -285,7 +284,7 @@ class SphinxComponentRegistry(object):
logger.debug('[app] adding search source_parser: %r', args)
if len(args) == 1:
# new sytle arguments: (source_parser)
- suffix = None # type: unicode
+ suffix = None # type: str
parser = args[0] # type: Type[Parser]
else:
# old style arguments: (suffix, source_parser)
@@ -318,18 +317,18 @@ class SphinxComponentRegistry(object):
self.source_parsers[suffix] = parser
def get_source_parser(self, filetype):
- # type: (unicode) -> Type[Parser]
+ # type: (str) -> Type[Parser]
try:
return self.source_parsers[filetype]
except KeyError:
raise SphinxError(__('Source parser for %s not registered') % filetype)
def get_source_parsers(self):
- # type: () -> Dict[unicode, Parser]
+ # type: () -> Dict[str, Type[Parser]]
return self.source_parsers
def create_source_parser(self, app, filename):
- # type: (Sphinx, unicode) -> Parser
+ # type: (Sphinx, str) -> Parser
parser_class = self.get_source_parser(filename)
parser = parser_class()
if isinstance(parser, SphinxParser):
@@ -337,7 +336,9 @@ class SphinxComponentRegistry(object):
return parser
def add_source_input(self, input_class, override=False):
- # type: (Type[Input], bool) -> None
+ # type: (Type[SphinxFileInput], bool) -> None
+ warnings.warn('registry.source_input() is deprecated.',
+ RemovedInSphinx30Warning, stacklevel=2)
for filetype in input_class.supported:
if filetype in self.source_inputs and not override:
raise ExtensionError(__('source_input for %r is already registered') %
@@ -345,7 +346,7 @@ class SphinxComponentRegistry(object):
self.source_inputs[filetype] = input_class
def get_source_input(self, filetype):
- # type: (unicode) -> Type[Input]
+ # type: (str) -> Type[Input]
try:
return self.source_inputs[filetype]
except KeyError:
@@ -353,19 +354,19 @@ class SphinxComponentRegistry(object):
# use special source_input for unknown filetype
return self.source_inputs['*']
except KeyError:
- raise SphinxError(__('source_input for %s not registered') % filetype)
+ return None
def add_translator(self, name, translator, override=False):
- # type: (unicode, Type[nodes.NodeVisitor], bool) -> None
+ # type: (str, Type[nodes.NodeVisitor], bool) -> None
logger.debug('[app] Change of translator for the %s builder.' % name)
if name in self.translators and not override:
raise ExtensionError(__('Translator for %r already exists') % name)
self.translators[name] = translator
def add_translation_handlers(self, node, **kwargs):
- # type: (nodes.Node, Any) -> None
+ # type: (Type[nodes.Element], Any) -> None
logger.debug('[app] adding translation_handlers: %r, %r', node, kwargs)
- for builder_name, handlers in iteritems(kwargs):
+ for builder_name, handlers in kwargs.items():
translation_handlers = self.translation_handlers.setdefault(builder_name, {})
try:
visit, depart = handlers # unpack once for assertion
@@ -391,7 +392,7 @@ class SphinxComponentRegistry(object):
# retry with builder.format
handlers = self.translation_handlers.get(builder.format, {})
- for name, (visit, depart) in iteritems(handlers):
+ for name, (visit, depart) in handlers.items():
setattr(translator, 'visit_' + name, MethodType(visit, translator))
if depart:
setattr(translator, 'depart_' + name, MethodType(depart, translator))
@@ -417,35 +418,35 @@ class SphinxComponentRegistry(object):
return self.post_transforms
def add_documenter(self, objtype, documenter):
- # type: (unicode, Type[Documenter]) -> None
+ # type: (str, Type[Documenter]) -> None
self.documenters[objtype] = documenter
def add_autodoc_attrgetter(self, typ, attrgetter):
- # type: (Type, Callable[[Any, unicode, Any], Any]) -> None
+ # type: (Type, Callable[[Any, str, Any], Any]) -> None
self.autodoc_attrgettrs[typ] = attrgetter
def add_css_files(self, filename, **attributes):
self.css_files.append((filename, attributes))
def add_js_file(self, filename, **attributes):
- # type: (unicode, **unicode) -> None
+ # type: (str, **str) -> None
logger.debug('[app] adding js_file: %r, %r', filename, attributes)
- self.js_files.append((filename, attributes)) # type: ignore
+ self.js_files.append((filename, attributes))
def add_latex_package(self, name, options):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
logger.debug('[app] adding latex package: %r', name)
self.latex_packages.append((name, options))
def add_enumerable_node(self, node, figtype, title_getter=None, override=False):
- # type: (nodes.Node, unicode, TitleGetter, bool) -> None
+ # type: (Type[nodes.Node], str, TitleGetter, bool) -> None
logger.debug('[app] adding enumerable node: (%r, %r, %r)', node, figtype, title_getter)
if node in self.enumerable_nodes and not override:
raise ExtensionError(__('enumerable_node %r already registered') % node)
self.enumerable_nodes[node] = (figtype, title_getter)
def add_html_math_renderer(self, name, inline_renderers, block_renderers):
- # type: (unicode, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
+ # type: (str, Tuple[Callable, Callable], Tuple[Callable, Callable]) -> None
logger.debug('[app] adding html_math_renderer: %s, %r, %r',
name, inline_renderers, block_renderers)
if name in self.html_inline_math_renderers:
@@ -455,7 +456,7 @@ class SphinxComponentRegistry(object):
self.html_block_math_renderers[name] = block_renderers
def load_extension(self, app, extname):
- # type: (Sphinx, unicode) -> None
+ # type: (Sphinx, str) -> None
"""Load a Sphinx extension."""
if extname in app.extensions: # alread loaded
return
@@ -466,42 +467,41 @@ class SphinxComponentRegistry(object):
return
# update loading context
- app._setting_up_extension.append(extname)
-
- try:
- mod = __import__(extname, None, None, ['setup'])
- except ImportError as err:
- logger.verbose(__('Original exception:\n') + traceback.format_exc())
- raise ExtensionError(__('Could not import extension %s') % extname, err)
-
- if not hasattr(mod, 'setup'):
- logger.warning(__('extension %r has no setup() function; is it really '
- 'a Sphinx extension module?'), extname)
- metadata = {} # type: Dict[unicode, Any]
- else:
+ prefix = __('while setting up extension %s:') % extname
+ with prefixed_warnings(prefix):
try:
- metadata = mod.setup(app)
- except VersionRequirementError as err:
- # add the extension name to the version required
- raise VersionRequirementError(
- __('The %s extension used by this project needs at least '
- 'Sphinx v%s; it therefore cannot be built with this '
- 'version.') % (extname, err)
- )
-
- if metadata is None:
- metadata = {}
- elif not isinstance(metadata, dict):
- logger.warning(__('extension %r returned an unsupported object from '
- 'its setup() function; it should return None or a '
- 'metadata dictionary'), extname)
- metadata = {}
-
- app.extensions[extname] = Extension(extname, mod, **metadata)
- app._setting_up_extension.pop()
+ mod = __import__(extname, None, None, ['setup'])
+ except ImportError as err:
+ logger.verbose(__('Original exception:\n') + traceback.format_exc())
+ raise ExtensionError(__('Could not import extension %s') % extname, err)
+
+ if not hasattr(mod, 'setup'):
+ logger.warning(__('extension %r has no setup() function; is it really '
+ 'a Sphinx extension module?'), extname)
+ metadata = {} # type: Dict[str, Any]
+ else:
+ try:
+ metadata = mod.setup(app)
+ except VersionRequirementError as err:
+ # add the extension name to the version required
+ raise VersionRequirementError(
+ __('The %s extension used by this project needs at least '
+ 'Sphinx v%s; it therefore cannot be built with this '
+ 'version.') % (extname, err)
+ )
+
+ if metadata is None:
+ metadata = {}
+ elif not isinstance(metadata, dict):
+ logger.warning(__('extension %r returned an unsupported object from '
+ 'its setup() function; it should return None or a '
+ 'metadata dictionary'), extname)
+ metadata = {}
+
+ app.extensions[extname] = Extension(extname, mod, **metadata)
def get_envversion(self, app):
- # type: (Sphinx) -> Dict[unicode, unicode]
+ # type: (Sphinx) -> Dict[str, str]
from sphinx.environment import ENV_VERSION
envversion = {ext.name: ext.metadata['env_version'] for ext in app.extensions.values()
if ext.metadata.get('env_version')}
@@ -512,7 +512,7 @@ class SphinxComponentRegistry(object):
def merge_source_suffix(app, config):
# type: (Sphinx, Config) -> None
"""Merge source_suffix which specified by user and added by extensions."""
- for suffix, filetype in iteritems(app.registry.source_suffix):
+ for suffix, filetype in app.registry.source_suffix.items():
if suffix not in app.config.source_suffix:
app.config.source_suffix[suffix] = filetype
elif app.config.source_suffix[suffix] is None:
@@ -525,7 +525,7 @@ def merge_source_suffix(app, config):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.connect('config-inited', merge_source_suffix)
return {
diff --git a/sphinx/roles.py b/sphinx/roles.py
index b2a540122..e40de5f19 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.roles
~~~~~~~~~~~~
@@ -12,7 +11,6 @@
import re
from docutils import nodes, utils
-from six import iteritems
from sphinx import addnodes
from sphinx.errors import SphinxError
@@ -27,6 +25,7 @@ if False:
from docutils.parsers.rst.states import Inliner # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
generic_docroles = {
@@ -45,7 +44,7 @@ generic_docroles = {
# -- generic cross-reference role ----------------------------------------------
-class XRefRole(object):
+class XRefRole:
"""
A generic cross-referencing role. To create a callable that can be used as
a role function, create an instance of this class.
@@ -69,12 +68,12 @@ class XRefRole(object):
* Subclassing and overwriting `process_link()` and/or `result_nodes()`.
"""
- nodeclass = addnodes.pending_xref # type: Type[nodes.Node]
- innernodeclass = nodes.literal
+ nodeclass = addnodes.pending_xref # type: Type[nodes.Element]
+ innernodeclass = nodes.literal # type: Type[nodes.TextElement]
def __init__(self, fix_parens=False, lowercase=False,
nodeclass=None, innernodeclass=None, warn_dangling=False):
- # type: (bool, bool, Type[nodes.Node], Type[nodes.Node], bool) -> None
+ # type: (bool, bool, Type[nodes.Element], Type[nodes.TextElement], bool) -> None
self.fix_parens = fix_parens
self.lowercase = lowercase
self.warn_dangling = warn_dangling
@@ -84,7 +83,7 @@ class XRefRole(object):
self.innernodeclass = innernodeclass
def _fix_parens(self, env, has_explicit_title, title, target):
- # type: (BuildEnvironment, bool, unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (BuildEnvironment, bool, str, str) -> Tuple[str, str]
if not has_explicit_title:
if title.endswith('()'):
# remove parentheses
@@ -99,7 +98,7 @@ class XRefRole(object):
def __call__(self, typ, rawtext, text, lineno, inliner,
options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
typ = env.temp_data.get('default_role')
@@ -110,7 +109,7 @@ class XRefRole(object):
else:
typ = typ.lower()
if ':' not in typ:
- domain, role = '', typ # type: unicode, unicode
+ domain, role = '', typ
classes = ['xref', role]
else:
domain, role = typ.split(':', 1)
@@ -121,8 +120,7 @@ class XRefRole(object):
if self.fix_parens:
text, tgt = self._fix_parens(env, False, text, "")
innernode = self.innernodeclass(rawtext, text, classes=classes)
- return self.result_nodes(inliner.document, env, innernode,
- is_ref=False)
+ return self.result_nodes(inliner.document, env, innernode, is_ref=False)
# split title and target in role content
has_explicit_title, title, target = split_explicit_title(text)
title = utils.unescape(title)
@@ -137,9 +135,8 @@ class XRefRole(object):
refnode = self.nodeclass(rawtext, reftype=role, refdomain=domain,
refexplicit=has_explicit_title)
# we may need the line number for warnings
- set_role_source_info(inliner, lineno, refnode) # type: ignore
- title, target = self.process_link(
- env, refnode, has_explicit_title, title, target)
+ set_role_source_info(inliner, lineno, refnode)
+ title, target = self.process_link(env, refnode, has_explicit_title, title, target)
# now that the target and title are finally determined, set them
refnode['reftarget'] = target
refnode += self.innernodeclass(rawtext, title, classes=classes)
@@ -152,7 +149,7 @@ class XRefRole(object):
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
@@ -161,7 +158,7 @@ class XRefRole(object):
return title, ws_re.sub(' ', target)
def result_nodes(self, document, env, node, is_ref):
- # type: (nodes.document, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (nodes.document, BuildEnvironment, nodes.Element, bool) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Called before returning the finished nodes. *node* is the reference
node if one was created (*is_ref* is then true), else the content node.
This method can add other nodes and must return a ``(nodes, messages)``
@@ -172,16 +169,15 @@ class XRefRole(object):
class AnyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
- result = XRefRole.process_link(self, env, refnode, has_explicit_title,
- title, target)
+ # type: (BuildEnvironment, nodes.Element, bool, str, str) -> Tuple[str, str]
+ result = super().process_link(env, refnode, has_explicit_title, title, target)
# add all possible context info (i.e. std:program, py:module etc.)
refnode.attributes.update(env.ref_context)
return result
def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
"""Role for PEP/RFC references that generate an index entry."""
env = inliner.document.settings.env
if not typ:
@@ -201,7 +197,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]
indexnode['entries'] = [
('single', _('Python Enhancement Proposals; PEP %s') % target,
targetid, '', None)]
- anchor = '' # type: unicode
+ anchor = ''
anchorindex = target.find('#')
if anchorindex > 0:
target, anchor = target[:anchorindex], target[anchorindex:]
@@ -250,7 +246,7 @@ _amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@@ -260,7 +256,7 @@ def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
text = utils.unescape(text)
if typ == 'menuselection':
- text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
+ text = text.replace('-->', '\N{TRIANGULAR BULLET}')
spans = _amp_re.split(text)
node = nodes.inline(rawtext=rawtext)
@@ -289,7 +285,7 @@ parens_re = re.compile(r'(\\*{|\\*})')
def emph_literal_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
env = inliner.document.settings.env
if not typ:
assert env.temp_data['default_role']
@@ -341,20 +337,20 @@ _abbr_re = re.compile(r'\((.*)\)$', re.S)
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
text = utils.unescape(text)
m = _abbr_re.search(text)
if m is None:
- return [addnodes.abbreviation(text, text, **options)], []
+ return [nodes.abbreviation(text, text, **options)], []
abbr = text[:m.start()].strip()
expl = m.group(1)
options = options.copy()
options['explanation'] = expl
- return [addnodes.abbreviation(abbr, abbr, **options)], []
+ return [nodes.abbreviation(abbr, abbr, **options)], []
def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
- # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
+ # type: (str, str, str, int, Inliner, Dict, List[str]) -> Tuple[List[nodes.Node], List[nodes.system_message]] # NOQA
# create new reference target
env = inliner.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
@@ -377,7 +373,7 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
entries = [('single', target, targetid, main, None)]
indexnode = addnodes.index()
indexnode['entries'] = entries
- set_role_source_info(inliner, lineno, indexnode) # type: ignore
+ set_role_source_info(inliner, lineno, indexnode)
textnode = nodes.Text(title, title)
return [indexnode, targetnode, textnode], []
@@ -396,19 +392,19 @@ specific_docroles = {
'samp': emph_literal_role,
'abbr': abbr_role,
'index': index_role,
-}
+} # type: Dict[str, RoleFunction]
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
from docutils.parsers.rst import roles
- for rolename, nodeclass in iteritems(generic_docroles):
+ for rolename, nodeclass in generic_docroles.items():
generic = roles.GenericRole(rolename, nodeclass)
role = roles.CustomRole(rolename, generic, {'classes': [rolename]})
roles.register_local_role(rolename, role)
- for rolename, func in iteritems(specific_docroles):
+ for rolename, func in specific_docroles.items():
roles.register_local_role(rolename, func)
return {
diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py
index 3da6c4ba5..6100fc453 100644
--- a/sphinx/search/__init__.py
+++ b/sphinx/search/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search
~~~~~~~~~~~~~
@@ -8,17 +7,18 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import html
+import pickle
import re
+import warnings
from os import path
-from six import iteritems, itervalues, text_type, string_types
-from six.moves import cPickle as pickle
+from docutils import nodes
-from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
-
-import sphinx
+from sphinx import addnodes
+from sphinx import package_dir
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.util import jsdump, rpartition
-from sphinx.util.pycompat import htmlescape
from sphinx.search.jssplitter import splitter_code
if False:
@@ -28,7 +28,7 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
-class SearchLanguage(object):
+class SearchLanguage:
"""
This class is the base class for search natural language preprocessors. If
you want to add support for a new language, you should override the methods
@@ -50,10 +50,10 @@ class SearchLanguage(object):
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
- lang = None # type: unicode
- language_name = None # type: unicode
- stopwords = set() # type: Set[unicode]
- js_stemmer_rawcode = None # type: unicode
+ lang = None # type: str
+ language_name = None # type: str
+ stopwords = set() # type: Set[str]
+ js_stemmer_rawcode = None # type: str
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
@@ -63,7 +63,7 @@ var Stemmer = function() {
return w;
}
}
-""" # type: unicode
+"""
_word_re = re.compile(r'(?u)\w+')
@@ -79,7 +79,7 @@ var Stemmer = function() {
"""
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
@@ -88,7 +88,7 @@ var Stemmer = function() {
return self._word_re.findall(input)
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""
This method implements stemming algorithm of the Python version.
@@ -102,7 +102,7 @@ var Stemmer = function() {
return word
def word_filter(self, word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
@@ -120,13 +120,13 @@ from sphinx.search.en import SearchEnglish
def parse_stop_word(source):
- # type: (unicode) -> Set[unicode]
+ # type: (str) -> Set[str]
"""
parse snowball style word list like this:
* http://snowball.tartarus.org/algorithms/finnish/stop.txt
"""
- result = set()
+ result = set() # type: Set[str]
for line in source.splitlines():
line = line.split('|')[0] # remove comment
result.update(line.split())
@@ -152,10 +152,10 @@ languages = {
'sv': 'sphinx.search.sv.SearchSwedish',
'tr': 'sphinx.search.tr.SearchTurkish',
'zh': 'sphinx.search.zh.SearchChinese',
-} # type: Dict[unicode, Any]
+} # type: Dict[str, Any]
-class _JavaScriptIndex(object):
+class _JavaScriptIndex:
"""
The search index as javascript file that calls a function
on the documentation search object to register the index.
@@ -165,7 +165,7 @@ class _JavaScriptIndex(object):
SUFFIX = ')'
def dumps(self, data):
- # type: (Any) -> unicode
+ # type: (Any) -> str
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
@@ -188,21 +188,25 @@ class _JavaScriptIndex(object):
js_index = _JavaScriptIndex()
-class WordCollector(NodeVisitor):
+class WordCollector(nodes.NodeVisitor):
"""
A special visitor that collects words for the `IndexBuilder`.
"""
def __init__(self, document, lang):
- # type: (nodes.Node, SearchLanguage) -> None
- NodeVisitor.__init__(self, document)
- self.found_words = [] # type: List[unicode]
- self.found_title_words = [] # type: List[unicode]
+ # type: (nodes.document, SearchLanguage) -> None
+ super().__init__(document)
+ self.found_words = [] # type: List[str]
+ self.found_title_words = [] # type: List[str]
self.lang = lang
- def is_meta_keywords(self, node, nodetype):
- # type: (nodes.Node, Type) -> bool
- if isinstance(node, sphinx.addnodes.meta) and node.get('name') == 'keywords':
+ def is_meta_keywords(self, node, nodetype=None):
+ # type: (addnodes.meta, Any) -> bool
+ if nodetype is not None:
+ warnings.warn('"nodetype" argument for WordCollector.is_meta_keywords() '
+ 'is deprecated.', RemovedInSphinx40Warning)
+
+ if isinstance(node, addnodes.meta) and node.get('name') == 'keywords':
meta_lang = node.get('lang')
if meta_lang is None: # lang not specified
return True
@@ -213,10 +217,9 @@ class WordCollector(NodeVisitor):
def dispatch_visit(self, node):
# type: (nodes.Node) -> None
- nodetype = type(node)
- if issubclass(nodetype, comment):
- raise SkipNode
- if issubclass(nodetype, raw):
+ if isinstance(node, nodes.comment):
+ raise nodes.SkipNode
+ elif isinstance(node, nodes.raw):
if 'html' in node.get('format', '').split():
# Some people might put content in raw HTML that should be searched,
# so we just amateurishly strip HTML tags and index the remaining
@@ -225,18 +228,18 @@ class WordCollector(NodeVisitor):
nodetext = re.sub(r'(?is)<script.*?</script>', '', nodetext)
nodetext = re.sub(r'<[^<]+?>', '', nodetext)
self.found_words.extend(self.lang.split(nodetext))
- raise SkipNode
- if issubclass(nodetype, Text):
+ raise nodes.SkipNode
+ elif isinstance(node, nodes.Text):
self.found_words.extend(self.lang.split(node.astext()))
- elif issubclass(nodetype, title):
+ elif isinstance(node, nodes.title):
self.found_title_words.extend(self.lang.split(node.astext()))
- elif self.is_meta_keywords(node, nodetype):
+ elif isinstance(node, addnodes.meta) and self.is_meta_keywords(node):
keywords = node['content']
keywords = [keyword.strip() for keyword in keywords.split(',')]
self.found_words.extend(keywords)
-class IndexBuilder(object):
+class IndexBuilder:
"""
Helper class that creates a searchindex based on the doctrees
passed to the `feed` method.
@@ -244,24 +247,24 @@ class IndexBuilder(object):
formats = {
'jsdump': jsdump,
'pickle': pickle
- } # type: Dict[unicode, Any]
+ }
def __init__(self, env, lang, options, scoring):
- # type: (BuildEnvironment, unicode, Dict, unicode) -> None
+ # type: (BuildEnvironment, str, Dict, str) -> None
self.env = env
- self._titles = {} # type: Dict[unicode, unicode]
+ self._titles = {} # type: Dict[str, str]
# docname -> title
- self._filenames = {} # type: Dict[unicode, unicode]
+ self._filenames = {} # type: Dict[str, str]
# docname -> filename
- self._mapping = {} # type: Dict[unicode, Set[unicode]]
+ self._mapping = {} # type: Dict[str, Set[str]]
# stemmed word -> set(docname)
- self._title_mapping = {} # type: Dict[unicode, Set[unicode]]
+ self._title_mapping = {} # type: Dict[str, Set[str]]
# stemmed words in titles -> set(docname)
- self._stem_cache = {} # type: Dict[unicode, unicode]
+ self._stem_cache = {} # type: Dict[str, str]
# word -> stemmed word
- self._objtypes = {} # type: Dict[Tuple[unicode, unicode], int]
+ self._objtypes = {} # type: Dict[Tuple[str, str], int]
# objtype -> index
- self._objnames = {} # type: Dict[int, Tuple[unicode, unicode, unicode]]
+ self._objnames = {} # type: Dict[int, Tuple[str, str, str]]
# objtype index -> (domain, type, objname (localized))
lang_class = languages.get(lang) # type: Type[SearchLanguage]
# add language-specific SearchLanguage instance
@@ -283,15 +286,15 @@ class IndexBuilder(object):
if scoring:
with open(scoring, 'rb') as fp:
- self.js_scorer_code = fp.read().decode('utf-8')
+ self.js_scorer_code = fp.read().decode()
else:
- self.js_scorer_code = u''
+ self.js_scorer_code = ''
self.js_splitter_code = splitter_code
def load(self, stream, format):
# type: (IO, Any) -> None
"""Reconstruct from frozen data."""
- if isinstance(format, string_types):
+ if isinstance(format, str):
format = self.formats[format]
frozen = format.load(stream)
# if an old index is present, we treat it as not existing.
@@ -303,9 +306,9 @@ class IndexBuilder(object):
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
- # type: (Dict[unicode, Any]) -> Dict[unicode, Set[unicode]]
+ # type: (Dict[str, Any]) -> Dict[str, Set[str]]
rv = {}
- for k, v in iteritems(mapping):
+ for k, v in mapping.items():
if isinstance(v, int):
rv[k] = set([index2fn[v]])
else:
@@ -319,24 +322,24 @@ class IndexBuilder(object):
def dump(self, stream, format):
# type: (IO, Any) -> None
"""Dump the frozen index to a stream."""
- if isinstance(format, string_types):
+ if isinstance(format, str):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
- # type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA
- rv = {} # type: Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]]
+ # type: (Dict[str, int]) -> Dict[str, Dict[str, Tuple[int, int, int, str]]]
+ rv = {} # type: Dict[str, Dict[str, Tuple[int, int, int, str]]]
otypes = self._objtypes
onames = self._objnames
- for domainname, domain in sorted(iteritems(self.env.domains)):
+ for domainname, domain in sorted(self.env.domains.items()):
for fullname, dispname, type, docname, anchor, prio in \
sorted(domain.get_objects()):
if docname not in fn2index:
continue
if prio < 0:
continue
- fullname = htmlescape(fullname)
- dispname = htmlescape(dispname)
+ fullname = html.escape(fullname)
+ dispname = html.escape(dispname)
prefix, name = rpartition(dispname, '.')
pdict = rv.setdefault(prefix, {})
try:
@@ -346,13 +349,13 @@ class IndexBuilder(object):
otypes[domainname, type] = typeindex
otype = domain.object_types.get(type)
if otype:
- # use unicode() to fire translation proxies
+ # use str() to fire translation proxies
onames[typeindex] = (domainname, type,
- text_type(domain.get_type_name(otype)))
+ str(domain.get_type_name(otype)))
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
- shortanchor = '' # type: unicode
+ shortanchor = ''
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
@@ -361,10 +364,10 @@ class IndexBuilder(object):
return rv
def get_terms(self, fn2index):
- # type: (Dict) -> Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
- rvs = {}, {} # type: Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
+ # type: (Dict) -> Tuple[Dict[str, List[str]], Dict[str, List[str]]]
+ rvs = {}, {} # type: Tuple[Dict[str, List[str]], Dict[str, List[str]]]
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
- for k, v in iteritems(mapping):
+ for k, v in mapping.items():
if len(v) == 1:
fn, = v
if fn in fn2index:
@@ -374,7 +377,7 @@ class IndexBuilder(object):
return rvs
def freeze(self):
- # type: () -> Dict[unicode, Any]
+ # type: () -> Dict[str, Any]
"""Create a usable data structure for serializing."""
docnames, titles = zip(*sorted(self._titles.items()))
filenames = [self._filenames.get(docname) for docname in docnames]
@@ -383,18 +386,18 @@ class IndexBuilder(object):
objects = self.get_objects(fn2index) # populates _objtypes
objtypes = dict((v, k[0] + ':' + k[1])
- for (k, v) in iteritems(self._objtypes))
+ for (k, v) in self._objtypes.items())
objnames = self._objnames
return dict(docnames=docnames, filenames=filenames, titles=titles, terms=terms,
objects=objects, objtypes=objtypes, objnames=objnames,
titleterms=title_terms, envversion=self.env.version)
def label(self):
- # type: () -> unicode
+ # type: () -> str
return "%s (code: %s)" % (self.lang.language_name, self.lang.lang)
def prune(self, docnames):
- # type: (Iterable[unicode]) -> None
+ # type: (Iterable[str]) -> None
"""Remove data for all docnames not in the list."""
new_titles = {}
new_filenames = {}
@@ -404,13 +407,13 @@ class IndexBuilder(object):
new_filenames[docname] = self._filenames[docname]
self._titles = new_titles
self._filenames = new_filenames
- for wordnames in itervalues(self._mapping):
+ for wordnames in self._mapping.values():
wordnames.intersection_update(docnames)
- for wordnames in itervalues(self._title_mapping):
+ for wordnames in self._title_mapping.values():
wordnames.intersection_update(docnames)
def feed(self, docname, filename, title, doctree):
- # type: (unicode, unicode, unicode, nodes.Node) -> None
+ # type: (str, str, str, nodes.document) -> None
"""Feed a doctree to the index."""
self._titles[docname] = title
self._filenames[docname] = filename
@@ -420,7 +423,7 @@ class IndexBuilder(object):
# memoize self.lang.stem
def stem(word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
try:
return self._stem_cache[word]
except KeyError:
@@ -445,21 +448,18 @@ class IndexBuilder(object):
self._mapping.setdefault(stemmed_word, set()).add(docname)
def context_for_searchtool(self):
- # type: () -> Dict[unicode, Any]
- return dict(
- search_language_stemming_code = self.lang.js_stemmer_code,
- search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
- search_scorer_tool = self.js_scorer_code,
- search_word_splitter_code = self.js_splitter_code,
- )
+ # type: () -> Dict[str, Any]
+ return {
+ 'search_language_stemming_code': self.lang.js_stemmer_code,
+ 'search_language_stop_words': jsdump.dumps(sorted(self.lang.stopwords)),
+ 'search_scorer_tool': self.js_scorer_code,
+ 'search_word_splitter_code': self.js_splitter_code,
+ }
def get_js_stemmer_rawcode(self):
- # type: () -> unicode
+ # type: () -> str
if self.lang.js_stemmer_rawcode:
- return path.join(
- sphinx.package_dir, 'search',
- 'non-minified-js',
- self.lang.js_stemmer_rawcode
- )
+ return path.join(package_dir, 'search', 'non-minified-js',
+ self.lang.js_stemmer_rawcode)
else:
return None
diff --git a/sphinx/search/da.py b/sphinx/search/da.py
index eb7394b5e..228fdf086 100644
--- a/sphinx/search/da.py
+++ b/sphinx/search/da.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.da
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-danish_stopwords = parse_stop_word(u'''
+danish_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/danish/stop.txt
og | and
i | in
@@ -116,7 +115,7 @@ jer | you
sådan | such, like this/like that
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(g){function j(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function I(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function i(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function J(a,b,c){return a[b]=a[b]/c|0}var E=parseInt;var D=parseFloat;function K(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var w=decodeURI;var u=Object.prototype.toString;var C=Object.prototype.hasOwnProperty;function f(){}g.require=function(b){var a=p[b];return a!==undefined?a:null};g.profilerIsRunning=function(){return f.getResults!=null};g.getProfileResults=function(){return(f.getResults||function(){return{}})()};g.postProfileResults=function(a,b){if(f.postResults==null)throw new Error('profiler has not been turned on');return f.postResults(a,b)};g.resetProfileResults=function(){if(f.resetResults==null)throw new Error('profiler has not been turned on');return f.resetResults()};g.DEBUG=false;function t(){};j([t],Error);function b(a,b,c){this.G=a.length;this.S=a;this.V=b;this.J=c;this.I=null;this.W=null};j([b],Object);function l(){};j([l],Object);function d(){var a;var b;var c;this.F={};a=this.D='';b=this._=0;c=this.A=a.length;this.B=0;this.C=b;this.E=c};j([d],l);function v(a,b){a.D=b.D;a._=b._;a.A=b.A;a.B=b.B;a.C=b.C;a.E=b.E};function n(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function m(b,d,c,e){var a;if(b._<=b.B){return false}a=b.D.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function r(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function q(a,d,c,e){var b;if(a._<=a.B){return false}b=a.D.charCodeAt(a._-1);if(b>e||b<c){a._--;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function h(a,b,d){var c;if(a._-a.B<b){return false}if(a.D.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function e(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.B;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.S.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.V;if(b<0){return 0}}return-1};function s(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.C)<0||c>(d=a.E)||d>(e=a.A)||e>a.D.length?false:true){s(a,a.C,a.E,f);b=true}return b};function o(a,f){var b;var c;var d;var e;b='';if((c=a.C)<0||c>(d=a.E)||d>(e=a.A)||e>a.D.length?false:true){b=a.D.slice(a.C,a.E)}return b};d.prototype.H=function(){return false};d.prototype.T=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.B=0;this.C=d;this.E=e;this.H();a=this.D;this.F['.'+b]=a}return a};d.prototype.stemWord=d.prototype.T;d.prototype.U=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.B=0;this.C=g;this.E=h;this.H();a=this.D;this.F['.'+c]=a}d.push(a)}return d};d.prototype.stemWords=d.prototype.U;function 
a(){d.call(this);this.I_x=0;this.I_p1=0;this.S_ch=''};j([a],d);a.prototype.K=function(a){this.I_x=a.I_x;this.I_p1=a.I_p1;this.S_ch=a.S_ch;v(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.P=function(){var g;var d;var b;var e;var c;var f;var i;var j;var k;var h;this.I_p1=j=this.A;g=i=this._;b=i+3|0;if(0>b||b>j){return false}h=this._=b;this.I_x=h;this._=g;a:while(true){d=this._;e=true;b:while(e===true){e=false;if(!n(this,a.g_v,97,248)){break b}this._=d;break a}k=this._=d;if(k>=this.A){return false}this._++}a:while(true){c=true;b:while(c===true){c=false;if(!r(this,a.g_v,97,248)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;f=true;a:while(f===true){f=false;if(!(this.I_p1<this.I_x)){break a}this.I_p1=this.I_x}return true};a.prototype.r_mark_regions=a.prototype.P;function G(b){var h;var e;var c;var f;var d;var g;var j;var k;var l;var i;b.I_p1=k=b.A;h=j=b._;c=j+3|0;if(0>c||c>k){return false}i=b._=c;b.I_x=i;b._=h;a:while(true){e=b._;f=true;b:while(f===true){f=false;if(!n(b,a.g_v,97,248)){break b}b._=e;break a}l=b._=e;if(l>=b.A){return false}b._++}a:while(true){d=true;b:while(d===true){d=false;if(!r(b,a.g_v,97,248)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;g=true;a:while(g===true){g=false;if(!(b.I_p1<b.I_x)){break a}b.I_p1=b.I_x}return true};a.prototype.O=function(){var b;var f;var d;var g;var h;var i;f=this.A-(g=this._);if(g<this.I_p1){return false}h=this._=this.I_p1;d=this.B;this.B=h;i=this._=this.A-f;this.E=i;b=e(this,a.a_0,32);if(b===0){this.B=d;return false}this.C=this._;this.B=d;switch(b){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!m(this,a.g_s_ending,97,229)){return false}if(!c(this,'')){return false}break}return true};a.prototype.r_main_suffix=a.prototype.O;function H(b){var d;var g;var f;var h;var i;var j;g=b.A-(h=b._);if(h<b.I_p1){return false}i=b._=b.I_p1;f=b.B;b.B=i;j=b._=b.A-g;b.E=j;d=e(b,a.a_0,32);if(d===0){b.B=f;return false}b.C=b._;b.B=f;switch(d){case 0:return false;case 
1:if(!c(b,'')){return false}break;case 2:if(!m(b,a.g_s_ending,97,229)){return false}if(!c(b,'')){return false}break}return true};a.prototype.N=function(){var f;var g;var b;var h;var d;var i;var j;var k;var l;f=(h=this.A)-(d=this._);g=h-d;if(d<this.I_p1){return false}i=this._=this.I_p1;b=this.B;this.B=i;j=this._=this.A-g;this.E=j;if(e(this,a.a_1,4)===0){this.B=b;return false}this.C=this._;l=this.B=b;k=this._=this.A-f;if(k<=l){return false}this._--;this.C=this._;return!c(this,'')?false:true};a.prototype.r_consonant_pair=a.prototype.N;function k(b){var i;var j;var d;var g;var f;var k;var l;var m;var h;i=(g=b.A)-(f=b._);j=g-f;if(f<b.I_p1){return false}k=b._=b.I_p1;d=b.B;b.B=k;l=b._=b.A-j;b.E=l;if(e(b,a.a_1,4)===0){b.B=d;return false}b.C=b._;h=b.B=d;m=b._=b.A-i;if(m<=h){return false}b._--;b.C=b._;return!c(b,'')?false:true};a.prototype.Q=function(){var f;var l;var m;var d;var j;var b;var g;var n;var i;var p;var o;l=this.A-this._;b=true;a:while(b===true){b=false;this.E=this._;if(!h(this,2,'st')){break a}this.C=this._;if(!h(this,2,'ig')){break a}if(!c(this,'')){return false}}i=this._=(n=this.A)-l;m=n-i;if(i<this.I_p1){return false}p=this._=this.I_p1;d=this.B;this.B=p;o=this._=this.A-m;this.E=o;f=e(this,a.a_2,5);if(f===0){this.B=d;return false}this.C=this._;this.B=d;switch(f){case 0:return false;case 1:if(!c(this,'')){return false}j=this.A-this._;g=true;a:while(g===true){g=false;if(!k(this)){break a}}this._=this.A-j;break;case 2:if(!c(this,'løs')){return false}break}return true};a.prototype.r_other_suffix=a.prototype.Q;function F(b){var d;var p;var m;var f;var l;var g;var i;var o;var j;var q;var n;p=b.A-b._;g=true;a:while(g===true){g=false;b.E=b._;if(!h(b,2,'st')){break a}b.C=b._;if(!h(b,2,'ig')){break a}if(!c(b,'')){return false}}j=b._=(o=b.A)-p;m=o-j;if(j<b.I_p1){return false}q=b._=b.I_p1;f=b.B;b.B=q;n=b._=b.A-m;b.E=n;d=e(b,a.a_2,5);if(d===0){b.B=f;return false}b.C=b._;b.B=f;switch(d){case 0:return false;case 1:if(!c(b,'')){return 
false}l=b.A-b._;i=true;a:while(i===true){i=false;if(!k(b)){break a}}b._=b.A-l;break;case 2:if(!c(b,'løs')){return false}break}return true};a.prototype.R=function(){var e;var b;var d;var f;var g;var i;var j;e=this.A-(f=this._);if(f<this.I_p1){return false}g=this._=this.I_p1;b=this.B;this.B=g;i=this._=this.A-e;this.E=i;if(!q(this,a.g_v,97,248)){this.B=b;return false}this.C=this._;j=this.S_ch=o(this,this.S_ch);if(j===''){return false}this.B=b;return!(d=this.S_ch,h(this,d.length,d))?false:!c(this,'')?false:true};a.prototype.r_undouble=a.prototype.R;function B(b){var f;var d;var e;var g;var i;var j;var k;f=b.A-(g=b._);if(g<b.I_p1){return false}i=b._=b.I_p1;d=b.B;b.B=i;j=b._=b.A-f;b.E=j;if(!q(b,a.g_v,97,248)){b.B=d;return false}b.C=b._;k=b.S_ch=o(b,b.S_ch);if(k===''){return false}b.B=d;return!(e=b.S_ch,h(b,e.length,e))?false:!c(b,'')?false:true};a.prototype.H=function(){var i;var g;var h;var j;var b;var c;var d;var a;var e;var l;var m;var n;var o;var p;var q;var f;i=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}l=this._=i;this.B=l;n=this._=m=this.A;g=m-n;c=true;a:while(c===true){c=false;if(!H(this)){break a}}p=this._=(o=this.A)-g;h=o-p;d=true;a:while(d===true){d=false;if(!k(this)){break a}}f=this._=(q=this.A)-h;j=q-f;a=true;a:while(a===true){a=false;if(!F(this)){break a}}this._=this.A-j;e=true;a:while(e===true){e=false;if(!B(this)){break a}}this._=this.B;return true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var d;c='DanishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;i(a,'methodObject',function(){return new a});i(a,'a_0',function(){return[new b('hed',-1,1),new b('ethed',0,1),new b('ered',-1,1),new b('e',-1,1),new b('erede',3,1),new b('ende',3,1),new b('erende',5,1),new b('ene',3,1),new b('erne',3,1),new b('ere',3,1),new b('en',-1,1),new 
b('heden',10,1),new b('eren',10,1),new b('er',-1,1),new b('heder',13,1),new b('erer',13,1),new b('s',-1,2),new b('heds',16,1),new b('es',16,1),new b('endes',18,1),new b('erendes',19,1),new b('enes',18,1),new b('ernes',18,1),new b('eres',18,1),new b('ens',16,1),new b('hedens',24,1),new b('erens',24,1),new b('ers',16,1),new b('ets',16,1),new b('erets',28,1),new b('et',-1,1),new b('eret',30,1)]});i(a,'a_1',function(){return[new b('gd',-1,-1),new b('dt',-1,-1),new b('gt',-1,-1),new b('kt',-1,-1)]});i(a,'a_2',function(){return[new b('ig',-1,1),new b('lig',0,1),new b('elig',1,1),new b('els',-1,1),new b('løst',-1,2)]});i(a,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128]});i(a,'g_s_ending',function(){return[239,254,42,3,0,0,0,0,0,0,0,0,0,0,0,0,16]});var p={'src/stemmer.jsx':{Stemmer:l},'src/danish-stemmer.jsx':{DanishStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/danish-stemmer.jsx").DanishStemmer;
"""
@@ -134,5 +133,5 @@ class SearchDanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('danish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/de.py b/sphinx/search/de.py
index 90fc4d0fe..5413e0732 100644
--- a/sphinx/search/de.py
+++ b/sphinx/search/de.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.de
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-german_stopwords = parse_stop_word(u'''
+german_stopwords = parse_stop_word('''
|source: http://snowball.tartarus.org/algorithms/german/stop.txt
aber | but
@@ -299,7 +298,7 @@ zwar | indeed
zwischen | between
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function H(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function g(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function I(a,b,c){return a[b]=a[b]/c|0}var C=parseInt;var r=parseFloat;function J(a){return a!==a}var z=isFinite;var y=encodeURIComponent;var x=decodeURIComponent;var w=encodeURI;var u=decodeURI;var t=Object.prototype.toString;var B=Object.prototype.hasOwnProperty;function i(){}j.require=function(b){var a=q[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return i.getResults!=null};j.getProfileResults=function(){return(i.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(i.postResults==null)throw new Error('profiler has not been turned on');return i.postResults(a,b)};j.resetProfileResults=function(){if(i.resetResults==null)throw new Error('profiler has not been turned on');return i.resetResults()};j.DEBUG=false;function s(){};l([s],Error);function c(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([c],Object);function o(){};l([o],Object);function e(){var a;var b;var c;this.G={};a=this.D='';b=this._=0;c=this.A=a.length;this.E=0;this.C=b;this.B=c};l([e],o);function v(a,b){a.D=b.D;a._=b._;a.A=b.A;a.E=b.E;a.C=b.C;a.B=b.B};function f(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function m(b,d,c,e){var a;if(b._<=b.E){return false}a=b.D.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function n(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function k(a,b,d){var c;if(a.A-a._<b){return false}if(a.D.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function d(a,b,d){var c;if(a._-a.E<b){return false}if(a.D.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function p(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.D.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function h(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.E;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function D(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function b(a,f){var b;var c;var d;var e;b=false;if((c=a.C)<0||c>(d=a.B)||d>(e=a.A)||e>a.D.length?false:true){D(a,a.C,a.B,f);b=true}return b};e.prototype.J=function(){return false};e.prototype.W=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.E=0;this.C=d;this.B=e;this.J();a=this.D;this.G['.'+b]=a}return a};e.prototype.stemWord=e.prototype.W;e.prototype.X=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.E=0;this.C=g;this.B=h;this.J();a=this.D;this.G['.'+c]=a}d.push(a)}return d};e.prototype.stemWords=e.prototype.X;function a(){e.call(this);this.I_x=0;this.I_p2=0;this.I_p1=0};l([a],e);a.prototype.M=function(a){this.I_x=a.I_x;this.I_p2=a.I_p2;this.I_p1=a.I_p1;v(this,a)};a.prototype.copy_from=a.prototype.M;a.prototype.U=function(){var m;var r;var n;var o;var d;var q;var e;var c;var g;var h;var i;var j;var l;var s;var p;m=this._;a:while(true){r=this._;e=true;b:while(e===true){e=false;c=true;c:while(c===true){c=false;n=this._;g=true;d:while(g===true){g=false;this.C=this._;if(!k(this,1,'ß')){break d}this.B=this._;if(!b(this,'ss')){return false}break c}s=this._=n;if(s>=this.A){break b}this._++}continue a}this._=r;break a}this._=m;b:while(true){o=this._;h=true;d:while(h===true){h=false;e:while(true){d=this._;i=true;a:while(i===true){i=false;if(!f(this,a.g_v,97,252)){break a}this.C=this._;j=true;f:while(j===true){j=false;q=this._;l=true;c:while(l===true){l=false;if(!k(this,1,'u')){break c}this.B=this._;if(!f(this,a.g_v,97,252)){break c}if(!b(this,'U')){return false}break f}this._=q;if(!k(this,1,'y')){break a}this.B=this._;if(!f(this,a.g_v,97,252)){break a}if(!b(this,'Y')){return false}}this._=d;break e}p=this._=d;if(p>=this.A){break d}this._++}continue b}this._=o;break b}return true};a.prototype.r_prelude=a.prototype.U;function G(c){var s;var n;var o;var p;var e;var r;var d;var g;var h;var i;var j;var l;var m;var t;var q;s=c._;a:while(true){n=c._;d=true;b:while(d===true){d=false;g=true;c:while(g===true){g=false;o=c._;h=true;d:while(h===true){h=false;c.C=c._;if(!k(c,1,'ß')){break d}c.B=c._;if(!b(c,'ss')){return false}break c}t=c._=o;if(t>=c.A){break b}c._++}continue a}c._=n;break a}c._=s;b:while(true){p=c._;i=true;d:while(i===true){i=false;e:while(true){e=c._;j=true;a:while(j===true){j=false;if(!f(c,a.g_v,97,252)){break 
a}c.C=c._;l=true;f:while(l===true){l=false;r=c._;m=true;c:while(m===true){m=false;if(!k(c,1,'u')){break c}c.B=c._;if(!f(c,a.g_v,97,252)){break c}if(!b(c,'U')){return false}break f}c._=r;if(!k(c,1,'y')){break a}c.B=c._;if(!f(c,a.g_v,97,252)){break a}if(!b(c,'Y')){return false}}c._=e;break e}q=c._=e;if(q>=c.A){break d}c._++}continue b}c._=p;break b}return true};a.prototype.S=function(){var j;var b;var d;var e;var c;var g;var h;var i;var l;var k;this.I_p1=i=this.A;this.I_p2=i;j=l=this._;b=l+3|0;if(0>b||b>i){return false}k=this._=b;this.I_x=k;this._=j;a:while(true){d=true;b:while(d===true){d=false;if(!f(this,a.g_v,97,252)){break b}break a}if(this._>=this.A){return false}this._++}a:while(true){e=true;b:while(e===true){e=false;if(!n(this,a.g_v,97,252)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;c=true;a:while(c===true){c=false;if(!(this.I_p1<this.I_x)){break a}this.I_p1=this.I_x}a:while(true){g=true;b:while(g===true){g=false;if(!f(this,a.g_v,97,252)){break b}break a}if(this._>=this.A){return false}this._++}a:while(true){h=true;b:while(h===true){h=false;if(!n(this,a.g_v,97,252)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p2=this._;return true};a.prototype.r_mark_regions=a.prototype.S;function F(b){var k;var c;var e;var g;var d;var h;var i;var j;var m;var l;b.I_p1=j=b.A;b.I_p2=j;k=m=b._;c=m+3|0;if(0>c||c>j){return false}l=b._=c;b.I_x=l;b._=k;a:while(true){e=true;b:while(e===true){e=false;if(!f(b,a.g_v,97,252)){break b}break a}if(b._>=b.A){return false}b._++}a:while(true){g=true;b:while(g===true){g=false;if(!n(b,a.g_v,97,252)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;d=true;a:while(d===true){d=false;if(!(b.I_p1<b.I_x)){break a}b.I_p1=b.I_x}a:while(true){h=true;b:while(h===true){h=false;if(!f(b,a.g_v,97,252)){break b}break a}if(b._>=b.A){return false}b._++}a:while(true){i=true;b:while(i===true){i=false;if(!n(b,a.g_v,97,252)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p2=b._;return 
true};a.prototype.T=function(){var c;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.C=this._;c=p(this,a.a_0,6);if(c===0){break a}this.B=this._;switch(c){case 0:break a;case 1:if(!b(this,'y')){return false}break;case 2:if(!b(this,'u')){return false}break;case 3:if(!b(this,'a')){return false}break;case 4:if(!b(this,'o')){return false}break;case 5:if(!b(this,'u')){return false}break;case 6:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};a.prototype.r_postlude=a.prototype.T;function E(c){var d;var f;var e;b:while(true){f=c._;e=true;a:while(e===true){e=false;c.C=c._;d=p(c,a.a_0,6);if(d===0){break a}c.B=c._;switch(d){case 0:break a;case 1:if(!b(c,'y')){return false}break;case 2:if(!b(c,'u')){return false}break;case 3:if(!b(c,'a')){return false}break;case 4:if(!b(c,'o')){return false}break;case 5:if(!b(c,'u')){return false}break;case 6:if(c._>=c.A){break a}c._++;break}continue b}c._=f;break b}return true};a.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};a.prototype.r_R1=a.prototype.Q;a.prototype.R=function(){return!(this.I_p2<=this._)?false:true};a.prototype.r_R2=a.prototype.R;a.prototype.V=function(){var c;var z;var n;var x;var y;var f;var A;var B;var p;var w;var g;var j;var k;var l;var e;var o;var i;var q;var r;var s;var t;var u;var v;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var C;z=this.A-this._;j=true;a:while(j===true){j=false;this.B=this._;c=h(this,a.a_1,7);if(c===0){break a}this.C=D=this._;if(!(!(this.I_p1<=D)?false:true)){break a}switch(c){case 0:break a;case 1:if(!b(this,'')){return false}break;case 2:if(!b(this,'')){return false}n=this.A-this._;k=true;b:while(k===true){k=false;this.B=this._;if(!d(this,1,'s')){this._=this.A-n;break b}this.C=this._;if(!d(this,3,'nis')){this._=this.A-n;break b}if(!b(this,'')){return false}}break;case 3:if(!m(this,a.g_s_ending,98,116)){break a}if(!b(this,'')){return 
false}break}}G=this._=(F=this.A)-z;x=F-G;l=true;a:while(l===true){l=false;this.B=this._;c=h(this,a.a_2,4);if(c===0){break a}this.C=E=this._;if(!(!(this.I_p1<=E)?false:true)){break a}switch(c){case 0:break a;case 1:if(!b(this,'')){return false}break;case 2:if(!m(this,a.g_st_ending,98,116)){break a}e=this._-3|0;if(this.E>e||e>this.A){break a}this._=e;if(!b(this,'')){return false}break}}C=this._=(M=this.A)-x;y=M-C;o=true;a:while(o===true){o=false;this.B=this._;c=h(this,a.a_4,8);if(c===0){break a}this.C=H=this._;if(!(!(this.I_p2<=H)?false:true)){break a}switch(c){case 0:break a;case 1:if(!b(this,'')){return false}f=this.A-this._;i=true;b:while(i===true){i=false;this.B=this._;if(!d(this,2,'ig')){this._=this.A-f;break b}this.C=I=this._;A=this.A-I;q=true;c:while(q===true){q=false;if(!d(this,1,'e')){break c}this._=this.A-f;break b}J=this._=this.A-A;if(!(!(this.I_p2<=J)?false:true)){this._=this.A-f;break b}if(!b(this,'')){return false}}break;case 2:B=this.A-this._;r=true;b:while(r===true){r=false;if(!d(this,1,'e')){break b}break a}this._=this.A-B;if(!b(this,'')){return false}break;case 3:if(!b(this,'')){return false}p=this.A-this._;s=true;b:while(s===true){s=false;this.B=this._;t=true;c:while(t===true){t=false;w=this.A-this._;u=true;d:while(u===true){u=false;if(!d(this,2,'er')){break d}break c}this._=this.A-w;if(!d(this,2,'en')){this._=this.A-p;break b}}this.C=K=this._;if(!(!(this.I_p1<=K)?false:true)){this._=this.A-p;break b}if(!b(this,'')){return false}}break;case 4:if(!b(this,'')){return false}g=this.A-this._;v=true;b:while(v===true){v=false;this.B=this._;c=h(this,a.a_3,2);if(c===0){this._=this.A-g;break b}this.C=L=this._;if(!(!(this.I_p2<=L)?false:true)){this._=this.A-g;break b}switch(c){case 0:this._=this.A-g;break b;case 1:if(!b(this,'')){return false}break}}break}}this._=this.A-y;return true};a.prototype.r_standard_suffix=a.prototype.V;function A(c){var e;var A;var j;var y;var z;var g;var B;var C;var q;var x;var i;var k;var l;var n;var f;var p;var o;var r;var s;var 
t;var u;var v;var w;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var D;A=c.A-c._;k=true;a:while(k===true){k=false;c.B=c._;e=h(c,a.a_1,7);if(e===0){break a}c.C=E=c._;if(!(!(c.I_p1<=E)?false:true)){break a}switch(e){case 0:break a;case 1:if(!b(c,'')){return false}break;case 2:if(!b(c,'')){return false}j=c.A-c._;l=true;b:while(l===true){l=false;c.B=c._;if(!d(c,1,'s')){c._=c.A-j;break b}c.C=c._;if(!d(c,3,'nis')){c._=c.A-j;break b}if(!b(c,'')){return false}}break;case 3:if(!m(c,a.g_s_ending,98,116)){break a}if(!b(c,'')){return false}break}}H=c._=(G=c.A)-A;y=G-H;n=true;a:while(n===true){n=false;c.B=c._;e=h(c,a.a_2,4);if(e===0){break a}c.C=F=c._;if(!(!(c.I_p1<=F)?false:true)){break a}switch(e){case 0:break a;case 1:if(!b(c,'')){return false}break;case 2:if(!m(c,a.g_st_ending,98,116)){break a}f=c._-3|0;if(c.E>f||f>c.A){break a}c._=f;if(!b(c,'')){return false}break}}D=c._=(N=c.A)-y;z=N-D;p=true;a:while(p===true){p=false;c.B=c._;e=h(c,a.a_4,8);if(e===0){break a}c.C=I=c._;if(!(!(c.I_p2<=I)?false:true)){break a}switch(e){case 0:break a;case 1:if(!b(c,'')){return false}g=c.A-c._;o=true;b:while(o===true){o=false;c.B=c._;if(!d(c,2,'ig')){c._=c.A-g;break b}c.C=J=c._;B=c.A-J;r=true;c:while(r===true){r=false;if(!d(c,1,'e')){break c}c._=c.A-g;break b}K=c._=c.A-B;if(!(!(c.I_p2<=K)?false:true)){c._=c.A-g;break b}if(!b(c,'')){return false}}break;case 2:C=c.A-c._;s=true;b:while(s===true){s=false;if(!d(c,1,'e')){break b}break a}c._=c.A-C;if(!b(c,'')){return false}break;case 3:if(!b(c,'')){return false}q=c.A-c._;t=true;b:while(t===true){t=false;c.B=c._;u=true;c:while(u===true){u=false;x=c.A-c._;v=true;d:while(v===true){v=false;if(!d(c,2,'er')){break d}break c}c._=c.A-x;if(!d(c,2,'en')){c._=c.A-q;break b}}c.C=L=c._;if(!(!(c.I_p1<=L)?false:true)){c._=c.A-q;break b}if(!b(c,'')){return false}}break;case 4:if(!b(c,'')){return false}i=c.A-c._;w=true;b:while(w===true){w=false;c.B=c._;e=h(c,a.a_3,2);if(e===0){c._=c.A-i;break b}c.C=M=c._;if(!(!(c.I_p2<=M)?false:true)){c._=c.A-i;break 
b}switch(e){case 0:c._=c.A-i;break b;case 1:if(!b(c,'')){return false}break}}break}}c._=c.A-z;return true};a.prototype.J=function(){var f;var g;var h;var b;var a;var c;var d;var i;var j;var e;f=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}i=this._=f;g=i;a=true;a:while(a===true){a=false;if(!F(this)){break a}}j=this._=g;this.E=j;this._=this.A;c=true;a:while(c===true){c=false;if(!A(this)){break a}}e=this._=this.E;h=e;d=true;a:while(d===true){d=false;if(!E(this)){break a}}this._=h;return true};a.prototype.stem=a.prototype.J;a.prototype.N=function(b){return b instanceof a};a.prototype.equals=a.prototype.N;a.prototype.O=function(){var c;var a;var b;var d;c='GermanStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.O;a.serialVersionUID=1;g(a,'methodObject',function(){return new a});g(a,'a_0',function(){return[new c('',-1,6),new c('U',0,2),new c('Y',0,1),new c('ä',0,3),new c('ö',0,4),new c('ü',0,5)]});g(a,'a_1',function(){return[new c('e',-1,2),new c('em',-1,1),new c('en',-1,2),new c('ern',-1,1),new c('er',-1,1),new c('s',-1,3),new c('es',5,2)]});g(a,'a_2',function(){return[new c('en',-1,1),new c('er',-1,1),new c('st',-1,2),new c('est',2,1)]});g(a,'a_3',function(){return[new c('ig',-1,1),new c('lich',-1,1)]});g(a,'a_4',function(){return[new c('end',-1,1),new c('ig',-1,2),new c('ung',-1,1),new c('lich',-1,3),new c('isch',-1,2),new c('ik',-1,2),new c('heit',-1,3),new c('keit',-1,4)]});g(a,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32,8]});g(a,'g_s_ending',function(){return[117,30,5]});g(a,'g_st_ending',function(){return[117,30,4]});var q={'src/stemmer.jsx':{Stemmer:o},'src/german-stemmer.jsx':{GermanStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/german-stemmer.jsx").GermanStemmer;
"""
@@ -317,5 +316,5 @@ class SearchGerman(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('german')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/en.py b/sphinx/search/en.py
index fe9b7d8da..ebf13b958 100644
--- a/sphinx/search/en.py
+++ b/sphinx/search/en.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.en
~~~~~~~~~~~~~~~~
@@ -16,7 +15,7 @@ if False:
# For type annotation
from typing import Dict # NOQA
-english_stopwords = set(u"""
+english_stopwords = set("""
a and are as at
be but by
for
@@ -226,5 +225,5 @@ class SearchEnglish(SearchLanguage):
self.stemmer = get_stemmer()
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stem(word.lower())
diff --git a/sphinx/search/es.py b/sphinx/search/es.py
index 6fda9caba..c6f0dae9c 100644
--- a/sphinx/search/es.py
+++ b/sphinx/search/es.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.es
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-spanish_stopwords = parse_stop_word(u'''
+spanish_stopwords = parse_stop_word('''
|source: http://snowball.tartarus.org/algorithms/spanish/stop.txt
de | from, of
la | the, her
@@ -359,7 +358,7 @@ tenidas
tened
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(k){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function I(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function g(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function J(a,b,c){return a[b]=a[b]/c|0}var p=parseInt;var z=parseFloat;function K(a){return a!==a}var x=isFinite;var w=encodeURIComponent;var u=decodeURIComponent;var t=encodeURI;var s=decodeURI;var A=Object.prototype.toString;var q=Object.prototype.hasOwnProperty;function j(){}k.require=function(b){var a=o[b];return a!==undefined?a:null};k.profilerIsRunning=function(){return j.getResults!=null};k.getProfileResults=function(){return(j.getResults||function(){return{}})()};k.postProfileResults=function(a,b){if(j.postResults==null)throw new Error('profiler has not been turned on');return j.postResults(a,b)};k.resetProfileResults=function(){if(j.resetResults==null)throw new Error('profiler has not been turned on');return j.resetResults()};k.DEBUG=false;function r(){};l([r],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function m(){};l([m],Object);function i(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};l([i],m);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function f(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function h(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function d(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function n(f,m,p){var b;var 
d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function e(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function B(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){B(a,a.B,a.C,f);b=true}return b};i.prototype.J=function(){return false};i.prototype.a=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};i.prototype.stemWord=i.prototype.a;i.prototype.b=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};i.prototype.stemWords=i.prototype.b;function 
b(){i.call(this);this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],i);b.prototype.M=function(a){this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.U=function(){var u;var w;var x;var y;var t;var l;var d;var e;var g;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;d=true;g:while(d===true){d=false;w=this._;e=true;b:while(e===true){e=false;if(!f(this,b.g_v,97,252)){break b}g=true;f:while(g===true){g=false;x=this._;i=true;c:while(i===true){i=false;if(!h(this,b.g_v,97,252)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!f(this,b.g_v,97,252)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!f(this,b.g_v,97,252)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!h(this,b.g_v,97,252)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!h(this,b.g_v,97,252)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!h(this,b.g_v,97,252)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!f(this,b.g_v,97,252)){break d}break e}if(this._>=this.A){break b}this._++}break c}this._=y;if(!f(this,b.g_v,97,252)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!f(this,b.g_v,97,252)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!h(this,b.g_v,97,252)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!f(this,b.g_v,97,252)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!h(this,b.g_v,97,252)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.U;function 
E(a){var x;var y;var z;var u;var v;var l;var d;var e;var g;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;d=true;g:while(d===true){d=false;y=a._;e=true;b:while(e===true){e=false;if(!f(a,b.g_v,97,252)){break b}g=true;f:while(g===true){g=false;z=a._;i=true;c:while(i===true){i=false;if(!h(a,b.g_v,97,252)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!f(a,b.g_v,97,252)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!f(a,b.g_v,97,252)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!h(a,b.g_v,97,252)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!h(a,b.g_v,97,252)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!h(a,b.g_v,97,252)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!f(a,b.g_v,97,252)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!f(a,b.g_v,97,252)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!f(a,b.g_v,97,252)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!h(a,b.g_v,97,252)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!f(a,b.g_v,97,252)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!h(a,b.g_v,97,252)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=n(this,b.a_0,6);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'a')){return false}break;case 2:if(!c(this,'e')){return false}break;case 3:if(!c(this,'i')){return false}break;case 4:if(!c(this,'o')){return false}break;case 5:if(!c(this,'u')){return false}break;case 
6:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function F(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=n(a,b.a_0,6);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'a')){return false}break;case 2:if(!c(a,'e')){return false}break;case 3:if(!c(a,'i')){return false}break;case 4:if(!c(a,'o')){return false}break;case 5:if(!c(a,'u')){return false}break;case 6:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.T=function(){var a;this.C=this._;if(e(this,b.a_1,13)===0){return false}this.B=this._;a=e(this,b.a_2,11);if(a===0){return false}if(!(!(this.I_pV<=this._)?false:true)){return false}switch(a){case 0:return false;case 1:this.B=this._;if(!c(this,'iendo')){return false}break;case 2:this.B=this._;if(!c(this,'ando')){return false}break;case 3:this.B=this._;if(!c(this,'ar')){return false}break;case 4:this.B=this._;if(!c(this,'er')){return false}break;case 5:this.B=this._;if(!c(this,'ir')){return false}break;case 6:if(!c(this,'')){return false}break;case 7:if(!d(this,1,'u')){return false}if(!c(this,'')){return false}break}return true};b.prototype.r_attached_pronoun=b.prototype.T;function G(a){var f;a.C=a._;if(e(a,b.a_1,13)===0){return false}a.B=a._;f=e(a,b.a_2,11);if(f===0){return false}if(!(!(a.I_pV<=a._)?false:true)){return false}switch(f){case 0:return false;case 1:a.B=a._;if(!c(a,'iendo')){return false}break;case 2:a.B=a._;if(!c(a,'ando')){return false}break;case 3:a.B=a._;if(!c(a,'ar')){return false}break;case 4:a.B=a._;if(!c(a,'er')){return false}break;case 5:a.B=a._;if(!c(a,'ir')){return false}break;case 6:if(!c(a,'')){return 
false}break;case 7:if(!d(a,1,'u')){return false}if(!c(a,'')){return false}break}return true};b.prototype.X=function(){var a;var j;var f;var g;var h;var i;var k;var l;var m;var n;var o;var q;var r;var s;var p;this.C=this._;a=e(this,b.a_6,46);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}j=this.A-this._;k=true;a:while(k===true){k=false;this.C=this._;if(!d(this,2,'ic')){this._=this.A-j;break a}this.B=q=this._;if(!(!(this.I_p2<=q)?false:true)){this._=this.A-j;break a}if(!c(this,'')){return false}}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'log')){return false}break;case 4:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'u')){return false}break;case 5:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'ente')){return false}break;case 6:if(!(!(this.I_p1<=this._)?false:true)){return false}if(!c(this,'')){return false}f=this.A-this._;l=true;a:while(l===true){l=false;this.C=this._;a=e(this,b.a_3,4);if(a===0){this._=this.A-f;break a}this.B=r=this._;if(!(!(this.I_p2<=r)?false:true)){this._=this.A-f;break a}if(!c(this,'')){return false}switch(a){case 0:this._=this.A-f;break a;case 1:this.C=this._;if(!d(this,2,'at')){this._=this.A-f;break a}this.B=s=this._;if(!(!(this.I_p2<=s)?false:true)){this._=this.A-f;break a}if(!c(this,'')){return false}break}}break;case 7:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}g=this.A-this._;m=true;a:while(m===true){m=false;this.C=this._;a=e(this,b.a_4,3);if(a===0){this._=this.A-g;break a}this.B=this._;switch(a){case 0:this._=this.A-g;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-g;break a}if(!c(this,'')){return false}break}}break;case 8:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return 
false}h=this.A-this._;n=true;a:while(n===true){n=false;this.C=this._;a=e(this,b.a_5,3);if(a===0){this._=this.A-h;break a}this.B=this._;switch(a){case 0:this._=this.A-h;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-h;break a}if(!c(this,'')){return false}break}}break;case 9:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}i=this.A-this._;o=true;a:while(o===true){o=false;this.C=this._;if(!d(this,2,'at')){this._=this.A-i;break a}this.B=p=this._;if(!(!(this.I_p2<=p)?false:true)){this._=this.A-i;break a}if(!c(this,'')){return false}}break}return true};b.prototype.r_standard_suffix=b.prototype.X;function H(a){var f;var k;var g;var h;var i;var j;var l;var m;var n;var o;var p;var r;var s;var t;var q;a.C=a._;f=e(a,b.a_6,46);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 2:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}k=a.A-a._;l=true;a:while(l===true){l=false;a.C=a._;if(!d(a,2,'ic')){a._=a.A-k;break a}a.B=r=a._;if(!(!(a.I_p2<=r)?false:true)){a._=a.A-k;break a}if(!c(a,'')){return false}}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'log')){return false}break;case 4:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'u')){return false}break;case 5:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'ente')){return false}break;case 6:if(!(!(a.I_p1<=a._)?false:true)){return false}if(!c(a,'')){return false}g=a.A-a._;m=true;a:while(m===true){m=false;a.C=a._;f=e(a,b.a_3,4);if(f===0){a._=a.A-g;break a}a.B=s=a._;if(!(!(a.I_p2<=s)?false:true)){a._=a.A-g;break a}if(!c(a,'')){return false}switch(f){case 0:a._=a.A-g;break a;case 1:a.C=a._;if(!d(a,2,'at')){a._=a.A-g;break a}a.B=t=a._;if(!(!(a.I_p2<=t)?false:true)){a._=a.A-g;break a}if(!c(a,'')){return false}break}}break;case 7:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return 
false}h=a.A-a._;n=true;a:while(n===true){n=false;a.C=a._;f=e(a,b.a_4,3);if(f===0){a._=a.A-h;break a}a.B=a._;switch(f){case 0:a._=a.A-h;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-h;break a}if(!c(a,'')){return false}break}}break;case 8:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}i=a.A-a._;o=true;a:while(o===true){o=false;a.C=a._;f=e(a,b.a_5,3);if(f===0){a._=a.A-i;break a}a.B=a._;switch(f){case 0:a._=a.A-i;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-i;break a}if(!c(a,'')){return false}break}}break;case 9:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}j=a.A-a._;p=true;a:while(p===true){p=false;a.C=a._;if(!d(a,2,'at')){a._=a.A-j;break a}a.B=q=a._;if(!(!(a.I_p2<=q)?false:true)){a._=a.A-j;break a}if(!c(a,'')){return false}}break}return true};b.prototype.Z=function(){var a;var g;var f;var h;var i;var j;g=this.A-(h=this._);if(h<this.I_pV){return false}i=this._=this.I_pV;f=this.D;this.D=i;j=this._=this.A-g;this.C=j;a=e(this,b.a_7,12);if(a===0){this.D=f;return false}this.B=this._;this.D=f;switch(a){case 0:return false;case 1:if(!d(this,1,'u')){return false}if(!c(this,'')){return false}break}return true};b.prototype.r_y_verb_suffix=b.prototype.Z;function D(a){var f;var h;var g;var i;var j;var k;h=a.A-(i=a._);if(i<a.I_pV){return false}j=a._=a.I_pV;g=a.D;a.D=j;k=a._=a.A-h;a.C=k;f=e(a,b.a_7,12);if(f===0){a.D=g;return false}a.B=a._;a.D=g;switch(f){case 0:return false;case 1:if(!d(a,1,'u')){return false}if(!c(a,'')){return false}break}return true};b.prototype.Y=function(){var a;var i;var f;var g;var j;var h;var k;var l;var m;i=this.A-(k=this._);if(k<this.I_pV){return false}l=this._=this.I_pV;f=this.D;this.D=l;m=this._=this.A-i;this.C=m;a=e(this,b.a_8,96);if(a===0){this.D=f;return false}this.B=this._;this.D=f;switch(a){case 0:return false;case 1:g=this.A-this._;h=true;a:while(h===true){h=false;if(!d(this,1,'u')){this._=this.A-g;break a}j=this.A-this._;if(!d(this,1,'g')){this._=this.A-g;break 
a}this._=this.A-j}this.B=this._;if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}return true};b.prototype.r_verb_suffix=b.prototype.Y;function C(a){var g;var j;var h;var f;var k;var i;var m;var n;var l;j=a.A-(m=a._);if(m<a.I_pV){return false}n=a._=a.I_pV;h=a.D;a.D=n;l=a._=a.A-j;a.C=l;g=e(a,b.a_8,96);if(g===0){a.D=h;return false}a.B=a._;a.D=h;switch(g){case 0:return false;case 1:f=a.A-a._;i=true;a:while(i===true){i=false;if(!d(a,1,'u')){a._=a.A-f;break a}k=a.A-a._;if(!d(a,1,'g')){a._=a.A-f;break a}a._=a.A-k}a.B=a._;if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}return true};b.prototype.W=function(){var f;var a;var h;var g;var i;var j;this.C=this._;f=e(this,b.a_9,8);if(f===0){return false}this.B=this._;switch(f){case 0:return false;case 1:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 2:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}a=this.A-this._;g=true;a:while(g===true){g=false;this.C=this._;if(!d(this,1,'u')){this._=this.A-a;break a}this.B=i=this._;h=this.A-i;if(!d(this,1,'g')){this._=this.A-a;break a}j=this._=this.A-h;if(!(!(this.I_pV<=j)?false:true)){this._=this.A-a;break a}if(!c(this,'')){return false}}break}return true};b.prototype.r_residual_suffix=b.prototype.W;function y(a){var g;var f;var i;var h;var j;var k;a.C=a._;g=e(a,b.a_9,8);if(g===0){return false}a.B=a._;switch(g){case 0:return false;case 1:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 2:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}f=a.A-a._;h=true;a:while(h===true){h=false;a.C=a._;if(!d(a,1,'u')){a._=a.A-f;break a}a.B=j=a._;i=a.A-j;if(!d(a,1,'g')){a._=a.A-f;break a}k=a._=a.A-i;if(!(!(a.I_pV<=k)?false:true)){a._=a.A-f;break a}if(!c(a,'')){return false}}break}return true};b.prototype.J=function(){var k;var l;var m;var b;var j;var c;var d;var e;var f;var a;var g;var h;var i;var o;var p;var q;var r;var s;var 
n;k=this._;c=true;a:while(c===true){c=false;if(!E(this)){break a}}o=this._=k;this.D=o;q=this._=p=this.A;l=p-q;d=true;a:while(d===true){d=false;if(!G(this)){break a}}s=this._=(r=this.A)-l;m=r-s;e=true;b:while(e===true){e=false;f=true;a:while(f===true){f=false;b=this.A-this._;a=true;c:while(a===true){a=false;if(!H(this)){break c}break a}this._=this.A-b;g=true;c:while(g===true){g=false;if(!D(this)){break c}break a}this._=this.A-b;if(!C(this)){break b}}}this._=this.A-m;h=true;a:while(h===true){h=false;if(!y(this)){break a}}n=this._=this.D;j=n;i=true;a:while(i===true){i=false;if(!F(this)){break a}}this._=j;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='SpanishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;g(b,'methodObject',function(){return new b});g(b,'a_0',function(){return[new a('',-1,6),new a('á',0,1),new a('é',0,2),new a('í',0,3),new a('ó',0,4),new a('ú',0,5)]});g(b,'a_1',function(){return[new a('la',-1,-1),new a('sela',0,-1),new a('le',-1,-1),new a('me',-1,-1),new a('se',-1,-1),new a('lo',-1,-1),new a('selo',5,-1),new a('las',-1,-1),new a('selas',7,-1),new a('les',-1,-1),new a('los',-1,-1),new a('selos',10,-1),new a('nos',-1,-1)]});g(b,'a_2',function(){return[new a('ando',-1,6),new a('iendo',-1,6),new a('yendo',-1,7),new a('ándo',-1,2),new a('iéndo',-1,1),new a('ar',-1,6),new a('er',-1,6),new a('ir',-1,6),new a('ár',-1,3),new a('ér',-1,4),new a('ír',-1,5)]});g(b,'a_3',function(){return[new a('ic',-1,-1),new a('ad',-1,-1),new a('os',-1,-1),new a('iv',-1,1)]});g(b,'a_4',function(){return[new a('able',-1,1),new a('ible',-1,1),new a('ante',-1,1)]});g(b,'a_5',function(){return[new a('ic',-1,1),new a('abil',-1,1),new a('iv',-1,1)]});g(b,'a_6',function(){return[new a('ica',-1,1),new a('ancia',-1,2),new a('encia',-1,5),new a('adora',-1,2),new 
a('osa',-1,1),new a('ista',-1,1),new a('iva',-1,9),new a('anza',-1,1),new a('logía',-1,3),new a('idad',-1,8),new a('able',-1,1),new a('ible',-1,1),new a('ante',-1,2),new a('mente',-1,7),new a('amente',13,6),new a('ación',-1,2),new a('ución',-1,4),new a('ico',-1,1),new a('ismo',-1,1),new a('oso',-1,1),new a('amiento',-1,1),new a('imiento',-1,1),new a('ivo',-1,9),new a('ador',-1,2),new a('icas',-1,1),new a('ancias',-1,2),new a('encias',-1,5),new a('adoras',-1,2),new a('osas',-1,1),new a('istas',-1,1),new a('ivas',-1,9),new a('anzas',-1,1),new a('logías',-1,3),new a('idades',-1,8),new a('ables',-1,1),new a('ibles',-1,1),new a('aciones',-1,2),new a('uciones',-1,4),new a('adores',-1,2),new a('antes',-1,2),new a('icos',-1,1),new a('ismos',-1,1),new a('osos',-1,1),new a('amientos',-1,1),new a('imientos',-1,1),new a('ivos',-1,9)]});g(b,'a_7',function(){return[new a('ya',-1,1),new a('ye',-1,1),new a('yan',-1,1),new a('yen',-1,1),new a('yeron',-1,1),new a('yendo',-1,1),new a('yo',-1,1),new a('yas',-1,1),new a('yes',-1,1),new a('yais',-1,1),new a('yamos',-1,1),new a('yó',-1,1)]});g(b,'a_8',function(){return[new a('aba',-1,2),new a('ada',-1,2),new a('ida',-1,2),new a('ara',-1,2),new a('iera',-1,2),new a('ía',-1,2),new a('aría',5,2),new a('ería',5,2),new a('iría',5,2),new a('ad',-1,2),new a('ed',-1,2),new a('id',-1,2),new a('ase',-1,2),new a('iese',-1,2),new a('aste',-1,2),new a('iste',-1,2),new a('an',-1,2),new a('aban',16,2),new a('aran',16,2),new a('ieran',16,2),new a('ían',16,2),new a('arían',20,2),new a('erían',20,2),new a('irían',20,2),new a('en',-1,1),new a('asen',24,2),new a('iesen',24,2),new a('aron',-1,2),new a('ieron',-1,2),new a('arán',-1,2),new a('erán',-1,2),new a('irán',-1,2),new a('ado',-1,2),new a('ido',-1,2),new a('ando',-1,2),new a('iendo',-1,2),new a('ar',-1,2),new a('er',-1,2),new a('ir',-1,2),new a('as',-1,2),new a('abas',39,2),new a('adas',39,2),new a('idas',39,2),new a('aras',39,2),new a('ieras',39,2),new a('ías',39,2),new a('arías',45,2),new 
a('erías',45,2),new a('irías',45,2),new a('es',-1,1),new a('ases',49,2),new a('ieses',49,2),new a('abais',-1,2),new a('arais',-1,2),new a('ierais',-1,2),new a('íais',-1,2),new a('aríais',55,2),new a('eríais',55,2),new a('iríais',55,2),new a('aseis',-1,2),new a('ieseis',-1,2),new a('asteis',-1,2),new a('isteis',-1,2),new a('áis',-1,2),new a('éis',-1,1),new a('aréis',64,2),new a('eréis',64,2),new a('iréis',64,2),new a('ados',-1,2),new a('idos',-1,2),new a('amos',-1,2),new a('ábamos',70,2),new a('áramos',70,2),new a('iéramos',70,2),new a('íamos',70,2),new a('aríamos',74,2),new a('eríamos',74,2),new a('iríamos',74,2),new a('emos',-1,1),new a('aremos',78,2),new a('eremos',78,2),new a('iremos',78,2),new a('ásemos',78,2),new a('iésemos',78,2),new a('imos',-1,2),new a('arás',-1,2),new a('erás',-1,2),new a('irás',-1,2),new a('ís',-1,2),new a('ará',-1,2),new a('erá',-1,2),new a('irá',-1,2),new a('aré',-1,2),new a('eré',-1,2),new a('iré',-1,2),new a('ió',-1,2)]});g(b,'a_9',function(){return[new a('a',-1,1),new a('e',-1,2),new a('o',-1,1),new a('os',-1,1),new a('á',-1,1),new a('é',-1,2),new a('í',-1,1),new a('ó',-1,1)]});g(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,4,10]});var o={'src/stemmer.jsx':{Stemmer:m},'src/spanish-stemmer.jsx':{SpanishStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/spanish-stemmer.jsx").SpanishStemmer;
"""
@@ -377,5 +376,5 @@ class SearchSpanish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('spanish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/fi.py b/sphinx/search/fi.py
index bf757d54a..b8ff1d1f8 100644
--- a/sphinx/search/fi.py
+++ b/sphinx/search/fi.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.fi
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-finnish_stopwords = parse_stop_word(u'''
+finnish_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/finnish/stop.txt
| forms of BE
@@ -109,7 +108,7 @@ nyt | now
itse | self
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function M(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function f(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function N(a,b,c){return a[b]=a[b]/c|0}var s=parseInt;var C=parseFloat;function O(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var v=decodeURI;var u=Object.prototype.toString;var E=Object.prototype.hasOwnProperty;function k(){}j.require=function(b){var a=q[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return k.getResults!=null};j.getProfileResults=function(){return(k.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};j.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};j.DEBUG=false;function t(){};l([t],Error);function b(a,b,c){this.F=a.length;this.M=a;this.N=b;this.H=c;this.G=null;this.S=null};function m(a,b,c,d,e){this.F=a.length;this.M=a;this.N=b;this.H=c;this.G=d;this.S=e};l([b,m],Object);function p(){};l([p],Object);function g(){var a;var b;var c;this.I={};a=this.E='';b=this._=0;c=this.A=a.length;this.B=0;this.C=b;this.D=c};l([g],p);function w(a,b){a.E=b.E;a._=b._;a.A=b.A;a.B=b.B;a.C=b.C;a.D=b.D};function n(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};g.prototype.L=function(c,b,d){var a;if(this._<=this.B){return false}a=this.E.charCodeAt(this._-1);if(a>d||a<b){return false}a-=b;if((c[a>>>3]&1<<(a&7))===0){return false}this._--;return true};function h(b,d,c,e){var 
a;if(b._<=b.B){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function o(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function i(a,d,c,e){var b;if(a._<=a.B){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};g.prototype.K=function(a,c){var b;if(this._-this.B<a){return false}if(this.E.slice((b=this._)-a,b)!==c){return false}this._-=a;return true};function c(a,b,d){var c;if(a._-a.B<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};g.prototype.Q=function(l,o){var b;var d;var e;var m;var f;var j;var k;var h;var g;var c;var a;var i;var n;b=0;d=o;e=this._;m=this.B;f=0;j=0;k=false;while(true){h=b+(d-b>>1);g=0;c=f<j?f:j;a=l[h];for(i=a.F-1-c;i>=0;i--){if(e-c===m){g=-1;break}g=this.E.charCodeAt(e-1-c)-a.M.charCodeAt(i);if(g!==0){break}c++}if(g<0){d=h;j=c}else{b=h;f=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(k){break}k=true}}while(true){a=l[b];if(f>=a.F){this._=e-a.F|0;if(a.G==null){return a.H}n=a.G(this);this._=e-a.F|0;if(n){return a.H}}b=a.N;if(b<0){return 0}}return-1};function e(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.B;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.M.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.G==null){return a.H}o=a.G(d);d._=e-a.F|0;if(o){return a.H}}b=a.N;if(b<0){return 0}}return-1};function D(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function d(a,f){var b;var c;var d;var 
e;b=false;if((c=a.C)<0||c>(d=a.D)||d>(e=a.A)||e>a.E.length?false:true){D(a,a.C,a.D,f);b=true}return b};function r(a,f){var b;var c;var d;var e;b='';if((c=a.C)<0||c>(d=a.D)||d>(e=a.A)||e>a.E.length?false:true){b=a.E.slice(a.C,a.D)}return b};g.prototype.J=function(){return false};g.prototype.e=function(b){var a;var c;var d;var e;a=this.I['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.B=0;this.C=d;this.D=e;this.J();a=this.E;this.I['.'+b]=a}return a};g.prototype.stemWord=g.prototype.e;g.prototype.f=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.I['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.B=0;this.C=g;this.D=h;this.J();a=this.E;this.I['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.f;function a(){g.call(this);this.B_ending_removed=false;this.S_x='';this.I_p2=0;this.I_p1=0};l([a],g);a.prototype.O=function(a){this.B_ending_removed=a.B_ending_removed;this.S_x=a.S_x;this.I_p2=a.I_p2;this.I_p1=a.I_p1;w(this,a)};a.prototype.copy_from=a.prototype.O;a.prototype.Y=function(){var b;var c;var d;var e;var f;var g;var h;var i;var j;this.I_p1=i=this.A;this.I_p2=i;a:while(true){b=this._;d=true;b:while(d===true){d=false;if(!n(this,a.g_V1,97,246)){break b}this._=b;break a}h=this._=b;if(h>=this.A){return false}this._++}a:while(true){e=true;b:while(e===true){e=false;if(!o(this,a.g_V1,97,246)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;a:while(true){c=this._;f=true;b:while(f===true){f=false;if(!n(this,a.g_V1,97,246)){break b}this._=c;break a}j=this._=c;if(j>=this.A){return false}this._++}a:while(true){g=true;b:while(g===true){g=false;if(!o(this,a.g_V1,97,246)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p2=this._;return true};a.prototype.r_mark_regions=a.prototype.Y;function H(b){var d;var e;var f;var c;var g;var h;var j;var k;var i;b.I_p1=k=b.A;b.I_p2=k;a:while(true){d=b._;f=true;b:while(f===true){f=false;if(!n(b,a.g_V1,97,246)){break 
b}b._=d;break a}j=b._=d;if(j>=b.A){return false}b._++}a:while(true){c=true;b:while(c===true){c=false;if(!o(b,a.g_V1,97,246)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;a:while(true){e=b._;g=true;b:while(g===true){g=false;if(!n(b,a.g_V1,97,246)){break b}b._=e;break a}i=b._=e;if(i>=b.A){return false}b._++}a:while(true){h=true;b:while(h===true){h=false;if(!o(b,a.g_V1,97,246)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p2=b._;return true};a.prototype.U=function(){return!(this.I_p2<=this._)?false:true};a.prototype.r_R2=a.prototype.U;a.prototype.a=function(){var b;var f;var c;var g;var i;var j;f=this.A-(g=this._);if(g<this.I_p1){return false}i=this._=this.I_p1;c=this.B;this.B=i;j=this._=this.A-f;this.D=j;b=e(this,a.a_0,10);if(b===0){this.B=c;return false}this.C=this._;this.B=c;switch(b){case 0:return false;case 1:if(!h(this,a.g_particle_end,97,246)){return false}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){return false}break}return!d(this,'')?false:true};a.prototype.r_particle_etc=a.prototype.a;function I(b){var c;var g;var f;var i;var j;var k;g=b.A-(i=b._);if(i<b.I_p1){return false}j=b._=b.I_p1;f=b.B;b.B=j;k=b._=b.A-g;b.D=k;c=e(b,a.a_0,10);if(c===0){b.B=f;return false}b.C=b._;b.B=f;switch(c){case 0:return false;case 1:if(!h(b,a.g_particle_end,97,246)){return false}break;case 2:if(!(!(b.I_p2<=b._)?false:true)){return false}break}return!d(b,'')?false:true};a.prototype.b=function(){var b;var h;var f;var i;var g;var j;var k;var l;h=this.A-(j=this._);if(j<this.I_p1){return false}k=this._=this.I_p1;f=this.B;this.B=k;l=this._=this.A-h;this.D=l;b=e(this,a.a_4,9);if(b===0){this.B=f;return false}this.C=this._;this.B=f;switch(b){case 0:return false;case 1:i=this.A-this._;g=true;a:while(g===true){g=false;if(!c(this,1,'k')){break a}return false}this._=this.A-i;if(!d(this,'')){return false}break;case 2:if(!d(this,'')){return false}this.D=this._;if(!c(this,3,'kse')){return false}this.C=this._;if(!d(this,'ksi')){return false}break;case 
3:if(!d(this,'')){return false}break;case 4:if(e(this,a.a_1,6)===0){return false}if(!d(this,'')){return false}break;case 5:if(e(this,a.a_2,6)===0){return false}if(!d(this,'')){return false}break;case 6:if(e(this,a.a_3,2)===0){return false}if(!d(this,'')){return false}break}return true};a.prototype.r_possessive=a.prototype.b;function J(b){var f;var i;var g;var j;var h;var k;var l;var m;i=b.A-(k=b._);if(k<b.I_p1){return false}l=b._=b.I_p1;g=b.B;b.B=l;m=b._=b.A-i;b.D=m;f=e(b,a.a_4,9);if(f===0){b.B=g;return false}b.C=b._;b.B=g;switch(f){case 0:return false;case 1:j=b.A-b._;h=true;a:while(h===true){h=false;if(!c(b,1,'k')){break a}return false}b._=b.A-j;if(!d(b,'')){return false}break;case 2:if(!d(b,'')){return false}b.D=b._;if(!c(b,3,'kse')){return false}b.C=b._;if(!d(b,'ksi')){return false}break;case 3:if(!d(b,'')){return false}break;case 4:if(e(b,a.a_1,6)===0){return false}if(!d(b,'')){return false}break;case 5:if(e(b,a.a_2,6)===0){return false}if(!d(b,'')){return false}break;case 6:if(e(b,a.a_3,2)===0){return false}if(!d(b,'')){return false}break}return true};a.prototype.T=function(){return e(this,a.a_5,7)===0?false:true};a.prototype.r_LONG=a.prototype.T;a.prototype.V=function(){return!c(this,1,'i')?false:!h(this,a.g_V2,97,246)?false:true};a.prototype.r_VI=a.prototype.V;a.prototype.W=function(){var j;var o;var f;var g;var p;var m;var b;var k;var l;var q;var r;var s;var n;o=this.A-(q=this._);if(q<this.I_p1){return false}r=this._=this.I_p1;f=this.B;this.B=r;s=this._=this.A-o;this.D=s;j=e(this,a.a_6,30);if(j===0){this.B=f;return false}this.C=this._;this.B=f;switch(j){case 0:return false;case 1:if(!c(this,1,'a')){return false}break;case 2:if(!c(this,1,'e')){return false}break;case 3:if(!c(this,1,'i')){return false}break;case 4:if(!c(this,1,'o')){return false}break;case 5:if(!c(this,1,'ä')){return false}break;case 6:if(!c(this,1,'ö')){return false}break;case 
7:g=this.A-this._;b=true;a:while(b===true){b=false;p=this.A-this._;k=true;b:while(k===true){k=false;m=this.A-this._;l=true;c:while(l===true){l=false;if(!(e(this,a.a_5,7)===0?false:true)){break c}break b}this._=this.A-m;if(!c(this,2,'ie')){this._=this.A-g;break a}}n=this._=this.A-p;if(n<=this.B){this._=this.A-g;break a}this._--;this.C=this._}break;case 8:if(!h(this,a.g_V1,97,246)){return false}if(!i(this,a.g_V1,97,246)){return false}break;case 9:if(!c(this,1,'e')){return false}break}if(!d(this,'')){return false}this.B_ending_removed=true;return true};a.prototype.r_case_ending=a.prototype.W;function K(b){var f;var o;var g;var j;var p;var n;var k;var l;var m;var r;var s;var t;var q;o=b.A-(r=b._);if(r<b.I_p1){return false}s=b._=b.I_p1;g=b.B;b.B=s;t=b._=b.A-o;b.D=t;f=e(b,a.a_6,30);if(f===0){b.B=g;return false}b.C=b._;b.B=g;switch(f){case 0:return false;case 1:if(!c(b,1,'a')){return false}break;case 2:if(!c(b,1,'e')){return false}break;case 3:if(!c(b,1,'i')){return false}break;case 4:if(!c(b,1,'o')){return false}break;case 5:if(!c(b,1,'ä')){return false}break;case 6:if(!c(b,1,'ö')){return false}break;case 7:j=b.A-b._;k=true;a:while(k===true){k=false;p=b.A-b._;l=true;b:while(l===true){l=false;n=b.A-b._;m=true;c:while(m===true){m=false;if(!(e(b,a.a_5,7)===0?false:true)){break c}break b}b._=b.A-n;if(!c(b,2,'ie')){b._=b.A-j;break a}}q=b._=b.A-p;if(q<=b.B){b._=b.A-j;break a}b._--;b.C=b._}break;case 8:if(!h(b,a.g_V1,97,246)){return false}if(!i(b,a.g_V1,97,246)){return false}break;case 9:if(!c(b,1,'e')){return false}break}if(!d(b,'')){return false}b.B_ending_removed=true;return true};a.prototype.Z=function(){var b;var h;var f;var i;var g;var j;var k;var l;h=this.A-(j=this._);if(j<this.I_p2){return false}k=this._=this.I_p2;f=this.B;this.B=k;l=this._=this.A-h;this.D=l;b=e(this,a.a_7,14);if(b===0){this.B=f;return false}this.C=this._;this.B=f;switch(b){case 0:return false;case 1:i=this.A-this._;g=true;a:while(g===true){g=false;if(!c(this,2,'po')){break a}return 
false}this._=this.A-i;break}return!d(this,'')?false:true};a.prototype.r_other_endings=a.prototype.Z;function L(b){var f;var i;var g;var j;var h;var k;var l;var m;i=b.A-(k=b._);if(k<b.I_p2){return false}l=b._=b.I_p2;g=b.B;b.B=l;m=b._=b.A-i;b.D=m;f=e(b,a.a_7,14);if(f===0){b.B=g;return false}b.C=b._;b.B=g;switch(f){case 0:return false;case 1:j=b.A-b._;h=true;a:while(h===true){h=false;if(!c(b,2,'po')){break a}return false}b._=b.A-j;break}return!d(b,'')?false:true};a.prototype.X=function(){var c;var b;var f;var g;var h;c=this.A-(f=this._);if(f<this.I_p1){return false}g=this._=this.I_p1;b=this.B;this.B=g;h=this._=this.A-c;this.D=h;if(e(this,a.a_8,2)===0){this.B=b;return false}this.C=this._;this.B=b;return!d(this,'')?false:true};a.prototype.r_i_plural=a.prototype.X;function G(b){var f;var c;var g;var h;var i;f=b.A-(g=b._);if(g<b.I_p1){return false}h=b._=b.I_p1;c=b.B;b.B=h;i=b._=b.A-f;b.D=i;if(e(b,a.a_8,2)===0){b.B=c;return false}b.C=b._;b.B=c;return!d(b,'')?false:true};a.prototype.c=function(){var i;var l;var b;var j;var k;var g;var m;var f;var o;var p;var q;var r;var s;var t;var n;l=this.A-(o=this._);if(o<this.I_p1){return false}p=this._=this.I_p1;b=this.B;this.B=p;q=this._=this.A-l;this.D=q;if(!c(this,1,'t')){this.B=b;return false}this.C=r=this._;j=this.A-r;if(!h(this,a.g_V1,97,246)){this.B=b;return false}this._=this.A-j;if(!d(this,'')){return false}this.B=b;k=this.A-(s=this._);if(s<this.I_p2){return false}t=this._=this.I_p2;g=this.B;this.B=t;n=this._=this.A-k;this.D=n;i=e(this,a.a_9,2);if(i===0){this.B=g;return false}this.C=this._;this.B=g;switch(i){case 0:return false;case 1:m=this.A-this._;f=true;a:while(f===true){f=false;if(!c(this,2,'po')){break a}return false}this._=this.A-m;break}return!d(this,'')?false:true};a.prototype.r_t_plural=a.prototype.c;function F(b){var g;var m;var f;var o;var l;var i;var k;var j;var p;var q;var r;var s;var t;var u;var n;m=b.A-(p=b._);if(p<b.I_p1){return false}q=b._=b.I_p1;f=b.B;b.B=q;r=b._=b.A-m;b.D=r;if(!c(b,1,'t')){b.B=f;return 
false}b.C=s=b._;o=b.A-s;if(!h(b,a.g_V1,97,246)){b.B=f;return false}b._=b.A-o;if(!d(b,'')){return false}b.B=f;l=b.A-(t=b._);if(t<b.I_p2){return false}u=b._=b.I_p2;i=b.B;b.B=u;n=b._=b.A-l;b.D=n;g=e(b,a.a_9,2);if(g===0){b.B=i;return false}b.C=b._;b.B=i;switch(g){case 0:return false;case 1:k=b.A-b._;j=true;a:while(j===true){j=false;if(!c(b,2,'po')){break a}return false}b._=b.A-k;break}return!d(b,'')?false:true};a.prototype.d=function(){var x;var q;var s;var t;var u;var v;var w;var y;var f;var g;var j;var k;var l;var m;var n;var b;var o;var z;var p;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var A;x=this.A-(z=this._);if(z<this.I_p1){return false}B=this._=this.I_p1;q=this.B;this.B=B;D=this._=(C=this.A)-x;s=C-D;g=true;a:while(g===true){g=false;t=this.A-this._;if(!(e(this,a.a_5,7)===0?false:true)){break a}p=this._=this.A-t;this.D=p;if(p<=this.B){break a}this._--;this.C=this._;if(!d(this,'')){return false}}F=this._=(E=this.A)-s;u=E-F;j=true;a:while(j===true){j=false;this.D=this._;if(!h(this,a.g_AEI,97,228)){break a}this.C=this._;if(!i(this,a.g_V1,97,246)){break a}if(!d(this,'')){return false}}H=this._=(G=this.A)-u;v=G-H;k=true;a:while(k===true){k=false;this.D=this._;if(!c(this,1,'j')){break a}this.C=this._;l=true;b:while(l===true){l=false;w=this.A-this._;m=true;c:while(m===true){m=false;if(!c(this,1,'o')){break c}break b}this._=this.A-w;if(!c(this,1,'u')){break a}}if(!d(this,'')){return false}}J=this._=(I=this.A)-v;y=I-J;n=true;a:while(n===true){n=false;this.D=this._;if(!c(this,1,'o')){break a}this.C=this._;if(!c(this,1,'j')){break a}if(!d(this,'')){return false}}this._=this.A-y;this.B=q;a:while(true){f=this.A-this._;b=true;b:while(b===true){b=false;if(!i(this,a.g_V1,97,246)){break b}this._=this.A-f;break a}K=this._=this.A-f;if(K<=this.B){return false}this._--}this.D=L=this._;if(L<=this.B){return false}this._--;this.C=this._;A=this.S_x=r(this,this.S_x);return 
A===''?false:!(o=this.S_x,c(this,o.length,o))?false:!d(this,'')?false:true};a.prototype.r_tidy=a.prototype.d;function B(b){var s;var t;var u;var v;var w;var x;var y;var z;var l;var g;var j;var k;var f;var m;var n;var o;var p;var A;var q;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var B;s=b.A-(A=b._);if(A<b.I_p1){return false}C=b._=b.I_p1;t=b.B;b.B=C;E=b._=(D=b.A)-s;u=D-E;g=true;a:while(g===true){g=false;v=b.A-b._;if(!(e(b,a.a_5,7)===0?false:true)){break a}q=b._=b.A-v;b.D=q;if(q<=b.B){break a}b._--;b.C=b._;if(!d(b,'')){return false}}G=b._=(F=b.A)-u;w=F-G;j=true;a:while(j===true){j=false;b.D=b._;if(!h(b,a.g_AEI,97,228)){break a}b.C=b._;if(!i(b,a.g_V1,97,246)){break a}if(!d(b,'')){return false}}I=b._=(H=b.A)-w;x=H-I;k=true;a:while(k===true){k=false;b.D=b._;if(!c(b,1,'j')){break a}b.C=b._;f=true;b:while(f===true){f=false;y=b.A-b._;m=true;c:while(m===true){m=false;if(!c(b,1,'o')){break c}break b}b._=b.A-y;if(!c(b,1,'u')){break a}}if(!d(b,'')){return false}}K=b._=(J=b.A)-x;z=J-K;n=true;a:while(n===true){n=false;b.D=b._;if(!c(b,1,'o')){break a}b.C=b._;if(!c(b,1,'j')){break a}if(!d(b,'')){return false}}b._=b.A-z;b.B=t;a:while(true){l=b.A-b._;o=true;b:while(o===true){o=false;if(!i(b,a.g_V1,97,246)){break b}b._=b.A-l;break a}L=b._=b.A-l;if(L<=b.B){return false}b._--}b.D=M=b._;if(M<=b.B){return false}b._--;b.C=b._;B=b.S_x=r(b,b.S_x);return B===''?false:!(p=b.S_x,c(b,p.length,p))?false:!d(b,'')?false:true};a.prototype.J=function(){var p;var k;var l;var m;var n;var o;var q;var r;var b;var c;var d;var e;var f;var g;var a;var h;var i;var j;var t;var u;var v;var w;var x;var y;var z;var A;var C;var D;var s;p=this._;b=true;a:while(b===true){b=false;if(!H(this)){break a}}t=this._=p;this.B_ending_removed=false;this.B=t;v=this._=u=this.A;k=u-v;c=true;a:while(c===true){c=false;if(!I(this)){break a}}x=this._=(w=this.A)-k;l=w-x;d=true;a:while(d===true){d=false;if(!J(this)){break a}}z=this._=(y=this.A)-l;m=y-z;e=true;a:while(e===true){e=false;if(!K(this)){break 
a}}C=this._=(A=this.A)-m;n=A-C;f=true;a:while(f===true){f=false;if(!L(this)){break a}}this._=this.A-n;g=true;a:while(g===true){g=false;o=this.A-this._;a=true;b:while(a===true){a=false;if(!this.B_ending_removed){break b}q=this.A-this._;h=true;c:while(h===true){h=false;if(!G(this)){break c}}this._=this.A-q;break a}s=this._=(D=this.A)-o;r=D-s;i=true;b:while(i===true){i=false;if(!F(this)){break b}}this._=this.A-r}j=true;a:while(j===true){j=false;if(!B(this)){break a}}this._=this.B;return true};a.prototype.stem=a.prototype.J;a.prototype.P=function(b){return b instanceof a};a.prototype.equals=a.prototype.P;a.prototype.R=function(){var c;var a;var b;var d;c='FinnishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.R;a.serialVersionUID=1;f(a,'methodObject',function(){return new a});f(a,'a_0',function(){return[new b('pa',-1,1),new b('sti',-1,2),new b('kaan',-1,1),new b('han',-1,1),new b('kin',-1,1),new b('hän',-1,1),new b('kään',-1,1),new b('ko',-1,1),new b('pä',-1,1),new b('kö',-1,1)]});f(a,'a_1',function(){return[new b('lla',-1,-1),new b('na',-1,-1),new b('ssa',-1,-1),new b('ta',-1,-1),new b('lta',3,-1),new b('sta',3,-1)]});f(a,'a_2',function(){return[new b('llä',-1,-1),new b('nä',-1,-1),new b('ssä',-1,-1),new b('tä',-1,-1),new b('ltä',3,-1),new b('stä',3,-1)]});f(a,'a_3',function(){return[new b('lle',-1,-1),new b('ine',-1,-1)]});f(a,'a_4',function(){return[new b('nsa',-1,3),new b('mme',-1,3),new b('nne',-1,3),new b('ni',-1,2),new b('si',-1,1),new b('an',-1,4),new b('en',-1,6),new b('än',-1,5),new b('nsä',-1,3)]});f(a,'a_5',function(){return[new b('aa',-1,-1),new b('ee',-1,-1),new b('ii',-1,-1),new b('oo',-1,-1),new b('uu',-1,-1),new b('ää',-1,-1),new b('öö',-1,-1)]});f(a,'a_6',function(){return[new b('a',-1,8),new b('lla',0,-1),new b('na',0,-1),new b('ssa',0,-1),new b('ta',0,-1),new b('lta',4,-1),new b('sta',4,-1),new b('tta',4,9),new b('lle',-1,-1),new b('ine',-1,-1),new b('ksi',-1,-1),new 
b('n',-1,7),new b('han',11,1),new m('den',11,-1,function(c){var b;b=c;return!b.K(1,'i')?false:!b.L(a.g_V2,97,246)?false:true},a.methodObject),new m('seen',11,-1,function(c){var b;b=c;return b.Q(a.a_5,7)===0?false:true},a.methodObject),new b('hen',11,2),new m('tten',11,-1,function(c){var b;b=c;return!b.K(1,'i')?false:!b.L(a.g_V2,97,246)?false:true},a.methodObject),new b('hin',11,3),new m('siin',11,-1,function(c){var b;b=c;return!b.K(1,'i')?false:!b.L(a.g_V2,97,246)?false:true},a.methodObject),new b('hon',11,4),new b('hän',11,5),new b('hön',11,6),new b('ä',-1,8),new b('llä',22,-1),new b('nä',22,-1),new b('ssä',22,-1),new b('tä',22,-1),new b('ltä',26,-1),new b('stä',26,-1),new b('ttä',26,9)]});f(a,'a_7',function(){return[new b('eja',-1,-1),new b('mma',-1,1),new b('imma',1,-1),new b('mpa',-1,1),new b('impa',3,-1),new b('mmi',-1,1),new b('immi',5,-1),new b('mpi',-1,1),new b('impi',7,-1),new b('ejä',-1,-1),new b('mmä',-1,1),new b('immä',10,-1),new b('mpä',-1,1),new b('impä',12,-1)]});f(a,'a_8',function(){return[new b('i',-1,-1),new b('j',-1,-1)]});f(a,'a_9',function(){return[new b('mma',-1,1),new b('imma',0,-1)]});f(a,'g_AEI',function(){return[17,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8]});f(a,'g_V1',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32]});f(a,'g_V2',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32]});f(a,'g_particle_end',function(){return[17,97,24,1,0,0,0,0,0,0,0,0,0,0,0,0,8,0,32]});var q={'src/stemmer.jsx':{Stemmer:p},'src/finnish-stemmer.jsx':{FinnishStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/finnish-stemmer.jsx").FinnishStemmer;
"""
@@ -127,5 +126,5 @@ class SearchFinnish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('finnish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/fr.py b/sphinx/search/fr.py
index 9976c1ca7..0848843f3 100644
--- a/sphinx/search/fr.py
+++ b/sphinx/search/fr.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.fr
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-french_stopwords = parse_stop_word(u'''
+french_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/french/stop.txt
au | a + le
aux | a + les
@@ -195,7 +194,7 @@ sans | without
soi | oneself
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(l){function m(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function P(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function g(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function O(a,b,c){return a[b]=a[b]/c|0}var u=parseInt;var v=parseFloat;function N(a){return a!==a}var x=isFinite;var y=encodeURIComponent;var z=decodeURIComponent;var A=encodeURI;var B=decodeURI;var C=Object.prototype.toString;var D=Object.prototype.hasOwnProperty;function k(){}l.require=function(b){var a=q[b];return a!==undefined?a:null};l.profilerIsRunning=function(){return k.getResults!=null};l.getProfileResults=function(){return(k.getResults||function(){return{}})()};l.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};l.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};l.DEBUG=false;function G(){};m([G],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};m([a],Object);function p(){};m([p],Object);function i(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.B=0;this.D=b;this.C=c};m([i],p);function s(a,b){a.E=b.E;a._=b._;a.A=b.A;a.B=b.B;a.D=b.D;a.C=b.C};function e(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function r(b,d,c,e){var a;if(b._<=b.B){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function o(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function j(a,d,c,e){var b;if(a._<=a.B){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function h(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function d(a,b,d){var c;if(a._-a.B<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function n(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.B;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function E(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.D)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){E(a,a.D,a.C,f);b=true}return b};i.prototype.J=function(){return false};i.prototype.c=function(b){var a;var c;var d;var 
e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.B=0;this.D=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};i.prototype.stemWord=i.prototype.c;i.prototype.d=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.B=0;this.D=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};i.prototype.stemWords=i.prototype.d;function b(){i.call(this);this.I_p2=0;this.I_p1=0;this.I_pV=0};m([b],i);b.prototype.M=function(a){this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;s(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.W=function(){var p;var j;var f;var g;var i;var a;var d;var k;var l;var m;var n;var o;var q;a:while(true){p=this._;i=true;g:while(i===true){i=false;h:while(true){j=this._;a=true;b:while(a===true){a=false;d=true;c:while(d===true){d=false;f=this._;k=true;d:while(k===true){k=false;if(!e(this,b.g_v,97,251)){break d}this.D=this._;l=true;e:while(l===true){l=false;g=this._;m=true;f:while(m===true){m=false;if(!h(this,1,'u')){break f}this.C=this._;if(!e(this,b.g_v,97,251)){break f}if(!c(this,'U')){return false}break e}this._=g;n=true;f:while(n===true){n=false;if(!h(this,1,'i')){break f}this.C=this._;if(!e(this,b.g_v,97,251)){break f}if(!c(this,'I')){return false}break e}this._=g;if(!h(this,1,'y')){break d}this.C=this._;if(!c(this,'Y')){return false}}break c}this._=f;o=true;d:while(o===true){o=false;this.D=this._;if(!h(this,1,'y')){break d}this.C=this._;if(!e(this,b.g_v,97,251)){break d}if(!c(this,'Y')){return false}break c}this._=f;if(!h(this,1,'q')){break b}this.D=this._;if(!h(this,1,'u')){break b}this.C=this._;if(!c(this,'U')){return false}}this._=j;break h}q=this._=j;if(q>=this.A){break g}this._++}continue a}this._=p;break a}return true};b.prototype.r_prelude=b.prototype.W;function H(a){var q;var k;var f;var g;var i;var j;var d;var l;var m;var n;var o;var p;var 
r;a:while(true){q=a._;i=true;g:while(i===true){i=false;h:while(true){k=a._;j=true;b:while(j===true){j=false;d=true;c:while(d===true){d=false;f=a._;l=true;d:while(l===true){l=false;if(!e(a,b.g_v,97,251)){break d}a.D=a._;m=true;e:while(m===true){m=false;g=a._;n=true;f:while(n===true){n=false;if(!h(a,1,'u')){break f}a.C=a._;if(!e(a,b.g_v,97,251)){break f}if(!c(a,'U')){return false}break e}a._=g;o=true;f:while(o===true){o=false;if(!h(a,1,'i')){break f}a.C=a._;if(!e(a,b.g_v,97,251)){break f}if(!c(a,'I')){return false}break e}a._=g;if(!h(a,1,'y')){break d}a.C=a._;if(!c(a,'Y')){return false}}break c}a._=f;p=true;d:while(p===true){p=false;a.D=a._;if(!h(a,1,'y')){break d}a.C=a._;if(!e(a,b.g_v,97,251)){break d}if(!c(a,'Y')){return false}break c}a._=f;if(!h(a,1,'q')){break b}a.D=a._;if(!h(a,1,'u')){break b}a.C=a._;if(!c(a,'U')){return false}}a._=k;break h}r=a._=k;if(r>=a.A){break g}a._++}continue a}a._=q;break a}return true};b.prototype.U=function(){var t;var i;var r;var d;var f;var g;var h;var c;var a;var j;var k;var l;var m;var s;var p;var q;this.I_pV=p=this.A;this.I_p1=p;this.I_p2=p;t=this._;d=true;b:while(d===true){d=false;f=true;c:while(f===true){f=false;i=this._;g=true;a:while(g===true){g=false;if(!e(this,b.g_v,97,251)){break a}if(!e(this,b.g_v,97,251)){break a}if(this._>=this.A){break a}this._++;break c}this._=i;h=true;a:while(h===true){h=false;if(n(this,b.a_0,3)===0){break a}break c}s=this._=i;if(s>=this.A){break b}this._++;a:while(true){c=true;d:while(c===true){c=false;if(!e(this,b.g_v,97,251)){break d}break a}if(this._>=this.A){break b}this._++}}this.I_pV=this._}q=this._=t;r=q;a=true;a:while(a===true){a=false;c:while(true){j=true;b:while(j===true){j=false;if(!e(this,b.g_v,97,251)){break b}break c}if(this._>=this.A){break a}this._++}b:while(true){k=true;c:while(k===true){k=false;if(!o(this,b.g_v,97,251)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){l=true;c:while(l===true){l=false;if(!e(this,b.g_v,97,251)){break c}break 
b}if(this._>=this.A){break a}this._++}c:while(true){m=true;b:while(m===true){m=false;if(!o(this,b.g_v,97,251)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=r;return true};b.prototype.r_mark_regions=b.prototype.U;function I(a){var s;var i;var r;var d;var f;var g;var h;var c;var j;var k;var l;var m;var p;var t;var q;var u;a.I_pV=q=a.A;a.I_p1=q;a.I_p2=q;s=a._;d=true;b:while(d===true){d=false;f=true;c:while(f===true){f=false;i=a._;g=true;a:while(g===true){g=false;if(!e(a,b.g_v,97,251)){break a}if(!e(a,b.g_v,97,251)){break a}if(a._>=a.A){break a}a._++;break c}a._=i;h=true;a:while(h===true){h=false;if(n(a,b.a_0,3)===0){break a}break c}t=a._=i;if(t>=a.A){break b}a._++;a:while(true){c=true;d:while(c===true){c=false;if(!e(a,b.g_v,97,251)){break d}break a}if(a._>=a.A){break b}a._++}}a.I_pV=a._}u=a._=s;r=u;j=true;a:while(j===true){j=false;c:while(true){k=true;b:while(k===true){k=false;if(!e(a,b.g_v,97,251)){break b}break c}if(a._>=a.A){break a}a._++}b:while(true){l=true;c:while(l===true){l=false;if(!o(a,b.g_v,97,251)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){m=true;c:while(m===true){m=false;if(!e(a,b.g_v,97,251)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){p=true;b:while(p===true){p=false;if(!o(a,b.g_v,97,251)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=r;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.D=this._;a=n(this,b.a_1,4);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'i')){return false}break;case 2:if(!c(this,'u')){return false}break;case 3:if(!c(this,'y')){return false}break;case 4:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function J(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.D=a._;d=n(a,b.a_1,4);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'i')){return 
false}break;case 2:if(!c(a,'u')){return false}break;case 3:if(!c(a,'y')){return false}break;case 4:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.Y=function(){var a;var E;var H;var e;var D;var g;var F;var G;var h;var I;var A;var B;var p;var k;var l;var m;var n;var o;var i;var q;var s;var t;var u;var v;var w;var x;var y;var z;var J;var K;var L;var C;this.C=this._;a=f(this,b.a_4,43);if(a===0){return false}this.D=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}E=this.A-this._;p=true;c:while(p===true){p=false;this.C=this._;if(!d(this,2,'ic')){this._=this.A-E;break c}this.D=this._;k=true;b:while(k===true){k=false;H=this.A-this._;l=true;a:while(l===true){l=false;if(!(!(this.I_p2<=this._)?false:true)){break a}if(!c(this,'')){return false}break b}this._=this.A-H;if(!c(this,'iqU')){return false}}}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'log')){return false}break;case 4:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'u')){return false}break;case 5:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'ent')){return false}break;case 6:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}e=this.A-this._;m=true;a:while(m===true){m=false;this.C=this._;a=f(this,b.a_2,6);if(a===0){this._=this.A-e;break a}this.D=this._;switch(a){case 0:this._=this.A-e;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-e;break a}if(!c(this,'')){return false}this.C=this._;if(!d(this,2,'at')){this._=this.A-e;break 
a}this.D=J=this._;if(!(!(this.I_p2<=J)?false:true)){this._=this.A-e;break a}if(!c(this,'')){return false}break;case 2:n=true;b:while(n===true){n=false;D=this.A-this._;o=true;c:while(o===true){o=false;if(!(!(this.I_p2<=this._)?false:true)){break c}if(!c(this,'')){return false}break b}K=this._=this.A-D;if(!(!(this.I_p1<=K)?false:true)){this._=this.A-e;break a}if(!c(this,'eux')){return false}}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-e;break a}if(!c(this,'')){return false}break;case 4:if(!(!(this.I_pV<=this._)?false:true)){this._=this.A-e;break a}if(!c(this,'i')){return false}break}}break;case 7:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}g=this.A-this._;i=true;a:while(i===true){i=false;this.C=this._;a=f(this,b.a_3,3);if(a===0){this._=this.A-g;break a}this.D=this._;switch(a){case 0:this._=this.A-g;break a;case 1:q=true;c:while(q===true){q=false;F=this.A-this._;s=true;b:while(s===true){s=false;if(!(!(this.I_p2<=this._)?false:true)){break b}if(!c(this,'')){return false}break c}this._=this.A-F;if(!c(this,'abl')){return false}}break;case 2:t=true;b:while(t===true){t=false;G=this.A-this._;u=true;c:while(u===true){u=false;if(!(!(this.I_p2<=this._)?false:true)){break c}if(!c(this,'')){return false}break b}this._=this.A-G;if(!c(this,'iqU')){return false}}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-g;break a}if(!c(this,'')){return false}break}}break;case 8:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}h=this.A-this._;v=true;a:while(v===true){v=false;this.C=this._;if(!d(this,2,'at')){this._=this.A-h;break a}this.D=L=this._;if(!(!(this.I_p2<=L)?false:true)){this._=this.A-h;break a}if(!c(this,'')){return false}this.C=this._;if(!d(this,2,'ic')){this._=this.A-h;break a}this.D=this._;w=true;b:while(w===true){w=false;I=this.A-this._;x=true;c:while(x===true){x=false;if(!(!(this.I_p2<=this._)?false:true)){break c}if(!c(this,'')){return false}break 
b}this._=this.A-I;if(!c(this,'iqU')){return false}}}break;case 9:if(!c(this,'eau')){return false}break;case 10:if(!(!(this.I_p1<=this._)?false:true)){return false}if(!c(this,'al')){return false}break;case 11:y=true;a:while(y===true){y=false;A=this.A-this._;z=true;b:while(z===true){z=false;if(!(!(this.I_p2<=this._)?false:true)){break b}if(!c(this,'')){return false}break a}C=this._=this.A-A;if(!(!(this.I_p1<=C)?false:true)){return false}if(!c(this,'eux')){return false}}break;case 12:if(!(!(this.I_p1<=this._)?false:true)){return false}if(!j(this,b.g_v,97,251)){return false}if(!c(this,'')){return false}break;case 13:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'ant')){return false}return false;case 14:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'ent')){return false}return false;case 15:B=this.A-this._;if(!r(this,b.g_v,97,251)){return false}if(!(!(this.I_pV<=this._)?false:true)){return false}this._=this.A-B;if(!c(this,'')){return false}return false}return true};b.prototype.r_standard_suffix=b.prototype.Y;function K(a){var g;var F;var I;var e;var E;var h;var G;var H;var i;var J;var B;var C;var p;var l;var m;var n;var o;var k;var q;var s;var t;var u;var v;var w;var x;var y;var z;var A;var K;var L;var M;var D;a.C=a._;g=f(a,b.a_4,43);if(g===0){return false}a.D=a._;switch(g){case 0:return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 2:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}F=a.A-a._;p=true;c:while(p===true){p=false;a.C=a._;if(!d(a,2,'ic')){a._=a.A-F;break c}a.D=a._;l=true;b:while(l===true){l=false;I=a.A-a._;m=true;a:while(m===true){m=false;if(!(!(a.I_p2<=a._)?false:true)){break a}if(!c(a,'')){return false}break b}a._=a.A-I;if(!c(a,'iqU')){return false}}}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'log')){return false}break;case 4:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'u')){return false}break;case 
5:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'ent')){return false}break;case 6:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}e=a.A-a._;n=true;a:while(n===true){n=false;a.C=a._;g=f(a,b.a_2,6);if(g===0){a._=a.A-e;break a}a.D=a._;switch(g){case 0:a._=a.A-e;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}a.C=a._;if(!d(a,2,'at')){a._=a.A-e;break a}a.D=K=a._;if(!(!(a.I_p2<=K)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}break;case 2:o=true;b:while(o===true){o=false;E=a.A-a._;k=true;c:while(k===true){k=false;if(!(!(a.I_p2<=a._)?false:true)){break c}if(!c(a,'')){return false}break b}L=a._=a.A-E;if(!(!(a.I_p1<=L)?false:true)){a._=a.A-e;break a}if(!c(a,'eux')){return false}}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}break;case 4:if(!(!(a.I_pV<=a._)?false:true)){a._=a.A-e;break a}if(!c(a,'i')){return false}break}}break;case 7:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}h=a.A-a._;q=true;a:while(q===true){q=false;a.C=a._;g=f(a,b.a_3,3);if(g===0){a._=a.A-h;break a}a.D=a._;switch(g){case 0:a._=a.A-h;break a;case 1:s=true;c:while(s===true){s=false;G=a.A-a._;t=true;b:while(t===true){t=false;if(!(!(a.I_p2<=a._)?false:true)){break b}if(!c(a,'')){return false}break c}a._=a.A-G;if(!c(a,'abl')){return false}}break;case 2:u=true;b:while(u===true){u=false;H=a.A-a._;v=true;c:while(v===true){v=false;if(!(!(a.I_p2<=a._)?false:true)){break c}if(!c(a,'')){return false}break b}a._=a.A-H;if(!c(a,'iqU')){return false}}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-h;break a}if(!c(a,'')){return false}break}}break;case 8:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}i=a.A-a._;w=true;a:while(w===true){w=false;a.C=a._;if(!d(a,2,'at')){a._=a.A-i;break a}a.D=M=a._;if(!(!(a.I_p2<=M)?false:true)){a._=a.A-i;break a}if(!c(a,'')){return false}a.C=a._;if(!d(a,2,'ic')){a._=a.A-i;break 
a}a.D=a._;x=true;b:while(x===true){x=false;J=a.A-a._;y=true;c:while(y===true){y=false;if(!(!(a.I_p2<=a._)?false:true)){break c}if(!c(a,'')){return false}break b}a._=a.A-J;if(!c(a,'iqU')){return false}}}break;case 9:if(!c(a,'eau')){return false}break;case 10:if(!(!(a.I_p1<=a._)?false:true)){return false}if(!c(a,'al')){return false}break;case 11:z=true;a:while(z===true){z=false;B=a.A-a._;A=true;b:while(A===true){A=false;if(!(!(a.I_p2<=a._)?false:true)){break b}if(!c(a,'')){return false}break a}D=a._=a.A-B;if(!(!(a.I_p1<=D)?false:true)){return false}if(!c(a,'eux')){return false}}break;case 12:if(!(!(a.I_p1<=a._)?false:true)){return false}if(!j(a,b.g_v,97,251)){return false}if(!c(a,'')){return false}break;case 13:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'ant')){return false}return false;case 14:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'ent')){return false}return false;case 15:C=a.A-a._;if(!r(a,b.g_v,97,251)){return false}if(!(!(a.I_pV<=a._)?false:true)){return false}a._=a.A-C;if(!c(a,'')){return false}return false}return true};b.prototype.T=function(){var d;var e;var a;var g;var h;var i;e=this.A-(g=this._);if(g<this.I_pV){return false}h=this._=this.I_pV;a=this.B;this.B=h;i=this._=this.A-e;this.C=i;d=f(this,b.a_5,35);if(d===0){this.B=a;return false}this.D=this._;switch(d){case 0:this.B=a;return false;case 1:if(!j(this,b.g_v,97,251)){this.B=a;return false}if(!c(this,'')){return false}break}this.B=a;return true};b.prototype.r_i_verb_suffix=b.prototype.T;function L(a){var e;var g;var d;var h;var i;var k;g=a.A-(h=a._);if(h<a.I_pV){return false}i=a._=a.I_pV;d=a.B;a.B=i;k=a._=a.A-g;a.C=k;e=f(a,b.a_5,35);if(e===0){a.B=d;return false}a.D=a._;switch(e){case 0:a.B=d;return false;case 1:if(!j(a,b.g_v,97,251)){a.B=d;return false}if(!c(a,'')){return false}break}a.B=d;return true};b.prototype.b=function(){var e;var h;var a;var i;var g;var j;var k;var l;h=this.A-(j=this._);if(j<this.I_pV){return 
false}k=this._=this.I_pV;a=this.B;this.B=k;l=this._=this.A-h;this.C=l;e=f(this,b.a_6,38);if(e===0){this.B=a;return false}this.D=this._;switch(e){case 0:this.B=a;return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){this.B=a;return false}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break;case 3:if(!c(this,'')){return false}i=this.A-this._;g=true;a:while(g===true){g=false;this.C=this._;if(!d(this,1,'e')){this._=this.A-i;break a}this.D=this._;if(!c(this,'')){return false}}break}this.B=a;return true};b.prototype.r_verb_suffix=b.prototype.b;function M(a){var g;var i;var e;var j;var h;var k;var l;var m;i=a.A-(k=a._);if(k<a.I_pV){return false}l=a._=a.I_pV;e=a.B;a.B=l;m=a._=a.A-i;a.C=m;g=f(a,b.a_6,38);if(g===0){a.B=e;return false}a.D=a._;switch(g){case 0:a.B=e;return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){a.B=e;return false}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break;case 3:if(!c(a,'')){return false}j=a.A-a._;h=true;a:while(h===true){h=false;a.C=a._;if(!d(a,1,'e')){a._=a.A-j;break a}a.D=a._;if(!c(a,'')){return false}}break}a.B=e;return true};b.prototype.X=function(){var h;var g;var m;var n;var a;var l;var e;var i;var k;var p;var q;var r;var o;g=this.A-this._;e=true;a:while(e===true){e=false;this.C=this._;if(!d(this,1,'s')){this._=this.A-g;break a}this.D=p=this._;m=this.A-p;if(!j(this,b.g_keep_with_s,97,232)){this._=this.A-g;break a}this._=this.A-m;if(!c(this,'')){return false}}n=this.A-(q=this._);if(q<this.I_pV){return false}r=this._=this.I_pV;a=this.B;this.B=r;o=this._=this.A-n;this.C=o;h=f(this,b.a_7,7);if(h===0){this.B=a;return false}this.D=this._;switch(h){case 0:this.B=a;return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){this.B=a;return false}i=true;a:while(i===true){i=false;l=this.A-this._;k=true;b:while(k===true){k=false;if(!d(this,1,'s')){break b}break a}this._=this.A-l;if(!d(this,1,'t')){this.B=a;return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'i')){return false}break;case 
3:if(!c(this,'')){return false}break;case 4:if(!d(this,2,'gu')){this.B=a;return false}if(!c(this,'')){return false}break}this.B=a;return true};b.prototype.r_residual_suffix=b.prototype.X;function w(a){var g;var h;var p;var n;var e;var m;var i;var k;var l;var q;var r;var s;var o;h=a.A-a._;i=true;a:while(i===true){i=false;a.C=a._;if(!d(a,1,'s')){a._=a.A-h;break a}a.D=q=a._;p=a.A-q;if(!j(a,b.g_keep_with_s,97,232)){a._=a.A-h;break a}a._=a.A-p;if(!c(a,'')){return false}}n=a.A-(r=a._);if(r<a.I_pV){return false}s=a._=a.I_pV;e=a.B;a.B=s;o=a._=a.A-n;a.C=o;g=f(a,b.a_7,7);if(g===0){a.B=e;return false}a.D=a._;switch(g){case 0:a.B=e;return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){a.B=e;return false}k=true;a:while(k===true){k=false;m=a.A-a._;l=true;b:while(l===true){l=false;if(!d(a,1,'s')){break b}break a}a._=a.A-m;if(!d(a,1,'t')){a.B=e;return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'i')){return false}break;case 3:if(!c(a,'')){return false}break;case 4:if(!d(a,2,'gu')){a.B=e;return false}if(!c(a,'')){return false}break}a.B=e;return true};b.prototype.a=function(){var d;var a;d=this.A-this._;if(f(this,b.a_8,5)===0){return false}a=this._=this.A-d;this.C=a;if(a<=this.B){return false}this._--;this.D=this._;return!c(this,'')?false:true};b.prototype.r_un_double=b.prototype.a;function t(a){var e;var d;e=a.A-a._;if(f(a,b.a_8,5)===0){return false}d=a._=a.A-e;a.C=d;if(d<=a.B){return false}a._--;a.D=a._;return!c(a,'')?false:true};b.prototype.Z=function(){var h;var a;var e;var f;var g;a=1;a:while(true){e=true;b:while(e===true){e=false;if(!j(this,b.g_v,97,251)){break b}a--;continue a}break a}if(a>0){return false}this.C=this._;f=true;a:while(f===true){f=false;h=this.A-this._;g=true;b:while(g===true){g=false;if(!d(this,1,'é')){break b}break a}this._=this.A-h;if(!d(this,1,'è')){return false}}this.D=this._;return!c(this,'e')?false:true};b.prototype.r_un_accent=b.prototype.Z;function F(a){var i;var e;var f;var g;var 
h;e=1;a:while(true){f=true;b:while(f===true){f=false;if(!j(a,b.g_v,97,251)){break b}e--;continue a}break a}if(e>0){return false}a.C=a._;g=true;a:while(g===true){g=false;i=a.A-a._;h=true;b:while(h===true){h=false;if(!d(a,1,'é')){break b}break a}a._=a.A-i;if(!d(a,1,'è')){return false}}a.D=a._;return!c(a,'e')?false:true};b.prototype.J=function(){var u;var z;var A;var B;var C;var j;var s;var v;var x;var y;var e;var f;var g;var h;var i;var a;var b;var k;var l;var m;var n;var o;var p;var q;var D;var E;var G;var N;var O;var P;var Q;var R;var r;u=this._;e=true;a:while(e===true){e=false;if(!H(this)){break a}}D=this._=u;z=D;f=true;a:while(f===true){f=false;if(!I(this)){break a}}N=this._=z;this.B=N;P=this._=O=this.A;A=O-P;g=true;c:while(g===true){g=false;h=true;d:while(h===true){h=false;B=this.A-this._;i=true;e:while(i===true){i=false;C=this.A-this._;a=true;a:while(a===true){a=false;j=this.A-this._;b=true;b:while(b===true){b=false;if(!K(this)){break b}break a}this._=this.A-j;k=true;b:while(k===true){k=false;if(!L(this)){break b}break a}this._=this.A-j;if(!M(this)){break e}}G=this._=(E=this.A)-C;s=E-G;l=true;a:while(l===true){l=false;this.C=this._;m=true;b:while(m===true){m=false;v=this.A-this._;n=true;f:while(n===true){n=false;if(!d(this,1,'Y')){break f}this.D=this._;if(!c(this,'i')){return false}break b}this._=this.A-v;if(!d(this,1,'ç')){this._=this.A-s;break a}this.D=this._;if(!c(this,'c')){return false}}}break d}this._=this.A-B;if(!w(this)){break c}}}R=this._=(Q=this.A)-A;x=Q-R;o=true;a:while(o===true){o=false;if(!t(this)){break a}}this._=this.A-x;p=true;a:while(p===true){p=false;if(!F(this)){break a}}r=this._=this.B;y=r;q=true;a:while(q===true){q=false;if(!J(this)){break a}}this._=y;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='FrenchStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return 
a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;g(b,'methodObject',function(){return new b});g(b,'a_0',function(){return[new a('col',-1,-1),new a('par',-1,-1),new a('tap',-1,-1)]});g(b,'a_1',function(){return[new a('',-1,4),new a('I',0,1),new a('U',0,2),new a('Y',0,3)]});g(b,'a_2',function(){return[new a('iqU',-1,3),new a('abl',-1,3),new a('Ièr',-1,4),new a('ièr',-1,4),new a('eus',-1,2),new a('iv',-1,1)]});g(b,'a_3',function(){return[new a('ic',-1,2),new a('abil',-1,1),new a('iv',-1,3)]});g(b,'a_4',function(){return[new a('iqUe',-1,1),new a('atrice',-1,2),new a('ance',-1,1),new a('ence',-1,5),new a('logie',-1,3),new a('able',-1,1),new a('isme',-1,1),new a('euse',-1,11),new a('iste',-1,1),new a('ive',-1,8),new a('if',-1,8),new a('usion',-1,4),new a('ation',-1,2),new a('ution',-1,4),new a('ateur',-1,2),new a('iqUes',-1,1),new a('atrices',-1,2),new a('ances',-1,1),new a('ences',-1,5),new a('logies',-1,3),new a('ables',-1,1),new a('ismes',-1,1),new a('euses',-1,11),new a('istes',-1,1),new a('ives',-1,8),new a('ifs',-1,8),new a('usions',-1,4),new a('ations',-1,2),new a('utions',-1,4),new a('ateurs',-1,2),new a('ments',-1,15),new a('ements',30,6),new a('issements',31,12),new a('ités',-1,7),new a('ment',-1,15),new a('ement',34,6),new a('issement',35,12),new a('amment',34,13),new a('emment',34,14),new a('aux',-1,10),new a('eaux',39,9),new a('eux',-1,1),new a('ité',-1,7)]});g(b,'a_5',function(){return[new a('ira',-1,1),new a('ie',-1,1),new a('isse',-1,1),new a('issante',-1,1),new a('i',-1,1),new a('irai',4,1),new a('ir',-1,1),new a('iras',-1,1),new a('ies',-1,1),new a('îmes',-1,1),new a('isses',-1,1),new a('issantes',-1,1),new a('îtes',-1,1),new a('is',-1,1),new a('irais',13,1),new a('issais',13,1),new a('irions',-1,1),new a('issions',-1,1),new a('irons',-1,1),new a('issons',-1,1),new a('issants',-1,1),new a('it',-1,1),new a('irait',21,1),new a('issait',21,1),new a('issant',-1,1),new a('iraIent',-1,1),new a('issaIent',-1,1),new a('irent',-1,1),new 
a('issent',-1,1),new a('iront',-1,1),new a('ît',-1,1),new a('iriez',-1,1),new a('issiez',-1,1),new a('irez',-1,1),new a('issez',-1,1)]});g(b,'a_6',function(){return[new a('a',-1,3),new a('era',0,2),new a('asse',-1,3),new a('ante',-1,3),new a('ée',-1,2),new a('ai',-1,3),new a('erai',5,2),new a('er',-1,2),new a('as',-1,3),new a('eras',8,2),new a('âmes',-1,3),new a('asses',-1,3),new a('antes',-1,3),new a('âtes',-1,3),new a('ées',-1,2),new a('ais',-1,3),new a('erais',15,2),new a('ions',-1,1),new a('erions',17,2),new a('assions',17,3),new a('erons',-1,2),new a('ants',-1,3),new a('és',-1,2),new a('ait',-1,3),new a('erait',23,2),new a('ant',-1,3),new a('aIent',-1,3),new a('eraIent',26,2),new a('èrent',-1,2),new a('assent',-1,3),new a('eront',-1,2),new a('ât',-1,3),new a('ez',-1,2),new a('iez',32,2),new a('eriez',33,2),new a('assiez',33,3),new a('erez',32,2),new a('é',-1,2)]});g(b,'a_7',function(){return[new a('e',-1,3),new a('Ière',0,2),new a('ière',0,2),new a('ion',-1,1),new a('Ier',-1,2),new a('ier',-1,2),new a('ë',-1,4)]});g(b,'a_8',function(){return[new a('ell',-1,-1),new a('eill',-1,-1),new a('enn',-1,-1),new a('onn',-1,-1),new a('ett',-1,-1)]});g(b,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,128,130,103,8,5]});g(b,'g_keep_with_s',function(){return[1,65,20,0,0,0,0,0,0,0,0,0,0,0,0,0,128]});var q={'src/stemmer.jsx':{Stemmer:p},'src/french-stemmer.jsx':{FrenchStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/french-stemmer.jsx").FrenchStemmer;
"""
@@ -213,5 +212,5 @@ class SearchFrench(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('french')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/hu.py b/sphinx/search/hu.py
index 03cbf8c29..973475cb3 100644
--- a/sphinx/search/hu.py
+++ b/sphinx/search/hu.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.hu
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-hungarian_stopwords = parse_stop_word(u'''
+hungarian_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/hungarian/stop.txt
| prepared by Anna Tordai
a
@@ -222,7 +221,7 @@ viszont
volna
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(h){function j(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function P(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function e(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function O(a,b,c){return a[b]=a[b]/c|0}var u=parseInt;var v=parseFloat;function N(a){return a!==a}var x=isFinite;var y=encodeURIComponent;var z=decodeURIComponent;var B=encodeURI;var C=decodeURI;var E=Object.prototype.toString;var F=Object.prototype.hasOwnProperty;function i(){}h.require=function(b){var a=q[b];return a!==undefined?a:null};h.profilerIsRunning=function(){return i.getResults!=null};h.getProfileResults=function(){return(i.getResults||function(){return{}})()};h.postProfileResults=function(a,b){if(i.postResults==null)throw new Error('profiler has not been turned on');return i.postResults(a,b)};h.resetProfileResults=function(){if(i.resetResults==null)throw new Error('profiler has not been turned on');return i.resetResults()};h.DEBUG=false;function r(){};j([r],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};j([a],Object);function n(){};j([n],Object);function f(){var a;var b;var c;this.G={};a=this.D='';b=this._=0;c=this.A=a.length;this.E=0;this.B=b;this.C=c};j([f],n);function s(a,b){a.D=b.D;a._=b._;a.A=b.A;a.E=b.E;a.B=b.B;a.C=b.C};function k(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function l(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function o(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var 
o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.D.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function d(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.E;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function A(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function b(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.D.length?false:true){A(a,a.B,a.C,f);b=true}return b};f.prototype.J=function(){return false};f.prototype.e=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.E=0;this.B=d;this.C=e;this.J();a=this.D;this.G['.'+b]=a}return a};f.prototype.stemWord=f.prototype.e;f.prototype.f=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.E=0;this.B=g;this.C=h;this.J();a=this.D;this.G['.'+c]=a}d.push(a)}return d};f.prototype.stemWords=f.prototype.f;function c(){f.call(this);this.I_p1=0};j([c],f);c.prototype.M=function(a){this.I_p1=a.I_p1;s(this,a)};c.prototype.copy_from=c.prototype.M;c.prototype.X=function(){var m;var b;var j;var d;var e;var a;var f;var g;var h;var n;var 
i;this.I_p1=this.A;d=true;b:while(d===true){d=false;m=this._;e=true;a:while(e===true){e=false;if(!k(this,c.g_v,97,252)){break a}c:while(true){b=this._;a=true;d:while(a===true){a=false;if(!l(this,c.g_v,97,252)){break d}this._=b;break c}n=this._=b;if(n>=this.A){break a}this._++}f=true;c:while(f===true){f=false;j=this._;g=true;d:while(g===true){g=false;if(o(this,c.a_0,8)===0){break d}break c}i=this._=j;if(i>=this.A){break a}this._++}this.I_p1=this._;break b}this._=m;if(!l(this,c.g_v,97,252)){return false}a:while(true){h=true;c:while(h===true){h=false;if(!k(this,c.g_v,97,252)){break c}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._}return true};c.prototype.r_mark_regions=c.prototype.X;function D(a){var j;var d;var n;var e;var b;var f;var g;var h;var i;var p;var m;a.I_p1=a.A;e=true;b:while(e===true){e=false;j=a._;b=true;a:while(b===true){b=false;if(!k(a,c.g_v,97,252)){break a}c:while(true){d=a._;f=true;d:while(f===true){f=false;if(!l(a,c.g_v,97,252)){break d}a._=d;break c}p=a._=d;if(p>=a.A){break a}a._++}g=true;c:while(g===true){g=false;n=a._;h=true;d:while(h===true){h=false;if(o(a,c.a_0,8)===0){break d}break c}m=a._=n;if(m>=a.A){break a}a._++}a.I_p1=a._;break b}a._=j;if(!l(a,c.g_v,97,252)){return false}a:while(true){i=true;c:while(i===true){i=false;if(!k(a,c.g_v,97,252)){break c}break a}if(a._>=a.A){return false}a._++}a.I_p1=a._}return true};c.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};c.prototype.r_R1=c.prototype.Q;c.prototype.d=function(){var a;var e;this.C=this._;a=d(this,c.a_1,2);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'a')){return false}break;case 2:if(!b(this,'e')){return false}break}return true};c.prototype.r_v_ending=c.prototype.d;function p(a){var e;var f;a.C=a._;e=d(a,c.a_1,2);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'a')){return false}break;case 
2:if(!b(a,'e')){return false}break}return true};c.prototype.U=function(){var a;a=this.A-this._;if(d(this,c.a_2,23)===0){return false}this._=this.A-a;return true};c.prototype.r_double=c.prototype.U;function g(a){var b;b=a.A-a._;if(d(a,c.a_2,23)===0){return false}a._=a.A-b;return true};c.prototype.c=function(){var a;var c;var d;if(this._<=this.E){return false}this._--;this.C=c=this._;a=c-1|0;if(this.E>a||a>this.A){return false}d=this._=a;this.B=d;return!b(this,'')?false:true};c.prototype.r_undouble=c.prototype.c;function m(a){var c;var d;var e;if(a._<=a.E){return false}a._--;a.C=d=a._;c=d-1|0;if(a.E>c||c>a.A){return false}e=a._=c;a.B=e;return!b(a,'')?false:true};c.prototype.W=function(){var a;var e;this.C=this._;a=d(this,c.a_3,2);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!g(this)){return false}break;case 2:if(!g(this)){return false}break}return!b(this,'')?false:!m(this)?false:true};c.prototype.r_instrum=c.prototype.W;function H(a){var e;var f;a.C=a._;e=d(a,c.a_3,2);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!g(a)){return false}break;case 2:if(!g(a)){return false}break}return!b(a,'')?false:!m(a)?false:true};c.prototype.R=function(){var a;this.C=this._;if(d(this,c.a_4,44)===0){return false}this.B=a=this._;return!(!(this.I_p1<=a)?false:true)?false:!b(this,'')?false:!p(this)?false:true};c.prototype.r_case=c.prototype.R;function I(a){var e;a.C=a._;if(d(a,c.a_4,44)===0){return false}a.B=e=a._;return!(!(a.I_p1<=e)?false:true)?false:!b(a,'')?false:!p(a)?false:true};c.prototype.T=function(){var a;var e;this.C=this._;a=d(this,c.a_5,3);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'e')){return false}break;case 2:if(!b(this,'a')){return false}break;case 3:if(!b(this,'a')){return false}break}return 
true};c.prototype.r_case_special=c.prototype.T;function J(a){var e;var f;a.C=a._;e=d(a,c.a_5,3);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'e')){return false}break;case 2:if(!b(a,'a')){return false}break;case 3:if(!b(a,'a')){return false}break}return true};c.prototype.S=function(){var a;var e;this.C=this._;a=d(this,c.a_6,6);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'')){return false}break;case 2:if(!b(this,'')){return false}break;case 3:if(!b(this,'a')){return false}break;case 4:if(!b(this,'e')){return false}break}return true};c.prototype.r_case_other=c.prototype.S;function K(a){var e;var f;a.C=a._;e=d(a,c.a_6,6);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'')){return false}break;case 2:if(!b(a,'')){return false}break;case 3:if(!b(a,'a')){return false}break;case 4:if(!b(a,'e')){return false}break}return true};c.prototype.V=function(){var a;var e;this.C=this._;a=d(this,c.a_7,2);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!g(this)){return false}break;case 2:if(!g(this)){return false}break}return!b(this,'')?false:!m(this)?false:true};c.prototype.r_factive=c.prototype.V;function L(a){var e;var f;a.C=a._;e=d(a,c.a_7,2);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!g(a)){return false}break;case 2:if(!g(a)){return false}break}return!b(a,'')?false:!m(a)?false:true};c.prototype.a=function(){var a;var e;this.C=this._;a=d(this,c.a_8,7);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'a')){return false}break;case 2:if(!b(this,'e')){return false}break;case 3:if(!b(this,'')){return false}break;case 
4:if(!b(this,'')){return false}break;case 5:if(!b(this,'')){return false}break;case 6:if(!b(this,'')){return false}break;case 7:if(!b(this,'')){return false}break}return true};c.prototype.r_plural=c.prototype.a;function M(a){var e;var f;a.C=a._;e=d(a,c.a_8,7);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'a')){return false}break;case 2:if(!b(a,'e')){return false}break;case 3:if(!b(a,'')){return false}break;case 4:if(!b(a,'')){return false}break;case 5:if(!b(a,'')){return false}break;case 6:if(!b(a,'')){return false}break;case 7:if(!b(a,'')){return false}break}return true};c.prototype.Y=function(){var a;var e;this.C=this._;a=d(this,c.a_9,12);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'')){return false}break;case 2:if(!b(this,'e')){return false}break;case 3:if(!b(this,'a')){return false}break;case 4:if(!b(this,'')){return false}break;case 5:if(!b(this,'e')){return false}break;case 6:if(!b(this,'a')){return false}break;case 7:if(!b(this,'')){return false}break;case 8:if(!b(this,'e')){return false}break;case 9:if(!b(this,'')){return false}break}return true};c.prototype.r_owned=c.prototype.Y;function w(a){var e;var f;a.C=a._;e=d(a,c.a_9,12);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'')){return false}break;case 2:if(!b(a,'e')){return false}break;case 3:if(!b(a,'a')){return false}break;case 4:if(!b(a,'')){return false}break;case 5:if(!b(a,'e')){return false}break;case 6:if(!b(a,'a')){return false}break;case 7:if(!b(a,'')){return false}break;case 8:if(!b(a,'e')){return false}break;case 9:if(!b(a,'')){return false}break}return true};c.prototype.b=function(){var a;var e;this.C=this._;a=d(this,c.a_10,31);if(a===0){return false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 
1:if(!b(this,'')){return false}break;case 2:if(!b(this,'a')){return false}break;case 3:if(!b(this,'e')){return false}break;case 4:if(!b(this,'')){return false}break;case 5:if(!b(this,'a')){return false}break;case 6:if(!b(this,'e')){return false}break;case 7:if(!b(this,'')){return false}break;case 8:if(!b(this,'')){return false}break;case 9:if(!b(this,'')){return false}break;case 10:if(!b(this,'a')){return false}break;case 11:if(!b(this,'e')){return false}break;case 12:if(!b(this,'')){return false}break;case 13:if(!b(this,'')){return false}break;case 14:if(!b(this,'a')){return false}break;case 15:if(!b(this,'e')){return false}break;case 16:if(!b(this,'')){return false}break;case 17:if(!b(this,'')){return false}break;case 18:if(!b(this,'')){return false}break;case 19:if(!b(this,'a')){return false}break;case 20:if(!b(this,'e')){return false}break}return true};c.prototype.r_sing_owner=c.prototype.b;function t(a){var e;var f;a.C=a._;e=d(a,c.a_10,31);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'')){return false}break;case 2:if(!b(a,'a')){return false}break;case 3:if(!b(a,'e')){return false}break;case 4:if(!b(a,'')){return false}break;case 5:if(!b(a,'a')){return false}break;case 6:if(!b(a,'e')){return false}break;case 7:if(!b(a,'')){return false}break;case 8:if(!b(a,'')){return false}break;case 9:if(!b(a,'')){return false}break;case 10:if(!b(a,'a')){return false}break;case 11:if(!b(a,'e')){return false}break;case 12:if(!b(a,'')){return false}break;case 13:if(!b(a,'')){return false}break;case 14:if(!b(a,'a')){return false}break;case 15:if(!b(a,'e')){return false}break;case 16:if(!b(a,'')){return false}break;case 17:if(!b(a,'')){return false}break;case 18:if(!b(a,'')){return false}break;case 19:if(!b(a,'a')){return false}break;case 20:if(!b(a,'e')){return false}break}return true};c.prototype.Z=function(){var a;var e;this.C=this._;a=d(this,c.a_11,42);if(a===0){return 
false}this.B=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}switch(a){case 0:return false;case 1:if(!b(this,'')){return false}break;case 2:if(!b(this,'a')){return false}break;case 3:if(!b(this,'e')){return false}break;case 4:if(!b(this,'')){return false}break;case 5:if(!b(this,'')){return false}break;case 6:if(!b(this,'')){return false}break;case 7:if(!b(this,'a')){return false}break;case 8:if(!b(this,'e')){return false}break;case 9:if(!b(this,'')){return false}break;case 10:if(!b(this,'')){return false}break;case 11:if(!b(this,'')){return false}break;case 12:if(!b(this,'a')){return false}break;case 13:if(!b(this,'e')){return false}break;case 14:if(!b(this,'')){return false}break;case 15:if(!b(this,'')){return false}break;case 16:if(!b(this,'')){return false}break;case 17:if(!b(this,'')){return false}break;case 18:if(!b(this,'a')){return false}break;case 19:if(!b(this,'e')){return false}break;case 20:if(!b(this,'')){return false}break;case 21:if(!b(this,'')){return false}break;case 22:if(!b(this,'a')){return false}break;case 23:if(!b(this,'e')){return false}break;case 24:if(!b(this,'')){return false}break;case 25:if(!b(this,'')){return false}break;case 26:if(!b(this,'')){return false}break;case 27:if(!b(this,'a')){return false}break;case 28:if(!b(this,'e')){return false}break;case 29:if(!b(this,'')){return false}break}return true};c.prototype.r_plur_owner=c.prototype.Z;function G(a){var e;var f;a.C=a._;e=d(a,c.a_11,42);if(e===0){return false}a.B=f=a._;if(!(!(a.I_p1<=f)?false:true)){return false}switch(e){case 0:return false;case 1:if(!b(a,'')){return false}break;case 2:if(!b(a,'a')){return false}break;case 3:if(!b(a,'e')){return false}break;case 4:if(!b(a,'')){return false}break;case 5:if(!b(a,'')){return false}break;case 6:if(!b(a,'')){return false}break;case 7:if(!b(a,'a')){return false}break;case 8:if(!b(a,'e')){return false}break;case 9:if(!b(a,'')){return false}break;case 10:if(!b(a,'')){return false}break;case 11:if(!b(a,'')){return false}break;case 
12:if(!b(a,'a')){return false}break;case 13:if(!b(a,'e')){return false}break;case 14:if(!b(a,'')){return false}break;case 15:if(!b(a,'')){return false}break;case 16:if(!b(a,'')){return false}break;case 17:if(!b(a,'')){return false}break;case 18:if(!b(a,'a')){return false}break;case 19:if(!b(a,'e')){return false}break;case 20:if(!b(a,'')){return false}break;case 21:if(!b(a,'')){return false}break;case 22:if(!b(a,'a')){return false}break;case 23:if(!b(a,'e')){return false}break;case 24:if(!b(a,'')){return false}break;case 25:if(!b(a,'')){return false}break;case 26:if(!b(a,'')){return false}break;case 27:if(!b(a,'a')){return false}break;case 28:if(!b(a,'e')){return false}break;case 29:if(!b(a,'')){return false}break}return true};c.prototype.J=function(){var s;var l;var m;var n;var o;var p;var q;var r;var u;var b;var c;var d;var e;var f;var g;var h;var i;var a;var j;var v;var x;var y;var z;var A;var B;var C;var E;var F;var N;var O;var P;var Q;var R;var S;var T;var k;s=this._;b=true;a:while(b===true){b=false;if(!D(this)){break a}}v=this._=s;this.E=v;y=this._=x=this.A;l=x-y;c=true;a:while(c===true){c=false;if(!H(this)){break a}}A=this._=(z=this.A)-l;m=z-A;d=true;a:while(d===true){d=false;if(!I(this)){break a}}C=this._=(B=this.A)-m;n=B-C;e=true;a:while(e===true){e=false;if(!J(this)){break a}}F=this._=(E=this.A)-n;o=E-F;f=true;a:while(f===true){f=false;if(!K(this)){break a}}O=this._=(N=this.A)-o;p=N-O;g=true;a:while(g===true){g=false;if(!L(this)){break a}}Q=this._=(P=this.A)-p;q=P-Q;h=true;a:while(h===true){h=false;if(!w(this)){break a}}S=this._=(R=this.A)-q;r=R-S;i=true;a:while(i===true){i=false;if(!t(this)){break a}}k=this._=(T=this.A)-r;u=T-k;a=true;a:while(a===true){a=false;if(!G(this)){break a}}this._=this.A-u;j=true;a:while(j===true){j=false;if(!M(this)){break a}}this._=this.E;return true};c.prototype.stem=c.prototype.J;c.prototype.N=function(a){return a instanceof c};c.prototype.equals=c.prototype.N;c.prototype.O=function(){var c;var a;var b;var 
d;c='HungarianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};c.prototype.hashCode=c.prototype.O;c.serialVersionUID=1;e(c,'methodObject',function(){return new c});e(c,'a_0',function(){return[new a('cs',-1,-1),new a('dzs',-1,-1),new a('gy',-1,-1),new a('ly',-1,-1),new a('ny',-1,-1),new a('sz',-1,-1),new a('ty',-1,-1),new a('zs',-1,-1)]});e(c,'a_1',function(){return[new a('á',-1,1),new a('é',-1,2)]});e(c,'a_2',function(){return[new a('bb',-1,-1),new a('cc',-1,-1),new a('dd',-1,-1),new a('ff',-1,-1),new a('gg',-1,-1),new a('jj',-1,-1),new a('kk',-1,-1),new a('ll',-1,-1),new a('mm',-1,-1),new a('nn',-1,-1),new a('pp',-1,-1),new a('rr',-1,-1),new a('ccs',-1,-1),new a('ss',-1,-1),new a('zzs',-1,-1),new a('tt',-1,-1),new a('vv',-1,-1),new a('ggy',-1,-1),new a('lly',-1,-1),new a('nny',-1,-1),new a('tty',-1,-1),new a('ssz',-1,-1),new a('zz',-1,-1)]});e(c,'a_3',function(){return[new a('al',-1,1),new a('el',-1,2)]});e(c,'a_4',function(){return[new a('ba',-1,-1),new a('ra',-1,-1),new a('be',-1,-1),new a('re',-1,-1),new a('ig',-1,-1),new a('nak',-1,-1),new a('nek',-1,-1),new a('val',-1,-1),new a('vel',-1,-1),new a('ul',-1,-1),new a('nál',-1,-1),new a('nél',-1,-1),new a('ból',-1,-1),new a('ról',-1,-1),new a('tól',-1,-1),new a('bõl',-1,-1),new a('rõl',-1,-1),new a('tõl',-1,-1),new a('ül',-1,-1),new a('n',-1,-1),new a('an',19,-1),new a('ban',20,-1),new a('en',19,-1),new a('ben',22,-1),new a('képpen',22,-1),new a('on',19,-1),new a('ön',19,-1),new a('képp',-1,-1),new a('kor',-1,-1),new a('t',-1,-1),new a('at',29,-1),new a('et',29,-1),new a('ként',29,-1),new a('anként',32,-1),new a('enként',32,-1),new a('onként',32,-1),new a('ot',29,-1),new a('ért',29,-1),new a('öt',29,-1),new a('hez',-1,-1),new a('hoz',-1,-1),new a('höz',-1,-1),new a('vá',-1,-1),new a('vé',-1,-1)]});e(c,'a_5',function(){return[new a('án',-1,2),new a('én',-1,1),new a('ánként',-1,3)]});e(c,'a_6',function(){return[new a('stul',-1,2),new a('astul',0,1),new a('ástul',0,3),new 
a('stül',-1,2),new a('estül',3,1),new a('éstül',3,4)]});e(c,'a_7',function(){return[new a('á',-1,1),new a('é',-1,2)]});e(c,'a_8',function(){return[new a('k',-1,7),new a('ak',0,4),new a('ek',0,6),new a('ok',0,5),new a('ák',0,1),new a('ék',0,2),new a('ök',0,3)]});e(c,'a_9',function(){return[new a('éi',-1,7),new a('áéi',0,6),new a('ééi',0,5),new a('é',-1,9),new a('ké',3,4),new a('aké',4,1),new a('eké',4,1),new a('oké',4,1),new a('áké',4,3),new a('éké',4,2),new a('öké',4,1),new a('éé',3,8)]});e(c,'a_10',function(){return[new a('a',-1,18),new a('ja',0,17),new a('d',-1,16),new a('ad',2,13),new a('ed',2,13),new a('od',2,13),new a('ád',2,14),new a('éd',2,15),new a('öd',2,13),new a('e',-1,18),new a('je',9,17),new a('nk',-1,4),new a('unk',11,1),new a('ánk',11,2),new a('énk',11,3),new a('ünk',11,1),new a('uk',-1,8),new a('juk',16,7),new a('ájuk',17,5),new a('ük',-1,8),new a('jük',19,7),new a('éjük',20,6),new a('m',-1,12),new a('am',22,9),new a('em',22,9),new a('om',22,9),new a('ám',22,10),new a('ém',22,11),new a('o',-1,18),new a('á',-1,19),new a('é',-1,20)]});e(c,'a_11',function(){return[new a('id',-1,10),new a('aid',0,9),new a('jaid',1,6),new a('eid',0,9),new a('jeid',3,6),new a('áid',0,7),new a('éid',0,8),new a('i',-1,15),new a('ai',7,14),new a('jai',8,11),new a('ei',7,14),new a('jei',10,11),new a('ái',7,12),new a('éi',7,13),new a('itek',-1,24),new a('eitek',14,21),new a('jeitek',15,20),new a('éitek',14,23),new a('ik',-1,29),new a('aik',18,26),new a('jaik',19,25),new a('eik',18,26),new a('jeik',21,25),new a('áik',18,27),new a('éik',18,28),new a('ink',-1,20),new a('aink',25,17),new a('jaink',26,16),new a('eink',25,17),new a('jeink',28,16),new a('áink',25,18),new a('éink',25,19),new a('aitok',-1,21),new a('jaitok',32,20),new a('áitok',-1,22),new a('im',-1,5),new a('aim',35,4),new a('jaim',36,1),new a('eim',35,4),new a('jeim',38,1),new a('áim',35,2),new a('éim',35,3)]});e(c,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,1,17,52,14]});var 
q={'src/stemmer.jsx':{Stemmer:n},'src/hungarian-stemmer.jsx':{HungarianStemmer:c}}}(JSX))
var Stemmer = JSX.require("src/hungarian-stemmer.jsx").HungarianStemmer;
@@ -241,5 +240,5 @@ class SearchHungarian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('hungarian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/it.py b/sphinx/search/it.py
index 860ab9325..41039818b 100644
--- a/sphinx/search/it.py
+++ b/sphinx/search/it.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.it
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-italian_stopwords = parse_stop_word(u'''
+italian_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/italian/stop.txt
ad | a (to) before vowel
al | a + il
@@ -312,7 +311,7 @@ stessimo
stessero
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(k){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function K(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function e(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function L(a,b,c){return a[b]=a[b]/c|0}var r=parseInt;var B=parseFloat;function M(a){return a!==a}var z=isFinite;var y=encodeURIComponent;var x=decodeURIComponent;var w=encodeURI;var u=decodeURI;var t=Object.prototype.toString;var C=Object.prototype.hasOwnProperty;function j(){}k.require=function(b){var a=q[b];return a!==undefined?a:null};k.profilerIsRunning=function(){return j.getResults!=null};k.getProfileResults=function(){return(j.getResults||function(){return{}})()};k.postProfileResults=function(a,b){if(j.postResults==null)throw new Error('profiler has not been turned on');return j.postResults(a,b)};k.resetProfileResults=function(){if(j.resetResults==null)throw new Error('profiler has not been turned on');return j.resetResults()};k.DEBUG=false;function s(){};l([s],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function p(){};l([p],Object);function i(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.C=b;this.B=c};l([i],p);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.C=b.C;a.B=b.B};function d(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function m(b,d,c,e){var a;if(b._<=b.D){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function h(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function o(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function g(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function n(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function D(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.C)<0||c>(d=a.B)||d>(e=a.A)||e>a.E.length?false:true){D(a,a.C,a.B,f);b=true}return b};i.prototype.J=function(){return false};i.prototype.a=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.C=d;this.B=e;this.J();a=this.E;this.G['.'+b]=a}return a};i.prototype.stemWord=i.prototype.a;i.prototype.b=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.C=g;this.B=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};i.prototype.stemWords=i.prototype.b;function b(){i.call(this);this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],i);b.prototype.M=function(a){this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.W=function(){var e;var p;var q;var l;var a;var k;var f;var g;var h;var i;var j;var m;p=this._;b:while(true){q=this._;f=true;a:while(f===true){f=false;this.C=this._;e=n(this,b.a_0,7);if(e===0){break a}this.B=this._;switch(e){case 0:break a;case 1:if(!c(this,'à')){return false}break;case 2:if(!c(this,'è')){return false}break;case 3:if(!c(this,'ì')){return false}break;case 4:if(!c(this,'ò')){return false}break;case 5:if(!c(this,'ù')){return false}break;case 6:if(!c(this,'qU')){return false}break;case 7:if(this._>=this.A){break a}this._++;break}continue b}this._=q;break b}this._=p;b:while(true){l=this._;g=true;d:while(g===true){g=false;e:while(true){a=this._;h=true;a:while(h===true){h=false;if(!d(this,b.g_v,97,249)){break a}this.C=this._;i=true;f:while(i===true){i=false;k=this._;j=true;c:while(j===true){j=false;if(!o(this,1,'u')){break c}this.B=this._;if(!d(this,b.g_v,97,249)){break c}if(!c(this,'U')){return false}break f}this._=k;if(!o(this,1,'i')){break a}this.B=this._;if(!d(this,b.g_v,97,249)){break a}if(!c(this,'I')){return false}}this._=a;break e}m=this._=a;if(m>=this.A){break d}this._++}continue b}this._=l;break b}return true};b.prototype.r_prelude=b.prototype.W;function G(a){var e;var q;var r;var m;var f;var l;var g;var h;var i;var j;var k;var p;q=a._;b:while(true){r=a._;g=true;a:while(g===true){g=false;a.C=a._;e=n(a,b.a_0,7);if(e===0){break a}a.B=a._;switch(e){case 0:break a;case 1:if(!c(a,'à')){return false}break;case 2:if(!c(a,'è')){return false}break;case 3:if(!c(a,'ì')){return false}break;case 4:if(!c(a,'ò')){return false}break;case 
5:if(!c(a,'ù')){return false}break;case 6:if(!c(a,'qU')){return false}break;case 7:if(a._>=a.A){break a}a._++;break}continue b}a._=r;break b}a._=q;b:while(true){m=a._;h=true;d:while(h===true){h=false;e:while(true){f=a._;i=true;a:while(i===true){i=false;if(!d(a,b.g_v,97,249)){break a}a.C=a._;j=true;f:while(j===true){j=false;l=a._;k=true;c:while(k===true){k=false;if(!o(a,1,'u')){break c}a.B=a._;if(!d(a,b.g_v,97,249)){break c}if(!c(a,'U')){return false}break f}a._=l;if(!o(a,1,'i')){break a}a.B=a._;if(!d(a,b.g_v,97,249)){break a}if(!c(a,'I')){return false}}a._=f;break e}p=a._=f;if(p>=a.A){break d}a._++}continue b}a._=m;break b}return true};b.prototype.U=function(){var u;var w;var x;var y;var t;var l;var e;var f;var g;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;e=true;g:while(e===true){e=false;w=this._;f=true;b:while(f===true){f=false;if(!d(this,b.g_v,97,249)){break b}g=true;f:while(g===true){g=false;x=this._;i=true;c:while(i===true){i=false;if(!h(this,b.g_v,97,249)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!d(this,b.g_v,97,249)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!d(this,b.g_v,97,249)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!h(this,b.g_v,97,249)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!h(this,b.g_v,97,249)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!h(this,b.g_v,97,249)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!d(this,b.g_v,97,249)){break d}break e}if(this._>=this.A){break b}this._++}break c}this._=y;if(!d(this,b.g_v,97,249)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!d(this,b.g_v,97,249)){break c}break b}if(this._>=this.A){break 
a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!h(this,b.g_v,97,249)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!d(this,b.g_v,97,249)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!h(this,b.g_v,97,249)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.U;function H(a){var x;var y;var z;var u;var v;var l;var e;var f;var g;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;e=true;g:while(e===true){e=false;y=a._;f=true;b:while(f===true){f=false;if(!d(a,b.g_v,97,249)){break b}g=true;f:while(g===true){g=false;z=a._;i=true;c:while(i===true){i=false;if(!h(a,b.g_v,97,249)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!d(a,b.g_v,97,249)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!d(a,b.g_v,97,249)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!h(a,b.g_v,97,249)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!h(a,b.g_v,97,249)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!h(a,b.g_v,97,249)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!d(a,b.g_v,97,249)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!d(a,b.g_v,97,249)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!d(a,b.g_v,97,249)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!h(a,b.g_v,97,249)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!d(a,b.g_v,97,249)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!h(a,b.g_v,97,249)){break 
b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.C=this._;a=n(this,b.a_1,3);if(a===0){break a}this.B=this._;switch(a){case 0:break a;case 1:if(!c(this,'i')){return false}break;case 2:if(!c(this,'u')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function I(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.C=a._;d=n(a,b.a_1,3);if(d===0){break a}a.B=a._;switch(d){case 0:break a;case 1:if(!c(a,'i')){return false}break;case 2:if(!c(a,'u')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.T=function(){var a;this.B=this._;if(f(this,b.a_2,37)===0){return false}this.C=this._;a=f(this,b.a_3,5);if(a===0){return false}if(!(!(this.I_pV<=this._)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!c(this,'e')){return false}break}return true};b.prototype.r_attached_pronoun=b.prototype.T;function J(a){var d;a.B=a._;if(f(a,b.a_2,37)===0){return false}a.C=a._;d=f(a,b.a_3,5);if(d===0){return false}if(!(!(a.I_pV<=a._)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!c(a,'e')){return false}break}return true};b.prototype.X=function(){var a;var j;var d;var h;var e;var k;var i;var l;var m;var o;var p;var q;var r;var n;this.B=this._;a=f(this,b.a_6,51);if(a===0){return false}this.C=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){return 
false}if(!c(this,'')){return false}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}j=this.A-this._;k=true;a:while(k===true){k=false;this.B=this._;if(!g(this,2,'ic')){this._=this.A-j;break a}this.C=o=this._;if(!(!(this.I_p2<=o)?false:true)){this._=this.A-j;break a}if(!c(this,'')){return false}}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'log')){return false}break;case 4:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'u')){return false}break;case 5:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'ente')){return false}break;case 6:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 7:if(!(!(this.I_p1<=this._)?false:true)){return false}if(!c(this,'')){return false}d=this.A-this._;i=true;a:while(i===true){i=false;this.B=this._;a=f(this,b.a_4,4);if(a===0){this._=this.A-d;break a}this.C=p=this._;if(!(!(this.I_p2<=p)?false:true)){this._=this.A-d;break a}if(!c(this,'')){return false}switch(a){case 0:this._=this.A-d;break a;case 1:this.B=this._;if(!g(this,2,'at')){this._=this.A-d;break a}this.C=q=this._;if(!(!(this.I_p2<=q)?false:true)){this._=this.A-d;break a}if(!c(this,'')){return false}break}}break;case 8:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}h=this.A-this._;l=true;a:while(l===true){l=false;this.B=this._;a=f(this,b.a_5,3);if(a===0){this._=this.A-h;break a}this.C=this._;switch(a){case 0:this._=this.A-h;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-h;break a}if(!c(this,'')){return false}break}}break;case 9:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}e=this.A-this._;m=true;a:while(m===true){m=false;this.B=this._;if(!g(this,2,'at')){this._=this.A-e;break a}this.C=r=this._;if(!(!(this.I_p2<=r)?false:true)){this._=this.A-e;break a}if(!c(this,'')){return false}this.B=this._;if(!g(this,2,'ic')){this._=this.A-e;break 
a}this.C=n=this._;if(!(!(this.I_p2<=n)?false:true)){this._=this.A-e;break a}if(!c(this,'')){return false}}break}return true};b.prototype.r_standard_suffix=b.prototype.X;function F(a){var d;var k;var e;var i;var h;var l;var j;var m;var n;var p;var q;var r;var s;var o;a.B=a._;d=f(a,b.a_6,51);if(d===0){return false}a.C=a._;switch(d){case 0:return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 2:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}k=a.A-a._;l=true;a:while(l===true){l=false;a.B=a._;if(!g(a,2,'ic')){a._=a.A-k;break a}a.C=p=a._;if(!(!(a.I_p2<=p)?false:true)){a._=a.A-k;break a}if(!c(a,'')){return false}}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'log')){return false}break;case 4:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'u')){return false}break;case 5:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'ente')){return false}break;case 6:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 7:if(!(!(a.I_p1<=a._)?false:true)){return false}if(!c(a,'')){return false}e=a.A-a._;j=true;a:while(j===true){j=false;a.B=a._;d=f(a,b.a_4,4);if(d===0){a._=a.A-e;break a}a.C=q=a._;if(!(!(a.I_p2<=q)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}switch(d){case 0:a._=a.A-e;break a;case 1:a.B=a._;if(!g(a,2,'at')){a._=a.A-e;break a}a.C=r=a._;if(!(!(a.I_p2<=r)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}break}}break;case 8:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}i=a.A-a._;m=true;a:while(m===true){m=false;a.B=a._;d=f(a,b.a_5,3);if(d===0){a._=a.A-i;break a}a.C=a._;switch(d){case 0:a._=a.A-i;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-i;break a}if(!c(a,'')){return false}break}}break;case 9:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}h=a.A-a._;n=true;a:while(n===true){n=false;a.B=a._;if(!g(a,2,'at')){a._=a.A-h;break 
a}a.C=s=a._;if(!(!(a.I_p2<=s)?false:true)){a._=a.A-h;break a}if(!c(a,'')){return false}a.B=a._;if(!g(a,2,'ic')){a._=a.A-h;break a}a.C=o=a._;if(!(!(a.I_p2<=o)?false:true)){a._=a.A-h;break a}if(!c(a,'')){return false}}break}return true};b.prototype.Y=function(){var d;var e;var a;var g;var h;var i;e=this.A-(g=this._);if(g<this.I_pV){return false}h=this._=this.I_pV;a=this.D;this.D=h;i=this._=this.A-e;this.B=i;d=f(this,b.a_7,87);if(d===0){this.D=a;return false}this.C=this._;switch(d){case 0:this.D=a;return false;case 1:if(!c(this,'')){return false}break}this.D=a;return true};b.prototype.r_verb_suffix=b.prototype.Y;function E(a){var e;var g;var d;var h;var i;var j;g=a.A-(h=a._);if(h<a.I_pV){return false}i=a._=a.I_pV;d=a.D;a.D=i;j=a._=a.A-g;a.B=j;e=f(a,b.a_7,87);if(e===0){a.D=d;return false}a.C=a._;switch(e){case 0:a.D=d;return false;case 1:if(!c(a,'')){return false}break}a.D=d;return true};b.prototype.Z=function(){var a;var d;var e;var f;var h;var i;a=this.A-this._;e=true;a:while(e===true){e=false;this.B=this._;if(!m(this,b.g_AEIO,97,242)){this._=this.A-a;break a}this.C=h=this._;if(!(!(this.I_pV<=h)?false:true)){this._=this.A-a;break a}if(!c(this,'')){return false}this.B=this._;if(!g(this,1,'i')){this._=this.A-a;break a}this.C=i=this._;if(!(!(this.I_pV<=i)?false:true)){this._=this.A-a;break a}if(!c(this,'')){return false}}d=this.A-this._;f=true;a:while(f===true){f=false;this.B=this._;if(!g(this,1,'h')){this._=this.A-d;break a}this.C=this._;if(!m(this,b.g_CG,99,103)){this._=this.A-d;break a}if(!(!(this.I_pV<=this._)?false:true)){this._=this.A-d;break a}if(!c(this,'')){return false}}return true};b.prototype.r_vowel_suffix=b.prototype.Z;function A(a){var d;var e;var f;var h;var i;var j;d=a.A-a._;f=true;a:while(f===true){f=false;a.B=a._;if(!m(a,b.g_AEIO,97,242)){a._=a.A-d;break a}a.C=i=a._;if(!(!(a.I_pV<=i)?false:true)){a._=a.A-d;break a}if(!c(a,'')){return false}a.B=a._;if(!g(a,1,'i')){a._=a.A-d;break a}a.C=j=a._;if(!(!(a.I_pV<=j)?false:true)){a._=a.A-d;break 
a}if(!c(a,'')){return false}}e=a.A-a._;h=true;a:while(h===true){h=false;a.B=a._;if(!g(a,1,'h')){a._=a.A-e;break a}a.C=a._;if(!m(a,b.g_CG,99,103)){a._=a.A-e;break a}if(!(!(a.I_pV<=a._)?false:true)){a._=a.A-e;break a}if(!c(a,'')){return false}}return true};b.prototype.J=function(){var l;var i;var j;var k;var m;var n;var b;var c;var d;var e;var a;var f;var g;var h;var p;var q;var r;var s;var t;var u;var o;l=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}p=this._=l;i=p;c=true;a:while(c===true){c=false;if(!H(this)){break a}}q=this._=i;this.D=q;s=this._=r=this.A;j=r-s;d=true;a:while(d===true){d=false;if(!J(this)){break a}}u=this._=(t=this.A)-j;k=t-u;e=true;a:while(e===true){e=false;a=true;b:while(a===true){a=false;m=this.A-this._;f=true;c:while(f===true){f=false;if(!F(this)){break c}break b}this._=this.A-m;if(!E(this)){break a}}}this._=this.A-k;g=true;a:while(g===true){g=false;if(!A(this)){break a}}o=this._=this.D;n=o;h=true;a:while(h===true){h=false;if(!I(this)){break a}}this._=n;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='ItalianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;e(b,'methodObject',function(){return new b});e(b,'a_0',function(){return[new a('',-1,7),new a('qu',0,6),new a('á',0,1),new a('é',0,2),new a('í',0,3),new a('ó',0,4),new a('ú',0,5)]});e(b,'a_1',function(){return[new a('',-1,3),new a('I',0,1),new a('U',0,2)]});e(b,'a_2',function(){return[new a('la',-1,-1),new a('cela',0,-1),new a('gliela',0,-1),new a('mela',0,-1),new a('tela',0,-1),new a('vela',0,-1),new a('le',-1,-1),new a('cele',6,-1),new a('gliele',6,-1),new a('mele',6,-1),new a('tele',6,-1),new a('vele',6,-1),new a('ne',-1,-1),new a('cene',12,-1),new a('gliene',12,-1),new a('mene',12,-1),new a('sene',12,-1),new a('tene',12,-1),new a('vene',12,-1),new 
a('ci',-1,-1),new a('li',-1,-1),new a('celi',20,-1),new a('glieli',20,-1),new a('meli',20,-1),new a('teli',20,-1),new a('veli',20,-1),new a('gli',20,-1),new a('mi',-1,-1),new a('si',-1,-1),new a('ti',-1,-1),new a('vi',-1,-1),new a('lo',-1,-1),new a('celo',31,-1),new a('glielo',31,-1),new a('melo',31,-1),new a('telo',31,-1),new a('velo',31,-1)]});e(b,'a_3',function(){return[new a('ando',-1,1),new a('endo',-1,1),new a('ar',-1,2),new a('er',-1,2),new a('ir',-1,2)]});e(b,'a_4',function(){return[new a('ic',-1,-1),new a('abil',-1,-1),new a('os',-1,-1),new a('iv',-1,1)]});e(b,'a_5',function(){return[new a('ic',-1,1),new a('abil',-1,1),new a('iv',-1,1)]});e(b,'a_6',function(){return[new a('ica',-1,1),new a('logia',-1,3),new a('osa',-1,1),new a('ista',-1,1),new a('iva',-1,9),new a('anza',-1,1),new a('enza',-1,5),new a('ice',-1,1),new a('atrice',7,1),new a('iche',-1,1),new a('logie',-1,3),new a('abile',-1,1),new a('ibile',-1,1),new a('usione',-1,4),new a('azione',-1,2),new a('uzione',-1,4),new a('atore',-1,2),new a('ose',-1,1),new a('ante',-1,1),new a('mente',-1,1),new a('amente',19,7),new a('iste',-1,1),new a('ive',-1,9),new a('anze',-1,1),new a('enze',-1,5),new a('ici',-1,1),new a('atrici',25,1),new a('ichi',-1,1),new a('abili',-1,1),new a('ibili',-1,1),new a('ismi',-1,1),new a('usioni',-1,4),new a('azioni',-1,2),new a('uzioni',-1,4),new a('atori',-1,2),new a('osi',-1,1),new a('anti',-1,1),new a('amenti',-1,6),new a('imenti',-1,6),new a('isti',-1,1),new a('ivi',-1,9),new a('ico',-1,1),new a('ismo',-1,1),new a('oso',-1,1),new a('amento',-1,6),new a('imento',-1,6),new a('ivo',-1,9),new a('ità',-1,8),new a('istà',-1,1),new a('istè',-1,1),new a('istì',-1,1)]});e(b,'a_7',function(){return[new a('isca',-1,1),new a('enda',-1,1),new a('ata',-1,1),new a('ita',-1,1),new a('uta',-1,1),new a('ava',-1,1),new a('eva',-1,1),new a('iva',-1,1),new a('erebbe',-1,1),new a('irebbe',-1,1),new a('isce',-1,1),new a('ende',-1,1),new a('are',-1,1),new a('ere',-1,1),new a('ire',-1,1),new 
a('asse',-1,1),new a('ate',-1,1),new a('avate',16,1),new a('evate',16,1),new a('ivate',16,1),new a('ete',-1,1),new a('erete',20,1),new a('irete',20,1),new a('ite',-1,1),new a('ereste',-1,1),new a('ireste',-1,1),new a('ute',-1,1),new a('erai',-1,1),new a('irai',-1,1),new a('isci',-1,1),new a('endi',-1,1),new a('erei',-1,1),new a('irei',-1,1),new a('assi',-1,1),new a('ati',-1,1),new a('iti',-1,1),new a('eresti',-1,1),new a('iresti',-1,1),new a('uti',-1,1),new a('avi',-1,1),new a('evi',-1,1),new a('ivi',-1,1),new a('isco',-1,1),new a('ando',-1,1),new a('endo',-1,1),new a('Yamo',-1,1),new a('iamo',-1,1),new a('avamo',-1,1),new a('evamo',-1,1),new a('ivamo',-1,1),new a('eremo',-1,1),new a('iremo',-1,1),new a('assimo',-1,1),new a('ammo',-1,1),new a('emmo',-1,1),new a('eremmo',54,1),new a('iremmo',54,1),new a('immo',-1,1),new a('ano',-1,1),new a('iscano',58,1),new a('avano',58,1),new a('evano',58,1),new a('ivano',58,1),new a('eranno',-1,1),new a('iranno',-1,1),new a('ono',-1,1),new a('iscono',65,1),new a('arono',65,1),new a('erono',65,1),new a('irono',65,1),new a('erebbero',-1,1),new a('irebbero',-1,1),new a('assero',-1,1),new a('essero',-1,1),new a('issero',-1,1),new a('ato',-1,1),new a('ito',-1,1),new a('uto',-1,1),new a('avo',-1,1),new a('evo',-1,1),new a('ivo',-1,1),new a('ar',-1,1),new a('ir',-1,1),new a('erà',-1,1),new a('irà',-1,1),new a('erò',-1,1),new a('irò',-1,1)]});e(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2,1]});e(b,'g_AEIO',function(){return[17,65,0,0,0,0,0,0,0,0,0,0,0,0,0,128,128,8,2]});e(b,'g_CG',function(){return[17]});var q={'src/stemmer.jsx':{Stemmer:p},'src/italian-stemmer.jsx':{ItalianStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/italian-stemmer.jsx").ItalianStemmer;
"""
@@ -330,5 +329,5 @@ class SearchItalian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('italian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ja.py b/sphinx/search/ja.py
index e1fcaa920..829cc424d 100644
--- a/sphinx/search/ja.py
+++ b/sphinx/search/ja.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.ja
~~~~~~~~~~~~~~~~
@@ -22,8 +21,6 @@ import re
import sys
import warnings
-from six import iteritems, PY3
-
try:
import MeCab
native_module = True
@@ -46,14 +43,14 @@ if False:
from typing import Any, Dict, List # NOQA
-class BaseSplitter(object):
+class BaseSplitter:
def __init__(self, options):
# type: (Dict) -> None
self.options = options
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""
:param str input:
@@ -66,7 +63,7 @@ class BaseSplitter(object):
class MecabSplitter(BaseSplitter):
def __init__(self, options):
# type: (Dict) -> None
- super(MecabSplitter, self).__init__(options)
+ super().__init__(options)
self.ctypes_libmecab = None # type: Any
self.ctypes_mecab = None # type: Any
if not native_module:
@@ -76,17 +73,13 @@ class MecabSplitter(BaseSplitter):
self.dict_encode = options.get('dic_enc', 'utf-8')
def split(self, input):
- # type: (unicode) -> List[unicode]
- input2 = input if PY3 else input.encode(self.dict_encode)
+ # type: (str) -> List[str]
if native_module:
- result = self.native.parse(input2)
+ result = self.native.parse(input)
else:
result = self.ctypes_libmecab.mecab_sparse_tostr(
self.ctypes_mecab, input.encode(self.dict_encode))
- if PY3:
- return result.split(' ')
- else:
- return result.decode(self.dict_encode).split(' ')
+ return result.split(' ')
def init_native(self, options):
# type: (Dict) -> None
@@ -144,7 +137,7 @@ MeCabBinder = MecabSplitter # keep backward compatibility until Sphinx-1.6
class JanomeSplitter(BaseSplitter):
def __init__(self, options):
# type: (Dict) -> None
- super(JanomeSplitter, self).__init__(options)
+ super().__init__(options)
self.user_dict = options.get('user_dic')
self.user_dict_enc = options.get('user_dic_enc', 'utf8')
self.init_tokenizer()
@@ -156,318 +149,318 @@ class JanomeSplitter(BaseSplitter):
self.tokenizer = janome.tokenizer.Tokenizer(udic=self.user_dict, udic_enc=self.user_dict_enc)
def split(self, input):
- # type: (unicode) -> List[unicode]
- result = u' '.join(token.surface for token in self.tokenizer.tokenize(input))
- return result.split(u' ')
+ # type: (str) -> List[str]
+ result = ' '.join(token.surface for token in self.tokenizer.tokenize(input))
+ return result.split(' ')
class DefaultSplitter(BaseSplitter):
- patterns_ = dict([(re.compile(pattern), value) for pattern, value in iteritems({
- u'[一二三四五六七八九十百千万億兆]': u'M',
- u'[一-龠々〆ヵヶ]': u'H',
- u'[ぁ-ん]': u'I',
- u'[ァ-ヴーア-ン゙ー]': u'K',
- u'[a-zA-Za-zA-Z]': u'A',
- u'[0-90-9]': u'N',
- })])
+ patterns_ = dict([(re.compile(pattern), value) for pattern, value in {
+ '[一二三四五六七八九十百千万億兆]': 'M',
+ '[一-龠々〆ヵヶ]': 'H',
+ '[ぁ-ん]': 'I',
+ '[ァ-ヴーア-ン゙ー]': 'K',
+ '[a-zA-Za-zA-Z]': 'A',
+ '[0-90-9]': 'N',
+ }.items()])
BIAS__ = -332
- BC1__ = {u'HH': 6, u'II': 2461, u'KH': 406, u'OH': -1378}
- BC2__ = {u'AA': -3267, u'AI': 2744, u'AN': -878, u'HH': -4070, u'HM': -1711,
- u'HN': 4012, u'HO': 3761, u'IA': 1327, u'IH': -1184, u'II': -1332,
- u'IK': 1721, u'IO': 5492, u'KI': 3831, u'KK': -8741, u'MH': -3132,
- u'MK': 3334, u'OO': -2920}
- BC3__ = {u'HH': 996, u'HI': 626, u'HK': -721, u'HN': -1307, u'HO': -836, u'IH': -301,
- u'KK': 2762, u'MK': 1079, u'MM': 4034, u'OA': -1652, u'OH': 266}
- BP1__ = {u'BB': 295, u'OB': 304, u'OO': -125, u'UB': 352}
- BP2__ = {u'BO': 60, u'OO': -1762}
- BQ1__ = {u'BHH': 1150, u'BHM': 1521, u'BII': -1158, u'BIM': 886, u'BMH': 1208,
- u'BNH': 449, u'BOH': -91, u'BOO': -2597, u'OHI': 451, u'OIH': -296,
- u'OKA': 1851, u'OKH': -1020, u'OKK': 904, u'OOO': 2965}
- BQ2__ = {u'BHH': 118, u'BHI': -1159, u'BHM': 466, u'BIH': -919, u'BKK': -1720,
- u'BKO': 864, u'OHH': -1139, u'OHM': -181, u'OIH': 153, u'UHI': -1146}
- BQ3__ = {u'BHH': -792, u'BHI': 2664, u'BII': -299, u'BKI': 419, u'BMH': 937,
- u'BMM': 8335, u'BNN': 998, u'BOH': 775, u'OHH': 2174, u'OHM': 439, u'OII': 280,
- u'OKH': 1798, u'OKI': -793, u'OKO': -2242, u'OMH': -2402, u'OOO': 11699}
- BQ4__ = {u'BHH': -3895, u'BIH': 3761, u'BII': -4654, u'BIK': 1348, u'BKK': -1806,
- u'BMI': -3385, u'BOO': -12396, u'OAH': 926, u'OHH': 266, u'OHK': -2036,
- u'ONN': -973}
- BW1__ = {u',と': 660, u',同': 727, u'B1あ': 1404, u'B1同': 542, u'、と': 660,
- u'、同': 727, u'」と': 1682, u'あっ': 1505, u'いう': 1743, u'いっ': -2055,
- u'いる': 672, u'うし': -4817, u'うん': 665, u'から': 3472, u'がら': 600,
- u'こう': -790, u'こと': 2083, u'こん': -1262, u'さら': -4143, u'さん': 4573,
- u'した': 2641, u'して': 1104, u'すで': -3399, u'そこ': 1977, u'それ': -871,
- u'たち': 1122, u'ため': 601, u'った': 3463, u'つい': -802, u'てい': 805,
- u'てき': 1249, u'でき': 1127, u'です': 3445, u'では': 844, u'とい': -4915,
- u'とみ': 1922, u'どこ': 3887, u'ない': 5713, u'なっ': 3015, u'など': 7379,
- u'なん': -1113, u'にし': 2468, u'には': 1498, u'にも': 1671, u'に対': -912,
- u'の一': -501, u'の中': 741, u'ませ': 2448, u'まで': 1711, u'まま': 2600,
- u'まる': -2155, u'やむ': -1947, u'よっ': -2565, u'れた': 2369, u'れで': -913,
- u'をし': 1860, u'を見': 731, u'亡く': -1886, u'京都': 2558, u'取り': -2784,
- u'大き': -2604, u'大阪': 1497, u'平方': -2314, u'引き': -1336, u'日本': -195,
- u'本当': -2423, u'毎日': -2113, u'目指': -724, u'B1あ': 1404, u'B1同': 542,
- u'」と': 1682}
- BW2__ = {u'..': -11822, u'11': -669, u'――': -5730, u'−−': -13175, u'いう': -1609,
- u'うか': 2490, u'かし': -1350, u'かも': -602, u'から': -7194, u'かれ': 4612,
- u'がい': 853, u'がら': -3198, u'きた': 1941, u'くな': -1597, u'こと': -8392,
- u'この': -4193, u'させ': 4533, u'され': 13168, u'さん': -3977, u'しい': -1819,
- u'しか': -545, u'した': 5078, u'して': 972, u'しな': 939, u'その': -3744,
- u'たい': -1253, u'たた': -662, u'ただ': -3857, u'たち': -786, u'たと': 1224,
- u'たは': -939, u'った': 4589, u'って': 1647, u'っと': -2094, u'てい': 6144,
- u'てき': 3640, u'てく': 2551, u'ては': -3110, u'ても': -3065, u'でい': 2666,
- u'でき': -1528, u'でし': -3828, u'です': -4761, u'でも': -4203, u'とい': 1890,
- u'とこ': -1746, u'とと': -2279, u'との': 720, u'とみ': 5168, u'とも': -3941,
- u'ない': -2488, u'なが': -1313, u'など': -6509, u'なの': 2614, u'なん': 3099,
- u'にお': -1615, u'にし': 2748, u'にな': 2454, u'によ': -7236, u'に対': -14943,
- u'に従': -4688, u'に関': -11388, u'のか': 2093, u'ので': -7059, u'のに': -6041,
- u'のの': -6125, u'はい': 1073, u'はが': -1033, u'はず': -2532, u'ばれ': 1813,
- u'まし': -1316, u'まで': -6621, u'まれ': 5409, u'めて': -3153, u'もい': 2230,
- u'もの': -10713, u'らか': -944, u'らし': -1611, u'らに': -1897, u'りし': 651,
- u'りま': 1620, u'れた': 4270, u'れて': 849, u'れば': 4114, u'ろう': 6067,
- u'われ': 7901, u'を通': -11877, u'んだ': 728, u'んな': -4115, u'一人': 602,
- u'一方': -1375, u'一日': 970, u'一部': -1051, u'上が': -4479, u'会社': -1116,
- u'出て': 2163, u'分の': -7758, u'同党': 970, u'同日': -913, u'大阪': -2471,
- u'委員': -1250, u'少な': -1050, u'年度': -8669, u'年間': -1626, u'府県': -2363,
- u'手権': -1982, u'新聞': -4066, u'日新': -722, u'日本': -7068, u'日米': 3372,
- u'曜日': -601, u'朝鮮': -2355, u'本人': -2697, u'東京': -1543, u'然と': -1384,
- u'社会': -1276, u'立て': -990, u'第に': -1612, u'米国': -4268, u'11': -669}
- BW3__ = {u'あた': -2194, u'あり': 719, u'ある': 3846, u'い.': -1185, u'い。': -1185,
- u'いい': 5308, u'いえ': 2079, u'いく': 3029, u'いた': 2056, u'いっ': 1883,
- u'いる': 5600, u'いわ': 1527, u'うち': 1117, u'うと': 4798, u'えと': 1454,
- u'か.': 2857, u'か。': 2857, u'かけ': -743, u'かっ': -4098, u'かに': -669,
- u'から': 6520, u'かり': -2670, u'が,': 1816, u'が、': 1816, u'がき': -4855,
- u'がけ': -1127, u'がっ': -913, u'がら': -4977, u'がり': -2064, u'きた': 1645,
- u'けど': 1374, u'こと': 7397, u'この': 1542, u'ころ': -2757, u'さい': -714,
- u'さを': 976, u'し,': 1557, u'し、': 1557, u'しい': -3714, u'した': 3562,
- u'して': 1449, u'しな': 2608, u'しま': 1200, u'す.': -1310, u'す。': -1310,
- u'する': 6521, u'ず,': 3426, u'ず、': 3426, u'ずに': 841, u'そう': 428,
- u'た.': 8875, u'た。': 8875, u'たい': -594, u'たの': 812, u'たり': -1183,
- u'たる': -853, u'だ.': 4098, u'だ。': 4098, u'だっ': 1004, u'った': -4748,
- u'って': 300, u'てい': 6240, u'てお': 855, u'ても': 302, u'です': 1437,
- u'でに': -1482, u'では': 2295, u'とう': -1387, u'とし': 2266, u'との': 541,
- u'とも': -3543, u'どう': 4664, u'ない': 1796, u'なく': -903, u'など': 2135,
- u'に,': -1021, u'に、': -1021, u'にし': 1771, u'にな': 1906, u'には': 2644,
- u'の,': -724, u'の、': -724, u'の子': -1000, u'は,': 1337, u'は、': 1337,
- u'べき': 2181, u'まし': 1113, u'ます': 6943, u'まっ': -1549, u'まで': 6154,
- u'まれ': -793, u'らし': 1479, u'られ': 6820, u'るる': 3818, u'れ,': 854,
- u'れ、': 854, u'れた': 1850, u'れて': 1375, u'れば': -3246, u'れる': 1091,
- u'われ': -605, u'んだ': 606, u'んで': 798, u'カ月': 990, u'会議': 860,
- u'入り': 1232, u'大会': 2217, u'始め': 1681, u'市': 965, u'新聞': -5055,
- u'日,': 974, u'日、': 974, u'社会': 2024, u'カ月': 990}
- TC1__ = {u'AAA': 1093, u'HHH': 1029, u'HHM': 580, u'HII': 998, u'HOH': -390,
- u'HOM': -331, u'IHI': 1169, u'IOH': -142, u'IOI': -1015, u'IOM': 467,
- u'MMH': 187, u'OOI': -1832}
- TC2__ = {u'HHO': 2088, u'HII': -1023, u'HMM': -1154, u'IHI': -1965,
- u'KKH': 703, u'OII': -2649}
- TC3__ = {u'AAA': -294, u'HHH': 346, u'HHI': -341, u'HII': -1088, u'HIK': 731,
- u'HOH': -1486, u'IHH': 128, u'IHI': -3041, u'IHO': -1935, u'IIH': -825,
- u'IIM': -1035, u'IOI': -542, u'KHH': -1216, u'KKA': 491, u'KKH': -1217,
- u'KOK': -1009, u'MHH': -2694, u'MHM': -457, u'MHO': 123, u'MMH': -471,
- u'NNH': -1689, u'NNO': 662, u'OHO': -3393}
- TC4__ = {u'HHH': -203, u'HHI': 1344, u'HHK': 365, u'HHM': -122, u'HHN': 182,
- u'HHO': 669, u'HIH': 804, u'HII': 679, u'HOH': 446, u'IHH': 695,
- u'IHO': -2324, u'IIH': 321, u'III': 1497, u'IIO': 656, u'IOO': 54,
- u'KAK': 4845, u'KKA': 3386, u'KKK': 3065, u'MHH': -405, u'MHI': 201,
- u'MMH': -241, u'MMM': 661, u'MOM': 841}
- TQ1__ = {u'BHHH': -227, u'BHHI': 316, u'BHIH': -132, u'BIHH': 60, u'BIII': 1595,
- u'BNHH': -744, u'BOHH': 225, u'BOOO': -908, u'OAKK': 482, u'OHHH': 281,
- u'OHIH': 249, u'OIHI': 200, u'OIIH': -68}
- TQ2__ = {u'BIHH': -1401, u'BIII': -1033, u'BKAK': -543, u'BOOO': -5591}
- TQ3__ = {u'BHHH': 478, u'BHHM': -1073, u'BHIH': 222, u'BHII': -504, u'BIIH': -116,
- u'BIII': -105, u'BMHI': -863, u'BMHM': -464, u'BOMH': 620, u'OHHH': 346,
- u'OHHI': 1729, u'OHII': 997, u'OHMH': 481, u'OIHH': 623, u'OIIH': 1344,
- u'OKAK': 2792, u'OKHH': 587, u'OKKA': 679, u'OOHH': 110, u'OOII': -685}
- TQ4__ = {u'BHHH': -721, u'BHHM': -3604, u'BHII': -966, u'BIIH': -607, u'BIII': -2181,
- u'OAAA': -2763, u'OAKK': 180, u'OHHH': -294, u'OHHI': 2446, u'OHHO': 480,
- u'OHIH': -1573, u'OIHH': 1935, u'OIHI': -493, u'OIIH': 626, u'OIII': -4007,
- u'OKAK': -8156}
- TW1__ = {u'につい': -4681, u'東京都': 2026}
- TW2__ = {u'ある程': -2049, u'いった': -1256, u'ころが': -2434, u'しょう': 3873,
- u'その後': -4430, u'だって': -1049, u'ていた': 1833, u'として': -4657,
- u'ともに': -4517, u'もので': 1882, u'一気に': -792, u'初めて': -1512,
- u'同時に': -8097, u'大きな': -1255, u'対して': -2721, u'社会党': -3216}
- TW3__ = {u'いただ': -1734, u'してい': 1314, u'として': -4314, u'につい': -5483,
- u'にとっ': -5989, u'に当た': -6247, u'ので,': -727, u'ので、': -727,
- u'のもの': -600, u'れから': -3752, u'十二月': -2287}
- TW4__ = {u'いう.': 8576, u'いう。': 8576, u'からな': -2348, u'してい': 2958,
- u'たが,': 1516, u'たが、': 1516, u'ている': 1538, u'という': 1349,
- u'ました': 5543, u'ません': 1097, u'ようと': -4258, u'よると': 5865}
- UC1__ = {u'A': 484, u'K': 93, u'M': 645, u'O': -505}
- UC2__ = {u'A': 819, u'H': 1059, u'I': 409, u'M': 3987, u'N': 5775, u'O': 646}
- UC3__ = {u'A': -1370, u'I': 2311}
- UC4__ = {u'A': -2643, u'H': 1809, u'I': -1032, u'K': -3450, u'M': 3565,
- u'N': 3876, u'O': 6646}
- UC5__ = {u'H': 313, u'I': -1238, u'K': -799, u'M': 539, u'O': -831}
- UC6__ = {u'H': -506, u'I': -253, u'K': 87, u'M': 247, u'O': -387}
- UP1__ = {u'O': -214}
- UP2__ = {u'B': 69, u'O': 935}
- UP3__ = {u'B': 189}
- UQ1__ = {u'BH': 21, u'BI': -12, u'BK': -99, u'BN': 142, u'BO': -56, u'OH': -95,
- u'OI': 477, u'OK': 410, u'OO': -2422}
- UQ2__ = {u'BH': 216, u'BI': 113, u'OK': 1759}
- UQ3__ = {u'BA': -479, u'BH': 42, u'BI': 1913, u'BK': -7198, u'BM': 3160,
- u'BN': 6427, u'BO': 14761, u'OI': -827, u'ON': -3212}
- UW1__ = {u',': 156, u'、': 156, u'「': -463, u'あ': -941, u'う': -127, u'が': -553,
- u'き': 121, u'こ': 505, u'で': -201, u'と': -547, u'ど': -123, u'に': -789,
- u'の': -185, u'は': -847, u'も': -466, u'や': -470, u'よ': 182, u'ら': -292,
- u'り': 208, u'れ': 169, u'を': -446, u'ん': -137, u'・': -135, u'主': -402,
- u'京': -268, u'区': -912, u'午': 871, u'国': -460, u'大': 561, u'委': 729,
- u'市': -411, u'日': -141, u'理': 361, u'生': -408, u'県': -386, u'都': -718,
- u'「': -463, u'・': -135}
- UW2__ = {u',': -829, u'、': -829, u'〇': 892, u'「': -645, u'」': 3145, u'あ': -538,
- u'い': 505, u'う': 134, u'お': -502, u'か': 1454, u'が': -856, u'く': -412,
- u'こ': 1141, u'さ': 878, u'ざ': 540, u'し': 1529, u'す': -675, u'せ': 300,
- u'そ': -1011, u'た': 188, u'だ': 1837, u'つ': -949, u'て': -291, u'で': -268,
- u'と': -981, u'ど': 1273, u'な': 1063, u'に': -1764, u'の': 130, u'は': -409,
- u'ひ': -1273, u'べ': 1261, u'ま': 600, u'も': -1263, u'や': -402, u'よ': 1639,
- u'り': -579, u'る': -694, u'れ': 571, u'を': -2516, u'ん': 2095, u'ア': -587,
- u'カ': 306, u'キ': 568, u'ッ': 831, u'三': -758, u'不': -2150, u'世': -302,
- u'中': -968, u'主': -861, u'事': 492, u'人': -123, u'会': 978, u'保': 362,
- u'入': 548, u'初': -3025, u'副': -1566, u'北': -3414, u'区': -422, u'大': -1769,
- u'天': -865, u'太': -483, u'子': -1519, u'学': 760, u'実': 1023, u'小': -2009,
- u'市': -813, u'年': -1060, u'強': 1067, u'手': -1519, u'揺': -1033, u'政': 1522,
- u'文': -1355, u'新': -1682, u'日': -1815, u'明': -1462, u'最': -630, u'朝': -1843,
- u'本': -1650, u'東': -931, u'果': -665, u'次': -2378, u'民': -180, u'気': -1740,
- u'理': 752, u'発': 529, u'目': -1584, u'相': -242, u'県': -1165, u'立': -763,
- u'第': 810, u'米': 509, u'自': -1353, u'行': 838, u'西': -744, u'見': -3874,
- u'調': 1010, u'議': 1198, u'込': 3041, u'開': 1758, u'間': -1257, u'「': -645,
- u'」': 3145, u'ッ': 831, u'ア': -587, u'カ': 306, u'キ': 568}
- UW3__ = {u',': 4889, u'1': -800, u'−': -1723, u'、': 4889, u'々': -2311, u'〇': 5827,
- u'」': 2670, u'〓': -3573, u'あ': -2696, u'い': 1006, u'う': 2342, u'え': 1983,
- u'お': -4864, u'か': -1163, u'が': 3271, u'く': 1004, u'け': 388, u'げ': 401,
- u'こ': -3552, u'ご': -3116, u'さ': -1058, u'し': -395, u'す': 584, u'せ': 3685,
- u'そ': -5228, u'た': 842, u'ち': -521, u'っ': -1444, u'つ': -1081, u'て': 6167,
- u'で': 2318, u'と': 1691, u'ど': -899, u'な': -2788, u'に': 2745, u'の': 4056,
- u'は': 4555, u'ひ': -2171, u'ふ': -1798, u'へ': 1199, u'ほ': -5516, u'ま': -4384,
- u'み': -120, u'め': 1205, u'も': 2323, u'や': -788, u'よ': -202, u'ら': 727,
- u'り': 649, u'る': 5905, u'れ': 2773, u'わ': -1207, u'を': 6620, u'ん': -518,
- u'ア': 551, u'グ': 1319, u'ス': 874, u'ッ': -1350, u'ト': 521, u'ム': 1109,
- u'ル': 1591, u'ロ': 2201, u'ン': 278, u'・': -3794, u'一': -1619, u'下': -1759,
- u'世': -2087, u'両': 3815, u'中': 653, u'主': -758, u'予': -1193, u'二': 974,
- u'人': 2742, u'今': 792, u'他': 1889, u'以': -1368, u'低': 811, u'何': 4265,
- u'作': -361, u'保': -2439, u'元': 4858, u'党': 3593, u'全': 1574, u'公': -3030,
- u'六': 755, u'共': -1880, u'円': 5807, u'再': 3095, u'分': 457, u'初': 2475,
- u'別': 1129, u'前': 2286, u'副': 4437, u'力': 365, u'動': -949, u'務': -1872,
- u'化': 1327, u'北': -1038, u'区': 4646, u'千': -2309, u'午': -783, u'協': -1006,
- u'口': 483, u'右': 1233, u'各': 3588, u'合': -241, u'同': 3906, u'和': -837,
- u'員': 4513, u'国': 642, u'型': 1389, u'場': 1219, u'外': -241, u'妻': 2016,
- u'学': -1356, u'安': -423, u'実': -1008, u'家': 1078, u'小': -513, u'少': -3102,
- u'州': 1155, u'市': 3197, u'平': -1804, u'年': 2416, u'広': -1030, u'府': 1605,
- u'度': 1452, u'建': -2352, u'当': -3885, u'得': 1905, u'思': -1291, u'性': 1822,
- u'戸': -488, u'指': -3973, u'政': -2013, u'教': -1479, u'数': 3222, u'文': -1489,
- u'新': 1764, u'日': 2099, u'旧': 5792, u'昨': -661, u'時': -1248, u'曜': -951,
- u'最': -937, u'月': 4125, u'期': 360, u'李': 3094, u'村': 364, u'東': -805,
- u'核': 5156, u'森': 2438, u'業': 484, u'氏': 2613, u'民': -1694, u'決': -1073,
- u'法': 1868, u'海': -495, u'無': 979, u'物': 461, u'特': -3850, u'生': -273,
- u'用': 914, u'町': 1215, u'的': 7313, u'直': -1835, u'省': 792, u'県': 6293,
- u'知': -1528, u'私': 4231, u'税': 401, u'立': -960, u'第': 1201, u'米': 7767,
- u'系': 3066, u'約': 3663, u'級': 1384, u'統': -4229, u'総': 1163, u'線': 1255,
- u'者': 6457, u'能': 725, u'自': -2869, u'英': 785, u'見': 1044, u'調': -562,
- u'財': -733, u'費': 1777, u'車': 1835, u'軍': 1375, u'込': -1504, u'通': -1136,
- u'選': -681, u'郎': 1026, u'郡': 4404, u'部': 1200, u'金': 2163, u'長': 421,
- u'開': -1432, u'間': 1302, u'関': -1282, u'雨': 2009, u'電': -1045, u'非': 2066,
- u'駅': 1620, u'1': -800, u'」': 2670, u'・': -3794, u'ッ': -1350, u'ア': 551,
- u'グ': 1319, u'ス': 874, u'ト': 521, u'ム': 1109, u'ル': 1591, u'ロ': 2201, u'ン': 278}
- UW4__ = {u',': 3930, u'.': 3508, u'―': -4841, u'、': 3930, u'。': 3508, u'〇': 4999,
- u'「': 1895, u'」': 3798, u'〓': -5156, u'あ': 4752, u'い': -3435, u'う': -640,
- u'え': -2514, u'お': 2405, u'か': 530, u'が': 6006, u'き': -4482, u'ぎ': -3821,
- u'く': -3788, u'け': -4376, u'げ': -4734, u'こ': 2255, u'ご': 1979, u'さ': 2864,
- u'し': -843, u'じ': -2506, u'す': -731, u'ず': 1251, u'せ': 181, u'そ': 4091,
- u'た': 5034, u'だ': 5408, u'ち': -3654, u'っ': -5882, u'つ': -1659, u'て': 3994,
- u'で': 7410, u'と': 4547, u'な': 5433, u'に': 6499, u'ぬ': 1853, u'ね': 1413,
- u'の': 7396, u'は': 8578, u'ば': 1940, u'ひ': 4249, u'び': -4134, u'ふ': 1345,
- u'へ': 6665, u'べ': -744, u'ほ': 1464, u'ま': 1051, u'み': -2082, u'む': -882,
- u'め': -5046, u'も': 4169, u'ゃ': -2666, u'や': 2795, u'ょ': -1544, u'よ': 3351,
- u'ら': -2922, u'り': -9726, u'る': -14896, u'れ': -2613, u'ろ': -4570,
- u'わ': -1783, u'を': 13150, u'ん': -2352, u'カ': 2145, u'コ': 1789, u'セ': 1287,
- u'ッ': -724, u'ト': -403, u'メ': -1635, u'ラ': -881, u'リ': -541, u'ル': -856,
- u'ン': -3637, u'・': -4371, u'ー': -11870, u'一': -2069, u'中': 2210, u'予': 782,
- u'事': -190, u'井': -1768, u'人': 1036, u'以': 544, u'会': 950, u'体': -1286,
- u'作': 530, u'側': 4292, u'先': 601, u'党': -2006, u'共': -1212, u'内': 584,
- u'円': 788, u'初': 1347, u'前': 1623, u'副': 3879, u'力': -302, u'動': -740,
- u'務': -2715, u'化': 776, u'区': 4517, u'協': 1013, u'参': 1555, u'合': -1834,
- u'和': -681, u'員': -910, u'器': -851, u'回': 1500, u'国': -619, u'園': -1200,
- u'地': 866, u'場': -1410, u'塁': -2094, u'士': -1413, u'多': 1067, u'大': 571,
- u'子': -4802, u'学': -1397, u'定': -1057, u'寺': -809, u'小': 1910, u'屋': -1328,
- u'山': -1500, u'島': -2056, u'川': -2667, u'市': 2771, u'年': 374, u'庁': -4556,
- u'後': 456, u'性': 553, u'感': 916, u'所': -1566, u'支': 856, u'改': 787,
- u'政': 2182, u'教': 704, u'文': 522, u'方': -856, u'日': 1798, u'時': 1829,
- u'最': 845, u'月': -9066, u'木': -485, u'来': -442, u'校': -360, u'業': -1043,
- u'氏': 5388, u'民': -2716, u'気': -910, u'沢': -939, u'済': -543, u'物': -735,
- u'率': 672, u'球': -1267, u'生': -1286, u'産': -1101, u'田': -2900, u'町': 1826,
- u'的': 2586, u'目': 922, u'省': -3485, u'県': 2997, u'空': -867, u'立': -2112,
- u'第': 788, u'米': 2937, u'系': 786, u'約': 2171, u'経': 1146, u'統': -1169,
- u'総': 940, u'線': -994, u'署': 749, u'者': 2145, u'能': -730, u'般': -852,
- u'行': -792, u'規': 792, u'警': -1184, u'議': -244, u'谷': -1000, u'賞': 730,
- u'車': -1481, u'軍': 1158, u'輪': -1433, u'込': -3370, u'近': 929, u'道': -1291,
- u'選': 2596, u'郎': -4866, u'都': 1192, u'野': -1100, u'銀': -2213, u'長': 357,
- u'間': -2344, u'院': -2297, u'際': -2604, u'電': -878, u'領': -1659, u'題': -792,
- u'館': -1984, u'首': 1749, u'高': 2120, u'「': 1895, u'」': 3798, u'・': -4371,
- u'ッ': -724, u'ー': -11870, u'カ': 2145, u'コ': 1789, u'セ': 1287, u'ト': -403,
- u'メ': -1635, u'ラ': -881, u'リ': -541, u'ル': -856, u'ン': -3637}
- UW5__ = {u',': 465, u'.': -299, u'1': -514, u'E2': -32768, u']': -2762, u'、': 465,
- u'。': -299, u'「': 363, u'あ': 1655, u'い': 331, u'う': -503, u'え': 1199,
- u'お': 527, u'か': 647, u'が': -421, u'き': 1624, u'ぎ': 1971, u'く': 312,
- u'げ': -983, u'さ': -1537, u'し': -1371, u'す': -852, u'だ': -1186, u'ち': 1093,
- u'っ': 52, u'つ': 921, u'て': -18, u'で': -850, u'と': -127, u'ど': 1682,
- u'な': -787, u'に': -1224, u'の': -635, u'は': -578, u'べ': 1001, u'み': 502,
- u'め': 865, u'ゃ': 3350, u'ょ': 854, u'り': -208, u'る': 429, u'れ': 504,
- u'わ': 419, u'を': -1264, u'ん': 327, u'イ': 241, u'ル': 451, u'ン': -343,
- u'中': -871, u'京': 722, u'会': -1153, u'党': -654, u'務': 3519, u'区': -901,
- u'告': 848, u'員': 2104, u'大': -1296, u'学': -548, u'定': 1785, u'嵐': -1304,
- u'市': -2991, u'席': 921, u'年': 1763, u'思': 872, u'所': -814, u'挙': 1618,
- u'新': -1682, u'日': 218, u'月': -4353, u'査': 932, u'格': 1356, u'機': -1508,
- u'氏': -1347, u'田': 240, u'町': -3912, u'的': -3149, u'相': 1319, u'省': -1052,
- u'県': -4003, u'研': -997, u'社': -278, u'空': -813, u'統': 1955, u'者': -2233,
- u'表': 663, u'語': -1073, u'議': 1219, u'選': -1018, u'郎': -368, u'長': 786,
- u'間': 1191, u'題': 2368, u'館': -689, u'1': -514, u'E2': -32768, u'「': 363,
- u'イ': 241, u'ル': 451, u'ン': -343}
- UW6__ = {u',': 227, u'.': 808, u'1': -270, u'E1': 306, u'、': 227, u'。': 808,
- u'あ': -307, u'う': 189, u'か': 241, u'が': -73, u'く': -121, u'こ': -200,
- u'じ': 1782, u'す': 383, u'た': -428, u'っ': 573, u'て': -1014, u'で': 101,
- u'と': -105, u'な': -253, u'に': -149, u'の': -417, u'は': -236, u'も': -206,
- u'り': 187, u'る': -135, u'を': 195, u'ル': -673, u'ン': -496, u'一': -277,
- u'中': 201, u'件': -800, u'会': 624, u'前': 302, u'区': 1792, u'員': -1212,
- u'委': 798, u'学': -960, u'市': 887, u'広': -695, u'後': 535, u'業': -697,
- u'相': 753, u'社': -507, u'福': 974, u'空': -822, u'者': 1811, u'連': 463,
- u'郎': 1082, u'1': -270, u'E1': 306, u'ル': -673, u'ン': -496}
+ BC1__ = {'HH': 6, 'II': 2461, 'KH': 406, 'OH': -1378}
+ BC2__ = {'AA': -3267, 'AI': 2744, 'AN': -878, 'HH': -4070, 'HM': -1711,
+ 'HN': 4012, 'HO': 3761, 'IA': 1327, 'IH': -1184, 'II': -1332,
+ 'IK': 1721, 'IO': 5492, 'KI': 3831, 'KK': -8741, 'MH': -3132,
+ 'MK': 3334, 'OO': -2920}
+ BC3__ = {'HH': 996, 'HI': 626, 'HK': -721, 'HN': -1307, 'HO': -836, 'IH': -301,
+ 'KK': 2762, 'MK': 1079, 'MM': 4034, 'OA': -1652, 'OH': 266}
+ BP1__ = {'BB': 295, 'OB': 304, 'OO': -125, 'UB': 352}
+ BP2__ = {'BO': 60, 'OO': -1762}
+ BQ1__ = {'BHH': 1150, 'BHM': 1521, 'BII': -1158, 'BIM': 886, 'BMH': 1208,
+ 'BNH': 449, 'BOH': -91, 'BOO': -2597, 'OHI': 451, 'OIH': -296,
+ 'OKA': 1851, 'OKH': -1020, 'OKK': 904, 'OOO': 2965}
+ BQ2__ = {'BHH': 118, 'BHI': -1159, 'BHM': 466, 'BIH': -919, 'BKK': -1720,
+ 'BKO': 864, 'OHH': -1139, 'OHM': -181, 'OIH': 153, 'UHI': -1146}
+ BQ3__ = {'BHH': -792, 'BHI': 2664, 'BII': -299, 'BKI': 419, 'BMH': 937,
+ 'BMM': 8335, 'BNN': 998, 'BOH': 775, 'OHH': 2174, 'OHM': 439, 'OII': 280,
+ 'OKH': 1798, 'OKI': -793, 'OKO': -2242, 'OMH': -2402, 'OOO': 11699}
+ BQ4__ = {'BHH': -3895, 'BIH': 3761, 'BII': -4654, 'BIK': 1348, 'BKK': -1806,
+ 'BMI': -3385, 'BOO': -12396, 'OAH': 926, 'OHH': 266, 'OHK': -2036,
+ 'ONN': -973}
+ BW1__ = {',と': 660, ',同': 727, 'B1あ': 1404, 'B1同': 542, '、と': 660,
+ '、同': 727, '」と': 1682, 'あっ': 1505, 'いう': 1743, 'いっ': -2055,
+ 'いる': 672, 'うし': -4817, 'うん': 665, 'から': 3472, 'がら': 600,
+ 'こう': -790, 'こと': 2083, 'こん': -1262, 'さら': -4143, 'さん': 4573,
+ 'した': 2641, 'して': 1104, 'すで': -3399, 'そこ': 1977, 'それ': -871,
+ 'たち': 1122, 'ため': 601, 'った': 3463, 'つい': -802, 'てい': 805,
+ 'てき': 1249, 'でき': 1127, 'です': 3445, 'では': 844, 'とい': -4915,
+ 'とみ': 1922, 'どこ': 3887, 'ない': 5713, 'なっ': 3015, 'など': 7379,
+ 'なん': -1113, 'にし': 2468, 'には': 1498, 'にも': 1671, 'に対': -912,
+ 'の一': -501, 'の中': 741, 'ませ': 2448, 'まで': 1711, 'まま': 2600,
+ 'まる': -2155, 'やむ': -1947, 'よっ': -2565, 'れた': 2369, 'れで': -913,
+ 'をし': 1860, 'を見': 731, '亡く': -1886, '京都': 2558, '取り': -2784,
+ '大き': -2604, '大阪': 1497, '平方': -2314, '引き': -1336, '日本': -195,
+ '本当': -2423, '毎日': -2113, '目指': -724, 'B1あ': 1404, 'B1同': 542,
+ '」と': 1682}
+ BW2__ = {'..': -11822, '11': -669, '――': -5730, '−−': -13175, 'いう': -1609,
+ 'うか': 2490, 'かし': -1350, 'かも': -602, 'から': -7194, 'かれ': 4612,
+ 'がい': 853, 'がら': -3198, 'きた': 1941, 'くな': -1597, 'こと': -8392,
+ 'この': -4193, 'させ': 4533, 'され': 13168, 'さん': -3977, 'しい': -1819,
+ 'しか': -545, 'した': 5078, 'して': 972, 'しな': 939, 'その': -3744,
+ 'たい': -1253, 'たた': -662, 'ただ': -3857, 'たち': -786, 'たと': 1224,
+ 'たは': -939, 'った': 4589, 'って': 1647, 'っと': -2094, 'てい': 6144,
+ 'てき': 3640, 'てく': 2551, 'ては': -3110, 'ても': -3065, 'でい': 2666,
+ 'でき': -1528, 'でし': -3828, 'です': -4761, 'でも': -4203, 'とい': 1890,
+ 'とこ': -1746, 'とと': -2279, 'との': 720, 'とみ': 5168, 'とも': -3941,
+ 'ない': -2488, 'なが': -1313, 'など': -6509, 'なの': 2614, 'なん': 3099,
+ 'にお': -1615, 'にし': 2748, 'にな': 2454, 'によ': -7236, 'に対': -14943,
+ 'に従': -4688, 'に関': -11388, 'のか': 2093, 'ので': -7059, 'のに': -6041,
+ 'のの': -6125, 'はい': 1073, 'はが': -1033, 'はず': -2532, 'ばれ': 1813,
+ 'まし': -1316, 'まで': -6621, 'まれ': 5409, 'めて': -3153, 'もい': 2230,
+ 'もの': -10713, 'らか': -944, 'らし': -1611, 'らに': -1897, 'りし': 651,
+ 'りま': 1620, 'れた': 4270, 'れて': 849, 'れば': 4114, 'ろう': 6067,
+ 'われ': 7901, 'を通': -11877, 'んだ': 728, 'んな': -4115, '一人': 602,
+ '一方': -1375, '一日': 970, '一部': -1051, '上が': -4479, '会社': -1116,
+ '出て': 2163, '分の': -7758, '同党': 970, '同日': -913, '大阪': -2471,
+ '委員': -1250, '少な': -1050, '年度': -8669, '年間': -1626, '府県': -2363,
+ '手権': -1982, '新聞': -4066, '日新': -722, '日本': -7068, '日米': 3372,
+ '曜日': -601, '朝鮮': -2355, '本人': -2697, '東京': -1543, '然と': -1384,
+ '社会': -1276, '立て': -990, '第に': -1612, '米国': -4268, '11': -669}
+ BW3__ = {'あた': -2194, 'あり': 719, 'ある': 3846, 'い.': -1185, 'い。': -1185,
+ 'いい': 5308, 'いえ': 2079, 'いく': 3029, 'いた': 2056, 'いっ': 1883,
+ 'いる': 5600, 'いわ': 1527, 'うち': 1117, 'うと': 4798, 'えと': 1454,
+ 'か.': 2857, 'か。': 2857, 'かけ': -743, 'かっ': -4098, 'かに': -669,
+ 'から': 6520, 'かり': -2670, 'が,': 1816, 'が、': 1816, 'がき': -4855,
+ 'がけ': -1127, 'がっ': -913, 'がら': -4977, 'がり': -2064, 'きた': 1645,
+ 'けど': 1374, 'こと': 7397, 'この': 1542, 'ころ': -2757, 'さい': -714,
+ 'さを': 976, 'し,': 1557, 'し、': 1557, 'しい': -3714, 'した': 3562,
+ 'して': 1449, 'しな': 2608, 'しま': 1200, 'す.': -1310, 'す。': -1310,
+ 'する': 6521, 'ず,': 3426, 'ず、': 3426, 'ずに': 841, 'そう': 428,
+ 'た.': 8875, 'た。': 8875, 'たい': -594, 'たの': 812, 'たり': -1183,
+ 'たる': -853, 'だ.': 4098, 'だ。': 4098, 'だっ': 1004, 'った': -4748,
+ 'って': 300, 'てい': 6240, 'てお': 855, 'ても': 302, 'です': 1437,
+ 'でに': -1482, 'では': 2295, 'とう': -1387, 'とし': 2266, 'との': 541,
+ 'とも': -3543, 'どう': 4664, 'ない': 1796, 'なく': -903, 'など': 2135,
+ 'に,': -1021, 'に、': -1021, 'にし': 1771, 'にな': 1906, 'には': 2644,
+ 'の,': -724, 'の、': -724, 'の子': -1000, 'は,': 1337, 'は、': 1337,
+ 'べき': 2181, 'まし': 1113, 'ます': 6943, 'まっ': -1549, 'まで': 6154,
+ 'まれ': -793, 'らし': 1479, 'られ': 6820, 'るる': 3818, 'れ,': 854,
+ 'れ、': 854, 'れた': 1850, 'れて': 1375, 'れば': -3246, 'れる': 1091,
+ 'われ': -605, 'んだ': 606, 'んで': 798, 'カ月': 990, '会議': 860,
+ '入り': 1232, '大会': 2217, '始め': 1681, '市': 965, '新聞': -5055,
+ '日,': 974, '日、': 974, '社会': 2024, 'カ月': 990}
+ TC1__ = {'AAA': 1093, 'HHH': 1029, 'HHM': 580, 'HII': 998, 'HOH': -390,
+ 'HOM': -331, 'IHI': 1169, 'IOH': -142, 'IOI': -1015, 'IOM': 467,
+ 'MMH': 187, 'OOI': -1832}
+ TC2__ = {'HHO': 2088, 'HII': -1023, 'HMM': -1154, 'IHI': -1965,
+ 'KKH': 703, 'OII': -2649}
+ TC3__ = {'AAA': -294, 'HHH': 346, 'HHI': -341, 'HII': -1088, 'HIK': 731,
+ 'HOH': -1486, 'IHH': 128, 'IHI': -3041, 'IHO': -1935, 'IIH': -825,
+ 'IIM': -1035, 'IOI': -542, 'KHH': -1216, 'KKA': 491, 'KKH': -1217,
+ 'KOK': -1009, 'MHH': -2694, 'MHM': -457, 'MHO': 123, 'MMH': -471,
+ 'NNH': -1689, 'NNO': 662, 'OHO': -3393}
+ TC4__ = {'HHH': -203, 'HHI': 1344, 'HHK': 365, 'HHM': -122, 'HHN': 182,
+ 'HHO': 669, 'HIH': 804, 'HII': 679, 'HOH': 446, 'IHH': 695,
+ 'IHO': -2324, 'IIH': 321, 'III': 1497, 'IIO': 656, 'IOO': 54,
+ 'KAK': 4845, 'KKA': 3386, 'KKK': 3065, 'MHH': -405, 'MHI': 201,
+ 'MMH': -241, 'MMM': 661, 'MOM': 841}
+ TQ1__ = {'BHHH': -227, 'BHHI': 316, 'BHIH': -132, 'BIHH': 60, 'BIII': 1595,
+ 'BNHH': -744, 'BOHH': 225, 'BOOO': -908, 'OAKK': 482, 'OHHH': 281,
+ 'OHIH': 249, 'OIHI': 200, 'OIIH': -68}
+ TQ2__ = {'BIHH': -1401, 'BIII': -1033, 'BKAK': -543, 'BOOO': -5591}
+ TQ3__ = {'BHHH': 478, 'BHHM': -1073, 'BHIH': 222, 'BHII': -504, 'BIIH': -116,
+ 'BIII': -105, 'BMHI': -863, 'BMHM': -464, 'BOMH': 620, 'OHHH': 346,
+ 'OHHI': 1729, 'OHII': 997, 'OHMH': 481, 'OIHH': 623, 'OIIH': 1344,
+ 'OKAK': 2792, 'OKHH': 587, 'OKKA': 679, 'OOHH': 110, 'OOII': -685}
+ TQ4__ = {'BHHH': -721, 'BHHM': -3604, 'BHII': -966, 'BIIH': -607, 'BIII': -2181,
+ 'OAAA': -2763, 'OAKK': 180, 'OHHH': -294, 'OHHI': 2446, 'OHHO': 480,
+ 'OHIH': -1573, 'OIHH': 1935, 'OIHI': -493, 'OIIH': 626, 'OIII': -4007,
+ 'OKAK': -8156}
+ TW1__ = {'につい': -4681, '東京都': 2026}
+ TW2__ = {'ある程': -2049, 'いった': -1256, 'ころが': -2434, 'しょう': 3873,
+ 'その後': -4430, 'だって': -1049, 'ていた': 1833, 'として': -4657,
+ 'ともに': -4517, 'もので': 1882, '一気に': -792, '初めて': -1512,
+ '同時に': -8097, '大きな': -1255, '対して': -2721, '社会党': -3216}
+ TW3__ = {'いただ': -1734, 'してい': 1314, 'として': -4314, 'につい': -5483,
+ 'にとっ': -5989, 'に当た': -6247, 'ので,': -727, 'ので、': -727,
+ 'のもの': -600, 'れから': -3752, '十二月': -2287}
+ TW4__ = {'いう.': 8576, 'いう。': 8576, 'からな': -2348, 'してい': 2958,
+ 'たが,': 1516, 'たが、': 1516, 'ている': 1538, 'という': 1349,
+ 'ました': 5543, 'ません': 1097, 'ようと': -4258, 'よると': 5865}
+ UC1__ = {'A': 484, 'K': 93, 'M': 645, 'O': -505}
+ UC2__ = {'A': 819, 'H': 1059, 'I': 409, 'M': 3987, 'N': 5775, 'O': 646}
+ UC3__ = {'A': -1370, 'I': 2311}
+ UC4__ = {'A': -2643, 'H': 1809, 'I': -1032, 'K': -3450, 'M': 3565,
+ 'N': 3876, 'O': 6646}
+ UC5__ = {'H': 313, 'I': -1238, 'K': -799, 'M': 539, 'O': -831}
+ UC6__ = {'H': -506, 'I': -253, 'K': 87, 'M': 247, 'O': -387}
+ UP1__ = {'O': -214}
+ UP2__ = {'B': 69, 'O': 935}
+ UP3__ = {'B': 189}
+ UQ1__ = {'BH': 21, 'BI': -12, 'BK': -99, 'BN': 142, 'BO': -56, 'OH': -95,
+ 'OI': 477, 'OK': 410, 'OO': -2422}
+ UQ2__ = {'BH': 216, 'BI': 113, 'OK': 1759}
+ UQ3__ = {'BA': -479, 'BH': 42, 'BI': 1913, 'BK': -7198, 'BM': 3160,
+ 'BN': 6427, 'BO': 14761, 'OI': -827, 'ON': -3212}
+ UW1__ = {',': 156, '、': 156, '「': -463, 'あ': -941, 'う': -127, 'が': -553,
+ 'き': 121, 'こ': 505, 'で': -201, 'と': -547, 'ど': -123, 'に': -789,
+ 'の': -185, 'は': -847, 'も': -466, 'や': -470, 'よ': 182, 'ら': -292,
+ 'り': 208, 'れ': 169, 'を': -446, 'ん': -137, '・': -135, '主': -402,
+ '京': -268, '区': -912, '午': 871, '国': -460, '大': 561, '委': 729,
+ '市': -411, '日': -141, '理': 361, '生': -408, '県': -386, '都': -718,
+ '「': -463, '・': -135}
+ UW2__ = {',': -829, '、': -829, '〇': 892, '「': -645, '」': 3145, 'あ': -538,
+ 'い': 505, 'う': 134, 'お': -502, 'か': 1454, 'が': -856, 'く': -412,
+ 'こ': 1141, 'さ': 878, 'ざ': 540, 'し': 1529, 'す': -675, 'せ': 300,
+ 'そ': -1011, 'た': 188, 'だ': 1837, 'つ': -949, 'て': -291, 'で': -268,
+ 'と': -981, 'ど': 1273, 'な': 1063, 'に': -1764, 'の': 130, 'は': -409,
+ 'ひ': -1273, 'べ': 1261, 'ま': 600, 'も': -1263, 'や': -402, 'よ': 1639,
+ 'り': -579, 'る': -694, 'れ': 571, 'を': -2516, 'ん': 2095, 'ア': -587,
+ 'カ': 306, 'キ': 568, 'ッ': 831, '三': -758, '不': -2150, '世': -302,
+ '中': -968, '主': -861, '事': 492, '人': -123, '会': 978, '保': 362,
+ '入': 548, '初': -3025, '副': -1566, '北': -3414, '区': -422, '大': -1769,
+ '天': -865, '太': -483, '子': -1519, '学': 760, '実': 1023, '小': -2009,
+ '市': -813, '年': -1060, '強': 1067, '手': -1519, '揺': -1033, '政': 1522,
+ '文': -1355, '新': -1682, '日': -1815, '明': -1462, '最': -630, '朝': -1843,
+ '本': -1650, '東': -931, '果': -665, '次': -2378, '民': -180, '気': -1740,
+ '理': 752, '発': 529, '目': -1584, '相': -242, '県': -1165, '立': -763,
+ '第': 810, '米': 509, '自': -1353, '行': 838, '西': -744, '見': -3874,
+ '調': 1010, '議': 1198, '込': 3041, '開': 1758, '間': -1257, '「': -645,
+ '」': 3145, 'ッ': 831, 'ア': -587, 'カ': 306, 'キ': 568}
+ UW3__ = {',': 4889, '1': -800, '−': -1723, '、': 4889, '々': -2311, '〇': 5827,
+ '」': 2670, '〓': -3573, 'あ': -2696, 'い': 1006, 'う': 2342, 'え': 1983,
+ 'お': -4864, 'か': -1163, 'が': 3271, 'く': 1004, 'け': 388, 'げ': 401,
+ 'こ': -3552, 'ご': -3116, 'さ': -1058, 'し': -395, 'す': 584, 'せ': 3685,
+ 'そ': -5228, 'た': 842, 'ち': -521, 'っ': -1444, 'つ': -1081, 'て': 6167,
+ 'で': 2318, 'と': 1691, 'ど': -899, 'な': -2788, 'に': 2745, 'の': 4056,
+ 'は': 4555, 'ひ': -2171, 'ふ': -1798, 'へ': 1199, 'ほ': -5516, 'ま': -4384,
+ 'み': -120, 'め': 1205, 'も': 2323, 'や': -788, 'よ': -202, 'ら': 727,
+ 'り': 649, 'る': 5905, 'れ': 2773, 'わ': -1207, 'を': 6620, 'ん': -518,
+ 'ア': 551, 'グ': 1319, 'ス': 874, 'ッ': -1350, 'ト': 521, 'ム': 1109,
+ 'ル': 1591, 'ロ': 2201, 'ン': 278, '・': -3794, '一': -1619, '下': -1759,
+ '世': -2087, '両': 3815, '中': 653, '主': -758, '予': -1193, '二': 974,
+ '人': 2742, '今': 792, '他': 1889, '以': -1368, '低': 811, '何': 4265,
+ '作': -361, '保': -2439, '元': 4858, '党': 3593, '全': 1574, '公': -3030,
+ '六': 755, '共': -1880, '円': 5807, '再': 3095, '分': 457, '初': 2475,
+ '別': 1129, '前': 2286, '副': 4437, '力': 365, '動': -949, '務': -1872,
+ '化': 1327, '北': -1038, '区': 4646, '千': -2309, '午': -783, '協': -1006,
+ '口': 483, '右': 1233, '各': 3588, '合': -241, '同': 3906, '和': -837,
+ '員': 4513, '国': 642, '型': 1389, '場': 1219, '外': -241, '妻': 2016,
+ '学': -1356, '安': -423, '実': -1008, '家': 1078, '小': -513, '少': -3102,
+ '州': 1155, '市': 3197, '平': -1804, '年': 2416, '広': -1030, '府': 1605,
+ '度': 1452, '建': -2352, '当': -3885, '得': 1905, '思': -1291, '性': 1822,
+ '戸': -488, '指': -3973, '政': -2013, '教': -1479, '数': 3222, '文': -1489,
+ '新': 1764, '日': 2099, '旧': 5792, '昨': -661, '時': -1248, '曜': -951,
+ '最': -937, '月': 4125, '期': 360, '李': 3094, '村': 364, '東': -805,
+ '核': 5156, '森': 2438, '業': 484, '氏': 2613, '民': -1694, '決': -1073,
+ '法': 1868, '海': -495, '無': 979, '物': 461, '特': -3850, '生': -273,
+ '用': 914, '町': 1215, '的': 7313, '直': -1835, '省': 792, '県': 6293,
+ '知': -1528, '私': 4231, '税': 401, '立': -960, '第': 1201, '米': 7767,
+ '系': 3066, '約': 3663, '級': 1384, '統': -4229, '総': 1163, '線': 1255,
+ '者': 6457, '能': 725, '自': -2869, '英': 785, '見': 1044, '調': -562,
+ '財': -733, '費': 1777, '車': 1835, '軍': 1375, '込': -1504, '通': -1136,
+ '選': -681, '郎': 1026, '郡': 4404, '部': 1200, '金': 2163, '長': 421,
+ '開': -1432, '間': 1302, '関': -1282, '雨': 2009, '電': -1045, '非': 2066,
+ '駅': 1620, '1': -800, '」': 2670, '・': -3794, 'ッ': -1350, 'ア': 551,
+ 'グ': 1319, 'ス': 874, 'ト': 521, 'ム': 1109, 'ル': 1591, 'ロ': 2201, 'ン': 278}
+ UW4__ = {',': 3930, '.': 3508, '―': -4841, '、': 3930, '。': 3508, '〇': 4999,
+ '「': 1895, '」': 3798, '〓': -5156, 'あ': 4752, 'い': -3435, 'う': -640,
+ 'え': -2514, 'お': 2405, 'か': 530, 'が': 6006, 'き': -4482, 'ぎ': -3821,
+ 'く': -3788, 'け': -4376, 'げ': -4734, 'こ': 2255, 'ご': 1979, 'さ': 2864,
+ 'し': -843, 'じ': -2506, 'す': -731, 'ず': 1251, 'せ': 181, 'そ': 4091,
+ 'た': 5034, 'だ': 5408, 'ち': -3654, 'っ': -5882, 'つ': -1659, 'て': 3994,
+ 'で': 7410, 'と': 4547, 'な': 5433, 'に': 6499, 'ぬ': 1853, 'ね': 1413,
+ 'の': 7396, 'は': 8578, 'ば': 1940, 'ひ': 4249, 'び': -4134, 'ふ': 1345,
+ 'へ': 6665, 'べ': -744, 'ほ': 1464, 'ま': 1051, 'み': -2082, 'む': -882,
+ 'め': -5046, 'も': 4169, 'ゃ': -2666, 'や': 2795, 'ょ': -1544, 'よ': 3351,
+ 'ら': -2922, 'り': -9726, 'る': -14896, 'れ': -2613, 'ろ': -4570,
+ 'わ': -1783, 'を': 13150, 'ん': -2352, 'カ': 2145, 'コ': 1789, 'セ': 1287,
+ 'ッ': -724, 'ト': -403, 'メ': -1635, 'ラ': -881, 'リ': -541, 'ル': -856,
+ 'ン': -3637, '・': -4371, 'ー': -11870, '一': -2069, '中': 2210, '予': 782,
+ '事': -190, '井': -1768, '人': 1036, '以': 544, '会': 950, '体': -1286,
+ '作': 530, '側': 4292, '先': 601, '党': -2006, '共': -1212, '内': 584,
+ '円': 788, '初': 1347, '前': 1623, '副': 3879, '力': -302, '動': -740,
+ '務': -2715, '化': 776, '区': 4517, '協': 1013, '参': 1555, '合': -1834,
+ '和': -681, '員': -910, '器': -851, '回': 1500, '国': -619, '園': -1200,
+ '地': 866, '場': -1410, '塁': -2094, '士': -1413, '多': 1067, '大': 571,
+ '子': -4802, '学': -1397, '定': -1057, '寺': -809, '小': 1910, '屋': -1328,
+ '山': -1500, '島': -2056, '川': -2667, '市': 2771, '年': 374, '庁': -4556,
+ '後': 456, '性': 553, '感': 916, '所': -1566, '支': 856, '改': 787,
+ '政': 2182, '教': 704, '文': 522, '方': -856, '日': 1798, '時': 1829,
+ '最': 845, '月': -9066, '木': -485, '来': -442, '校': -360, '業': -1043,
+ '氏': 5388, '民': -2716, '気': -910, '沢': -939, '済': -543, '物': -735,
+ '率': 672, '球': -1267, '生': -1286, '産': -1101, '田': -2900, '町': 1826,
+ '的': 2586, '目': 922, '省': -3485, '県': 2997, '空': -867, '立': -2112,
+ '第': 788, '米': 2937, '系': 786, '約': 2171, '経': 1146, '統': -1169,
+ '総': 940, '線': -994, '署': 749, '者': 2145, '能': -730, '般': -852,
+ '行': -792, '規': 792, '警': -1184, '議': -244, '谷': -1000, '賞': 730,
+ '車': -1481, '軍': 1158, '輪': -1433, '込': -3370, '近': 929, '道': -1291,
+ '選': 2596, '郎': -4866, '都': 1192, '野': -1100, '銀': -2213, '長': 357,
+ '間': -2344, '院': -2297, '際': -2604, '電': -878, '領': -1659, '題': -792,
+ '館': -1984, '首': 1749, '高': 2120, '「': 1895, '」': 3798, '・': -4371,
+ 'ッ': -724, 'ー': -11870, 'カ': 2145, 'コ': 1789, 'セ': 1287, 'ト': -403,
+ 'メ': -1635, 'ラ': -881, 'リ': -541, 'ル': -856, 'ン': -3637}
+ UW5__ = {',': 465, '.': -299, '1': -514, 'E2': -32768, ']': -2762, '、': 465,
+ '。': -299, '「': 363, 'あ': 1655, 'い': 331, 'う': -503, 'え': 1199,
+ 'お': 527, 'か': 647, 'が': -421, 'き': 1624, 'ぎ': 1971, 'く': 312,
+ 'げ': -983, 'さ': -1537, 'し': -1371, 'す': -852, 'だ': -1186, 'ち': 1093,
+ 'っ': 52, 'つ': 921, 'て': -18, 'で': -850, 'と': -127, 'ど': 1682,
+ 'な': -787, 'に': -1224, 'の': -635, 'は': -578, 'べ': 1001, 'み': 502,
+ 'め': 865, 'ゃ': 3350, 'ょ': 854, 'り': -208, 'る': 429, 'れ': 504,
+ 'わ': 419, 'を': -1264, 'ん': 327, 'イ': 241, 'ル': 451, 'ン': -343,
+ '中': -871, '京': 722, '会': -1153, '党': -654, '務': 3519, '区': -901,
+ '告': 848, '員': 2104, '大': -1296, '学': -548, '定': 1785, '嵐': -1304,
+ '市': -2991, '席': 921, '年': 1763, '思': 872, '所': -814, '挙': 1618,
+ '新': -1682, '日': 218, '月': -4353, '査': 932, '格': 1356, '機': -1508,
+ '氏': -1347, '田': 240, '町': -3912, '的': -3149, '相': 1319, '省': -1052,
+ '県': -4003, '研': -997, '社': -278, '空': -813, '統': 1955, '者': -2233,
+ '表': 663, '語': -1073, '議': 1219, '選': -1018, '郎': -368, '長': 786,
+ '間': 1191, '題': 2368, '館': -689, '1': -514, 'E2': -32768, '「': 363,
+ 'イ': 241, 'ル': 451, 'ン': -343}
+ UW6__ = {',': 227, '.': 808, '1': -270, 'E1': 306, '、': 227, '。': 808,
+ 'あ': -307, 'う': 189, 'か': 241, 'が': -73, 'く': -121, 'こ': -200,
+ 'じ': 1782, 'す': 383, 'た': -428, 'っ': 573, 'て': -1014, 'で': 101,
+ 'と': -105, 'な': -253, 'に': -149, 'の': -417, 'は': -236, 'も': -206,
+ 'り': 187, 'る': -135, 'を': 195, 'ル': -673, 'ン': -496, '一': -277,
+ '中': 201, '件': -800, '会': 624, '前': 302, '区': 1792, '員': -1212,
+ '委': 798, '学': -960, '市': 887, '広': -695, '後': 535, '業': -697,
+ '相': 753, '社': -507, '福': 974, '空': -822, '者': 1811, '連': 463,
+ '郎': 1082, '1': -270, 'E1': 306, 'ル': -673, 'ン': -496}
# ctype_
def ctype_(self, char):
- # type: (unicode) -> unicode
- for pattern, value in iteritems(self.patterns_):
+ # type: (str) -> str
+ for pattern, value in self.patterns_.items():
if pattern.match(char):
return value
- return u'O'
+ return 'O'
# ts_
def ts_(self, dict, key):
- # type: (Dict[unicode, int], unicode) -> int
+ # type: (Dict[str, int], str) -> int
if key in dict:
return dict[key]
return 0
# segment
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
if not input:
return []
result = []
- seg = [u'B3', u'B2', u'B1']
- ctype = [u'O', u'O', u'O']
+ seg = ['B3', 'B2', 'B1']
+ ctype = ['O', 'O', 'O']
for t in input:
seg.append(t)
ctype.append(self.ctype_(t))
- seg.append(u'E1')
- seg.append(u'E2')
- seg.append(u'E3')
- ctype.append(u'O')
- ctype.append(u'O')
- ctype.append(u'O')
+ seg.append('E1')
+ seg.append('E2')
+ seg.append('E3')
+ ctype.append('O')
+ ctype.append('O')
+ ctype.append('O')
word = seg[3]
- p1 = u'U'
- p2 = u'U'
- p3 = u'U'
+ p1 = 'U'
+ p2 = 'U'
+ p3 = 'U'
for i in range(4, len(seg) - 3):
score = self.BIAS__
@@ -526,11 +519,11 @@ class DefaultSplitter(BaseSplitter):
score += self.ts_(self.TQ2__, p2 + c2 + c3 + c4)
score += self.ts_(self.TQ3__, p3 + c1 + c2 + c3)
score += self.ts_(self.TQ4__, p3 + c2 + c3 + c4)
- p = u'O'
+ p = 'O'
if score > 0:
result.append(word.strip())
- word = u''
- p = u'B'
+ word = ''
+ p = 'B'
p1 = p2
p2 = p3
p3 = p
@@ -573,13 +566,13 @@ class SearchJapanese(SearchLanguage):
dotted_path)
def split(self, input):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
return self.splitter.split(input)
def word_filter(self, stemmed_word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return len(stemmed_word) > 1
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return word
diff --git a/sphinx/search/jssplitter.py b/sphinx/search/jssplitter.py
index 7166565f1..71245319b 100644
--- a/sphinx/search/jssplitter.py
+++ b/sphinx/search/jssplitter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.jssplitter
~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/search/nl.py b/sphinx/search/nl.py
index de4fd13ec..076c190b2 100644
--- a/sphinx/search/nl.py
+++ b/sphinx/search/nl.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.nl
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-dutch_stopwords = parse_stop_word(u'''
+dutch_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/dutch/stop.txt
de | the
en | and
@@ -123,7 +122,7 @@ geweest | been; past participle of 'be'
andere | other
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(m){function n(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function L(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function e(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function K(a,b,c){return a[b]=a[b]/c|0}var I=parseInt;var E=parseFloat;function M(a){return a!==a}var B=isFinite;var A=encodeURIComponent;var z=decodeURIComponent;var y=encodeURI;var x=decodeURI;var w=Object.prototype.toString;var C=Object.prototype.hasOwnProperty;function l(){}m.require=function(b){var a=t[b];return a!==undefined?a:null};m.profilerIsRunning=function(){return l.getResults!=null};m.getProfileResults=function(){return(l.getResults||function(){return{}})()};m.postProfileResults=function(a,b){if(l.postResults==null)throw new Error('profiler has not been turned on');return l.postResults(a,b)};m.resetProfileResults=function(){if(l.resetResults==null)throw new Error('profiler has not been turned on');return l.resetResults()};m.DEBUG=false;function v(){};n([v],Error);function c(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};n([c],Object);function s(){};n([s],Object);function g(){var a;var b;var c;this.G={};a=this.D='';b=this._=0;c=this.A=a.length;this.E=0;this.C=b;this.B=c};n([g],s);function D(a,b){a.D=b.D;a._=b._;a.A=b.A;a.E=b.E;a.C=b.C;a.B=b.B};function i(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function r(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function f(a,d,c,e){var b;if(a._<=a.E){return false}b=a.D.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function k(a,b,d){var c;if(a.A-a._<b){return false}if(a.D.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function d(a,b,d){var c;if(a._-a.E<b){return false}if(a.D.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function q(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.D.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function h(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.E;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function u(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function b(a,f){var b;var c;var d;var e;b=false;if((c=a.C)<0||c>(d=a.B)||d>(e=a.A)||e>a.D.length?false:true){u(a,a.C,a.B,f);b=true}return b};g.prototype.J=function(){return false};g.prototype.Z=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.E=0;this.C=d;this.B=e;this.J();a=this.D;this.G['.'+b]=a}return a};g.prototype.stemWord=g.prototype.Z;g.prototype.a=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.E=0;this.C=g;this.B=h;this.J();a=this.D;this.G['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.a;function a(){g.call(this);this.I_p2=0;this.I_p1=0;this.B_e_found=false};n([a],g);a.prototype.M=function(a){this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.B_e_found=a.B_e_found;D(this,a)};a.prototype.copy_from=a.prototype.M;a.prototype.W=function(){var e;var m;var n;var o;var p;var d;var s;var c;var f;var g;var h;var j;var l;var t;var r;m=this._;b:while(true){n=this._;c=true;a:while(c===true){c=false;this.C=this._;e=q(this,a.a_0,11);if(e===0){break a}this.B=this._;switch(e){case 0:break a;case 1:if(!b(this,'a')){return false}break;case 2:if(!b(this,'e')){return false}break;case 3:if(!b(this,'i')){return false}break;case 4:if(!b(this,'o')){return false}break;case 5:if(!b(this,'u')){return false}break;case 6:if(this._>=this.A){break a}this._++;break}continue b}this._=n;break b}t=this._=m;o=t;f=true;a:while(f===true){f=false;this.C=this._;if(!k(this,1,'y')){this._=o;break a}this.B=this._;if(!b(this,'Y')){return false}}a:while(true){p=this._;g=true;d:while(g===true){g=false;e:while(true){d=this._;h=true;b:while(h===true){h=false;if(!i(this,a.g_v,97,232)){break b}this.C=this._;j=true;f:while(j===true){j=false;s=this._;l=true;c:while(l===true){l=false;if(!k(this,1,'i')){break c}this.B=this._;if(!i(this,a.g_v,97,232)){break c}if(!b(this,'I')){return false}break f}this._=s;if(!k(this,1,'y')){break b}this.B=this._;if(!b(this,'Y')){return false}}this._=d;break e}r=this._=d;if(r>=this.A){break d}this._++}continue a}this._=p;break a}return true};a.prototype.r_prelude=a.prototype.W;function F(c){var d;var s;var t;var o;var p;var e;var n;var f;var g;var h;var j;var l;var m;var u;var r;s=c._;b:while(true){t=c._;f=true;a:while(f===true){f=false;c.C=c._;d=q(c,a.a_0,11);if(d===0){break a}c.B=c._;switch(d){case 0:break a;case 1:if(!b(c,'a')){return false}break;case 
2:if(!b(c,'e')){return false}break;case 3:if(!b(c,'i')){return false}break;case 4:if(!b(c,'o')){return false}break;case 5:if(!b(c,'u')){return false}break;case 6:if(c._>=c.A){break a}c._++;break}continue b}c._=t;break b}u=c._=s;o=u;g=true;a:while(g===true){g=false;c.C=c._;if(!k(c,1,'y')){c._=o;break a}c.B=c._;if(!b(c,'Y')){return false}}a:while(true){p=c._;h=true;d:while(h===true){h=false;e:while(true){e=c._;j=true;b:while(j===true){j=false;if(!i(c,a.g_v,97,232)){break b}c.C=c._;l=true;f:while(l===true){l=false;n=c._;m=true;c:while(m===true){m=false;if(!k(c,1,'i')){break c}c.B=c._;if(!i(c,a.g_v,97,232)){break c}if(!b(c,'I')){return false}break f}c._=n;if(!k(c,1,'y')){break b}c.B=c._;if(!b(c,'Y')){return false}}c._=e;break e}r=c._=e;if(r>=c.A){break d}c._++}continue a}c._=p;break a}return true};a.prototype.U=function(){var b;var c;var d;var e;var f;var g;this.I_p1=g=this.A;this.I_p2=g;a:while(true){b=true;b:while(b===true){b=false;if(!i(this,a.g_v,97,232)){break b}break a}if(this._>=this.A){return false}this._++}a:while(true){c=true;b:while(c===true){c=false;if(!r(this,a.g_v,97,232)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;d=true;a:while(d===true){d=false;if(!(this.I_p1<3)){break a}this.I_p1=3}a:while(true){e=true;b:while(e===true){e=false;if(!i(this,a.g_v,97,232)){break b}break a}if(this._>=this.A){return false}this._++}a:while(true){f=true;b:while(f===true){f=false;if(!r(this,a.g_v,97,232)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p2=this._;return true};a.prototype.r_mark_regions=a.prototype.U;function G(b){var c;var d;var e;var f;var g;var h;b.I_p1=h=b.A;b.I_p2=h;a:while(true){c=true;b:while(c===true){c=false;if(!i(b,a.g_v,97,232)){break b}break a}if(b._>=b.A){return false}b._++}a:while(true){d=true;b:while(d===true){d=false;if(!r(b,a.g_v,97,232)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;e=true;a:while(e===true){e=false;if(!(b.I_p1<3)){break 
a}b.I_p1=3}a:while(true){f=true;b:while(f===true){f=false;if(!i(b,a.g_v,97,232)){break b}break a}if(b._>=b.A){return false}b._++}a:while(true){g=true;b:while(g===true){g=false;if(!r(b,a.g_v,97,232)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p2=b._;return true};a.prototype.V=function(){var c;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.C=this._;c=q(this,a.a_1,3);if(c===0){break a}this.B=this._;switch(c){case 0:break a;case 1:if(!b(this,'y')){return false}break;case 2:if(!b(this,'i')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};a.prototype.r_postlude=a.prototype.V;function H(c){var d;var f;var e;b:while(true){f=c._;e=true;a:while(e===true){e=false;c.C=c._;d=q(c,a.a_1,3);if(d===0){break a}c.B=c._;switch(d){case 0:break a;case 1:if(!b(c,'y')){return false}break;case 2:if(!b(c,'i')){return false}break;case 3:if(c._>=c.A){break a}c._++;break}continue b}c._=f;break b}return true};a.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};a.prototype.r_R1=a.prototype.Q;a.prototype.R=function(){return!(this.I_p2<=this._)?false:true};a.prototype.r_R2=a.prototype.R;a.prototype.Y=function(){var d;var c;d=this.A-this._;if(h(this,a.a_2,3)===0){return false}c=this._=this.A-d;this.B=c;if(c<=this.E){return false}this._--;this.C=this._;return!b(this,'')?false:true};a.prototype.r_undouble=a.prototype.Y;function j(c){var e;var d;e=c.A-c._;if(h(c,a.a_2,3)===0){return false}d=c._=c.A-e;c.B=d;if(d<=c.E){return false}c._--;c.C=c._;return!b(c,'')?false:true};a.prototype.S=function(){var c;var e;this.B_e_found=false;this.B=this._;if(!d(this,1,'e')){return false}this.C=e=this._;if(!(!(this.I_p1<=e)?false:true)){return false}c=this.A-this._;if(!f(this,a.g_v,97,232)){return false}this._=this.A-c;if(!b(this,'')){return false}this.B_e_found=true;return!j(this)?false:true};a.prototype.r_e_ending=a.prototype.S;function o(c){var e;var g;c.B_e_found=false;c.B=c._;if(!d(c,1,'e')){return 
false}c.C=g=c._;if(!(!(c.I_p1<=g)?false:true)){return false}e=c.A-c._;if(!f(c,a.g_v,97,232)){return false}c._=c.A-e;if(!b(c,'')){return false}c.B_e_found=true;return!j(c)?false:true};a.prototype.T=function(){var e;var g;var c;var h;var i;if(!(!(this.I_p1<=this._)?false:true)){return false}e=this.A-this._;if(!f(this,a.g_v,97,232)){return false}i=this._=(h=this.A)-e;g=h-i;c=true;a:while(c===true){c=false;if(!d(this,3,'gem')){break a}return false}this._=this.A-g;return!b(this,'')?false:!j(this)?false:true};a.prototype.r_en_ending=a.prototype.T;function p(c){var g;var h;var e;var i;var k;if(!(!(c.I_p1<=c._)?false:true)){return false}g=c.A-c._;if(!f(c,a.g_v,97,232)){return false}k=c._=(i=c.A)-g;h=i-k;e=true;a:while(e===true){e=false;if(!d(c,3,'gem')){break a}return false}c._=c.A-h;return!b(c,'')?false:!j(c)?false:true};a.prototype.X=function(){var c;var v;var w;var x;var y;var z;var A;var B;var C;var D;var M;var m;var g;var i;var k;var l;var e;var n;var q;var r;var s;var E;var F;var G;var H;var I;var J;var K;var L;var t;var N;var u;v=this.A-this._;m=true;a:while(m===true){m=false;this.B=this._;c=h(this,a.a_3,5);if(c===0){break a}this.C=this._;switch(c){case 0:break a;case 1:if(!(!(this.I_p1<=this._)?false:true)){break a}if(!b(this,'heid')){return false}break;case 2:if(!p(this)){break a}break;case 3:if(!(!(this.I_p1<=this._)?false:true)){break a}if(!f(this,a.g_v_j,97,232)){break a}if(!b(this,'')){return false}break}}F=this._=(E=this.A)-v;w=E-F;g=true;a:while(g===true){g=false;if(!o(this)){break a}}I=this._=(H=this.A)-w;x=H-I;i=true;a:while(i===true){i=false;this.B=this._;if(!d(this,4,'heid')){break a}this.C=G=this._;if(!(!(this.I_p2<=G)?false:true)){break a}y=this.A-this._;k=true;b:while(k===true){k=false;if(!d(this,1,'c')){break b}break a}this._=this.A-y;if(!b(this,'')){return false}this.B=this._;if(!d(this,2,'en')){break a}this.C=this._;if(!p(this)){break a}}L=this._=(K=this.A)-x;z=K-L;l=true;a:while(l===true){l=false;this.B=this._;c=h(this,a.a_4,6);if(c===0){break 
a}this.C=this._;switch(c){case 0:break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){break a}if(!b(this,'')){return false}e=true;c:while(e===true){e=false;A=this.A-this._;n=true;b:while(n===true){n=false;this.B=this._;if(!d(this,2,'ig')){break b}this.C=J=this._;if(!(!(this.I_p2<=J)?false:true)){break b}B=this.A-this._;q=true;d:while(q===true){q=false;if(!d(this,1,'e')){break d}break b}this._=this.A-B;if(!b(this,'')){return false}break c}this._=this.A-A;if(!j(this)){break a}}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){break a}C=this.A-this._;r=true;b:while(r===true){r=false;if(!d(this,1,'e')){break b}break a}this._=this.A-C;if(!b(this,'')){return false}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){break a}if(!b(this,'')){return false}if(!o(this)){break a}break;case 4:if(!(!(this.I_p2<=this._)?false:true)){break a}if(!b(this,'')){return false}break;case 5:if(!(!(this.I_p2<=this._)?false:true)){break a}if(!this.B_e_found){break a}if(!b(this,'')){return false}break}}u=this._=(N=this.A)-z;D=N-u;s=true;a:while(s===true){s=false;if(!f(this,a.g_v_I,73,232)){break a}M=this.A-this._;if(h(this,a.a_5,4)===0){break a}if(!f(this,a.g_v,97,232)){break a}t=this._=this.A-M;this.B=t;if(t<=this.E){break a}this._--;this.C=this._;if(!b(this,'')){return false}}this._=this.A-D;return true};a.prototype.r_standard_suffix=a.prototype.X;function J(c){var e;var w;var x;var y;var z;var A;var B;var C;var D;var E;var N;var g;var i;var k;var l;var m;var n;var q;var r;var s;var t;var F;var G;var H;var I;var J;var K;var L;var M;var u;var O;var v;w=c.A-c._;g=true;a:while(g===true){g=false;c.B=c._;e=h(c,a.a_3,5);if(e===0){break a}c.C=c._;switch(e){case 0:break a;case 1:if(!(!(c.I_p1<=c._)?false:true)){break a}if(!b(c,'heid')){return false}break;case 2:if(!p(c)){break a}break;case 3:if(!(!(c.I_p1<=c._)?false:true)){break a}if(!f(c,a.g_v_j,97,232)){break a}if(!b(c,'')){return false}break}}G=c._=(F=c.A)-w;x=F-G;i=true;a:while(i===true){i=false;if(!o(c)){break 
a}}J=c._=(I=c.A)-x;y=I-J;k=true;a:while(k===true){k=false;c.B=c._;if(!d(c,4,'heid')){break a}c.C=H=c._;if(!(!(c.I_p2<=H)?false:true)){break a}z=c.A-c._;l=true;b:while(l===true){l=false;if(!d(c,1,'c')){break b}break a}c._=c.A-z;if(!b(c,'')){return false}c.B=c._;if(!d(c,2,'en')){break a}c.C=c._;if(!p(c)){break a}}M=c._=(L=c.A)-y;A=L-M;m=true;a:while(m===true){m=false;c.B=c._;e=h(c,a.a_4,6);if(e===0){break a}c.C=c._;switch(e){case 0:break a;case 1:if(!(!(c.I_p2<=c._)?false:true)){break a}if(!b(c,'')){return false}n=true;c:while(n===true){n=false;B=c.A-c._;q=true;b:while(q===true){q=false;c.B=c._;if(!d(c,2,'ig')){break b}c.C=K=c._;if(!(!(c.I_p2<=K)?false:true)){break b}C=c.A-c._;r=true;d:while(r===true){r=false;if(!d(c,1,'e')){break d}break b}c._=c.A-C;if(!b(c,'')){return false}break c}c._=c.A-B;if(!j(c)){break a}}break;case 2:if(!(!(c.I_p2<=c._)?false:true)){break a}D=c.A-c._;s=true;b:while(s===true){s=false;if(!d(c,1,'e')){break b}break a}c._=c.A-D;if(!b(c,'')){return false}break;case 3:if(!(!(c.I_p2<=c._)?false:true)){break a}if(!b(c,'')){return false}if(!o(c)){break a}break;case 4:if(!(!(c.I_p2<=c._)?false:true)){break a}if(!b(c,'')){return false}break;case 5:if(!(!(c.I_p2<=c._)?false:true)){break a}if(!c.B_e_found){break a}if(!b(c,'')){return false}break}}v=c._=(O=c.A)-A;E=O-v;t=true;a:while(t===true){t=false;if(!f(c,a.g_v_I,73,232)){break a}N=c.A-c._;if(h(c,a.a_5,4)===0){break a}if(!f(c,a.g_v,97,232)){break a}u=c._=c.A-N;c.B=u;if(u<=c.E){break a}c._--;c.C=c._;if(!b(c,'')){return false}}c._=c.A-E;return true};a.prototype.J=function(){var f;var g;var h;var b;var a;var c;var d;var i;var j;var e;f=this._;b=true;a:while(b===true){b=false;if(!F(this)){break a}}i=this._=f;g=i;a=true;a:while(a===true){a=false;if(!G(this)){break a}}j=this._=g;this.E=j;this._=this.A;c=true;a:while(c===true){c=false;if(!J(this)){break a}}e=this._=this.E;h=e;d=true;a:while(d===true){d=false;if(!H(this)){break a}}this._=h;return 
true};a.prototype.stem=a.prototype.J;a.prototype.N=function(b){return b instanceof a};a.prototype.equals=a.prototype.N;a.prototype.O=function(){var c;var a;var b;var d;c='DutchStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.O;a.serialVersionUID=1;e(a,'methodObject',function(){return new a});e(a,'a_0',function(){return[new c('',-1,6),new c('á',0,1),new c('ä',0,1),new c('é',0,2),new c('ë',0,2),new c('í',0,3),new c('ï',0,3),new c('ó',0,4),new c('ö',0,4),new c('ú',0,5),new c('ü',0,5)]});e(a,'a_1',function(){return[new c('',-1,3),new c('I',0,2),new c('Y',0,1)]});e(a,'a_2',function(){return[new c('dd',-1,-1),new c('kk',-1,-1),new c('tt',-1,-1)]});e(a,'a_3',function(){return[new c('ene',-1,2),new c('se',-1,3),new c('en',-1,2),new c('heden',2,1),new c('s',-1,3)]});e(a,'a_4',function(){return[new c('end',-1,1),new c('ig',-1,2),new c('ing',-1,1),new c('lijk',-1,3),new c('baar',-1,4),new c('bar',-1,5)]});e(a,'a_5',function(){return[new c('aa',-1,-1),new c('ee',-1,-1),new c('oo',-1,-1),new c('uu',-1,-1)]});e(a,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128]});e(a,'g_v_I',function(){return[1,0,0,17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128]});e(a,'g_v_j',function(){return[17,67,16,1,0,0,0,0,0,0,0,0,0,0,0,0,128]});var t={'src/stemmer.jsx':{Stemmer:s},'src/dutch-stemmer.jsx':{DutchStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/dutch-stemmer.jsx").DutchStemmer;
"""
@@ -141,5 +140,5 @@ class SearchDutch(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('dutch')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/no.py b/sphinx/search/no.py
index 81876bcdd..106c6b670 100644
--- a/sphinx/search/no.py
+++ b/sphinx/search/no.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.no
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-norwegian_stopwords = parse_stop_word(u'''
+norwegian_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/norwegian/stop.txt
og | and
i | in
@@ -198,7 +197,7 @@ varte | became *
vart | became *
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(g){function i(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function G(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function e(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function H(a,b,c){return a[b]=a[b]/c|0}var B=parseInt;var q=parseFloat;function I(a){return a!==a}var y=isFinite;var x=encodeURIComponent;var w=decodeURIComponent;var u=encodeURI;var t=decodeURI;var s=Object.prototype.toString;var r=Object.prototype.hasOwnProperty;function h(){}g.require=function(b){var a=m[b];return a!==undefined?a:null};g.profilerIsRunning=function(){return h.getResults!=null};g.getProfileResults=function(){return(h.getResults||function(){return{}})()};g.postProfileResults=function(a,b){if(h.postResults==null)throw new Error('profiler has not been turned on');return h.postResults(a,b)};g.resetProfileResults=function(){if(h.resetResults==null)throw new Error('profiler has not been turned on');return h.resetResults()};g.DEBUG=false;function A(){};i([A],Error);function b(a,b,c){this.G=a.length;this.R=a;this.U=b;this.J=c;this.I=null;this.V=null};i([b],Object);function j(){};i([j],Object);function d(){var a;var b;var c;this.F={};a=this.C='';b=this._=0;c=this.A=a.length;this.B=0;this.D=b;this.E=c};i([d],j);function v(a,b){a.C=b.C;a._=b._;a.A=b.A;a.B=b.B;a.D=b.D;a.E=b.E};function l(b,d,c,e){var a;if(b._>=b.A){return false}a=b.C.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function k(b,d,c,e){var a;if(b._<=b.B){return false}a=b.C.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function p(a,d,c,e){var b;if(a._>=a.A){return false}b=a.C.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function o(a,d,c,e){var b;if(a._<=a.B){return false}b=a.C.charCodeAt(a._-1);if(b>e||b<c){a._--;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function n(a,b,d){var c;if(a._-a.B<b){return false}if(a.C.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.B;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.C.charCodeAt(e-1-c)-a.R.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.U;if(b<0){return 0}}return-1};function C(a,b,d,e){var c;c=e.length-(d-b);a.C=a.C.slice(0,b)+e+a.C.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.D)<0||c>(d=a.E)||d>(e=a.A)||e>a.C.length?false:true){C(a,a.D,a.E,f);b=true}return b};d.prototype.H=function(){return false};d.prototype.S=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.C=b;d=this._=0;e=this.A=c.length;this.B=0;this.D=d;this.E=e;this.H();a=this.C;this.F['.'+b]=a}return a};d.prototype.stemWord=d.prototype.S;d.prototype.T=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.C=c;g=this._=0;h=this.A=f.length;this.B=0;this.D=g;this.E=h;this.H();a=this.C;this.F['.'+c]=a}d.push(a)}return d};d.prototype.stemWords=d.prototype.T;function a(){d.call(this);this.I_x=0;this.I_p1=0};i([a],d);a.prototype.K=function(a){this.I_x=a.I_x;this.I_p1=a.I_p1;v(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.P=function(){var g;var d;var b;var e;var c;var f;var i;var j;var k;var 
h;this.I_p1=j=this.A;g=i=this._;b=i+3|0;if(0>b||b>j){return false}h=this._=b;this.I_x=h;this._=g;a:while(true){d=this._;e=true;b:while(e===true){e=false;if(!l(this,a.g_v,97,248)){break b}this._=d;break a}k=this._=d;if(k>=this.A){return false}this._++}a:while(true){c=true;b:while(c===true){c=false;if(!p(this,a.g_v,97,248)){break b}break a}if(this._>=this.A){return false}this._++}this.I_p1=this._;f=true;a:while(f===true){f=false;if(!(this.I_p1<this.I_x)){break a}this.I_p1=this.I_x}return true};a.prototype.r_mark_regions=a.prototype.P;function F(b){var h;var e;var c;var f;var d;var g;var j;var k;var m;var i;b.I_p1=k=b.A;h=j=b._;c=j+3|0;if(0>c||c>k){return false}i=b._=c;b.I_x=i;b._=h;a:while(true){e=b._;f=true;b:while(f===true){f=false;if(!l(b,a.g_v,97,248)){break b}b._=e;break a}m=b._=e;if(m>=b.A){return false}b._++}a:while(true){d=true;b:while(d===true){d=false;if(!p(b,a.g_v,97,248)){break b}break a}if(b._>=b.A){return false}b._++}b.I_p1=b._;g=true;a:while(g===true){g=false;if(!(b.I_p1<b.I_x)){break a}b.I_p1=b.I_x}return true};a.prototype.O=function(){var b;var h;var d;var i;var e;var g;var j;var l;var m;h=this.A-(j=this._);if(j<this.I_p1){return false}l=this._=this.I_p1;d=this.B;this.B=l;m=this._=this.A-h;this.E=m;b=f(this,a.a_0,29);if(b===0){this.B=d;return false}this.D=this._;this.B=d;switch(b){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:e=true;a:while(e===true){e=false;i=this.A-this._;g=true;b:while(g===true){g=false;if(!k(this,a.g_s_ending,98,122)){break b}break a}this._=this.A-i;if(!n(this,1,'k')){return false}if(!o(this,a.g_v,97,248)){return false}}if(!c(this,'')){return false}break;case 3:if(!c(this,'er')){return false}break}return true};a.prototype.r_main_suffix=a.prototype.O;function E(b){var d;var l;var e;var i;var g;var h;var m;var p;var j;l=b.A-(m=b._);if(m<b.I_p1){return false}p=b._=b.I_p1;e=b.B;b.B=p;j=b._=b.A-l;b.E=j;d=f(b,a.a_0,29);if(d===0){b.B=e;return false}b.D=b._;b.B=e;switch(d){case 0:return false;case 
1:if(!c(b,'')){return false}break;case 2:g=true;a:while(g===true){g=false;i=b.A-b._;h=true;b:while(h===true){h=false;if(!k(b,a.g_s_ending,98,122)){break b}break a}b._=b.A-i;if(!n(b,1,'k')){return false}if(!o(b,a.g_v,97,248)){return false}}if(!c(b,'')){return false}break;case 3:if(!c(b,'er')){return false}break}return true};a.prototype.N=function(){var e;var g;var b;var h;var d;var i;var j;var k;var l;e=(h=this.A)-(d=this._);g=h-d;if(d<this.I_p1){return false}i=this._=this.I_p1;b=this.B;this.B=i;j=this._=this.A-g;this.E=j;if(f(this,a.a_1,2)===0){this.B=b;return false}this.D=this._;l=this.B=b;k=this._=this.A-e;if(k<=l){return false}this._--;this.D=this._;return!c(this,'')?false:true};a.prototype.r_consonant_pair=a.prototype.N;function D(b){var i;var j;var d;var g;var e;var k;var l;var m;var h;i=(g=b.A)-(e=b._);j=g-e;if(e<b.I_p1){return false}k=b._=b.I_p1;d=b.B;b.B=k;l=b._=b.A-j;b.E=l;if(f(b,a.a_1,2)===0){b.B=d;return false}b.D=b._;h=b.B=d;m=b._=b.A-i;if(m<=h){return false}b._--;b.D=b._;return!c(b,'')?false:true};a.prototype.Q=function(){var b;var e;var d;var g;var h;var i;e=this.A-(g=this._);if(g<this.I_p1){return false}h=this._=this.I_p1;d=this.B;this.B=h;i=this._=this.A-e;this.E=i;b=f(this,a.a_2,11);if(b===0){this.B=d;return false}this.D=this._;this.B=d;switch(b){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};a.prototype.r_other_suffix=a.prototype.Q;function z(b){var d;var g;var e;var h;var i;var j;g=b.A-(h=b._);if(h<b.I_p1){return false}i=b._=b.I_p1;e=b.B;b.B=i;j=b._=b.A-g;b.E=j;d=f(b,a.a_2,11);if(d===0){b.B=e;return false}b.D=b._;b.B=e;switch(d){case 0:return false;case 1:if(!c(b,'')){return false}break}return true};a.prototype.H=function(){var g;var f;var h;var b;var c;var a;var d;var i;var j;var k;var l;var e;g=this._;b=true;a:while(b===true){b=false;if(!F(this)){break a}}i=this._=g;this.B=i;k=this._=j=this.A;f=j-k;c=true;a:while(c===true){c=false;if(!E(this)){break 
a}}e=this._=(l=this.A)-f;h=l-e;a=true;a:while(a===true){a=false;if(!D(this)){break a}}this._=this.A-h;d=true;a:while(d===true){d=false;if(!z(this)){break a}}this._=this.B;return true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var d;c='NorwegianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;e(a,'methodObject',function(){return new a});e(a,'a_0',function(){return[new b('a',-1,1),new b('e',-1,1),new b('ede',1,1),new b('ande',1,1),new b('ende',1,1),new b('ane',1,1),new b('ene',1,1),new b('hetene',6,1),new b('erte',1,3),new b('en',-1,1),new b('heten',9,1),new b('ar',-1,1),new b('er',-1,1),new b('heter',12,1),new b('s',-1,2),new b('as',14,1),new b('es',14,1),new b('edes',16,1),new b('endes',16,1),new b('enes',16,1),new b('hetenes',19,1),new b('ens',14,1),new b('hetens',21,1),new b('ers',14,1),new b('ets',14,1),new b('et',-1,1),new b('het',25,1),new b('ert',-1,3),new b('ast',-1,1)]});e(a,'a_1',function(){return[new b('dt',-1,-1),new b('vt',-1,-1)]});e(a,'a_2',function(){return[new b('leg',-1,1),new b('eleg',0,1),new b('ig',-1,1),new b('eig',2,1),new b('lig',2,1),new b('elig',4,1),new b('els',-1,1),new b('lov',-1,1),new b('elov',7,1),new b('slov',7,1),new b('hetslov',9,1)]});e(a,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,48,0,128]});e(a,'g_s_ending',function(){return[119,125,149,1]});var m={'src/stemmer.jsx':{Stemmer:j},'src/norwegian-stemmer.jsx':{NorwegianStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/norwegian-stemmer.jsx").NorwegianStemmer;
"""
@@ -216,5 +215,5 @@ class SearchNorwegian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('norwegian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/pt.py b/sphinx/search/pt.py
index 9afe80870..143759387 100644
--- a/sphinx/search/pt.py
+++ b/sphinx/search/pt.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.pt
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-portuguese_stopwords = parse_stop_word(u'''
+portuguese_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/portuguese/stop.txt
de | of, from
a | the; to, at; her
@@ -257,7 +256,7 @@ teríamos
teriam
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function I(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function h(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function J(a,b,c){return a[b]=a[b]/c|0}var p=parseInt;var z=parseFloat;function K(a){return a!==a}var x=isFinite;var w=encodeURIComponent;var u=decodeURIComponent;var t=encodeURI;var s=decodeURI;var A=Object.prototype.toString;var q=Object.prototype.hasOwnProperty;function k(){}j.require=function(b){var a=o[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return k.getResults!=null};j.getProfileResults=function(){return(k.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};j.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};j.DEBUG=false;function r(){};l([r],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function n(){};l([n],Object);function i(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};l([i],n);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function f(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function g(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function d(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function m(f,m,p){var b;var 
d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function e(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function B(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){B(a,a.B,a.C,f);b=true}return b};i.prototype.J=function(){return false};i.prototype.a=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};i.prototype.stemWord=i.prototype.a;i.prototype.b=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};i.prototype.stemWords=i.prototype.b;function 
b(){i.call(this);this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],i);b.prototype.M=function(a){this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=m(this,b.a_0,3);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'a~')){return false}break;case 2:if(!c(this,'o~')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_prelude=b.prototype.V;function E(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=m(a,b.a_0,3);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'a~')){return false}break;case 2:if(!c(a,'o~')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return true};b.prototype.T=function(){var u;var w;var x;var y;var t;var l;var d;var e;var h;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;d=true;g:while(d===true){d=false;w=this._;e=true;b:while(e===true){e=false;if(!f(this,b.g_v,97,250)){break b}h=true;f:while(h===true){h=false;x=this._;i=true;c:while(i===true){i=false;if(!g(this,b.g_v,97,250)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!f(this,b.g_v,97,250)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!f(this,b.g_v,97,250)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!g(this,b.g_v,97,250)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!g(this,b.g_v,97,250)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!g(this,b.g_v,97,250)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!f(this,b.g_v,97,250)){break d}break e}if(this._>=this.A){break b}this._++}break 
c}this._=y;if(!f(this,b.g_v,97,250)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!f(this,b.g_v,97,250)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!g(this,b.g_v,97,250)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!f(this,b.g_v,97,250)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!g(this,b.g_v,97,250)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.T;function F(a){var x;var y;var z;var u;var v;var l;var d;var e;var h;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;d=true;g:while(d===true){d=false;y=a._;e=true;b:while(e===true){e=false;if(!f(a,b.g_v,97,250)){break b}h=true;f:while(h===true){h=false;z=a._;i=true;c:while(i===true){i=false;if(!g(a,b.g_v,97,250)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!f(a,b.g_v,97,250)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!f(a,b.g_v,97,250)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!g(a,b.g_v,97,250)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!g(a,b.g_v,97,250)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!g(a,b.g_v,97,250)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!f(a,b.g_v,97,250)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!f(a,b.g_v,97,250)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!f(a,b.g_v,97,250)){break c}break b}if(a._>=a.A){break 
a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!g(a,b.g_v,97,250)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!f(a,b.g_v,97,250)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!g(a,b.g_v,97,250)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.U=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=m(this,b.a_1,3);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'ã')){return false}break;case 2:if(!c(this,'õ')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.U;function G(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=m(a,b.a_1,3);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'ã')){return false}break;case 2:if(!c(a,'õ')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.Y=function(){var a;var f;var g;var h;var j;var i;var k;var l;var m;var o;var p;var n;this.C=this._;a=e(this,b.a_5,45);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 2:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'log')){return false}break;case 3:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'u')){return false}break;case 4:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'ente')){return false}break;case 
5:if(!(!(this.I_p1<=this._)?false:true)){return false}if(!c(this,'')){return false}f=this.A-this._;i=true;a:while(i===true){i=false;this.C=this._;a=e(this,b.a_2,4);if(a===0){this._=this.A-f;break a}this.B=o=this._;if(!(!(this.I_p2<=o)?false:true)){this._=this.A-f;break a}if(!c(this,'')){return false}switch(a){case 0:this._=this.A-f;break a;case 1:this.C=this._;if(!d(this,2,'at')){this._=this.A-f;break a}this.B=p=this._;if(!(!(this.I_p2<=p)?false:true)){this._=this.A-f;break a}if(!c(this,'')){return false}break}}break;case 6:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}g=this.A-this._;k=true;a:while(k===true){k=false;this.C=this._;a=e(this,b.a_3,3);if(a===0){this._=this.A-g;break a}this.B=this._;switch(a){case 0:this._=this.A-g;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-g;break a}if(!c(this,'')){return false}break}}break;case 7:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}h=this.A-this._;l=true;a:while(l===true){l=false;this.C=this._;a=e(this,b.a_4,3);if(a===0){this._=this.A-h;break a}this.B=this._;switch(a){case 0:this._=this.A-h;break a;case 1:if(!(!(this.I_p2<=this._)?false:true)){this._=this.A-h;break a}if(!c(this,'')){return false}break}}break;case 8:if(!(!(this.I_p2<=this._)?false:true)){return false}if(!c(this,'')){return false}j=this.A-this._;m=true;a:while(m===true){m=false;this.C=this._;if(!d(this,2,'at')){this._=this.A-j;break a}this.B=n=this._;if(!(!(this.I_p2<=n)?false:true)){this._=this.A-j;break a}if(!c(this,'')){return false}}break;case 9:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!d(this,1,'e')){return false}if(!c(this,'ir')){return false}break}return true};b.prototype.r_standard_suffix=b.prototype.Y;function H(a){var f;var g;var h;var i;var k;var j;var l;var m;var n;var p;var q;var o;a.C=a._;f=e(a,b.a_5,45);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return 
false}break;case 2:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'log')){return false}break;case 3:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'u')){return false}break;case 4:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'ente')){return false}break;case 5:if(!(!(a.I_p1<=a._)?false:true)){return false}if(!c(a,'')){return false}g=a.A-a._;j=true;a:while(j===true){j=false;a.C=a._;f=e(a,b.a_2,4);if(f===0){a._=a.A-g;break a}a.B=p=a._;if(!(!(a.I_p2<=p)?false:true)){a._=a.A-g;break a}if(!c(a,'')){return false}switch(f){case 0:a._=a.A-g;break a;case 1:a.C=a._;if(!d(a,2,'at')){a._=a.A-g;break a}a.B=q=a._;if(!(!(a.I_p2<=q)?false:true)){a._=a.A-g;break a}if(!c(a,'')){return false}break}}break;case 6:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}h=a.A-a._;l=true;a:while(l===true){l=false;a.C=a._;f=e(a,b.a_3,3);if(f===0){a._=a.A-h;break a}a.B=a._;switch(f){case 0:a._=a.A-h;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-h;break a}if(!c(a,'')){return false}break}}break;case 7:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}i=a.A-a._;m=true;a:while(m===true){m=false;a.C=a._;f=e(a,b.a_4,3);if(f===0){a._=a.A-i;break a}a.B=a._;switch(f){case 0:a._=a.A-i;break a;case 1:if(!(!(a.I_p2<=a._)?false:true)){a._=a.A-i;break a}if(!c(a,'')){return false}break}}break;case 8:if(!(!(a.I_p2<=a._)?false:true)){return false}if(!c(a,'')){return false}k=a.A-a._;n=true;a:while(n===true){n=false;a.C=a._;if(!d(a,2,'at')){a._=a.A-k;break a}a.B=o=a._;if(!(!(a.I_p2<=o)?false:true)){a._=a.A-k;break a}if(!c(a,'')){return false}}break;case 9:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!d(a,1,'e')){return false}if(!c(a,'ir')){return false}break}return true};b.prototype.Z=function(){var d;var f;var a;var g;var h;var i;f=this.A-(g=this._);if(g<this.I_pV){return false}h=this._=this.I_pV;a=this.D;this.D=h;i=this._=this.A-f;this.C=i;d=e(this,b.a_6,120);if(d===0){this.D=a;return false}this.B=this._;switch(d){case 
0:this.D=a;return false;case 1:if(!c(this,'')){return false}break}this.D=a;return true};b.prototype.r_verb_suffix=b.prototype.Z;function D(a){var f;var g;var d;var h;var i;var j;g=a.A-(h=a._);if(h<a.I_pV){return false}i=a._=a.I_pV;d=a.D;a.D=i;j=a._=a.A-g;a.C=j;f=e(a,b.a_6,120);if(f===0){a.D=d;return false}a.B=a._;switch(f){case 0:a.D=d;return false;case 1:if(!c(a,'')){return false}break}a.D=d;return true};b.prototype.X=function(){var a;this.C=this._;a=e(this,b.a_7,7);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}break}return true};b.prototype.r_residual_suffix=b.prototype.X;function C(a){var d;a.C=a._;d=e(a,b.a_7,7);if(d===0){return false}a.B=a._;switch(d){case 0:return false;case 1:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}break}return true};b.prototype.W=function(){var a;var h;var i;var j;var f;var g;var k;var l;this.C=this._;a=e(this,b.a_8,4);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}this.C=this._;f=true;b:while(f===true){f=false;h=this.A-this._;g=true;a:while(g===true){g=false;if(!d(this,1,'u')){break a}this.B=k=this._;i=this.A-k;if(!d(this,1,'g')){break a}this._=this.A-i;break b}this._=this.A-h;if(!d(this,1,'i')){return false}this.B=l=this._;j=this.A-l;if(!d(this,1,'c')){return false}this._=this.A-j}if(!(!(this.I_pV<=this._)?false:true)){return false}if(!c(this,'')){return false}break;case 2:if(!c(this,'c')){return false}break}return true};b.prototype.r_residual_form=b.prototype.W;function y(a){var f;var i;var j;var k;var g;var h;var l;var m;a.C=a._;f=e(a,b.a_8,4);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}a.C=a._;g=true;b:while(g===true){g=false;i=a.A-a._;h=true;a:while(h===true){h=false;if(!d(a,1,'u')){break 
a}a.B=l=a._;j=a.A-l;if(!d(a,1,'g')){break a}a._=a.A-j;break b}a._=a.A-i;if(!d(a,1,'i')){return false}a.B=m=a._;k=a.A-m;if(!d(a,1,'c')){return false}a._=a.A-k}if(!(!(a.I_pV<=a._)?false:true)){return false}if(!c(a,'')){return false}break;case 2:if(!c(a,'c')){return false}break}return true};b.prototype.J=function(){var q;var n;var o;var p;var r;var s;var t;var u;var v;var b;var e;var f;var g;var a;var h;var i;var j;var k;var l;var w;var x;var z;var A;var B;var I;var J;var K;var m;q=this._;b=true;a:while(b===true){b=false;if(!E(this)){break a}}w=this._=q;n=w;e=true;a:while(e===true){e=false;if(!F(this)){break a}}I=this._=n;this.D=I;K=this._=J=this.A;o=J-K;f=true;b:while(f===true){f=false;g=true;c:while(g===true){g=false;p=this.A-this._;a=true;d:while(a===true){a=false;r=this.A-this._;h=true;a:while(h===true){h=false;s=this.A-this._;i=true;e:while(i===true){i=false;if(!H(this)){break e}break a}this._=this.A-s;if(!D(this)){break d}}B=this._=(A=this.A)-r;t=A-B;j=true;a:while(j===true){j=false;this.C=this._;if(!d(this,1,'i')){break a}this.B=x=this._;u=this.A-x;if(!d(this,1,'c')){break a}z=this._=this.A-u;if(!(!(this.I_pV<=z)?false:true)){break a}if(!c(this,'')){return false}}this._=this.A-t;break c}this._=this.A-p;if(!C(this)){break b}}}this._=this.A-o;k=true;a:while(k===true){k=false;if(!y(this)){break a}}m=this._=this.D;v=m;l=true;a:while(l===true){l=false;if(!G(this)){break a}}this._=v;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='PortugueseStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;h(b,'methodObject',function(){return new b});h(b,'a_0',function(){return[new a('',-1,3),new a('ã',0,1),new a('õ',0,2)]});h(b,'a_1',function(){return[new a('',-1,3),new a('a~',0,1),new a('o~',0,2)]});h(b,'a_2',function(){return[new a('ic',-1,-1),new 
a('ad',-1,-1),new a('os',-1,-1),new a('iv',-1,1)]});h(b,'a_3',function(){return[new a('ante',-1,1),new a('avel',-1,1),new a('ível',-1,1)]});h(b,'a_4',function(){return[new a('ic',-1,1),new a('abil',-1,1),new a('iv',-1,1)]});h(b,'a_5',function(){return[new a('ica',-1,1),new a('ância',-1,1),new a('ência',-1,4),new a('ira',-1,9),new a('adora',-1,1),new a('osa',-1,1),new a('ista',-1,1),new a('iva',-1,8),new a('eza',-1,1),new a('logía',-1,2),new a('idade',-1,7),new a('ante',-1,1),new a('mente',-1,6),new a('amente',12,5),new a('ável',-1,1),new a('ível',-1,1),new a('ución',-1,3),new a('ico',-1,1),new a('ismo',-1,1),new a('oso',-1,1),new a('amento',-1,1),new a('imento',-1,1),new a('ivo',-1,8),new a('aça~o',-1,1),new a('ador',-1,1),new a('icas',-1,1),new a('ências',-1,4),new a('iras',-1,9),new a('adoras',-1,1),new a('osas',-1,1),new a('istas',-1,1),new a('ivas',-1,8),new a('ezas',-1,1),new a('logías',-1,2),new a('idades',-1,7),new a('uciones',-1,3),new a('adores',-1,1),new a('antes',-1,1),new a('aço~es',-1,1),new a('icos',-1,1),new a('ismos',-1,1),new a('osos',-1,1),new a('amentos',-1,1),new a('imentos',-1,1),new a('ivos',-1,8)]});h(b,'a_6',function(){return[new a('ada',-1,1),new a('ida',-1,1),new a('ia',-1,1),new a('aria',2,1),new a('eria',2,1),new a('iria',2,1),new a('ara',-1,1),new a('era',-1,1),new a('ira',-1,1),new a('ava',-1,1),new a('asse',-1,1),new a('esse',-1,1),new a('isse',-1,1),new a('aste',-1,1),new a('este',-1,1),new a('iste',-1,1),new a('ei',-1,1),new a('arei',16,1),new a('erei',16,1),new a('irei',16,1),new a('am',-1,1),new a('iam',20,1),new a('ariam',21,1),new a('eriam',21,1),new a('iriam',21,1),new a('aram',20,1),new a('eram',20,1),new a('iram',20,1),new a('avam',20,1),new a('em',-1,1),new a('arem',29,1),new a('erem',29,1),new a('irem',29,1),new a('assem',29,1),new a('essem',29,1),new a('issem',29,1),new a('ado',-1,1),new a('ido',-1,1),new a('ando',-1,1),new a('endo',-1,1),new a('indo',-1,1),new a('ara~o',-1,1),new a('era~o',-1,1),new a('ira~o',-1,1),new 
a('ar',-1,1),new a('er',-1,1),new a('ir',-1,1),new a('as',-1,1),new a('adas',47,1),new a('idas',47,1),new a('ias',47,1),new a('arias',50,1),new a('erias',50,1),new a('irias',50,1),new a('aras',47,1),new a('eras',47,1),new a('iras',47,1),new a('avas',47,1),new a('es',-1,1),new a('ardes',58,1),new a('erdes',58,1),new a('irdes',58,1),new a('ares',58,1),new a('eres',58,1),new a('ires',58,1),new a('asses',58,1),new a('esses',58,1),new a('isses',58,1),new a('astes',58,1),new a('estes',58,1),new a('istes',58,1),new a('is',-1,1),new a('ais',71,1),new a('eis',71,1),new a('areis',73,1),new a('ereis',73,1),new a('ireis',73,1),new a('áreis',73,1),new a('éreis',73,1),new a('íreis',73,1),new a('ásseis',73,1),new a('ésseis',73,1),new a('ísseis',73,1),new a('áveis',73,1),new a('íeis',73,1),new a('aríeis',84,1),new a('eríeis',84,1),new a('iríeis',84,1),new a('ados',-1,1),new a('idos',-1,1),new a('amos',-1,1),new a('áramos',90,1),new a('éramos',90,1),new a('íramos',90,1),new a('ávamos',90,1),new a('íamos',90,1),new a('aríamos',95,1),new a('eríamos',95,1),new a('iríamos',95,1),new a('emos',-1,1),new a('aremos',99,1),new a('eremos',99,1),new a('iremos',99,1),new a('ássemos',99,1),new a('êssemos',99,1),new a('íssemos',99,1),new a('imos',-1,1),new a('armos',-1,1),new a('ermos',-1,1),new a('irmos',-1,1),new a('ámos',-1,1),new a('arás',-1,1),new a('erás',-1,1),new a('irás',-1,1),new a('eu',-1,1),new a('iu',-1,1),new a('ou',-1,1),new a('ará',-1,1),new a('erá',-1,1),new a('irá',-1,1)]});h(b,'a_7',function(){return[new a('a',-1,1),new a('i',-1,1),new a('o',-1,1),new a('os',-1,1),new a('á',-1,1),new a('í',-1,1),new a('ó',-1,1)]});h(b,'a_8',function(){return[new a('e',-1,1),new a('ç',-1,2),new a('é',-1,1),new a('ê',-1,1)]});h(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,3,19,12,2]});var o={'src/stemmer.jsx':{Stemmer:n},'src/portuguese-stemmer.jsx':{PortugueseStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/portuguese-stemmer.jsx").PortugueseStemmer;
@@ -276,5 +275,5 @@ class SearchPortuguese(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('portuguese')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ro.py b/sphinx/search/ro.py
index 69d6edd8e..e385d6f01 100644
--- a/sphinx/search/ro.py
+++ b/sphinx/search/ro.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.ro
~~~~~~~~~~~~~~~~
@@ -17,7 +16,7 @@ if False:
# For type annotation
from typing import Dict, Set # NOQA
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function L(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function h(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function M(a,b,c){return a[b]=a[b]/c|0}var E=parseInt;var C=parseFloat;function N(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var w=decodeURI;var u=Object.prototype.toString;var D=Object.prototype.hasOwnProperty;function k(){}j.require=function(b){var a=r[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return k.getResults!=null};j.getProfileResults=function(){return(k.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};j.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};j.DEBUG=false;function t(){};l([t],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function n(){};l([n],Object);function g(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};l([g],n);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function d(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function e(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function p(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function m(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function i(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function q(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function s(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){s(a,a.B,a.C,f);b=true}return b};g.prototype.J=function(){return false};g.prototype.b=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};g.prototype.stemWord=g.prototype.b;g.prototype.c=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.c;function b(){g.call(this);this.B_standard_suffix_removed=false;this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],g);b.prototype.M=function(a){this.B_standard_suffix_removed=a.B_standard_suffix_removed;this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.W=function(){var i;var a;var j;var e;var f;var g;var h;var k;b:while(true){i=this._;e=true;d:while(e===true){e=false;e:while(true){a=this._;f=true;a:while(f===true){f=false;if(!d(this,b.g_v,97,259)){break a}this.B=this._;g=true;f:while(g===true){g=false;j=this._;h=true;c:while(h===true){h=false;if(!m(this,1,'u')){break c}this.C=this._;if(!d(this,b.g_v,97,259)){break c}if(!c(this,'U')){return false}break f}this._=j;if(!m(this,1,'i')){break a}this.C=this._;if(!d(this,b.g_v,97,259)){break a}if(!c(this,'I')){return false}}this._=a;break e}k=this._=a;if(k>=this.A){break d}this._++}continue b}this._=i;break b}return true};b.prototype.r_prelude=b.prototype.W;function G(a){var j;var e;var k;var f;var g;var h;var i;var l;b:while(true){j=a._;f=true;d:while(f===true){f=false;e:while(true){e=a._;g=true;a:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break a}a.B=a._;h=true;f:while(h===true){h=false;k=a._;i=true;c:while(i===true){i=false;if(!m(a,1,'u')){break c}a.C=a._;if(!d(a,b.g_v,97,259)){break c}if(!c(a,'U')){return false}break f}a._=k;if(!m(a,1,'i')){break a}a.C=a._;if(!d(a,b.g_v,97,259)){break a}if(!c(a,'I')){return false}}a._=e;break e}l=a._=e;if(l>=a.A){break d}a._++}continue b}a._=j;break b}return true};b.prototype.U=function(){var u;var w;var x;var y;var t;var l;var f;var g;var h;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var 
v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;w=this._;g=true;b:while(g===true){g=false;if(!d(this,b.g_v,97,259)){break b}h=true;f:while(h===true){h=false;x=this._;i=true;c:while(i===true){i=false;if(!e(this,b.g_v,97,259)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!d(this,b.g_v,97,259)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!d(this,b.g_v,97,259)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!e(this,b.g_v,97,259)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!e(this,b.g_v,97,259)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!e(this,b.g_v,97,259)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!d(this,b.g_v,97,259)){break d}break e}if(this._>=this.A){break b}this._++}break c}this._=y;if(!d(this,b.g_v,97,259)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!e(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!e(this,b.g_v,97,259)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.U;function H(a){var x;var y;var z;var u;var v;var l;var f;var g;var h;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;y=a._;g=true;b:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break 
b}h=true;f:while(h===true){h=false;z=a._;i=true;c:while(i===true){i=false;if(!e(a,b.g_v,97,259)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!d(a,b.g_v,97,259)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!d(a,b.g_v,97,259)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!e(a,b.g_v,97,259)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!e(a,b.g_v,97,259)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!e(a,b.g_v,97,259)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!d(a,b.g_v,97,259)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!d(a,b.g_v,97,259)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!e(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!e(a,b.g_v,97,259)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=q(this,b.a_0,3);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'i')){return false}break;case 2:if(!c(this,'u')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function I(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=q(a,b.a_0,3);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'i')){return false}break;case 2:if(!c(a,'u')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return 
true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.Y=function(){var a;var e;var d;var g;this.C=this._;a=f(this,b.a_1,16);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!c(this,'a')){return false}break;case 3:if(!c(this,'e')){return false}break;case 4:if(!c(this,'i')){return false}break;case 5:e=this.A-this._;d=true;a:while(d===true){d=false;if(!i(this,2,'ab')){break a}return false}this._=this.A-e;if(!c(this,'i')){return false}break;case 6:if(!c(this,'at')){return false}break;case 7:if(!c(this,'aţi')){return false}break}return true};b.prototype.r_step_0=b.prototype.Y;function J(a){var d;var g;var e;var h;a.C=a._;d=f(a,b.a_1,16);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!c(a,'a')){return false}break;case 3:if(!c(a,'e')){return false}break;case 4:if(!c(a,'i')){return false}break;case 5:g=a.A-a._;e=true;a:while(e===true){e=false;if(!i(a,2,'ab')){break a}return false}a._=a.A-g;if(!c(a,'i')){return false}break;case 6:if(!c(a,'at')){return false}break;case 7:if(!c(a,'aţi')){return false}break}return true};b.prototype.T=function(){var a;var d;var e;var g;d=this.A-(e=this._);this.C=e;a=f(this,b.a_2,46);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'abil')){return false}break;case 2:if(!c(this,'ibil')){return false}break;case 3:if(!c(this,'iv')){return false}break;case 4:if(!c(this,'ic')){return false}break;case 5:if(!c(this,'at')){return false}break;case 6:if(!c(this,'it')){return 
false}break}this.B_standard_suffix_removed=true;this._=this.A-d;return true};b.prototype.r_combo_suffix=b.prototype.T;function o(a){var d;var e;var g;var h;e=a.A-(g=a._);a.C=g;d=f(a,b.a_2,46);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'abil')){return false}break;case 2:if(!c(a,'ibil')){return false}break;case 3:if(!c(a,'iv')){return false}break;case 4:if(!c(a,'ic')){return false}break;case 5:if(!c(a,'at')){return false}break;case 6:if(!c(a,'it')){return false}break}a.B_standard_suffix_removed=true;a._=a.A-e;return true};b.prototype.X=function(){var a;var e;var d;var g;this.B_standard_suffix_removed=false;a:while(true){e=this.A-this._;d=true;b:while(d===true){d=false;if(!o(this)){break b}continue a}this._=this.A-e;break a}this.C=this._;a=f(this,b.a_3,62);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p2<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!i(this,1,'ţ')){return false}this.B=this._;if(!c(this,'t')){return false}break;case 3:if(!c(this,'ist')){return false}break}this.B_standard_suffix_removed=true;return true};b.prototype.r_standard_suffix=b.prototype.X;function K(a){var d;var g;var e;var h;a.B_standard_suffix_removed=false;a:while(true){g=a.A-a._;e=true;b:while(e===true){e=false;if(!o(a)){break b}continue a}a._=a.A-g;break a}a.C=a._;d=f(a,b.a_3,62);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p2<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!i(a,1,'ţ')){return false}a.B=a._;if(!c(a,'t')){return false}break;case 3:if(!c(a,'ist')){return false}break}a.B_standard_suffix_removed=true;return true};b.prototype.Z=function(){var d;var h;var a;var j;var e;var g;var k;var l;var m;h=this.A-(k=this._);if(k<this.I_pV){return false}l=this._=this.I_pV;a=this.D;this.D=l;m=this._=this.A-h;this.C=m;d=f(this,b.a_4,94);if(d===0){this.D=a;return 
false}this.B=this._;switch(d){case 0:this.D=a;return false;case 1:e=true;a:while(e===true){e=false;j=this.A-this._;g=true;b:while(g===true){g=false;if(!p(this,b.g_v,97,259)){break b}break a}this._=this.A-j;if(!i(this,1,'u')){this.D=a;return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}this.D=a;return true};b.prototype.r_verb_suffix=b.prototype.Z;function F(a){var e;var l;var d;var j;var g;var h;var m;var n;var k;l=a.A-(m=a._);if(m<a.I_pV){return false}n=a._=a.I_pV;d=a.D;a.D=n;k=a._=a.A-l;a.C=k;e=f(a,b.a_4,94);if(e===0){a.D=d;return false}a.B=a._;switch(e){case 0:a.D=d;return false;case 1:g=true;a:while(g===true){g=false;j=a.A-a._;h=true;b:while(h===true){h=false;if(!p(a,b.g_v,97,259)){break b}break a}a._=a.A-j;if(!i(a,1,'u')){a.D=d;return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}a.D=d;return true};b.prototype.a=function(){var a;var d;this.C=this._;a=f(this,b.a_5,5);if(a===0){return false}this.B=d=this._;if(!(!(this.I_pV<=d)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_vowel_suffix=b.prototype.a;function B(a){var d;var e;a.C=a._;d=f(a,b.a_5,5);if(d===0){return false}a.B=e=a._;if(!(!(a.I_pV<=e)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.J=function(){var n;var j;var k;var l;var m;var o;var p;var b;var c;var d;var e;var f;var a;var g;var h;var i;var r;var s;var t;var u;var v;var w;var x;var y;var q;n=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}r=this._=n;j=r;c=true;a:while(c===true){c=false;if(!H(this)){break a}}s=this._=j;this.D=s;u=this._=t=this.A;k=t-u;d=true;a:while(d===true){d=false;if(!J(this)){break a}}w=this._=(v=this.A)-k;l=v-w;e=true;a:while(e===true){e=false;if(!K(this)){break 
a}}y=this._=(x=this.A)-l;m=x-y;f=true;a:while(f===true){f=false;a=true;b:while(a===true){a=false;o=this.A-this._;g=true;c:while(g===true){g=false;if(!this.B_standard_suffix_removed){break c}break b}this._=this.A-o;if(!F(this)){break a}}}this._=this.A-m;h=true;a:while(h===true){h=false;if(!B(this)){break a}}q=this._=this.D;p=q;i=true;a:while(i===true){i=false;if(!I(this)){break a}}this._=p;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='RomanianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;h(b,'methodObject',function(){return new b});h(b,'a_0',function(){return[new a('',-1,3),new a('I',0,1),new a('U',0,2)]});h(b,'a_1',function(){return[new a('ea',-1,3),new a('aţia',-1,7),new a('aua',-1,2),new a('iua',-1,4),new a('aţie',-1,7),new a('ele',-1,3),new a('ile',-1,5),new a('iile',6,4),new a('iei',-1,4),new a('atei',-1,6),new a('ii',-1,4),new a('ului',-1,1),new a('ul',-1,1),new a('elor',-1,3),new a('ilor',-1,4),new a('iilor',14,4)]});h(b,'a_2',function(){return[new a('icala',-1,4),new a('iciva',-1,4),new a('ativa',-1,5),new a('itiva',-1,6),new a('icale',-1,4),new a('aţiune',-1,5),new a('iţiune',-1,6),new a('atoare',-1,5),new a('itoare',-1,6),new a('ătoare',-1,5),new a('icitate',-1,4),new a('abilitate',-1,1),new a('ibilitate',-1,2),new a('ivitate',-1,3),new a('icive',-1,4),new a('ative',-1,5),new a('itive',-1,6),new a('icali',-1,4),new a('atori',-1,5),new a('icatori',18,4),new a('itori',-1,6),new a('ători',-1,5),new a('icitati',-1,4),new a('abilitati',-1,1),new a('ivitati',-1,3),new a('icivi',-1,4),new a('ativi',-1,5),new a('itivi',-1,6),new a('icităi',-1,4),new a('abilităi',-1,1),new a('ivităi',-1,3),new a('icităţi',-1,4),new a('abilităţi',-1,1),new a('ivităţi',-1,3),new a('ical',-1,4),new a('ator',-1,5),new a('icator',35,4),new a('itor',-1,6),new 
a('ător',-1,5),new a('iciv',-1,4),new a('ativ',-1,5),new a('itiv',-1,6),new a('icală',-1,4),new a('icivă',-1,4),new a('ativă',-1,5),new a('itivă',-1,6)]});h(b,'a_3',function(){return[new a('ica',-1,1),new a('abila',-1,1),new a('ibila',-1,1),new a('oasa',-1,1),new a('ata',-1,1),new a('ita',-1,1),new a('anta',-1,1),new a('ista',-1,3),new a('uta',-1,1),new a('iva',-1,1),new a('ic',-1,1),new a('ice',-1,1),new a('abile',-1,1),new a('ibile',-1,1),new a('isme',-1,3),new a('iune',-1,2),new a('oase',-1,1),new a('ate',-1,1),new a('itate',17,1),new a('ite',-1,1),new a('ante',-1,1),new a('iste',-1,3),new a('ute',-1,1),new a('ive',-1,1),new a('ici',-1,1),new a('abili',-1,1),new a('ibili',-1,1),new a('iuni',-1,2),new a('atori',-1,1),new a('osi',-1,1),new a('ati',-1,1),new a('itati',30,1),new a('iti',-1,1),new a('anti',-1,1),new a('isti',-1,3),new a('uti',-1,1),new a('işti',-1,3),new a('ivi',-1,1),new a('ităi',-1,1),new a('oşi',-1,1),new a('ităţi',-1,1),new a('abil',-1,1),new a('ibil',-1,1),new a('ism',-1,3),new a('ator',-1,1),new a('os',-1,1),new a('at',-1,1),new a('it',-1,1),new a('ant',-1,1),new a('ist',-1,3),new a('ut',-1,1),new a('iv',-1,1),new a('ică',-1,1),new a('abilă',-1,1),new a('ibilă',-1,1),new a('oasă',-1,1),new a('ată',-1,1),new a('ită',-1,1),new a('antă',-1,1),new a('istă',-1,3),new a('ută',-1,1),new a('ivă',-1,1)]});h(b,'a_4',function(){return[new a('ea',-1,1),new a('ia',-1,1),new a('esc',-1,1),new a('ăsc',-1,1),new a('ind',-1,1),new a('ând',-1,1),new a('are',-1,1),new a('ere',-1,1),new a('ire',-1,1),new a('âre',-1,1),new a('se',-1,2),new a('ase',10,1),new a('sese',10,2),new a('ise',10,1),new a('use',10,1),new a('âse',10,1),new a('eşte',-1,1),new a('ăşte',-1,1),new a('eze',-1,1),new a('ai',-1,1),new a('eai',19,1),new a('iai',19,1),new a('sei',-1,2),new a('eşti',-1,1),new a('ăşti',-1,1),new a('ui',-1,1),new a('ezi',-1,1),new a('âi',-1,1),new a('aşi',-1,1),new a('seşi',-1,2),new a('aseşi',29,1),new a('seseşi',29,2),new a('iseşi',29,1),new a('useşi',29,1),new 
a('âseşi',29,1),new a('işi',-1,1),new a('uşi',-1,1),new a('âşi',-1,1),new a('aţi',-1,2),new a('eaţi',38,1),new a('iaţi',38,1),new a('eţi',-1,2),new a('iţi',-1,2),new a('âţi',-1,2),new a('arăţi',-1,1),new a('serăţi',-1,2),new a('aserăţi',45,1),new a('seserăţi',45,2),new a('iserăţi',45,1),new a('userăţi',45,1),new a('âserăţi',45,1),new a('irăţi',-1,1),new a('urăţi',-1,1),new a('ârăţi',-1,1),new a('am',-1,1),new a('eam',54,1),new a('iam',54,1),new a('em',-1,2),new a('asem',57,1),new a('sesem',57,2),new a('isem',57,1),new a('usem',57,1),new a('âsem',57,1),new a('im',-1,2),new a('âm',-1,2),new a('ăm',-1,2),new a('arăm',65,1),new a('serăm',65,2),new a('aserăm',67,1),new a('seserăm',67,2),new a('iserăm',67,1),new a('userăm',67,1),new a('âserăm',67,1),new a('irăm',65,1),new a('urăm',65,1),new a('ârăm',65,1),new a('au',-1,1),new a('eau',76,1),new a('iau',76,1),new a('indu',-1,1),new a('ându',-1,1),new a('ez',-1,1),new a('ească',-1,1),new a('ară',-1,1),new a('seră',-1,2),new a('aseră',84,1),new a('seseră',84,2),new a('iseră',84,1),new a('useră',84,1),new a('âseră',84,1),new a('iră',-1,1),new a('ură',-1,1),new a('âră',-1,1),new a('ează',-1,1)]});h(b,'a_5',function(){return[new a('a',-1,1),new a('e',-1,1),new a('ie',1,1),new a('i',-1,1),new a('ă',-1,1)]});h(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4]});var r={'src/stemmer.jsx':{Stemmer:n},'src/romanian-stemmer.jsx':{RomanianStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/romanian-stemmer.jsx").RomanianStemmer;
"""
@@ -28,12 +27,12 @@ class SearchRomanian(SearchLanguage):
language_name = 'Romanian'
js_stemmer_rawcode = 'romanian-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = set() # type: Set[unicode]
+ stopwords = set() # type: Set[str]
def init(self, options):
# type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('romanian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/ru.py b/sphinx/search/ru.py
index cc189a953..8719ef1d2 100644
--- a/sphinx/search/ru.py
+++ b/sphinx/search/ru.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.ru
~~~~~~~~~~~~~~~~
@@ -18,7 +17,7 @@ if False:
from typing import Any # NOQA
-russian_stopwords = parse_stop_word(u'''
+russian_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/russian/stop.txt
и | and
в | in/into
@@ -247,7 +246,7 @@ russian_stopwords = parse_stop_word(u'''
| нельзя
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(h){function j(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function J(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function f(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function K(a,b,c){return a[b]=a[b]/c|0}var p=parseInt;var z=parseFloat;function L(a){return a!==a}var x=isFinite;var w=encodeURIComponent;var u=decodeURIComponent;var t=encodeURI;var s=decodeURI;var B=Object.prototype.toString;var q=Object.prototype.hasOwnProperty;function i(){}h.require=function(b){var a=o[b];return a!==undefined?a:null};h.profilerIsRunning=function(){return i.getResults!=null};h.getProfileResults=function(){return(i.getResults||function(){return{}})()};h.postProfileResults=function(a,b){if(i.postResults==null)throw new Error('profiler has not been turned on');return i.postResults(a,b)};h.resetProfileResults=function(){if(i.resetResults==null)throw new Error('profiler has not been turned on');return i.resetResults()};h.DEBUG=false;function r(){};j([r],Error);function a(a,b,c){this.G=a.length;this.X=a;this.a=b;this.J=c;this.I=null;this.b=null};j([a],Object);function m(){};j([m],Object);function g(){var a;var b;var c;this.F={};a=this.D='';b=this._=0;c=this.A=a.length;this.E=0;this.B=b;this.C=c};j([g],m);function v(a,b){a.D=b.D;a._=b._;a.A=b.A;a.E=b.E;a.B=b.B;a.C=b.C};function k(b,d,c,e){var a;if(b._>=b.A){return false}a=b.D.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function l(a,d,c,e){var b;if(a._>=a.A){return false}b=a.D.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function d(a,b,d){var c;if(a._-a.E<b){return false}if(a.D.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function e(d,m,p){var b;var 
g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.E;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.D.charCodeAt(e-1-c)-a.X.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.a;if(b<0){return 0}}return-1};function A(a,b,d,e){var c;c=e.length-(d-b);a.D=a.D.slice(0,b)+e+a.D.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.D.length?false:true){A(a,a.B,a.C,f);b=true}return b};g.prototype.H=function(){return false};g.prototype.Y=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.D=b;d=this._=0;e=this.A=c.length;this.E=0;this.B=d;this.C=e;this.H();a=this.D;this.F['.'+b]=a}return a};g.prototype.stemWord=g.prototype.Y;g.prototype.Z=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.D=c;g=this._=0;h=this.A=f.length;this.E=0;this.B=g;this.C=h;this.H();a=this.D;this.F['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.Z;function b(){g.call(this);this.I_p2=0;this.I_pV=0};j([b],g);b.prototype.K=function(a){this.I_p2=a.I_p2;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.K;b.prototype.R=function(){var g;var a;var c;var d;var e;var f;var h;this.I_pV=h=this.A;this.I_p2=h;g=this._;a=true;a:while(a===true){a=false;b:while(true){c=true;c:while(c===true){c=false;if(!k(this,b.g_v,1072,1103)){break c}break b}if(this._>=this.A){break a}this._++}this.I_pV=this._;b:while(true){d=true;c:while(d===true){d=false;if(!l(this,b.g_v,1072,1103)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){e=true;c:while(e===true){e=false;if(!k(this,b.g_v,1072,1103)){break c}break 
b}if(this._>=this.A){break a}this._++}b:while(true){f=true;c:while(f===true){f=false;if(!l(this,b.g_v,1072,1103)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=g;return true};b.prototype.r_mark_regions=b.prototype.R;function D(a){var h;var c;var d;var e;var f;var g;var i;a.I_pV=i=a.A;a.I_p2=i;h=a._;c=true;a:while(c===true){c=false;b:while(true){d=true;c:while(d===true){d=false;if(!k(a,b.g_v,1072,1103)){break c}break b}if(a._>=a.A){break a}a._++}a.I_pV=a._;b:while(true){e=true;c:while(e===true){e=false;if(!l(a,b.g_v,1072,1103)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){f=true;c:while(f===true){f=false;if(!k(a,b.g_v,1072,1103)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){g=true;c:while(g===true){g=false;if(!l(a,b.g_v,1072,1103)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=h;return true};b.prototype.N=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.N;b.prototype.T=function(){var a;var h;var f;var g;this.C=this._;a=e(this,b.a_0,9);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:f=true;a:while(f===true){f=false;h=this.A-this._;g=true;b:while(g===true){g=false;if(!d(this,1,'а')){break b}break a}this._=this.A-h;if(!d(this,1,'я')){return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}return true};b.prototype.r_perfective_gerund=b.prototype.T;function E(a){var f;var i;var g;var h;a.C=a._;f=e(a,b.a_0,9);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:g=true;a:while(g===true){g=false;i=a.A-a._;h=true;b:while(h===true){h=false;if(!d(a,1,'а')){break b}break a}a._=a.A-i;if(!d(a,1,'я')){return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}return true};b.prototype.P=function(){var a;this.C=this._;a=e(this,b.a_1,26);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return 
true};b.prototype.r_adjective=b.prototype.P;function n(a){var d;a.C=a._;d=e(a,b.a_1,26);if(d===0){return false}a.B=a._;switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.O=function(){var f;var a;var j;var g;var h;var i;if(!n(this)){return false}a=this.A-this._;g=true;a:while(g===true){g=false;this.C=this._;f=e(this,b.a_2,8);if(f===0){this._=this.A-a;break a}this.B=this._;switch(f){case 0:this._=this.A-a;break a;case 1:h=true;b:while(h===true){h=false;j=this.A-this._;i=true;c:while(i===true){i=false;if(!d(this,1,'а')){break c}break b}this._=this.A-j;if(!d(this,1,'я')){this._=this.A-a;break a}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}}return true};b.prototype.r_adjectival=b.prototype.O;function G(a){var g;var f;var k;var h;var i;var j;if(!n(a)){return false}f=a.A-a._;h=true;a:while(h===true){h=false;a.C=a._;g=e(a,b.a_2,8);if(g===0){a._=a.A-f;break a}a.B=a._;switch(g){case 0:a._=a.A-f;break a;case 1:i=true;b:while(i===true){i=false;k=a.A-a._;j=true;c:while(j===true){j=false;if(!d(a,1,'а')){break c}break b}a._=a.A-k;if(!d(a,1,'я')){a._=a.A-f;break a}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}}return true};b.prototype.U=function(){var a;this.C=this._;a=e(this,b.a_3,2);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_reflexive=b.prototype.U;function H(a){var d;a.C=a._;d=e(a,b.a_3,2);if(d===0){return false}a.B=a._;switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.W=function(){var a;var h;var f;var g;this.C=this._;a=e(this,b.a_4,46);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:f=true;a:while(f===true){f=false;h=this.A-this._;g=true;b:while(g===true){g=false;if(!d(this,1,'а')){break b}break a}this._=this.A-h;if(!d(this,1,'я')){return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return 
false}break}return true};b.prototype.r_verb=b.prototype.W;function I(a){var f;var i;var g;var h;a.C=a._;f=e(a,b.a_4,46);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:g=true;a:while(g===true){g=false;i=a.A-a._;h=true;b:while(h===true){h=false;if(!d(a,1,'а')){break b}break a}a._=a.A-i;if(!d(a,1,'я')){return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}return true};b.prototype.S=function(){var a;this.C=this._;a=e(this,b.a_5,36);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_noun=b.prototype.S;function F(a){var d;a.C=a._;d=e(a,b.a_5,36);if(d===0){return false}a.B=a._;switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.Q=function(){var a;var d;this.C=this._;a=e(this,b.a_6,2);if(a===0){return false}this.B=d=this._;if(!(!(this.I_p2<=d)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_derivational=b.prototype.Q;function C(a){var d;var f;a.C=a._;d=e(a,b.a_6,2);if(d===0){return false}a.B=f=a._;if(!(!(a.I_p2<=f)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.V=function(){var a;this.C=this._;a=e(this,b.a_7,4);if(a===0){return false}this.B=this._;switch(a){case 0:return false;case 1:if(!c(this,'')){return false}this.C=this._;if(!d(this,1,'н')){return false}this.B=this._;if(!d(this,1,'н')){return false}if(!c(this,'')){return false}break;case 2:if(!d(this,1,'н')){return false}if(!c(this,'')){return false}break;case 3:if(!c(this,'')){return false}break}return true};b.prototype.r_tidy_up=b.prototype.V;function y(a){var f;a.C=a._;f=e(a,b.a_7,4);if(f===0){return false}a.B=a._;switch(f){case 0:return false;case 1:if(!c(a,'')){return false}a.C=a._;if(!d(a,1,'н')){return false}a.B=a._;if(!d(a,1,'н')){return false}if(!c(a,'')){return false}break;case 
2:if(!d(a,1,'н')){return false}if(!c(a,'')){return false}break;case 3:if(!c(a,'')){return false}break}return true};b.prototype.H=function(){var s;var v;var w;var A;var p;var q;var i;var t;var u;var e;var f;var g;var h;var a;var j;var b;var k;var l;var m;var n;var x;var z;var o;var B;var J;var K;var L;var M;var N;var O;var r;s=this._;e=true;a:while(e===true){e=false;if(!D(this)){break a}}x=this._=s;this.E=x;o=this._=z=this.A;v=z-o;if(o<this.I_pV){return false}K=this._=this.I_pV;w=this.E;this.E=K;M=this._=(L=this.A)-v;A=L-M;f=true;c:while(f===true){f=false;g=true;b:while(g===true){g=false;p=this.A-this._;h=true;a:while(h===true){h=false;if(!E(this)){break a}break b}J=this._=(B=this.A)-p;q=B-J;a=true;a:while(a===true){a=false;if(!H(this)){this._=this.A-q;break a}}j=true;a:while(j===true){j=false;i=this.A-this._;b=true;d:while(b===true){b=false;if(!G(this)){break d}break a}this._=this.A-i;k=true;d:while(k===true){k=false;if(!I(this)){break d}break a}this._=this.A-i;if(!F(this)){break c}}}}O=this._=(N=this.A)-A;t=N-O;l=true;a:while(l===true){l=false;this.C=this._;if(!d(this,1,'и')){this._=this.A-t;break a}this.B=this._;if(!c(this,'')){return false}}u=this.A-this._;m=true;a:while(m===true){m=false;if(!C(this)){break a}}this._=this.A-u;n=true;a:while(n===true){n=false;if(!y(this)){break a}}r=this.E=w;this._=r;return true};b.prototype.stem=b.prototype.H;b.prototype.L=function(a){return a instanceof b};b.prototype.equals=b.prototype.L;b.prototype.M=function(){var c;var a;var b;var d;c='RussianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.M;b.serialVersionUID=1;f(b,'methodObject',function(){return new b});f(b,'a_0',function(){return[new a('в',-1,1),new a('ив',0,2),new a('ыв',0,2),new a('вши',-1,1),new a('ивши',3,2),new a('ывши',3,2),new a('вшись',-1,1),new a('ившись',6,2),new a('ывшись',6,2)]});f(b,'a_1',function(){return[new a('ее',-1,1),new a('ие',-1,1),new a('ое',-1,1),new a('ые',-1,1),new 
a('ими',-1,1),new a('ыми',-1,1),new a('ей',-1,1),new a('ий',-1,1),new a('ой',-1,1),new a('ый',-1,1),new a('ем',-1,1),new a('им',-1,1),new a('ом',-1,1),new a('ым',-1,1),new a('его',-1,1),new a('ого',-1,1),new a('ему',-1,1),new a('ому',-1,1),new a('их',-1,1),new a('ых',-1,1),new a('ею',-1,1),new a('ою',-1,1),new a('ую',-1,1),new a('юю',-1,1),new a('ая',-1,1),new a('яя',-1,1)]});f(b,'a_2',function(){return[new a('ем',-1,1),new a('нн',-1,1),new a('вш',-1,1),new a('ивш',2,2),new a('ывш',2,2),new a('щ',-1,1),new a('ющ',5,1),new a('ующ',6,2)]});f(b,'a_3',function(){return[new a('сь',-1,1),new a('ся',-1,1)]});f(b,'a_4',function(){return[new a('ла',-1,1),new a('ила',0,2),new a('ыла',0,2),new a('на',-1,1),new a('ена',3,2),new a('ете',-1,1),new a('ите',-1,2),new a('йте',-1,1),new a('ейте',7,2),new a('уйте',7,2),new a('ли',-1,1),new a('или',10,2),new a('ыли',10,2),new a('й',-1,1),new a('ей',13,2),new a('уй',13,2),new a('л',-1,1),new a('ил',16,2),new a('ыл',16,2),new a('ем',-1,1),new a('им',-1,2),new a('ым',-1,2),new a('н',-1,1),new a('ен',22,2),new a('ло',-1,1),new a('ило',24,2),new a('ыло',24,2),new a('но',-1,1),new a('ено',27,2),new a('нно',27,1),new a('ет',-1,1),new a('ует',30,2),new a('ит',-1,2),new a('ыт',-1,2),new a('ют',-1,1),new a('уют',34,2),new a('ят',-1,2),new a('ны',-1,1),new a('ены',37,2),new a('ть',-1,1),new a('ить',39,2),new a('ыть',39,2),new a('ешь',-1,1),new a('ишь',-1,2),new a('ю',-1,2),new a('ую',44,2)]});f(b,'a_5',function(){return[new a('а',-1,1),new a('ев',-1,1),new a('ов',-1,1),new a('е',-1,1),new a('ие',3,1),new a('ье',3,1),new a('и',-1,1),new a('еи',6,1),new a('ии',6,1),new a('ами',6,1),new a('ями',6,1),new a('иями',10,1),new a('й',-1,1),new a('ей',12,1),new a('ией',13,1),new a('ий',12,1),new a('ой',12,1),new a('ам',-1,1),new a('ем',-1,1),new a('ием',18,1),new a('ом',-1,1),new a('ям',-1,1),new a('иям',21,1),new a('о',-1,1),new a('у',-1,1),new a('ах',-1,1),new a('ях',-1,1),new a('иях',26,1),new a('ы',-1,1),new a('ь',-1,1),new a('ю',-1,1),new 
a('ию',30,1),new a('ью',30,1),new a('я',-1,1),new a('ия',33,1),new a('ья',33,1)]});f(b,'a_6',function(){return[new a('ост',-1,1),new a('ость',-1,1)]});f(b,'a_7',function(){return[new a('ейше',-1,1),new a('н',-1,2),new a('ейш',-1,1),new a('ь',-1,3)]});f(b,'g_v',function(){return[33,65,8,232]});var o={'src/stemmer.jsx':{Stemmer:m},'src/russian-stemmer.jsx':{RussianStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/russian-stemmer.jsx").RussianStemmer;
"""
@@ -265,5 +264,5 @@ class SearchRussian(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('russian')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/sv.py b/sphinx/search/sv.py
index fdb64685e..cfdd15f92 100644
--- a/sphinx/search/sv.py
+++ b/sphinx/search/sv.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.sv
~~~~~~~~~~~~~~~~
@@ -17,7 +16,7 @@ if False:
# For type annotation
from typing import Any
-swedish_stopwords = parse_stop_word(u'''
+swedish_stopwords = parse_stop_word('''
| source: http://snowball.tartarus.org/algorithms/swedish/stop.txt
och | and
det | it, this/that
@@ -135,7 +134,7 @@ era | your
vilkas | whose
''')
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(e){function i(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function G(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function h(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function F(a,b,c){return a[b]=a[b]/c|0}var t=parseInt;var u=parseFloat;function E(a){return a!==a}var x=isFinite;var y=encodeURIComponent;var z=decodeURIComponent;var B=encodeURI;var C=decodeURI;var o=Object.prototype.toString;var p=Object.prototype.hasOwnProperty;function f(){}e.require=function(b){var a=n[b];return a!==undefined?a:null};e.profilerIsRunning=function(){return f.getResults!=null};e.getProfileResults=function(){return(f.getResults||function(){return{}})()};e.postProfileResults=function(a,b){if(f.postResults==null)throw new Error('profiler has not been turned on');return f.postResults(a,b)};e.resetProfileResults=function(){if(f.resetResults==null)throw new Error('profiler has not been turned on');return f.resetResults()};e.DEBUG=false;function r(){};i([r],Error);function a(a,b,c){this.G=a.length;this.R=a;this.U=b;this.J=c;this.I=null;this.V=null};i([a],Object);function j(){};i([j],Object);function d(){var a;var b;var c;this.F={};a=this.C='';b=this._=0;c=this.B=a.length;this.A=0;this.D=b;this.E=c};i([d],j);function v(a,b){a.C=b.C;a._=b._;a.B=b.B;a.A=b.A;a.D=b.D;a.E=b.E};function k(b,d,c,e){var a;if(b._>=b.B){return false}a=b.C.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function l(b,d,c,e){var a;if(b._<=b.A){return false}a=b.C.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function m(a,d,c,e){var b;if(a._>=a.B){return false}b=a.C.charCodeAt(a._);if(b>e||b<c){a._++;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function g(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.A;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.C.charCodeAt(e-1-c)-a.R.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.U;if(b<0){return 0}}return-1};function A(a,b,d,e){var c;c=e.length-(d-b);a.C=a.C.slice(0,b)+e+a.C.slice(d);a.B+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.D)<0||c>(d=a.E)||d>(e=a.B)||e>a.C.length?false:true){A(a,a.D,a.E,f);b=true}return b};d.prototype.H=function(){return false};d.prototype.S=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.C=b;d=this._=0;e=this.B=c.length;this.A=0;this.D=d;this.E=e;this.H();a=this.C;this.F['.'+b]=a}return a};d.prototype.stemWord=d.prototype.S;d.prototype.T=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.C=c;g=this._=0;h=this.B=f.length;this.A=0;this.D=g;this.E=h;this.H();a=this.C;this.F['.'+c]=a}d.push(a)}return d};d.prototype.stemWords=d.prototype.T;function b(){d.call(this);this.I_x=0;this.I_p1=0};i([b],d);b.prototype.K=function(a){this.I_x=a.I_x;this.I_p1=a.I_p1;v(this,a)};b.prototype.copy_from=b.prototype.K;b.prototype.P=function(){var g;var d;var a;var e;var c;var f;var i;var j;var l;var h;this.I_p1=j=this.B;g=i=this._;a=i+3|0;if(0>a||a>j){return false}h=this._=a;this.I_x=h;this._=g;a:while(true){d=this._;e=true;b:while(e===true){e=false;if(!k(this,b.g_v,97,246)){break b}this._=d;break a}l=this._=d;if(l>=this.B){return false}this._++}a:while(true){c=true;b:while(c===true){c=false;if(!m(this,b.g_v,97,246)){break 
b}break a}if(this._>=this.B){return false}this._++}this.I_p1=this._;f=true;a:while(f===true){f=false;if(!(this.I_p1<this.I_x)){break a}this.I_p1=this.I_x}return true};b.prototype.r_mark_regions=b.prototype.P;function D(a){var h;var e;var c;var f;var d;var g;var j;var l;var n;var i;a.I_p1=l=a.B;h=j=a._;c=j+3|0;if(0>c||c>l){return false}i=a._=c;a.I_x=i;a._=h;a:while(true){e=a._;f=true;b:while(f===true){f=false;if(!k(a,b.g_v,97,246)){break b}a._=e;break a}n=a._=e;if(n>=a.B){return false}a._++}a:while(true){d=true;b:while(d===true){d=false;if(!m(a,b.g_v,97,246)){break b}break a}if(a._>=a.B){return false}a._++}a.I_p1=a._;g=true;a:while(g===true){g=false;if(!(a.I_p1<a.I_x)){break a}a.I_p1=a.I_x}return true};b.prototype.O=function(){var a;var e;var d;var f;var h;var i;e=this.B-(f=this._);if(f<this.I_p1){return false}h=this._=this.I_p1;d=this.A;this.A=h;i=this._=this.B-e;this.E=i;a=g(this,b.a_0,37);if(a===0){this.A=d;return false}this.D=this._;this.A=d;switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!l(this,b.g_s_ending,98,121)){return false}if(!c(this,'')){return false}break}return true};b.prototype.r_main_suffix=b.prototype.O;function w(a){var d;var f;var e;var h;var i;var j;f=a.B-(h=a._);if(h<a.I_p1){return false}i=a._=a.I_p1;e=a.A;a.A=i;j=a._=a.B-f;a.E=j;d=g(a,b.a_0,37);if(d===0){a.A=e;return false}a.D=a._;a.A=e;switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!l(a,b.g_s_ending,98,121)){return false}if(!c(a,'')){return false}break}return true};b.prototype.N=function(){var e;var a;var f;var h;var i;var j;var k;var d;e=this.B-(h=this._);if(h<this.I_p1){return false}i=this._=this.I_p1;a=this.A;this.A=i;k=this._=(j=this.B)-e;f=j-k;if(g(this,b.a_1,7)===0){this.A=a;return false}d=this._=this.B-f;this.E=d;if(d<=this.A){this.A=a;return false}this._--;this.D=this._;if(!c(this,'')){return false}this.A=a;return true};b.prototype.r_consonant_pair=b.prototype.N;function s(a){var f;var d;var h;var i;var j;var k;var 
l;var e;f=a.B-(i=a._);if(i<a.I_p1){return false}j=a._=a.I_p1;d=a.A;a.A=j;l=a._=(k=a.B)-f;h=k-l;if(g(a,b.a_1,7)===0){a.A=d;return false}e=a._=a.B-h;a.E=e;if(e<=a.A){a.A=d;return false}a._--;a.D=a._;if(!c(a,'')){return false}a.A=d;return true};b.prototype.Q=function(){var d;var e;var a;var f;var h;var i;e=this.B-(f=this._);if(f<this.I_p1){return false}h=this._=this.I_p1;a=this.A;this.A=h;i=this._=this.B-e;this.E=i;d=g(this,b.a_2,5);if(d===0){this.A=a;return false}this.D=this._;switch(d){case 0:this.A=a;return false;case 1:if(!c(this,'')){return false}break;case 2:if(!c(this,'lös')){return false}break;case 3:if(!c(this,'full')){return false}break}this.A=a;return true};b.prototype.r_other_suffix=b.prototype.Q;function q(a){var e;var f;var d;var h;var i;var j;f=a.B-(h=a._);if(h<a.I_p1){return false}i=a._=a.I_p1;d=a.A;a.A=i;j=a._=a.B-f;a.E=j;e=g(a,b.a_2,5);if(e===0){a.A=d;return false}a.D=a._;switch(e){case 0:a.A=d;return false;case 1:if(!c(a,'')){return false}break;case 2:if(!c(a,'lös')){return false}break;case 3:if(!c(a,'full')){return false}break}a.A=d;return true};b.prototype.H=function(){var g;var f;var h;var b;var c;var a;var d;var i;var j;var k;var l;var e;g=this._;b=true;a:while(b===true){b=false;if(!D(this)){break a}}i=this._=g;this.A=i;k=this._=j=this.B;f=j-k;c=true;a:while(c===true){c=false;if(!w(this)){break a}}e=this._=(l=this.B)-f;h=l-e;a=true;a:while(a===true){a=false;if(!s(this)){break a}}this._=this.B-h;d=true;a:while(d===true){d=false;if(!q(this)){break a}}this._=this.A;return true};b.prototype.stem=b.prototype.H;b.prototype.L=function(a){return a instanceof b};b.prototype.equals=b.prototype.L;b.prototype.M=function(){var c;var a;var b;var d;c='SwedishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.M;b.serialVersionUID=1;h(b,'methodObject',function(){return new b});h(b,'a_0',function(){return[new a('a',-1,1),new a('arna',0,1),new a('erna',0,1),new a('heterna',2,1),new 
a('orna',0,1),new a('ad',-1,1),new a('e',-1,1),new a('ade',6,1),new a('ande',6,1),new a('arne',6,1),new a('are',6,1),new a('aste',6,1),new a('en',-1,1),new a('anden',12,1),new a('aren',12,1),new a('heten',12,1),new a('ern',-1,1),new a('ar',-1,1),new a('er',-1,1),new a('heter',18,1),new a('or',-1,1),new a('s',-1,2),new a('as',21,1),new a('arnas',22,1),new a('ernas',22,1),new a('ornas',22,1),new a('es',21,1),new a('ades',26,1),new a('andes',26,1),new a('ens',21,1),new a('arens',29,1),new a('hetens',29,1),new a('erns',21,1),new a('at',-1,1),new a('andet',-1,1),new a('het',-1,1),new a('ast',-1,1)]});h(b,'a_1',function(){return[new a('dd',-1,-1),new a('gd',-1,-1),new a('nn',-1,-1),new a('dt',-1,-1),new a('gt',-1,-1),new a('kt',-1,-1),new a('tt',-1,-1)]});h(b,'a_2',function(){return[new a('ig',-1,1),new a('lig',0,1),new a('els',-1,1),new a('fullt',-1,3),new a('löst',-1,2)]});h(b,'g_v',function(){return[17,65,16,1,0,0,0,0,0,0,0,0,0,0,0,0,24,0,32]});h(b,'g_s_ending',function(){return[119,127,149]});var n={'src/stemmer.jsx':{Stemmer:j},'src/swedish-stemmer.jsx':{SwedishStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/swedish-stemmer.jsx").SwedishStemmer;
"""
@@ -153,5 +152,5 @@ class SearchSwedish(SearchLanguage):
self.stemmer = snowballstemmer.stemmer('swedish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/tr.py b/sphinx/search/tr.py
index 1b8a0f94f..ba2cdf951 100644
--- a/sphinx/search/tr.py
+++ b/sphinx/search/tr.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.tr
~~~~~~~~~~~~~~~~
@@ -17,7 +16,7 @@ if False:
# For type annotation
from typing import Dict, Set # NOQA
-js_stemmer = u"""
+js_stemmer = """
var JSX={};(function(q){function r(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function Q(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function j(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function R(a,b,c){return a[b]=a[b]/c|0}var M=parseInt;var K=parseFloat;function P(a){return a!==a}var A=isFinite;var G=encodeURIComponent;var F=decodeURIComponent;var E=encodeURI;var D=decodeURI;var C=Object.prototype.toString;var H=Object.prototype.hasOwnProperty;function p(){}q.require=function(b){var a=y[b];return a!==undefined?a:null};q.profilerIsRunning=function(){return p.getResults!=null};q.getProfileResults=function(){return(p.getResults||function(){return{}})()};q.postProfileResults=function(a,b){if(p.postResults==null)throw new Error('profiler has not been turned on');return p.postResults(a,b)};q.resetProfileResults=function(){if(p.resetResults==null)throw new Error('profiler has not been turned on');return p.resetResults()};q.DEBUG=false;function I(){};r([I],Error);function d(a,b,c){this.G=a.length;this.A_=a;this.D_=b;this.J=c;this.I=null;this.E_=null};r([d],Object);function u(){};r([u],Object);function m(){var a;var b;var c;this.F={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};r([m],u);function B(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function v(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function f(b,d,c,e){var a;if(b._<=b.D){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function t(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function s(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function g(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function b(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.A_.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.D_;if(b<0){return 0}}return-1};function n(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function e(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){n(a,a.B,a.C,f);b=true}return b};m.prototype.H=function(){return false};m.prototype.B_=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.H();a=this.E;this.F['.'+b]=a}return a};m.prototype.stemWord=m.prototype.B_;m.prototype.C_=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.H();a=this.E;this.F['.'+c]=a}d.push(a)}return d};m.prototype.stemWords=m.prototype.C_;function a(){m.call(this);this.B_continue_stemming_noun_suffixes=false;this.I_strlen=0};r([a],m);a.prototype.K=function(a){this.B_continue_stemming_noun_suffixes=a.B_continue_stemming_noun_suffixes;this.I_strlen=a.I_strlen;B(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.O=function(){var E;var q;var b;var e;var h;var i;var j;var k;var 
l;var m;var n;var o;var p;var c;var r;var s;var t;var u;var d;var v;var w;var x;var y;var z;var A;var B;var C;var D;var G;var H;var I;var J;var K;var L;var M;var N;var F;E=this.A-this._;b:while(true){q=this.A-this._;o=true;a:while(o===true){o=false;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-q;break b}G=this._=this.A-q;if(G<=this.D){return false}this._--}p=true;a:while(p===true){p=false;b=this.A-this._;c=true;b:while(c===true){c=false;if(!g(this,1,'a')){break b}c:while(true){e=this.A-this._;r=true;d:while(r===true){r=false;if(!f(this,a.g_vowel1,97,305)){break d}this._=this.A-e;break c}H=this._=this.A-e;if(H<=this.D){break b}this._--}break a}this._=this.A-b;s=true;b:while(s===true){s=false;if(!g(this,1,'e')){break b}c:while(true){h=this.A-this._;t=true;d:while(t===true){t=false;if(!f(this,a.g_vowel2,101,252)){break d}this._=this.A-h;break c}I=this._=this.A-h;if(I<=this.D){break b}this._--}break a}this._=this.A-b;u=true;b:while(u===true){u=false;if(!g(this,1,'ı')){break b}c:while(true){i=this.A-this._;d=true;d:while(d===true){d=false;if(!f(this,a.g_vowel3,97,305)){break d}this._=this.A-i;break c}J=this._=this.A-i;if(J<=this.D){break b}this._--}break a}this._=this.A-b;v=true;b:while(v===true){v=false;if(!g(this,1,'i')){break b}c:while(true){j=this.A-this._;w=true;d:while(w===true){w=false;if(!f(this,a.g_vowel4,101,105)){break d}this._=this.A-j;break c}K=this._=this.A-j;if(K<=this.D){break b}this._--}break a}this._=this.A-b;x=true;b:while(x===true){x=false;if(!g(this,1,'o')){break b}c:while(true){k=this.A-this._;y=true;d:while(y===true){y=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-k;break c}L=this._=this.A-k;if(L<=this.D){break b}this._--}break a}this._=this.A-b;z=true;b:while(z===true){z=false;if(!g(this,1,'ö')){break b}c:while(true){l=this.A-this._;A=true;d:while(A===true){A=false;if(!f(this,a.g_vowel6,246,252)){break d}this._=this.A-l;break c}M=this._=this.A-l;if(M<=this.D){break b}this._--}break 
a}this._=this.A-b;B=true;b:while(B===true){B=false;if(!g(this,1,'u')){break b}c:while(true){m=this.A-this._;C=true;d:while(C===true){C=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-m;break c}N=this._=this.A-m;if(N<=this.D){break b}this._--}break a}this._=this.A-b;if(!g(this,1,'ü')){return false}b:while(true){n=this.A-this._;D=true;c:while(D===true){D=false;if(!f(this,a.g_vowel6,246,252)){break c}this._=this.A-n;break b}F=this._=this.A-n;if(F<=this.D){return false}this._--}}this._=this.A-E;return true};a.prototype.r_check_vowel_harmony=a.prototype.O;function c(b){var F;var r;var c;var e;var h;var i;var j;var k;var l;var m;var n;var o;var p;var q;var d;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var D;var E;var H;var I;var J;var K;var L;var M;var N;var O;var G;F=b.A-b._;b:while(true){r=b.A-b._;o=true;a:while(o===true){o=false;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-r;break b}H=b._=b.A-r;if(H<=b.D){return false}b._--}p=true;a:while(p===true){p=false;c=b.A-b._;q=true;b:while(q===true){q=false;if(!g(b,1,'a')){break b}c:while(true){e=b.A-b._;d=true;d:while(d===true){d=false;if(!f(b,a.g_vowel1,97,305)){break d}b._=b.A-e;break c}I=b._=b.A-e;if(I<=b.D){break b}b._--}break a}b._=b.A-c;s=true;b:while(s===true){s=false;if(!g(b,1,'e')){break b}c:while(true){h=b.A-b._;t=true;d:while(t===true){t=false;if(!f(b,a.g_vowel2,101,252)){break d}b._=b.A-h;break c}J=b._=b.A-h;if(J<=b.D){break b}b._--}break a}b._=b.A-c;u=true;b:while(u===true){u=false;if(!g(b,1,'ı')){break b}c:while(true){i=b.A-b._;v=true;d:while(v===true){v=false;if(!f(b,a.g_vowel3,97,305)){break d}b._=b.A-i;break c}K=b._=b.A-i;if(K<=b.D){break b}b._--}break a}b._=b.A-c;w=true;b:while(w===true){w=false;if(!g(b,1,'i')){break b}c:while(true){j=b.A-b._;x=true;d:while(x===true){x=false;if(!f(b,a.g_vowel4,101,105)){break d}b._=b.A-j;break c}L=b._=b.A-j;if(L<=b.D){break b}b._--}break a}b._=b.A-c;y=true;b:while(y===true){y=false;if(!g(b,1,'o')){break 
b}c:while(true){k=b.A-b._;z=true;d:while(z===true){z=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-k;break c}M=b._=b.A-k;if(M<=b.D){break b}b._--}break a}b._=b.A-c;A=true;b:while(A===true){A=false;if(!g(b,1,'ö')){break b}c:while(true){l=b.A-b._;B=true;d:while(B===true){B=false;if(!f(b,a.g_vowel6,246,252)){break d}b._=b.A-l;break c}N=b._=b.A-l;if(N<=b.D){break b}b._--}break a}b._=b.A-c;C=true;b:while(C===true){C=false;if(!g(b,1,'u')){break b}c:while(true){m=b.A-b._;D=true;d:while(D===true){D=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-m;break c}O=b._=b.A-m;if(O<=b.D){break b}b._--}break a}b._=b.A-c;if(!g(b,1,'ü')){return false}b:while(true){n=b.A-b._;E=true;c:while(E===true){E=false;if(!f(b,a.g_vowel6,246,252)){break c}b._=b.A-n;break b}G=b._=b.A-n;if(G<=b.D){return false}b._--}}b._=b.A-F;return true};a.prototype.j=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'n')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'n')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_n_consonant=a.prototype.j;function o(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'n')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'n')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return 
false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.k=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'s')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'s')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_s_consonant=a.prototype.k;function l(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'s')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'s')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.l=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'y')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'y')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return 
true};a.prototype.r_mark_suffix_with_optional_y_consonant=a.prototype.l;function h(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'y')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'y')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.i=function(){var j;var g;var k;var h;var l;var i;var b;var e;var d;var m;var n;var o;var p;var c;b=true;b:while(b===true){b=false;j=this.A-this._;e=true;a:while(e===true){e=false;g=this.A-this._;if(!f(this,a.g_U,105,305)){break a}m=this._=this.A-g;if(m<=this.D){break a}this._--;k=this.A-this._;if(!t(this,a.g_vowel,97,305)){break a}this._=this.A-k;break b}o=this._=(n=this.A)-j;h=n-o;d=true;a:while(d===true){d=false;l=this.A-this._;if(!f(this,a.g_U,105,305)){break a}this._=this.A-l;return false}c=this._=(p=this.A)-h;i=p-c;if(c<=this.D){return false}this._--;if(!t(this,a.g_vowel,97,305)){return false}this._=this.A-i}return true};a.prototype.r_mark_suffix_with_optional_U_vowel=a.prototype.i;function k(b){var h;var l;var k;var i;var m;var j;var c;var e;var d;var n;var o;var p;var q;var g;c=true;b:while(c===true){c=false;h=b.A-b._;e=true;a:while(e===true){e=false;l=b.A-b._;if(!f(b,a.g_U,105,305)){break a}n=b._=b.A-l;if(n<=b.D){break a}b._--;k=b.A-b._;if(!t(b,a.g_vowel,97,305)){break a}b._=b.A-k;break b}p=b._=(o=b.A)-h;i=o-p;d=true;a:while(d===true){d=false;m=b.A-b._;if(!f(b,a.g_U,105,305)){break a}b._=b.A-m;return false}g=b._=(q=b.A)-i;j=q-g;if(g<=b.D){return false}b._--;if(!t(b,a.g_vowel,97,305)){return false}b._=b.A-j}return true};a.prototype.e=function(){return 
b(this,a.a_0,10)===0?false:!k(this)?false:true};a.prototype.r_mark_possessives=a.prototype.e;a.prototype.f=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true};a.prototype.r_mark_sU=a.prototype.f;a.prototype.W=function(){return b(this,a.a_1,2)===0?false:true};a.prototype.r_mark_lArI=a.prototype.W;a.prototype.o=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true};a.prototype.r_mark_yU=a.prototype.o;a.prototype.Y=function(){return!c(this)?false:b(this,a.a_2,4)===0?false:true};a.prototype.r_mark_nU=a.prototype.Y;a.prototype.Z=function(){return!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true};a.prototype.r_mark_nUn=a.prototype.Z;a.prototype.m=function(){return!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true};a.prototype.r_mark_yA=a.prototype.m;a.prototype.X=function(){return!c(this)?false:b(this,a.a_5,2)===0?false:true};a.prototype.r_mark_nA=a.prototype.X;a.prototype.Q=function(){return!c(this)?false:b(this,a.a_6,4)===0?false:true};a.prototype.r_mark_DA=a.prototype.Q;a.prototype.c=function(){return!c(this)?false:b(this,a.a_7,2)===0?false:true};a.prototype.r_mark_ndA=a.prototype.c;a.prototype.R=function(){return!c(this)?false:b(this,a.a_8,4)===0?false:true};a.prototype.r_mark_DAn=a.prototype.R;a.prototype.d=function(){return!c(this)?false:b(this,a.a_9,2)===0?false:true};a.prototype.r_mark_ndAn=a.prototype.d;a.prototype.s=function(){return!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true};a.prototype.r_mark_ylA=a.prototype.s;a.prototype.U=function(){return!g(this,2,'ki')?false:true};a.prototype.r_mark_ki=a.prototype.U;a.prototype.b=function(){return!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true};a.prototype.r_mark_ncA=a.prototype.b;a.prototype.p=function(){return!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUm=a.prototype.p;a.prototype.g=function(){return!c(this)?false:b(this,a.a_13,4)===0?false:true};a.prototype.r_mark_sUn=a.pr
ototype.g;a.prototype.q=function(){return!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUz=a.prototype.q;a.prototype.h=function(){return b(this,a.a_15,4)===0?false:true};a.prototype.r_mark_sUnUz=a.prototype.h;a.prototype.V=function(){return!c(this)?false:b(this,a.a_16,2)===0?false:true};a.prototype.r_mark_lAr=a.prototype.V;a.prototype.a=function(){return!c(this)?false:b(this,a.a_17,4)===0?false:true};a.prototype.r_mark_nUz=a.prototype.a;a.prototype.S=function(){return!c(this)?false:b(this,a.a_18,8)===0?false:true};a.prototype.r_mark_DUr=a.prototype.S;a.prototype.T=function(){return b(this,a.a_19,2)===0?false:true};a.prototype.r_mark_cAsInA=a.prototype.T;a.prototype.n=function(){return!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true};a.prototype.r_mark_yDU=a.prototype.n;a.prototype.u=function(){return b(this,a.a_21,8)===0?false:!h(this)?false:true};a.prototype.r_mark_ysA=a.prototype.u;a.prototype.t=function(){return!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true};a.prototype.r_mark_ymUs_=a.prototype.t;a.prototype.r=function(){return!g(this,3,'ken')?false:!h(this)?false:true};a.prototype.r_mark_yken=a.prototype.r;a.prototype.y=function(){var i;var j;var d;var Y;var k;var X;var l;var W;var V;var f;var r;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var m;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var p;var o;var D;var n;var q;this.C=this._;this.B_continue_stemming_noun_suffixes=true;r=true;a:while(r===true){r=false;i=this.A-this._;s=true;d:while(s===true){s=false;t=true;b:while(t===true){t=false;j=this.A-this._;u=true;c:while(u===true){u=false;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;v=true;c:while(v===true){v=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break c}break 
b}this._=this.A-j;w=true;c:while(w===true){w=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;if(!(!g(this,3,'ken')?false:!h(this)?false:true)){break d}}break a}this._=this.A-i;x=true;c:while(x===true){x=false;if(!(b(this,a.a_19,2)===0?false:true)){break c}y=true;b:while(y===true){y=false;d=this.A-this._;z=true;d:while(z===true){z=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-d;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}break b}this._=this.A-d;B=true;d:while(B===true){B=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d;C=true;d:while(C===true){C=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-d;m=true;d:while(m===true){m=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break a}this._=this.A-i;E=true;c:while(E===true){E=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}Y=this.A-this._;F=true;d:while(F===true){F=false;this.C=this._;G=true;b:while(G===true){G=false;k=this.A-this._;H=true;e:while(H===true){H=false;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){break e}break b}this._=this.A-k;I=true;e:while(I===true){I=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;J=true;e:while(J===true){J=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-Y;break d}}}this.B_continue_stemming_noun_suffixes=false;break a}this._=this.A-i;K=true;b:while(K===true){K=false;if(!(!c(this)?false:b(this,a.a_17,4)===0?false:true)){break 
b}L=true;c:while(L===true){L=false;X=this.A-this._;M=true;d:while(M===true){M=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break d}break c}this._=this.A-X;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break b}}break a}this._=this.A-i;N=true;c:while(N===true){N=false;O=true;b:while(O===true){O=false;l=this.A-this._;P=true;d:while(P===true){P=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-l;Q=true;d:while(Q===true){Q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-l;R=true;d:while(R===true){R=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-l;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}W=this.A-this._;S=true;b:while(S===true){S=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-W;break b}}break a}this._=this.A-i;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){return false}this.B=this._;if(!e(this,'')){return false}V=this.A-this._;T=true;d:while(T===true){T=false;this.C=this._;U=true;b:while(U===true){U=false;f=this.A-this._;p=true;c:while(p===true){p=false;if(!(b(this,a.a_15,4)===0?false:true)){break c}break b}this._=this.A-f;o=true;c:while(o===true){o=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}break b}this._=this.A-f;D=true;c:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f;n=true;c:while(n===true){n=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break c}break b}this._=this.A-f;q=true;c:while(q===true){q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-V;break 
d}}}this.B=this._;return!e(this,'')?false:true};a.prototype.r_stem_nominal_verb_suffixes=a.prototype.y;function J(d){var f;var k;var i;var Z;var l;var Y;var m;var X;var W;var j;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var n;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var V;var q;var p;var D;var o;var r;d.C=d._;d.B_continue_stemming_noun_suffixes=true;s=true;a:while(s===true){s=false;f=d.A-d._;t=true;d:while(t===true){t=false;u=true;b:while(u===true){u=false;k=d.A-d._;v=true;c:while(v===true){v=false;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;w=true;c:while(w===true){w=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;x=true;c:while(x===true){x=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;if(!(!g(d,3,'ken')?false:!h(d)?false:true)){break d}}break a}d._=d.A-f;y=true;c:while(y===true){y=false;if(!(b(d,a.a_19,2)===0?false:true)){break c}z=true;b:while(z===true){z=false;i=d.A-d._;A=true;d:while(A===true){A=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-i;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}break b}d._=d.A-i;C=true;d:while(C===true){C=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i;n=true;d:while(n===true){n=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-i;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break a}d._=d.A-f;F=true;c:while(F===true){F=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}Z=d.A-d._;G=true;d:while(G===true){G=false;d.C=d._;H=true;b:while(H===true){H=false;l=d.A-d._;I=true;e:while(I===true){I=false;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){break e}break b}d._=d.A-l;J=true;e:while(J===true){J=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;K=true;e:while(K===true){K=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-Z;break d}}}d.B_continue_stemming_noun_suffixes=false;break a}d._=d.A-f;L=true;b:while(L===true){L=false;if(!(!c(d)?false:b(d,a.a_17,4)===0?false:true)){break b}M=true;c:while(M===true){M=false;Y=d.A-d._;N=true;d:while(N===true){N=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break d}break c}d._=d.A-Y;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break b}}break a}d._=d.A-f;O=true;c:while(O===true){O=false;P=true;b:while(P===true){P=false;m=d.A-d._;Q=true;d:while(Q===true){Q=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-m;R=true;d:while(R===true){R=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-m;S=true;d:while(S===true){S=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-m;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}X=d.A-d._;T=true;b:while(T===true){T=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-X;break b}}break a}d._=d.A-f;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){return false}d.B=d._;if(!e(d,'')){return false}W=d.A-d._;U=true;d:while(U===true){U=false;d.C=d._;V=true;b:while(V===true){V=false;j=d.A-d._;q=true;c:while(q===true){q=false;if(!(b(d,a.a_15,4)===0?false:true)){break c}break b}d._=d.A-j;p=true;c:while(p===true){p=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}break 
b}d._=d.A-j;D=true;c:while(D===true){D=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j;o=true;c:while(o===true){o=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break c}break b}d._=d.A-j;r=true;c:while(r===true){r=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-W;break d}}}d.B=d._;return!e(d,'')?false:true};a.prototype.__=function(){var z;var N;var M;var L;var p;var K;var r;var J;var t;var u;var v;var w;var x;var y;var d;var A;var B;var C;var D;var E;var F;var G;var H;var I;var s;var q;var n;var m;var j;var h;this.C=this._;if(!(!g(this,2,'ki')?false:true)){return false}w=true;b:while(w===true){w=false;z=this.A-this._;x=true;c:while(x===true){x=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}N=this.A-this._;y=true;f:while(y===true){y=false;this.C=this._;d=true;e:while(d===true){d=false;M=this.A-this._;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}L=this.A-this._;B=true;a:while(B===true){B=false;if(!i(this)){this._=this.A-L;break a}}break e}this._=this.A-M;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){this._=this.A-N;break f}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;C=true;a:while(C===true){C=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break a}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break a}}}}break b}this._=this.A-z;D=true;d:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}K=this.A-this._;E=true;e:while(E===true){E=false;this.C=this._;F=true;a:while(F===true){F=false;r=this.A-this._;G=true;c:while(G===true){G=false;if(!(b(this,a.a_1,2)===0?false:true)){break 
c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-r;H=true;f:while(H===true){H=false;this.C=this._;I=true;g:while(I===true){I=false;J=this.A-this._;s=true;c:while(s===true){s=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break g}this._=this.A-J;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}t=this.A-this._;q=true;c:while(q===true){q=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-t;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-t;break c}}break a}this._=this.A-r;if(!i(this)){this._=this.A-K;break e}}}break b}this._=this.A-z;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){return false}n=true;a:while(n===true){n=false;u=this.A-this._;m=true;c:while(m===true){m=false;if(!(b(this,a.a_1,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-u;j=true;d:while(j===true){j=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}v=this.A-this._;h=true;c:while(h===true){h=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-v;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-v;break c}}break a}this._=this.A-u;if(!i(this)){return false}}}return true};a.prototype.r_stem_suffix_chain_before_ki=a.prototype.__;function i(d){var j;var O;var N;var M;var q;var L;var s;var K;var u;var v;var w;var x;var y;var z;var h;var B;var C;var D;var E;var F;var G;var H;var I;var J;var t;var r;var p;var n;var m;var A;d.C=d._;if(!(!g(d,2,'ki')?false:true)){return false}x=true;b:while(x===true){x=false;j=d.A-d._;y=true;c:while(y===true){y=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}O=d.A-d._;z=true;f:while(z===true){z=false;d.C=d._;h=true;e:while(h===true){h=false;N=d.A-d._;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}M=d.A-d._;C=true;a:while(C===true){C=false;if(!i(d)){d._=d.A-M;break a}}break e}d._=d.A-N;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){d._=d.A-O;break f}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;D=true;a:while(D===true){D=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break a}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break a}}}}break b}d._=d.A-j;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}L=d.A-d._;F=true;e:while(F===true){F=false;d.C=d._;G=true;a:while(G===true){G=false;s=d.A-d._;H=true;c:while(H===true){H=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-s;I=true;f:while(I===true){I=false;d.C=d._;J=true;g:while(J===true){J=false;K=d.A-d._;t=true;c:while(t===true){t=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break g}d._=d.A-K;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}u=d.A-d._;r=true;c:while(r===true){r=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-u;break c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-u;break c}}break a}d._=d.A-s;if(!i(d)){d._=d.A-L;break e}}}break b}d._=d.A-j;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){return false}p=true;a:while(p===true){p=false;v=d.A-d._;n=true;c:while(n===true){n=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-v;m=true;d:while(m===true){m=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}w=d.A-d._;A=true;c:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-w;break 
c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-w;break c}}break a}d._=d.A-v;if(!i(d)){return false}}}return true};a.prototype.z=function(){var d;var ar;var S;var j;var av;var m;var aq;var n;var p;var ax;var ay;var q;var ap;var r;var s;var as;var at;var au;var t;var aw;var u;var v;var w;var aA;var aB;var ao;var x;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var g;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var aC;var az;y=true;a:while(y===true){y=false;d=this.A-this._;z=true;b:while(z===true){z=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}ar=this.A-this._;A=true;c:while(A===true){A=false;if(!i(this)){this._=this.A-ar;break c}}break a}this._=this.A-d;B=true;g:while(B===true){B=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true)){break g}this.B=this._;if(!e(this,'')){return false}S=this.A-this._;C=true;b:while(C===true){C=false;D=true;c:while(D===true){D=false;j=this.A-this._;E=true;d:while(E===true){E=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-j;F=true;f:while(F===true){F=false;this.C=this._;G=true;d:while(G===true){G=false;av=this.A-this._;H=true;e:while(H===true){H=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}break d}this._=this.A-av;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}m=this.A-this._;I=true;d:while(I===true){I=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-m;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-m;break d}}break 
c}aC=this._=this.A-j;this.C=aC;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-S;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-S;break b}}}break a}this._=this.A-d;J=true;b:while(J===true){J=false;this.C=this._;K=true;d:while(K===true){K=false;aq=this.A-this._;L=true;c:while(L===true){L=false;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){break c}break d}this._=this.A-aq;if(!(!c(this)?false:b(this,a.a_5,2)===0?false:true)){break b}}M=true;c:while(M===true){M=false;n=this.A-this._;N=true;d:while(N===true){N=false;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-n;O=true;e:while(O===true){O=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;P=true;d:while(P===true){P=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break d}}break c}this._=this.A-n;if(!i(this)){break b}}break a}this._=this.A-d;Q=true;c:while(Q===true){Q=false;this.C=this._;R=true;b:while(R===true){R=false;ax=this.A-this._;g=true;d:while(g===true){g=false;if(!(!c(this)?false:b(this,a.a_9,2)===0?false:true)){break d}break b}this._=this.A-ax;if(!(!c(this)?false:b(this,a.a_2,4)===0?false:true)){break c}}T=true;d:while(T===true){T=false;ay=this.A-this._;U=true;e:while(U===true){U=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}q=this.A-this._;V=true;b:while(V===true){V=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-q;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-q;break b}}break d}this._=this.A-ay;if(!(b(this,a.a_1,2)===0?false:true)){break c}}break 
a}this._=this.A-d;W=true;d:while(W===true){W=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_8,4)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}ap=this.A-this._;X=true;e:while(X===true){X=false;this.C=this._;Y=true;c:while(Y===true){Y=false;r=this.A-this._;Z=true;f:while(Z===true){Z=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break f}this.B=this._;if(!e(this,'')){return false}s=this.A-this._;_=true;b:while(_===true){_=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-s;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-s;break b}}break c}this._=this.A-r;$=true;b:while($===true){$=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}as=this.A-this._;a0=true;f:while(a0===true){a0=false;if(!i(this)){this._=this.A-as;break f}}break c}this._=this.A-r;if(!i(this)){this._=this.A-ap;break e}}}break a}this._=this.A-d;a1=true;d:while(a1===true){a1=false;this.C=this._;a2=true;b:while(a2===true){a2=false;at=this.A-this._;a3=true;c:while(a3===true){a3=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break c}break b}this._=this.A-at;if(!(!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true)){break d}}this.B=this._;if(!e(this,'')){return false}au=this.A-this._;a4=true;e:while(a4===true){a4=false;a5=true;c:while(a5===true){a5=false;t=this.A-this._;a6=true;b:while(a6===true){a6=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){break b}break c}this._=this.A-t;a7=true;f:while(a7===true){a7=false;this.C=this._;a8=true;b:while(a8===true){a8=false;aw=this.A-this._;a9=true;g:while(a9===true){a9=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break g}break b}this._=this.A-aw;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return 
false}u=this.A-this._;aa=true;b:while(aa===true){aa=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-u;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-u;break b}}break c}this._=this.A-t;if(!i(this)){this._=this.A-au;break e}}}break a}this._=this.A-d;ab=true;b:while(ab===true){ab=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-d;ac=true;b:while(ac===true){ac=false;if(!i(this)){break b}break a}this._=this.A-d;ad=true;c:while(ad===true){ad=false;this.C=this._;ae=true;b:while(ae===true){ae=false;v=this.A-this._;af=true;d:while(af===true){af=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break d}break b}this._=this.A-v;ag=true;d:while(ag===true){ag=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true)){break d}break b}this._=this.A-v;if(!(!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}w=this.A-this._;ah=true;b:while(ah===true){ah=false;this.C=this._;ai=true;d:while(ai===true){ai=false;aA=this.A-this._;aj=true;e:while(aj===true){aj=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}aB=this.A-this._;ak=true;f:while(ak===true){ak=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-aB;break f}}break d}this._=this.A-aA;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-w;break b}}this.B=this._;if(!e(this,'')){return false}this.C=this._;if(!i(this)){this._=this.A-w;break b}}break a}az=this._=this.A-d;this.C=az;al=true;b:while(al===true){al=false;ao=this.A-this._;am=true;c:while(am===true){am=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break b}this._=this.A-ao;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){return false}}this.B=this._;if(!e(this,'')){return 
false}x=this.A-this._;an=true;b:while(an===true){an=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-x;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-x;break b}}}return true};a.prototype.r_stem_noun_suffixes=a.prototype.z;function L(d){var g;var as;var S;var m;var aw;var n;var ar;var p;var q;var ay;var az;var r;var aq;var s;var t;var at;var au;var av;var u;var ax;var v;var w;var x;var aB;var aC;var ap;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var j;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var ao;var aD;var aA;z=true;a:while(z===true){z=false;g=d.A-d._;A=true;b:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}as=d.A-d._;B=true;c:while(B===true){B=false;if(!i(d)){d._=d.A-as;break c}}break a}d._=d.A-g;C=true;g:while(C===true){C=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_11,2)===0?false:!o(d)?false:true)){break g}d.B=d._;if(!e(d,'')){return false}S=d.A-d._;D=true;b:while(D===true){D=false;E=true;c:while(E===true){E=false;m=d.A-d._;F=true;d:while(F===true){F=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-m;G=true;f:while(G===true){G=false;d.C=d._;H=true;d:while(H===true){H=false;aw=d.A-d._;I=true;e:while(I===true){I=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}break d}d._=d.A-aw;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}n=d.A-d._;J=true;d:while(J===true){J=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-n;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-n;break d}}break 
c}aD=d._=d.A-m;d.C=aD;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-S;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-S;break b}}}break a}d._=d.A-g;K=true;b:while(K===true){K=false;d.C=d._;L=true;d:while(L===true){L=false;ar=d.A-d._;M=true;c:while(M===true){M=false;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){break c}break d}d._=d.A-ar;if(!(!c(d)?false:b(d,a.a_5,2)===0?false:true)){break b}}N=true;c:while(N===true){N=false;p=d.A-d._;O=true;d:while(O===true){O=false;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-p;P=true;e:while(P===true){P=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;Q=true;d:while(Q===true){Q=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break d}}break c}d._=d.A-p;if(!i(d)){break b}}break a}d._=d.A-g;R=true;c:while(R===true){R=false;d.C=d._;j=true;b:while(j===true){j=false;ay=d.A-d._;T=true;d:while(T===true){T=false;if(!(!c(d)?false:b(d,a.a_9,2)===0?false:true)){break d}break b}d._=d.A-ay;if(!(!c(d)?false:b(d,a.a_2,4)===0?false:true)){break c}}U=true;d:while(U===true){U=false;az=d.A-d._;V=true;e:while(V===true){V=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}r=d.A-d._;W=true;b:while(W===true){W=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-r;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-r;break b}}break d}d._=d.A-az;if(!(b(d,a.a_1,2)===0?false:true)){break c}}break a}d._=d.A-g;X=true;d:while(X===true){X=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_8,4)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}aq=d.A-d._;Y=true;e:while(Y===true){Y=false;d.C=d._;Z=true;c:while(Z===true){Z=false;s=d.A-d._;_=true;f:while(_===true){_=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break f}d.B=d._;if(!e(d,'')){return 
false}t=d.A-d._;$=true;b:while($===true){$=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-t;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-t;break b}}break c}d._=d.A-s;a0=true;b:while(a0===true){a0=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}at=d.A-d._;a1=true;f:while(a1===true){a1=false;if(!i(d)){d._=d.A-at;break f}}break c}d._=d.A-s;if(!i(d)){d._=d.A-aq;break e}}}break a}d._=d.A-g;a2=true;d:while(a2===true){a2=false;d.C=d._;a3=true;b:while(a3===true){a3=false;au=d.A-d._;a4=true;c:while(a4===true){a4=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break c}break b}d._=d.A-au;if(!(!c(d)?false:b(d,a.a_10,2)===0?false:!h(d)?false:true)){break d}}d.B=d._;if(!e(d,'')){return false}av=d.A-d._;a5=true;e:while(a5===true){a5=false;a6=true;c:while(a6===true){a6=false;u=d.A-d._;a7=true;b:while(a7===true){a7=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){break b}break c}d._=d.A-u;a8=true;f:while(a8===true){a8=false;d.C=d._;a9=true;b:while(a9===true){a9=false;ax=d.A-d._;aa=true;g:while(aa===true){aa=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break g}break b}d._=d.A-ax;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}v=d.A-d._;ab=true;b:while(ab===true){ab=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-v;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-v;break b}}break c}d._=d.A-u;if(!i(d)){d._=d.A-av;break e}}}break a}d._=d.A-g;ac=true;b:while(ac===true){ac=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-g;ad=true;b:while(ad===true){ad=false;if(!i(d)){break b}break a}d._=d.A-g;ae=true;c:while(ae===true){ae=false;d.C=d._;af=true;b:while(af===true){af=false;w=d.A-d._;ag=true;d:while(ag===true){ag=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break 
d}break b}d._=d.A-w;ah=true;d:while(ah===true){ah=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!h(d)?false:true)){break d}break b}d._=d.A-w;if(!(!c(d)?false:b(d,a.a_4,2)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}x=d.A-d._;ai=true;b:while(ai===true){ai=false;d.C=d._;aj=true;d:while(aj===true){aj=false;aB=d.A-d._;ak=true;e:while(ak===true){ak=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}aC=d.A-d._;al=true;f:while(al===true){al=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-aC;break f}}break d}d._=d.A-aB;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-x;break b}}d.B=d._;if(!e(d,'')){return false}d.C=d._;if(!i(d)){d._=d.A-x;break b}}break a}aA=d._=d.A-g;d.C=aA;am=true;b:while(am===true){am=false;ap=d.A-d._;an=true;c:while(an===true){an=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break b}d._=d.A-ap;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){return false}}d.B=d._;if(!e(d,'')){return false}y=d.A-d._;ao=true;b:while(ao===true){ao=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-y;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-y;break b}}}return true};a.prototype.w=function(){var c;this.C=this._;c=b(this,a.a_23,4);if(c===0){return false}this.B=this._;switch(c){case 0:return false;case 1:if(!e(this,'p')){return false}break;case 2:if(!e(this,'ç')){return false}break;case 3:if(!e(this,'t')){return false}break;case 4:if(!e(this,'k')){return false}break}return true};a.prototype.r_post_process_last_consonants=a.prototype.w;function w(c){var d;c.C=c._;d=b(c,a.a_23,4);if(d===0){return false}c.B=c._;switch(d){case 0:return false;case 1:if(!e(c,'p')){return false}break;case 2:if(!e(c,'ç')){return false}break;case 3:if(!e(c,'t')){return false}break;case 4:if(!e(c,'k')){return false}break}return true};a.prototype.N=function(){var L;var _;var i;var Y;var B;var W;var K;var l;var S;var Q;var p;var O;var 
M;var s;var U;var u;var v;var w;var x;var y;var z;var A;var b;var C;var D;var j;var F;var G;var H;var I;var J;var E;var t;var r;var N;var q;var P;var o;var R;var m;var T;var k;var V;var h;var X;var e;var Z;var d;var $;var a0;var a1;var c;L=this.A-this._;u=true;a:while(u===true){u=false;_=this.A-this._;v=true;b:while(v===true){v=false;if(!g(this,1,'d')){break b}break a}this._=this.A-_;if(!g(this,1,'g')){return false}}this._=this.A-L;w=true;a:while(w===true){w=false;i=this.A-this._;x=true;b:while(x===true){x=false;Y=this.A-this._;d:while(true){B=this.A-this._;y=true;c:while(y===true){y=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-B;break d}V=this._=this.A-B;if(V<=this.D){break b}this._--}z=true;c:while(z===true){z=false;W=this.A-this._;A=true;d:while(A===true){A=false;if(!g(this,1,'a')){break d}break c}this._=this.A-W;if(!g(this,1,'ı')){break b}}h=this._=this.A-Y;b=h;N=h;q=n(this,h,h,'ı');if(h<=this.B){this.B+=q|0}if(N<=this.C){this.C+=q|0}this._=b;break a}this._=this.A-i;C=true;b:while(C===true){C=false;K=this.A-this._;c:while(true){l=this.A-this._;D=true;d:while(D===true){D=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-l;break c}X=this._=this.A-l;if(X<=this.D){break b}this._--}j=true;c:while(j===true){j=false;S=this.A-this._;F=true;d:while(F===true){F=false;if(!g(this,1,'e')){break d}break c}this._=this.A-S;if(!g(this,1,'i')){break b}}e=this._=this.A-K;b=e;P=e;o=n(this,e,e,'i');if(e<=this.B){this.B+=o|0}if(P<=this.C){this.C+=o|0}this._=b;break a}this._=this.A-i;G=true;b:while(G===true){G=false;Q=this.A-this._;c:while(true){p=this.A-this._;H=true;d:while(H===true){H=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-p;break c}Z=this._=this.A-p;if(Z<=this.D){break b}this._--}I=true;c:while(I===true){I=false;O=this.A-this._;J=true;d:while(J===true){J=false;if(!g(this,1,'o')){break d}break c}this._=this.A-O;if(!g(this,1,'u')){break 
b}}d=this._=this.A-Q;b=d;R=d;m=n(this,d,d,'u');if(d<=this.B){this.B+=m|0}if(R<=this.C){this.C+=m|0}this._=b;break a}a1=this._=(a0=this.A)-i;M=a0-a1;b:while(true){s=this.A-this._;E=true;c:while(E===true){E=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-s;break b}$=this._=this.A-s;if($<=this.D){return false}this._--}t=true;b:while(t===true){t=false;U=this.A-this._;r=true;c:while(r===true){r=false;if(!g(this,1,'ö')){break c}break b}this._=this.A-U;if(!g(this,1,'ü')){return false}}c=this._=this.A-M;b=c;T=c;k=n(this,c,c,'ü');if(c<=this.B){this.B+=k|0}if(T<=this.C){this.C+=k|0}this._=b}return true};a.prototype.r_append_U_to_stems_ending_with_d_or_g=a.prototype.N;function z(b){var $;var Z;var j;var X;var F;var L;var T;var m;var R;var P;var q;var N;var V;var t;var M;var v;var w;var x;var y;var z;var A;var B;var c;var D;var E;var C;var G;var H;var I;var J;var K;var u;var s;var r;var O;var p;var Q;var o;var S;var l;var U;var k;var W;var i;var Y;var h;var _;var e;var a0;var a1;var a2;var d;$=b.A-b._;v=true;a:while(v===true){v=false;Z=b.A-b._;w=true;b:while(w===true){w=false;if(!g(b,1,'d')){break b}break a}b._=b.A-Z;if(!g(b,1,'g')){return false}}b._=b.A-$;x=true;a:while(x===true){x=false;j=b.A-b._;y=true;b:while(y===true){y=false;X=b.A-b._;d:while(true){F=b.A-b._;z=true;c:while(z===true){z=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-F;break d}W=b._=b.A-F;if(W<=b.D){break b}b._--}A=true;c:while(A===true){A=false;L=b.A-b._;B=true;d:while(B===true){B=false;if(!g(b,1,'a')){break d}break c}b._=b.A-L;if(!g(b,1,'ı')){break b}}i=b._=b.A-X;c=i;O=i;p=n(b,i,i,'ı');if(i<=b.B){b.B+=p|0}if(O<=b.C){b.C+=p|0}b._=c;break a}b._=b.A-j;D=true;b:while(D===true){D=false;T=b.A-b._;c:while(true){m=b.A-b._;E=true;d:while(E===true){E=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-m;break c}Y=b._=b.A-m;if(Y<=b.D){break b}b._--}C=true;c:while(C===true){C=false;R=b.A-b._;G=true;d:while(G===true){G=false;if(!g(b,1,'e')){break d}break c}b._=b.A-R;if(!g(b,1,'i')){break 
b}}h=b._=b.A-T;c=h;Q=h;o=n(b,h,h,'i');if(h<=b.B){b.B+=o|0}if(Q<=b.C){b.C+=o|0}b._=c;break a}b._=b.A-j;H=true;b:while(H===true){H=false;P=b.A-b._;c:while(true){q=b.A-b._;I=true;d:while(I===true){I=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-q;break c}_=b._=b.A-q;if(_<=b.D){break b}b._--}J=true;c:while(J===true){J=false;N=b.A-b._;K=true;d:while(K===true){K=false;if(!g(b,1,'o')){break d}break c}b._=b.A-N;if(!g(b,1,'u')){break b}}e=b._=b.A-P;c=e;S=e;l=n(b,e,e,'u');if(e<=b.B){b.B+=l|0}if(S<=b.C){b.C+=l|0}b._=c;break a}a2=b._=(a1=b.A)-j;V=a1-a2;b:while(true){t=b.A-b._;u=true;c:while(u===true){u=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-t;break b}a0=b._=b.A-t;if(a0<=b.D){return false}b._--}s=true;b:while(s===true){s=false;M=b.A-b._;r=true;c:while(r===true){r=false;if(!g(b,1,'ö')){break c}break b}b._=b.A-M;if(!g(b,1,'ü')){return false}}d=b._=b.A-V;c=d;U=d;k=n(b,d,d,'ü');if(d<=b.B){b.B+=k|0}if(U<=b.C){b.C+=k|0}b._=c}return true};a.prototype.v=function(){var e;var f;var b;var c;var d;e=this._;b=2;a:while(true){f=this._;c=true;b:while(c===true){c=false;c:while(true){d=true;d:while(d===true){d=false;if(!v(this,a.g_vowel,97,305)){break d}break c}if(this._>=this.A){break b}this._++}b--;continue a}this._=f;break a}if(b>0){return false}this._=e;return true};a.prototype.r_more_than_one_syllable_word=a.prototype.v;function N(b){var f;var g;var c;var d;var e;f=b._;c=2;a:while(true){g=b._;d=true;b:while(d===true){d=false;c:while(true){e=true;d:while(e===true){e=false;if(!v(b,a.g_vowel,97,305)){break d}break c}if(b._>=b.A){break b}b._++}c--;continue a}b._=g;break a}if(c>0){return false}b._=f;return true};a.prototype.P=function(){var f;var g;var h;var b;var a;var c;var d;var i;var j;var e;b=true;b:while(b===true){b=false;f=this._;a=true;a:while(a===true){a=false;g=this._;c:while(true){c=true;d:while(c===true){c=false;if(!s(this,2,'ad')){break d}break c}if(this._>=this.A){break a}this._++}i=this.I_strlen=2;if(!(i===this.A)){break a}this._=g;break 
b}j=this._=f;h=j;a:while(true){d=true;c:while(d===true){d=false;if(!s(this,5,'soyad')){break c}break a}if(this._>=this.A){return false}this._++}e=this.I_strlen=5;if(!(e===this.A)){return false}this._=h}return true};a.prototype.r_is_reserved_word=a.prototype.P;function x(a){var g;var h;var i;var c;var b;var d;var e;var j;var k;var f;c=true;b:while(c===true){c=false;g=a._;b=true;a:while(b===true){b=false;h=a._;c:while(true){d=true;d:while(d===true){d=false;if(!s(a,2,'ad')){break d}break c}if(a._>=a.A){break a}a._++}j=a.I_strlen=2;if(!(j===a.A)){break a}a._=h;break b}k=a._=g;i=k;a:while(true){e=true;c:while(e===true){e=false;if(!s(a,5,'soyad')){break c}break a}if(a._>=a.A){return false}a._++}f=a.I_strlen=5;if(!(f===a.A)){return false}a._=i}return true};a.prototype.x=function(){var d;var e;var a;var b;var c;var f;var g;var h;d=this._;a=true;a:while(a===true){a=false;if(!x(this)){break a}return false}f=this._=d;this.D=f;h=this._=g=this.A;e=g-h;b=true;a:while(b===true){b=false;if(!z(this)){break a}}this._=this.A-e;c=true;a:while(c===true){c=false;if(!w(this)){break a}}this._=this.D;return true};a.prototype.r_postlude=a.prototype.x;function O(a){var e;var f;var b;var c;var d;var g;var h;var i;e=a._;b=true;a:while(b===true){b=false;if(!x(a)){break a}return false}g=a._=e;a.D=g;i=a._=h=a.A;f=h-i;c=true;a:while(c===true){c=false;if(!z(a)){break a}}a._=a.A-f;d=true;a:while(d===true){d=false;if(!w(a)){break a}}a._=a.D;return true};a.prototype.H=function(){var c;var a;var b;var d;var e;if(!N(this)){return false}this.D=this._;e=this._=d=this.A;c=d-e;a=true;a:while(a===true){a=false;if(!J(this)){break a}}this._=this.A-c;if(!this.B_continue_stemming_noun_suffixes){return false}b=true;a:while(b===true){b=false;if(!L(this)){break a}}this._=this.D;return!O(this)?false:true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var 
d;c='TurkishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;j(a,'methodObject',function(){return new a});j(a,'a_0',function(){return[new d('m',-1,-1),new d('n',-1,-1),new d('miz',-1,-1),new d('niz',-1,-1),new d('muz',-1,-1),new d('nuz',-1,-1),new d('müz',-1,-1),new d('nüz',-1,-1),new d('mız',-1,-1),new d('nız',-1,-1)]});j(a,'a_1',function(){return[new d('leri',-1,-1),new d('ları',-1,-1)]});j(a,'a_2',function(){return[new d('ni',-1,-1),new d('nu',-1,-1),new d('nü',-1,-1),new d('nı',-1,-1)]});j(a,'a_3',function(){return[new d('in',-1,-1),new d('un',-1,-1),new d('ün',-1,-1),new d('ın',-1,-1)]});j(a,'a_4',function(){return[new d('a',-1,-1),new d('e',-1,-1)]});j(a,'a_5',function(){return[new d('na',-1,-1),new d('ne',-1,-1)]});j(a,'a_6',function(){return[new d('da',-1,-1),new d('ta',-1,-1),new d('de',-1,-1),new d('te',-1,-1)]});j(a,'a_7',function(){return[new d('nda',-1,-1),new d('nde',-1,-1)]});j(a,'a_8',function(){return[new d('dan',-1,-1),new d('tan',-1,-1),new d('den',-1,-1),new d('ten',-1,-1)]});j(a,'a_9',function(){return[new d('ndan',-1,-1),new d('nden',-1,-1)]});j(a,'a_10',function(){return[new d('la',-1,-1),new d('le',-1,-1)]});j(a,'a_11',function(){return[new d('ca',-1,-1),new d('ce',-1,-1)]});j(a,'a_12',function(){return[new d('im',-1,-1),new d('um',-1,-1),new d('üm',-1,-1),new d('ım',-1,-1)]});j(a,'a_13',function(){return[new d('sin',-1,-1),new d('sun',-1,-1),new d('sün',-1,-1),new d('sın',-1,-1)]});j(a,'a_14',function(){return[new d('iz',-1,-1),new d('uz',-1,-1),new d('üz',-1,-1),new d('ız',-1,-1)]});j(a,'a_15',function(){return[new d('siniz',-1,-1),new d('sunuz',-1,-1),new d('sünüz',-1,-1),new d('sınız',-1,-1)]});j(a,'a_16',function(){return[new d('lar',-1,-1),new d('ler',-1,-1)]});j(a,'a_17',function(){return[new d('niz',-1,-1),new d('nuz',-1,-1),new d('nüz',-1,-1),new d('nız',-1,-1)]});j(a,'a_18',function(){return[new d('dir',-1,-1),new d('tir',-1,-1),new 
d('dur',-1,-1),new d('tur',-1,-1),new d('dür',-1,-1),new d('tür',-1,-1),new d('dır',-1,-1),new d('tır',-1,-1)]});j(a,'a_19',function(){return[new d('casına',-1,-1),new d('cesine',-1,-1)]});j(a,'a_20',function(){return[new d('di',-1,-1),new d('ti',-1,-1),new d('dik',-1,-1),new d('tik',-1,-1),new d('duk',-1,-1),new d('tuk',-1,-1),new d('dük',-1,-1),new d('tük',-1,-1),new d('dık',-1,-1),new d('tık',-1,-1),new d('dim',-1,-1),new d('tim',-1,-1),new d('dum',-1,-1),new d('tum',-1,-1),new d('düm',-1,-1),new d('tüm',-1,-1),new d('dım',-1,-1),new d('tım',-1,-1),new d('din',-1,-1),new d('tin',-1,-1),new d('dun',-1,-1),new d('tun',-1,-1),new d('dün',-1,-1),new d('tün',-1,-1),new d('dın',-1,-1),new d('tın',-1,-1),new d('du',-1,-1),new d('tu',-1,-1),new d('dü',-1,-1),new d('tü',-1,-1),new d('dı',-1,-1),new d('tı',-1,-1)]});j(a,'a_21',function(){return[new d('sa',-1,-1),new d('se',-1,-1),new d('sak',-1,-1),new d('sek',-1,-1),new d('sam',-1,-1),new d('sem',-1,-1),new d('san',-1,-1),new d('sen',-1,-1)]});j(a,'a_22',function(){return[new d('miş',-1,-1),new d('muş',-1,-1),new d('müş',-1,-1),new d('mış',-1,-1)]});j(a,'a_23',function(){return[new d('b',-1,1),new d('c',-1,2),new d('d',-1,3),new d('ğ',-1,4)]});j(a,'g_vowel',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1]});j(a,'g_U',function(){return[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1]});j(a,'g_vowel1',function(){return[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel2',function(){return[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130]});j(a,'g_vowel3',function(){return[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel4',function(){return[17]});j(a,'g_vowel5',function(){return[65]});j(a,'g_vowel6',function(){return[65]});var y={'src/stemmer.jsx':{Stemmer:u},'src/turkish-stemmer.jsx':{TurkishStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/turkish-stemmer.jsx").TurkishStemmer;
"""
@@ -28,12 +27,12 @@ class SearchTurkish(SearchLanguage):
language_name = 'Turkish'
js_stemmer_rawcode = 'turkish-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = set() # type: Set[unicode]
+ stopwords = set() # type: Set[str]
def init(self, options):
# type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('turkish')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word.lower())
diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py
index 3753bc990..011706750 100644
--- a/sphinx/search/zh.py
+++ b/sphinx/search/zh.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.search.zh
~~~~~~~~~~~~~~~~
@@ -25,7 +24,7 @@ if False:
# For type annotation
from typing import Dict, List # NOQA
-english_stopwords = set(u"""
+english_stopwords = set("""
a and are as at
be but by
for
@@ -234,7 +233,7 @@ class SearchChinese(SearchLanguage):
js_stemmer_code = js_porter_stemmer
stopwords = english_stopwords
latin1_letters = re.compile(r'[a-zA-Z0-9_]+')
- latin_terms = [] # type: List[unicode]
+ latin_terms = [] # type: List[str]
def init(self, options):
# type: (Dict) -> None
@@ -246,8 +245,8 @@ class SearchChinese(SearchLanguage):
self.stemmer = get_stemmer()
def split(self, input):
- # type: (unicode) -> List[unicode]
- chinese = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ chinese = [] # type: List[str]
if JIEBA:
chinese = list(jieba.cut_for_search(input))
@@ -257,11 +256,11 @@ class SearchChinese(SearchLanguage):
return chinese + latin1
def word_filter(self, stemmed_word):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return len(stemmed_word) > 1
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
# Don't stem Latin words that are long enough to be relevant for search
# if not stemmed, but would be too short after being stemmed
diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py
index 733bc4e34..da242f50b 100644
--- a/sphinx/setup_command.py
+++ b/sphinx/setup_command.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.setup_command
~~~~~~~~~~~~~~~~~~~~
@@ -11,14 +10,12 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import sys
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsExecError
-
-from six import StringIO, string_types
+from io import StringIO
from sphinx.application import Sphinx
from sphinx.cmd.build import handle_exception
@@ -96,14 +93,14 @@ class BuildDoc(Command):
# type: () -> None
self.fresh_env = self.all_files = False
self.pdb = False
- self.source_dir = self.build_dir = None # type: unicode
+ self.source_dir = self.build_dir = None # type: str
self.builder = 'html'
self.warning_is_error = False
self.project = ''
self.version = ''
self.release = ''
self.today = ''
- self.config_dir = None # type: unicode
+ self.config_dir = None # type: str
self.link_index = False
self.copyright = ''
self.verbosity = 0
@@ -111,7 +108,7 @@ class BuildDoc(Command):
self.nitpicky = False
def _guess_source_dir(self):
- # type: () -> unicode
+ # type: () -> str
for guess in ('doc', 'docs'):
if not os.path.isdir(guess):
continue
@@ -124,12 +121,12 @@ class BuildDoc(Command):
# unicode, causing finalize_options to fail if invoked again. Workaround
# for https://bugs.python.org/issue19570
def _ensure_stringlike(self, option, what, default=None):
- # type: (unicode, unicode, Any) -> Any
+ # type: (str, str, Any) -> Any
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
- elif not isinstance(val, string_types):
+ elif not isinstance(val, str):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
@@ -155,7 +152,7 @@ class BuildDoc(Command):
self.builder_target_dirs = [
(builder, os.path.join(self.build_dir, builder))
- for builder in self.builder] # type: List[Tuple[str, unicode]]
+ for builder in self.builder]
def run(self):
# type: () -> None
@@ -165,7 +162,7 @@ class BuildDoc(Command):
status_stream = StringIO()
else:
status_stream = sys.stdout # type: ignore
- confoverrides = {} # type: Dict[unicode, Any]
+ confoverrides = {} # type: Dict[str, Any]
if self.project:
confoverrides['project'] = self.project
if self.version:
diff --git a/sphinx/templates/latex/latex.tex_t b/sphinx/templates/latex/latex.tex_t
index 633525551..39e3fa2e5 100644
--- a/sphinx/templates/latex/latex.tex_t
+++ b/sphinx/templates/latex/latex.tex_t
@@ -26,18 +26,28 @@
<%= fontenc %>
<%= amsmath %>
<%= multilingual %>
+<%= substitutefont %>
+<%= textcyrillic %>
<%= fontpkg %>
+<%= textgreek %>
<%= fncychap %>
\usepackage<%= sphinxpkgoptions %>{sphinx}
<%= sphinxsetup %>
<%= fvset %>
<%= geometry %>
-<%= usepackages %>
+
+<%- for name, option in packages %>
+<%- if option %>
+\usepackage[<%= option %>]{<%= name %>}
+<%- else %>
+\usepackage{<%= name %>}
+<%- endif %>
+<%- endfor %>
+
<%= hyperref %>
<%= contentsname %>
<%= numfig_format %>
-<%= translatablestrings %>
-<%= pageautorefname %>
+\input{sphinxmessages.sty}
<%= tocdepth %>
<%= secnumdepth %>
<%= preamble %>
@@ -46,8 +56,16 @@
\date{<%= date %>}
\release{<%= release %>}
\author{<%= author %>}
-\newcommand{\sphinxlogo}{<%= logo %>}
-\renewcommand{\releasename}{<%= releasename %>}
+<%- if logofilename %>
+\newcommand{\sphinxlogo}{\sphinxincludegraphics{<%= logofilename %>}\par}
+<%- else %>
+\newcommand{\sphinxlogo}{\vbox{}}
+<%- endif %>
+<%- if releasename or release %>
+\renewcommand{\releasename}{<%= releasename or _('Release') | e %>}
+<%- else %>
+\renewcommand{\releasename}{}
+<%- endif %>
<%= makeindex %>
\begin{document}
<%= shorthandoff %>
@@ -59,6 +77,6 @@
<%= body %>
<%= atendofbody %>
<%= indices %>
-\renewcommand{\indexname}{<%= indexname %>}
+\renewcommand{\indexname}{<%= _('Index') | e %>}
<%= printindex %>
\end{document}
diff --git a/sphinx/templates/latex/sphinxmessages.sty_t b/sphinx/templates/latex/sphinxmessages.sty_t
new file mode 100644
index 000000000..d548a4469
--- /dev/null
+++ b/sphinx/templates/latex/sphinxmessages.sty_t
@@ -0,0 +1,11 @@
+%
+% sphinxmessages.sty
+%
+% message resources for Sphinx
+%
+\renewcommand{\literalblockcontinuedname}{<%= _('continued from previous page') | e %>}
+\renewcommand{\literalblockcontinuesname}{<%= _('continues on next page') | e %>}
+\renewcommand{\sphinxnonalphabeticalgroupname}{<%= _('Non-alphabetical') | e %>}
+\renewcommand{\sphinxsymbolsname}{<%= _('Symbols') | e %>}
+\renewcommand{\sphinxnumbersname}{<%= _('Numbers') | e %>}
+\def\pageautorefname{<%= _('page') | e %>}
diff --git a/sphinx/templates/quickstart/conf.py_t b/sphinx/templates/quickstart/conf.py_t
index 9ca4fe437..90f3113ed 100644
--- a/sphinx/templates/quickstart/conf.py_t
+++ b/sphinx/templates/quickstart/conf.py_t
@@ -1,9 +1,7 @@
-# -*- coding: utf-8 -*-
-#
# Configuration file for the Sphinx documentation builder.
#
-# This file does only contain a selection of the most common options. For a
-# full list see the documentation:
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
@@ -15,12 +13,12 @@
{% if append_syspath -%}
import os
import sys
-sys.path.insert(0, u'{{ module_path }}')
+sys.path.insert(0, '{{ module_path }}')
{% else -%}
# import os
# import sys
{% if module_path -%}
-# sys.path.insert(0, u'{{ module_path }}')
+# sys.path.insert(0, '{{ module_path }}')
{% else -%}
# sys.path.insert(0, os.path.abspath('.'))
{% endif -%}
@@ -28,22 +26,18 @@ sys.path.insert(0, u'{{ module_path }}')
# -- Project information -----------------------------------------------------
-project = u'{{ project_str }}'
-copyright = u'{{ copyright_str }}'
-author = u'{{ author_str }}'
+project = '{{ project_str }}'
+copyright = '{{ copyright_str }}'
+author = '{{ author_str }}'
# The short X.Y version
-version = u'{{ version_str }}'
+version = '{{ version_str }}'
# The full version, including alpha/beta/rc tags
-release = u'{{ release_str }}'
+release = '{{ release_str }}'
# -- General configuration ---------------------------------------------------
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
@@ -62,9 +56,11 @@ templates_path = ['{{ dot }}templates']
# source_suffix = ['.rst', '.md']
source_suffix = '{{ suffix }}'
+{% if master_doc != 'index' -%}
# The master toctree document.
master_doc = '{{ master_str }}'
+{% endif -%}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
@@ -77,9 +73,6 @@ language = {{ language | repr }}
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [{{ exclude_patterns }}]
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = None
-
# -- Options for HTML output -------------------------------------------------
@@ -110,12 +103,6 @@ html_static_path = ['{{ dot }}static']
# html_sidebars = {}
-# -- Options for HTMLHelp output ---------------------------------------------
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = '{{ project_fn }}doc'
-
-
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
@@ -140,30 +127,8 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- (master_doc, '{{ project_fn }}.tex', u'{{ project_doc_texescaped_str }}',
- u'{{ author_texescaped_str }}', 'manual'),
-]
-
-
-# -- Options for manual page output ------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, '{{ project_manpage }}', u'{{ project_doc_str }}',
- [author], 1)
-]
-
-
-# -- Options for Texinfo output ----------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- (master_doc, '{{ project_fn }}', u'{{ project_doc_str }}',
- author, '{{ project_fn }}', 'One line description of project.',
- 'Miscellaneous'),
+ (master_doc, '{{ project_fn }}.tex', '{{ project_doc_texescaped_str }}',
+ '{{ author_texescaped_str }}', 'manual'),
]
diff --git a/sphinx/templates/texinfo/Makefile b/sphinx/templates/texinfo/Makefile
new file mode 100644
index 000000000..276a66136
--- /dev/null
+++ b/sphinx/templates/texinfo/Makefile
@@ -0,0 +1,50 @@
+# Makefile for Sphinx Texinfo output
+
+infodir ?= /usr/share/info
+
+MAKEINFO = makeinfo --no-split
+MAKEINFO_html = makeinfo --no-split --html
+MAKEINFO_plaintext = makeinfo --no-split --plaintext
+TEXI2PDF = texi2pdf --batch --expand
+INSTALL_INFO = install-info
+
+ALLDOCS = $(basename $(wildcard *.texi))
+
+all: info
+info: $(addsuffix .info,$(ALLDOCS))
+plaintext: $(addsuffix .txt,$(ALLDOCS))
+html: $(addsuffix .html,$(ALLDOCS))
+pdf: $(addsuffix .pdf,$(ALLDOCS))
+
+install-info: info
+ for f in *.info; do \\
+ cp -t $(infodir) "$$f" && \\
+ $(INSTALL_INFO) --info-dir=$(infodir) "$$f" ; \\
+ done
+
+uninstall-info: info
+ for f in *.info; do \\
+ rm -f "$(infodir)/$$f" ; \\
+ $(INSTALL_INFO) --delete --info-dir=$(infodir) "$$f" ; \\
+ done
+
+%.info: %.texi
+ $(MAKEINFO) -o '$@' '$<'
+
+%.txt: %.texi
+ $(MAKEINFO_plaintext) -o '$@' '$<'
+
+%.html: %.texi
+ $(MAKEINFO_html) -o '$@' '$<'
+
+%.pdf: %.texi
+ -$(TEXI2PDF) '$<'
+ -$(TEXI2PDF) '$<'
+ -$(TEXI2PDF) '$<'
+
+clean:
+ rm -f *.info *.pdf *.txt *.html
+ rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ky *.pg
+ rm -f *.vr *.tp *.fn *.fns *.def *.defs *.cp *.cps *.ge *.ges *.mo
+
+.PHONY: all info plaintext html pdf install-info uninstall-info clean
diff --git a/sphinx/testing/__init__.py b/sphinx/testing/__init__.py
index c551da36f..8b9e32636 100644
--- a/sphinx/testing/__init__.py
+++ b/sphinx/testing/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.testing
~~~~~~~~~~~~~~
diff --git a/sphinx/testing/fixtures.py b/sphinx/testing/fixtures.py
index fcf1028fd..c37e0f357 100644
--- a/sphinx/testing/fixtures.py
+++ b/sphinx/testing/fixtures.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.testing.fixtures
~~~~~~~~~~~~~~~~~~~~~~~
@@ -8,16 +7,15 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import subprocess
import sys
from collections import namedtuple
+from io import StringIO
from tempfile import gettempdir
import pytest
-from six import StringIO, string_types
from . import util
@@ -102,8 +100,7 @@ def test_params(request):
}
result.update(kwargs)
- if (result['shared_result'] and
- not isinstance(result['shared_result'], string_types)):
+ if (result['shared_result'] and not isinstance(result['shared_result'], str)):
raise pytest.Exception('You can only provide a string type of value '
'for "shared_result" ')
return result
@@ -173,7 +170,7 @@ def make_app(test_params, monkeypatch):
app_.cleanup()
-class SharedResult(object):
+class SharedResult:
cache = {} # type: Dict[str, Dict[str, str]]
def store(self, key, app_):
diff --git a/sphinx/testing/path.py b/sphinx/testing/path.py
index 1c9781dea..9477391b4 100644
--- a/sphinx/testing/path.py
+++ b/sphinx/testing/path.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.testing.path
~~~~~~~~~~~~~~~~~~~
@@ -9,29 +8,20 @@
import os
import shutil
import sys
-from io import open
-
-from six import PY2, text_type
if False:
# For type annotation
+ import builtins # NOQA
from typing import Any, Callable, IO, List # NOQA
FILESYSTEMENCODING = sys.getfilesystemencoding() or sys.getdefaultencoding()
-class path(text_type):
+class path(str):
"""
Represents a path which behaves like a string.
"""
- if PY2:
- def __new__(cls, s, encoding=FILESYSTEMENCODING, errors='strict'):
- # type: (unicode, unicode, unicode) -> path
- if isinstance(s, str):
- s = s.decode(encoding, errors)
- return text_type.__new__(cls, s) # type: ignore
- return text_type.__new__(cls, s) # type: ignore
@property
def parent(self):
@@ -42,7 +32,7 @@ class path(text_type):
return self.__class__(os.path.dirname(self))
def basename(self):
- # type: () -> unicode
+ # type: () -> str
return os.path.basename(self)
def abspath(self):
@@ -107,7 +97,7 @@ class path(text_type):
shutil.rmtree(self, ignore_errors=ignore_errors, onerror=onerror)
def copytree(self, destination, symlinks=False):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
"""
Recursively copy a directory to the given `destination`. If the given
`destination` does not exist it will be created.
@@ -120,7 +110,7 @@ class path(text_type):
shutil.copytree(self, destination, symlinks=symlinks)
def movetree(self, destination):
- # type: (unicode) -> None
+ # type: (str) -> None
"""
Recursively move the file or directory to the given `destination`
similar to the Unix "mv" command.
@@ -151,30 +141,27 @@ class path(text_type):
os.utime(self, arg)
def open(self, mode='r', **kwargs):
- # type: (unicode, Any) -> IO
+ # type: (str, Any) -> IO
return open(self, mode, **kwargs)
def write_text(self, text, encoding='utf-8', **kwargs):
- # type: (unicode, unicode, Any) -> None
+ # type: (str, str, Any) -> None
"""
Writes the given `text` to the file.
"""
- if isinstance(text, bytes):
- text = text.decode(encoding)
with open(self, 'w', encoding=encoding, **kwargs) as f:
f.write(text)
def text(self, encoding='utf-8', **kwargs):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
"""
Returns the text in the file.
"""
- mode = 'rU' if PY2 else 'r'
- with open(self, mode=mode, encoding=encoding, **kwargs) as f:
+ with open(self, encoding=encoding, **kwargs) as f:
return f.read()
def bytes(self):
- # type: () -> str
+ # type: () -> builtins.bytes
"""
Returns the bytes in the file.
"""
@@ -211,12 +198,12 @@ class path(text_type):
"""
return os.path.lexists(self)
- def makedirs(self, mode=0o777):
- # type: (int) -> None
+ def makedirs(self, mode=0o777, exist_ok=False):
+ # type: (int, bool) -> None
"""
Recursively create directories.
"""
- os.makedirs(self, mode)
+ os.makedirs(self, mode, exist_ok=exist_ok)
def joinpath(self, *args):
# type: (Any) -> path
@@ -226,11 +213,11 @@ class path(text_type):
return self.__class__(os.path.join(self, *map(self.__class__, args)))
def listdir(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
return os.listdir(self)
__div__ = __truediv__ = joinpath
def __repr__(self):
# type: () -> str
- return '%s(%s)' % (self.__class__.__name__, text_type.__repr__(self))
+ return '%s(%s)' % (self.__class__.__name__, super().__repr__())
diff --git a/sphinx/testing/util.py b/sphinx/testing/util.py
index 4c18d1c0c..c8fb7f199 100644
--- a/sphinx/testing/util.py
+++ b/sphinx/testing/util.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.testing.util
~~~~~~~~~~~~~~~~~~~
@@ -16,11 +15,10 @@ from xml.etree import ElementTree
from docutils import nodes
from docutils.parsers.rst import directives, roles
-from six import string_types
from sphinx import application, locale
from sphinx.builders.latex import LaTeXBuilder
-from sphinx.ext.autodoc import AutoDirective
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.pycode import ModuleAnalyzer
from sphinx.testing.path import path
from sphinx.util.osutil import relpath
@@ -39,25 +37,25 @@ __all__ = [
def assert_re_search(regex, text, flags=0):
- # type: (Pattern, unicode, int) -> None
+ # type: (Pattern, str, int) -> None
if not re.search(regex, text, flags):
assert False, '%r did not match %r' % (regex, text)
def assert_not_re_search(regex, text, flags=0):
- # type: (Pattern, unicode, int) -> None
+ # type: (Pattern, str, int) -> None
if re.search(regex, text, flags):
assert False, '%r did match %r' % (regex, text)
def assert_startswith(thing, prefix):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if not thing.startswith(prefix):
assert False, '%r does not start with %r' % (thing, prefix)
def assert_node(node, cls=None, xpath="", **kwargs):
- # type: (nodes.Node, Any, unicode, Any) -> None
+ # type: (nodes.Node, Any, str, Any) -> None
if cls:
if isinstance(cls, list):
assert_node(node, cls[0], xpath=xpath, **kwargs)
@@ -65,35 +63,44 @@ def assert_node(node, cls=None, xpath="", **kwargs):
if isinstance(cls[1], tuple):
assert_node(node, cls[1], xpath=xpath, **kwargs)
else:
+ assert isinstance(node, nodes.Element), \
+ 'The node%s does not have any children' % xpath
assert len(node) == 1, \
'The node%s has %d child nodes, not one' % (xpath, len(node))
assert_node(node[0], cls[1:], xpath=xpath + "[0]", **kwargs)
elif isinstance(cls, tuple):
+ assert isinstance(node, nodes.Element), \
+ 'The node%s does not have any items' % xpath
assert len(node) == len(cls), \
'The node%s has %d child nodes, not %r' % (xpath, len(node), len(cls))
for i, nodecls in enumerate(cls):
path = xpath + "[%d]" % i
assert_node(node[i], nodecls, xpath=path, **kwargs)
- elif isinstance(cls, string_types):
+ elif isinstance(cls, str):
assert node == cls, 'The node %r is not %r: %r' % (xpath, cls, node)
else:
assert isinstance(node, cls), \
'The node%s is not subclass of %r: %r' % (xpath, cls, node)
- for key, value in kwargs.items():
- assert key in node, 'The node%s does not have %r attribute: %r' % (xpath, key, node)
- assert node[key] == value, \
- 'The node%s[%s] is not %r: %r' % (xpath, key, value, node[key])
+ if kwargs:
+ assert isinstance(node, nodes.Element), \
+ 'The node%s does not have any attributes' % xpath
+
+ for key, value in kwargs.items():
+ assert key in node, \
+ 'The node%s does not have %r attribute: %r' % (xpath, key, node)
+ assert node[key] == value, \
+ 'The node%s[%s] is not %r: %r' % (xpath, key, value, node[key])
def etree_parse(path):
- # type: (unicode) -> Any
+ # type: (str) -> Any
with warnings.catch_warnings(record=False):
warnings.filterwarnings("ignore", category=DeprecationWarning)
- return ElementTree.parse(path) # type: ignore
+ return ElementTree.parse(path)
-class Struct(object):
+class Struct:
def __init__(self, **kwds):
# type: (Any) -> None
self.__dict__.update(kwds)
@@ -108,52 +115,45 @@ class SphinxTestApp(application.Sphinx):
def __init__(self, buildername='html', srcdir=None,
freshenv=False, confoverrides=None, status=None, warning=None,
tags=None, docutilsconf=None):
- # type: (unicode, path, bool, Dict, IO, IO, unicode, unicode) -> None
+ # type: (str, path, bool, Dict, IO, IO, List[str], str) -> None
if docutilsconf is not None:
(srcdir / 'docutils.conf').write_text(docutilsconf)
builddir = srcdir / '_build'
-# if confdir is None:
confdir = srcdir
-# if outdir is None:
outdir = builddir.joinpath(buildername)
- if not outdir.isdir():
- outdir.makedirs()
-# if doctreedir is None:
+ outdir.makedirs(exist_ok=True)
doctreedir = builddir.joinpath('doctrees')
- if not doctreedir.isdir():
- doctreedir.makedirs()
+ doctreedir.makedirs(exist_ok=True)
if confoverrides is None:
confoverrides = {}
-# if warningiserror is None:
warningiserror = False
self._saved_path = sys.path[:]
- self._saved_directives = directives._directives.copy()
- self._saved_roles = roles._roles.copy()
+ self._saved_directives = directives._directives.copy() # type: ignore
+ self._saved_roles = roles._roles.copy() # type: ignore
self._saved_nodeclasses = set(v for v in dir(nodes.GenericNodeVisitor)
if v.startswith('visit_'))
try:
- application.Sphinx.__init__(self, srcdir, confdir, outdir, doctreedir, # type: ignore # NOQA
- buildername, confoverrides, status, warning,
- freshenv, warningiserror, tags)
+ super().__init__(srcdir, confdir, outdir, doctreedir,
+ buildername, confoverrides, status, warning,
+ freshenv, warningiserror, tags)
except Exception:
self.cleanup()
raise
def cleanup(self, doctrees=False):
# type: (bool) -> None
- AutoDirective._registry.clear()
ModuleAnalyzer.cache.clear()
LaTeXBuilder.usepackages = []
locale.translators.clear()
sys.path[:] = self._saved_path
sys.modules.pop('autodoc_fodder', None)
- directives._directives = self._saved_directives
- roles._roles = self._saved_roles
+ directives._directives = self._saved_directives # type: ignore
+ roles._roles = self._saved_roles # type: ignore
for method in dir(nodes.GenericNodeVisitor):
if method.startswith('visit_') and \
method not in self._saved_nodeclasses:
@@ -165,7 +165,7 @@ class SphinxTestApp(application.Sphinx):
return '<%s buildername=%r>' % (self.__class__.__name__, self.builder.name)
-class SphinxTestAppWrapperForSkipBuilding(object):
+class SphinxTestAppWrapperForSkipBuilding:
"""
This class is a wrapper for SphinxTestApp to speed up the test by skipping
`app.build` process if it is already built and there is even one output
@@ -192,12 +192,14 @@ _unicode_literals_re = re.compile(r'u(".*?")|u(\'.*?\')')
def remove_unicode_literals(s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
+ warnings.warn('remove_unicode_literals() is deprecated.',
+ RemovedInSphinx40Warning)
return _unicode_literals_re.sub(lambda x: x.group(1) or x.group(2), s)
def find_files(root, suffix=None):
- # type: (unicode, bool) -> Generator
+ # type: (str, bool) -> Generator
for dirpath, dirs, files in os.walk(root, followlinks=True):
dirpath = path(dirpath)
for f in [f for f in files if not suffix or f.endswith(suffix)]: # type: ignore
@@ -206,5 +208,5 @@ def find_files(root, suffix=None):
def strip_escseq(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return re.sub('\x1b.*?m', '', text)
diff --git a/sphinx/texinputs/sphinx.xdy b/sphinx/texinputs/sphinx.xdy
index 0d02ef337..1c0794cd9 100644
--- a/sphinx/texinputs/sphinx.xdy
+++ b/sphinx/texinputs/sphinx.xdy
@@ -147,40 +147,61 @@
(merge-rule "\(\sb{\text{7}}\)" "₇" :string)
(merge-rule "\(\sb{\text{8}}\)" "₈" :string)
(merge-rule "\(\sb{\text{9}}\)" "₉" :string)
-(merge-rule "\(\alpha\)" "α" :string)
-(merge-rule "\(\beta\)" "β" :string)
-(merge-rule "\(\gamma\)" "γ" :string)
-(merge-rule "\(\delta\)" "δ" :string)
-(merge-rule "\(\epsilon\)" "ε" :string)
-(merge-rule "\(\zeta\)" "ζ" :string)
-(merge-rule "\(\eta\)" "η" :string)
-(merge-rule "\(\theta\)" "θ" :string)
-(merge-rule "\(\iota\)" "ι" :string)
-(merge-rule "\(\kappa\)" "κ" :string)
-(merge-rule "\(\lambda\)" "λ" :string)
-(merge-rule "\(\mu\)" "μ" :string)
-(merge-rule "\(\nu\)" "ν" :string)
-(merge-rule "\(\xi\)" "ξ" :string)
-(merge-rule "\(\pi\)" "π" :string)
-(merge-rule "\(\rho\)" "ρ" :string)
-(merge-rule "\(\sigma\)" "σ" :string)
-(merge-rule "\(\tau\)" "τ" :string)
-(merge-rule "\(\upsilon\)" "υ" :string)
-(merge-rule "\(\phi\)" "φ" :string)
-(merge-rule "\(\chi\)" "χ" :string)
-(merge-rule "\(\psi\)" "ψ" :string)
-(merge-rule "\(\omega\)" "ω" :string)
-(merge-rule "\(\Gamma\)" "Γ" :string)
-(merge-rule "\(\Delta\)" "Δ" :string)
-(merge-rule "\(\Theta\)" "Θ" :string)
-(merge-rule "\(\Lambda\)" "Λ" :string)
-(merge-rule "\(\Xi\)" "Ξ" :string)
-(merge-rule "\(\Pi\)" "Π" :string)
-(merge-rule "\(\Sigma\)" "Σ" :string)
-(merge-rule "\(\Upsilon\)" "Υ" :string)
-(merge-rule "\(\Phi\)" "Φ" :string)
-(merge-rule "\(\Psi\)" "Ψ" :string)
-(merge-rule "\(\Omega\)" "Ω" :string)
+(merge-rule "\IeC {\textalpha }" "α" :string)
+(merge-rule "\IeC {\textbeta }" "β" :string)
+(merge-rule "\IeC {\textgamma }" "γ" :string)
+(merge-rule "\IeC {\textdelta }" "δ" :string)
+(merge-rule "\IeC {\textepsilon }" "ε" :string)
+(merge-rule "\IeC {\textzeta }" "ζ" :string)
+(merge-rule "\IeC {\texteta }" "η" :string)
+(merge-rule "\IeC {\texttheta }" "θ" :string)
+(merge-rule "\IeC {\textiota }" "ι" :string)
+(merge-rule "\IeC {\textkappa }" "κ" :string)
+(merge-rule "\IeC {\textlambda }" "λ" :string)
+(merge-rule "\IeC {\textmu }" "μ" :string)
+(merge-rule "\IeC {\textnu }" "ν" :string)
+(merge-rule "\IeC {\textxi }" "ξ" :string)
+(merge-rule "\IeC {\textomicron }" "ο" :string)
+(merge-rule "\IeC {\textpi }" "π" :string)
+(merge-rule "\IeC {\textrho }" "ρ" :string)
+(merge-rule "\IeC {\textsigma }" "σ" :string)
+(merge-rule "\IeC {\texttau }" "τ" :string)
+(merge-rule "\IeC {\textupsilon }" "υ" :string)
+(merge-rule "\IeC {\textphi }" "φ" :string)
+(merge-rule "\IeC {\textchi }" "χ" :string)
+(merge-rule "\IeC {\textpsi }" "ψ" :string)
+(merge-rule "\IeC {\textomega }" "ω" :string)
+(merge-rule "\IeC {\textAlpha }" "Α" :string)
+(merge-rule "\IeC {\textBeta }" "Β" :string)
+(merge-rule "\IeC {\textGamma }" "Γ" :string)
+(merge-rule "\IeC {\textDelta }" "Δ" :string)
+(merge-rule "\IeC {\textEpsilon }" "Ε" :string)
+(merge-rule "\IeC {\textZeta }" "Ζ" :string)
+(merge-rule "\IeC {\textEta }" "Η" :string)
+(merge-rule "\IeC {\textTheta }" "Θ" :string)
+(merge-rule "\IeC {\textIota }" "Ι" :string)
+(merge-rule "\IeC {\textKappa }" "Κ" :string)
+(merge-rule "\IeC {\textLambda }" "Λ" :string)
+(merge-rule "\IeC {\textMu }" "Μ" :string)
+(merge-rule "\IeC {\textNu }" "Ν" :string)
+(merge-rule "\IeC {\textTheta }" "Θ" :string)
+(merge-rule "\IeC {\textIota }" "Ι" :string)
+(merge-rule "\IeC {\textKappa }" "Κ" :string)
+(merge-rule "\IeC {\textLambda }" "Λ" :string)
+(merge-rule "\IeC {\textMu }" "Μ" :string)
+(merge-rule "\IeC {\textNu }" "Ν" :string)
+(merge-rule "\IeC {\textXi }" "Ξ" :string)
+(merge-rule "\IeC {\textOmicron }" "Ο" :string)
+(merge-rule "\IeC {\textPi }" "Π" :string)
+(merge-rule "\IeC {\textRho }" "Ρ" :string)
+(merge-rule "\IeC {\textSigma }" "Σ" :string)
+(merge-rule "\IeC {\textTau }" "Τ" :string)
+(merge-rule "\IeC {\textUpsilon }" "Υ" :string)
+(merge-rule "\IeC {\textPhi }" "Φ" :string)
+(merge-rule "\IeC {\textChi }" "Χ" :string)
+(merge-rule "\IeC {\textPsi }" "Ψ" :string)
+(merge-rule "\IeC {\textOmega }" "Ω" :string)
+(merge-rule "\IeC {\textohm }" "Ω" :string)
;; This xindy module provides some basic support for "see"
(require "makeindex.xdy")
diff --git a/sphinx/texinputs/sphinxcyrillic.sty b/sphinx/texinputs/sphinxcyrillic.sty
new file mode 100644
index 000000000..1a14c7b24
--- /dev/null
+++ b/sphinx/texinputs/sphinxcyrillic.sty
@@ -0,0 +1,53 @@
+%% CYRILLIC IN NON-CYRILLIC DOCUMENTS (pdflatex only)
+%
+% refs: https://tex.stackexchange.com/q/460271/
+\ProvidesPackage{sphinxcyrillic}%
+ [2018/11/21 v2.0 support for Cyrillic in non-Cyrillic documents]
+\RequirePackage{kvoptions}
+\SetupKeyvalOptions{prefix=spx@cyropt@} % use \spx@cyropt@ prefix
+\DeclareBoolOption[false]{Xtwo}
+\DeclareBoolOption[false]{TtwoA}
+\DeclareDefaultOption{\@unknownoptionerror}
+\ProcessLocalKeyvalOptions* % ignore class options
+
+\ifspx@cyropt@Xtwo
+% original code by tex.sx user egreg:
+% https://tex.stackexchange.com/a/460325/
+% 159 Cyrillic glyphs as available in X2 TeX 8bit font encoding
+% This assumes inputenc loaded with utf8 option, or LaTeX release
+% as recent as 2018/04/01 which does it automatically.
+ \@tfor\next:=%
+ {Ё}{Ђ}{Є}{Ѕ}{І}{Ј}{Љ}{Њ}{Ћ}{Ў}{Џ}{А}{Б}{В}{Г}{Д}{Е}{Ж}{З}{И}{Й}%
+ {К}{Л}{М}{Н}{О}{П}{Р}{С}{Т}{У}{Ф}{Х}{Ц}{Ч}{Ш}{Щ}{Ъ}{Ы}{Ь}{Э}{Ю}%
+ {Я}{а}{б}{в}{г}{д}{е}{ж}{з}{и}{й}{к}{л}{м}{н}{о}{п}{р}{с}{т}{у}%
+ {ф}{х}{ц}{ч}{ш}{щ}{ъ}{ы}{ь}{э}{ю}{я}{ё}{ђ}{є}{ѕ}{і}{ј}{љ}{њ}{ћ}%
+ {ў}{џ}{Ѣ}{ѣ}{Ѫ}{ѫ}{Ѵ}{ѵ}{Ґ}{ґ}{Ғ}{ғ}{Ҕ}{ҕ}{Җ}{җ}{Ҙ}{ҙ}{Қ}{қ}{Ҝ}{ҝ}%
+ {Ҟ}{ҟ}{Ҡ}{ҡ}{Ң}{ң}{Ҥ}{ҥ}{Ҧ}{ҧ}{Ҩ}{ҩ}{Ҫ}{ҫ}{Ҭ}{ҭ}{Ү}{ү}{Ұ}{ұ}{Ҳ}{ҳ}%
+ {Ҵ}{ҵ}{Ҷ}{ҷ}{Ҹ}{ҹ}{Һ}{һ}{Ҽ}{ҽ}{Ҿ}{ҿ}{Ӏ}{Ӄ}{ӄ}{Ӆ}{ӆ}{Ӈ}{ӈ}{Ӌ}{ӌ}%
+ {Ӎ}{ӎ}{Ӕ}{ӕ}{Ә}{ә}{Ӡ}{ӡ}{Ө}{ө}\do
+ {%
+ \begingroup\def\IeC{\protect\DeclareTextSymbolDefault}%
+ \protected@edef\@temp{\endgroup\next{X2}}\@temp
+ }%
+\else
+\ifspx@cyropt@TtwoA
+% original code by tex.sx user jfbu:
+% https://tex.stackexchange.com/a/460305/
+% 63*2+1=127 Cyrillic glyphs as found in T2A 8bit TeX font-encoding
+ \@tfor\@tempa:=%
+ {ae}{a}{b}{chrdsc}{chvcrs}{ch}{c}{dje}{dze}{dzhe}{d}{erev}{ery}{e}%
+ {f}{ghcrs}{gup}{g}{hdsc}{hrdsn}{h}{ie}{ii}{ishrt}{i}{je}%
+ {kbeak}{kdsc}{kvcrs}{k}{lje}{l}{m}{ndsc}{ng}{nje}{n}{otld}{o}{p}{r}%
+ {schwa}{sdsc}{sftsn}{shch}{shha}{sh}{s}{tshe}{t}{ushrt}{u}{v}%
+ {ya}{yhcrs}{yi}{yo}{yu}{y}{zdsc}{zhdsc}{zh}{z}\do
+ {%
+ \expandafter\DeclareTextSymbolDefault\expandafter
+ {\csname cyr\@tempa\endcsname}{T2A}%
+ \expandafter\uppercase\expandafter{\expandafter
+ \def\expandafter\@tempa\expandafter{\@tempa}}%
+ \expandafter\DeclareTextSymbolDefault\expandafter
+ {\csname CYR\@tempa\endcsname}{T2A}%
+ }%
+ \DeclareTextSymbolDefault{\CYRpalochka}{T2A}%
+\fi\fi
+\endinput
diff --git a/sphinx/texinputs/sphinxhowto.cls b/sphinx/texinputs/sphinxhowto.cls
index 6e4858567..f2572b3b4 100644
--- a/sphinx/texinputs/sphinxhowto.cls
+++ b/sphinx/texinputs/sphinxhowto.cls
@@ -3,7 +3,7 @@
%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesClass{sphinxhowto}[2018/12/22 v1.8.3 Document class (Sphinx howto)]
+\ProvidesClass{sphinxhowto}[2018/12/23 v2.0 Document class (Sphinx howto)]
% 'oneside' option overriding the 'twoside' default
\newif\if@oneside
@@ -57,15 +57,16 @@
%\gdef\@thanks{}\gdef\@author{}\gdef\@title{}
}
-\newcommand{\sphinxtableofcontents}{
+\newcommand{\sphinxtableofcontents}{%
\begingroup
- \parskip = 0mm
+ \parskip \z@skip
+ \sphinxtableofcontentshook
\tableofcontents
\endgroup
- \rule{\textwidth}{1pt}
- \vspace{12pt}
+ \noindent\rule{\textwidth}{1pt}\par
+ \vspace{12pt}%
}
-
+\newcommand\sphinxtableofcontentshook{}
\pagenumbering{arabic}
% Fix the bibliography environment to add an entry to the Table of
diff --git a/sphinx/texinputs/sphinxmanual.cls b/sphinx/texinputs/sphinxmanual.cls
index 1ab80d264..eabc1af7b 100644
--- a/sphinx/texinputs/sphinxmanual.cls
+++ b/sphinx/texinputs/sphinxmanual.cls
@@ -3,7 +3,7 @@
%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesClass{sphinxmanual}[2018/12/22 v1.8.3 Document class (Sphinx manual)]
+\ProvidesClass{sphinxmanual}[2018/12/23 v2.0 Document class (Sphinx manual)]
% chapters starting at odd pages (overridden by 'openany' document option)
\PassOptionsToClass{openright}{\sphinxdocclass}
@@ -34,8 +34,8 @@
% ``Bjarne'' style a bit better.
%
\newcommand{\sphinxmaketitle}{%
- \let\spx@tempa\relax
- \ifHy@pageanchor\def\spx@tempa{\Hy@pageanchortrue}\fi
+ \let\sphinxrestorepageanchorsetting\relax
+ \ifHy@pageanchor\def\sphinxrestorepageanchorsetting{\Hy@pageanchortrue}\fi
\hypersetup{pageanchor=false}% avoid duplicate destination warnings
\begin{titlepage}%
\let\footnotesize\small
@@ -72,13 +72,14 @@
\clearpage
\ifdefined\sphinxbackoftitlepage\sphinxbackoftitlepage\fi
\if@openright\cleardoublepage\else\clearpage\fi
- \spx@tempa
+ \sphinxrestorepageanchorsetting
}
\newcommand{\sphinxtableofcontents}{%
\pagenumbering{roman}%
\begingroup
\parskip \z@skip
+ \sphinxtableofcontentshook
\tableofcontents
\endgroup
% before resetting page counter, let's do the right thing.
@@ -89,8 +90,10 @@
% This is needed to get the width of the section # area wide enough in the
% library reference. Doing it here keeps it the same for all the manuals.
%
-\renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}}
-\renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}}
+\newcommand{\sphinxtableofcontentshook}{%
+ \renewcommand*\l@section{\@dottedtocline{1}{1.5em}{2.6em}}%
+ \renewcommand*\l@subsection{\@dottedtocline{2}{4.1em}{3.5em}}%
+}
% Fix the bibliography environment to add an entry to the Table of
% Contents.
diff --git a/sphinx/themes/agogo/layout.html b/sphinx/themes/agogo/layout.html
index f2c880537..f50a2d78f 100644
--- a/sphinx/themes/agogo/layout.html
+++ b/sphinx/themes/agogo/layout.html
@@ -52,8 +52,6 @@
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" />
<input type="submit" value="{{ _('Go') }}" />
- <input type="hidden" name="check_keywords" value="yes" />
- <input type="hidden" name="area" value="default" />
</form>
</div>
{%- endblock %}
diff --git a/sphinx/themes/basic/opensearch.xml b/sphinx/themes/basic/opensearch.xml
index 03875be49..a750b3041 100644
--- a/sphinx/themes/basic/opensearch.xml
+++ b/sphinx/themes/basic/opensearch.xml
@@ -4,7 +4,7 @@
<Description>{% trans docstitle=docstitle|e %}Search {{ docstitle }}{% endtrans %}</Description>
<InputEncoding>utf-8</InputEncoding>
<Url type="text/html" method="get"
- template="{{ use_opensearch }}/{{ pathto('search') }}?q={searchTerms}&amp;check_keywords=yes&amp;area=default"/>
+ template="{{ use_opensearch }}/{{ pathto('search') }}?q={searchTerms}"/>
<LongName>{{ docstitle|e }}</LongName>
{% block extra %} {# Put e.g. an <Image> element here. #} {% endblock %}
</OpenSearchDescription>
diff --git a/sphinx/themes/basic/searchbox.html b/sphinx/themes/basic/searchbox.html
index 506877410..6feaa93e0 100644
--- a/sphinx/themes/basic/searchbox.html
+++ b/sphinx/themes/basic/searchbox.html
@@ -14,8 +14,6 @@
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" />
<input type="submit" value="{{ _('Go') }}" />
- <input type="hidden" name="check_keywords" value="yes" />
- <input type="hidden" name="area" value="default" />
</form>
</div>
</div>
diff --git a/sphinx/themes/basic/static/searchtools.js b/sphinx/themes/basic/static/searchtools.js
index 02ccf02a9..d6ee7aece 100644
--- a/sphinx/themes/basic/static/searchtools.js
+++ b/sphinx/themes/basic/static/searchtools.js
@@ -56,6 +56,14 @@ var Search = {
_queued_query : null,
_pulse_status : -1,
+ htmlToText : function(htmlString) {
+ var htmlElement = document.createElement('span');
+ htmlElement.innerHTML = htmlString;
+ $(htmlElement).find('.headerlink').remove();
+ docContent = $(htmlElement).find('[role=main]')[0];
+ return docContent.textContent || docContent.innerText;
+ },
+
init : function() {
var params = $.getQueryParameters();
if (params.q) {
@@ -259,11 +267,7 @@ var Search = {
displayNextItem();
});
} else if (DOCUMENTATION_OPTIONS.HAS_SOURCE) {
- var suffix = DOCUMENTATION_OPTIONS.SOURCELINK_SUFFIX;
- if (suffix === undefined) {
- suffix = '.txt';
- }
- $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/' + item[5] + (item[5].slice(-suffix.length) === suffix ? '' : suffix),
+ $.ajax({url: DOCUMENTATION_OPTIONS.URL_ROOT + item[0] + DOCUMENTATION_OPTIONS.FILE_SUFFIX,
dataType: "text",
complete: function(jqxhr, textstatus) {
var data = jqxhr.responseText;
@@ -456,7 +460,8 @@ var Search = {
* words. the first one is used to find the occurrence, the
* latter for highlighting it.
*/
- makeSearchSummary : function(text, keywords, hlwords) {
+ makeSearchSummary : function(htmlText, keywords, hlwords) {
+ var text = Search.htmlToText(htmlText);
var textLower = text.toLowerCase();
var start = 0;
$.each(keywords, function() {
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 944c446c3..3d7e67506 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.theming
~~~~~~~~~~~~~~
@@ -9,19 +8,16 @@
:license: BSD, see LICENSE for details.
"""
+import configparser
import os
import shutil
import tempfile
-import warnings
from os import path
from zipfile import ZipFile
import pkg_resources
-from six import string_types, iteritems
-from six.moves import configparser
from sphinx import package_dir
-from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.errors import ThemeError
from sphinx.locale import __
from sphinx.util import logging
@@ -39,7 +35,7 @@ THEMECONF = 'theme.conf'
def extract_zip(filename, targetdir):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Extract zip file to target directory."""
ensuredir(targetdir)
@@ -53,13 +49,13 @@ def extract_zip(filename, targetdir):
fp.write(archive.read(name))
-class Theme(object):
+class Theme:
"""A Theme is a set of HTML templates and configurations.
This class supports both theme directory and theme archive (zipped theme)."""
def __init__(self, name, theme_path, factory):
- # type: (unicode, unicode, HTMLThemeFactory) -> None
+ # type: (str, str, HTMLThemeFactory) -> None
self.name = name
self.base = None
self.rootdir = None
@@ -75,7 +71,7 @@ class Theme(object):
extract_zip(theme_path, self.themedir)
self.config = configparser.RawConfigParser()
- self.config.read(path.join(self.themedir, THEMECONF)) # type: ignore
+ self.config.read(path.join(self.themedir, THEMECONF))
try:
inherit = self.config.get('theme', 'inherit')
@@ -92,7 +88,7 @@ class Theme(object):
(inherit, name))
def get_theme_dirs(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
@@ -102,12 +98,12 @@ class Theme(object):
return [self.themedir] + self.base.get_theme_dirs()
def get_config(self, section, name, default=NODEFAULT):
- # type: (unicode, unicode, Any) -> Any
+ # type: (str, str, Any) -> Any
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
- return self.config.get(section, name) # type: ignore
+ return self.config.get(section, name)
except (configparser.NoOptionError, configparser.NoSectionError):
if self.base:
return self.base.get_config(section, name, default)
@@ -119,7 +115,7 @@ class Theme(object):
return default
def get_options(self, overrides={}):
- # type: (Dict[unicode, Any]) -> Dict[unicode, Any]
+ # type: (Dict[str, Any]) -> Dict[str, Any]
"""Return a dictionary of theme options and their values."""
if self.base:
options = self.base.get_options()
@@ -131,7 +127,7 @@ class Theme(object):
except configparser.NoSectionError:
pass
- for option, value in iteritems(overrides):
+ for option, value in overrides.items():
if option not in options:
logger.warning(__('unsupported theme option %r given') % option)
else:
@@ -152,7 +148,7 @@ class Theme(object):
def is_archived_theme(filename):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""Check the specified file is an archived theme file or not."""
try:
with ZipFile(filename) as f:
@@ -161,7 +157,7 @@ def is_archived_theme(filename):
return False
-class HTMLThemeFactory(object):
+class HTMLThemeFactory:
"""A factory class for HTML Themes."""
def __init__(self, app):
@@ -176,20 +172,20 @@ class HTMLThemeFactory(object):
# type: () -> None
"""Load built-in themes."""
themes = self.find_themes(path.join(package_dir, 'themes'))
- for name, theme in iteritems(themes):
+ for name, theme in themes.items():
self.themes[name] = theme
def load_additional_themes(self, theme_paths):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Load additional themes placed at specified directories."""
for theme_path in theme_paths:
abs_theme_path = path.abspath(path.join(self.app.confdir, theme_path))
themes = self.find_themes(abs_theme_path)
- for name, theme in iteritems(themes):
+ for name, theme in themes.items():
self.themes[name] = theme
def load_extra_theme(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Try to load a theme having specifed name."""
if name == 'alabaster':
self.load_alabaster_theme()
@@ -215,7 +211,7 @@ class HTMLThemeFactory(object):
pass
def load_external_theme(self, name):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Try to load a theme using entry_points.
Sphinx refers to ``sphinx_themes`` entry_points.
@@ -229,29 +225,10 @@ class HTMLThemeFactory(object):
except StopIteration:
pass
- # look up for old styled entry_points
- for entry_point in pkg_resources.iter_entry_points('sphinx_themes'):
- target = entry_point.load()
- if callable(target):
- themedir = target()
- if not isinstance(themedir, string_types):
- logger.warning(__('Theme extension %r does not respond correctly.') %
- entry_point.module_name)
- else:
- themedir = target
-
- themes = self.find_themes(themedir)
- for entry, theme in iteritems(themes):
- if name == entry:
- warnings.warn('``sphinx_themes`` entry point is now deprecated. '
- 'Please use ``sphinx.html_themes`` instead.',
- RemovedInSphinx20Warning)
- self.themes[name] = theme
-
def find_themes(self, theme_path):
- # type: (unicode) -> Dict[unicode, unicode]
+ # type: (str) -> Dict[str, str]
"""Search themes from specified directory."""
- themes = {} # type: Dict[unicode, unicode]
+ themes = {} # type: Dict[str, str]
if not path.isdir(theme_path):
return themes
@@ -271,7 +248,7 @@ class HTMLThemeFactory(object):
return themes
def create(self, name):
- # type: (unicode) -> Theme
+ # type: (str) -> Theme
"""Create an instance of theme."""
if name not in self.themes:
self.load_extra_theme(name)
diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py
index ab69f981a..fc00a53a2 100644
--- a/sphinx/transforms/__init__.py
+++ b/sphinx/transforms/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms
~~~~~~~~~~~~~~~~~
@@ -10,6 +9,7 @@
"""
import re
+from typing import cast
from docutils import nodes
from docutils.transforms import Transform, Transformer
@@ -27,7 +27,7 @@ from sphinx.util.nodes import apply_source_workaround, is_smartquotable
if False:
# For type annotation
- from typing import Generator, List # NOQA
+ from typing import Any, Generator, List, Tuple # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
from sphinx.domain.std import StandardDomain # NOQA
@@ -54,7 +54,7 @@ class SphinxTransform(Transform):
def app(self):
# type: () -> Sphinx
"""Reference to the :class:`.Sphinx` object."""
- return self.document.settings.env.app
+ return self.env.app
@property
def env(self):
@@ -66,7 +66,7 @@ class SphinxTransform(Transform):
def config(self):
# type: () -> Config
"""Reference to the :class:`.Config` object."""
- return self.document.settings.env.config
+ return self.env.config
class SphinxTransformer(Transformer):
@@ -74,7 +74,7 @@ class SphinxTransformer(Transformer):
A transformer for Sphinx.
"""
- document = None # type: nodes.Node
+ document = None # type: nodes.document
env = None # type: BuildEnvironment
def set_environment(self, env):
@@ -87,7 +87,7 @@ class SphinxTransformer(Transformer):
if not hasattr(self.document.settings, 'env') and self.env:
self.document.settings.env = self.env
- Transformer.apply_transforms(self)
+ super().apply_transforms()
else:
# wrap the target node by document node during transforming
try:
@@ -96,7 +96,7 @@ class SphinxTransformer(Transformer):
document.settings.env = self.env
document += self.document
self.document = document
- Transformer.apply_transforms(self)
+ super().apply_transforms()
finally:
self.document = self.document[0]
@@ -108,8 +108,8 @@ class DefaultSubstitutions(SphinxTransform):
# run before the default Substitutions
default_priority = 210
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
# only handle those not otherwise defined in the document
to_handle = default_substitutions - set(self.document.substitution_defs)
for ref in self.document.traverse(nodes.substitution_reference):
@@ -132,8 +132,8 @@ class MoveModuleTargets(SphinxTransform):
"""
default_priority = 210
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(nodes.target):
if not node['ids']:
continue
@@ -151,8 +151,8 @@ class HandleCodeBlocks(SphinxTransform):
"""
default_priority = 210
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
# move doctest blocks out of blockquotes
for node in self.document.traverse(nodes.block_quote):
if all(isinstance(child, nodes.doctest_block) for child
@@ -176,8 +176,8 @@ class AutoNumbering(SphinxTransform):
"""
default_priority = 210
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
domain = self.env.get_domain('std') # type: StandardDomain
for node in self.document.traverse(nodes.Element):
@@ -191,8 +191,8 @@ class SortIds(SphinxTransform):
"""
default_priority = 261
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(nodes.section):
if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
node['ids'] = node['ids'][1:] + [node['ids'][0]]
@@ -205,22 +205,23 @@ class CitationReferences(SphinxTransform):
"""
default_priority = 619
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
# mark citation labels as not smartquoted
- for citnode in self.document.traverse(nodes.citation):
- citnode[0]['support_smartquotes'] = False
+ for citation in self.document.traverse(nodes.citation):
+ label = cast(nodes.label, citation[0])
+ label['support_smartquotes'] = False
- for citnode in self.document.traverse(nodes.citation_reference):
- cittext = citnode.astext()
+ for citation_ref in self.document.traverse(nodes.citation_reference):
+ cittext = citation_ref.astext()
refnode = addnodes.pending_xref(cittext, refdomain='std', reftype='citation',
reftarget=cittext, refwarn=True,
support_smartquotes=False,
- ids=citnode["ids"])
- refnode.source = citnode.source or citnode.parent.source
- refnode.line = citnode.line or citnode.parent.line
+ ids=citation_ref["ids"])
+ refnode.source = citation_ref.source or citation_ref.parent.source
+ refnode.line = citation_ref.line or citation_ref.parent.line
refnode += nodes.Text('[' + cittext + ']')
- citnode.parent.replace(citnode, refnode)
+ citation_ref.parent.replace(citation_ref, refnode)
TRANSLATABLE_NODES = {
@@ -238,11 +239,11 @@ class ApplySourceWorkaround(SphinxTransform):
"""
default_priority = 10
- def apply(self):
- # type: () -> None
- for n in self.document.traverse():
- if isinstance(n, (nodes.TextElement, nodes.image)):
- apply_source_workaround(n)
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ for node in self.document.traverse(): # type: nodes.Node
+ if isinstance(node, (nodes.TextElement, nodes.image)):
+ apply_source_workaround(node)
class AutoIndexUpgrader(SphinxTransform):
@@ -251,8 +252,8 @@ class AutoIndexUpgrader(SphinxTransform):
"""
default_priority = 210
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(addnodes.index):
if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
msg = __('4 column based index found. '
@@ -269,8 +270,8 @@ class ExtraTranslatableNodes(SphinxTransform):
"""
default_priority = 10
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
targets = self.config.gettext_additional_targets
target_nodes = [v for k, v in TRANSLATABLE_NODES.items() if k in targets]
if not target_nodes:
@@ -280,7 +281,7 @@ class ExtraTranslatableNodes(SphinxTransform):
# type: (nodes.Node) -> bool
return isinstance(node, tuple(target_nodes))
- for node in self.document.traverse(is_translatable_node):
+ for node in self.document.traverse(is_translatable_node): # type: nodes.Element
node['translatable'] = True
@@ -290,8 +291,8 @@ class UnreferencedFootnotesDetector(SphinxTransform):
"""
default_priority = 200
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.footnotes:
if node['names'] == []:
# footnote having duplicated number. It is already warned at parser.
@@ -312,8 +313,8 @@ class FilterSystemMessages(SphinxTransform):
"""Filter system messages from a doctree."""
default_priority = 999
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
filterlevel = self.config.keep_warnings and 2 or 5
for node in self.document.traverse(nodes.system_message):
if node['level'] < filterlevel:
@@ -327,13 +328,13 @@ class SphinxContentsFilter(ContentsFilter):
within table-of-contents link nodes.
"""
def visit_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (addnodes.pending_xref) -> None
text = node.astext()
self.parent.append(nodes.literal(text, text))
raise nodes.SkipNode
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.image) -> None
raise nodes.SkipNode
@@ -345,12 +346,15 @@ class SphinxSmartQuotes(SmartQuotes, SphinxTransform):
"""
default_priority = 750
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if not self.is_available():
return
- SmartQuotes.apply(self)
+ # override default settings with :confval:`smartquotes_action`
+ self.smartquotes_action = self.config.smartquotes_action
+
+ super().apply()
def is_available(self):
# type: () -> bool
@@ -378,17 +382,8 @@ class SphinxSmartQuotes(SmartQuotes, SphinxTransform):
else:
return False
- @property
- def smartquotes_action(self):
- # type: () -> unicode
- """A smartquotes_action setting for SmartQuotes.
-
- Users can change this setting through :confval:`smartquotes_action`.
- """
- return self.config.smartquotes_action
-
def get_tokens(self, txtnodes):
- # type: (List[nodes.Node]) -> Generator
+ # type: (List[nodes.Text]) -> Generator[Tuple[str, str], None, None]
# A generator that yields ``(texttype, nodetext)`` tuples for a list
# of "Text" nodes (interface to ``smartquotes.educate_tokens()``).
@@ -403,8 +398,8 @@ class DoctreeReadEvent(SphinxTransform):
"""Emit :event:`doctree-read` event."""
default_priority = 880
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
self.app.emit('doctree-read', self.document)
@@ -412,8 +407,8 @@ class ManpageLink(SphinxTransform):
"""Find manpage section numbers and names"""
default_priority = 999
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(addnodes.manpage):
manpage = ' '.join([str(x) for x in node.children
if isinstance(x, nodes.Text)])
diff --git a/sphinx/transforms/compact_bullet_list.py b/sphinx/transforms/compact_bullet_list.py
index 0121dd12f..99854f188 100644
--- a/sphinx/transforms/compact_bullet_list.py
+++ b/sphinx/transforms/compact_bullet_list.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.compact_bullet_list
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,6 +8,8 @@
:license: BSD, see LICENSE for details.
"""
+from typing import cast
+
from docutils import nodes
from sphinx import addnodes
@@ -16,7 +17,7 @@ from sphinx.transforms import SphinxTransform
if False:
# For type annotation
- from typing import List # NOQA
+ from typing import Any, List # NOQA
class RefOnlyListChecker(nodes.GenericNodeVisitor):
@@ -31,11 +32,11 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
raise nodes.NodeFound
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.bullet_list) -> None
pass
def visit_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.list_item) -> None
children = [] # type: List[nodes.Node]
for child in node.children:
if not isinstance(child, nodes.Invisible):
@@ -65,8 +66,8 @@ class RefOnlyBulletListTransform(SphinxTransform):
"""
default_priority = 100
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if self.config.html_compact_lists:
return
@@ -84,8 +85,8 @@ class RefOnlyBulletListTransform(SphinxTransform):
for node in self.document.traverse(nodes.bullet_list):
if check_refonly_list(node):
for item in node.traverse(nodes.list_item):
- para = item[0]
- ref = para[0]
+ para = cast(nodes.paragraph, item[0])
+ ref = cast(nodes.reference, para[0])
compact_para = addnodes.compact_paragraph()
compact_para += ref
item.replace(para, compact_para)
diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py
index 59a046628..c3d96562b 100644
--- a/sphinx/transforms/i18n.py
+++ b/sphinx/transforms/i18n.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.i18n
~~~~~~~~~~~~~~~~~~~~~~
@@ -10,6 +9,7 @@
"""
from os import path
+from typing import Any, TypeVar
from docutils import nodes
from docutils.io import StringInput
@@ -22,27 +22,29 @@ from sphinx.transforms import SphinxTransform
from sphinx.util import split_index_msg, logging
from sphinx.util.i18n import find_catalog
from sphinx.util.nodes import (
- LITERAL_TYPE_NODES, IMAGE_TYPE_NODES,
+ LITERAL_TYPE_NODES, IMAGE_TYPE_NODES, NodeMatcher,
extract_messages, is_pending_meta, traverse_translatable_index,
)
from sphinx.util.pycompat import indent
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
+ from typing import Dict, List, Tuple, Type # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.config import Config # NOQA
logger = logging.getLogger(__name__)
+N = TypeVar('N', bound=nodes.Node)
+
def publish_msgstr(app, source, source_path, source_line, config, settings):
- # type: (Sphinx, unicode, unicode, int, Config, Dict) -> nodes.document
+ # type: (Sphinx, str, str, int, Config, Any) -> nodes.Element
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
- :param unicode source: source text
- :param unicode source_path: source path for warning indication
+ :param str source: source text
+ :param str source_path: source path for warning indication
:param source_line: source line for warning indication
:param sphinx.config.Config config: sphinx config
:param docutils.frontend.Values settings: docutils settings
@@ -59,7 +61,7 @@ def publish_msgstr(app, source, source_path, source_line, config, settings):
settings=settings,
)
try:
- doc = doc[0]
+ doc = doc[0] # type: ignore
except IndexError: # empty node
pass
return doc
@@ -71,8 +73,8 @@ class PreserveTranslatableMessages(SphinxTransform):
"""
default_priority = 10 # this MUST be invoked before Locale transform
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(addnodes.translatable):
node.preserve_original_messages()
@@ -83,10 +85,10 @@ class Locale(SphinxTransform):
"""
default_priority = 20
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
settings, source = self.document.settings, self.document['source']
- msgstr = u''
+ msgstr = ''
# XXX check if this is reliable
assert source.startswith(self.env.srcdir)
@@ -103,7 +105,7 @@ class Locale(SphinxTransform):
# phase1: replace reference ids with translated names
for node, msg in extract_messages(self.document):
- msgstr = catalog.gettext(msg) # type: ignore
+ msgstr = catalog.gettext(msg)
# XXX add marker to untranslated parts
if not msgstr or msgstr == msg or not msgstr.strip():
# as-of-yet untranslated
@@ -183,11 +185,8 @@ class Locale(SphinxTransform):
self.document.note_implicit_target(section_node)
# replace target's refname to new target name
- def is_named_target(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.target) and \
- node.get('refname') == old_name
- for old_target in self.document.traverse(is_named_target):
+ matcher = NodeMatcher(nodes.target, refname=old_name)
+ for old_target in self.document.traverse(matcher): # type: nodes.target
old_target['refname'] = new_name
processed = True
@@ -220,7 +219,7 @@ class Locale(SphinxTransform):
if node.get('translated', False): # to avoid double translation
continue # skip if the node is already translated by phase1
- msgstr = catalog.gettext(msg) # type: ignore
+ msgstr = catalog.gettext(msg)
# XXX add marker to untranslated parts
if not msgstr or msgstr == msg: # as-of-yet untranslated
continue
@@ -231,7 +230,7 @@ class Locale(SphinxTransform):
continue
# update meta nodes
- if is_pending_meta(node):
+ if isinstance(node, nodes.pending) and is_pending_meta(node):
node.details['nodes'][0]['content'] = msgstr
continue
@@ -264,30 +263,30 @@ class Locale(SphinxTransform):
patch = patch.next_node()
# ignore unexpected markups in translation message
- if not isinstance(patch, (
- (nodes.paragraph, # expected form of translation
- nodes.title, # generated by above "Subelements phase2"
- ) +
- # following types are expected if
- # config.gettext_additional_targets is configured
- LITERAL_TYPE_NODES +
- IMAGE_TYPE_NODES
- )):
+ unexpected = (
+ nodes.paragraph, # expected form of translation
+ nodes.title # generated by above "Subelements phase2"
+ ) # type: Tuple[Type[nodes.Element], ...]
+
+ # following types are expected if
+ # config.gettext_additional_targets is configured
+ unexpected += LITERAL_TYPE_NODES
+ unexpected += IMAGE_TYPE_NODES
+
+ if not isinstance(patch, unexpected):
continue # skip
# auto-numbered foot note reference should use original 'ids'.
- def is_autofootnote_ref(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.footnote_reference) and node.get('auto')
-
def list_replace_or_append(lst, old, new):
- # type: (List, Any, Any) -> None
+ # type: (List[N], N, N) -> None
if old in lst:
lst[lst.index(old)] = new
else:
lst.append(new)
- old_foot_refs = node.traverse(is_autofootnote_ref)
- new_foot_refs = patch.traverse(is_autofootnote_ref)
+
+ is_autofootnote_ref = NodeMatcher(nodes.footnote_reference, auto=Any)
+ old_foot_refs = node.traverse(is_autofootnote_ref) # type: List[nodes.footnote_reference] # NOQA
+ new_foot_refs = patch.traverse(is_autofootnote_ref) # type: List[nodes.footnote_reference] # NOQA
if len(old_foot_refs) != len(new_foot_refs):
old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
@@ -295,45 +294,41 @@ class Locale(SphinxTransform):
' original: {0}, translated: {1}')
.format(old_foot_ref_rawsources, new_foot_ref_rawsources),
location=node)
- old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]]
+ old_foot_namerefs = {} # type: Dict[str, List[nodes.footnote_reference]]
for r in old_foot_refs:
old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
- for new in new_foot_refs:
- refname = new.get('refname')
+ for newf in new_foot_refs:
+ refname = newf.get('refname')
refs = old_foot_namerefs.get(refname, [])
if not refs:
continue
- old = refs.pop(0)
- new['ids'] = old['ids']
- for id in new['ids']:
- self.document.ids[id] = new
+ oldf = refs.pop(0)
+ newf['ids'] = oldf['ids']
+ for id in newf['ids']:
+ self.document.ids[id] = newf
- if new['auto'] == 1:
+ if newf['auto'] == 1:
# autofootnote_refs
- list_replace_or_append(self.document.autofootnote_refs, old, new)
+ list_replace_or_append(self.document.autofootnote_refs, oldf, newf)
else:
# symbol_footnote_refs
- list_replace_or_append(self.document.symbol_footnote_refs, old, new)
+ list_replace_or_append(self.document.symbol_footnote_refs, oldf, newf)
if refname:
- list_replace_or_append(
- self.document.footnote_refs.setdefault(refname, []),
- old, new)
- list_replace_or_append(
- self.document.refnames.setdefault(refname, []),
- old, new)
+ footnote_refs = self.document.footnote_refs.setdefault(refname, [])
+ list_replace_or_append(footnote_refs, oldf, newf)
+
+ refnames = self.document.refnames.setdefault(refname, [])
+ list_replace_or_append(refnames, oldf, newf)
# reference should use new (translated) 'refname'.
# * reference target ".. _Python: ..." is not translatable.
# * use translated refname for section refname.
# * inline reference "`Python <...>`_" has no 'refname'.
- def is_refnamed_ref(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.reference) and \
- 'refname' in node
- old_refs = node.traverse(is_refnamed_ref)
- new_refs = patch.traverse(is_refnamed_ref)
+ is_refnamed_ref = NodeMatcher(nodes.reference, refname=Any)
+ old_refs = node.traverse(is_refnamed_ref) # type: List[nodes.reference]
+ new_refs = patch.traverse(is_refnamed_ref) # type: List[nodes.reference]
if len(old_refs) != len(new_refs):
old_ref_rawsources = [ref.rawsource for ref in old_refs]
new_ref_rawsources = [ref.rawsource for ref in new_refs]
@@ -344,27 +339,24 @@ class Locale(SphinxTransform):
old_ref_names = [r['refname'] for r in old_refs]
new_ref_names = [r['refname'] for r in new_refs]
orphans = list(set(old_ref_names) - set(new_ref_names))
- for new in new_refs:
- if not self.document.has_name(new['refname']):
+ for newr in new_refs:
+ if not self.document.has_name(newr['refname']):
# Maybe refname is translated but target is not translated.
# Note: multiple translated refnames break link ordering.
if orphans:
- new['refname'] = orphans.pop(0)
+ newr['refname'] = orphans.pop(0)
else:
# orphan refnames is already empty!
# reference number is same in new_refs and old_refs.
pass
- self.document.note_refname(new)
+ self.document.note_refname(newr)
# refnamed footnote should use original 'ids'.
- def is_refnamed_footnote_ref(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.footnote_reference) and \
- 'refname' in node
+ is_refnamed_footnote_ref = NodeMatcher(nodes.footnote_reference, refname=Any)
old_foot_refs = node.traverse(is_refnamed_footnote_ref)
new_foot_refs = patch.traverse(is_refnamed_footnote_ref)
- refname_ids_map = {} # type: Dict[unicode, List[unicode]]
+ refname_ids_map = {} # type: Dict[str, List[str]]
if len(old_foot_refs) != len(new_foot_refs):
old_foot_ref_rawsources = [ref.rawsource for ref in old_foot_refs]
new_foot_ref_rawsources = [ref.rawsource for ref in new_foot_refs]
@@ -372,20 +364,17 @@ class Locale(SphinxTransform):
' original: {0}, translated: {1}')
.format(old_foot_ref_rawsources, new_foot_ref_rawsources),
location=node)
- for old in old_foot_refs:
- refname_ids_map.setdefault(old["refname"], []).append(old["ids"])
- for new in new_foot_refs:
- refname = new["refname"]
+ for oldf in old_foot_refs:
+ refname_ids_map.setdefault(oldf["refname"], []).append(oldf["ids"])
+ for newf in new_foot_refs:
+ refname = newf["refname"]
if refname_ids_map.get(refname):
- new["ids"] = refname_ids_map[refname].pop(0)
+ newf["ids"] = refname_ids_map[refname].pop(0)
# citation should use original 'ids'.
- def is_citation_ref(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.citation_reference) and \
- 'refname' in node
- old_cite_refs = node.traverse(is_citation_ref)
- new_cite_refs = patch.traverse(is_citation_ref)
+ is_citation_ref = NodeMatcher(nodes.citation_reference, refname=Any)
+ old_cite_refs = node.traverse(is_citation_ref) # type: List[nodes.citation_reference] # NOQA
+ new_cite_refs = patch.traverse(is_citation_ref) # type: List[nodes.citation_reference] # NOQA
refname_ids_map = {}
if len(old_cite_refs) != len(new_cite_refs):
old_cite_ref_rawsources = [ref.rawsource for ref in old_cite_refs]
@@ -394,29 +383,29 @@ class Locale(SphinxTransform):
' original: {0}, translated: {1}')
.format(old_cite_ref_rawsources, new_cite_ref_rawsources),
location=node)
- for old in old_cite_refs:
- refname_ids_map.setdefault(old["refname"], []).append(old["ids"])
- for new in new_cite_refs:
- refname = new["refname"]
+ for oldc in old_cite_refs:
+ refname_ids_map.setdefault(oldc["refname"], []).append(oldc["ids"])
+ for newc in new_cite_refs:
+ refname = newc["refname"]
if refname_ids_map.get(refname):
- new["ids"] = refname_ids_map[refname].pop()
+ newc["ids"] = refname_ids_map[refname].pop()
# Original pending_xref['reftarget'] contain not-translated
# target name, new pending_xref must use original one.
# This code restricts to change ref-targets in the translation.
- old_refs = node.traverse(addnodes.pending_xref)
- new_refs = patch.traverse(addnodes.pending_xref)
+ old_xrefs = node.traverse(addnodes.pending_xref)
+ new_xrefs = patch.traverse(addnodes.pending_xref)
xref_reftarget_map = {}
- if len(old_refs) != len(new_refs):
- old_ref_rawsources = [ref.rawsource for ref in old_refs]
- new_ref_rawsources = [ref.rawsource for ref in new_refs]
+ if len(old_xrefs) != len(new_xrefs):
+ old_xref_rawsources = [xref.rawsource for xref in old_xrefs]
+ new_xref_rawsources = [xref.rawsource for xref in new_xrefs]
logger.warning(__('inconsistent term references in translated message.' +
' original: {0}, translated: {1}')
- .format(old_ref_rawsources, new_ref_rawsources),
+ .format(old_xref_rawsources, new_xref_rawsources),
location=node)
def get_ref_key(node):
- # type: (nodes.Node) -> Tuple[unicode, unicode, unicode]
+ # type: (addnodes.pending_xref) -> Tuple[str, str, str]
case = node["refdomain"], node["reftype"]
if case == ('std', 'term'):
return None
@@ -426,11 +415,11 @@ class Locale(SphinxTransform):
node["reftype"],
node['reftarget'],)
- for old in old_refs:
+ for old in old_xrefs:
key = get_ref_key(old)
if key:
xref_reftarget_map[key] = old.attributes
- for new in new_refs:
+ for new in new_xrefs:
key = get_ref_key(new)
# Copy attributes to keep original node behavior. Especially
# copying 'reftarget', 'py:module', 'py:class' are needed.
@@ -458,12 +447,12 @@ class Locale(SphinxTransform):
if 'index' in self.config.gettext_additional_targets:
# Extract and translate messages for index entries.
for node, entries in traverse_translatable_index(self.document):
- new_entries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]] # NOQA
+ new_entries = [] # type: List[Tuple[str, str, str, str, str]]
for type, msg, tid, main, key_ in entries:
msg_parts = split_index_msg(type, msg)
msgstr_parts = []
for part in msg_parts:
- msgstr = catalog.gettext(part) # type: ignore
+ msgstr = catalog.gettext(part)
if not msgstr:
msgstr = part
msgstr_parts.append(msgstr)
@@ -474,11 +463,8 @@ class Locale(SphinxTransform):
node['entries'] = new_entries
# remove translated attribute that is used for avoiding double translation.
- def has_translatable(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, nodes.Element) and 'translated' in node
- for node in self.document.traverse(has_translatable):
- node.delattr('translated')
+ for translated in self.document.traverse(NodeMatcher(translated=Any)): # type: nodes.Element # NOQA
+ translated.delattr('translated')
class RemoveTranslatableInline(SphinxTransform):
@@ -487,12 +473,13 @@ class RemoveTranslatableInline(SphinxTransform):
"""
default_priority = 999
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
from sphinx.builders.gettext import MessageCatalogBuilder
if isinstance(self.app.builder, MessageCatalogBuilder):
return
- for inline in self.document.traverse(nodes.inline):
- if 'translatable' in inline:
- inline.parent.remove(inline)
- inline.parent += inline.children
+
+ matcher = NodeMatcher(nodes.inline, translatable=Any)
+ for inline in self.document.traverse(matcher): # type: nodes.inline
+ inline.parent.remove(inline)
+ inline.parent += inline.children
diff --git a/sphinx/transforms/post_transforms/__init__.py b/sphinx/transforms/post_transforms/__init__.py
index 6e53fec1d..eb6da0114 100644
--- a/sphinx/transforms/post_transforms/__init__.py
+++ b/sphinx/transforms/post_transforms/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.post_transforms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,13 +8,11 @@
:license: BSD, see LICENSE for details.
"""
-import warnings
+from typing import cast
from docutils import nodes
-from docutils.utils import get_source_line
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.environment import NoUri
from sphinx.locale import __
from sphinx.transforms import SphinxTransform
@@ -32,35 +29,6 @@ if False:
logger = logging.getLogger(__name__)
-class DocReferenceMigrator(SphinxTransform):
- """Migrate :doc: reference to std domain."""
-
- default_priority = 5 # before ReferencesResolver
-
- def apply(self):
- # type: () -> None
- for node in self.document.traverse(addnodes.pending_xref):
- if node.get('reftype') == 'doc' and node.get('refdomain') is None:
- source, line = get_source_line(node)
- if source and line:
- location = "%s:%s" % (source, line)
- elif source:
- location = "%s:" % source
- elif line:
- location = "<unknown>:%s" % line
- else:
- location = None
-
- message = ('Invalid pendig_xref node detected. '
- ':doc: reference should have refdomain=std attribute.')
- if location:
- warnings.warn("%s: %s" % (location, message),
- RemovedInSphinx20Warning)
- else:
- warnings.warn(message, RemovedInSphinx20Warning)
- node['refdomain'] = 'std'
-
-
class ReferencesResolver(SphinxTransform):
"""
Resolves cross-references on doctrees.
@@ -68,10 +36,10 @@ class ReferencesResolver(SphinxTransform):
default_priority = 10
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(addnodes.pending_xref):
- contnode = node[0].deepcopy()
+ contnode = cast(nodes.TextElement, node[0].deepcopy())
newnode = None
typ = node['reftype']
@@ -104,11 +72,11 @@ class ReferencesResolver(SphinxTransform):
node.replace_self(newnode or contnode)
def resolve_anyref(self, refdoc, node, contnode):
- # type: (unicode, nodes.Node, nodes.Node) -> nodes.Node
+ # type: (str, addnodes.pending_xref, nodes.TextElement) -> nodes.Element
"""Resolve reference generated by the "any" role."""
stddomain = self.env.get_domain('std')
target = node['reftarget']
- results = [] # type: List[Tuple[unicode, nodes.Node]]
+ results = [] # type: List[Tuple[str, nodes.Element]]
# first, try resolving as :doc:
doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
'doc', target, node, contnode)
@@ -146,13 +114,15 @@ class ReferencesResolver(SphinxTransform):
# Override "any" class with the actual role type to get the styling
# approximately correct.
res_domain = res_role.split(':')[0]
- if newnode and newnode[0].get('classes'):
+ if (len(newnode) > 0 and
+ isinstance(newnode[0], nodes.Element) and
+ newnode[0].get('classes')):
newnode[0]['classes'].append(res_domain)
newnode[0]['classes'].append(res_role.replace(':', '-'))
return newnode
def warn_missing_reference(self, refdoc, typ, target, node, domain):
- # type: (unicode, unicode, unicode, nodes.Node, Domain) -> None
+ # type: (str, str, str, addnodes.pending_xref, Domain) -> None
warn = node.get('refwarn')
if self.config.nitpicky:
warn = True
@@ -180,8 +150,8 @@ class ReferencesResolver(SphinxTransform):
class OnlyNodeTransform(SphinxTransform):
default_priority = 50
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
@@ -190,8 +160,7 @@ class OnlyNodeTransform(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
- app.add_post_transform(DocReferenceMigrator)
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ReferencesResolver)
app.add_post_transform(OnlyNodeTransform)
diff --git a/sphinx/transforms/post_transforms/code.py b/sphinx/transforms/post_transforms/code.py
index 86143ffb4..610437ac6 100644
--- a/sphinx/transforms/post_transforms/code.py
+++ b/sphinx/transforms/post_transforms/code.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.post_transforms.code
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -14,7 +13,6 @@ from typing import NamedTuple
from docutils import nodes
from pygments.lexers import PythonConsoleLexer, guess_lexer
-from six import text_type
from sphinx import addnodes
from sphinx.ext import doctest
@@ -26,7 +24,7 @@ if False:
from sphinx.application import Sphinx # NOQA
-HighlightSetting = NamedTuple('HighlightSetting', [('language', text_type),
+HighlightSetting = NamedTuple('HighlightSetting', [('language', str),
('lineno_threshold', int)])
@@ -40,7 +38,8 @@ class HighlightLanguageTransform(SphinxTransform):
"""
default_priority = 400
- def apply(self):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
visitor = HighlightLanguageVisitor(self.document,
self.config.highlight_language)
self.document.walkabout(visitor)
@@ -51,10 +50,10 @@ class HighlightLanguageTransform(SphinxTransform):
class HighlightLanguageVisitor(nodes.NodeVisitor):
def __init__(self, document, default_language):
- # type: (nodes.document, unicode) -> None
+ # type: (nodes.document, str) -> None
self.default_setting = HighlightSetting(default_language, sys.maxsize)
self.settings = [] # type: List[HighlightSetting]
- nodes.NodeVisitor.__init__(self, document)
+ super().__init__(document)
def unknown_visit(self, node):
# type: (nodes.Node) -> None
@@ -105,7 +104,8 @@ class TrimDoctestFlagsTransform(SphinxTransform):
"""
default_priority = HighlightLanguageTransform.default_priority + 1
- def apply(self):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
if not self.config.trim_doctest_flags:
return
@@ -139,7 +139,7 @@ class TrimDoctestFlagsTransform(SphinxTransform):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(HighlightLanguageTransform)
app.add_post_transform(TrimDoctestFlagsTransform)
diff --git a/sphinx/transforms/post_transforms/compat.py b/sphinx/transforms/post_transforms/compat.py
index 8065cf91c..d2e5eb72a 100644
--- a/sphinx/transforms/post_transforms/compat.py
+++ b/sphinx/transforms/post_transforms/compat.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.post_transforms.compat
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -40,16 +39,16 @@ class MathNodeMigrator(SphinxTransform):
"""
default_priority = 999
- def apply(self):
- # type: () -> None
- for node in self.document.traverse(nodes.math):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ for math_node in self.document.traverse(nodes.math):
# case: old styled ``math`` node generated by old extensions
- if len(node) == 0:
+ if len(math_node) == 0:
warnings.warn("math node for Sphinx was replaced by docutils'. "
"Please use ``docutils.nodes.math`` instead.",
RemovedInSphinx30Warning)
- equation = node['latex']
- node += nodes.Text(equation, equation)
+ equation = math_node['latex']
+ math_node += nodes.Text(equation, equation)
translator = self.app.builder.get_translator_class()
if hasattr(translator, 'visit_displaymath') and translator != XMLTranslator:
@@ -57,34 +56,35 @@ class MathNodeMigrator(SphinxTransform):
warnings.warn("Translator for %s does not support math_block node'. "
"Please update your extension." % translator,
RemovedInSphinx30Warning)
- for node in self.document.traverse(math_block):
- alt = displaymath(latex=node.astext(),
- number=node.get('number'),
- label=node.get('label'),
- nowrap=node.get('nowrap'),
- docname=node.get('docname'))
- node.replace(alt)
+ for old_math_block_node in self.document.traverse(math_block):
+ alt = displaymath(latex=old_math_block_node.astext(),
+ number=old_math_block_node.get('number'),
+ label=old_math_block_node.get('label'),
+ nowrap=old_math_block_node.get('nowrap'),
+ docname=old_math_block_node.get('docname'))
+ old_math_block_node.replace_self(alt)
elif getattr(self.app.builder, 'math_renderer_name', None) == 'unknown':
# case: math extension provides old styled math renderer
- for node in self.document.traverse(nodes.math_block):
- node['latex'] = node.astext()
+ for math_block_node in self.document.traverse(nodes.math_block):
+ math_block_node['latex'] = math_block_node.astext()
# case: old styled ``displaymath`` node generated by old extensions
- for node in self.document.traverse(math_block):
- if len(node) == 0:
+ for math_block_node in self.document.traverse(math_block):
+ if len(math_block_node) == 0:
warnings.warn("math node for Sphinx was replaced by docutils'. "
"Please use ``docutils.nodes.math_block`` instead.",
RemovedInSphinx30Warning)
- if isinstance(node, displaymath):
- newnode = nodes.math_block('', node['latex'], **node.attributes)
- node.replace_self(newnode)
+ if isinstance(math_block_node, displaymath):
+ newnode = nodes.math_block('', math_block_node['latex'],
+ **math_block_node.attributes)
+ math_block_node.replace_self(newnode)
else:
- latex = node['latex']
- node += nodes.Text(latex, latex)
+ latex = math_block_node['latex']
+ math_block_node += nodes.Text(latex, latex)
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(MathNodeMigrator)
return {
diff --git a/sphinx/transforms/post_transforms/images.py b/sphinx/transforms/post_transforms/images.py
index a6b82f262..a863a00fa 100644
--- a/sphinx/transforms/post_transforms/images.py
+++ b/sphinx/transforms/post_transforms/images.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.post_transforms.images
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -14,7 +13,6 @@ from hashlib import sha1
from math import ceil
from docutils import nodes
-from six import text_type
from sphinx.locale import __
from sphinx.transforms import SphinxTransform
@@ -35,23 +33,23 @@ MAX_FILENAME_LEN = 32
class BaseImageConverter(SphinxTransform):
- def apply(self):
- # type: () -> None
+    def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(nodes.image):
if self.match(node):
self.handle(node)
def match(self, node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.image) -> bool
return True
def handle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.image) -> None
pass
@property
def imagedir(self):
- # type: () -> unicode
+ # type: () -> str
return os.path.join(self.app.doctreedir, 'images')
@@ -59,7 +57,7 @@ class ImageDownloader(BaseImageConverter):
default_priority = 100
def match(self, node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.image) -> bool
if self.app.builder.supported_image_types == []:
return False
elif self.app.builder.supported_remote_images:
@@ -68,19 +66,19 @@ class ImageDownloader(BaseImageConverter):
return '://' in node['uri']
def handle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.image) -> None
try:
basename = os.path.basename(node['uri'])
if '?' in basename:
basename = basename.split('?')[0]
if basename == '' or len(basename) > MAX_FILENAME_LEN:
filename, ext = os.path.splitext(node['uri'])
- basename = sha1(filename.encode("utf-8")).hexdigest() + ext
+ basename = sha1(filename.encode()).hexdigest() + ext
- dirname = node['uri'].replace('://', '/').translate({ord("?"): u"/",
- ord("&"): u"/"})
+ dirname = node['uri'].replace('://', '/').translate({ord("?"): "/",
+ ord("&"): "/"})
if len(dirname) > MAX_FILENAME_LEN:
- dirname = sha1(dirname.encode('utf-8')).hexdigest()
+ dirname = sha1(dirname.encode()).hexdigest()
ensuredir(os.path.join(self.imagedir, dirname))
path = os.path.join(self.imagedir, dirname, basename)
@@ -119,15 +117,14 @@ class ImageDownloader(BaseImageConverter):
node['uri'] = path
self.app.env.images.add_file(self.env.docname, path)
except Exception as exc:
- logger.warning(__('Could not fetch remote image: %s [%s]') %
- (node['uri'], text_type(exc)))
+ logger.warning(__('Could not fetch remote image: %s [%s]') % (node['uri'], exc))
class DataURIExtractor(BaseImageConverter):
default_priority = 150
def match(self, node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.image) -> bool
if self.app.builder.supported_remote_images == []:
return False
elif self.app.builder.supported_data_uri_images is True:
@@ -136,7 +133,7 @@ class DataURIExtractor(BaseImageConverter):
return 'data:' in node['uri']
def handle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.image) -> None
image = parse_data_uri(node['uri'])
ext = get_image_extension(image.mimetype)
if ext is None:
@@ -159,7 +156,7 @@ class DataURIExtractor(BaseImageConverter):
def get_filename_for(filename, mimetype):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
basename = os.path.basename(filename)
return os.path.splitext(basename)[0] + get_image_extension(mimetype)
@@ -196,17 +193,17 @@ class ImageConverter(BaseImageConverter):
#: ('image/gif', 'image/png'),
#: ('application/pdf', 'image/png'),
#: ]
- conversion_rules = [] # type: List[Tuple[unicode, unicode]]
+ conversion_rules = [] # type: List[Tuple[str, str]]
def __init__(self, *args, **kwargs):
# type: (Any, Any) -> None
self.available = None # type: bool
# the converter is available or not.
# Will be checked at first conversion
- BaseImageConverter.__init__(self, *args, **kwargs) # type: ignore
+ super().__init__(*args, **kwargs)
def match(self, node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.image) -> bool
if self.available is None:
self.available = self.is_available()
@@ -223,7 +220,7 @@ class ImageConverter(BaseImageConverter):
return False
def get_conversion_rule(self, node):
- # type: (nodes.Node) -> Tuple[unicode, unicode]
+ # type: (nodes.image) -> Tuple[str, str]
for candidate in self.guess_mimetypes(node):
for supported in self.app.builder.supported_image_types:
rule = (candidate, supported)
@@ -238,7 +235,7 @@ class ImageConverter(BaseImageConverter):
raise NotImplementedError()
def guess_mimetypes(self, node):
- # type: (nodes.Node) -> List[unicode]
+ # type: (nodes.image) -> List[str]
if '?' in node['candidates']:
return []
elif '*' in node['candidates']:
@@ -248,7 +245,7 @@ class ImageConverter(BaseImageConverter):
return node['candidates'].keys()
def handle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.image) -> None
_from, _to = self.get_conversion_rule(node)
if _from in node['candidates']:
@@ -272,7 +269,7 @@ class ImageConverter(BaseImageConverter):
self.env.images.add_file(self.env.docname, destpath)
def convert(self, _from, _to):
- # type: (unicode, unicode) -> bool
+ # type: (str, str) -> bool
"""Convert a image file to expected format.
*_from* is a path for source image file, and *_to* is a path for
@@ -282,7 +279,7 @@ class ImageConverter(BaseImageConverter):
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
app.add_post_transform(ImageDownloader)
app.add_post_transform(DataURIExtractor)
diff --git a/sphinx/transforms/references.py b/sphinx/transforms/references.py
index 40efbf615..e1e694163 100644
--- a/sphinx/transforms/references.py
+++ b/sphinx/transforms/references.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.transforms.references
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -11,10 +10,13 @@
from docutils import nodes
from docutils.transforms.references import Substitutions
-from six import itervalues
from sphinx.transforms import SphinxTransform
+if False:
+ # For type annotation
+ from typing import Any # NOQA
+
class SubstitutionDefinitionsRemover(SphinxTransform):
"""Remove ``substitution_definition node from doctrees."""
@@ -22,8 +24,8 @@ class SubstitutionDefinitionsRemover(SphinxTransform):
# should be invoked after Substitutions process
default_priority = Substitutions.default_priority + 1
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
for node in self.document.traverse(nodes.substitution_definition):
node.parent.remove(node)
@@ -32,7 +34,7 @@ class SphinxDomains(SphinxTransform):
"""Collect objects to Sphinx domains for cross references."""
default_priority = 850
- def apply(self):
- # type: () -> None
- for domain in itervalues(self.env.domains):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ for domain in self.env.domains.values():
domain.process_doc(self.env, self.env.docname, self.document)
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 019de997d..26d19ac6a 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util
~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import fnmatch
import os
@@ -25,18 +23,16 @@ from datetime import datetime
from hashlib import md5
from os import path
from time import mktime, strptime
+from urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, urlencode
from docutils.utils import relative_path
-from six import text_type, binary_type, itervalues
-from six.moves import range
-from six.moves.urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, urlencode
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError
+from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.console import strip_colors, colorize, bold, term_width_line # type: ignore
from sphinx.util.fileutil import copy_asset_file
-from sphinx.util.osutil import fs_encoding
from sphinx.util import smartypants # noqa
# import other utilities; partly for backwards compatibility, so don't
@@ -64,22 +60,20 @@ url_re = re.compile(r'(?P<schema>.+)://.*') # type: Pattern
# High-level utility functions.
def docname_join(basedocname, docname):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
return posixpath.normpath(
posixpath.join('/' + basedocname, '..', docname))[1:]
def path_stabilize(filepath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
- if isinstance(newpath, text_type):
- newpath = unicodedata.normalize('NFC', newpath)
- return newpath
+ return unicodedata.normalize('NFC', newpath)
def get_matching_files(dirname, exclude_matchers=()):
- # type: (unicode, Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode]
+ # type: (str, Tuple[Callable[[str], bool], ...]) -> Iterable[str]
"""Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
@@ -88,13 +82,13 @@ def get_matching_files(dirname, exclude_matchers=()):
dirname = path.normpath(path.abspath(dirname))
dirlen = len(dirname) + 1 # exclude final os.path.sep
- for root, dirs, files in walk(dirname, followlinks=True):
+ for root, dirs, files in os.walk(dirname, followlinks=True):
relativeroot = root[dirlen:]
qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
- for dn in dirs) # type: Iterable[Tuple[int, unicode]]
+ for dn in dirs) # type: Iterable[Tuple[int, str]]
qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
- for fn in files) # type: Iterable[Tuple[int, unicode]]
+ for fn in files) # type: Iterable[Tuple[int, str]]
for matcher in exclude_matchers:
qdirs = [entry for entry in qdirs if not matcher(entry[1])]
qfiles = [entry for entry in qfiles if not matcher(entry[1])]
@@ -106,12 +100,14 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffixes, exclude_matchers=()):
- # type: (unicode, List[unicode], Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode] # NOQA
+ # type: (str, List[str], Tuple[Callable[[str], bool], ...]) -> Iterable[str] # NOQA
"""Get all file names (without suffixes) matching a suffix in a directory,
recursively.
Exclude files and dirs matching a pattern in *exclude_patterns*.
"""
+ warnings.warn('get_matching_docs() is now deprecated. Use get_matching_files() instead.',
+ RemovedInSphinx40Warning)
suffixpatterns = ['*' + s for s in suffixes]
for filename in get_matching_files(dirname, exclude_matchers):
for suffixpattern in suffixpatterns:
@@ -128,10 +124,10 @@ class FilenameUniqDict(dict):
"""
def __init__(self):
# type: () -> None
- self._existing = set() # type: Set[unicode]
+ self._existing = set() # type: Set[str]
def add_file(self, docname, newfile):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if newfile in self:
self[newfile][0].add(docname)
return self[newfile][1]
@@ -146,7 +142,7 @@ class FilenameUniqDict(dict):
return uniquename
def purge_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for filename, (docs, unique) in list(self.items()):
docs.discard(docname)
if not docs:
@@ -154,17 +150,17 @@ class FilenameUniqDict(dict):
self._existing.discard(unique)
def merge_other(self, docnames, other):
- # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
+ # type: (Set[str], Dict[str, Tuple[Set[str], Any]]) -> None
for filename, (docs, unique) in other.items():
for doc in docs & set(docnames):
self.add_file(doc, filename)
def __getstate__(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
return self._existing
def __setstate__(self, state):
- # type: (Set[unicode]) -> None
+ # type: (Set[str]) -> None
self._existing = state
@@ -176,9 +172,9 @@ class DownloadFiles(dict):
"""
def add_file(self, docname, filename):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if filename not in self:
- digest = md5(filename.encode('utf-8')).hexdigest()
+ digest = md5(filename.encode()).hexdigest()
dest = '%s/%s' % (digest, os.path.basename(filename))
self[filename] = (set(), dest)
@@ -186,14 +182,14 @@ class DownloadFiles(dict):
return self[filename][1]
def purge_doc(self, docname):
- # type: (unicode) -> None
+ # type: (str) -> None
for filename, (docs, dest) in list(self.items()):
docs.discard(docname)
if not docs:
del self[filename]
def merge_other(self, docnames, other):
- # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
+ # type: (Set[str], Dict[str, Tuple[Set[str], Any]]) -> None
for filename, (docs, dest) in other.items():
for docname in docs & set(docnames):
self.add_file(docname, filename)
@@ -201,7 +197,7 @@ class DownloadFiles(dict):
def copy_static_entry(source, targetdir, builder, context={},
exclude_matchers=(), level=0):
- # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
+ # type: (str, str, Any, Dict, Tuple[Callable, ...], int) -> None
"""[DEPRECATED] Copy a HTML builder static_path entry from source to targetdir.
Handles all possible cases of files, directories and subdirectories.
@@ -217,8 +213,7 @@ def copy_static_entry(source, targetdir, builder, context={},
if path.isfile(source):
copy_asset_file(source, targetdir, context, builder.templates)
elif path.isdir(source):
- if not path.isdir(targetdir):
- os.mkdir(targetdir)
+ ensuredir(targetdir)
for entry in os.listdir(source):
if entry.startswith('.'):
continue
@@ -242,7 +237,7 @@ _DEBUG_HEADER = '''\
def save_traceback(app):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""Save the current exception's traceback in a temporary file."""
import sphinx
import jinja2
@@ -257,7 +252,7 @@ def save_traceback(app):
last_msgs = ''
if app is not None:
last_msgs = '\n'.join(
- '# %s' % strip_colors(force_decode(s, 'utf-8')).strip() # type: ignore
+ '# %s' % strip_colors(s).strip()
for s in app.messagelog)
os.write(fd, (_DEBUG_HEADER %
(sphinx.__display_version__,
@@ -265,22 +260,20 @@ def save_traceback(app):
platform.python_implementation(),
docutils.__version__, docutils.__version_details__,
jinja2.__version__, # type: ignore
- last_msgs)).encode('utf-8'))
+ last_msgs)).encode())
if app is not None:
- for ext in itervalues(app.extensions):
+ for ext in app.extensions.values():
modfile = getattr(ext.module, '__file__', 'unknown')
- if isinstance(modfile, bytes):
- modfile = modfile.decode(fs_encoding, 'replace')
if ext.version != 'builtin':
os.write(fd, ('# %s (%s) from %s\n' %
- (ext.name, ext.version, modfile)).encode('utf-8'))
- os.write(fd, exc_format.encode('utf-8'))
+ (ext.name, ext.version, modfile)).encode())
+ os.write(fd, exc_format.encode())
os.close(fd)
return path
def get_module_source(modname):
- # type: (str) -> Tuple[unicode, unicode]
+ # type: (str) -> Tuple[str, str]
"""Try to find the source code for a module.
Can return ('file', 'filename') in which case the source is in the given
@@ -328,7 +321,7 @@ def get_module_source(modname):
def get_full_modname(modname, attribute):
- # type: (str, unicode) -> unicode
+ # type: (str, str) -> str
if modname is None:
# Prevents a TypeError: if the last getattr() call will return None
# then it's better to return it directly
@@ -351,11 +344,11 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
- # type: (Callable) -> unicode
+ # type: (Callable[[], bytes]) -> str
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
- # type: () -> unicode
+ # type: () -> bytes
try:
return readline()
except StopIteration:
@@ -374,7 +367,7 @@ def detect_encoding(readline):
return orig_enc
def find_cookie(line):
- # type: (unicode) -> unicode
+ # type: (bytes) -> str
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
@@ -404,9 +397,31 @@ def detect_encoding(readline):
return default
+class UnicodeDecodeErrorHandler:
+ """Custom error handler for open() that warns and replaces."""
+
+ def __init__(self, docname):
+ # type: (str) -> None
+ self.docname = docname
+
+ def __call__(self, error):
+        # type: (UnicodeDecodeError) -> Tuple[str, int]
+ linestart = error.object.rfind(b'\n', 0, error.start)
+ lineend = error.object.find(b'\n', error.start)
+ if lineend == -1:
+ lineend = len(error.object)
+ lineno = error.object.count(b'\n', 0, error.start) + 1
+ logger.warning(__('undecodable source characters, replacing with "?": %r'),
+ (error.object[linestart + 1:error.start] + b'>>>' +
+ error.object[error.start:error.end] + b'<<<' +
+ error.object[error.end:lineend]),
+ location=(self.docname, lineno))
+ return ('?', error.end)
+
+
# Low-level utility functions and classes.
-class Tee(object):
+class Tee:
"""
File-like object writing to two streams.
"""
@@ -416,7 +431,7 @@ class Tee(object):
self.stream2 = stream2
def write(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
self.stream1.write(text)
self.stream2.write(text)
@@ -429,7 +444,7 @@ class Tee(object):
def parselinenos(spec, total):
- # type: (unicode, int) -> List[int]
+ # type: (str, int) -> List[int]
"""Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
@@ -457,15 +472,17 @@ def parselinenos(spec, total):
def force_decode(string, encoding):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Forcibly get a unicode string out of a bytestring."""
- if isinstance(string, binary_type):
+ warnings.warn('force_decode() is deprecated.',
+ RemovedInSphinx40Warning, stacklevel=2)
+ if isinstance(string, bytes):
try:
if encoding:
string = string.decode(encoding)
else:
# try decoding with utf-8, should only work for real UTF-8
- string = string.decode('utf-8')
+ string = string.decode()
except UnicodeError:
# last resort -- can't fail
string = string.decode('latin1')
@@ -473,21 +490,26 @@ def force_decode(string, encoding):
class attrdict(dict):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ warnings.warn('The attrdict class is deprecated.',
+ RemovedInSphinx40Warning, stacklevel=2)
+
def __getattr__(self, key):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self[key]
def __setattr__(self, key, val):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
self[key] = val
def __delattr__(self, key):
- # type: (unicode) -> None
+ # type: (str) -> None
del self[key]
def rpartition(s, t):
- # type: (unicode, unicode) -> Tuple[unicode, unicode]
+ # type: (str, str) -> Tuple[str, str]
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
@@ -496,7 +518,7 @@ def rpartition(s, t):
def split_into(n, type, value):
- # type: (int, unicode, unicode) -> List[unicode]
+ # type: (int, str, str) -> List[str]
"""Split an index entry into a given number of parts at semicolons."""
parts = [x.strip() for x in value.split(';', n - 1)]
if sum(1 for part in parts if part) < n:
@@ -505,7 +527,7 @@ def split_into(n, type, value):
def split_index_msg(type, value):
- # type: (unicode, unicode) -> List[unicode]
+ # type: (str, str) -> List[str]
# new entry types must be listed in directives/other.py!
if type == 'single':
try:
@@ -527,18 +549,18 @@ def split_index_msg(type, value):
def format_exception_cut_frames(x=1):
- # type: (int) -> unicode
+ # type: (int) -> str
"""Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
# res = ['Traceback (most recent call last):\n']
- res = [] # type: List[unicode]
+ res = [] # type: List[str]
tbres = traceback.format_tb(tb)
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
return ''.join(res)
-class PeekableIterator(object):
+class PeekableIterator:
"""
An iterator which wraps any iterable and makes it possible to peek to see
what's the next item.
@@ -547,6 +569,8 @@ class PeekableIterator(object):
# type: (Iterable) -> None
self.remaining = deque() # type: deque
self._iterator = iter(iterable)
+ warnings.warn('PeekableIterator is deprecated.',
+ RemovedInSphinx40Warning, stacklevel=2)
def __iter__(self):
# type: () -> PeekableIterator
@@ -577,7 +601,7 @@ class PeekableIterator(object):
def import_object(objname, source=None):
- # type: (str, unicode) -> Any
+ # type: (str, str) -> Any
try:
module, name = objname.rsplit('.', 1)
except ValueError as err:
@@ -597,26 +621,26 @@ def import_object(objname, source=None):
def encode_uri(uri):
- # type: (unicode) -> unicode
- split = list(urlsplit(uri)) # type: List[unicode]
+ # type: (str) -> str
+ split = list(urlsplit(uri))
split[1] = split[1].encode('idna').decode('ascii')
- split[2] = quote_plus(split[2].encode('utf-8'), '/')
- query = list((q, v.encode('utf-8')) for (q, v) in parse_qsl(split[3]))
+ split[2] = quote_plus(split[2].encode(), '/')
+ query = list((q, v.encode()) for (q, v) in parse_qsl(split[3]))
split[3] = urlencode(query)
return urlunsplit(split)
def display_chunk(chunk):
- # type: (Any) -> unicode
+ # type: (Any) -> str
if isinstance(chunk, (list, tuple)):
if len(chunk) == 1:
- return text_type(chunk[0])
+ return str(chunk[0])
return '%s .. %s' % (chunk[0], chunk[-1])
- return text_type(chunk)
+ return str(chunk)
def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=display_chunk):
- # type: (Iterable, unicode, str, Callable[[Any], unicode]) -> Iterator
+ # type: (Iterable, str, str, Callable[[Any], str]) -> Iterator
l = 0
for item in iterable:
if l == 0:
@@ -632,10 +656,9 @@ def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=dis
# new version with progress info
def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0,
stringify_func=display_chunk):
- # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable # NOQA
+ # type: (Iterable, str, str, int, int, Callable[[Any], str]) -> Iterable
if length == 0:
- for item in old_status_iterator(iterable, summary, color, stringify_func):
- yield item
+ yield from old_status_iterator(iterable, summary, color, stringify_func)
return
l = 0
summary = bold(summary)
@@ -653,7 +676,7 @@ def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0,
def epoch_to_rfc1123(epoch):
- # type: (float) -> unicode
+ # type: (float) -> str
"""Convert datetime format epoch to RFC1123."""
from babel.dates import format_datetime
@@ -670,33 +693,29 @@ def rfc1123_to_epoch(rfc1123):
def xmlname_checker():
# type: () -> Pattern
# https://www.w3.org/TR/REC-xml/#NT-Name
- # Only Python 3.3 or newer support character code in regular expression
name_start_chars = [
- u':', [u'A', u'Z'], u'_', [u'a', u'z'], [u'\u00C0', u'\u00D6'],
- [u'\u00D8', u'\u00F6'], [u'\u00F8', u'\u02FF'], [u'\u0370', u'\u037D'],
- [u'\u037F', u'\u1FFF'], [u'\u200C', u'\u200D'], [u'\u2070', u'\u218F'],
- [u'\u2C00', u'\u2FEF'], [u'\u3001', u'\uD7FF'], [u'\uF900', u'\uFDCF'],
- [u'\uFDF0', u'\uFFFD']]
-
- if sys.version_info.major == 3:
- name_start_chars.append([u'\U00010000', u'\U000EFFFF'])
+ ':', ['A', 'Z'], '_', ['a', 'z'], ['\u00C0', '\u00D6'],
+ ['\u00D8', '\u00F6'], ['\u00F8', '\u02FF'], ['\u0370', '\u037D'],
+ ['\u037F', '\u1FFF'], ['\u200C', '\u200D'], ['\u2070', '\u218F'],
+ ['\u2C00', '\u2FEF'], ['\u3001', '\uD7FF'], ['\uF900', '\uFDCF'],
+ ['\uFDF0', '\uFFFD'], ['\U00010000', '\U000EFFFF']]
name_chars = [
- u"\\-", u"\\.", [u'0', u'9'], u'\u00B7', [u'\u0300', u'\u036F'],
- [u'\u203F', u'\u2040']
+ "\\-", "\\.", ['0', '9'], '\u00B7', ['\u0300', '\u036F'],
+ ['\u203F', '\u2040']
]
- def convert(entries, splitter=u'|'):
- # type: (Any, unicode) -> unicode
+ def convert(entries, splitter='|'):
+ # type: (Any, str) -> str
results = []
for entry in entries:
if isinstance(entry, list):
- results.append(u'[%s]' % convert(entry, u'-'))
+ results.append('[%s]' % convert(entry, '-'))
else:
results.append(entry)
return splitter.join(results)
start_chars_regex = convert(name_start_chars)
name_chars_regex = convert(name_chars)
- return re.compile(u'(%s)(%s|%s)*' % (
+ return re.compile('(%s)(%s|%s)*' % (
start_chars_regex, start_chars_regex, name_chars_regex))
diff --git a/sphinx/util/build_phase.py b/sphinx/util/build_phase.py
index e5a53551c..d2ed2e35f 100644
--- a/sphinx/util/build_phase.py
+++ b/sphinx/util/build_phase.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.build_phase
~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,10 +8,7 @@
:license: BSD, see LICENSE for details.
"""
-try:
- from enum import IntEnum
-except ImportError: # py27
- IntEnum = object # type: ignore
+from enum import IntEnum
class BuildPhase(IntEnum):
diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py
index 43ced1f5e..35b30f0bc 100644
--- a/sphinx/util/compat.py
+++ b/sphinx/util/compat.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.compat
~~~~~~~~~~~~~~~~~~
@@ -9,14 +8,14 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-
import sys
import warnings
-from six import string_types, iteritems
+from docutils.utils import get_source_line
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx import addnodes
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
+from sphinx.transforms import SphinxTransform
from sphinx.util import import_object
if False:
@@ -32,9 +31,9 @@ def deprecate_source_parsers(app, config):
warnings.warn('The config variable "source_parsers" is deprecated. '
'Please use app.add_source_parser() API instead.',
RemovedInSphinx30Warning)
- for suffix, parser in iteritems(config.source_parsers):
- if isinstance(parser, string_types):
- parser = import_object(parser, 'source parser') # type: ignore
+ for suffix, parser in config.source_parsers.items():
+ if isinstance(parser, str):
+ parser = import_object(parser, 'source parser')
app.add_source_parser(suffix, parser)
@@ -52,8 +51,24 @@ def register_application_for_autosummary(app):
autosummary._app = app
+class IndexEntriesMigrator(SphinxTransform):
+    """Migrating indexentries from old style (4 columns) to new style (5 columns)."""
+ default_priority = 700
+
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ for node in self.document.traverse(addnodes.index):
+ for entries in node['entries']:
+ if len(entries) == 4:
+ source, line = get_source_line(node)
+ warnings.warn('An old styled index node found: %r at (%s:%s)' %
+ (node, source, line), RemovedInSphinx40Warning)
+ entries.extend([None])
+
+
def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
+ # type: (Sphinx) -> Dict[str, Any]
+ app.add_transform(IndexEntriesMigrator)
app.connect('config-inited', deprecate_source_parsers)
app.connect('builder-inited', register_application_for_autosummary)
diff --git a/sphinx/util/console.py b/sphinx/util/console.py
index d62169adf..f32360c63 100644
--- a/sphinx/util/console.py
+++ b/sphinx/util/console.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.console
~~~~~~~~~~~~~~~~~~~
@@ -88,7 +87,7 @@ def coloron():
def colorize(name, text, input_mode=False):
- # type: (str, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
def escseq(name):
# Wrap escape sequence with ``\1`` and ``\2`` to let readline know
# it is non-printable characters
@@ -112,7 +111,7 @@ def strip_colors(s):
def create_color_func(name):
# type: (str) -> None
def inner(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return colorize(name, text)
globals()[name] = inner
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index 94968e148..063a89795 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.docfields
~~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,8 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
+
+from typing import List, Tuple, cast
from docutils import nodes
@@ -17,18 +17,19 @@ from sphinx import addnodes
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
+ from typing import Any, Dict, Type, Union # NOQA
from sphinx.domains import Domain # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.typing import TextlikeNode # NOQA
def _is_single_paragraph(node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.field_body) -> bool
"""True if the node only contains one paragraph (and system messages)."""
if len(node) == 0:
return False
elif len(node) > 1:
- for subnode in node[1:]:
+ for subnode in node[1:]: # type: nodes.Node
if not isinstance(subnode, nodes.system_message):
return False
if isinstance(node[0], nodes.paragraph):
@@ -36,7 +37,7 @@ def _is_single_paragraph(node):
return False
-class Field(object):
+class Field:
"""A doc field that is never grouped. It can have an argument or not, the
argument can be linked using a specified *rolename*. Field should be used
for doc fields that usually don't occur more than once.
@@ -54,7 +55,7 @@ class Field(object):
def __init__(self, name, names=(), label=None, has_arg=True, rolename=None,
bodyrolename=None):
- # type: (unicode, Tuple[unicode, ...], unicode, bool, unicode, unicode) -> None
+ # type: (str, Tuple[str, ...], str, bool, str, str) -> None
self.name = name
self.names = names
self.label = label
@@ -63,10 +64,10 @@ class Field(object):
self.bodyrolename = bodyrolename
def make_xref(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
- innernode=addnodes.literal_emphasis, # type: nodes.Node
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
+ innernode=addnodes.literal_emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
):
@@ -77,14 +78,14 @@ class Field(object):
reftype=rolename, reftarget=target)
refnode += contnode or innernode(target, target)
if env:
- env.domains[domain].process_field_xref(refnode)
+ env.get_domain(domain).process_field_xref(refnode)
return refnode
def make_xrefs(self,
- rolename, # type: unicode
- domain, # type: unicode
- target, # type: unicode
- innernode=addnodes.literal_emphasis, # type: nodes.Node
+ rolename, # type: str
+ domain, # type: str
+ target, # type: str
+ innernode=addnodes.literal_emphasis, # type: Type[TextlikeNode]
contnode=None, # type: nodes.Node
env=None, # type: BuildEnvironment
):
@@ -92,12 +93,12 @@ class Field(object):
return [self.make_xref(rolename, domain, target, innernode, contnode, env)]
def make_entry(self, fieldarg, content):
- # type: (List, unicode) -> Tuple[List, unicode]
+ # type: (str, List[nodes.Node]) -> Tuple[str, List[nodes.Node]]
return (fieldarg, content)
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
item, # type: Tuple
env=None, # type: BuildEnvironment
):
@@ -137,13 +138,13 @@ class GroupedField(Field):
def __init__(self, name, names=(), label=None, rolename=None,
can_collapse=False):
- # type: (unicode, Tuple[unicode, ...], unicode, unicode, bool) -> None
- Field.__init__(self, name, names, label, True, rolename)
+ # type: (str, Tuple[str, ...], str, str, bool) -> None
+ super().__init__(name, names, label, True, rolename)
self.can_collapse = can_collapse
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
items, # type: Tuple
env=None, # type: BuildEnvironment
):
@@ -159,7 +160,8 @@ class GroupedField(Field):
listnode += nodes.list_item('', par)
if len(items) == 1 and self.can_collapse:
- fieldbody = nodes.field_body('', listnode[0][0])
+ list_item = cast(nodes.list_item, listnode[0])
+ fieldbody = nodes.field_body('', list_item[0])
return nodes.field('', fieldname, fieldbody)
fieldbody = nodes.field_body('', listnode)
@@ -189,20 +191,20 @@ class TypedField(GroupedField):
def __init__(self, name, names=(), typenames=(), label=None,
rolename=None, typerolename=None, can_collapse=False):
- # type: (unicode, Tuple[unicode, ...], Tuple[unicode, ...], unicode, unicode, unicode, bool) -> None # NOQA
- GroupedField.__init__(self, name, names, label, rolename, can_collapse)
+ # type: (str, Tuple[str, ...], Tuple[str, ...], str, str, str, bool) -> None
+ super().__init__(name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
def make_field(self,
- types, # type: Dict[unicode, List[nodes.Node]]
- domain, # type: unicode
+ types, # type: Dict[str, List[nodes.Node]]
+ domain, # type: str
items, # type: Tuple
env=None, # type: BuildEnvironment
):
# type: (...) -> nodes.field
def handle_item(fieldarg, content):
- # type: (unicode, unicode) -> nodes.paragraph
+ # type: (str, str) -> nodes.paragraph
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
@@ -213,7 +215,7 @@ class TypedField(GroupedField):
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
- typename = u''.join(n.astext() for n in fieldtype)
+ typename = fieldtype[0].astext()
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, env=env))
else:
@@ -226,7 +228,7 @@ class TypedField(GroupedField):
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
- bodynode = handle_item(fieldarg, content)
+ bodynode = handle_item(fieldarg, content) # type: nodes.Node
else:
bodynode = self.list_type()
for fieldarg, content in items:
@@ -235,11 +237,12 @@ class TypedField(GroupedField):
return nodes.field('', fieldname, fieldbody)
-class DocFieldTransformer(object):
+class DocFieldTransformer:
"""
Transforms field lists in "doc field" syntax into better-looking
equivalents, using the field type definitions given on a domain.
"""
+ typemap = None # type: Dict[str, Tuple[Field, bool]]
def __init__(self, directive):
# type: (Any) -> None
@@ -250,18 +253,19 @@ class DocFieldTransformer(object):
self.typemap = directive._doc_field_type_map
def preprocess_fieldtypes(self, types):
- # type: (List) -> Dict[unicode, Tuple[Any, bool]]
+ # type: (List[Field]) -> Dict[str, Tuple[Field, bool]]
typemap = {}
for fieldtype in types:
for name in fieldtype.names:
typemap[name] = fieldtype, False
if fieldtype.is_typed:
- for name in fieldtype.typenames:
- typemap[name] = fieldtype, True
+ typed_field = cast(TypedField, fieldtype)
+ for name in typed_field.typenames:
+ typemap[name] = typed_field, True
return typemap
def transform_all(self, node):
- # type: (nodes.Node) -> None
+ # type: (addnodes.desc_content) -> None
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
for child in node:
@@ -269,58 +273,62 @@ class DocFieldTransformer(object):
self.transform(child)
def transform(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.field_list) -> None
"""Transform a single field list *node*."""
typemap = self.typemap
- entries = []
- groupindices = {} # type: Dict[unicode, int]
- types = {} # type: Dict[unicode, Dict]
+ entries = [] # type: List[Union[nodes.field, Tuple[Field, Any]]]
+ groupindices = {} # type: Dict[str, int]
+ types = {} # type: Dict[str, Dict]
# step 1: traverse all fields and collect field types and content
- for field in node:
- fieldname, fieldbody = field
+ for field in cast(List[nodes.field], node):
+ assert len(field) == 2
+ field_name = cast(nodes.field_name, field[0])
+ field_body = cast(nodes.field_body, field[1])
try:
# split into field type and argument
- fieldtype, fieldarg = fieldname.astext().split(None, 1)
+ fieldtype_name, fieldarg = field_name.astext().split(None, 1)
except ValueError:
# maybe an argument-less field type?
- fieldtype, fieldarg = fieldname.astext(), ''
- typedesc, is_typefield = typemap.get(fieldtype, (None, None))
+ fieldtype_name, fieldarg = field_name.astext(), ''
+ typedesc, is_typefield = typemap.get(fieldtype_name, (None, None))
# collect the content, trying not to keep unnecessary paragraphs
- if _is_single_paragraph(fieldbody):
- content = fieldbody.children[0].children
+ if _is_single_paragraph(field_body):
+ paragraph = cast(nodes.paragraph, field_body[0])
+ content = paragraph.children
else:
- content = fieldbody.children
+ content = field_body.children
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
- new_fieldname = fieldtype[0:1].upper() + fieldtype[1:]
+ new_fieldname = fieldtype_name[0:1].upper() + fieldtype_name[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
- fieldname[0] = nodes.Text(new_fieldname)
+ field_name[0] = nodes.Text(new_fieldname)
entries.append(field)
# but if this has a type then we can at least link it
if (typedesc and is_typefield and content and
len(content) == 1 and isinstance(content[0], nodes.Text)):
+ typed_field = cast(TypedField, typedesc)
target = content[0].astext()
- xrefs = typedesc.make_xrefs(
- typedesc.typerolename,
+ xrefs = typed_field.make_xrefs(
+ typed_field.typerolename,
self.directive.domain,
target,
contnode=content[0],
)
- if _is_single_paragraph(fieldbody):
- fieldbody.children[0].clear()
- fieldbody.children[0].extend(xrefs)
+ if _is_single_paragraph(field_body):
+ paragraph = cast(nodes.paragraph, field_body[0])
+ paragraph.clear()
+ paragraph.extend(xrefs)
else:
- fieldbody.clear()
- fieldbody += nodes.paragraph()
- fieldbody[0].extend(xrefs)
+ field_body.clear()
+ field_body += nodes.paragraph('', '', *xrefs)
continue
@@ -347,27 +355,27 @@ class DocFieldTransformer(object):
[nodes.Text(argtype)]
fieldarg = argname
- translatable_content = nodes.inline(fieldbody.rawsource,
+ translatable_content = nodes.inline(field_body.rawsource,
translatable=True)
- translatable_content.document = fieldbody.parent.document
- translatable_content.source = fieldbody.parent.source
- translatable_content.line = fieldbody.parent.line
+ translatable_content.document = field_body.parent.document
+ translatable_content.source = field_body.parent.source
+ translatable_content.line = field_body.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in groupindices:
- group = entries[groupindices[typename]]
+ group = cast(Tuple[Field, List], entries[groupindices[typename]])
else:
groupindices[typename] = len(entries)
- group = [typedesc, []]
+ group = (typedesc, [])
entries.append(group)
- entry = typedesc.make_entry(fieldarg, [translatable_content])
- group[1].append(entry)
+ new_entry = typedesc.make_entry(fieldarg, [translatable_content])
+ group[1].append(new_entry)
else:
- entry = typedesc.make_entry(fieldarg, [translatable_content])
- entries.append([typedesc, entry])
+ new_entry = typedesc.make_entry(fieldarg, [translatable_content])
+ entries.append((typedesc, new_entry))
# step 2: all entries are collected, construct the new field list
new_list = nodes.field_list()
@@ -376,10 +384,10 @@ class DocFieldTransformer(object):
# pass-through old field
new_list += entry
else:
- fieldtype, content = entry
+ fieldtype, items = entry
fieldtypes = types.get(fieldtype.name, {})
env = self.directive.state.document.settings.env
new_list += fieldtype.make_field(fieldtypes, self.directive.domain,
- content, env=env)
+ items, env=env)
node.replace_self(new_list)
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index bc4b96a56..f13975e28 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.docstrings
~~~~~~~~~~~~~~~~~~~~~~
@@ -17,7 +16,7 @@ if False:
def prepare_docstring(s, ignore=1):
- # type: (unicode, int) -> List[unicode]
+ # type: (str, int) -> List[str]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
@@ -51,7 +50,7 @@ def prepare_docstring(s, ignore=1):
def prepare_commentdoc(s):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""Extract documentation comment lines (starting with #:) and return them
as a list of lines. Returns an empty list if there is no documentation.
"""
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index 874eb6baf..4f6eb418e 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.docutils
~~~~~~~~~~~~~~~~~~~~
@@ -8,9 +7,7 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-import codecs
import os
import re
import types
@@ -19,6 +16,7 @@ from contextlib import contextmanager
from copy import copy
from distutils.version import LooseVersion
from os import path
+from typing import IO, cast
import docutils
from docutils import nodes
@@ -37,15 +35,18 @@ report_re = re.compile('^(.+?:(?:\\d+)?): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(
if False:
# For type annotation
- from typing import Any, Callable, Generator, List, Set, Tuple # NOQA
- from docutils.statemachine import State, ViewList # NOQA
+ from types import ModuleType # NOQA
+ from typing import Any, Callable, Generator, List, Set, Tuple, Type # NOQA
+ from docutils.statemachine import State, StringList # NOQA
+ from sphinx.builders import Builder # NOQA
from sphinx.config import Config # NOQA
from sphinx.environment import BuildEnvironment # NOQA
from sphinx.io import SphinxFileInput # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
__version_info__ = tuple(LooseVersion(docutils.__version__).version)
-additional_nodes = set() # type: Set[nodes.Node]
+additional_nodes = set() # type: Set[Type[nodes.Element]]
@contextmanager
@@ -53,39 +54,77 @@ def docutils_namespace():
# type: () -> Generator[None, None, None]
"""Create namespace for reST parsers."""
try:
- _directives = copy(directives._directives)
- _roles = copy(roles._roles)
+ _directives = copy(directives._directives) # type: ignore
+ _roles = copy(roles._roles) # type: ignore
yield
finally:
- directives._directives = _directives
- roles._roles = _roles
+ directives._directives = _directives # type: ignore
+ roles._roles = _roles # type: ignore
for node in list(additional_nodes):
unregister_node(node)
additional_nodes.discard(node)
+def is_directive_registered(name):
+ # type: (str) -> bool
+ """Check the *name* directive is already registered."""
+ return name in directives._directives # type: ignore
+
+
+def register_directive(name, directive):
+ # type: (str, Type[Directive]) -> None
+ """Register a directive to docutils.
+
+ This modifies global state of docutils. So it is better to use this
+ inside ``docutils_namespace()`` to prevent side-effects.
+ """
+ directives.register_directive(name, directive)
+
+
+def is_role_registered(name):
+ # type: (str) -> bool
+ """Check the *name* role is already registered."""
+ return name in roles._roles # type: ignore
+
+
+def register_role(name, role):
+ # type: (str, RoleFunction) -> None
+ """Register a role to docutils.
+
+ This modifies global state of docutils. So it is better to use this
+ inside ``docutils_namespace()`` to prevent side-effects.
+ """
+ roles.register_local_role(name, role)
+
+
+def unregister_role(name):
+ # type: (str) -> None
+ """Unregister a role from docutils."""
+ roles._roles.pop(name, None) # type: ignore
+
+
def is_node_registered(node):
- # type: (nodes.Node) -> bool
+ # type: (Type[nodes.Element]) -> bool
"""Check the *node* is already registered."""
return hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__)
def register_node(node):
- # type: (nodes.Node) -> None
+ # type: (Type[nodes.Element]) -> None
"""Register a node to docutils.
This modifies global state of some visitors. So it is better to use this
inside ``docutils_namespace()`` to prevent side-effects.
"""
if not hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__):
- nodes._add_node_class_names([node.__name__])
+ nodes._add_node_class_names([node.__name__]) # type: ignore
additional_nodes.add(node)
def unregister_node(node):
- # type: (nodes.Node) -> None
+ # type: (Type[nodes.Element]) -> None
"""Unregister a node from docutils.
This is inverse of ``nodes._add_nodes_class_names()``.
@@ -108,7 +147,7 @@ def patched_get_language():
from docutils.languages import get_language
def patched_get_language(language_code, reporter=None):
- # type: (unicode, Reporter) -> Any
+ # type: (str, Reporter) -> Any
return get_language(language_code)
try:
@@ -121,12 +160,12 @@ def patched_get_language():
@contextmanager
def using_user_docutils_conf(confdir):
- # type: (unicode) -> Generator[None, None, None]
+ # type: (str) -> Generator[None, None, None]
"""Let docutils know the location of ``docutils.conf`` for Sphinx."""
try:
docutilsconfig = os.environ.get('DOCUTILSCONFIG', None)
if confdir:
- os.environ['DOCUTILSCONFIG'] = path.join(path.abspath(confdir), 'docutils.conf') # type: ignore # NOQA
+ os.environ['DOCUTILSCONFIG'] = path.join(path.abspath(confdir), 'docutils.conf')
yield
finally:
@@ -138,7 +177,7 @@ def using_user_docutils_conf(confdir):
@contextmanager
def patch_docutils(confdir=None):
- # type: (unicode) -> Generator[None, None, None]
+ # type: (str) -> Generator[None, None, None]
"""Patch to docutils temporarily."""
with patched_get_language(), using_user_docutils_conf(confdir):
yield
@@ -148,7 +187,7 @@ class ElementLookupError(Exception):
pass
-class sphinx_domains(object):
+class sphinx_domains:
"""Monkey-patch directive and role dispatch, so that domain-specific
markup takes precedence.
"""
@@ -163,7 +202,7 @@ class sphinx_domains(object):
self.enable()
def __exit__(self, type, value, traceback):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
self.disable()
def enable(self):
@@ -171,8 +210,8 @@ class sphinx_domains(object):
self.directive_func = directives.directive
self.role_func = roles.role
- directives.directive = self.lookup_directive
- roles.role = self.lookup_role
+ directives.directive = self.lookup_directive # type: ignore
+ roles.role = self.lookup_role # type: ignore
def disable(self):
# type: () -> None
@@ -180,7 +219,7 @@ class sphinx_domains(object):
roles.role = self.role_func
def lookup_domain_element(self, type, name):
- # type: (unicode, unicode) -> Tuple[Any, List]
+ # type: (str, str) -> Any
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
"""
@@ -209,30 +248,30 @@ class sphinx_domains(object):
raise ElementLookupError
def lookup_directive(self, name, lang_module, document):
- # type: (unicode, unicode, nodes.document) -> Tuple[Any, List]
+ # type: (str, ModuleType, nodes.document) -> Tuple[Type[Directive], List[nodes.system_message]] # NOQA
try:
return self.lookup_domain_element('directive', name)
except ElementLookupError:
return self.directive_func(name, lang_module, document)
def lookup_role(self, name, lang_module, lineno, reporter):
- # type: (unicode, unicode, int, Any) -> Tuple[Any, List]
+ # type: (str, ModuleType, int, Reporter) -> Tuple[RoleFunction, List[nodes.system_message]] # NOQA
try:
return self.lookup_domain_element('role', name)
except ElementLookupError:
return self.role_func(name, lang_module, lineno, reporter)
-class WarningStream(object):
+class WarningStream:
def write(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
matched = report_re.search(text)
if not matched:
logger.warning(text.rstrip("\r\n"))
else:
location, type, level = matched.groups()
message = report_re.sub('', text).rstrip()
- logger.log(type, message, location=location) # type: ignore
+ logger.log(type, message, location=location)
class LoggingReporter(Reporter):
@@ -246,10 +285,10 @@ class LoggingReporter(Reporter):
def __init__(self, source, report_level=Reporter.WARNING_LEVEL,
halt_level=Reporter.SEVERE_LEVEL, debug=False,
error_handler='backslashreplace'):
- # type: (unicode, int, int, bool, unicode) -> None
- stream = WarningStream()
- Reporter.__init__(self, source, report_level, halt_level,
- stream, debug, error_handler=error_handler)
+ # type: (str, int, int, bool, str) -> None
+ stream = cast(IO, WarningStream())
+ super().__init__(source, report_level, halt_level,
+ stream, debug, error_handler=error_handler)
class NullReporter(Reporter):
@@ -257,7 +296,7 @@ class NullReporter(Reporter):
def __init__(self):
# type: () -> None
- Reporter.__init__(self, '', 999, 4)
+ super().__init__('', 999, 4)
def is_html5_writer_available():
@@ -285,21 +324,21 @@ def directive_helper(obj, has_content=None, argument_spec=None, **option_spec):
@contextmanager
def switch_source_input(state, content):
- # type: (State, ViewList) -> Generator[None, None, None]
+ # type: (State, StringList) -> Generator[None, None, None]
"""Switch current source input of state temporarily."""
try:
# remember the original ``get_source_and_line()`` method
- get_source_and_line = state.memo.reporter.get_source_and_line
+ get_source_and_line = state.memo.reporter.get_source_and_line # type: ignore
# replace it by new one
state_machine = StateMachine([], None)
state_machine.input_lines = content
- state.memo.reporter.get_source_and_line = state_machine.get_source_and_line
+ state.memo.reporter.get_source_and_line = state_machine.get_source_and_line # type: ignore # NOQA
yield
finally:
# restore the method
- state.memo.reporter.get_source_and_line = get_source_and_line
+ state.memo.reporter.get_source_and_line = get_source_and_line # type: ignore
class SphinxFileOutput(FileOutput):
@@ -308,18 +347,18 @@ class SphinxFileOutput(FileOutput):
def __init__(self, **kwargs):
# type: (Any) -> None
self.overwrite_if_changed = kwargs.pop('overwrite_if_changed', False)
- FileOutput.__init__(self, **kwargs)
+ super().__init__(**kwargs)
def write(self, data):
- # type: (unicode) -> unicode
+ # type: (str) -> str
if (self.destination_path and self.autoclose and 'b' not in self.mode and
self.overwrite_if_changed and os.path.exists(self.destination_path)):
- with codecs.open(self.destination_path, encoding=self.encoding) as f:
+ with open(self.destination_path, encoding=self.encoding) as f:
# skip writing: content not changed
if f.read() == data:
return data
- return FileOutput.write(self, data)
+ return super().write(data)
class SphinxDirective(Directive):
@@ -344,13 +383,30 @@ class SphinxDirective(Directive):
return self.env.config
+class SphinxTranslator(nodes.NodeVisitor):
+ """A base class for Sphinx translators.
+
+ This class provides helper methods for Sphinx translators.
+
+ .. note:: The subclasses of this class might not work with docutils.
+ This class is strongly coupled with Sphinx.
+ """
+
+ def __init__(self, document, builder):
+ # type: (nodes.document, Builder) -> None
+ super().__init__(document)
+ self.builder = builder
+ self.config = builder.config
+ self.settings = document.settings
+
+
# cache a vanilla instance of nodes.document
# Used in new_document() function
__document_cache__ = None # type: nodes.document
def new_document(source_path, settings=None):
- # type: (unicode, Any) -> nodes.document
+ # type: (str, Any) -> nodes.document
"""Return a new empty document object. This is an alternative of docutils'.
This is a simple wrapper for ``docutils.utils.new_document()``. It
diff --git a/sphinx/util/fileutil.py b/sphinx/util/fileutil.py
index fcbc8abe6..fd019401e 100644
--- a/sphinx/util/fileutil.py
+++ b/sphinx/util/fileutil.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.fileutil
~~~~~~~~~~~~~~~~~~~~
@@ -8,15 +7,13 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-import codecs
import os
import posixpath
from docutils.utils import relative_path
-from sphinx.util.osutil import copyfile, ensuredir, walk
+from sphinx.util.osutil import copyfile, ensuredir
if False:
# For type annotation
@@ -26,7 +23,7 @@ if False:
def copy_asset_file(source, destination, context=None, renderer=None):
- # type: (unicode, unicode, Dict, BaseRenderer) -> None
+ # type: (str, str, Dict, BaseRenderer) -> None
"""Copy an asset file to destination.
On copying, it expands the template variables if context argument is given and
@@ -40,26 +37,26 @@ def copy_asset_file(source, destination, context=None, renderer=None):
if not os.path.exists(source):
return
- if os.path.exists(destination) and os.path.isdir(destination):
+ if os.path.isdir(destination):
# Use source filename if destination points a directory
destination = os.path.join(destination, os.path.basename(source))
- if source.lower().endswith('_t') and context:
+ if source.lower().endswith('_t') and context is not None:
if renderer is None:
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
- with codecs.open(source, 'r', encoding='utf-8') as fsrc: # type: ignore
+ with open(source, encoding='utf-8') as fsrc:
if destination.lower().endswith('_t'):
destination = destination[:-2]
- with codecs.open(destination, 'w', encoding='utf-8') as fdst: # type: ignore
+ with open(destination, 'w', encoding='utf-8') as fdst:
fdst.write(renderer.render_string(fsrc.read(), context))
else:
copyfile(source, destination)
def copy_asset(source, destination, excluded=lambda path: False, context=None, renderer=None):
- # type: (unicode, unicode, Union[Callable[[unicode], bool], Matcher], Dict, BaseRenderer) -> None # NOQA
+ # type: (str, str, Union[Callable[[str], bool], Matcher], Dict, BaseRenderer) -> None
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
@@ -83,7 +80,7 @@ def copy_asset(source, destination, excluded=lambda path: False, context=None, r
copy_asset_file(source, destination, context, renderer)
return
- for root, dirs, files in walk(source, followlinks=True):
+ for root, dirs, files in os.walk(source, followlinks=True):
reldir = relative_path(source, root)
for dir in dirs[:]:
if excluded(posixpath.join(reldir, dir)):
diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py
index e7c8cc9f2..be16b6b27 100644
--- a/sphinx/util/i18n.py
+++ b/sphinx/util/i18n.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.i18n
~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
import gettext
-import io
import os
import re
import warnings
@@ -26,7 +24,7 @@ from sphinx.errors import SphinxError
from sphinx.locale import __
from sphinx.util import logging
from sphinx.util.matching import Matcher
-from sphinx.util.osutil import SEP, relpath, walk
+from sphinx.util.osutil import SEP, relpath
logger = logging.getLogger(__name__)
@@ -43,22 +41,22 @@ class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self):
- # type: () -> unicode
+ # type: () -> str
return self.domain + '.po'
@property
def mo_file(self):
- # type: () -> unicode
+ # type: () -> str
return self.domain + '.mo'
@property
def po_path(self):
- # type: () -> unicode
+ # type: () -> str
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
- # type: () -> unicode
+ # type: () -> str
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
@@ -68,15 +66,15 @@ class CatalogInfo(LocaleFileInfoBase):
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
def write_mo(self, locale):
- # type: (unicode) -> None
- with io.open(self.po_path, 'rt', encoding=self.charset) as file_po:
+ # type: (str) -> None
+ with open(self.po_path, encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
except Exception as exc:
logger.warning(__('reading error: %s, %s'), self.po_path, exc)
return
- with io.open(self.mo_path, 'wb') as file_mo:
+ with open(self.mo_path, 'wb') as file_mo:
try:
write_mo(file_mo, po)
except Exception as exc:
@@ -84,7 +82,7 @@ class CatalogInfo(LocaleFileInfoBase):
def find_catalog(docname, compaction):
- # type: (unicode, bool) -> unicode
+ # type: (str, bool) -> str
if compaction:
ret = docname.split(SEP, 1)[0]
else:
@@ -94,21 +92,21 @@ def find_catalog(docname, compaction):
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
- # type: (unicode, unicode, List[unicode], unicode, bool) -> List[unicode]
+ # type: (str, str, List[str], str, bool) -> List[str]
if not(lang and locale_dirs):
return []
domain = find_catalog(docname, compaction)
- files = [gettext.find(domain, path.join(srcdir, dir_), [lang]) # type: ignore
+ files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
for dir_ in locale_dirs]
- files = [relpath(f, srcdir) for f in files if f] # type: ignore
- return files # type: ignore
+ files = [relpath(f, srcdir) for f in files if f]
+ return files
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=None,
charset='utf-8', force_all=False,
excluded=Matcher([])):
- # type: (List[unicode], unicode, List[unicode], bool, unicode, bool, Matcher) -> Set[CatalogInfo] # NOQA
+ # type: (List[str], str, List[str], bool, str, bool, Matcher) -> Set[CatalogInfo]
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
@@ -140,7 +138,7 @@ def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact
if not path.exists(base_dir):
continue # locale path is not found
- for dirpath, dirnames, filenames in walk(base_dir, followlinks=True):
+ for dirpath, dirnames, filenames in os.walk(base_dir, followlinks=True):
filenames = [f for f in filenames if f.endswith('.po')]
for filename in filenames:
if excluded(path.join(relpath(dirpath, base_dir), filename)):
@@ -199,7 +197,7 @@ date_format_re = re.compile('(%s)' % '|'.join(date_format_mappings))
def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
- # type: (datetime, unicode, unicode, Callable) -> unicode
+ # type: (datetime, str, str, Callable) -> str
if locale is None:
locale = 'en'
@@ -220,7 +218,7 @@ def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
def format_date(format, date=None, language=None):
- # type: (str, datetime, unicode) -> unicode
+ # type: (str, datetime, str) -> str
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
@@ -255,7 +253,7 @@ def format_date(format, date=None, language=None):
def get_image_filename_for_language(filename, env):
- # type: (unicode, BuildEnvironment) -> unicode
+ # type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
@@ -275,7 +273,7 @@ def get_image_filename_for_language(filename, env):
def search_image_for_language(filename, env):
- # type: (unicode, BuildEnvironment) -> unicode
+ # type: (str, BuildEnvironment) -> str
if not env.config.language:
return filename
diff --git a/sphinx/util/images.py b/sphinx/util/images.py
index c71da38b9..eaa188496 100644
--- a/sphinx/util/images.py
+++ b/sphinx/util/images.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.images
~~~~~~~~~~~~~~~~~~
@@ -8,17 +7,16 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import base64
import imghdr
import warnings
from collections import OrderedDict
+from io import BytesIO
from os import path
from typing import NamedTuple
import imagesize
-from six import PY3, BytesIO, iteritems
from sphinx.deprecation import RemovedInSphinx30Warning
@@ -34,9 +32,6 @@ if False:
# For type annotation
from typing import Dict, IO, List, Tuple # NOQA
-if PY3:
- unicode = str # special alias for static typing...
-
mime_suffixes = OrderedDict([
('.gif', 'image/gif'),
('.jpg', 'image/jpeg'),
@@ -44,15 +39,15 @@ mime_suffixes = OrderedDict([
('.pdf', 'application/pdf'),
('.svg', 'image/svg+xml'),
('.svgz', 'image/svg+xml'),
-]) # type: Dict[unicode, unicode]
+])
-DataURI = NamedTuple('DataURI', [('mimetype', unicode),
- ('charset', unicode),
+DataURI = NamedTuple('DataURI', [('mimetype', str),
+ ('charset', str),
('data', bytes)])
def get_image_size(filename):
- # type: (unicode) -> Tuple[int, int]
+ # type: (str) -> Tuple[int, int]
try:
size = imagesize.get(filename)
if size[0] == -1:
@@ -72,7 +67,7 @@ def get_image_size(filename):
def guess_mimetype_for_stream(stream, default=None):
- # type: (IO, unicode) -> unicode
+ # type: (IO, str) -> str
imgtype = imghdr.what(stream) # type: ignore
if imgtype:
return 'image/' + imgtype
@@ -81,7 +76,7 @@ def guess_mimetype_for_stream(stream, default=None):
def guess_mimetype(filename='', content=None, default=None):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, bytes, str) -> str
_, ext = path.splitext(filename.lower())
if ext in mime_suffixes:
return mime_suffixes[ext]
@@ -97,8 +92,8 @@ def guess_mimetype(filename='', content=None, default=None):
def get_image_extension(mimetype):
- # type: (unicode) -> unicode
- for ext, _mimetype in iteritems(mime_suffixes):
+ # type: (str) -> str
+ for ext, _mimetype in mime_suffixes.items():
if mimetype == _mimetype:
return ext
@@ -106,13 +101,13 @@ def get_image_extension(mimetype):
def parse_data_uri(uri):
- # type: (unicode) -> DataURI
+ # type: (str) -> DataURI
if not uri.startswith('data:'):
return None
# data:[<MIME-type>][;charset=<encoding>][;base64],<data>
- mimetype = u'text/plain'
- charset = u'US-ASCII'
+ mimetype = 'text/plain'
+ charset = 'US-ASCII'
properties, data = uri[5:].split(',', 1)
for prop in properties.split(';'):
@@ -128,10 +123,10 @@ def parse_data_uri(uri):
def test_svg(h, f):
- # type: (unicode, IO) -> unicode
+ # type: (bytes, IO) -> str
"""An additional imghdr library helper; test the header is SVG's or not."""
try:
- if '<svg' in h.decode('utf-8').lower():
+ if '<svg' in h.decode().lower():
return 'svg+xml'
except UnicodeDecodeError:
pass
@@ -141,4 +136,4 @@ def test_svg(h, f):
# install test_svg() to imghdr
# refs: https://docs.python.org/3.6/library/imghdr.html#imghdr.tests
-imghdr.tests.append(test_svg) # type: ignore
+imghdr.tests.append(test_svg)
diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py
index f21473cea..4973111cf 100644
--- a/sphinx/util/inspect.py
+++ b/sphinx/util/inspect.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.inspect
~~~~~~~~~~~~~~~~~~~
@@ -8,152 +7,108 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
+import builtins
+import enum
import inspect
import re
import sys
import typing
-from collections import OrderedDict
+import warnings
from functools import partial
+from io import StringIO
-from six import PY2, PY3, StringIO, binary_type, string_types, itervalues
-from six.moves import builtins
-
-from sphinx.util import force_decode
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.util import logging
from sphinx.util.pycompat import NoneType
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Type # NOQA
+ from typing import Any, Callable, Mapping, List, Tuple, Type # NOQA
logger = logging.getLogger(__name__)
memory_address_re = re.compile(r' at 0x[0-9a-f]{8,16}(?=>)', re.IGNORECASE)
-if PY3:
- # Copied from the definition of inspect.getfullargspec from Python master,
- # and modified to remove the use of special flags that break decorated
- # callables and bound methods in the name of backwards compatibility. Used
- # under the terms of PSF license v2, which requires the above statement
- # and the following:
- #
- # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
- # 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software
- # Foundation; All Rights Reserved
- def getargspec(func):
- """Like inspect.getfullargspec but supports bound methods, and wrapped
- methods."""
- # On 3.5+, signature(int) or similar raises ValueError. On 3.4, it
- # succeeds with a bogus signature. We want a TypeError uniformly, to
- # match historical behavior.
- if (isinstance(func, type) and
- is_builtin_class_method(func, "__new__") and
- is_builtin_class_method(func, "__init__")):
- raise TypeError(
- "can't compute signature for built-in type {}".format(func))
-
- sig = inspect.signature(func)
-
- args = []
- varargs = None
- varkw = None
- kwonlyargs = []
- defaults = ()
- annotations = {}
- defaults = ()
- kwdefaults = {}
-
- if sig.return_annotation is not sig.empty:
- annotations['return'] = sig.return_annotation
-
- for param in sig.parameters.values():
- kind = param.kind
- name = param.name
-
- if kind is inspect.Parameter.POSITIONAL_ONLY:
- args.append(name)
- elif kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:
- args.append(name)
- if param.default is not param.empty:
- defaults += (param.default,)
- elif kind is inspect.Parameter.VAR_POSITIONAL:
- varargs = name
- elif kind is inspect.Parameter.KEYWORD_ONLY:
- kwonlyargs.append(name)
- if param.default is not param.empty:
- kwdefaults[name] = param.default
- elif kind is inspect.Parameter.VAR_KEYWORD:
- varkw = name
-
- if param.annotation is not param.empty:
- annotations[name] = param.annotation
-
- if not kwdefaults:
- # compatibility with 'func.__kwdefaults__'
- kwdefaults = None
-
- if not defaults:
- # compatibility with 'func.__defaults__'
- defaults = None
-
- return inspect.FullArgSpec(args, varargs, varkw, defaults,
- kwonlyargs, kwdefaults, annotations)
-
-else: # 2.7
- def getargspec(func):
- # type: (Any) -> Any
- """Like inspect.getargspec but supports functools.partial as well."""
- if inspect.ismethod(func):
- func = func.__func__
- parts = 0, () # type: Tuple[int, Tuple[unicode, ...]]
- if type(func) is partial:
- keywords = func.keywords
- if keywords is None:
- keywords = {}
- parts = len(func.args), keywords.keys()
- func = func.func
- if not inspect.isfunction(func):
- raise TypeError('%r is not a Python function' % func)
- args, varargs, varkw = inspect.getargs(func.__code__)
- func_defaults = func.__defaults__
- if func_defaults is None:
- func_defaults = []
- else:
- func_defaults = list(func_defaults)
- if parts[0]:
- args = args[parts[0]:]
- if parts[1]:
- for arg in parts[1]:
- i = args.index(arg) - len(args) # type: ignore
- del args[i]
- try:
- del func_defaults[i]
- except IndexError:
- pass
- return inspect.ArgSpec(args, varargs, varkw, func_defaults) # type: ignore
-
-try:
- import enum
-except ImportError:
- enum = None
+# Copied from the definition of inspect.getfullargspec from Python master,
+# and modified to remove the use of special flags that break decorated
+# callables and bound methods in the name of backwards compatibility. Used
+# under the terms of PSF license v2, which requires the above statement
+# and the following:
+#
+# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
+# 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 Python Software
+# Foundation; All Rights Reserved
+def getargspec(func):
+ """Like inspect.getfullargspec but supports bound methods, and wrapped
+ methods."""
+ # On 3.5+, signature(int) or similar raises ValueError. On 3.4, it
+ # succeeds with a bogus signature. We want a TypeError uniformly, to
+ # match historical behavior.
+ if (isinstance(func, type) and
+ is_builtin_class_method(func, "__new__") and
+ is_builtin_class_method(func, "__init__")):
+ raise TypeError(
+ "can't compute signature for built-in type {}".format(func))
+
+ sig = inspect.signature(func)
+
+ args = []
+ varargs = None
+ varkw = None
+ kwonlyargs = []
+ defaults = ()
+ annotations = {}
+ defaults = ()
+ kwdefaults = {}
+
+ if sig.return_annotation is not sig.empty:
+ annotations['return'] = sig.return_annotation
+
+ for param in sig.parameters.values():
+ kind = param.kind
+ name = param.name
+
+ if kind is inspect.Parameter.POSITIONAL_ONLY:
+ args.append(name)
+ elif kind is inspect.Parameter.POSITIONAL_OR_KEYWORD:
+ args.append(name)
+ if param.default is not param.empty:
+ defaults += (param.default,) # type: ignore
+ elif kind is inspect.Parameter.VAR_POSITIONAL:
+ varargs = name
+ elif kind is inspect.Parameter.KEYWORD_ONLY:
+ kwonlyargs.append(name)
+ if param.default is not param.empty:
+ kwdefaults[name] = param.default
+ elif kind is inspect.Parameter.VAR_KEYWORD:
+ varkw = name
+
+ if param.annotation is not param.empty:
+ annotations[name] = param.annotation
+
+ if not kwdefaults:
+ # compatibility with 'func.__kwdefaults__'
+ kwdefaults = None
+
+ if not defaults:
+ # compatibility with 'func.__defaults__'
+ defaults = None
+
+ return inspect.FullArgSpec(args, varargs, varkw, defaults,
+ kwonlyargs, kwdefaults, annotations)
def isenumclass(x):
# type: (Type) -> bool
"""Check if the object is subclass of enum."""
- if enum is None:
- return False
return inspect.isclass(x) and issubclass(x, enum.Enum)
def isenumattribute(x):
# type: (Any) -> bool
"""Check if the object is attribute of enum."""
- if enum is None:
- return False
return isinstance(x, enum.Enum)
@@ -168,17 +123,14 @@ def isclassmethod(obj):
"""Check if the object is classmethod."""
if isinstance(obj, classmethod):
return True
- elif inspect.ismethod(obj):
- if getattr(obj, 'im_self', None): # py2
- return True
- elif getattr(obj, '__self__', None): # py3
- return True
+ elif inspect.ismethod(obj) and obj.__self__ is not None:
+ return True
return False
def isstaticmethod(obj, cls=None, name=None):
- # type: (Any, Any, unicode) -> bool
+ # type: (Any, Any, str) -> bool
"""Check if the object is staticmethod."""
if isinstance(obj, staticmethod):
return True
@@ -219,7 +171,7 @@ def isbuiltin(obj):
def safe_getattr(obj, name, *defargs):
- # type: (Any, unicode, unicode) -> object
+ # type: (Any, str, str) -> object
"""A getattr() that turns all exceptions into AttributeErrors."""
try:
return getattr(obj, name, *defargs)
@@ -242,9 +194,9 @@ def safe_getattr(obj, name, *defargs):
def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
- # type: (Any, Callable[[unicode], bool], Callable) -> List[Tuple[unicode, Any]]
+ # type: (Any, Callable[[str], bool], Callable) -> List[Tuple[str, Any]]
"""A version of inspect.getmembers() that uses safe_getattr()."""
- results = [] # type: List[Tuple[unicode, Any]]
+ results = [] # type: List[Tuple[str, Any]]
for key in dir(object):
try:
value = attr_getter(object, key, None)
@@ -257,7 +209,7 @@ def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
def object_description(object):
- # type: (Any) -> unicode
+ # type: (Any) -> str
"""A repr() implementation that returns text safe to use in reST context."""
if isinstance(object, dict):
try:
@@ -275,15 +227,19 @@ def object_description(object):
except TypeError:
pass # Cannot sort set values, fall back to generic repr
else:
- template = "{%s}" if PY3 else "set([%s])"
- return template % ", ".join(object_description(x)
- for x in sorted_values)
+ return "{%s}" % ", ".join(object_description(x) for x in sorted_values)
+ if isinstance(object, frozenset):
+ try:
+ sorted_values = sorted(object)
+ except TypeError:
+ pass # Cannot sort frozenset values, fall back to generic repr
+ else:
+ return "frozenset({%s})" % ", ".join(object_description(x)
+ for x in sorted_values)
try:
s = repr(object)
except Exception:
raise ValueError
- if isinstance(s, binary_type):
- s = force_decode(s, None) # type: ignore
# Strip non-deterministic memory addresses such as
# ``<__main__.A at 0x7f68cb685710>``
s = memory_address_re.sub('', s)
@@ -291,7 +247,7 @@ def object_description(object):
def is_builtin_class_method(obj, attr_name):
- # type: (Any, unicode) -> bool
+ # type: (Any, str) -> bool
"""If attr_name is implemented at builtin class, return True.
>>> is_builtin_class_method(int, '__init__')
@@ -308,7 +264,7 @@ def is_builtin_class_method(obj, attr_name):
return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls # type: ignore
-class Parameter(object):
+class Parameter:
"""Fake parameter class for python2."""
POSITIONAL_ONLY = 0
POSITIONAL_OR_KEYWORD = 1
@@ -324,8 +280,11 @@ class Parameter(object):
self.default = default
self.annotation = self.empty
+ warnings.warn('sphinx.util.inspect.Parameter is deprecated.',
+ RemovedInSphinx30Warning, stacklevel=2)
+
-class Signature(object):
+class Signature:
"""The Signature object represents the call signature of a callable object and
its return annotation.
"""
@@ -342,23 +301,20 @@ class Signature(object):
self.has_retval = has_retval
self.partialmethod_with_noargs = False
- if PY3:
- try:
- self.signature = inspect.signature(subject)
- except IndexError:
- # Until python 3.6.4, cpython has been crashed on inspection for
- # partialmethods not having any arguments.
- # https://bugs.python.org/issue33009
- if hasattr(subject, '_partialmethod'):
- self.signature = None
- self.partialmethod_with_noargs = True
- else:
- raise
- else:
- self.argspec = getargspec(subject)
+ try:
+ self.signature = inspect.signature(subject)
+ except IndexError:
+ # Until python 3.6.4, cpython has been crashed on inspection for
+ # partialmethods not having any arguments.
+ # https://bugs.python.org/issue33009
+ if hasattr(subject, '_partialmethod'):
+ self.signature = None
+ self.partialmethod_with_noargs = True
+ else:
+ raise
try:
- self.annotations = typing.get_type_hints(subject) # type: ignore
+ self.annotations = typing.get_type_hints(subject)
except Exception:
# get_type_hints() does not support some kind of objects like partial,
# ForwardRef and so on. For them, it raises an exception. In that case,
@@ -368,49 +324,28 @@ class Signature(object):
if bound_method:
# client gives a hint that the subject is a bound method
- if PY3 and inspect.ismethod(subject):
+ if inspect.ismethod(subject):
# inspect.signature already considers the subject is bound method.
# So it is not need to skip first argument.
self.skip_first_argument = False
else:
self.skip_first_argument = True
else:
- if PY3:
- # inspect.signature recognizes type of method properly without any hints
- self.skip_first_argument = False
- else:
- # check the subject is bound method or not
- self.skip_first_argument = inspect.ismethod(subject) and subject.__self__ # type: ignore # NOQA
+ # inspect.signature recognizes type of method properly without any hints
+ self.skip_first_argument = False
@property
def parameters(self):
- # type: () -> Dict
- if PY3:
- if self.partialmethod_with_noargs:
- return {}
- else:
- return self.signature.parameters
+ # type: () -> Mapping
+ if self.partialmethod_with_noargs:
+ return {}
else:
- params = OrderedDict() # type: Dict
- positionals = len(self.argspec.args) - len(self.argspec.defaults)
- for i, arg in enumerate(self.argspec.args):
- if i < positionals:
- params[arg] = Parameter(arg)
- else:
- default = self.argspec.defaults[i - positionals]
- params[arg] = Parameter(arg, default=default)
- if self.argspec.varargs:
- params[self.argspec.varargs] = Parameter(self.argspec.varargs,
- Parameter.VAR_POSITIONAL)
- if self.argspec.keywords:
- params[self.argspec.keywords] = Parameter(self.argspec.keywords,
- Parameter.VAR_KEYWORD)
- return params
+ return self.signature.parameters
@property
def return_annotation(self):
# type: () -> Any
- if PY3 and self.signature:
+ if self.signature:
if self.has_retval:
return self.signature.return_annotation
else:
@@ -419,10 +354,10 @@ class Signature(object):
return None
def format_args(self):
- # type: () -> unicode
+ # type: () -> str
args = []
last_kind = None
- for i, param in enumerate(itervalues(self.parameters)):
+ for i, param in enumerate(self.parameters.values()):
# skip first argument if subject is bound method
if self.skip_first_argument and i == 0:
continue
@@ -441,8 +376,7 @@ class Signature(object):
param.KEYWORD_ONLY):
arg.write(param.name)
if param.annotation is not param.empty:
- if isinstance(param.annotation, string_types) and \
- param.name in self.annotations:
+ if isinstance(param.annotation, str) and param.name in self.annotations:
arg.write(': ')
arg.write(self.format_annotation(self.annotations[param.name]))
else:
@@ -451,10 +385,10 @@ class Signature(object):
if param.default is not param.empty:
if param.annotation is param.empty:
arg.write('=')
- arg.write(object_description(param.default)) # type: ignore
+ arg.write(object_description(param.default))
else:
arg.write(' = ')
- arg.write(object_description(param.default)) # type: ignore
+ arg.write(object_description(param.default))
elif param.kind == param.VAR_POSITIONAL:
arg.write('*')
arg.write(param.name)
@@ -465,7 +399,7 @@ class Signature(object):
args.append(arg.getvalue())
last_kind = param.kind
- if PY2 or self.return_annotation is inspect.Parameter.empty:
+ if self.return_annotation is inspect.Parameter.empty:
return '(%s)' % ', '.join(args)
else:
if 'return' in self.annotations:
@@ -484,8 +418,8 @@ class Signature(object):
Displaying complex types from ``typing`` relies on its private API.
"""
- if isinstance(annotation, string_types):
- return annotation # type: ignore
+ if isinstance(annotation, str):
+ return annotation
elif isinstance(annotation, typing.TypeVar): # type: ignore
return annotation.__name__
elif not annotation:
@@ -636,110 +570,8 @@ class Signature(object):
return qualname
-if sys.version_info >= (3, 5):
- _getdoc = inspect.getdoc
-else:
- # code copied from the inspect.py module of the standard library
- # of Python 3.5
-
- def _findclass(func):
- # type: (Any) -> Any
- cls = sys.modules.get(func.__module__)
- if cls is None:
- return None
- if hasattr(func, 'im_class'):
- cls = func.im_class
- else:
- for name in func.__qualname__.split('.')[:-1]:
- cls = getattr(cls, name)
- if not inspect.isclass(cls):
- return None
- return cls
-
- def _finddoc(obj):
- # type: (Any) -> unicode
- if inspect.isclass(obj):
- for base in obj.__mro__:
- if base is not object:
- try:
- doc = base.__doc__
- except AttributeError:
- continue
- if doc is not None:
- return doc
- return None
-
- if inspect.ismethod(obj) and getattr(obj, '__self__', None):
- name = obj.__func__.__name__
- self = obj.__self__
- if (inspect.isclass(self) and
- getattr(getattr(self, name, None), '__func__')
- is obj.__func__):
- # classmethod
- cls = self
- else:
- cls = self.__class__
- elif inspect.isfunction(obj) or inspect.ismethod(obj):
- name = obj.__name__
- cls = _findclass(obj)
- if cls is None or getattr(cls, name) != obj:
- return None
- elif inspect.isbuiltin(obj):
- name = obj.__name__
- self = obj.__self__
- if (inspect.isclass(self) and
- self.__qualname__ + '.' + name == obj.__qualname__):
- # classmethod
- cls = self
- else:
- cls = self.__class__
- # Should be tested before isdatadescriptor().
- elif isinstance(obj, property):
- func = obj.fget
- name = func.__name__
- cls = _findclass(func)
- if cls is None or getattr(cls, name) is not obj:
- return None
- elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj):
- name = obj.__name__
- cls = obj.__objclass__
- if getattr(cls, name) is not obj:
- return None
- else:
- return None
-
- for base in cls.__mro__:
- try:
- doc = getattr(base, name).__doc__
- except AttributeError:
- continue
- if doc is not None:
- return doc
- return None
-
- def _getdoc(object):
- # type: (Any) -> unicode
- """Get the documentation string for an object.
-
- All tabs are expanded to spaces. To clean up docstrings that are
- indented to line up with blocks of code, any whitespace than can be
- uniformly removed from the second line onwards is removed."""
- try:
- doc = object.__doc__
- except AttributeError:
- return None
- if doc is None:
- try:
- doc = _finddoc(object)
- except (AttributeError, TypeError):
- return None
- if not isinstance(doc, str):
- return None
- return inspect.cleandoc(doc)
-
-
def getdoc(obj, attrgetter=safe_getattr, allow_inherited=False):
- # type: (Any, Callable, bool) -> unicode
+ # type: (Any, Callable, bool) -> str
"""Get the docstring for the object.
This tries to obtain the docstring for some kind of objects additionally:
@@ -751,6 +583,6 @@ def getdoc(obj, attrgetter=safe_getattr, allow_inherited=False):
if ispartial(obj) and doc == obj.__class__.__doc__:
return getdoc(obj.func)
elif doc is None and allow_inherited:
- doc = _getdoc(obj)
+ doc = inspect.getdoc(obj)
return doc
diff --git a/sphinx/util/inventory.py b/sphinx/util/inventory.py
index ed4e55bc2..b631e64f4 100644
--- a/sphinx/util/inventory.py
+++ b/sphinx/util/inventory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.inventory
~~~~~~~~~~~~~~~~~~~~~
@@ -12,8 +11,6 @@ import os
import re
import zlib
-from six import PY3
-
from sphinx.util import logging
if False:
@@ -21,18 +18,14 @@ if False:
from typing import Callable, Dict, IO, Iterator, Tuple # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
-
- if PY3:
- unicode = str
-
- Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]]
+ from sphinx.util.typing import Inventory # NOQA
BUFSIZE = 16 * 1024
logger = logging.getLogger(__name__)
-class InventoryFileReader(object):
+class InventoryFileReader:
"""A file reader for inventory file.
This reader supports mixture of texts and compressed texts.
@@ -52,13 +45,13 @@ class InventoryFileReader(object):
self.buffer += chunk
def readline(self):
- # type: () -> unicode
+ # type: () -> str
pos = self.buffer.find(b'\n')
if pos != -1:
- line = self.buffer[:pos].decode('utf-8')
+ line = self.buffer[:pos].decode()
self.buffer = self.buffer[pos + 1:]
elif self.eof:
- line = self.buffer.decode('utf-8')
+ line = self.buffer.decode()
self.buffer = b''
else:
self.read_buffer()
@@ -67,7 +60,7 @@ class InventoryFileReader(object):
return line
def readlines(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
while not self.eof:
line = self.readline()
if line:
@@ -83,21 +76,21 @@ class InventoryFileReader(object):
yield decompressor.flush()
def read_compressed_lines(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
buf = b''
for chunk in self.read_compressed_chunks():
buf += chunk
pos = buf.find(b'\n')
while pos != -1:
- yield buf[:pos].decode('utf-8')
+ yield buf[:pos].decode()
buf = buf[pos + 1:]
pos = buf.find(b'\n')
-class InventoryFile(object):
+class InventoryFile:
@classmethod
def load(cls, stream, uri, joinfunc):
- # type: (IO, unicode, Callable) -> Inventory
+ # type: (IO, str, Callable) -> Inventory
reader = InventoryFileReader(stream)
line = reader.readline().rstrip()
if line == '# Sphinx inventory version 1':
@@ -109,7 +102,7 @@ class InventoryFile(object):
@classmethod
def load_v1(cls, stream, uri, join):
- # type: (InventoryFileReader, unicode, Callable) -> Inventory
+ # type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
@@ -128,7 +121,7 @@ class InventoryFile(object):
@classmethod
def load_v2(cls, stream, uri, join):
- # type: (InventoryFileReader, unicode, Callable) -> Inventory
+ # type: (InventoryFileReader, str, Callable) -> Inventory
invdata = {} # type: Inventory
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
@@ -149,7 +142,7 @@ class InventoryFile(object):
# for Python modules, and the first
# one is correct
continue
- if location.endswith(u'$'):
+ if location.endswith('$'):
location = location[:-1] + name
location = join(uri, location)
invdata.setdefault(type, {})[name] = (projname, version,
@@ -158,19 +151,19 @@ class InventoryFile(object):
@classmethod
def dump(cls, filename, env, builder):
- # type: (unicode, BuildEnvironment, Builder) -> None
+ # type: (str, BuildEnvironment, Builder) -> None
def escape(string):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return re.sub("\\s+", " ", string)
with open(os.path.join(filename), 'wb') as f:
# header
- f.write((u'# Sphinx inventory version 2\n'
- u'# Project: %s\n'
- u'# Version: %s\n'
- u'# The remainder of this file is compressed using zlib.\n' %
+ f.write(('# Sphinx inventory version 2\n'
+ '# Project: %s\n'
+ '# Version: %s\n'
+ '# The remainder of this file is compressed using zlib.\n' %
(escape(env.config.project),
- escape(env.config.version))).encode('utf-8'))
+ escape(env.config.version))).encode())
# body
compressor = zlib.compressobj(9)
@@ -184,8 +177,8 @@ class InventoryFile(object):
if anchor:
uri += '#' + anchor
if dispname == name:
- dispname = u'-'
- entry = (u'%s %s:%s %s %s %s\n' %
+ dispname = '-'
+ entry = ('%s %s:%s %s %s %s\n' %
(name, domainname, typ, prio, uri, dispname))
- f.write(compressor.compress(entry.encode('utf-8')))
+ f.write(compressor.compress(entry.encode()))
f.write(compressor.flush())
diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py
index 6776691cf..7b55e6543 100644
--- a/sphinx/util/jsdump.py
+++ b/sphinx/util/jsdump.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.jsdump
~~~~~~~~~~~~~~~~~~
@@ -12,10 +11,6 @@
import re
-from six import iteritems, integer_types, string_types
-
-from sphinx.util.pycompat import u
-
if False:
# For type annotation
from typing import Any, Dict, IO, List, Match, Union # NOQA
@@ -43,7 +38,7 @@ ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
# type: (str) -> str
def replace(match):
- # type: (Match) -> unicode
+ # type: (Match) -> str
s = match.group(0)
try:
return ESCAPE_DICT[s]
@@ -57,12 +52,12 @@ def encode_string(s):
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
- return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' # type: ignore
+ return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def decode_string(s):
# type: (str) -> str
- return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)
+ return ESCAPED.sub(lambda m: eval('"' + m.group() + '"'), s)
reswords = set("""\
@@ -86,7 +81,7 @@ double in super""".split())
def dumps(obj, key=False):
# type: (Any, bool) -> str
if key:
- if not isinstance(obj, string_types):
+ if not isinstance(obj, str):
obj = str(obj)
if _nameonly_re.match(obj) and obj not in reswords:
return obj # return it as a bare word
@@ -96,19 +91,19 @@ def dumps(obj, key=False):
return 'null'
elif obj is True or obj is False:
return obj and 'true' or 'false'
- elif isinstance(obj, integer_types + (float,)): # type: ignore
+ elif isinstance(obj, (int, float)):
return str(obj)
elif isinstance(obj, dict):
return '{%s}' % ','.join(sorted('%s:%s' % (
dumps(key, True),
dumps(value)
- ) for key, value in iteritems(obj)))
+ ) for key, value in obj.items()))
elif isinstance(obj, set):
return '[%s]' % ','.join(sorted(dumps(x) for x in obj))
elif isinstance(obj, (tuple, list)):
return '[%s]' % ','.join(dumps(x) for x in obj)
- elif isinstance(obj, string_types):
- return encode_string(obj) # type: ignore
+ elif isinstance(obj, str):
+ return encode_string(obj)
raise TypeError(type(obj))
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index fbaa72978..8054be0d7 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.jsonimpl
~~~~~~~~~~~~~~~~~~~~
@@ -10,9 +9,7 @@
"""
import json
-
-from six import text_type
-from six.moves import UserString
+from collections import UserString
if False:
# For type annotation
@@ -22,10 +19,10 @@ if False:
class SphinxJSONEncoder(json.JSONEncoder):
"""JSONEncoder subclass that forces translation proxies."""
def default(self, obj):
- # type: (Any) -> unicode
+ # type: (Any) -> str
if isinstance(obj, UserString):
- return text_type(obj)
- return json.JSONEncoder.default(self, obj)
+ return str(obj)
+ return super().default(obj)
def dump(obj, fp, *args, **kwds):
@@ -35,7 +32,7 @@ def dump(obj, fp, *args, **kwds):
def dumps(obj, *args, **kwds):
- # type: (Any, Any, Any) -> unicode
+ # type: (Any, Any, Any) -> str
kwds['cls'] = SphinxJSONEncoder
return json.dumps(obj, *args, **kwds)
diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py
index 09cef4b48..6e661400f 100644
--- a/sphinx/util/logging.py
+++ b/sphinx/util/logging.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.logging
~~~~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import logging
import logging.handlers
@@ -17,7 +15,6 @@ from contextlib import contextmanager
from docutils import nodes
from docutils.utils import get_source_line
-from six import PY2, StringIO
from sphinx.errors import SphinxWarning
from sphinx.util.console import colorize
@@ -50,12 +47,12 @@ VERBOSITY_MAP.update({
2: logging.DEBUG,
})
-COLOR_MAP = defaultdict(lambda: 'blue') # type: Dict[int, unicode]
-COLOR_MAP.update({
- logging.ERROR: 'darkred',
- logging.WARNING: 'red',
- logging.DEBUG: 'darkgray',
-})
+COLOR_MAP = defaultdict(lambda: 'blue',
+ {
+ logging.ERROR: 'darkred',
+ logging.WARNING: 'red',
+ logging.DEBUG: 'darkgray'
+ })
def getLogger(name):
@@ -101,7 +98,7 @@ class SphinxLogRecord(logging.LogRecord):
def getMessage(self):
# type: () -> str
- message = super(SphinxLogRecord, self).getMessage()
+ message = super().getMessage()
location = getattr(self, 'location', None)
if location:
message = '%s: %s%s' % (location, self.prefix, message)
@@ -125,19 +122,19 @@ class SphinxLoggerAdapter(logging.LoggerAdapter):
"""LoggerAdapter allowing ``type`` and ``subtype`` keywords."""
def log(self, level, msg, *args, **kwargs): # type: ignore
- # type: (Union[int, str], unicode, Any, Any) -> None
+ # type: (Union[int, str], str, Any, Any) -> None
if isinstance(level, int):
- super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs)
+ super().log(level, msg, *args, **kwargs)
else:
levelno = LEVEL_NAMES[level]
- super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs)
+ super().log(levelno, msg, *args, **kwargs)
def verbose(self, msg, *args, **kwargs):
- # type: (unicode, Any, Any) -> None
+ # type: (str, Any, Any) -> None
self.log(VERBOSE, msg, *args, **kwargs)
def process(self, msg, kwargs): # type: ignore
- # type: (unicode, Dict) -> Tuple[unicode, Dict]
+ # type: (str, Dict) -> Tuple[str, Dict]
extra = kwargs.setdefault('extra', {})
if 'type' in kwargs:
extra['type'] = kwargs.pop('type')
@@ -162,28 +159,7 @@ class WarningStreamHandler(logging.StreamHandler):
pass
-class NewLineStreamHandlerPY2(logging.StreamHandler):
- """StreamHandler which switches line terminator by record.nonl flag."""
-
- def emit(self, record):
- # type: (logging.LogRecord) -> None
- try:
- self.acquire()
- stream = self.stream
- if getattr(record, 'nonl', False):
- # remove return code forcely when nonl=True
- self.stream = StringIO()
- super(NewLineStreamHandlerPY2, self).emit(record)
- stream.write(self.stream.getvalue()[:-1])
- stream.flush()
- else:
- super(NewLineStreamHandlerPY2, self).emit(record)
- finally:
- self.stream = stream
- self.release()
-
-
-class NewLineStreamHandlerPY3(logging.StreamHandler):
+class NewLineStreamHandler(logging.StreamHandler):
"""StreamHandler which switches line terminator by record.nonl flag."""
def emit(self, record):
@@ -193,24 +169,18 @@ class NewLineStreamHandlerPY3(logging.StreamHandler):
if getattr(record, 'nonl', False):
# skip appending terminator when nonl=True
self.terminator = ''
- super(NewLineStreamHandlerPY3, self).emit(record)
+ super().emit(record)
finally:
self.terminator = '\n'
self.release()
-if PY2:
- NewLineStreamHandler = NewLineStreamHandlerPY2
-else:
- NewLineStreamHandler = NewLineStreamHandlerPY3
-
-
class MemoryHandler(logging.handlers.BufferingHandler):
"""Handler buffering all logs."""
def __init__(self):
# type: () -> None
- super(MemoryHandler, self).__init__(-1)
+ super().__init__(-1)
def shouldFlush(self, record):
# type: (logging.LogRecord) -> bool
@@ -315,7 +285,54 @@ def skip_warningiserror(skip=True):
handler.removeFilter(disabler)
-class LogCollector(object):
+@contextmanager
+def prefixed_warnings(prefix):
+ # type: (str) -> Generator
+ """Prepend prefix to all records for a while.
+
+ For example::
+
+ >>> with prefixed_warnings("prefix:"):
+ >>> logger.warning('Warning message!') # => prefix: Warning message!
+
+ .. versionadded:: 2.0
+ """
+ logger = logging.getLogger(NAMESPACE)
+ warning_handler = None
+ for handler in logger.handlers:
+ if isinstance(handler, WarningStreamHandler):
+ warning_handler = handler
+ break
+ else:
+ # warning stream not found
+ yield
+ return
+
+ prefix_filter = None
+ for _filter in warning_handler.filters:
+ if isinstance(_filter, MessagePrefixFilter):
+ prefix_filter = _filter
+ break
+
+ if prefix_filter:
+ # already prefixed
+ try:
+ previous = prefix_filter.prefix
+ prefix_filter.prefix = prefix
+ yield
+ finally:
+ prefix_filter.prefix = previous
+ else:
+ # not prefixed yet
+ try:
+ prefix_filter = MessagePrefixFilter(prefix)
+ warning_handler.addFilter(prefix_filter)
+ yield
+ finally:
+ warning_handler.removeFilter(prefix_filter)
+
+
+class LogCollector:
def __init__(self):
# type: () -> None
self.logs = [] # type: List[logging.LogRecord]
@@ -341,7 +358,7 @@ class InfoFilter(logging.Filter):
def is_suppressed_warning(type, subtype, suppress_warnings):
- # type: (unicode, unicode, List[unicode]) -> bool
+ # type: (str, str, List[str]) -> bool
"""Check the warning is suppressed or not."""
if type is None:
return False
@@ -366,7 +383,7 @@ class WarningSuppressor(logging.Filter):
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
- super(WarningSuppressor, self).__init__()
+ super().__init__()
def filter(self, record):
# type: (logging.LogRecord) -> bool
@@ -392,7 +409,7 @@ class WarningIsErrorFilter(logging.Filter):
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
- super(WarningIsErrorFilter, self).__init__()
+ super().__init__()
def filter(self, record):
# type: (logging.LogRecord) -> bool
@@ -423,6 +440,21 @@ class DisableWarningIsErrorFilter(logging.Filter):
return True
+class MessagePrefixFilter(logging.Filter):
+ """Prepend prefix to all records."""
+
+ def __init__(self, prefix):
+ # type: (str) -> None
+ self.prefix = prefix
+ super().__init__()
+
+ def filter(self, record):
+ # type: (logging.LogRecord) -> bool
+ if self.prefix:
+ record.msg = self.prefix + ' ' + record.msg
+ return True
+
+
class SphinxLogRecordTranslator(logging.Filter):
"""Converts a log record to one Sphinx expects
@@ -434,7 +466,7 @@ class SphinxLogRecordTranslator(logging.Filter):
def __init__(self, app):
# type: (Sphinx) -> None
self.app = app
- super(SphinxLogRecordTranslator, self).__init__()
+ super().__init__()
def filter(self, record): # type: ignore
# type: (SphinxWarningLogRecord) -> bool
@@ -485,18 +517,18 @@ def get_node_location(node):
class ColorizeFormatter(logging.Formatter):
def format(self, record):
# type: (logging.LogRecord) -> str
- message = super(ColorizeFormatter, self).format(record)
+ message = super().format(record)
color = getattr(record, 'color', None)
if color is None:
color = COLOR_MAP.get(record.levelno)
if color:
- return colorize(color, message) # type: ignore
+ return colorize(color, message)
else:
return message
-class SafeEncodingWriter(object):
+class SafeEncodingWriter:
"""Stream writer which ignores UnicodeEncodeError silently"""
def __init__(self, stream):
# type: (IO) -> None
@@ -504,7 +536,7 @@ class SafeEncodingWriter(object):
self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
def write(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
try:
self.stream.write(data)
except UnicodeEncodeError:
@@ -518,14 +550,14 @@ class SafeEncodingWriter(object):
self.stream.flush()
-class LastMessagesWriter(object):
+class LastMessagesWriter:
"""Stream writer which memories last 10 messages to save trackback"""
def __init__(self, app, stream):
# type: (Sphinx, IO) -> None
self.app = app
def write(self, data):
- # type: (unicode) -> None
+ # type: (str) -> None
self.app.messagelog.append(data)
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index bddf84f5c..787169c2e 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.matching
~~~~~~~~~~~~~~~~~~~~
@@ -17,14 +16,14 @@ if False:
def _translate_pattern(pat):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
"""
i, n = 0, len(pat)
- res = '' # type: unicode
+ res = '' # type: str
while i < n:
c = pat[i]
i += 1
@@ -64,11 +63,11 @@ def _translate_pattern(pat):
def compile_matchers(patterns):
- # type: (List[unicode]) -> List[Callable[[unicode], Match[unicode]]]
+ # type: (List[str]) -> List[Callable[[str], Match[str]]]
return [re.compile(_translate_pattern(pat)).match for pat in patterns]
-class Matcher(object):
+class Matcher:
"""A pattern matcher for Multiple shell-style glob patterns.
Note: this modifies the patterns to work with copy_asset().
@@ -76,27 +75,27 @@ class Matcher(object):
"""
def __init__(self, patterns):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
expanded = [pat[3:] for pat in patterns if pat.startswith('**/')]
self.patterns = compile_matchers(patterns + expanded)
def __call__(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return self.match(string)
def match(self, string):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return any(pat(string) for pat in self.patterns)
DOTFILES = Matcher(['**/.*'])
-_pat_cache = {} # type: Dict[unicode, Pattern]
+_pat_cache = {} # type: Dict[str, Pattern]
def patmatch(name, pat):
- # type: (unicode, unicode) -> Match[unicode]
+ # type: (str, str) -> Match[str]
"""Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
@@ -104,7 +103,7 @@ def patmatch(name, pat):
def patfilter(names, pat):
- # type: (List[unicode], unicode) -> List[unicode]
+ # type: (List[str], str) -> List[str]
"""Return the subset of the list NAMES that match PAT.
Adapted from fnmatch module.
diff --git a/sphinx/util/math.py b/sphinx/util/math.py
index b960613c0..a474a83f7 100644
--- a/sphinx/util/math.py
+++ b/sphinx/util/math.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.math
~~~~~~~~~~~~~~~~
@@ -13,31 +12,29 @@
if False:
# For type annotation
from docutils import nodes # NOQA
- from docutils.writers.html4css1 import Writer # NOQA
+ from sphinx.builders.html import HTMLTranslator # NOQA
def get_node_equation_number(writer, node):
- # type: (Writer, nodes.Node) -> unicode
+ # type: (HTMLTranslator, nodes.math_block) -> str
if writer.builder.config.math_numfig and writer.builder.config.numfig:
figtype = 'displaymath'
if writer.builder.name == 'singlehtml':
- key = u"%s/%s" % (writer.docnames[-1], figtype)
+ key = "%s/%s" % (writer.docnames[-1], figtype)
else:
key = figtype
id = node['ids'][0]
number = writer.builder.fignumbers.get(key, {}).get(id, ())
- number = '.'.join(map(str, number))
+ return '.'.join(map(str, number))
else:
- number = node['number']
-
- return number
+ return node['number']
def wrap_displaymath(text, label, numbering):
- # type: (unicode, unicode, bool) -> unicode
+ # type: (str, str, bool) -> str
def is_equation(part):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return part.strip()
if label is None:
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 03e06c416..ddf78185e 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.nodes
~~~~~~~~~~~~~~~~~
@@ -8,12 +7,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import re
+from typing import Any, cast
from docutils import nodes
-from six import text_type
from sphinx import addnodes
from sphinx.locale import __
@@ -21,7 +19,9 @@ from sphinx.util import logging
if False:
# For type annotation
- from typing import Any, Callable, Iterable, List, Set, Tuple, Optional # NOQA
+ from typing import Any, Callable, Iterable, List, Optional, Set, Tuple, Type # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
+ from docutils.statemachine import StringList # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.utils.tags import Tags # NOQA
@@ -33,6 +33,61 @@ explicit_title_re = re.compile(r'^(.+?)\s*(?<!\x00)<(.*?)>$', re.DOTALL)
caption_ref_re = explicit_title_re # b/w compat alias
+class NodeMatcher:
+ """A helper class for Node.traverse().
+
+ It checks that given node is an instance of specified node-classes and it has
+ specified node-attributes.
+
+ For example, following example searches ``reference`` node having ``refdomain``
+ and ``reftype`` attributes::
+
+ matcher = NodeMatcher(nodes.reference, refdomain='std', reftype='citation')
+ doctree.traverse(matcher)
+ # => [<reference ...>, <reference ...>, ...]
+
+ A special value ``typing.Any`` matches any kind of node-attributes. For example,
+ following example searches ``reference`` node having ``refdomain`` attributes::
+
+ from typing import Any
+ matcher = NodeMatcher(nodes.reference, refdomain=Any)
+ doctree.traverse(matcher)
+ # => [<reference ...>, <reference ...>, ...]
+ """
+
+ def __init__(self, *classes, **attrs):
+ # type: (Type[nodes.Node], Any) -> None
+ self.classes = classes
+ self.attrs = attrs
+
+ def match(self, node):
+ # type: (nodes.Node) -> bool
+ try:
+ if self.classes and not isinstance(node, self.classes):
+ return False
+
+ if self.attrs:
+ if not isinstance(node, nodes.Element):
+ return False
+
+ for key, value in self.attrs.items():
+ if key not in node:
+ return False
+ elif value is Any:
+ continue
+ elif node.get(key) != value:
+ return False
+
+ return True
+ except Exception:
+ # for non-Element nodes
+ return False
+
+ def __call__(self, node):
+ # type: (nodes.Node) -> bool
+ return self.match(node)
+
+
def get_full_module_name(node):
# type: (nodes.Node) -> str
"""
@@ -45,7 +100,7 @@ def get_full_module_name(node):
def repr_domxml(node, length=80):
- # type: (nodes.Node, Optional[int]) -> unicode
+ # type: (nodes.Node, Optional[int]) -> str
"""
return DOM XML representation of the specified node like:
'<paragraph translatable="False"><inline classes="versionmodified">New in version...'
@@ -59,14 +114,14 @@ def repr_domxml(node, length=80):
try:
text = node.asdom().toxml()
except Exception:
- text = text_type(node)
+ text = str(node)
if length and len(text) > length:
text = text[:length] + '...'
return text
def apply_source_workaround(node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# workaround: nodes.term have wrong rawsource if classifier is specified.
# The behavior of docutils-0.11, 0.12 is:
# * when ``term text : classifier1 : classifier2`` is specified,
@@ -195,9 +250,9 @@ META_TYPE_NODES = (
def extract_messages(doctree):
- # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, unicode]]
+ # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, str]]
"""Extract translatable messages from a document tree."""
- for node in doctree.traverse(is_translatable):
+ for node in doctree.traverse(is_translatable): # type: nodes.Element
if isinstance(node, addnodes.translatable):
for msg in node.extract_original_messages():
yield node, msg
@@ -212,7 +267,7 @@ def extract_messages(doctree):
msg += '\n :alt: %s' % node['alt']
elif isinstance(node, META_TYPE_NODES):
msg = node.rawcontent
- elif is_pending_meta(node):
+ elif isinstance(node, nodes.pending) and is_pending_meta(node):
msg = node.details['nodes'][0].rawcontent
else:
msg = node.rawsource.replace('\n', ' ').strip()
@@ -223,7 +278,7 @@ def extract_messages(doctree):
def find_source_node(node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
for pnode in traverse_parent(node):
if pnode.source:
return pnode.source
@@ -231,7 +286,7 @@ def find_source_node(node):
def traverse_parent(node, cls=None):
- # type: (nodes.Node, Any) -> Iterable[nodes.Node]
+ # type: (nodes.Element, Any) -> Iterable[nodes.Element]
while node:
if cls is None or isinstance(node, cls):
yield node
@@ -239,13 +294,9 @@ def traverse_parent(node, cls=None):
def traverse_translatable_index(doctree):
- # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, List[unicode]]]
+ # type: (nodes.Element) -> Iterable[Tuple[nodes.Element, List[str]]]
"""Traverse translatable index node from a document tree."""
- def is_block_index(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, addnodes.index) and \
- node.get('inline') is False
- for node in doctree.traverse(is_block_index):
+ for node in doctree.traverse(NodeMatcher(addnodes.index, inline=False)): # type: addnodes.index # NOQA
if 'raw_entries' in node:
entries = node['raw_entries']
else:
@@ -254,7 +305,7 @@ def traverse_translatable_index(doctree):
def nested_parse_with_titles(state, content, node):
- # type: (Any, List[unicode], nodes.Node) -> unicode
+ # type: (Any, StringList, nodes.Node) -> str
"""Version of state.nested_parse() that allows titles and does not require
titles to have the same decoration as the calling document.
@@ -274,7 +325,7 @@ def nested_parse_with_titles(state, content, node):
def clean_astext(node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
"""Like node.astext(), but ignore images."""
node = node.deepcopy()
for img in node.traverse(nodes.image):
@@ -285,7 +336,7 @@ def clean_astext(node):
def split_explicit_title(text):
- # type: (unicode) -> Tuple[bool, unicode, unicode]
+ # type: (str) -> Tuple[bool, str, str]
"""Split role content into title and target, if given."""
match = explicit_title_re.match(text)
if match:
@@ -299,10 +350,10 @@ indextypes = [
def process_index_entry(entry, targetid):
- # type: (unicode, unicode) -> List[Tuple[unicode, unicode, unicode, unicode, unicode]]
+ # type: (str, str) -> List[Tuple[str, str, str, str, str]]
from sphinx.domains.python import pairindextypes
- indexentries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]]
+ indexentries = [] # type: List[Tuple[str, str, str, str, str]]
entry = entry.strip()
oentry = entry
main = ''
@@ -338,15 +389,15 @@ def process_index_entry(entry, targetid):
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed):
- # type: (Builder, Set[unicode], unicode, nodes.Node, Callable, nodes.Node) -> nodes.Node
+ # type: (Builder, Set[str], str, nodes.document, Callable, List[str]) -> nodes.document
"""Inline all toctrees in the *tree*.
Record all docnames in *docnameset*, and output docnames with *colorfunc*.
"""
- tree = tree.deepcopy()
+ tree = cast(nodes.document, tree.deepcopy())
for toctreenode in tree.traverse(addnodes.toctree):
newnodes = []
- includefiles = map(text_type, toctreenode['includefiles'])
+ includefiles = map(str, toctreenode['includefiles'])
for includefile in includefiles:
if includefile not in traversed:
try:
@@ -371,7 +422,7 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed
def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
- # type: (Builder, unicode, unicode, unicode, nodes.Node, unicode) -> nodes.reference
+ # type: (Builder, str, str, str, nodes.Node, str) -> nodes.reference
"""Shortcut to create a reference node."""
node = nodes.reference('', '', internal=True)
if fromdocname == todocname and targetid:
@@ -395,8 +446,8 @@ def set_source_info(directive, node):
def set_role_source_info(inliner, lineno, node):
- # type: (Any, unicode, nodes.Node) -> None
- node.source, node.line = inliner.reporter.get_source_and_line(lineno)
+ # type: (Inliner, int, nodes.Node) -> None
+ node.source, node.line = inliner.reporter.get_source_and_line(lineno) # type: ignore
NON_SMARTQUOTABLE_PARENT_NODES = (
@@ -447,7 +498,7 @@ def process_only_nodes(document, tags):
# monkey-patch Element.copy to copy the rawsource and line
def _new_copy(self):
- # type: (nodes.Node) -> nodes.Node
+ # type: (nodes.Element) -> nodes.Element
newnode = self.__class__(self.rawsource, **self.attributes)
if isinstance(self, nodes.Element):
newnode.source = self.source
@@ -455,4 +506,4 @@ def _new_copy(self):
return newnode
-nodes.Element.copy = _new_copy
+nodes.Element.copy = _new_copy # type: ignore
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index fb9a2cfa1..077444f1d 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.osutil
~~~~~~~~~~~~~~~~~~
@@ -8,37 +7,30 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import contextlib
import errno
import filecmp
-import locale
import os
import re
import shutil
import sys
import time
import warnings
-from io import BytesIO, StringIO
+from io import StringIO
from os import path
-from six import PY2, PY3, text_type
-
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
if False:
# For type annotation
from typing import Any, Iterator, List, Tuple, Union # NOQA
# Errnos that we need.
-EEXIST = getattr(errno, 'EEXIST', 0)
-ENOENT = getattr(errno, 'ENOENT', 0)
-EPIPE = getattr(errno, 'EPIPE', 0)
-EINVAL = getattr(errno, 'EINVAL', 0)
-
-if PY3:
- unicode = str # special alias for static typing...
+EEXIST = getattr(errno, 'EEXIST', 0) # RemovedInSphinx40Warning
+ENOENT = getattr(errno, 'ENOENT', 0) # RemovedInSphinx40Warning
+EPIPE = getattr(errno, 'EPIPE', 0) # RemovedInSphinx40Warning
+EINVAL = getattr(errno, 'EINVAL', 0) # RemovedInSphinx40Warning
# SEP separates path elements in the canonical file names
#
@@ -49,18 +41,18 @@ SEP = "/"
def os_path(canonicalpath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return canonicalpath.replace(SEP, path.sep)
def canon_path(nativepath):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return path in OS-independent form"""
return nativepath.replace(path.sep, SEP)
def relative_uri(base, to):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
@@ -84,66 +76,33 @@ def relative_uri(base, to):
def ensuredir(path):
- # type: (unicode) -> None
+ # type: (str) -> None
"""Ensure that a path exists."""
- try:
- os.makedirs(path)
- except OSError:
- # If the path is already an existing directory (not a file!),
- # that is OK.
- if not os.path.isdir(path):
- raise
+ os.makedirs(path, exist_ok=True)
-# This function is same as os.walk of Python2.7 except a customization
-# that check UnicodeError.
-# The customization obstacle to replace the function with the os.walk.
def walk(top, topdown=True, followlinks=False):
- # type: (unicode, bool, bool) -> Iterator[Tuple[unicode, List[unicode], List[unicode]]]
- """Backport of os.walk from 2.6, where the *followlinks* argument was
- added.
- """
- names = os.listdir(top)
-
- dirs, nondirs = [], []
- for name in names:
- try:
- fullpath = path.join(top, name)
- except UnicodeError:
- print('%s:: ERROR: non-ASCII filename not supported on this '
- 'filesystem encoding %r, skipped.' % (name, fs_encoding),
- file=sys.stderr)
- continue
- if path.isdir(fullpath):
- dirs.append(name)
- else:
- nondirs.append(name)
-
- if topdown:
- yield top, dirs, nondirs
- for name in dirs:
- fullpath = path.join(top, name)
- if followlinks or not path.islink(fullpath):
- for x in walk(fullpath, topdown, followlinks):
- yield x
- if not topdown:
- yield top, dirs, nondirs
+ # type: (str, bool, bool) -> Iterator[Tuple[str, List[str], List[str]]]
+ warnings.warn('sphinx.util.osutil.walk() is deprecated for removal. '
+ 'Please use os.walk() instead.',
+ RemovedInSphinx40Warning)
+ return os.walk(top, topdown=topdown, followlinks=followlinks)
def mtimes_of_files(dirnames, suffix):
- # type: (List[unicode], unicode) -> Iterator[float]
+ # type: (List[str], str) -> Iterator[float]
for dirname in dirnames:
for root, dirs, files in os.walk(dirname):
for sfile in files:
if sfile.endswith(suffix):
try:
yield path.getmtime(path.join(root, sfile))
- except EnvironmentError:
+ except OSError:
pass
def movefile(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Move a file, removing the destination if it exists."""
if os.path.exists(dest):
try:
@@ -154,7 +113,7 @@ def movefile(source, dest):
def copytimes(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
@@ -162,7 +121,7 @@ def copytimes(source, dest):
def copyfile(source, dest):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
"""Copy a file and its modification times, if possible.
Note: ``copyfile`` skips copying if the file has not been changed"""
@@ -176,15 +135,21 @@ def copyfile(source, dest):
no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
+project_suffix_re = re.compile(' Documentation$')
def make_filename(string):
- # type: (str) -> unicode
+ # type: (str) -> str
return no_fn_re.sub('', string) or 'sphinx'
+def make_filename_from_project(project):
+ # type: (str) -> str
+ return make_filename(project_suffix_re.sub('', project)).lower()
+
+
def ustrftime(format, *args):
- # type: (unicode, Any) -> unicode
+ # type: (str, Any) -> str
"""[DEPRECATED] strftime for unicode strings."""
warnings.warn('sphinx.util.osutil.ustrtime is deprecated for removal',
RemovedInSphinx30Warning, stacklevel=2)
@@ -196,23 +161,17 @@ def ustrftime(format, *args):
if source_date_epoch is not None:
time_struct = time.gmtime(float(source_date_epoch))
args = [time_struct] # type: ignore
- if PY2:
- # if a locale is set, the time strings are encoded in the encoding
- # given by LC_TIME; if that is available, use it
- enc = locale.getlocale(locale.LC_TIME)[1] or 'utf-8'
- return time.strftime(text_type(format).encode(enc), *args).decode(enc)
- else: # Py3
- # On Windows, time.strftime() and Unicode characters will raise UnicodeEncodeError.
- # https://bugs.python.org/issue8304
- try:
- return time.strftime(format, *args)
- except UnicodeEncodeError:
- r = time.strftime(format.encode('unicode-escape').decode(), *args)
- return r.encode().decode('unicode-escape')
+ # On Windows, time.strftime() and Unicode characters will raise UnicodeEncodeError.
+ # https://bugs.python.org/issue8304
+ try:
+ return time.strftime(format, *args)
+ except UnicodeEncodeError:
+ r = time.strftime(format.encode('unicode-escape').decode(), *args)
+ return r.encode().decode('unicode-escape')
def relpath(path, start=os.curdir):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Return a relative filepath to *path* either from the current directory or
from an optional *start* directory.
@@ -226,33 +185,34 @@ def relpath(path, start=os.curdir):
safe_relpath = relpath # for compatibility
-fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() # type: unicode
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
def abspath(pathdir):
- # type: (unicode) -> unicode
+ # type: (str) -> str
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):
try:
pathdir = pathdir.decode(fs_encoding)
except UnicodeDecodeError:
- raise UnicodeDecodeError('multibyte filename not supported on ' # type: ignore
+ raise UnicodeDecodeError('multibyte filename not supported on '
'this filesystem encoding '
'(%r)' % fs_encoding)
return pathdir
def getcwd():
- # type: () -> unicode
- if hasattr(os, 'getcwdu'):
- return os.getcwdu()
+ # type: () -> str
+ warnings.warn('sphinx.util.osutil.getcwd() is deprecated. '
+ 'Please use os.getcwd() instead.',
+ RemovedInSphinx40Warning)
return os.getcwd()
@contextlib.contextmanager
def cd(target_dir):
- # type: (unicode) -> Iterator[None]
- cwd = getcwd()
+ # type: (str) -> Iterator[None]
+ cwd = os.getcwd()
try:
os.chdir(target_dir)
yield
@@ -260,7 +220,7 @@ def cd(target_dir):
os.chdir(cwd)
-class FileAvoidWrite(object):
+class FileAvoidWrite:
"""File-like object that buffers output and only writes if content changed.
Use this class like when writing to a file to avoid touching the original
@@ -273,19 +233,15 @@ class FileAvoidWrite(object):
Objects can be used as context managers.
"""
def __init__(self, path):
- # type: (unicode) -> None
+ # type: (str) -> None
self._path = path
- self._io = None # type: Union[StringIO, BytesIO]
+ self._io = None # type: StringIO
def write(self, data):
- # type: (Union[str, unicode]) -> None
+ # type: (str) -> None
if not self._io:
- if isinstance(data, text_type):
- self._io = StringIO()
- else:
- self._io = BytesIO()
-
- self._io.write(data) # type: ignore
+ self._io = StringIO()
+ self._io.write(data)
def close(self):
# type: () -> None
@@ -296,23 +252,15 @@ class FileAvoidWrite(object):
buf = self.getvalue()
self._io.close()
- r_mode = 'r'
- w_mode = 'w'
- if isinstance(self._io, BytesIO):
- r_mode = 'rb'
- w_mode = 'wb'
-
- old_content = None
-
try:
- with open(self._path, r_mode) as old_f:
+ with open(self._path) as old_f:
old_content = old_f.read()
if old_content == buf:
return
- except IOError:
+ except OSError:
pass
- with open(self._path, w_mode) as f:
+ with open(self._path, 'w') as f:
f.write(buf)
def __enter__(self):
@@ -320,7 +268,7 @@ class FileAvoidWrite(object):
return self
def __exit__(self, type, value, traceback):
- # type: (unicode, unicode, unicode) -> None
+ # type: (str, str, str) -> None
self.close()
def __getattr__(self, name):
@@ -334,7 +282,7 @@ class FileAvoidWrite(object):
def rmtree(path):
- # type: (unicode) -> None
+ # type: (str) -> None
if os.path.isdir(path):
shutil.rmtree(path)
else:
diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py
index 066e3c93a..0a2104ecd 100644
--- a/sphinx/util/parallel.py
+++ b/sphinx/util/parallel.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.parallel
~~~~~~~~~~~~~~~~~~~~
@@ -8,15 +7,12 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import os
import time
import traceback
from math import sqrt
-from six import iteritems
-
try:
import multiprocessing
except ImportError:
@@ -36,7 +32,7 @@ logger = logging.getLogger(__name__)
parallel_available = multiprocessing and (os.name == 'posix')
-class SerialTasks(object):
+class SerialTasks:
"""Has the same interface as ParallelTasks, but executes tasks directly."""
def __init__(self, nproc=1):
@@ -57,7 +53,7 @@ class SerialTasks(object):
pass
-class ParallelTasks(object):
+class ParallelTasks:
"""Executes *nproc* tasks in parallel after forking."""
def __init__(self, nproc):
@@ -115,7 +111,7 @@ class ParallelTasks(object):
def _join_one(self):
# type: () -> None
- for tid, pipe in iteritems(self._precvs):
+ for tid, pipe in self._precvs.items():
if pipe.poll():
exc, logs, result = pipe.recv()
if exc:
@@ -137,7 +133,7 @@ class ParallelTasks(object):
def make_chunks(arguments, nproc, maxbatch=10):
- # type: (Sequence[unicode], int, int) -> List[Any]
+ # type: (Sequence[str], int, int) -> List[Any]
# determine how many documents to read in one go
nargs = len(arguments)
chunksize = nargs // nproc
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 88530d979..1b1821857 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.png
~~~~~~~~~~~~~~~
@@ -22,7 +21,7 @@ IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
def read_png_depth(filename):
- # type: (unicode) -> int
+ # type: (str) -> int
"""Read the special tEXt chunk indicating the depth from a PNG file."""
with open(filename, 'rb') as f:
f.seek(- (LEN_IEND + LEN_DEPTH), 2)
@@ -35,7 +34,7 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
- # type: (unicode, int) -> None
+ # type: (str, int) -> None
"""Write the special tEXt chunk indicating the depth to a PNG file.
The chunk is placed immediately before the special IEND chunk.
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index 8bcf7e4f8..df310368e 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.pycompat
~~~~~~~~~~~~~~~~~~~~
@@ -9,130 +8,79 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
import sys
+import warnings
+from html import escape as htmlescape # NOQA
+from io import TextIOWrapper # NOQA
+from textwrap import indent # NOQA
-from six import PY3, text_type, exec_
+from sphinx.deprecation import RemovedInSphinx40Warning
+from sphinx.locale import __
+from sphinx.util import logging
if False:
# For type annotation
from typing import Any, Callable, Generator # NOQA
+logger = logging.getLogger(__name__)
+
+
NoneType = type(None)
# ------------------------------------------------------------------------------
# Python 2/3 compatibility
# prefix for Unicode strings
-if PY3:
- u = ''
-else:
- u = 'u'
-
-
-# TextIOWrapper
-if PY3:
- from io import TextIOWrapper
-else:
- def TextIOWrapper(stream, encoding):
- # type: (file, str) -> Any
- return codecs.lookup(encoding or 'ascii')[2](stream)
+u = '' # RemovedInSphinx40Warning
# sys_encoding: some kind of default system encoding; should be used with
# a lenient error handler
-if PY3:
- sys_encoding = sys.getdefaultencoding()
-else:
- sys_encoding = __import__('locale').getpreferredencoding()
+sys_encoding = sys.getdefaultencoding()
# terminal_safe(): safely encode a string for printing to the terminal
-if PY3:
- def terminal_safe(s):
- # type: (unicode) -> unicode
- return s.encode('ascii', 'backslashreplace').decode('ascii')
-else:
- def terminal_safe(s):
- # type: (unicode) -> unicode
- return s.encode('ascii', 'backslashreplace')
+def terminal_safe(s):
+ # type: (str) -> str
+ return s.encode('ascii', 'backslashreplace').decode('ascii')
# convert_with_2to3():
-if PY3:
- # support for running 2to3 over config files
- def convert_with_2to3(filepath):
- # type: (unicode) -> unicode
- from lib2to3.refactor import RefactoringTool, get_fixers_from_package
- from lib2to3.pgen2.parse import ParseError
- fixers = get_fixers_from_package('lib2to3.fixes')
- refactoring_tool = RefactoringTool(fixers)
- source = refactoring_tool._read_python_source(filepath)[0]
- try:
- tree = refactoring_tool.refactor_string(source, 'conf.py')
- except ParseError as err:
- # do not propagate lib2to3 exceptions
- lineno, offset = err.context[1]
- # try to match ParseError details with SyntaxError details
- raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
- return text_type(tree)
-else:
- # no need to refactor on 2.x versions
- convert_with_2to3 = None
-
-
-# htmlescape()
-if PY3:
- from html import escape as htmlescape
-else:
- from cgi import escape as htmlescape # NOQA
-
-
-# UnicodeMixin
-if PY3:
- class UnicodeMixin(object):
- """Mixin class to handle defining the proper __str__/__unicode__
- methods in Python 2 or 3."""
-
- def __str__(self):
- return self.__unicode__()
-else:
- class UnicodeMixin(object):
- """Mixin class to handle defining the proper __str__/__unicode__
- methods in Python 2 or 3."""
-
- def __str__(self):
- # type: () -> str
- return self.__unicode__().encode('utf8') # type: ignore
-
-
-# indent()
-if PY3:
- from textwrap import indent
-else:
- # backport from python3
- def indent(text, prefix, predicate=None):
- # type: (unicode, unicode, Callable) -> unicode
- if predicate is None:
- def predicate(line):
- # type: (unicode) -> unicode
- return line.strip()
-
- def prefixed_lines():
- # type: () -> Generator
- for line in text.splitlines(True):
- yield (prefix + line if predicate(line) else line)
- return ''.join(prefixed_lines())
+# support for running 2to3 over config files
+def convert_with_2to3(filepath):
+ # type: (str) -> str
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ from lib2to3.pgen2.parse import ParseError
+ fixers = get_fixers_from_package('lib2to3.fixes')
+ refactoring_tool = RefactoringTool(fixers)
+ source = refactoring_tool._read_python_source(filepath)[0]
+ try:
+ tree = refactoring_tool.refactor_string(source, 'conf.py')
+ except ParseError as err:
+ # do not propagate lib2to3 exceptions
+ lineno, offset = err.context[1]
+ # try to match ParseError details with SyntaxError details
+ raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
+ return str(tree)
+
+
+class UnicodeMixin:
+ """Mixin class to handle defining the proper __str__/__unicode__
+ methods in Python 2 or 3.
+
+ .. deprecated:: 2.0
+ """
+ def __str__(self):
+ warnings.warn('UnicodeMixin is deprecated',
+ RemovedInSphinx40Warning, stacklevel=2)
+ return self.__unicode__()
def execfile_(filepath, _globals, open=open):
- # type: (unicode, Any, Callable) -> None
+ # type: (str, Any, Callable) -> None
from sphinx.util.osutil import fs_encoding
- # get config source -- 'b' is a no-op under 2.x, while 'U' is
- # ignored under 3.x (but 3.x compile() accepts \r\n newlines)
- mode = 'rb' if PY3 else 'rbU'
- with open(filepath, mode) as f:
+ with open(filepath, 'rb') as f:
source = f.read()
# compile to a code object, handle syntax errors
@@ -140,11 +88,14 @@ def execfile_(filepath, _globals, open=open):
try:
code = compile(source, filepath_enc, 'exec')
except SyntaxError:
- if convert_with_2to3:
- # maybe the file uses 2.x syntax; try to refactor to
- # 3.x syntax using 2to3
- source = convert_with_2to3(filepath)
- code = compile(source, filepath_enc, 'exec')
- else:
- raise
- exec_(code, _globals)
+ # maybe the file uses 2.x syntax; try to refactor to
+ # 3.x syntax using 2to3
+ source = convert_with_2to3(filepath)
+ code = compile(source, filepath_enc, 'exec')
+ # TODO: When support for evaluating Python 2 syntax is removed,
+ # deprecate convert_with_2to3().
+ logger.warning(__('Support for evaluating Python 2 syntax is deprecated '
+ 'and will be removed in Sphinx 4.0. '
+ 'Convert %s to Python 3 syntax.'),
+ filepath)
+ exec(code, _globals)
diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py
index 65ddaedb7..9abd4f2f5 100644
--- a/sphinx/util/requests.py
+++ b/sphinx/util/requests.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.requests
~~~~~~~~~~~~~~~~~~~~
@@ -9,15 +8,12 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
-
import warnings
from contextlib import contextmanager
+from urllib.parse import urlsplit
import pkg_resources
import requests
-from six import string_types
-from six.moves.urllib.parse import urlsplit
try:
from requests.packages.urllib3.exceptions import SSLError
@@ -48,7 +44,7 @@ except ImportError:
# try to load requests[security] (but only if SSL is available)
try:
- import ssl
+ import ssl # NOQA
except ImportError:
pass
else:
@@ -56,24 +52,7 @@ else:
pkg_resources.require(['requests[security]'])
except (pkg_resources.DistributionNotFound,
pkg_resources.VersionConflict):
- if not getattr(ssl, 'HAS_SNI', False):
- # don't complain on each url processed about the SSL issue
- if InsecurePlatformWarning:
- requests.packages.urllib3.disable_warnings(InsecurePlatformWarning)
- warnings.warn(
- 'Some links may return broken results due to being unable to '
- 'check the Server Name Indication (SNI) in the returned SSL cert '
- 'against the hostname in the url requested. Recommended to '
- 'install "requests[security]" as a dependency or upgrade to '
- 'a python version with SNI support (Python 3 and Python 2.7.9+).'
- )
- except pkg_resources.UnknownExtra:
- warnings.warn(
- 'Some links may return broken results due to being unable to '
- 'check the Server Name Indication (SNI) in the returned SSL cert '
- 'against the hostname in the url requested. Recommended to '
- 'install requests-2.4.1+.'
- )
+ pass # ignored
if False:
# For type annotation
@@ -108,7 +87,7 @@ def ignore_insecure_warning(**kwargs):
def _get_tls_cacert(url, config):
- # type: (unicode, Config) -> Union[str, bool]
+ # type: (str, Config) -> Union[str, bool]
"""Get additional CA cert for a specific URL.
This also returns ``False`` if verification is disabled.
@@ -120,7 +99,7 @@ def _get_tls_cacert(url, config):
certs = getattr(config, 'tls_cacerts', None)
if not certs:
return True
- elif isinstance(certs, (string_types, tuple)):
+ elif isinstance(certs, (str, tuple)):
return certs # type: ignore
else:
hostname = urlsplit(url)[1]
@@ -131,7 +110,7 @@ def _get_tls_cacert(url, config):
def get(url, **kwargs):
- # type: (unicode, Any) -> requests.Response
+ # type: (str, Any) -> requests.Response
"""Sends a GET request like requests.get().
This sets up User-Agent header and TLS verification automatically."""
@@ -145,7 +124,7 @@ def get(url, **kwargs):
def head(url, **kwargs):
- # type: (unicode, Any) -> requests.Response
+ # type: (str, Any) -> requests.Response
"""Sends a HEAD request like requests.head().
This sets up User-Agent header and TLS verification automatically."""
diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py
index c46389d7e..20ae364e3 100644
--- a/sphinx/util/rst.py
+++ b/sphinx/util/rst.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.rst
~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import absolute_import
import re
from contextlib import contextmanager
@@ -18,18 +16,22 @@ from docutils.parsers.rst.languages import en as english
from docutils.utils import Reporter
from sphinx.locale import __
+from sphinx.util import docutils
from sphinx.util import logging
if False:
# For type annotation
from typing import Generator # NOQA
+ from docutils.statemachine import StringList # NOQA
-symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])') # symbols without dot(0x2e)
logger = logging.getLogger(__name__)
+docinfo_re = re.compile(':\\w+:.*?')
+symbols_re = re.compile(r'([!-\-/:-@\[-`{-~])') # symbols without dot(0x2e)
+
def escape(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
text = symbols_re.sub(r'\\\1', text)
text = re.sub(r'^\.', r'\.', text) # escape a dot at top
return text
@@ -37,15 +39,47 @@ def escape(text):
@contextmanager
def default_role(docname, name):
- # type: (unicode, unicode) -> Generator
+ # type: (str, str) -> Generator
if name:
dummy_reporter = Reporter('', 4, 4)
role_fn, _ = roles.role(name, english, 0, dummy_reporter)
if role_fn:
- roles._roles[''] = role_fn
+ docutils.register_role('', role_fn)
else:
logger.warning(__('default role %s not found'), name, location=docname)
yield
- roles._roles.pop('', None) # if a document has set a local default role
+ docutils.unregister_role('')
+
+
+def prepend_prolog(content, prolog):
+ # type: (StringList, str) -> None
+ """Prepend a string to content body as prolog."""
+ if prolog:
+ pos = 0
+ for line in content:
+ if docinfo_re.match(line):
+ pos += 1
+ else:
+ break
+
+ if pos > 0:
+ # insert a blank line after docinfo
+ content.insert(pos, '', '<generated>', 0)
+ pos += 1
+
+ # insert prolog (after docinfo if exists)
+ for lineno, line in enumerate(prolog.splitlines()):
+ content.insert(pos + lineno, line, '<rst_prolog>', lineno)
+
+ content.insert(pos + lineno + 1, '', '<generated>', 0)
+
+
+def append_epilog(content, epilog):
+ # type: (StringList, str) -> None
+ """Append a string to content body as epilog."""
+ if epilog:
+ content.append('', '<generated>', 0)
+ for lineno, line in enumerate(epilog.splitlines()):
+ content.append(line, '<rst_epilog>', lineno)
diff --git a/sphinx/util/smartypants.py b/sphinx/util/smartypants.py
index 03fc1816c..7450e07b8 100644
--- a/sphinx/util/smartypants.py
+++ b/sphinx/util/smartypants.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.smartypants
~~~~~~~~~~~~~~~~~~~~~~~
@@ -25,7 +24,6 @@
See the LICENSE file and the original docutils code for details.
"""
-from __future__ import absolute_import, unicode_literals
import re
@@ -34,101 +32,101 @@ from docutils.utils import smartquotes
from sphinx.util.docutils import __version_info__ as docutils_version
if False: # For type annotation
- from typing import Iterable, Iterator, Tuple # NOQA
-
-
-langquotes = {'af': u'“”‘’',
- 'af-x-altquot': u'„”‚’',
- 'bg': u'„“‚‘', # Bulgarian, https://bg.wikipedia.org/wiki/Кавички
- 'ca': u'«»“”',
- 'ca-x-altquot': u'“”‘’',
- 'cs': u'„“‚‘',
- 'cs-x-altquot': u'»«›‹',
- 'da': u'»«›‹',
- 'da-x-altquot': u'„“‚‘',
- # 'da-x-altquot2': u'””’’',
- 'de': u'„“‚‘',
- 'de-x-altquot': u'»«›‹',
- 'de-ch': u'«»‹›',
- 'el': u'«»“”',
- 'en': u'“”‘’',
- 'en-uk-x-altquot': u'‘’“”', # Attention: " → ‘ and ' → “ !
- 'eo': u'“”‘’',
- 'es': u'«»“”',
- 'es-x-altquot': u'“”‘’',
- 'et': u'„“‚‘', # no secondary quote listed in
- 'et-x-altquot': u'«»‹›', # the sources above (wikipedia.org)
- 'eu': u'«»‹›',
- 'fi': u'””’’',
- 'fi-x-altquot': u'»»››',
- 'fr': (u'« ', u' »', u'“', u'”'), # full no-break space
- 'fr-x-altquot': (u'« ', u' »', u'“', u'”'), # narrow no-break space
- 'fr-ch': u'«»‹›',
- 'fr-ch-x-altquot': (u'« ', u' »', u'‹ ', u' ›'), # narrow no-break space
+ from typing import Generator, Iterable, Tuple # NOQA
+
+
+langquotes = {'af': '“”‘’',
+ 'af-x-altquot': '„”‚’',
+ 'bg': '„“‚‘', # Bulgarian, https://bg.wikipedia.org/wiki/Кавички
+ 'ca': '«»“”',
+ 'ca-x-altquot': '“”‘’',
+ 'cs': '„“‚‘',
+ 'cs-x-altquot': '»«›‹',
+ 'da': '»«›‹',
+ 'da-x-altquot': '„“‚‘',
+ # 'da-x-altquot2': '””’’',
+ 'de': '„“‚‘',
+ 'de-x-altquot': '»«›‹',
+ 'de-ch': '«»‹›',
+ 'el': '«»“”',
+ 'en': '“”‘’',
+ 'en-uk-x-altquot': '‘’“”', # Attention: " → ‘ and ' → “ !
+ 'eo': '“”‘’',
+ 'es': '«»“”',
+ 'es-x-altquot': '“”‘’',
+ 'et': '„“‚‘', # no secondary quote listed in
+ 'et-x-altquot': '«»‹›', # the sources above (wikipedia.org)
+ 'eu': '«»‹›',
+ 'fi': '””’’',
+ 'fi-x-altquot': '»»››',
+ 'fr': ('« ', ' »', '“', '”'), # full no-break space
+ 'fr-x-altquot': ('« ', ' »', '“', '”'), # narrow no-break space
+ 'fr-ch': '«»‹›',
+ 'fr-ch-x-altquot': ('« ', ' »', '‹ ', ' ›'), # narrow no-break space
# http://typoguide.ch/
- 'gl': u'«»“”',
- 'he': u'”“»«', # Hebrew is RTL, test position:
- 'he-x-altquot': u'„”‚’', # low quotation marks are opening.
- # 'he-x-altquot': u'“„‘‚', # RTL: low quotation marks opening
- 'hr': u'„”‘’', # https://hrvatska-tipografija.com/polunavodnici/
- 'hr-x-altquot': u'»«›‹',
- 'hsb': u'„“‚‘',
- 'hsb-x-altquot': u'»«›‹',
- 'hu': u'„”«»',
- 'is': u'„“‚‘',
- 'it': u'«»“”',
- 'it-ch': u'«»‹›',
- 'it-x-altquot': u'“”‘’',
- # 'it-x-altquot2': u'“„‘‚', # [7] in headlines
- 'ja': u'「」『』',
- 'lt': u'„“‚‘',
- 'lv': u'„“‚‘',
- 'mk': u'„“‚‘', # Macedonian,
+ 'gl': '«»“”',
+ 'he': '”“»«', # Hebrew is RTL, test position:
+ 'he-x-altquot': '„”‚’', # low quotation marks are opening.
+ # 'he-x-altquot': '“„‘‚', # RTL: low quotation marks opening
+ 'hr': '„”‘’', # https://hrvatska-tipografija.com/polunavodnici/
+ 'hr-x-altquot': '»«›‹',
+ 'hsb': '„“‚‘',
+ 'hsb-x-altquot': '»«›‹',
+ 'hu': '„”«»',
+ 'is': '„“‚‘',
+ 'it': '«»“”',
+ 'it-ch': '«»‹›',
+ 'it-x-altquot': '“”‘’',
+ # 'it-x-altquot2': '“„‘‚', # [7] in headlines
+ 'ja': '「」『』',
+ 'lt': '„“‚‘',
+ 'lv': '„“‚‘',
+ 'mk': '„“‚‘', # Macedonian,
# https://mk.wikipedia.org/wiki/Правопис_и_правоговор_на_македонскиот_јазик
- 'nl': u'“”‘’',
- 'nl-x-altquot': u'„”‚’',
- # 'nl-x-altquot2': u'””’’',
- 'nb': u'«»’’', # Norsk bokmål (canonical form 'no')
- 'nn': u'«»’’', # Nynorsk [10]
- 'nn-x-altquot': u'«»‘’', # [8], [10]
- # 'nn-x-altquot2': u'«»«»', # [9], [10]
- # 'nn-x-altquot3': u'„“‚‘', # [10]
- 'no': u'«»’’', # Norsk bokmål [10]
- 'no-x-altquot': u'«»‘’', # [8], [10]
- # 'no-x-altquot2': u'«»«»', # [9], [10]
- # 'no-x-altquot3': u'„“‚‘', # [10]
- 'pl': u'„”«»',
- 'pl-x-altquot': u'«»‚’',
- # 'pl-x-altquot2': u'„”‚’',
+ 'nl': '“”‘’',
+ 'nl-x-altquot': '„”‚’',
+ # 'nl-x-altquot2': '””’’',
+ 'nb': '«»’’', # Norsk bokmål (canonical form 'no')
+ 'nn': '«»’’', # Nynorsk [10]
+ 'nn-x-altquot': '«»‘’', # [8], [10]
+ # 'nn-x-altquot2': '«»«»', # [9], [10]
+ # 'nn-x-altquot3': '„“‚‘', # [10]
+ 'no': '«»’’', # Norsk bokmål [10]
+ 'no-x-altquot': '«»‘’', # [8], [10]
+ # 'no-x-altquot2': '«»«»', # [9], [10]
+ # 'no-x-altquot3': '„“‚‘', # [10]
+ 'pl': '„”«»',
+ 'pl-x-altquot': '«»‚’',
+ # 'pl-x-altquot2': '„”‚’',
# https://pl.wikipedia.org/wiki/Cudzys%C5%82%C3%B3w
- 'pt': u'«»“”',
- 'pt-br': u'“”‘’',
- 'ro': u'„”«»',
- 'ru': u'«»„“',
- 'sh': u'„”‚’', # Serbo-Croatian
- 'sh-x-altquot': u'»«›‹',
- 'sk': u'„“‚‘', # Slovak
- 'sk-x-altquot': u'»«›‹',
- 'sl': u'„“‚‘', # Slovenian
- 'sl-x-altquot': u'»«›‹',
- 'sq': u'«»‹›', # Albanian
- 'sq-x-altquot': u'“„‘‚',
- 'sr': u'„”’’',
- 'sr-x-altquot': u'»«›‹',
- 'sv': u'””’’',
- 'sv-x-altquot': u'»»››',
- 'tr': u'“”‘’',
- 'tr-x-altquot': u'«»‹›',
- # 'tr-x-altquot2': u'“„‘‚', # [7] antiquated?
- 'uk': u'«»„“',
- 'uk-x-altquot': u'„“‚‘',
- 'zh-cn': u'“”‘’',
- 'zh-tw': u'「」『』',
+ 'pt': '«»“”',
+ 'pt-br': '“”‘’',
+ 'ro': '„”«»',
+ 'ru': '«»„“',
+ 'sh': '„”‚’', # Serbo-Croatian
+ 'sh-x-altquot': '»«›‹',
+ 'sk': '„“‚‘', # Slovak
+ 'sk-x-altquot': '»«›‹',
+ 'sl': '„“‚‘', # Slovenian
+ 'sl-x-altquot': '»«›‹',
+ 'sq': '«»‹›', # Albanian
+ 'sq-x-altquot': '“„‘‚',
+ 'sr': '„”’’',
+ 'sr-x-altquot': '»«›‹',
+ 'sv': '””’’',
+ 'sv-x-altquot': '»»››',
+ 'tr': '“”‘’',
+ 'tr-x-altquot': '«»‹›',
+ # 'tr-x-altquot2': '“„‘‚', # [7] antiquated?
+ 'uk': '«»„“',
+ 'uk-x-altquot': '„“‚‘',
+ 'zh-cn': '“”‘’',
+ 'zh-tw': '「」『』',
}
def educateQuotes(text, language='en'):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""
Parameter: - text string (unicode or bytes).
- language (`BCP 47` language tag.)
@@ -142,7 +140,7 @@ def educateQuotes(text, language='en'):
try:
apostrophe = smart.apostrophe
except Exception:
- apostrophe = u'’'
+ apostrophe = '’'
# oldtext = text
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
@@ -243,7 +241,7 @@ def educateQuotes(text, language='en'):
def educate_tokens(text_tokens, attr=smartquotes.default_smartypants_attr, language='en'):
- # type: (Iterable[Tuple[str, unicode]], unicode, unicode) -> Iterator
+ # type: (Iterable[Tuple[str, str]], str, str) -> Generator[str, None, None]
"""Return iterator that "educates" the items of `text_tokens`.
This is modified to intercept the ``attr='2'`` as it was used by the
diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py
index a10da7370..5417c4430 100644
--- a/sphinx/util/stemmer/__init__.py
+++ b/sphinx/util/stemmer/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.stemmer
~~~~~~~~~~~~~~~~~~~
@@ -18,9 +17,9 @@ except ImportError:
PYSTEMMER = False
-class BaseStemmer(object):
+class BaseStemmer:
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
raise NotImplementedError()
@@ -30,17 +29,17 @@ class PyStemmer(BaseStemmer):
self.stemmer = _PyStemmer('porter')
def stem(self, word):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return self.stemmer.stemWord(word)
-class StandardStemmer(BaseStemmer, PorterStemmer): # type: ignore
+class StandardStemmer(PorterStemmer, BaseStemmer): # type: ignore
"""All those porter stemmer implementations look hideous;
make at least the stem method nicer.
"""
def stem(self, word): # type: ignore
- # type: (unicode) -> unicode
- return PorterStemmer.stem(self, word, 0, len(word) - 1)
+ # type: (str) -> str
+ return super().stem(word, 0, len(word) - 1)
def get_stemmer():
diff --git a/sphinx/util/stemmer/porter.py b/sphinx/util/stemmer/porter.py
index beb860c9e..51c132c2c 100644
--- a/sphinx/util/stemmer/porter.py
+++ b/sphinx/util/stemmer/porter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.stemmer.porter
~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -14,7 +13,7 @@
only differing from it at the points maked --DEPARTURE-- below.
- See also http://www.tartarus.org/~martin/PorterStemmer
+ See also https://tartarus.org/martin/PorterStemmer/
The algorithm as described in the paper could be exactly replicated
by adjusting the points of DEPARTURE, but this is barely necessary,
@@ -29,7 +28,7 @@
"""
-class PorterStemmer(object):
+class PorterStemmer:
def __init__(self):
# type: () -> None
@@ -43,8 +42,7 @@ class PorterStemmer(object):
should be done before stem(...) is called.
"""
- self.b = "" # type: unicode
- # buffer for word to be stemmed
+ self.b = "" # buffer for word to be stemmed
self.k = 0
self.k0 = 0
self.j = 0 # j is a general offset into the string
@@ -136,7 +134,7 @@ class PorterStemmer(object):
return 1
def ends(self, s):
- # type: (unicode) -> int
+ # type: (str) -> int
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
@@ -149,7 +147,7 @@ class PorterStemmer(object):
return 1
def setto(self, s):
- # type: (unicode) -> None
+ # type: (str) -> None
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
@@ -157,7 +155,7 @@ class PorterStemmer(object):
self.k = self.j + length
def r(self, s):
- # type: (unicode) -> None
+ # type: (str) -> None
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
@@ -398,7 +396,7 @@ class PorterStemmer(object):
self.k = self.k - 1
def stem(self, p, i, j):
- # type: (unicode, int, int) -> unicode
+ # type: (str, int, int) -> str
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py
index 43a351f65..f69682ee0 100644
--- a/sphinx/util/tags.py
+++ b/sphinx/util/tags.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.tags
~~~~~~~~~~~~~~~~
@@ -46,31 +45,31 @@ class BooleanParser(Parser):
return node
-class Tags(object):
+class Tags:
def __init__(self, tags=None):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
self.tags = dict.fromkeys(tags or [], True)
def has(self, tag):
- # type: (unicode) -> bool
+ # type: (str) -> bool
return tag in self.tags
__contains__ = has
def __iter__(self):
- # type: () -> Iterator[unicode]
+ # type: () -> Iterator[str]
return iter(self.tags)
def add(self, tag):
- # type: (unicode) -> None
+ # type: (str) -> None
self.tags[tag] = True
def remove(self, tag):
- # type: (unicode) -> None
+ # type: (str) -> None
self.tags.pop(tag, None)
def eval_condition(self, condition):
- # type: (unicode) -> bool
+ # type: (str) -> bool
# exceptions are handled by the caller
parser = BooleanParser(env, condition, state='variable')
expr = parser.parse_expression()
diff --git a/sphinx/util/template.py b/sphinx/util/template.py
index 5a415d329..f8bece9b4 100644
--- a/sphinx/util/template.py
+++ b/sphinx/util/template.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.template
~~~~~~~~~~~~~~~~~~~~
@@ -16,6 +15,7 @@ from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from sphinx.jinja2glue import SphinxFileSystemLoader
from sphinx.locale import get_translator
+from sphinx.util import texescape
if False:
# For type annotation
@@ -23,7 +23,7 @@ if False:
from jinja2.loaders import BaseLoader # NOQA
-class BaseRenderer(object):
+class BaseRenderer:
def __init__(self, loader=None):
# type: (BaseLoader) -> None
self.env = SandboxedEnvironment(loader=loader, extensions=['jinja2.ext.i18n'])
@@ -31,23 +31,23 @@ class BaseRenderer(object):
self.env.install_gettext_translations(get_translator()) # type: ignore
def render(self, template_name, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.env.get_template(template_name).render(context)
def render_string(self, source, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return self.env.from_string(source).render(context)
class FileRenderer(BaseRenderer):
def __init__(self, search_path):
- # type: (unicode) -> None
+ # type: (str) -> None
loader = SphinxFileSystemLoader(search_path)
- super(FileRenderer, self).__init__(loader)
+ super().__init__(loader)
@classmethod
def render_from_file(cls, filename, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
return cls(dirname).render(basename, context)
@@ -55,14 +55,14 @@ class FileRenderer(BaseRenderer):
class SphinxRenderer(FileRenderer):
def __init__(self, template_path=None):
- # type: (unicode) -> None
+ # type: (str) -> None
if template_path is None:
template_path = os.path.join(package_dir, 'templates')
- super(SphinxRenderer, self).__init__(template_path)
+ super().__init__(template_path)
@classmethod
def render_from_file(cls, filename, context):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
return FileRenderer.render_from_file(filename, context)
@@ -70,7 +70,11 @@ class LaTeXRenderer(SphinxRenderer):
def __init__(self):
# type: () -> None
template_path = os.path.join(package_dir, 'templates', 'latex')
- super(LaTeXRenderer, self).__init__(template_path)
+ super().__init__(template_path)
+
+ # use texescape as escape filter
+ self.env.filters['e'] = texescape.escape
+ self.env.filters['escape'] = texescape.escape
# use JSP/eRuby like tagging instead because curly bracket; the default
# tagging of jinja2 is not good for LaTeX sources.
diff --git a/sphinx/util/texescape.py b/sphinx/util/texescape.py
index 8d37e0f60..c806bbe7a 100644
--- a/sphinx/util/texescape.py
+++ b/sphinx/util/texescape.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.texescape
~~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,9 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import unicode_literals
+if False:
+ # For type annotation
+ from typing import Dict # NOQA
tex_replacements = [
# map TeX special chars
@@ -65,63 +66,21 @@ tex_replacements = [
('₇', r'\(\sb{\text{7}}\)'),
('₈', r'\(\sb{\text{8}}\)'),
('₉', r'\(\sb{\text{9}}\)'),
- # map Greek alphabet
- ('α', r'\(\alpha\)'),
- ('β', r'\(\beta\)'),
- ('γ', r'\(\gamma\)'),
- ('δ', r'\(\delta\)'),
- ('ε', r'\(\epsilon\)'),
- ('ζ', r'\(\zeta\)'),
- ('η', r'\(\eta\)'),
- ('θ', r'\(\theta\)'),
- ('ι', r'\(\iota\)'),
- ('κ', r'\(\kappa\)'),
- ('λ', r'\(\lambda\)'),
- ('μ', r'\(\mu\)'),
- ('ν', r'\(\nu\)'),
- ('ξ', r'\(\xi\)'),
- ('ο', r'o'),
- ('π', r'\(\pi\)'),
- ('ρ', r'\(\rho\)'),
- ('σ', r'\(\sigma\)'),
- ('τ', r'\(\tau\)'),
- ('υ', '\\(\\upsilon\\)'),
- ('φ', r'\(\phi\)'),
- ('χ', r'\(\chi\)'),
- ('ψ', r'\(\psi\)'),
- ('ω', r'\(\omega\)'),
- ('Α', r'A'),
- ('Β', r'B'),
- ('Γ', r'\(\Gamma\)'),
- ('Δ', r'\(\Delta\)'),
- ('Ε', r'E'),
- ('Ζ', r'Z'),
- ('Η', r'H'),
- ('Θ', r'\(\Theta\)'),
- ('Ι', r'I'),
- ('Κ', r'K'),
- ('Λ', r'\(\Lambda\)'),
- ('Μ', r'M'),
- ('Ν', r'N'),
- ('Ξ', r'\(\Xi\)'),
- ('Ο', r'O'),
- ('Π', r'\(\Pi\)'),
- ('Ρ', r'P'),
- ('Σ', r'\(\Sigma\)'),
- ('Τ', r'T'),
- ('Υ', '\\(\\Upsilon\\)'),
- ('Φ', r'\(\Phi\)'),
- ('Χ', r'X'),
- ('Ψ', r'\(\Psi\)'),
- ('Ω', r'\(\Omega\)'),
- ('Ω', r'\(\Omega\)'),
+ # Greek alphabet not escaped: pdflatex handles it via textalpha and inputenc
+ # OHM SIGN U+2126 is handled by LaTeX textcomp package
]
-tex_escape_map = {}
+tex_escape_map = {} # type: Dict[int, str]
tex_replace_map = {}
tex_hl_escape_map_new = {}
+def escape(s):
+ # type: (str) -> str
+ """Escape text for LaTeX output."""
+ return s.translate(tex_escape_map)
+
+
def init():
# type: () -> None
for a, b in tex_replacements:
diff --git a/sphinx/util/typing.py b/sphinx/util/typing.py
index a26dac473..ecfd61f01 100644
--- a/sphinx/util/typing.py
+++ b/sphinx/util/typing.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.typing
~~~~~~~~~~~~~~~~~~
@@ -9,19 +8,24 @@
:license: BSD, see LICENSE for details.
"""
-from typing import Callable, Dict, List, Tuple
+from typing import Any, Callable, Dict, List, Tuple, Union
from docutils import nodes
from docutils.parsers.rst.states import Inliner
-from six import PY3
-if PY3:
- unicode = str
+# An entry of Directive.option_spec
+DirectiveOption = Callable[[str], Any]
+
+# Text like nodes which are initialized with text and rawsource
+TextlikeNode = Union[nodes.Text, nodes.TextElement]
# common role functions
-RoleFunction = Callable[[unicode, unicode, unicode, int, Inliner, Dict, List[unicode]],
- Tuple[List[nodes.Node], List[nodes.Node]]]
+RoleFunction = Callable[[str, str, str, int, Inliner, Dict, List[str]],
+ Tuple[List[nodes.Node], List[nodes.system_message]]]
# title getter functions for enumerable nodes (see sphinx.domains.std)
-TitleGetter = Callable[[nodes.Node], unicode]
+TitleGetter = Callable[[nodes.Node], str]
+
+# inventory data on memory
+Inventory = Dict[str, Dict[str, Tuple[str, str, str, str]]]
diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py
index 59496ec02..7837fe517 100644
--- a/sphinx/util/websupport.py
+++ b/sphinx/util/websupport.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.util.websupport
~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
index 78383464c..d39c9538e 100644
--- a/sphinx/versioning.py
+++ b/sphinx/versioning.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.versioning
~~~~~~~~~~~~~~~~~
@@ -9,15 +8,13 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import pickle
import warnings
-from itertools import product
+from itertools import product, zip_longest
from operator import itemgetter
+from os import path
from uuid import uuid4
-from six import iteritems
-from six.moves import cPickle as pickle
-from six.moves import range, zip_longest
-
from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.transforms import SphinxTransform
@@ -102,7 +99,7 @@ def merge_doctrees(old, new, condition):
# choose the old node with the best ratio for each new node and set the uid
# as long as the ratio is under a certain value, in which case we consider
# them not changed but different
- ratios = sorted(iteritems(ratios), key=itemgetter(1)) # type: ignore
+ ratios = sorted(ratios.items(), key=itemgetter(1)) # type: ignore
for (old_node, new_node), ratio in ratios:
if new_node in seen:
continue
@@ -121,7 +118,7 @@ def merge_doctrees(old, new, condition):
def get_ratio(old, new):
- # type: (unicode, unicode) -> float
+ # type: (str, str) -> float
"""Return a "similiarity ratio" (in percent) representing the similarity
between the two strings where 0 is equal and anything above less than equal.
"""
@@ -135,7 +132,7 @@ def get_ratio(old, new):
def levenshtein_distance(a, b):
- # type: (unicode, unicode) -> int
+ # type: (str, str) -> int
"""Return the Levenshtein edit distance between two strings *a* and *b*."""
if a == b:
return 0
@@ -143,7 +140,7 @@ def levenshtein_distance(a, b):
a, b = b, a
if not a:
return len(b)
- previous_row = range(len(b) + 1)
+ previous_row = list(range(len(b) + 1))
for i, column1 in enumerate(a):
current_row = [i + 1]
for j, column2 in enumerate(b):
@@ -151,7 +148,7 @@ def levenshtein_distance(a, b):
deletions = current_row[j] + 1
substitutions = previous_row[j] + (column1 != column2)
current_row.append(min(insertions, deletions, substitutions))
- previous_row = current_row # type: ignore
+ previous_row = current_row
return previous_row[-1]
@@ -159,8 +156,8 @@ class UIDTransform(SphinxTransform):
"""Add UIDs to doctree for versioning."""
default_priority = 880
- def apply(self):
- # type: () -> None
+ def apply(self, **kwargs):
+ # type: (Any) -> None
env = self.env
old_doctree = None
if not env.versioning_condition:
@@ -169,10 +166,10 @@ class UIDTransform(SphinxTransform):
if env.versioning_compare:
# get old doctree
try:
- filename = env.doc2path(env.docname, env.doctreedir, '.doctree')
+ filename = path.join(env.doctreedir, env.docname + '.doctree')
with open(filename, 'rb') as f:
old_doctree = pickle.load(f)
- except EnvironmentError:
+ except OSError:
pass
# add uids for versioning
@@ -183,7 +180,7 @@ class UIDTransform(SphinxTransform):
def prepare(document):
- # type: (nodes.Node) -> None
+ # type: (nodes.document) -> None
"""Simple wrapper for UIDTransform."""
warnings.warn('versioning.prepare() is deprecated. Use UIDTransform instead.',
RemovedInSphinx30Warning, stacklevel=2)
diff --git a/sphinx/websupport/__init__.py b/sphinx/websupport/__init__.py
deleted file mode 100644
index 51d906fa6..000000000
--- a/sphinx/websupport/__init__.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport
- ~~~~~~~~~~~~~~~~~
-
- Base Module for web support functions.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import warnings
-
-from sphinx.deprecation import RemovedInSphinx20Warning
-
-try:
- from sphinxcontrib.websupport import WebSupport # NOQA
- from sphinxcontrib.websupport import errors # NOQA
- from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS # NOQA
- from sphinxcontrib.websupport.storage import StorageBackend # NOQA
-
- warnings.warn('sphinx.websupport module is now provided as sphinxcontrib-websupport. '
- 'sphinx.websupport will be removed at Sphinx-2.0. '
- 'Please use the package instead.',
- RemovedInSphinx20Warning)
-except ImportError:
- warnings.warn('Since Sphinx-1.6, sphinx.websupport module is now separated to '
- 'sphinxcontrib-websupport package. Please add it into your dependency list.')
diff --git a/sphinx/websupport/errors.py b/sphinx/websupport/errors.py
deleted file mode 100644
index 7456659ec..000000000
--- a/sphinx/websupport/errors.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.errors
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- Contains Error classes for the web support package.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.errors import * # NOQA
diff --git a/sphinx/websupport/search/__init__.py b/sphinx/websupport/search/__init__.py
deleted file mode 100644
index e1e871ba0..000000000
--- a/sphinx/websupport/search/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.search
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- Server side search support for the web support package.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.search import BaseSearch, SEARCH_ADAPTERS # NOQA
diff --git a/sphinx/websupport/search/nullsearch.py b/sphinx/websupport/search/nullsearch.py
deleted file mode 100644
index 422b398c9..000000000
--- a/sphinx/websupport/search/nullsearch.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.search.nullsearch
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- The default search adapter, does nothing.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.search.nullsearch import NullSearch # NOQA
diff --git a/sphinx/websupport/search/whooshsearch.py b/sphinx/websupport/search/whooshsearch.py
deleted file mode 100644
index 94cce8ed7..000000000
--- a/sphinx/websupport/search/whooshsearch.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.search.whooshsearch
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Whoosh search adapter.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.search.whooshsearch import WhooshSearch # NOQA
diff --git a/sphinx/websupport/search/xapiansearch.py b/sphinx/websupport/search/xapiansearch.py
deleted file mode 100644
index 4df4769e2..000000000
--- a/sphinx/websupport/search/xapiansearch.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.search.xapiansearch
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Xapian search adapter.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.search.xapiansearch import XapianSearch # NOQA
diff --git a/sphinx/websupport/storage/__init__.py b/sphinx/websupport/storage/__init__.py
deleted file mode 100644
index 727e86da4..000000000
--- a/sphinx/websupport/storage/__init__.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.storage
- ~~~~~~~~~~~~~~~~~~~~~~~~~
-
- Storage for the websupport package.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.storage import StorageBackend # NOQA
diff --git a/sphinx/websupport/storage/differ.py b/sphinx/websupport/storage/differ.py
deleted file mode 100644
index 1358d8645..000000000
--- a/sphinx/websupport/storage/differ.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.storage.differ
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- A differ for creating an HTML representations of proposal diffs
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.storage.differ import CombinedHtmlDiff # NOQA
diff --git a/sphinx/websupport/storage/sqlalchemy_db.py b/sphinx/websupport/storage/sqlalchemy_db.py
deleted file mode 100644
index e1c86dd9d..000000000
--- a/sphinx/websupport/storage/sqlalchemy_db.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.storage.sqlalchemy_db
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- SQLAlchemy table and mapper definitions used by the
- :class:`sphinx.websupport.storage.sqlalchemystorage.SQLAlchemyStorage`.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.storage.sqlalchemy_db import Node, Comment, CommentVote # NOQA
diff --git a/sphinx/websupport/storage/sqlalchemystorage.py b/sphinx/websupport/storage/sqlalchemystorage.py
deleted file mode 100644
index b018ea0a3..000000000
--- a/sphinx/websupport/storage/sqlalchemystorage.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.websupport.storage.sqlalchemystorage
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- An SQLAlchemy storage backend.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinxcontrib.websupport.storage.sqlalchemystorage import SQLAlchemyStorage # NOQA
diff --git a/sphinx/writers/__init__.py b/sphinx/writers/__init__.py
index 79eacbbfb..d8d9db004 100644
--- a/sphinx/writers/__init__.py
+++ b/sphinx/writers/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers
~~~~~~~~~~~~~~
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index 66f3bf8f4..014c0272c 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.html
~~~~~~~~~~~~~~~~~~~
@@ -14,15 +13,17 @@ import os
import posixpath
import sys
import warnings
+from typing import Iterable, cast
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
-from six import string_types
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.builders import Builder
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
+from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
if False:
@@ -47,44 +48,53 @@ class HTMLWriter(Writer):
def __init__(self, builder):
# type: (StandaloneHTMLBuilder) -> None
- Writer.__init__(self)
+ super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
# sadly, this is mostly copied from parent class
- self.visitor = visitor = self.builder.create_translator(self.builder,
- self.document)
+ visitor = self.builder.create_translator(self.document, self.builder)
+ self.visitor = cast(HTMLTranslator, visitor)
self.document.walkabout(visitor)
- self.output = visitor.astext()
+ self.output = self.visitor.astext()
for attr in ('head_prefix', 'stylesheet', 'head', 'body_prefix',
'body_pre_docinfo', 'docinfo', 'body', 'fragment',
'body_suffix', 'meta', 'title', 'subtitle', 'header',
'footer', 'html_prolog', 'html_head', 'html_title',
'html_subtitle', 'html_body', ):
setattr(self, attr, getattr(visitor, attr, None))
- self.clean_meta = ''.join(visitor.meta[2:])
+ self.clean_meta = ''.join(self.visitor.meta[2:])
-class HTMLTranslator(BaseTranslator):
+class HTMLTranslator(SphinxTranslator, BaseTranslator):
"""
Our custom HTML translator.
"""
- def __init__(self, builder, *args, **kwds):
- # type: (StandaloneHTMLBuilder, Any, Any) -> None
- BaseTranslator.__init__(self, *args, **kwds)
- self.highlighter = builder.highlighter
- self.builder = builder
- self.docnames = [builder.current_docname] # for singlehtml builder
- self.manpages_url = builder.config.manpages_url
+ builder = None # type: StandaloneHTMLBuilder
+
+ def __init__(self, *args):
+ # type: (Any) -> None
+ if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
+ document, builder = args
+ else:
+ warnings.warn('The order of arguments for HTMLTranslator has been changed. '
+ 'Please give "document" as 1st and "builder" as 2nd.',
+ RemovedInSphinx40Warning, stacklevel=2)
+ builder, document = args
+ super().__init__(document, builder)
+
+ self.highlighter = self.builder.highlighter
+ self.docnames = [self.builder.current_docname] # for singlehtml builder
+ self.manpages_url = self.config.manpages_url
self.protect_literal_text = 0
- self.permalink_text = builder.config.html_add_permalinks
+ self.permalink_text = self.config.html_add_permalinks
# support backwards-compatible setting to a bool
- if not isinstance(self.permalink_text, string_types):
- self.permalink_text = self.permalink_text and u'\u00B6' or ''
+ if not isinstance(self.permalink_text, str):
+ self.permalink_text = self.permalink_text and '¶' or ''
self.permalink_text = self.encode(self.permalink_text)
- self.secnumber_suffix = builder.config.html_secnumber_suffix
+ self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
@@ -92,25 +102,25 @@ class HTMLTranslator(BaseTranslator):
self.required_params_left = 0
def visit_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# only occurs in the single-file builder
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.docnames.pop()
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
@@ -119,56 +129,56 @@ class HTMLTranslator(BaseTranslator):
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' &#x2192; ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
@@ -178,7 +188,7 @@ class HTMLTranslator(BaseTranslator):
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">)</span>')
# If required parameters are still to come, then put the comma after
@@ -188,7 +198,7 @@ class HTMLTranslator(BaseTranslator):
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
@@ -199,49 +209,49 @@ class HTMLTranslator(BaseTranslator):
self.body.append('<em>')
def depart_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</em>')
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</dd>')
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
@@ -249,10 +259,9 @@ class HTMLTranslator(BaseTranslator):
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
- if self.settings.cloak_email_addresses and \
- atts['href'].startswith('mailto:'):
+ if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
- self.in_mailto = 1
+ self.in_mailto = True
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
@@ -271,21 +280,21 @@ class HTMLTranslator(BaseTranslator):
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_reference(node)
def depart_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
- def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ def visit_comment(self, node): # type: ignore
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
@@ -293,15 +302,15 @@ class HTMLTranslator(BaseTranslator):
self.set_first_last(node)
def visit_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
@@ -321,11 +330,11 @@ class HTMLTranslator(BaseTranslator):
self.secnumber_suffix)
def add_fignumber(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.builder.name == 'singlehtml':
- key = u"%s/%s" % (self.docnames[-1], figtype)
+ key = "%s/%s" % (self.docnames[-1], figtype)
else:
key = figtype
@@ -349,13 +358,13 @@ class HTMLTranslator(BaseTranslator):
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
- format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
+ format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
"""Generate hyperlink targets for listings.
Original visit_bullet_list(), visit_definition_list() and visit_enumerated_list()
@@ -371,30 +380,30 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
- if len(node) == 1 and node[0].tagname == 'toctree':
+ # type: (nodes.Element) -> None
+ if len(node) == 1 and isinstance(node[0], addnodes.toctree):
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
self.generate_targets_for_listing(node)
- BaseTranslator.visit_bullet_list(self, node)
+ super().visit_bullet_list(node)
# overwritten
def visit_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.generate_targets_for_listing(node)
- BaseTranslator.visit_enumerated_list(self, node)
+ super().visit_enumerated_list(node)
# overwritten
def visit_title(self, node):
- # type: (nodes.Node) -> None
- BaseTranslator.visit_title(self, node)
+ # type: (nodes.Element) -> None
+ super().visit_title(node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
if isinstance(node.parent, nodes.table):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
@@ -402,9 +411,9 @@ class HTMLTranslator(BaseTranslator):
if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'):
- self.body.append(u'</a><a class="headerlink" href="#%s" ' %
+ self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] +
- u'title="%s">%s' % (
+ 'title="%s">%s' % (
_('Permalink to this headline'),
self.permalink_text))
elif isinstance(node.parent, nodes.table):
@@ -413,14 +422,14 @@ class HTMLTranslator(BaseTranslator):
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
- BaseTranslator.depart_title(self, node)
+ super().depart_title(node)
# overwritten
def visit_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
- return BaseTranslator.visit_literal_block(self, node)
+ return super().visit_literal_block(node)
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
@@ -442,16 +451,16 @@ class HTMLTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
- BaseTranslator.visit_caption(self, node)
+ super().visit_caption(node)
self.add_fignumber(node.parent)
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</span>')
# append permalink if available
@@ -467,24 +476,24 @@ class HTMLTranslator(BaseTranslator):
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('</div>\n')
else:
- BaseTranslator.depart_caption(self, node)
+ super().depart_caption(node)
def visit_doctest_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_literal_block(node)
# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'blockquote') + '<div>')
def depart_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append(self.starttag(node, 'kbd', '',
CLASS='docutils literal notranslate'))
@@ -494,7 +503,7 @@ class HTMLTranslator(BaseTranslator):
self.protect_literal_text += 1
def depart_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append('</kbd>')
else:
@@ -502,14 +511,15 @@ class HTMLTranslator(BaseTranslator):
self.body.append('</code>')
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'pre'))
names = []
- for production in node:
+ productionlist = cast(Iterable[addnodes.production], node)
+ for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
- for production in node:
+ for production in productionlist:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.starttag(production, 'strong', ''))
@@ -522,24 +532,24 @@ class HTMLTranslator(BaseTranslator):
raise nodes.SkipNode
def depart_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
def depart_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</strong></p>')
# overwritten
@@ -552,18 +562,18 @@ class HTMLTranslator(BaseTranslator):
if isinstance(node.parent, addnodes.versionmodified):
# Never compact versionmodified nodes.
return False
- return BaseTranslator.should_be_compact_paragraph(self, node)
+ return super().should_be_compact_paragraph(node)
def visit_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
atts = {'class': 'reference download',
'download': ''}
@@ -583,12 +593,12 @@ class HTMLTranslator(BaseTranslator):
self.context.append('')
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
@@ -626,70 +636,70 @@ class HTMLTranslator(BaseTranslator):
node['width'] = str(size[0])
if 'height' not in node:
node['height'] = str(size[1])
- BaseTranslator.visit_image(self, node)
+ super().visit_image(node)
# overwritten
def depart_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node['uri'].lower().endswith(('svg', 'svgz')):
self.body.append(self.context.pop())
else:
- BaseTranslator.depart_image(self, node)
+ super().depart_image(node)
def visit_toctree(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# this only happens when formatting a toc from env.tocs -- in this
# case we don't want to include the subtree
raise nodes.SkipNode
def visit_index(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<table class="hlist"><tr>')
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</tr></table>\n')
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<td>')
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</td>')
def visit_option_group(self, node):
- # type: (nodes.Node) -> None
- BaseTranslator.visit_option_group(self, node)
+ # type: (nodes.Element) -> None
+ super().visit_option_group(node)
self.context[-2] = self.context[-2].replace('&nbsp;', '&#160;')
# overwritten
def visit_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
text = node.astext()
encoded = self.encode(text)
if self.protect_literal_text:
@@ -711,113 +721,113 @@ class HTMLTranslator(BaseTranslator):
self.body.append(encoded)
def visit_note(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'note')
def depart_note(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_warning(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_attention(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_caution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_danger(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_error(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'error')
def depart_error(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_hint(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_important(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'important')
def depart_important(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_tip(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_strong(node)
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_strong(node)
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</abbr>')
def visit_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_literal_emphasis(node)
if self.manpages_url:
node['refuri'] = self.manpages_url.format(**node.attributes)
self.visit_reference(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.manpages_url:
self.depart_reference(node)
self.depart_literal_emphasis(node)
@@ -825,33 +835,33 @@ class HTMLTranslator(BaseTranslator):
# overwritten to add even/odd classes
def visit_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._table_row_index = 0
- return BaseTranslator.visit_table(self, node)
+ return super().visit_table(node)
def visit_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._table_row_index += 1
if self._table_row_index % 2 == 0:
node['classes'].append('row-even')
else:
node['classes'].append('row-odd')
self.body.append(self.starttag(node, 'tr', ''))
- node.column = 0
+ node.column = 0 # type: ignore
def visit_entry(self, node):
- # type: (nodes.Node) -> None
- BaseTranslator.visit_entry(self, node)
+ # type: (nodes.Element) -> None
+ super().visit_entry(node)
if self.body[-1] == '&nbsp;':
self.body[-1] = '&#160;'
def visit_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._fieldlist_row_index = 0
- return BaseTranslator.visit_field_list(self, node)
+ return super().visit_field_list(node)
def visit_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._fieldlist_row_index += 1
if self._fieldlist_row_index % 2 == 0:
node['classes'].append('field-even')
@@ -860,33 +870,33 @@ class HTMLTranslator(BaseTranslator):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def visit_field_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
context_count = len(self.context)
- BaseTranslator.visit_field_name(self, node)
+ super().visit_field_name(node)
if context_count != len(self.context):
self.context[-1] = self.context[-1].replace('&nbsp;', '&#160;')
def visit_math(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
visit(self, node)
def depart_math(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_inline_math_renderers[name]
if depart:
depart(self, node)
def visit_math_block(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_block_math_renderers[name]
visit(self, node)
def depart_math_block(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_block_math_renderers[name]
if depart:
@@ -900,21 +910,21 @@ class HTMLTranslator(BaseTranslator):
@property
def highlightlang(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightlang_base(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning)
return self.builder.config.highlight_language
@property
def highlightopts(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightopts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_options
diff --git a/sphinx/writers/html5.py b/sphinx/writers/html5.py
index 41c665cdd..88e95ceff 100644
--- a/sphinx/writers/html5.py
+++ b/sphinx/writers/html5.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.html5
~~~~~~~~~~~~~~~~~~~~
@@ -13,15 +12,17 @@ import os
import posixpath
import sys
import warnings
+from typing import Iterable, cast
from docutils import nodes
from docutils.writers.html5_polyglot import HTMLTranslator as BaseTranslator
-from six import string_types
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.builders import Builder
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
+from sphinx.util.docutils import SphinxTranslator
from sphinx.util.images import get_image_size
if False:
@@ -36,50 +37,59 @@ logger = logging.getLogger(__name__)
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
-class HTML5Translator(BaseTranslator):
+class HTML5Translator(SphinxTranslator, BaseTranslator):
"""
Our custom HTML translator.
"""
- def __init__(self, builder, *args, **kwds):
- # type: (StandaloneHTMLBuilder, Any, Any) -> None
- BaseTranslator.__init__(self, *args, **kwds)
- self.highlighter = builder.highlighter
- self.builder = builder
- self.docnames = [builder.current_docname] # for singlehtml builder
- self.manpages_url = builder.config.manpages_url
+ builder = None # type: StandaloneHTMLBuilder
+
+ def __init__(self, *args):
+ # type: (Any) -> None
+ if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
+ document, builder = args
+ else:
+ warnings.warn('The order of arguments for HTML5Translator has been changed. '
+ 'Please give "document" as 1st and "builder" as 2nd.',
+ RemovedInSphinx40Warning, stacklevel=2)
+ builder, document = args
+ super().__init__(document, builder)
+
+ self.highlighter = self.builder.highlighter
+ self.docnames = [self.builder.current_docname] # for singlehtml builder
+ self.manpages_url = self.config.manpages_url
self.protect_literal_text = 0
- self.permalink_text = builder.config.html_add_permalinks
+ self.permalink_text = self.config.html_add_permalinks
# support backwards-compatible setting to a bool
- if not isinstance(self.permalink_text, string_types):
- self.permalink_text = self.permalink_text and u'\u00B6' or ''
+ if not isinstance(self.permalink_text, str):
+ self.permalink_text = self.permalink_text and '¶' or ''
self.permalink_text = self.encode(self.permalink_text)
- self.secnumber_suffix = builder.config.html_secnumber_suffix
+ self.secnumber_suffix = self.config.html_secnumber_suffix
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
self.required_params_left = 0
def visit_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# only occurs in the single-file builder
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.docnames.pop()
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
@@ -88,56 +98,56 @@ class HTML5Translator(BaseTranslator):
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' &#x2192; ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
@@ -147,7 +157,7 @@ class HTML5Translator(BaseTranslator):
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<span class="sig-paren">)</span>')
# If required parameters are still to come, then put the comma after
@@ -157,7 +167,7 @@ class HTML5Translator(BaseTranslator):
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
@@ -168,49 +178,49 @@ class HTML5Translator(BaseTranslator):
self.body.append('<em>')
def depart_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</em>')
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</dd>')
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
@@ -218,10 +228,9 @@ class HTML5Translator(BaseTranslator):
atts['class'] += ' external'
if 'refuri' in node:
atts['href'] = node['refuri'] or '#'
- if self.settings.cloak_email_addresses and \
- atts['href'].startswith('mailto:'):
+ if self.settings.cloak_email_addresses and atts['href'].startswith('mailto:'):
atts['href'] = self.cloak_mailto(atts['href'])
- self.in_mailto = 1
+ self.in_mailto = True
else:
assert 'refid' in node, \
'References must have "refuri" or "refid" attribute.'
@@ -240,36 +249,36 @@ class HTML5Translator(BaseTranslator):
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_reference(node)
def depart_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
- def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ def visit_comment(self, node): # type: ignore
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
node.insert(0, nodes.title(name, admonitionlabels[name]))
def visit_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
@@ -289,11 +298,11 @@ class HTML5Translator(BaseTranslator):
self.secnumber_suffix)
def add_fignumber(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
def append_fignumber(figtype, figure_id):
- # type: (unicode, unicode) -> None
+ # type: (str, str) -> None
if self.builder.name == 'singlehtml':
- key = u"%s/%s" % (self.docnames[-1], figtype)
+ key = "%s/%s" % (self.docnames[-1], figtype)
else:
key = figtype
@@ -317,30 +326,30 @@ class HTML5Translator(BaseTranslator):
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
- format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
+ format = '<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
# overwritten
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
- if len(node) == 1 and node[0].tagname == 'toctree':
+ # type: (nodes.Element) -> None
+ if len(node) == 1 and isinstance(node[0], addnodes.toctree):
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
- BaseTranslator.visit_bullet_list(self, node)
+ super().visit_bullet_list(node)
# overwritten
def visit_title(self, node):
- # type: (nodes.Node) -> None
- BaseTranslator.visit_title(self, node)
+ # type: (nodes.Element) -> None
+ super().visit_title(node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
if isinstance(node.parent, nodes.table):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
@@ -348,9 +357,9 @@ class HTML5Translator(BaseTranslator):
if close_tag.startswith('</h'):
self.add_permalink_ref(node.parent, _('Permalink to this headline'))
elif close_tag.startswith('</a></h'):
- self.body.append(u'</a><a class="headerlink" href="#%s" ' %
+ self.body.append('</a><a class="headerlink" href="#%s" ' %
node.parent['ids'][0] +
- u'title="%s">%s' % (
+ 'title="%s">%s' % (
_('Permalink to this headline'),
self.permalink_text))
elif isinstance(node.parent, nodes.table):
@@ -359,14 +368,14 @@ class HTML5Translator(BaseTranslator):
elif isinstance(node.parent, nodes.table):
self.body.append('</span>')
- BaseTranslator.depart_title(self, node)
+ super().depart_title(node)
# overwritten
def visit_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
- return BaseTranslator.visit_literal_block(self, node)
+ return super().visit_literal_block(node)
lang = node.get('language', 'default')
linenos = node.get('linenos', False)
@@ -388,16 +397,16 @@ class HTML5Translator(BaseTranslator):
raise nodes.SkipNode
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
- BaseTranslator.visit_caption(self, node)
+ super().visit_caption(node)
self.add_fignumber(node.parent)
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</span>')
# append permalink if available
@@ -413,24 +422,24 @@ class HTML5Translator(BaseTranslator):
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('</div>\n')
else:
- BaseTranslator.depart_caption(self, node)
+ super().depart_caption(node)
def visit_doctest_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_literal_block(node)
# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'blockquote') + '<div>')
def depart_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append(self.starttag(node, 'kbd', '',
CLASS='docutils literal notranslate'))
@@ -440,7 +449,7 @@ class HTML5Translator(BaseTranslator):
self.protect_literal_text += 1
def depart_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'kbd' in node['classes']:
self.body.append('</kbd>')
else:
@@ -448,14 +457,15 @@ class HTML5Translator(BaseTranslator):
self.body.append('</code>')
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'pre'))
names = []
- for production in node:
+ productionlist = cast(Iterable[addnodes.production], node)
+ for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
- for production in node:
+ for production in productionlist:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.starttag(production, 'strong', ''))
@@ -468,48 +478,36 @@ class HTML5Translator(BaseTranslator):
raise nodes.SkipNode
def depart_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
def depart_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</strong></p>')
- # overwritten
- def should_be_compact_paragraph(self, node):
- # type: (nodes.Node) -> bool
- """Determine if the <p> tags around paragraph can be omitted."""
- if isinstance(node.parent, addnodes.desc_content):
- # Never compact desc_content items.
- return False
- if isinstance(node.parent, addnodes.versionmodified):
- # Never compact versionmodified nodes.
- return False
- return BaseTranslator.should_be_compact_paragraph(self, node)
-
def visit_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
atts = {'class': 'reference download',
'download': ''}
@@ -529,12 +527,12 @@ class HTML5Translator(BaseTranslator):
self.context.append('')
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
@@ -572,65 +570,65 @@ class HTML5Translator(BaseTranslator):
node['width'] = str(size[0])
if 'height' not in node:
node['height'] = str(size[1])
- BaseTranslator.visit_image(self, node)
+ super().visit_image(node)
# overwritten
def depart_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node['uri'].lower().endswith(('svg', 'svgz')):
self.body.append(self.context.pop())
else:
- BaseTranslator.depart_image(self, node)
+ super().depart_image(node)
def visit_toctree(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# this only happens when formatting a toc from env.tocs -- in this
# case we don't want to include the subtree
raise nodes.SkipNode
def visit_index(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<table class="hlist"><tr>')
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</tr></table>\n')
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<td>')
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</td>')
# overwritten
def visit_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
text = node.astext()
encoded = self.encode(text)
if self.protect_literal_text:
@@ -652,113 +650,113 @@ class HTML5Translator(BaseTranslator):
self.body.append(encoded)
def visit_note(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'note')
def depart_note(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_warning(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_attention(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_caution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_danger(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_error(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'error')
def depart_error(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_hint(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_important(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'important')
def depart_important(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_tip(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_strong(node)
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_strong(node)
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('</abbr>')
def visit_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_literal_emphasis(node)
if self.manpages_url:
- node['refuri'] = self.manpages_url.format(**dict(node))
+ node['refuri'] = self.manpages_url.format(**node.attributes)
self.visit_reference(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.manpages_url:
self.depart_reference(node)
self.depart_literal_emphasis(node)
@@ -766,7 +764,7 @@ class HTML5Translator(BaseTranslator):
# overwritten to add even/odd classes
def generate_targets_for_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
"""Generate hyperlink targets for tables.
Original visit_table() generates hyperlink targets inside table tags
@@ -780,13 +778,12 @@ class HTML5Translator(BaseTranslator):
node['ids'].remove(id)
def visit_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.generate_targets_for_table(node)
self._table_row_index = 0
- classes = [cls.strip(u' \t\n')
- for cls in self.settings.table_style.split(',')]
+ classes = [cls.strip(' \t\n') for cls in self.settings.table_style.split(',')]
classes.insert(0, "docutils") # compat
if 'align' in node:
classes.append('align-%s' % node['align'])
@@ -794,50 +791,49 @@ class HTML5Translator(BaseTranslator):
self.body.append(tag)
def visit_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._table_row_index += 1
if self._table_row_index % 2 == 0:
node['classes'].append('row-even')
else:
node['classes'].append('row-odd')
self.body.append(self.starttag(node, 'tr', ''))
- node.column = 0
+ node.column = 0 # type: ignore
def visit_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._fieldlist_row_index = 0
- return BaseTranslator.visit_field_list(self, node)
+ return super().visit_field_list(node)
def visit_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._fieldlist_row_index += 1
if self._fieldlist_row_index % 2 == 0:
node['classes'].append('field-even')
else:
node['classes'].append('field-odd')
- return node
def visit_math(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_inline_math_renderers[name]
visit(self, node)
def depart_math(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_inline_math_renderers[name]
if depart:
depart(self, node)
def visit_math_block(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
visit, _ = self.builder.app.registry.html_block_math_renderers[name]
visit(self, node)
def depart_math_block(self, node, math_env=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
name = self.builder.math_renderer_name
_, depart = self.builder.app.registry.html_block_math_renderers[name]
if depart:
@@ -851,21 +847,21 @@ class HTML5Translator(BaseTranslator):
@property
def highlightlang(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightlang_base(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightlang_base is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_language
@property
def highlightopts(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('HTMLTranslator.highlightopts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return self.builder.config.highlight_options
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 9b9bf3455..d88a6ab5e 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.latex
~~~~~~~~~~~~~~~~~~~~
@@ -17,18 +16,19 @@ import sys
import warnings
from collections import defaultdict
from os import path
+from typing import Iterable, cast
from docutils import nodes, writers
from docutils.writers.latex2e import Babel
-from six import itervalues, text_type
from sphinx import addnodes
from sphinx import highlighting
-from sphinx.deprecation import RemovedInSphinx30Warning
+from sphinx.deprecation import RemovedInSphinx30Warning, RemovedInSphinx40Warning
+from sphinx.domains.std import StandardDomain
from sphinx.errors import SphinxError
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import split_into, logging
-from sphinx.util.i18n import format_date
+from sphinx.util.docutils import SphinxTranslator
from sphinx.util.nodes import clean_astext
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_escape_map, tex_replace_map
@@ -37,12 +37,16 @@ try:
from docutils.utils.roman import toRoman
except ImportError:
     # In Debian/Ubuntu, roman package is provided as roman, not as docutils.utils.roman
- from roman import toRoman
+ from roman import toRoman # type: ignore
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterator, List, Pattern, Tuple, Set, Union # NOQA
- from sphinx.builder import Builder # NOQA
+ from sphinx.builders.latex import LaTeXBuilder # NOQA
+ from sphinx.builders.latex.nodes import ( # NOQA
+ captioned_literal_block, footnotemark, footnotetext, math_reference, thebibliography
+ )
+ from sphinx.domains import IndexEntry # NOQA
logger = logging.getLogger(__name__)
@@ -63,7 +67,55 @@ ENUMERATE_LIST_STYLE = defaultdict(lambda: r'\arabic',
'upperalpha': r'\Alph',
'lowerroman': r'\roman',
'upperroman': r'\Roman',
- }) # type: Dict[unicode, unicode]
+ })
+PDFLATEX_DEFAULT_FONTPKG = r'''
+\usepackage{times}
+\expandafter\ifx\csname T@LGR\endcsname\relax
+\else
+% LGR was declared as font encoding
+ \substitutefont{LGR}{\rmdefault}{cmr}
+ \substitutefont{LGR}{\sfdefault}{cmss}
+ \substitutefont{LGR}{\ttdefault}{cmtt}
+\fi
+\expandafter\ifx\csname T@X2\endcsname\relax
+ \expandafter\ifx\csname T@T2A\endcsname\relax
+ \else
+ % T2A was declared as font encoding
+ \substitutefont{T2A}{\rmdefault}{cmr}
+ \substitutefont{T2A}{\sfdefault}{cmss}
+ \substitutefont{T2A}{\ttdefault}{cmtt}
+ \fi
+\else
+% X2 was declared as font encoding
+ \substitutefont{X2}{\rmdefault}{cmr}
+ \substitutefont{X2}{\sfdefault}{cmss}
+ \substitutefont{X2}{\ttdefault}{cmtt}
+\fi
+'''
+XELATEX_DEFAULT_FONTPKG = r'''
+\setmainfont{FreeSerif}[
+ Extension = .otf,
+ UprightFont = *,
+ ItalicFont = *Italic,
+ BoldFont = *Bold,
+ BoldItalicFont = *BoldItalic
+]
+\setsansfont{FreeSans}[
+ Extension = .otf,
+ UprightFont = *,
+ ItalicFont = *Oblique,
+ BoldFont = *Bold,
+ BoldItalicFont = *BoldOblique,
+]
+\setmonofont{FreeMono}[
+ Extension = .otf,
+ UprightFont = *,
+ ItalicFont = *Oblique,
+ BoldFont = *Bold,
+ BoldItalicFont = *BoldOblique,
+]
+'''
+LUALATEX_DEFAULT_FONTPKG = XELATEX_DEFAULT_FONTPKG
DEFAULT_SETTINGS = {
'latex_engine': 'pdflatex',
@@ -86,7 +138,10 @@ DEFAULT_SETTINGS = {
'multilingual': '',
'babel': '\\usepackage{babel}',
'polyglossia': '',
- 'fontpkg': '\\usepackage{times}',
+ 'fontpkg': PDFLATEX_DEFAULT_FONTPKG,
+ 'substitutefont': '',
+ 'textcyrillic': '',
+ 'textgreek': '\\usepackage{textalpha}',
'fncychap': '\\usepackage[Bjarne]{fncychap}',
'hyperref': ('% Include hyperref last.\n'
'\\usepackage{hyperref}\n'
@@ -94,15 +149,12 @@ DEFAULT_SETTINGS = {
'\\usepackage{hypcap}% it must be loaded after hyperref.\n'
'% Set up styles of URL: it should be placed after hyperref.\n'
'\\urlstyle{same}'),
- 'usepackages': '',
'numfig_format': '',
'contentsname': '',
'preamble': '',
'title': '',
- 'date': '',
'release': '',
'author': '',
- 'logo': '\\vbox{}',
'releasename': '',
'makeindex': '\\makeindex',
'shorthandoff': '',
@@ -114,30 +166,24 @@ DEFAULT_SETTINGS = {
'figure_align': 'htbp',
'tocdepth': '',
'secnumdepth': '',
- 'pageautorefname': '',
- 'translatablestrings': '',
-} # type: Dict[unicode, unicode]
+} # type: Dict[str, Any]
ADDITIONAL_SETTINGS = {
'pdflatex': {
'inputenc': '\\usepackage[utf8]{inputenc}',
'utf8extra': ('\\ifdefined\\DeclareUnicodeCharacter\n'
'% support both utf8 and utf8x syntaxes\n'
- '\\edef\\sphinxdqmaybe{'
- '\\ifdefined\\DeclareUnicodeCharacterAsOptional'
- '\\string"\\fi}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe00A0}'
- '{\\nobreakspace}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe2500}'
- '{\\sphinxunichar{2500}}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe2502}'
- '{\\sphinxunichar{2502}}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe2514}'
- '{\\sphinxunichar{2514}}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe251C}'
- '{\\sphinxunichar{251C}}\n'
- ' \\DeclareUnicodeCharacter{\\sphinxdqmaybe2572}'
- '{\\textbackslash}\n'
+ ' \\ifdefined\\DeclareUnicodeCharacterAsOptional\n'
+ ' \\def\\sphinxDUC#1{\\DeclareUnicodeCharacter{"#1}}\n'
+ ' \\else\n'
+ ' \\let\\sphinxDUC\\DeclareUnicodeCharacter\n'
+ ' \\fi\n'
+ ' \\sphinxDUC{00A0}{\\nobreakspace}\n'
+ ' \\sphinxDUC{2500}{\\sphinxunichar{2500}}\n'
+ ' \\sphinxDUC{2502}{\\sphinxunichar{2502}}\n'
+ ' \\sphinxDUC{2514}{\\sphinxunichar{2514}}\n'
+ ' \\sphinxDUC{251C}{\\sphinxunichar{251C}}\n'
+ ' \\sphinxDUC{2572}{\\textbackslash}\n'
'\\fi'),
},
'xelatex': {
@@ -145,29 +191,32 @@ ADDITIONAL_SETTINGS = {
'polyglossia': '\\usepackage{polyglossia}',
'babel': '',
'fontenc': '\\usepackage{fontspec}',
- 'fontpkg': '',
+ 'fontpkg': XELATEX_DEFAULT_FONTPKG,
+ 'textgreek': '',
'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
'{\\leavevmode\\nobreak\\ }'),
- 'fvset': '\\fvset{fontsize=auto}',
},
'lualatex': {
'latex_engine': 'lualatex',
'polyglossia': '\\usepackage{polyglossia}',
'babel': '',
- 'fontenc': '\\usepackage{fontspec}',
- 'fontpkg': '',
+ 'fontenc': ('\\usepackage{fontspec}\n'
+ '\\defaultfontfeatures[\\rmfamily,\\sffamily]{}'),
+ 'fontpkg': LUALATEX_DEFAULT_FONTPKG,
+ 'textgreek': '',
'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
'{\\leavevmode\\nobreak\\ }'),
- 'fvset': '\\fvset{fontsize=auto}',
},
'platex': {
'latex_engine': 'platex',
'babel': '',
'classoptions': ',dvipdfmx',
+ 'fontpkg': '\\usepackage{times}',
+ 'textgreek': '',
'fncychap': '',
'geometry': '\\usepackage[dvipdfm]{geometry}',
},
-} # type: Dict[unicode, Dict[unicode, unicode]]
+} # type: Dict[str, Dict[str, Any]]
EXTRA_RE = re.compile(r'^(.*\S)\s+\(([^()]*)\)\s*$')
@@ -194,15 +243,15 @@ class LaTeXWriter(writers.Writer):
output = None
def __init__(self, builder):
- # type: (Builder) -> None
- writers.Writer.__init__(self)
+ # type: (LaTeXBuilder) -> None
+ super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
visitor = self.builder.create_translator(self.document, self.builder)
self.document.walkabout(visitor)
- self.output = visitor.astext()
+ self.output = cast(LaTeXTranslator, visitor).astext()
# Helper classes
@@ -211,14 +260,14 @@ class ExtBabel(Babel):
cyrillic_languages = ('bulgarian', 'kazakh', 'mongolian', 'russian', 'ukrainian')
def __init__(self, language_code, use_polyglossia=False):
- # type: (unicode, bool) -> None
+ # type: (str, bool) -> None
self.language_code = language_code
self.use_polyglossia = use_polyglossia
self.supported = True
- super(ExtBabel, self).__init__(language_code or '')
+ super().__init__(language_code or '')
def get_shorthandoff(self):
- # type: () -> unicode
+ # type: () -> str
warnings.warn('ExtBabel.get_shorthandoff() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return SHORTHANDOFF
@@ -232,8 +281,8 @@ class ExtBabel(Babel):
return self.supported
def language_name(self, language_code):
- # type: (unicode) -> unicode
- language = super(ExtBabel, self).language_name(language_code)
+ # type: (str) -> str
+ language = super().language_name(language_code)
if language == 'ngerman' and self.use_polyglossia:
# polyglossia calls new orthography (Neue Rechtschreibung) as
# german (with new spelling option).
@@ -245,12 +294,12 @@ class ExtBabel(Babel):
return language
def get_mainlanguage_options(self):
- # type: () -> unicode
+ # type: () -> str
"""Return options for polyglossia's ``\\setmainlanguage``."""
if self.use_polyglossia is False:
return None
elif self.language == 'german':
- language = super(ExtBabel, self).language_name(self.language_code)
+ language = super().language_name(self.language_code)
if language == 'ngerman':
return 'spelling=new'
else:
@@ -259,21 +308,21 @@ class ExtBabel(Babel):
return None
-class Table(object):
+class Table:
"""A table data"""
def __init__(self, node):
- # type: (nodes.table) -> None
- self.header = [] # type: List[unicode]
- self.body = [] # type: List[unicode]
+ # type: (nodes.Element) -> None
+ self.header = [] # type: List[str]
+ self.body = [] # type: List[str]
self.align = node.get('align')
self.colcount = 0
- self.colspec = None # type: unicode
+ self.colspec = None # type: str
self.colwidths = [] # type: List[int]
self.has_problematic = False
self.has_oldproblematic = False
self.has_verbatim = False
- self.caption = None # type: List[unicode]
+ self.caption = None # type: List[str]
self.stubs = [] # type: List[int]
# current position
@@ -281,7 +330,7 @@ class Table(object):
self.row = 0
# for internal use
- self.classes = node.get('classes', []) # type: List[unicode]
+ self.classes = node.get('classes', []) # type: List[str]
self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int]
# it maps table location to cell_id
# (cell = rectangular area)
@@ -289,14 +338,14 @@ class Table(object):
@property
def caption_footnotetexts(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('table.caption_footnotetexts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@property
def header_footnotetexts(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
warnings.warn('table.header_footnotetexts is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@@ -307,7 +356,7 @@ class Table(object):
return self.row > 30 or 'longtable' in self.classes
def get_table_type(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns the LaTeX environment name for the table.
The class currently supports:
@@ -328,7 +377,7 @@ class Table(object):
return 'tabulary'
def get_colspec(self):
- # type: () -> unicode
+ # type: () -> str
"""Returns a column spec of table.
This is what LaTeX calls the 'preamble argument' of the used table environment.
@@ -380,7 +429,7 @@ class Table(object):
return None
-class TableCell(object):
+class TableCell:
"""A cell data of tables."""
def __init__(self, table, row, col):
@@ -419,13 +468,13 @@ class TableCell(object):
def escape_abbr(text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Adjust spacing after abbreviations."""
return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Convert `width_str` with rst length to LaTeX length."""
match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
@@ -442,7 +491,8 @@ def rstdim_to_latexdim(width_str):
return res
-class LaTeXTranslator(nodes.NodeVisitor):
+class LaTeXTranslator(SphinxTranslator):
+ builder = None # type: LaTeXBuilder
secnumdepth = 2 # legacy sphinxhowto.cls uses this, whereas article.cls
# default is originally 3. For book/report, 2 is already LaTeX default.
@@ -452,10 +502,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
docclasses = ('howto', 'manual')
def __init__(self, document, builder):
- # type: (nodes.Node, Builder) -> None
- nodes.NodeVisitor.__init__(self, document)
- self.builder = builder
- self.body = [] # type: List[unicode]
+ # type: (nodes.document, LaTeXBuilder) -> None
+ super().__init__(document, builder)
+ self.body = [] # type: List[str]
# flags
self.in_title = 0
@@ -474,65 +523,36 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.first_param = 0
# sort out some elements
- self.elements = DEFAULT_SETTINGS.copy()
- self.elements.update(ADDITIONAL_SETTINGS.get(builder.config.latex_engine, {}))
- # for xelatex+French, don't use polyglossia
- if self.elements['latex_engine'] == 'xelatex':
- if builder.config.language:
- if builder.config.language[:2] == 'fr':
- self.elements.update({
- 'polyglossia': '',
- 'babel': '\\usepackage{babel}',
- })
- # allow the user to override them all
- self.elements.update(builder.config.latex_elements)
+ self.elements = self.builder.context.copy()
# but some have other interface in config file
- self.elements.update({
- 'wrapperclass': self.format_docclass(document.settings.docclass),
- # if empty, the title is set to the first section title
- 'title': document.settings.title, # treat as a raw LaTeX code
- 'release': self.encode(builder.config.release),
- 'author': document.settings.author, # treat as a raw LaTeX code
- 'indexname': _('Index'),
- 'use_xindy': builder.config.latex_use_xindy,
- })
- if not self.elements['releasename'] and self.elements['release']:
- self.elements.update({
- 'releasename': _('Release'),
- })
+ self.elements['wrapperclass'] = self.format_docclass(self.settings.docclass)
# we assume LaTeX class provides \chapter command except in case
# of non-Japanese 'howto' case
self.sectionnames = LATEXSECTIONNAMES[:]
- if document.settings.docclass == 'howto':
- docclass = builder.config.latex_docclass.get('howto', 'article')
+ if self.settings.docclass == 'howto':
+ docclass = self.config.latex_docclass.get('howto', 'article')
if docclass[0] == 'j': # Japanese class...
pass
else:
self.sectionnames.remove('chapter')
else:
- docclass = builder.config.latex_docclass.get('manual', 'report')
+ docclass = self.config.latex_docclass.get('manual', 'report')
self.elements['docclass'] = docclass
# determine top section level
self.top_sectionlevel = 1
- if builder.config.latex_toplevel_sectioning:
+ if self.config.latex_toplevel_sectioning:
try:
self.top_sectionlevel = \
- self.sectionnames.index(builder.config.latex_toplevel_sectioning)
+ self.sectionnames.index(self.config.latex_toplevel_sectioning)
except ValueError:
logger.warning(__('unknown %r toplevel_sectioning for class %r') %
- (builder.config.latex_toplevel_sectioning, docclass))
+ (self.config.latex_toplevel_sectioning, docclass))
- if builder.config.today:
- self.elements['date'] = builder.config.today
- else:
- self.elements['date'] = format_date(builder.config.today_fmt or _('%b %d, %Y'),
- language=builder.config.language)
-
- if builder.config.numfig:
- self.numfig_secnum_depth = builder.config.numfig_secnum_depth
+ if self.config.numfig:
+ self.numfig_secnum_depth = self.config.numfig_secnum_depth
if self.numfig_secnum_depth > 0: # default is 1
# numfig_secnum_depth as passed to sphinx.sty indices same names as in
# LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
@@ -550,44 +570,52 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
self.elements['sphinxpkgoptions'] += ',nonumfigreset'
try:
- if builder.config.math_numfig:
+ if self.config.math_numfig:
self.elements['sphinxpkgoptions'] += ',mathnumfig'
except AttributeError:
pass
- if builder.config.latex_logo:
- # no need for \\noindent here, used in flushright
- self.elements['logo'] = '\\sphinxincludegraphics{%s}\\par' % \
- path.basename(builder.config.latex_logo)
-
- if (builder.config.language and builder.config.language != 'ja' and
- 'fncychap' not in builder.config.latex_elements):
- # use Sonny style if any language specified
+ if (self.config.language not in {None, 'en', 'ja'} and
+ 'fncychap' not in self.config.latex_elements):
+ # use Sonny style if any language specified (except English)
self.elements['fncychap'] = ('\\usepackage[Sonny]{fncychap}\n'
'\\ChNameVar{\\Large\\normalfont'
'\\sffamily}\n\\ChTitleVar{\\Large'
'\\normalfont\\sffamily}')
- self.babel = ExtBabel(builder.config.language,
+ self.babel = ExtBabel(self.config.language,
not self.elements['babel'])
- if builder.config.language and not self.babel.is_supported_language():
+ if self.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
logger.warning(__('no Babel option known for language %r'),
- builder.config.language)
+ self.config.language)
# set up multilingual module...
+ if self.elements['latex_engine'] == 'pdflatex':
+ if not self.babel.uses_cyrillic():
+ if 'X2' in self.elements['fontenc']:
+ self.elements['substitutefont'] = '\\usepackage{substitutefont}'
+ self.elements['textcyrillic'] = ('\\usepackage[Xtwo]'
+ '{sphinxcyrillic}')
+ elif 'T2A' in self.elements['fontenc']:
+ self.elements['substitutefont'] = '\\usepackage{substitutefont}'
+ self.elements['textcyrillic'] = ('\\usepackage[TtwoA]'
+ '{sphinxcyrillic}')
+ if 'LGR' in self.elements['fontenc']:
+ self.elements['substitutefont'] = '\\usepackage{substitutefont}'
+ else:
+ self.elements['textgreek'] = ''
# 'babel' key is public and user setting must be obeyed
if self.elements['babel']:
self.elements['classoptions'] += ',' + self.babel.get_language()
# this branch is not taken for xelatex/lualatex if default settings
self.elements['multilingual'] = self.elements['babel']
- if builder.config.language:
+ if self.config.language:
self.elements['shorthandoff'] = SHORTHANDOFF
# Times fonts don't work with Cyrillic languages
- if self.babel.uses_cyrillic() \
- and 'fontpkg' not in builder.config.latex_elements:
+ if self.babel.uses_cyrillic() and 'fontpkg' not in self.config.latex_elements:
self.elements['fontpkg'] = ''
elif self.elements['polyglossia']:
self.elements['classoptions'] += ',' + self.babel.get_language()
@@ -601,25 +629,15 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.elements['multilingual'] = '%s\n%s' % (self.elements['polyglossia'],
mainlanguage)
- if getattr(builder, 'usepackages', None):
- def declare_package(packagename, options=None):
- # type:(unicode, unicode) -> unicode
- if options:
- return '\\usepackage[%s]{%s}' % (options, packagename)
- else:
- return '\\usepackage{%s}' % (packagename,)
- usepackages = (declare_package(*p) for p in builder.usepackages)
- self.elements['usepackages'] += "\n".join(usepackages)
-
minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
- if document.get('tocdepth'):
+ if self.document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
# tocdepth = 0: show parts and chapters
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
- tocdepth = document['tocdepth'] + self.top_sectionlevel - 2
+ tocdepth = self.document['tocdepth'] + self.top_sectionlevel - 2
if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
@@ -630,16 +648,17 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
minsecnumdepth = max(minsecnumdepth, tocdepth)
- if builder.config.numfig and (builder.config.numfig_secnum_depth > 0):
+ if self.config.numfig and (self.config.numfig_secnum_depth > 0):
minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
if minsecnumdepth > self.secnumdepth:
self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' %\
minsecnumdepth
- if getattr(document.settings, 'contentsname', None):
- self.elements['contentsname'] = \
- self.babel_renewcommand('\\contentsname', document.settings.contentsname)
+ contentsname = self.settings.contentsname
+ if contentsname:
+ self.elements['contentsname'] = self.babel_renewcommand('\\contentsname',
+ contentsname)
if self.elements['maxlistdepth']:
self.elements['sphinxpkgoptions'] += (',maxlistdepth=%s' %
@@ -653,72 +672,53 @@ class LaTeXTranslator(nodes.NodeVisitor):
if self.elements['extraclassoptions']:
self.elements['classoptions'] += ',' + \
self.elements['extraclassoptions']
- self.elements['translatablestrings'] = (
- self.babel_renewcommand(
- '\\literalblockcontinuedname', self.encode(_('continued from previous page'))
- ) +
- self.babel_renewcommand(
- '\\literalblockcontinuesname', self.encode(_('continues on next page'))
- ) +
- self.babel_renewcommand(
- '\\sphinxnonalphabeticalgroupname', self.encode(_('Non-alphabetical'))
- ) +
- self.babel_renewcommand(
- '\\sphinxsymbolsname', self.encode(_('Symbols'))
- ) +
- self.babel_renewcommand(
- '\\sphinxnumbersname', self.encode(_('Numbers'))
- )
- )
- self.elements['pageautorefname'] = \
- self.babel_defmacro('\\pageautorefname', self.encode(_('page')))
- self.elements['numfig_format'] = self.generate_numfig_format(builder)
-
- self.highlighter = highlighting.PygmentsBridge('latex', builder.config.pygments_style)
- self.context = [] # type: List[Any]
- self.descstack = [] # type: List[unicode]
- self.table = None # type: Table
- self.next_table_colspec = None # type: unicode
- self.bodystack = [] # type: List[List[unicode]]
- self.footnote_restricted = False
- self.pending_footnotes = [] # type: List[nodes.footnote_reference]
- self.curfilestack = [] # type: List[unicode]
- self.handled_abbrs = set() # type: Set[unicode]
+ self.elements['numfig_format'] = self.generate_numfig_format(self.builder)
+
+ self.highlighter = highlighting.PygmentsBridge('latex', self.config.pygments_style)
+ self.context = [] # type: List[Any]
+ self.descstack = [] # type: List[str]
+ self.table = None # type: Table
+ self.next_table_colspec = None # type: str
+ self.bodystack = [] # type: List[List[str]]
+ self.footnote_restricted = None # type: nodes.Element
+ self.pending_footnotes = [] # type: List[nodes.footnote_reference]
+ self.curfilestack = [] # type: List[str]
+ self.handled_abbrs = set() # type: Set[str]
def pushbody(self, newbody):
- # type: (List[unicode]) -> None
+ # type: (List[str]) -> None
self.bodystack.append(self.body)
self.body = newbody
def popbody(self):
- # type: () -> List[unicode]
+ # type: () -> List[str]
body = self.body
self.body = self.bodystack.pop()
return body
def restrict_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
warnings.warn('LaTeXWriter.restrict_footnote() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
- if self.footnote_restricted is False:
+ if self.footnote_restricted is None:
self.footnote_restricted = node
self.pending_footnotes = []
def unrestrict_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
warnings.warn('LaTeXWriter.unrestrict_footnote() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
if self.footnote_restricted == node:
- self.footnote_restricted = False
+ self.footnote_restricted = None
for footnode in self.pending_footnotes:
footnode['footnotetext'] = True
footnode.walkabout(self)
self.pending_footnotes = []
def format_docclass(self, docclass):
- # type: (unicode) -> unicode
+ # type: (str) -> str
""" prepends prefix to sphinx document classes
"""
if docclass in self.docclasses:
@@ -726,22 +726,22 @@ class LaTeXTranslator(nodes.NodeVisitor):
return docclass
def astext(self):
- # type: () -> unicode
+ # type: () -> str
self.elements.update({
- 'body': u''.join(self.body),
+ 'body': ''.join(self.body),
'indices': self.generate_indices()
})
return self.render('latex.tex_t', self.elements)
def hypertarget(self, id, withdoc=True, anchor=True):
- # type: (unicode, bool, bool) -> unicode
+ # type: (str, bool, bool) -> str
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (anchor and '\\phantomsection' or '') + \
'\\label{%s}' % self.idescape(id)
def hypertarget_to(self, node, anchor=False):
- # type: (nodes.Node, bool) -> unicode
+ # type: (nodes.Element, bool) -> str
labels = ''.join(self.hypertarget(node_id, anchor=False) for node_id in node['ids'])
if anchor:
return r'\phantomsection' + labels
@@ -749,21 +749,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
return labels
def hyperlink(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
return '\\autopageref*{%s}' % self.idescape(id)
def idescape(self, id):
- # type: (unicode) -> unicode
- return '\\detokenize{%s}' % text_type(id).translate(tex_replace_map).\
+ # type: (str) -> str
+ return '\\detokenize{%s}' % str(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command, definition):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
@@ -773,51 +773,40 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
- def babel_defmacro(self, name, definition):
- # type: (unicode, unicode) -> unicode
- if self.elements['babel']:
- prefix = '\\addto\\extras%s{' % self.babel.get_language()
- suffix = '}'
- else: # babel is disabled (mainly for Japanese environment)
- prefix = ''
- suffix = ''
-
- return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix))
-
def generate_numfig_format(self, builder):
- # type: (Builder) -> unicode
- ret = [] # type: List[unicode]
+ # type: (LaTeXBuilder) -> str
+ ret = [] # type: List[str]
figure = self.builder.config.numfig_format['figure'].split('%s', 1)
if len(figure) == 1:
ret.append('\\def\\fnum@figure{%s}\n' %
- text_type(figure[0]).strip().translate(tex_escape_map))
+ str(figure[0]).strip().translate(tex_escape_map))
else:
- definition = text_type(figure[0]).strip().translate(tex_escape_map)
+ definition = str(figure[0]).strip().translate(tex_escape_map)
ret.append(self.babel_renewcommand('\\figurename', definition))
if figure[1]:
ret.append('\\makeatletter\n')
ret.append('\\def\\fnum@figure{\\figurename\\thefigure%s}\n' %
- text_type(figure[1]).strip().translate(tex_escape_map))
+ str(figure[1]).strip().translate(tex_escape_map))
ret.append('\\makeatother\n')
table = self.builder.config.numfig_format['table'].split('%s', 1)
if len(table) == 1:
ret.append('\\def\\fnum@table{%s}\n' %
- text_type(table[0]).strip().translate(tex_escape_map))
+ str(table[0]).strip().translate(tex_escape_map))
else:
- definition = text_type(table[0]).strip().translate(tex_escape_map)
+ definition = str(table[0]).strip().translate(tex_escape_map)
ret.append(self.babel_renewcommand('\\tablename', definition))
if table[1]:
ret.append('\\makeatletter\n')
ret.append('\\def\\fnum@table{\\tablename\\thetable%s}\n' %
- text_type(table[1]).strip().translate(tex_escape_map))
+ str(table[1]).strip().translate(tex_escape_map))
ret.append('\\makeatother\n')
codeblock = self.builder.config.numfig_format['code-block'].split('%s', 1)
if len(codeblock) == 1:
pass # FIXME
else:
- definition = text_type(codeblock[0]).strip().translate(tex_escape_map)
+ definition = str(codeblock[0]).strip().translate(tex_escape_map)
ret.append(self.babel_renewcommand('\\literalblockname', definition))
if codeblock[1]:
pass # FIXME
@@ -825,16 +814,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ''.join(ret)
def generate_indices(self):
- # type: (Builder) -> unicode
+ # type: () -> str
def generate(content, collapsed):
- # type: (List[Tuple[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]]], bool) -> None # NOQA
+ # type: (List[Tuple[str, List[IndexEntry]]], bool) -> None
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\let\\bigletter\\sphinxstyleindexlettergroup\n')
for i, (letter, entries) in enumerate(content):
if i > 0:
ret.append('\\indexspace\n')
ret.append('\\bigletter{%s}\n' %
- text_type(letter).translate(tex_escape_map))
+ str(letter).translate(tex_escape_map))
for entry in entries:
if not entry[3]:
continue
@@ -851,7 +840,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# latex_domain_indices can be False/True or a list of index names
indices_config = self.builder.config.latex_domain_indices
if indices_config:
- for domain in itervalues(self.builder.env.domains):
+ for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
@@ -861,14 +850,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.builder.docnames)
if not content:
continue
- ret.append(u'\\renewcommand{\\indexname}{%s}\n' %
+ ret.append('\\renewcommand{\\indexname}{%s}\n' %
indexcls.localname)
generate(content, collapsed)
return ''.join(ret)
def render(self, template_name, variables):
- # type: (unicode, Dict) -> unicode
+ # type: (str, Dict) -> str
for template_dir in self.builder.config.templates_path:
template = path.join(self.builder.confdir, template_dir,
template_name)
@@ -878,14 +867,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
return LaTeXRenderer().render(template_name, variables)
def visit_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
# the first document is all the regular content ...
self.first_document = 0
elif self.first_document == 0:
# ... and all others are the appendices
- self.body.append(u'\n\\appendix\n')
+ self.body.append('\n\\appendix\n')
self.first_document = -1
if 'docname' in node:
self.body.append(self.hypertarget(':doc'))
@@ -893,88 +882,68 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.curfilestack.append(node['docname'])
- def collect_footnotes(self, node):
- # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]]
- def footnotes_under(n):
- # type: (nodes.Node) -> Iterator[nodes.Node]
- if isinstance(n, nodes.footnote):
- yield n
- else:
- for c in n.children:
- if isinstance(c, addnodes.start_of_file):
- continue
- for k in footnotes_under(c):
- yield k
-
- fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
- for fn in footnotes_under(node):
- num = fn.children[0].astext().strip()
- newnode = collected_footnote(*fn.children, number=num)
- fnotes[num] = [newnode, False]
- return fnotes
-
def depart_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.curfilestack.pop()
def visit_section(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
def depart_section(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_minipage = 1
self.body.append('\n\\begin{sphinxShadowBox}\n')
def depart_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_minipage = 0
self.body.append('\\end{sphinxShadowBox}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
@@ -983,19 +952,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\productioncont{')
def depart_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}\n')
def visit_transition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.elements['transition'])
def depart_transition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
@@ -1016,7 +985,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
short = ''
if node.traverse(nodes.image):
short = ('[%s]' %
- u' '.join(clean_astext(node).split()).translate(tex_escape_map))
+ ' '.join(clean_astext(node).split()).translate(tex_escape_map))
try:
self.body.append(r'\%s%s{' % (self.sectionnames[self.sectionlevel], short))
@@ -1045,7 +1014,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_title = 1
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
@@ -1053,7 +1022,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(self.context.pop())
def visit_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.sidebar):
self.body.append('\\sphinxstylesidebarsubtitle{')
self.context.append('}\n')
@@ -1061,21 +1030,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('')
def depart_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\end{fulllineitems}\n\n')
def _visit_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
@@ -1084,11 +1053,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\pysigline{')
def _depart_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
@@ -1100,71 +1069,71 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('%\n\\pysigstartmultiline\n')
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append('%\n\\pysigstopmultiline')
def visit_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._visit_signature_line(node)
def depart_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._depart_signature_line(node)
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxcode{\sphinxupquote{')
self.literal_whitespace += 1
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}')
self.literal_whitespace -= 1
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'}')
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
self.no_contractions += 1
self.literal_whitespace += 1
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}')
self.literal_whitespace -= 1
self.no_contractions -= 1
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.first_param:
self.body.append(', ')
else:
@@ -1173,69 +1142,69 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxbfcode{\sphinxupquote{')
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}')
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_seealso(self, node):
- # type: (nodes.Node) -> None
- self.body.append(u'\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso'])
+ # type: (nodes.Element) -> None
+ self.body.append('\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso'])
def depart_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append("\n\n")
def visit_rubric(self, node):
- # type: (nodes.Node) -> None
- if len(node.children) == 1 and node.children[0].astext() in \
- ('Footnotes', _('Footnotes')):
+ # type: (nodes.Element) -> None
+ if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
self.body.append('\\subsubsection*{')
self.context.append('}\n')
self.in_title = 1
def depart_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_footnote += 1
+ label = cast(nodes.label, node[0])
if self.in_parsed_literal:
- self.body.append('\\begin{footnote}[%s]' % node[0].astext())
+ self.body.append('\\begin{footnote}[%s]' % label.astext())
else:
- self.body.append('%%\n\\begin{footnote}[%s]' % node[0].astext())
+ self.body.append('%%\n\\begin{footnote}[%s]' % label.astext())
self.body.append('\\sphinxAtStartFootnote\n')
def depart_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.in_parsed_literal:
self.body.append('\\end{footnote}')
else:
@@ -1243,16 +1212,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_footnote -= 1
def visit_label(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.table:
raise UnsupportedError(
'%s:%s: nested tables are not yet implemented.' %
@@ -1266,7 +1235,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.next_table_colspec = None
def depart_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
labels = self.hypertarget_to(node)
table_type = self.table.get_table_type()
table = self.render(table_type + '.tex_t',
@@ -1278,7 +1247,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table = None
def visit_colspec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.table.colcount += 1
if 'colwidth' in node:
self.table.colwidths.append(node['colwidth'])
@@ -1286,37 +1255,37 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.stubs.append(self.table.colcount - 1)
def depart_colspec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# Redirect head output until header is finished.
self.pushbody(self.table.header)
def depart_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.popbody()
def visit_tbody(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# Redirect body output until table is finished.
self.pushbody(self.table.body)
def depart_tbody(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.popbody()
def visit_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.table.col = 0
# fill columns if the row starts with the bottom of multirow cell
@@ -1337,7 +1306,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
(cell.width, cell.cell_id))
def depart_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\\\\n')
cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
@@ -1355,7 +1324,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.row += 1
def visit_entry(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.table.col > 0:
self.body.append('&')
self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
@@ -1393,7 +1362,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append(context)
def depart_entry(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.needs_linetrimming:
self.needs_linetrimming = 0
body = self.popbody()
@@ -1427,31 +1396,32 @@ class LaTeXTranslator(nodes.NodeVisitor):
(nextcell.width, nextcell.cell_id))
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# this is a list in the source, but should be rendered as a
# comma-separated list here
+ bullet_list = cast(nodes.bullet_list, node[0])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
- self.body.append(', '.join(n.astext()
- for n in node.children[0].children) + '.')
+ self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.compact_list:
self.body.append('\\begin{itemize}\n')
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.compact_list:
self.body.append('\\end{itemize}\n')
def visit_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
def get_enumtype(node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
enumtype = node.get('enumtype', 'arabic')
if 'alpha' in enumtype and 26 < node.get('start', 0) + len(node):
# fallback to arabic if alphabet counter overflows
@@ -1460,7 +1430,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
return enumtype
def get_nested_level(node):
- # type: (nodes.Node) -> int
+ # type: (nodes.Element) -> int
if node is None:
return 0
elif isinstance(node, nodes.enumerated_list):
@@ -1486,41 +1456,41 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{enumerate}\n')
def visit_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_term += 1
- ctx = '' # type: unicode
+ ctx = ''
if node.get('ids'):
ctx = '\\phantomsection'
for node_id in node['ids']:
@@ -1530,42 +1500,42 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append(ctx)
def depart_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
self.in_term -= 1
def visit_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('{[}')
def depart_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('{]}')
def visit_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
visit_field_name = visit_term
@@ -1575,7 +1545,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
depart_field_body = depart_definition
def visit_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
@@ -1590,21 +1560,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n')
def depart_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\end{center}')
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
@@ -1614,20 +1584,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.compact_list -= 1
self.body.append('\\end{itemize}\n')
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def latex_image_length(self, width_str):
- # type: (nodes.Node) -> unicode
+ # type: (str) -> str
try:
return rstdim_to_latexdim(width_str)
except ValueError:
@@ -1635,16 +1605,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
return None
def is_inline(self, node):
- # type: (nodes.Node) -> bool
+ # type: (nodes.Element) -> bool
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
attrs = node.attributes
- pre = [] # type: List[unicode]
+ pre = [] # type: List[str]
# in reverse order
- post = [] # type: List[unicode]
+ post = [] # type: List[str]
include_graphics_options = []
is_inline = self.is_inline(node)
if 'width' in attrs:
@@ -1717,11 +1687,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.extend(post)
def depart_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.table:
# TODO: support align option
if 'width' in node:
@@ -1738,7 +1708,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
length = None
if 'width' in node:
length = self.latex_image_length(node['width'])
- elif 'width' in node[0]:
+ elif isinstance(node[0], nodes.image) and 'width' in node[0]:
length = self.latex_image_length(node[0]['width'])
self.body.append('\\begin{wrapfigure}{%s}{%s}\n\\centering' %
(node['align'] == 'right' and 'r' or 'l', length or '0pt'))
@@ -1754,11 +1724,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('\\end{figure}\n')
def depart_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_caption += 1
if isinstance(node.parent, captioned_literal_block):
self.body.append('\\sphinxSetupCaptionForVerbatim{')
@@ -1770,7 +1740,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\caption{')
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
if isinstance(node.parent, nodes.figure):
labels = self.hypertarget_to(node.parent)
@@ -1778,64 +1748,62 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_caption -= 1
def visit_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\begin{sphinxlegend}')
def depart_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{sphinxlegend}\n')
def visit_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\begin{sphinxadmonition}{note}')
def depart_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{sphinxadmonition}\n')
- def _make_visit_admonition(name):
- # type: (unicode) -> Callable[[LaTeXTranslator, nodes.Node], None]
- def visit_admonition(self, node):
- # type: (nodes.Node) -> None
- self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' %
- (name, admonitionlabels[name]))
- return visit_admonition
+ def _visit_named_admonition(self, node):
+ # type: (nodes.Element) -> None
+ label = admonitionlabels[node.tagname]
+ self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
+ (node.tagname, label))
def _depart_named_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{sphinxadmonition}\n')
- visit_attention = _make_visit_admonition('attention')
+ visit_attention = _visit_named_admonition
depart_attention = _depart_named_admonition
- visit_caution = _make_visit_admonition('caution')
+ visit_caution = _visit_named_admonition
depart_caution = _depart_named_admonition
- visit_danger = _make_visit_admonition('danger')
+ visit_danger = _visit_named_admonition
depart_danger = _depart_named_admonition
- visit_error = _make_visit_admonition('error')
+ visit_error = _visit_named_admonition
depart_error = _depart_named_admonition
- visit_hint = _make_visit_admonition('hint')
+ visit_hint = _visit_named_admonition
depart_hint = _depart_named_admonition
- visit_important = _make_visit_admonition('important')
+ visit_important = _visit_named_admonition
depart_important = _depart_named_admonition
- visit_note = _make_visit_admonition('note')
+ visit_note = _visit_named_admonition
depart_note = _depart_named_admonition
- visit_tip = _make_visit_admonition('tip')
+ visit_tip = _visit_named_admonition
depart_tip = _depart_named_admonition
- visit_warning = _make_visit_admonition('warning')
+ visit_warning = _visit_named_admonition
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_target(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
def add_target(id):
- # type: (unicode) -> None
+ # type: (str) -> None
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
@@ -1856,11 +1824,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(self.hypertarget(id, anchor=anchor))
# skip if visitor for next node supports hyperlink
- next_node = node
+ next_node = node # type: nodes.Node
while isinstance(next_node, nodes.target):
next_node = next_node.next_node(ascend=True)
- domain = self.builder.env.get_domain('std')
+ domain = cast(StandardDomain, self.builder.env.get_domain('std'))
if isinstance(next_node, HYPERLINK_SUPPORT_NODES):
return
elif domain.get_enumerable_node_type(next_node) and domain.get_numfig_title(next_node):
@@ -1874,20 +1842,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
add_target(id)
def depart_target(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node, scre = None):
- # type: (nodes.Node, None) -> None
+ # type: (nodes.Element, Pattern) -> None
def escape(value):
value = self.encode(value)
value = value.replace(r'\{', r'\sphinxleftcurlybrace{}')
@@ -1957,7 +1925,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_raw(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.is_inline(node):
self.body.append('\n')
if 'latex' in node.get('format', '').split():
@@ -1967,7 +1935,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
@@ -1997,8 +1965,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
# reference to a label
id = uri[1:].replace('#', ':')
self.body.append(self.hyperlink(id))
- if len(node) and hasattr(node[0], 'attributes') and \
- 'std-term' in node[0].get('classes', []):
+ if (len(node) and
+ isinstance(node[0], nodes.Element) and
+ 'std-term' in node[0].get('classes', [])):
# don't add a pageref for glossary terms
self.context.append('}}}')
# mark up as termreference
@@ -2022,18 +1991,18 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('}')
def depart_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('refid'):
id = self.curfilestack[-1] + ':' + node['refid']
else:
id = node.get('refuri', '')[1:].replace('#', ':')
title = node.get('title', '%s')
- title = text_type(title).translate(tex_escape_map).replace('\\%s', '%s')
+ title = str(title).translate(tex_escape_map).replace('\\%s', '%s')
if '\\{name\\}' in title or '\\{number\\}' in title:
# new style format (cf. "Fig.%{number}")
title = title.replace('\\{name\\}', '{name}').replace('\\{number\\}', '{number}')
@@ -2048,59 +2017,59 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxstyleemphasis{')
def depart_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxstyleliteralemphasis{\sphinxupquote{')
self.no_contractions += 1
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}')
self.no_contractions -= 1
def visit_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxstylestrong{')
def depart_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxstyleliteralstrong{\sphinxupquote{')
self.no_contractions += 1
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}')
self.no_contractions -= 1
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
abbr = node.astext()
self.body.append(r'\sphinxstyleabbreviation{')
# spell out the explanation once
@@ -2111,51 +2080,53 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('}')
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(r'\sphinxtitleref{')
def depart_title_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_thebibliography(self, node):
- # type: (nodes.Node) -> None
- longest_label = max((subnode[0].astext() for subnode in node), key=len)
+ # type: (thebibliography) -> None
+ citations = cast(Iterable[nodes.citation], node)
+ labels = (cast(nodes.label, citation[0]) for citation in citations)
+ longest_label = max((label.astext() for label in labels), key=len)
if len(longest_label) > MAX_CITATION_LABEL_LENGTH:
# adjust max width of citation labels not to break the layout
longest_label = longest_label[:MAX_CITATION_LABEL_LENGTH]
- self.body.append(u'\n\\begin{sphinxthebibliography}{%s}\n' %
+ self.body.append('\n\\begin{sphinxthebibliography}{%s}\n' %
self.encode(longest_label))
def depart_thebibliography(self, node):
- # type: (nodes.Node) -> None
- self.body.append(u'\\end{sphinxthebibliography}\n')
+ # type: (thebibliography) -> None
+ self.body.append('\\end{sphinxthebibliography}\n')
def visit_citation(self, node):
- # type: (nodes.Node) -> None
- label = node[0].astext()
- self.body.append(u'\\bibitem[%s]{%s:%s}' %
- (self.encode(label), node['docname'], node['ids'][0]))
+ # type: (nodes.Element) -> None
+ label = cast(nodes.label, node[0])
+ self.body.append('\\bibitem[%s]{%s:%s}' % (self.encode(label.astext()),
+ node['docname'], node['ids'][0]))
def depart_citation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_citation_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.in_title:
pass
else:
@@ -2163,11 +2134,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_citation_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.no_contractions += 1
if self.in_title:
self.body.append(r'\sphinxstyleliteralintitle{\sphinxupquote{')
@@ -2175,43 +2146,43 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\sphinxcode{\sphinxupquote{')
def depart_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.no_contractions -= 1
self.body.append('}}')
def visit_footnote_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_footnotemark(self, node):
- # type: (nodes.Node) -> None
+ # type: (footnotemark) -> None
self.body.append('\\sphinxfootnotemark[')
def depart_footnotemark(self, node):
- # type: (nodes.Node) -> None
+ # type: (footnotemark) -> None
self.body.append(']')
def visit_footnotetext(self, node):
- # type: (nodes.Node) -> None
- number = node[0].astext()
+ # type: (footnotetext) -> None
+ label = cast(nodes.label, node[0])
self.body.append('%%\n\\begin{footnotetext}[%s]'
- '\\sphinxAtStartFootnote\n' % number)
+ '\\sphinxAtStartFootnote\n' % label.astext())
def depart_footnotetext(self, node):
- # type: (nodes.Node) -> None
+ # type: (footnotetext) -> None
# the \ignorespaces in particular for after table header use
self.body.append('%\n\\end{footnotetext}\\ignorespaces ')
def visit_captioned_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (captioned_literal_block) -> None
pass
def depart_captioned_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (captioned_literal_block) -> None
pass
def visit_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.in_parsed_literal += 1
@@ -2238,7 +2209,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
location=(self.curfilestack[-1], node.line), **highlight_args
)
# workaround for Unicode issue
- hlcode = hlcode.replace(u'€', u'@texteuro[]')
+ hlcode = hlcode.replace('€', '@texteuro[]')
if self.in_footnote:
self.body.append('\n\\sphinxSetupCodeBlockInFootnote')
hlcode = hlcode.replace('\\begin{Verbatim}',
@@ -2269,22 +2240,22 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\\end{sphinxalltt}\n')
self.in_parsed_literal -= 1
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\item[] ')
def depart_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
@@ -2294,11 +2265,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{DUlineblock}\n')
def visit_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
@@ -2314,7 +2285,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
done = 0
if len(node.children) == 1:
child = node.children[0]
@@ -2327,56 +2298,56 @@ class LaTeXTranslator(nodes.NodeVisitor):
# option node handling copied from docutils' latex writer
def visit_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\begin{optionlist}{3cm}\n')
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\\end{optionlist}\n')
def visit_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_string(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
ostring = node.astext()
self.no_contractions += 1
self.body.append(self.encode(ostring))
@@ -2384,31 +2355,31 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' ')
def depart_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('$^{\\text{')
def depart_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}$')
def visit_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('$_{\\text{')
def depart_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}}$')
def visit_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
classes = node.get('classes', [])
if classes in [['menuselection']]:
self.body.append(r'\sphinxmenuselection{')
@@ -2426,102 +2397,102 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('')
def depart_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_generated(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_generated(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_decoration(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_decoration(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# docutils-generated elements that we don't support
def visit_header(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_footer(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_docinfo(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
# text handling
def encode(self, text):
- # type: (unicode) -> unicode
- text = text_type(text).translate(tex_escape_map)
+ # type: (str) -> str
+ text = str(text).translate(tex_escape_map)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end.
- text = text.replace(u'\n', u'~\\\\\n').replace(u' ', u'~')
+ text = text.replace('\n', '~\\\\\n').replace(' ', '~')
if self.no_contractions:
- text = text.replace('--', u'-{-}')
- text = text.replace("''", u"'{'}")
+ text = text.replace('--', '-{-}')
+ text = text.replace("''", "'{'}")
return text
def encode_uri(self, text):
- # type: (unicode) -> unicode
+ # type: (str) -> str
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~')
def visit_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
text = self.encode(node.astext())
self.body.append(text)
def depart_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
pass
def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_meta(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_system_message(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.in_title:
self.body.append(r'\protect\(%s\protect\)' % node.astext())
else:
@@ -2529,7 +2500,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('label'):
label = "equation:%s:%s" % (node['docname'], node['label'])
else:
@@ -2546,7 +2517,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_math_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (math_reference) -> None
label = "equation:%s:%s" % (node['docname'], node['target'])
eqref_format = self.builder.config.math_eqref_format
if eqref_format:
@@ -2561,7 +2532,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\eqref{%s}' % label)
def depart_math_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (math_reference) -> None
pass
def unknown_visit(self, node):
@@ -2570,16 +2541,40 @@ class LaTeXTranslator(nodes.NodeVisitor):
# --------- METHODS FOR COMPATIBILITY --------------------------------------
+ def collect_footnotes(self, node):
+ # type: (nodes.Element) -> Dict[str, List[Union[collected_footnote, bool]]]
+ def footnotes_under(n):
+ # type: (nodes.Element) -> Iterator[nodes.footnote]
+ if isinstance(n, nodes.footnote):
+ yield n
+ else:
+ for c in n.children:
+ if isinstance(c, addnodes.start_of_file):
+ continue
+ elif isinstance(c, nodes.Element):
+ yield from footnotes_under(c)
+
+ warnings.warn('LaTeXWriter.collected_footnote() is deprecated.',
+ RemovedInSphinx40Warning, stacklevel=2)
+
+ fnotes = {} # type: Dict[str, List[Union[collected_footnote, bool]]]
+ for fn in footnotes_under(node):
+ label = cast(nodes.label, fn[0])
+ num = label.astext().strip()
+ newnode = collected_footnote('', *fn.children, number=num)
+ fnotes[num] = [newnode, False]
+ return fnotes
+
@property
def footnotestack(self):
- # type: () -> List[Dict[unicode, List[Union[collected_footnote, bool]]]]
+ # type: () -> List[Dict[str, List[Union[collected_footnote, bool]]]]
warnings.warn('LaTeXWriter.footnotestack is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@property
def bibitems(self):
- # type: () -> List[List[unicode]]
+ # type: () -> List[List[str]]
warnings.warn('LaTeXTranslator.bibitems() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return []
@@ -2593,7 +2588,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
@property
def next_section_ids(self):
- # type: () -> Set[unicode]
+ # type: () -> Set[str]
warnings.warn('LaTeXTranslator.next_section_ids is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return set()
@@ -2606,20 +2601,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
return {}
def push_hyperlink_ids(self, figtype, ids):
- # type: (unicode, Set[unicode]) -> None
+ # type: (str, Set[str]) -> None
warnings.warn('LaTeXTranslator.push_hyperlink_ids() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
pass
def pop_hyperlink_ids(self, figtype):
- # type: (unicode) -> Set[unicode]
+ # type: (str) -> Set[str]
warnings.warn('LaTeXTranslator.pop_hyperlink_ids() is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return set()
@property
def hlsettingstack(self):
- # type: () -> List[List[Union[unicode, int]]]
+ # type: () -> List[List[Union[str, int]]]
warnings.warn('LaTeXTranslator.hlsettingstack is deprecated.',
RemovedInSphinx30Warning, stacklevel=2)
return [[self.builder.config.highlight_language, sys.maxsize]]
@@ -2634,6 +2629,30 @@ class LaTeXTranslator(nodes.NodeVisitor):
msg = __("Unknown configure key: latex_elements[%r] is ignored.")
logger.warning(msg % key)
+ def babel_defmacro(self, name, definition):
+ # type: (str, str) -> str
+ warnings.warn('babel_defmacro() is deprecated.',
+ RemovedInSphinx40Warning)
+
+ if self.elements['babel']:
+ prefix = '\\addto\\extras%s{' % self.babel.get_language()
+ suffix = '}'
+ else: # babel is disabled (mainly for Japanese environment)
+ prefix = ''
+ suffix = ''
+
+ return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix))
+
+ def _make_visit_admonition(name): # type: ignore
+ # type: (str) -> Callable[[LaTeXTranslator, nodes.Element], None]
+ warnings.warn('LaTeXTranslator._make_visit_admonition() is deprecated.',
+ RemovedInSphinx30Warning)
+
+ def visit_admonition(self, node):
+ # type: (nodes.Element) -> None
+ self.body.append('\n\\begin{sphinxadmonition}{%s}{%s:}' %
+ (name, admonitionlabels[name]))
+ return visit_admonition
# Import old modules here for compatibility
# They should be imported after `LaTeXTranslator` to avoid recursive import.
diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py
index 45a800533..8d311acbc 100644
--- a/sphinx/writers/manpage.py
+++ b/sphinx/writers/manpage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.manpage
~~~~~~~~~~~~~~~~~~~~~~
@@ -9,23 +8,27 @@
:license: BSD, see LICENSE for details.
"""
+import warnings
+from typing import Iterable, cast
+
from docutils import nodes
from docutils.writers.manpage import (
- MACRO_DEF,
Writer,
Translator as BaseTranslator
)
-import sphinx.util.docutils
from sphinx import addnodes
+from sphinx.builders import Builder
+from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.locale import admonitionlabels, _
from sphinx.util import logging
+from sphinx.util.docutils import SphinxTranslator
from sphinx.util.i18n import format_date
+from sphinx.util.nodes import NodeMatcher
if False:
# For type annotation
- from typing import Any # NOQA
- from sphinx.builders import Builder # NOQA
+ from typing import Any, Dict # NOQA
logger = logging.getLogger(__name__)
@@ -33,20 +36,20 @@ logger = logging.getLogger(__name__)
class ManualPageWriter(Writer):
def __init__(self, builder):
# type: (Builder) -> None
- Writer.__init__(self)
+ super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
transform = NestedInlineTransform(self.document)
transform.apply()
- visitor = self.builder.create_translator(self.builder, self.document)
- self.visitor = visitor
+ visitor = self.builder.create_translator(self.document, self.builder)
+ self.visitor = cast(ManualPageTranslator, visitor)
self.document.walkabout(visitor)
- self.output = visitor.astext()
+ self.output = self.visitor.astext()
-class NestedInlineTransform(object):
+class NestedInlineTransform:
"""
Flatten nested inline nodes:
@@ -61,33 +64,38 @@ class NestedInlineTransform(object):
# type: (nodes.document) -> None
self.document = document
- def apply(self):
- # type: () -> None
- def is_inline(node):
- # type: (nodes.Node) -> bool
- return isinstance(node, (nodes.literal, nodes.emphasis, nodes.strong))
-
- for node in self.document.traverse(is_inline):
- if any(is_inline(subnode) for subnode in node):
+ def apply(self, **kwargs):
+ # type: (Any) -> None
+ matcher = NodeMatcher(nodes.literal, nodes.emphasis, nodes.strong)
+ for node in self.document.traverse(matcher): # type: nodes.TextElement
+ if any(matcher(subnode) for subnode in node):
pos = node.parent.index(node)
for subnode in reversed(node[1:]):
node.remove(subnode)
- if is_inline(subnode):
+ if matcher(subnode):
node.parent.insert(pos + 1, subnode)
else:
- newnode = node.__class__('', subnode, **node.attributes)
+ newnode = node.__class__('', '', subnode, **node.attributes)
node.parent.insert(pos + 1, newnode)
-class ManualPageTranslator(BaseTranslator):
+class ManualPageTranslator(SphinxTranslator, BaseTranslator):
"""
Custom translator.
"""
- def __init__(self, builder, *args, **kwds):
- # type: (Builder, Any, Any) -> None
- BaseTranslator.__init__(self, *args, **kwds)
- self.builder = builder
+ _docinfo = {} # type: Dict[str, Any]
+
+ def __init__(self, *args):
+ # type: (Any) -> None
+ if isinstance(args[0], nodes.document) and isinstance(args[1], Builder):
+ document, builder = args
+ else:
+ warnings.warn('The order of arguments for ManualPageTranslator has been changed. '
+ 'Please give "document" as 1st and "builder" as 2nd.',
+ RemovedInSphinx40Warning, stacklevel=2)
+ builder, document = args
+ super().__init__(document, builder)
self.in_productionlist = 0
@@ -95,35 +103,31 @@ class ManualPageTranslator(BaseTranslator):
self.section_level = -1
# docinfo set by man_pages config value
- self._docinfo['title'] = self.document.settings.title
- self._docinfo['subtitle'] = self.document.settings.subtitle
- if self.document.settings.authors:
+ self._docinfo['title'] = self.settings.title
+ self._docinfo['subtitle'] = self.settings.subtitle
+ if self.settings.authors:
# don't set it if no author given
- self._docinfo['author'] = self.document.settings.authors
- self._docinfo['manual_section'] = self.document.settings.section
+ self._docinfo['author'] = self.settings.authors
+ self._docinfo['manual_section'] = self.settings.section
# docinfo set by other config values
self._docinfo['title_upper'] = self._docinfo['title'].upper()
- if builder.config.today:
- self._docinfo['date'] = builder.config.today
+ if self.config.today:
+ self._docinfo['date'] = self.config.today
else:
- self._docinfo['date'] = format_date(builder.config.today_fmt or _('%b %d, %Y'),
- language=builder.config.language)
- self._docinfo['copyright'] = builder.config.copyright
- self._docinfo['version'] = builder.config.version
- self._docinfo['manual_group'] = builder.config.project
-
- # In docutils < 0.11 self.append_header() was never called
- if sphinx.util.docutils.__version_info__ < (0, 11):
- self.body.append(MACRO_DEF)
+ self._docinfo['date'] = format_date(self.config.today_fmt or _('%b %d, %Y'),
+ language=self.config.language)
+ self._docinfo['copyright'] = self.config.copyright
+ self._docinfo['version'] = self.config.version
+ self._docinfo['manual_group'] = self.config.project
# Overwrite admonition label translations with our own
for label, translation in admonitionlabels.items():
- self.language.labels[label] = self.deunicode(translation)
+ self.language.labels[label] = self.deunicode(translation) # type: ignore
# overwritten -- added quotes around all .TH arguments
def header(self):
- # type: () -> unicode
+ # type: () -> str
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
@@ -131,177 +135,175 @@ class ManualPageTranslator(BaseTranslator):
return tmpl % self._docinfo
def visit_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_definition_list(node)
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_definition_list(node)
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_definition_list_item(node)
self.visit_term(node)
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_term(node)
def visit_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' ')
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' -> ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(')')
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
def depart_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('[')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(']')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_definition(node)
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_definition(node)
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_paragraph(node)
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_paragraph(node)
# overwritten -- don't make whole of term bold if it includes strong node
def visit_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.traverse(nodes.strong):
self.body.append('\n')
else:
- BaseTranslator.visit_term(self, node)
+ super().visit_term(node)
# overwritten -- we don't want source comments to show up
- def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ def visit_comment(self, node): # type: ignore
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
# overwritten -- added ensure_eol()
def visit_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
- BaseTranslator.visit_footnote(self, node)
+ super().visit_footnote(node)
# overwritten -- handle footnotes rubric
def visit_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
- if len(node.children) == 1:
- rubtitle = node.children[0].astext()
- if rubtitle in ('Footnotes', _('Footnotes')):
- self.body.append('.SH ' + self.deunicode(rubtitle).upper() +
- '\n')
- raise nodes.SkipNode
+ if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
+ self.body.append('.SH ' + self.deunicode(node.astext()).upper() + '\n')
+ raise nodes.SkipNode
else:
self.body.append('.sp\n')
def depart_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_admonition(node)
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
names = []
self.in_productionlist += 1
self.body.append('.sp\n.nf\n')
- for production in node:
+ productionlist = cast(Iterable[addnodes.production], node)
+ for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
- for production in node:
+ for production in productionlist:
if production['tokenname']:
lastname = production['tokenname'].ljust(maxlen)
self.body.append(self.defs['strong'][0])
@@ -317,16 +319,16 @@ class ManualPageTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# overwritten -- don't emit a warning for images
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'alt' in node.attributes:
self.body.append(_('[image: %s]') % node['alt'] + '\n')
self.body.append(_('[image]') + '\n')
@@ -334,11 +336,11 @@ class ManualPageTranslator(BaseTranslator):
# overwritten -- don't visit inner marked up nodes
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.defs['reference'][0])
# avoid repeating escaping code... fine since
# visit_Text calls astext() and only works on that afterwards
- self.visit_Text(node)
+ self.visit_Text(node) # type: ignore
self.body.append(self.defs['reference'][1])
uri = node.get('refuri', '')
@@ -356,115 +358,118 @@ class ManualPageTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('.sp\n.ce\n')
def depart_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n.ce 0\n')
def visit_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_toctree(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_index(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
+ bullet_list = cast(nodes.bullet_list, node[0])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
self.ensure_eol()
- self.body.append(', '.join(n.astext()
- for n in node.children[0].children) + '.')
+ bullet_list = cast(nodes.bullet_list, node[0])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
+ self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n')
raise nodes.SkipNode
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_bullet_list(node)
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_strong(node)
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_strong(node)
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.visit_strong(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
return self.depart_strong(node)
# overwritten: handle section titles better than in 0.6 release
def visit_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, addnodes.seealso):
self.body.append('.IP "')
return
@@ -476,47 +481,47 @@ class ManualPageTranslator(BaseTranslator):
self.body.append('.SH %s\n' %
self.deunicode(node.astext().upper()))
raise nodes.SkipNode
- return BaseTranslator.visit_title(self, node)
+ return super().visit_title(node)
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, addnodes.seealso):
self.body.append('"\n')
return
- return BaseTranslator.depart_title(self, node)
+ return super().depart_title(node)
def visit_raw(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'manpage' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_meta(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_centered(node)
def depart_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_centered(node)
def unknown_visit(self, node):
diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py
index 3343a9ec1..f4af84edf 100644
--- a/sphinx/writers/texinfo.py
+++ b/sphinx/writers/texinfo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.texinfo
~~~~~~~~~~~~~~~~~~~~~~
@@ -11,16 +10,18 @@
import re
import textwrap
+import warnings
from os import path
+from typing import Iterable, cast
from docutils import nodes, writers
-from six import itervalues
-from six.moves import range
from sphinx import addnodes, __display_version__
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.errors import ExtensionError
from sphinx.locale import admonitionlabels, _, __
from sphinx.util import logging
+from sphinx.util.docutils import SphinxTranslator
from sphinx.util.i18n import format_date
from sphinx.writers.latex import collected_footnote
@@ -28,6 +29,7 @@ if False:
# For type annotation
from typing import Any, Callable, Dict, Iterator, List, Pattern, Set, Tuple, Union # NOQA
from sphinx.builders.texinfo import TexinfoBuilder # NOQA
+ from sphinx.domains import IndexEntry # NOQA
logger = logging.getLogger(__name__)
@@ -88,19 +90,20 @@ TEMPLATE = """\
def find_subsections(section):
- # type: (nodes.Node) -> List[nodes.Node]
+ # type: (nodes.Element) -> List[nodes.section]
"""Return a list of subsections for the given ``section``."""
result = []
- for child in section.children:
+ for child in section:
if isinstance(child, nodes.section):
result.append(child)
continue
- result.extend(find_subsections(child))
+ elif isinstance(child, nodes.Element):
+ result.extend(find_subsections(child))
return result
def smart_capwords(s, sep=None):
- # type: (unicode, unicode) -> unicode
+ # type: (str, str) -> str
"""Like string.capwords() but does not capitalize words that already
contain a capital letter."""
words = s.split(sep)
@@ -120,30 +123,32 @@ class TexinfoWriter(writers.Writer):
('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
('Description', ['--texinfo-dir-description'], {'default': ''}),
('Category', ['--texinfo-dir-category'], {'default':
- 'Miscellaneous'}))) # type: Tuple[unicode, Any, Tuple[Tuple[unicode, List[unicode], Dict[unicode, unicode]], ...]] # NOQA
+ 'Miscellaneous'}))) # type: Tuple[str, Any, Tuple[Tuple[str, List[str], Dict[str, str]], ...]] # NOQA
settings_defaults = {} # type: Dict
- output = None # type: unicode
+ output = None # type: str
visitor_attributes = ('output', 'fragment')
def __init__(self, builder):
# type: (TexinfoBuilder) -> None
- writers.Writer.__init__(self)
+ super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
- self.visitor = visitor = self.builder.create_translator(self.document, self.builder)
+ visitor = self.builder.create_translator(self.document, self.builder)
+ self.visitor = cast(TexinfoTranslator, visitor)
self.document.walkabout(visitor)
- visitor.finish()
+ self.visitor.finish()
for attr in self.visitor_attributes:
- setattr(self, attr, getattr(visitor, attr))
+ setattr(self, attr, getattr(self.visitor, attr))
-class TexinfoTranslator(nodes.NodeVisitor):
+class TexinfoTranslator(SphinxTranslator):
+ builder = None # type: TexinfoBuilder
ignore_missing_images = False
default_elements = {
@@ -162,24 +167,23 @@ class TexinfoTranslator(nodes.NodeVisitor):
}
def __init__(self, document, builder):
- # type: (nodes.Node, TexinfoBuilder) -> None
- nodes.NodeVisitor.__init__(self, document)
- self.builder = builder
+ # type: (nodes.document, TexinfoBuilder) -> None
+ super().__init__(document, builder)
self.init_settings()
- self.written_ids = set() # type: Set[unicode]
+ self.written_ids = set() # type: Set[str]
# node names and anchors in output
# node names and anchors that should be in output
- self.referenced_ids = set() # type: Set[unicode]
- self.indices = [] # type: List[Tuple[unicode, unicode]]
+ self.referenced_ids = set() # type: Set[str]
+ self.indices = [] # type: List[Tuple[str, str]]
# (node name, content)
- self.short_ids = {} # type: Dict[unicode, unicode]
+ self.short_ids = {} # type: Dict[str, str]
# anchors --> short ids
- self.node_names = {} # type: Dict[unicode, unicode]
+ self.node_names = {} # type: Dict[str, str]
# node name --> node's name to display
- self.node_menus = {} # type: Dict[unicode, List[unicode]]
+ self.node_menus = {} # type: Dict[str, List[str]]
# node name --> node's menu entries
- self.rellinks = {} # type: Dict[unicode, List[unicode]]
+ self.rellinks = {} # type: Dict[str, List[str]]
# node name --> (next, previous, up)
self.collect_indices()
@@ -187,18 +191,18 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.collect_node_menus()
self.collect_rellinks()
- self.body = [] # type: List[unicode]
- self.context = [] # type: List[unicode]
+ self.body = [] # type: List[str]
+ self.context = [] # type: List[str]
self.previous_section = None # type: nodes.section
self.section_level = 0
self.seen_title = False
- self.next_section_ids = set() # type: Set[unicode]
+ self.next_section_ids = set() # type: Set[str]
self.escape_newlines = 0
self.escape_hyphens = 0
- self.curfilestack = [] # type: List[unicode]
- self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA
+ self.curfilestack = [] # type: List[str]
+ self.footnotestack = [] # type: List[Dict[str, List[Union[collected_footnote, bool]]]] # NOQA
self.in_footnote = 0
- self.handled_abbrs = set() # type: Set[unicode]
+ self.handled_abbrs = set() # type: Set[str]
self.colwidths = None # type: List[int]
def finish(self):
@@ -225,14 +229,13 @@ class TexinfoTranslator(nodes.NodeVisitor):
def init_settings(self):
# type: () -> None
- settings = self.settings = self.document.settings
elements = self.elements = self.default_elements.copy()
elements.update({
# if empty, the title is set to the first section title
- 'title': settings.title,
- 'author': settings.author,
+ 'title': self.settings.title,
+ 'author': self.settings.author,
# if empty, use basename of input file
- 'filename': settings.texinfo_filename,
+ 'filename': self.settings.texinfo_filename,
'release': self.escape(self.builder.config.release),
'project': self.escape(self.builder.config.project),
'copyright': self.escape(self.builder.config.copyright),
@@ -241,11 +244,10 @@ class TexinfoTranslator(nodes.NodeVisitor):
language=self.builder.config.language))
})
# title
- title = None # type: unicode
- title = elements['title'] # type: ignore
+ title = self.settings.title # type: str
if not title:
- title = self.document.next_node(nodes.title)
- title = (title and title.astext()) or '<untitled>' # type: ignore
+ title_node = self.document.next_node(nodes.title)
+            title = (title_node and title_node.astext()) or '<untitled>'
elements['title'] = self.escape_id(title) or '<untitled>'
# filename
if not elements['filename']:
@@ -254,19 +256,19 @@ class TexinfoTranslator(nodes.NodeVisitor):
elements['filename'] = elements['filename'][:-4] # type: ignore
elements['filename'] += '.info' # type: ignore
# direntry
- if settings.texinfo_dir_entry:
+ if self.settings.texinfo_dir_entry:
entry = self.format_menu_entry(
- self.escape_menu(settings.texinfo_dir_entry),
+ self.escape_menu(self.settings.texinfo_dir_entry),
'(%s)' % elements['filename'],
- self.escape_arg(settings.texinfo_dir_description))
+ self.escape_arg(self.settings.texinfo_dir_description))
elements['direntry'] = ('@dircategory %s\n'
'@direntry\n'
'%s'
'@end direntry\n') % (
- self.escape_id(settings.texinfo_dir_category), entry)
+ self.escape_id(self.settings.texinfo_dir_category), entry)
elements['copying'] = COPYING % elements
# allow the user to override them all
- elements.update(settings.texinfo_elements)
+ elements.update(self.settings.texinfo_elements)
def collect_node_names(self):
# type: () -> None
@@ -275,7 +277,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
Assigns the attribute ``node_name`` to each section."""
def add_node_name(name):
- # type: (unicode) -> unicode
+ # type: (str) -> str
node_id = self.escape_id(name)
nth, suffix = 1, ''
while node_id + suffix in self.written_ids or \
@@ -296,7 +298,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
for name, content in self.indices]
# each section is also a node
for section in self.document.traverse(nodes.section):
- title = section.next_node(nodes.Titular)
+ title = cast(nodes.TextElement, section.next_node(nodes.Titular))
name = (title and title.astext()) or '<untitled>'
section['node_name'] = add_node_name(name)
@@ -304,8 +306,9 @@ class TexinfoTranslator(nodes.NodeVisitor):
# type: () -> None
"""Collect the menu entries for each "node" section."""
node_menus = self.node_menus
- for node in ([self.document] +
- self.document.traverse(nodes.section)):
+ targets = [self.document] # type: List[nodes.Element]
+ targets.extend(self.document.traverse(nodes.section))
+ for node in targets:
assert 'node_name' in node and node['node_name']
entries = [s['node_name'] for s in find_subsections(node)]
node_menus[node['node_name']] = entries
@@ -360,7 +363,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
# characters.
def escape(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
@@ -371,7 +374,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s
def escape_arg(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
@@ -382,7 +385,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s
def escape_id(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:.()'
for bc in bad_chars:
@@ -391,7 +394,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return self.escape(s)
def escape_menu(self, s):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
@@ -405,7 +408,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n')
def format_menu_entry(self, name, node_name, desc):
- # type: (unicode, unicode, unicode) -> unicode
+ # type: (str, str, str) -> str
if name == node_name:
s = '* %s:: ' % (name,)
else:
@@ -416,7 +419,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s + wdesc.strip() + '\n'
def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
- # type: (List[unicode], Pattern) -> None
+ # type: (List[str], Pattern) -> None
for entry in entries:
name = self.node_names[entry]
# special formatting for entries that are divided by an em-dash
@@ -434,7 +437,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name):
- # type: (unicode) -> None
+ # type: (str) -> None
entries = self.node_menus[node_name]
if not entries:
return
@@ -447,7 +450,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return
def _add_detailed_menu(name):
- # type: (unicode) -> None
+ # type: (str) -> None
entries = self.node_menus[name]
if not entries:
return
@@ -464,7 +467,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
'@end menu\n')
def tex_image_length(self, width_str):
- # type: (unicode) -> unicode
+ # type: (str) -> str
match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
@@ -482,15 +485,15 @@ class TexinfoTranslator(nodes.NodeVisitor):
def collect_indices(self):
# type: () -> None
def generate(content, collapsed):
- # type: (List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool) -> unicode
- ret = ['\n@menu\n'] # type: List[unicode]
+ # type: (List[Tuple[str, List[IndexEntry]]], bool) -> str
+ ret = ['\n@menu\n']
for letter, entries in content:
for entry in entries:
if not entry[3]:
continue
- name = self.escape_menu(entry[0]) # type: ignore
+ name = self.escape_menu(entry[0])
sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
- desc = self.escape_arg(entry[6]) # type: ignore
+ desc = self.escape_arg(entry[6])
me = self.format_menu_entry(name, sid, desc)
ret.append(me)
ret.append('@end menu\n')
@@ -498,7 +501,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
indices_config = self.builder.config.texinfo_domain_indices
if indices_config:
- for domain in itervalues(self.builder.env.domains):
+ for domain in self.builder.env.domains.values():
for indexcls in domain.indices:
indexname = '%s-%s' % (domain.name, indexcls.name)
if isinstance(indices_config, list):
@@ -520,27 +523,28 @@ class TexinfoTranslator(nodes.NodeVisitor):
# TODO: move this to sphinx.util
def collect_footnotes(self, node):
- # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]]
+ # type: (nodes.Element) -> Dict[str, List[Union[collected_footnote, bool]]]
def footnotes_under(n):
- # type: (nodes.Node) -> Iterator[nodes.footnote]
+ # type: (nodes.Element) -> Iterator[nodes.footnote]
if isinstance(n, nodes.footnote):
yield n
else:
for c in n.children:
if isinstance(c, addnodes.start_of_file):
continue
- for k in footnotes_under(c):
- yield k
- fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
+ elif isinstance(c, nodes.Element):
+ yield from footnotes_under(c)
+ fnotes = {} # type: Dict[str, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
- num = fn.children[0].astext().strip()
- fnotes[num] = [collected_footnote(*fn.children), False]
+ label = cast(nodes.label, fn[0])
+ num = label.astext().strip()
+ fnotes[num] = [collected_footnote('', *fn.children), False]
return fnotes
# -- xref handling
def get_short_id(self, id):
- # type: (unicode) -> unicode
+ # type: (str) -> str
"""Return a shorter 'id' associated with ``id``."""
# Shorter ids improve paragraph filling in places
# that the id is hidden by Emacs.
@@ -552,7 +556,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return sid
def add_anchor(self, id, node):
- # type: (unicode, nodes.Node) -> None
+ # type: (str, nodes.Node) -> None
if id.startswith('index-'):
return
id = self.curfilestack[-1] + ':' + id
@@ -564,7 +568,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.written_ids.add(id)
def add_xref(self, id, name, node):
- # type: (unicode, unicode, nodes.Node) -> None
+ # type: (str, str, nodes.Node) -> None
name = self.escape_menu(name)
sid = self.get_short_id(id)
self.body.append('@ref{%s,,%s}' % (sid, name))
@@ -574,19 +578,19 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Visiting
def visit_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if 'docname' in node:
self.add_anchor(':doc', node)
def depart_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.footnotestack.pop()
self.curfilestack.pop()
def visit_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
s = self.escape(node.astext())
if self.escape_newlines:
s = s.replace('\n', ' ')
@@ -596,11 +600,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append(s)
def depart_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
pass
def visit_section(self, node):
- # type: (nodes.section) -> None
+ # type: (nodes.Element) -> None
self.next_section_ids.update(node.get('ids', []))
if not self.seen_title:
return
@@ -616,11 +620,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.add_anchor(id, node)
self.next_section_ids.clear()
- self.previous_section = node
+ self.previous_section = cast(nodes.section, node)
self.section_level += 1
def depart_section(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.section_level -= 1
headings = (
@@ -629,16 +633,16 @@ class TexinfoTranslator(nodes.NodeVisitor):
'@section',
'@subsection',
'@subsubsection',
- ) # type: Tuple[unicode, ...]
+ )
rubrics = (
'@heading',
'@subheading',
'@subsubheading',
- ) # type: Tuple[unicode, ...]
+ )
def visit_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.seen_title:
self.seen_title = True
raise nodes.SkipNode
@@ -660,13 +664,12 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n%s ' % heading)
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n')
def visit_rubric(self, node):
- # type: (nodes.Node) -> None
- if len(node.children) == 1 and node.children[0].astext() in \
- ('Footnotes', _('Footnotes')):
+ # type: (nodes.Element) -> None
+ if len(node) == 1 and node.astext() in ('Footnotes', _('Footnotes')):
raise nodes.SkipNode
try:
rubric = self.rubrics[self.section_level]
@@ -676,22 +679,22 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.escape_newlines += 1
def depart_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_newlines -= 1
self.body.append('\n\n')
def visit_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n@noindent\n')
def depart_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n')
# -- References
def visit_target(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# postpone the labels until after the sectioning command
parindex = node.parent.index(node)
try:
@@ -716,11 +719,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.add_anchor(id, node)
def depart_target(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# an xref's target is displayed in Info so we ignore a few
# cases for the sake of appearance
if isinstance(node.parent, (nodes.title, addnodes.desc_type)):
@@ -784,17 +787,17 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_title_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
text = node.astext()
self.body.append('@cite{%s}' % self.escape_arg(text))
raise nodes.SkipNode
@@ -802,28 +805,28 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Blocks
def visit_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def depart_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@quotation\n')
def depart_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end quotation\n')
def visit_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@example\n')
def depart_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end example\n')
@@ -831,86 +834,86 @@ class TexinfoTranslator(nodes.NodeVisitor):
depart_doctest_block = depart_literal_block
def visit_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
self.body.append('@display\n')
def depart_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@end display\n')
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
def visit_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_newlines += 1
def depart_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@w{ }\n')
self.escape_newlines -= 1
# -- Inline
def visit_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@strong{')
def depart_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@emph{')
def depart_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@code{')
def depart_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@w{^')
def depart_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@w{[')
def depart_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(']}')
# -- Footnotes
def visit_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_collected_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.in_footnote += 1
self.body.append('@footnote{')
def depart_collected_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
self.in_footnote -= 1
def visit_footnote_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
@@ -921,38 +924,38 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipChildren
def visit_citation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
for id in node.get('ids'):
self.add_anchor(id, node)
self.escape_newlines += 1
def depart_citation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_newlines -= 1
def visit_citation_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@w{[')
def depart_citation_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(']}')
# -- Lists
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
bullet = node.get('bullet', '*')
self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# doesn't support Roman numerals
enum = node.get('enumtype', 'arabic')
starters = {'arabic': '',
@@ -962,100 +965,100 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end enumerate\n')
def visit_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@item ')
def depart_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# -- Option List
def visit_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n@table @option\n')
def depart_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end table\n')
def visit_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.at_item_x = '@item'
def depart_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_hyphens += 1
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_hyphens -= 1
def visit_option_string(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_option_string(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def depart_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# -- Definitions
def visit_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end table\n')
def visit_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.at_item_x = '@item'
def depart_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
for id in node.get('ids'):
self.add_anchor(id, node)
# anchors and indexes need to go in front
@@ -1067,45 +1070,45 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.at_item_x = '@itemx'
def depart_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' : ')
def depart_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def depart_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# -- Tables
def visit_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.entry_sep = '@item'
def depart_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_colspec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.colwidths.append(node['colwidth'])
if len(self.colwidths) != self.n_cols:
return
@@ -1114,209 +1117,208 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('{%s} ' % ('x' * (n + 2)))
def depart_colspec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.colwidths = []
self.n_cols = node['cols']
def depart_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.entry_sep = '@headitem'
def depart_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_tbody(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_tbody(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.entry_sep = '@item'
def visit_entry(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n%s\n' % self.entry_sep)
self.entry_sep = '@tab'
def depart_entry(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
for i in range(node.get('morecols', 0)):
self.body.append('\n@tab\n')
# -- Field Lists
def visit_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def depart_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_field_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@*')
def depart_field_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(': ')
def visit_field_body(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_field_body(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# -- Admonitions
def visit_admonition(self, node, name=''):
- # type: (nodes.Node, unicode) -> None
+ # type: (nodes.Element, str) -> None
if not name:
- name = self.escape(node[0].astext())
- self.body.append(u'\n@cartouche\n@quotation %s ' % name)
+ title = cast(nodes.title, node[0])
+ name = self.escape(title.astext())
+ self.body.append('\n@cartouche\n@quotation %s ' % name)
+
+ def _visit_named_admonition(self, node):
+ # type: (nodes.Element) -> None
+ label = admonitionlabels[node.tagname]
+ self.body.append('\n@cartouche\n@quotation %s ' % label)
def depart_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.ensure_eol()
self.body.append('@end quotation\n'
'@end cartouche\n')
- def _make_visit_admonition(name):
- # type: (unicode) -> Callable[[TexinfoTranslator, nodes.Node], None]
- def visit(self, node):
- # type: (nodes.Node) -> None
- self.visit_admonition(node, admonitionlabels[name])
- return visit
-
- visit_attention = _make_visit_admonition('attention')
+ visit_attention = _visit_named_admonition
depart_attention = depart_admonition
- visit_caution = _make_visit_admonition('caution')
+ visit_caution = _visit_named_admonition
depart_caution = depart_admonition
- visit_danger = _make_visit_admonition('danger')
+ visit_danger = _visit_named_admonition
depart_danger = depart_admonition
- visit_error = _make_visit_admonition('error')
+ visit_error = _visit_named_admonition
depart_error = depart_admonition
- visit_hint = _make_visit_admonition('hint')
+ visit_hint = _visit_named_admonition
depart_hint = depart_admonition
- visit_important = _make_visit_admonition('important')
+ visit_important = _visit_named_admonition
depart_important = depart_admonition
- visit_note = _make_visit_admonition('note')
+ visit_note = _visit_named_admonition
depart_note = depart_admonition
- visit_tip = _make_visit_admonition('tip')
+ visit_tip = _visit_named_admonition
depart_tip = depart_admonition
- visit_warning = _make_visit_admonition('warning')
+ visit_warning = _visit_named_admonition
depart_warning = depart_admonition
# -- Misc
def visit_docinfo(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_generated(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_header(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_footer(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('literal_block'):
self.body.append('\n\n@float LiteralBlock\n')
def depart_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('literal_block'):
self.body.append('\n@end float\n\n')
def visit_decoration(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_decoration(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# ignore TOC's since we have to have a "menu" anyway
if 'contents' in node.get('classes', []):
raise nodes.SkipNode
- title = node[0]
+ title = cast(nodes.title, node[0])
self.visit_rubric(title)
self.body.append('%s\n' % self.escape(title.astext()))
def depart_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_transition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n%s\n\n' % ('_' * 66))
def depart_transition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n@center --- ')
def depart_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n')
def visit_raw(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
format = node.get('format', '').split()
if 'texinfo' in format or 'texi' in format:
self.body.append(node.astext())
raise nodes.SkipNode
def visit_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n\n@float Figure\n')
def depart_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@end float\n\n')
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
@@ -1326,14 +1328,14 @@ class TexinfoTranslator(nodes.NodeVisitor):
location=(self.curfilestack[-1], node.line))
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('}\n')
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
@@ -1354,65 +1356,65 @@ class TexinfoTranslator(nodes.NodeVisitor):
(name, width, height, alt, ext[1:]))
def depart_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_sidebar(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_topic(node)
def depart_sidebar(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_topic(node)
def visit_label(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@w{(')
def depart_label(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(')} ')
def visit_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_system_message(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n@verbatim\n'
'<SYSTEM MESSAGE: %s>\n'
'@end verbatim\n' % node.astext())
raise nodes.SkipNode
def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
for line in node.astext().splitlines():
self.body.append('@c %s\n' % line)
raise nodes.SkipNode
def visit_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('>>')
def depart_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('<<')
def unimplemented_visit(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
logger.warning(__("unimplemented node type: %r"), node,
location=(self.curfilestack[-1], node.line))
@@ -1428,13 +1430,14 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Sphinx specific
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_literal_block(None)
names = []
- for production in node:
+ productionlist = cast(Iterable[addnodes.production], node)
+ for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
- for production in node:
+ for production in productionlist:
if production['tokenname']:
for id in production.get('ids'):
self.add_anchor(id, production)
@@ -1447,31 +1450,31 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_production(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@code{')
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@code{')
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('}')
def visit_index(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# terminate the line but don't prevent paragraph breaks
if isinstance(node.parent, nodes.paragraph):
self.ensure_eol()
@@ -1483,75 +1486,76 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('@geindex %s\n' % text)
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# add a document target
self.next_section_ids.add(':doc')
self.curfilestack.append(node['docname'])
self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.curfilestack.pop()
self.footnotestack.pop()
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
txt = self.escape_arg(node.astext())
self.body.append('\n\n@center %s\n\n' % txt)
raise nodes.SkipNode
def visit_seealso(self, node):
- # type: (nodes.Node) -> None
- self.body.append(u'\n\n@subsubheading %s\n\n' %
+ # type: (nodes.Element) -> None
+ self.body.append('\n\n@subsubheading %s\n\n' %
admonitionlabels['seealso'])
def depart_seealso(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('\n')
def visit_meta(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
+ bullet_list = cast(nodes.bullet_list, node[0])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
self.body.append('\n\n')
- self.body.append(', '.join(n.astext()
- for n in node.children[0].children) + '.')
+ self.body.append(', '.join(n.astext() for n in list_items) + '.')
self.body.append('\n\n')
raise nodes.SkipNode
# -- Desc
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.desc = node
self.at_deffnx = '@deffn'
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.desc = None
self.ensure_eol()
self.body.append('@end deffn\n')
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.escape_hyphens += 1
objtype = node.parent['objtype']
if objtype != 'describe':
@@ -1572,74 +1576,74 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.desc_type_name = name
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append("\n")
self.escape_hyphens -= 1
self.desc_type_name = None
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' -> ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(' (')
self.first_param = 1
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(')')
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
text = self.escape(node.astext())
# replace no-break spaces with normal ones
- text = text.replace(u' ', '@w{ }')
+ text = text.replace(' ', '@w{ }')
self.body.append(text)
raise nodes.SkipNode
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('[')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(']')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# Try to avoid duplicating info already displayed by the deffn category.
# e.g.
# @deffn {Class} Foo
@@ -1652,27 +1656,27 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
abbr = node.astext()
self.body.append('@abbr{')
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
@@ -1682,58 +1686,68 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.context.append('}')
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append(self.context.pop())
def visit_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.depart_literal_emphasis(node)
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.visit_bullet_list(node)
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.body.append('@math{' + self.escape_arg(node.astext()) + '}')
raise nodes.SkipNode
def visit_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.get('label'):
self.add_anchor(node['label'], node)
self.body.append('\n\n@example\n%s\n@end example\n\n' %
self.escape_arg(node.astext()))
raise nodes.SkipNode
+
+ def _make_visit_admonition(name): # type: ignore
+ # type: (str) -> Callable[[TexinfoTranslator, nodes.Element], None]
+ warnings.warn('TexinfoTranslator._make_visit_admonition() is deprecated.',
+ RemovedInSphinx30Warning)
+
+ def visit(self, node):
+ # type: (nodes.Element) -> None
+ self.visit_admonition(node, admonitionlabels[name])
+ return visit
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index 912d87399..3897f4e4c 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.text
~~~~~~~~~~~~~~~~~~~
@@ -8,25 +7,252 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import math
import os
import re
import textwrap
-from itertools import groupby
+import warnings
+from itertools import groupby, chain
+from typing import Iterable, cast
from docutils import nodes, writers
from docutils.utils import column_width
-from six.moves import zip_longest
from sphinx import addnodes
+from sphinx.deprecation import RemovedInSphinx30Warning
from sphinx.locale import admonitionlabels, _
-from sphinx.util import logging
+from sphinx.util.docutils import SphinxTranslator
if False:
# For type annotation
- from typing import Any, Callable, Dict, List, Tuple, Union # NOQA
+ from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union # NOQA
from sphinx.builders.text import TextBuilder # NOQA
-logger = logging.getLogger(__name__)
+
+class Cell:
+ """Represents a cell in a table.
+ It can span on multiple columns or on multiple lines.
+ """
+ def __init__(self, text="", rowspan=1, colspan=1):
+ self.text = text
+ self.wrapped = [] # type: List[str]
+ self.rowspan = rowspan
+ self.colspan = colspan
+ self.col = None
+ self.row = None
+
+ def __repr__(self):
+ return "<Cell {!r} {}v{}/{}>{}>".format(
+ self.text, self.row, self.rowspan, self.col, self.colspan
+ )
+
+ def __hash__(self):
+ return hash((self.col, self.row))
+
+ def wrap(self, width):
+ self.wrapped = my_wrap(self.text, width)
+
+
+class Table:
+ """Represents a table, handling cells that can span on multiple lines
+ or rows, like::
+
+ +-----------+-----+
+ | AAA | BBB |
+ +-----+-----+ |
+ | | XXX | |
+ | +-----+-----+
+ | DDD | CCC |
+ +-----+-----------+
+
+ This class can be used in two ways:
+
+ - Either with absolute positions: call ``table[line, col] = Cell(...)``,
+ this overwrites an existing cell if any.
+
+ - Or with relative positions: call the ``add_row()`` and
+ ``add_cell(Cell(...))`` as needed.
+
+ Cells spanning multiple rows or multiple columns (having a
+ colspan or rowspan greater than one) are automatically referenced
+ by all the table cells they cover. This is a useful
+ representation as we can simply check ``if self[x, y] is self[x,
+ y+1]`` to recognize a rowspan.
+
+ Colwidth is not automatically computed, it has to be given, either
+ at construction time, or during the table construction.
+
+ Example usage::
+
+ table = Table([6, 6])
+ table.add_cell(Cell("foo"))
+ table.add_cell(Cell("bar"))
+ table.set_separator()
+ table.add_row()
+ table.add_cell(Cell("FOO"))
+ table.add_cell(Cell("BAR"))
+ print(table)
+ +--------+--------+
+ | foo | bar |
+ |========|========|
+ | FOO | BAR |
+ +--------+--------+
+
+ """
+ def __init__(self, colwidth=None):
+ self.lines = [] # type: List[List[Cell]]
+ self.separator = 0
+ self.colwidth = (colwidth if colwidth is not None
+ else []) # type: List[int]
+ self.current_line = 0
+ self.current_col = 0
+
+ def add_row(self):
+ """Add a row to the table, to use with ``add_cell()``. It is not needed
+ to call ``add_row()`` before the first ``add_cell()``.
+ """
+ self.current_line += 1
+ self.current_col = 0
+
+ def set_separator(self):
+ """Sets the separator below the current line.
+ """
+ self.separator = len(self.lines)
+
+ def add_cell(self, cell):
+ """Add a cell to the current line, to use with ``add_row()``. To add
+ a cell spanning on multiple lines or rows, simply set the
+ ``cell.colspan`` or ``cell.rowspan`` BEFORE inserting it to
+ the table.
+ """
+ while self[self.current_line, self.current_col]:
+ self.current_col += 1
+ self[self.current_line, self.current_col] = cell
+ self.current_col += cell.colspan
+
+ def __getitem__(self, pos):
+ line, col = pos
+ self._ensure_has_line(line + 1)
+ self._ensure_has_column(col + 1)
+ return self.lines[line][col]
+
+ def __setitem__(self, pos, cell):
+ line, col = pos
+ self._ensure_has_line(line + cell.rowspan)
+ self._ensure_has_column(col + cell.colspan)
+ for dline in range(cell.rowspan):
+ for dcol in range(cell.colspan):
+ self.lines[line + dline][col + dcol] = cell
+ cell.row = line
+ cell.col = col
+
+ def _ensure_has_line(self, line):
+ while len(self.lines) < line:
+ self.lines.append([])
+
+ def _ensure_has_column(self, col):
+ for line in self.lines:
+ while len(line) < col:
+ line.append(None)
+
+ def __repr__(self):
+ return "\n".join(repr(line) for line in self.lines)
+
+ def cell_width(self, cell, source):
+ """Give the cell width, according to the given source (either
+ ``self.colwidth`` or ``self.measured_widths``).
+ This take into account cells spanning on multiple columns.
+ """
+ width = 0
+ for i in range(self[cell.row, cell.col].colspan):
+ width += source[cell.col + i]
+ return width + (cell.colspan - 1) * 3
+
+ @property
+ def cells(self):
+ seen = set() # type: Set[Cell]
+ for lineno, line in enumerate(self.lines):
+ for colno, cell in enumerate(line):
+ if cell and cell not in seen:
+ yield cell
+ seen.add(cell)
+
+ def rewrap(self):
+ """Call ``cell.wrap()`` on all cells, and measure each column width
+ after wrapping (result written in ``self.measured_widths``).
+ """
+ self.measured_widths = self.colwidth[:]
+ for cell in self.cells:
+ cell.wrap(width=self.cell_width(cell, self.colwidth))
+ if not cell.wrapped:
+ continue
+ width = math.ceil(max(column_width(x) for x in cell.wrapped) / cell.colspan)
+ for col in range(cell.col, cell.col + cell.colspan):
+ self.measured_widths[col] = max(self.measured_widths[col], width)
+
+ def physical_lines_for_line(self, line):
+ """From a given line, compute the number of physical lines it spans
+ due to text wrapping.
+ """
+ physical_lines = 1
+ for cell in line:
+ physical_lines = max(physical_lines, len(cell.wrapped))
+ return physical_lines
+
+ def __str__(self):
+ out = []
+ self.rewrap()
+
+ def writesep(char="-", lineno=None):
+ # type: (str, Optional[int]) -> str
+ """Called on the line *before* lineno.
+ Called with no *lineno* for the last sep.
+ """
+ out = [] # type: List[str]
+ for colno, width in enumerate(self.measured_widths):
+ if (
+ lineno is not None and
+ lineno > 0 and
+ self[lineno, colno] is self[lineno - 1, colno]
+ ):
+ out.append(" " * (width + 2))
+ else:
+ out.append(char * (width + 2))
+ head = "+" if out[0][0] == "-" else "|"
+ tail = "+" if out[-1][0] == "-" else "|"
+ glue = [
+ "+" if left[0] == "-" or right[0] == "-" else "|"
+ for left, right in zip(out, out[1:])
+ ]
+ glue.append(tail)
+ return head + "".join(chain(*zip(out, glue)))
+
+ for lineno, line in enumerate(self.lines):
+ if self.separator and lineno == self.separator:
+ out.append(writesep("=", lineno))
+ else:
+ out.append(writesep("-", lineno))
+ for physical_line in range(self.physical_lines_for_line(line)):
+ linestr = ["|"]
+ for colno, cell in enumerate(line):
+ if cell.col != colno:
+ continue
+ if lineno != cell.row:
+ physical_text = ""
+ elif physical_line >= len(cell.wrapped):
+ physical_text = ""
+ else:
+ physical_text = cell.wrapped[physical_line]
+ adjust_len = len(physical_text) - column_width(physical_text)
+ linestr.append(
+ " " +
+ physical_text.ljust(
+ self.cell_width(cell, self.measured_widths) + 1 + adjust_len
+ ) + "|"
+ )
+ out.append("".join(linestr))
+ out.append(writesep("-"))
+ return "\n".join(out)
class TextWrapper(textwrap.TextWrapper):
@@ -39,14 +265,13 @@ class TextWrapper(textwrap.TextWrapper):
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
def _wrap_chunks(self, chunks):
- # type: (List[unicode]) -> List[unicode]
+ # type: (List[str]) -> List[str]
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
- drop_whitespace = getattr(self, 'drop_whitespace', True) # py25 compat
- lines = [] # type: List[unicode]
+ lines = [] # type: List[str]
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
@@ -63,7 +288,7 @@ class TextWrapper(textwrap.TextWrapper):
width = self.width - column_width(indent)
- if drop_whitespace and chunks[-1].strip() == '' and lines:
+ if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
@@ -79,7 +304,7 @@ class TextWrapper(textwrap.TextWrapper):
if chunks and column_width(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
- if drop_whitespace and cur_line and cur_line[-1].strip() == '':
+ if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
if cur_line:
@@ -88,7 +313,7 @@ class TextWrapper(textwrap.TextWrapper):
return lines
def _break_word(self, word, space_left):
- # type: (unicode, int) -> Tuple[unicode, unicode]
+ # type: (str, int) -> Tuple[str, str]
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
@@ -101,16 +326,16 @@ class TextWrapper(textwrap.TextWrapper):
return word, ''
def _split(self, text):
- # type: (unicode) -> List[unicode]
+ # type: (str) -> List[str]
"""_split(text : string) -> [string]
Override original method that only split by 'wordsep_re'.
This '_split' split wide-characters into chunk by one character.
"""
def split(t):
- # type: (unicode) -> List[unicode]
- return textwrap.TextWrapper._split(self, t) # type: ignore
- chunks = [] # type: List[unicode]
+ # type: (str) -> List[str]
+ return super(TextWrapper, self)._split(t)
+ chunks = [] # type: List[str]
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
@@ -120,7 +345,7 @@ class TextWrapper(textwrap.TextWrapper):
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
- # type: (List[unicode], List[unicode], int, int) -> None
+ # type: (List[str], List[str], int, int) -> None
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
@@ -142,7 +367,7 @@ STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
- # type: (unicode, int, Any) -> List[unicode]
+ # type: (str, int, Any) -> List[str]
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
@@ -156,43 +381,42 @@ class TextWriter(writers.Writer):
def __init__(self, builder):
# type: (TextBuilder) -> None
- writers.Writer.__init__(self)
+ super().__init__()
self.builder = builder
def translate(self):
# type: () -> None
visitor = self.builder.create_translator(self.document, self.builder)
self.document.walkabout(visitor)
- self.output = visitor.body
+ self.output = cast(TextTranslator, visitor).body
-class TextTranslator(nodes.NodeVisitor):
- sectionchars = '*=-~"+`'
+class TextTranslator(SphinxTranslator):
+ builder = None # type: TextBuilder
def __init__(self, document, builder):
- # type: (nodes.Node, TextBuilder) -> None
- nodes.NodeVisitor.__init__(self, document)
- self.builder = builder
+ # type: (nodes.document, TextBuilder) -> None
+ super().__init__(document, builder)
- newlines = builder.config.text_newlines
+ newlines = self.config.text_newlines
if newlines == 'windows':
self.nl = '\r\n'
elif newlines == 'native':
self.nl = os.linesep
else:
self.nl = '\n'
- self.sectionchars = builder.config.text_sectionchars
- self.add_secnumbers = builder.config.text_add_secnumbers
- self.secnumber_suffix = builder.config.text_secnumber_suffix
- self.states = [[]] # type: List[List[Tuple[int, Union[unicode, List[unicode]]]]]
+ self.sectionchars = self.config.text_sectionchars
+ self.add_secnumbers = self.config.text_add_secnumbers
+ self.secnumber_suffix = self.config.text_secnumber_suffix
+ self.states = [[]] # type: List[List[Tuple[int, Union[str, List[str]]]]]
self.stateindent = [0]
self.list_counter = [] # type: List[int]
self.sectionlevel = 0
self.lineblocklevel = 0
- self.table = None # type: List[Union[unicode, List[int]]]
+ self.table = None # type: Table
def add_text(self, text):
- # type: (unicode) -> None
+ # type: (str) -> None
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
@@ -201,12 +425,12 @@ class TextTranslator(nodes.NodeVisitor):
self.stateindent.append(indent)
def end_state(self, wrap=True, end=[''], first=None):
- # type: (bool, List[unicode], unicode) -> None
+ # type: (bool, List[str], str) -> None
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
- result = [] # type: List[Tuple[int, List[unicode]]]
- toformat = [] # type: List[unicode]
+ result = [] # type: List[Tuple[int, List[str]]]
+ toformat = [] # type: List[str]
def do_format():
# type: () -> None
@@ -240,11 +464,11 @@ class TextTranslator(nodes.NodeVisitor):
self.states[-1].extend(result)
def visit_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_document(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
self.body = self.nl.join(line and (' ' * indent + line)
for indent, lines in self.states[0]
@@ -252,60 +476,60 @@ class TextTranslator(nodes.NodeVisitor):
# XXX header/footer?
def visit_section(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._title_char = self.sectionchars[self.sectionlevel]
self.sectionlevel += 1
def depart_section(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.sectionlevel -= 1
def visit_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_topic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
self.add_text('-[ ')
def depart_rubric(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(' ]-')
self.end_state()
def visit_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compound(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_glossary(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.Admonition):
self.add_text(node.astext() + ': ')
raise nodes.SkipNode
self.new_state(0)
def get_section_number_string(self, node):
- # type: (nodes.Node) -> unicode
+ # type: (nodes.Element) -> str
if isinstance(node.parent, nodes.section):
anchorname = '#' + node.parent['ids'][0]
numbers = self.builder.secnumbers.get(anchorname)
@@ -316,106 +540,106 @@ class TextTranslator(nodes.NodeVisitor):
return ''
def depart_title(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if isinstance(node.parent, nodes.section):
char = self._title_char
else:
char = '^'
- text = None # type: unicode
+ text = ''
text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) # type: ignore
if self.add_secnumbers:
text = self.get_section_number_string(node) + text
self.stateindent.pop()
- title = ['', text, '%s' % (char * column_width(text)), ''] # type: List[unicode]
+ title = ['', text, '%s' % (char * column_width(text)), '']
if len(self.states) == 2 and len(self.states[-1]) == 0:
# remove an empty line before title if it is first section title in the document
title.pop(0)
self.states[-1].append((0, title))
def visit_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_subtitle(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('-- ')
def depart_attribution(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_desc_signature(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_signature_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('\n')
def visit_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_addname(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_type(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(' -> ')
def depart_desc_returns(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(')')
def visit_desc_parameter(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self.first_param:
self.add_text(', ')
else:
@@ -424,55 +648,56 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('[')
def depart_desc_optional(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(']')
def visit_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_desc_annotation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_figure(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_caption(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_productionlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
names = []
- for production in node:
+ productionlist = cast(Iterable[addnodes.production], node)
+ for production in productionlist:
names.append(production['tokenname'])
maxlen = max(len(name) for name in names)
lastname = None
- for production in node:
+ for production in productionlist:
if production['tokenname']:
self.add_text(production['tokenname'].ljust(maxlen) + ' ::=')
lastname = production['tokenname']
@@ -483,16 +708,17 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_footnote(self, node):
- # type: (nodes.Node) -> None
- self._footnote = node.children[0].astext().strip()
+ # type: (nodes.Element) -> None
+ label = cast(nodes.label, node[0])
+ self._footnote = label.astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
@@ -500,218 +726,170 @@ class TextTranslator(nodes.NodeVisitor):
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_legend(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_option_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_option_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._firstoption = True
def depart_option_group(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(' ')
def visit_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_string(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_option_string(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_description(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_tabular_col_spec(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_colspec(self, node):
- # type: (nodes.Node) -> None
- self.table[0].append(node['colwidth']) # type: ignore
+ # type: (nodes.Element) -> None
+ self.table.colwidth.append(node["colwidth"])
raise nodes.SkipNode
def visit_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_tgroup(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_thead(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_tbody(self, node):
- # type: (nodes.Node) -> None
- self.table.append('sep')
+ # type: (nodes.Element) -> None
+ self.table.set_separator()
def depart_tbody(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_row(self, node):
- # type: (nodes.Node) -> None
- self.table.append([])
+ # type: (nodes.Element) -> None
+ if self.table.lines:
+ self.table.add_row()
def depart_row(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_entry(self, node):
- # type: (nodes.Node) -> None
- if 'morerows' in node or 'morecols' in node:
- raise NotImplementedError('Column or row spanning cells are '
- 'not implemented.')
+ # type: (nodes.Element) -> None
+ self.entry = Cell(
+ rowspan=node.get("morerows", 0) + 1, colspan=node.get("morecols", 0) + 1
+ )
self.new_state(0)
def depart_entry(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
- self.table[-1].append(text) # type: ignore
+ self.entry.text = text
+ self.table.add_cell(self.entry)
+ self.entry = None
def visit_table(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
- self.table = [[]]
+ self.table = Table()
def depart_table(self, node):
- # type: (nodes.Node) -> None
- lines = None # type: List[unicode]
- lines = self.table[1:] # type: ignore
- fmted_rows = [] # type: List[List[List[unicode]]]
- colwidths = None # type: List[int]
- colwidths = self.table[0] # type: ignore
- realwidths = colwidths[:]
- separator = 0
- # don't allow paragraphs in table cells for now
- for line in lines:
- if line == 'sep':
- separator = len(fmted_rows)
- else:
- cells = [] # type: List[List[unicode]]
- for i, cell in enumerate(line):
- par = my_wrap(cell, width=colwidths[i])
- if par:
- maxwidth = max(column_width(x) for x in par)
- else:
- maxwidth = 0
- realwidths[i] = max(realwidths[i], maxwidth)
- cells.append(par)
- fmted_rows.append(cells)
-
- def writesep(char='-'):
- # type: (unicode) -> None
- out = ['+'] # type: List[unicode]
- for width in realwidths:
- out.append(char * (width + 2))
- out.append('+')
- self.add_text(''.join(out) + self.nl)
-
- def writerow(row):
- # type: (List[List[unicode]]) -> None
- lines = zip_longest(*row)
- for line in lines:
- out = ['|']
- for i, cell in enumerate(line):
- if cell:
- adjust_len = len(cell) - column_width(cell)
- out.append(' ' + cell.ljust(
- realwidths[i] + 1 + adjust_len))
- else:
- out.append(' ' * (realwidths[i] + 2))
- out.append('|')
- self.add_text(''.join(out) + self.nl)
-
- for i, row in enumerate(fmted_rows):
- if separator and i == separator:
- writesep('=')
- else:
- writesep('-')
- writerow(row)
- writesep('-')
+ # type: (nodes.Element) -> None
+ self.add_text(str(self.table))
self.table = None
self.end_state(wrap=False)
def visit_acks(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
+ bullet_list = cast(nodes.bullet_list, node[0])
+ list_items = cast(Iterable[nodes.list_item], bullet_list)
self.new_state(0)
- self.add_text(', '.join(n.astext() for n in node.children[0].children) +
- '.')
+ self.add_text(', '.join(n.astext() for n in list_items) + '.')
self.end_state()
raise nodes.SkipNode
def visit_image(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'alt' in node.attributes:
self.add_text(_('[image: %s]') % node['alt'])
self.add_text(_('[image]'))
raise nodes.SkipNode
def visit_transition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
@@ -719,31 +897,31 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.append(-1)
def depart_bullet_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.pop()
def visit_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.pop()
def visit_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.append(-2)
def depart_definition_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.list_counter.pop()
def visit_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.list_counter[-1] == -1:
# bullet list
self.new_state(2)
@@ -756,7 +934,7 @@ class TextTranslator(nodes.NodeVisitor):
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
@@ -765,408 +943,406 @@ class TextTranslator(nodes.NodeVisitor):
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._classifier_count_in_li = len(node.traverse(nodes.classifier))
def depart_definition_list_item(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_term(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(' : ')
def depart_classifier(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_definition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_field_list(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_field(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_field_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_field_name(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_field_body(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_centered(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_hlist(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_hlistcol(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def _visit_admonition(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(2)
if isinstance(node.children[0], nodes.Sequential):
self.add_text(self.nl)
- def _make_depart_admonition(name):
- # type: (unicode) -> Callable[[TextTranslator, nodes.Node], None]
- def depart_admonition(self, node):
- # type: (nodes.NodeVisitor, nodes.Node) -> None
- self.end_state(first=admonitionlabels[name] + ': ')
- return depart_admonition
+ def _depart_admonition(self, node):
+ # type: (nodes.Element) -> None
+ label = admonitionlabels[node.tagname]
+ self.end_state(first=label + ': ')
visit_attention = _visit_admonition
- depart_attention = _make_depart_admonition('attention')
+ depart_attention = _depart_admonition
visit_caution = _visit_admonition
- depart_caution = _make_depart_admonition('caution')
+ depart_caution = _depart_admonition
visit_danger = _visit_admonition
- depart_danger = _make_depart_admonition('danger')
+ depart_danger = _depart_admonition
visit_error = _visit_admonition
- depart_error = _make_depart_admonition('error')
+ depart_error = _depart_admonition
visit_hint = _visit_admonition
- depart_hint = _make_depart_admonition('hint')
+ depart_hint = _depart_admonition
visit_important = _visit_admonition
- depart_important = _make_depart_admonition('important')
+ depart_important = _depart_admonition
visit_note = _visit_admonition
- depart_note = _make_depart_admonition('note')
+ depart_note = _depart_admonition
visit_tip = _visit_admonition
- depart_tip = _make_depart_admonition('tip')
+ depart_tip = _depart_admonition
visit_warning = _visit_admonition
- depart_warning = _make_depart_admonition('warning')
+ depart_warning = _depart_admonition
visit_seealso = _visit_admonition
- depart_seealso = _make_depart_admonition('seealso')
+ depart_seealso = _depart_admonition
def visit_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_versionmodified(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_literal_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state(wrap=False)
def visit_doctest_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
def depart_doctest_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state(wrap=False)
def visit_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_line(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('\n')
def visit_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_block_quote(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def visit_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_compact_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.new_state(0)
def depart_paragraph(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.end_state()
def visit_target(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_index(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_toctree(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_pending_xref(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if self.add_secnumbers:
numbers = node.get("secnumber")
if numbers is not None:
self.add_text('.'.join(map(str, numbers)) + self.secnumber_suffix)
def depart_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_number_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_download_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def depart_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def visit_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def depart_literal_emphasis(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def visit_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('**')
def depart_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('**')
def visit_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('**')
def depart_literal_strong(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('**')
def visit_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('')
def depart_abbreviation(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
- # type: (nodes.Node) -> Any
+ # type: (nodes.Element) -> None
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def depart_title_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('*')
def visit_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('"')
def depart_literal(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('"')
def visit_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('_')
def depart_subscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('^')
def depart_superscript(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_footnote_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
self.add_text(node.astext())
def depart_Text(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Text) -> None
pass
def visit_generated(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_generated(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def depart_inline(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def visit_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_container(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('>>')
def depart_problematic(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.add_text('<<')
def visit_system_message(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state(0)
self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
self.end_state()
raise nodes.SkipNode
def visit_comment(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
raise nodes.SkipNode
def visit_meta(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
# only valid for HTML
raise nodes.SkipNode
def visit_raw(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
if 'text' in node.get('format', '').split():
self.new_state(0)
self.add_text(node.astext())
@@ -1174,21 +1350,31 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def depart_math(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
pass
def visit_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.new_state()
def depart_math_block(self, node):
- # type: (nodes.Node) -> None
+ # type: (nodes.Element) -> None
self.end_state()
def unknown_visit(self, node):
# type: (nodes.Node) -> None
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
+
+ def _make_depart_admonition(name): # type: ignore
+ # type: (str) -> Callable[[TextTranslator, nodes.Element], None]
+ warnings.warn('TextTranslator._make_depart_admonition() is deprecated.',
+ RemovedInSphinx30Warning)
+
+ def depart_admonition(self, node):
+ # type: (nodes.Element) -> None
+ self.end_state(first=admonitionlabels[name] + ': ')
+ return depart_admonition
diff --git a/sphinx/writers/websupport.py b/sphinx/writers/websupport.py
index a962faf4d..6583e8b57 100644
--- a/sphinx/writers/websupport.py
+++ b/sphinx/writers/websupport.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.websupport
~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/sphinx/writers/xml.py b/sphinx/writers/xml.py
index f94fe847c..3cf459491 100644
--- a/sphinx/writers/xml.py
+++ b/sphinx/writers/xml.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
sphinx.writers.xml
~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-from docutils import writers
from docutils.writers.docutils_xml import Writer as BaseXMLWriter
if False:
@@ -22,7 +20,7 @@ class XMLWriter(BaseXMLWriter):
def __init__(self, builder):
# type: (Builder) -> None
- BaseXMLWriter.__init__(self)
+ super().__init__()
self.builder = builder
self.translator_class = self.builder.get_translator_class()
@@ -33,23 +31,23 @@ class XMLWriter(BaseXMLWriter):
self.builder.env.config.xml_pretty
self.document.settings.xml_declaration = True
self.document.settings.doctype_declaration = True
- return BaseXMLWriter.translate(self)
+ return super().translate()
-class PseudoXMLWriter(writers.Writer):
+class PseudoXMLWriter(BaseXMLWriter):
supported = ('pprint', 'pformat', 'pseudoxml')
"""Formats this writer supports."""
config_section = 'pseudoxml writer'
- config_section_dependencies = ('writers',) # type: Tuple[unicode]
+ config_section_dependencies = ('writers',)
output = None
"""Final translated form of `document`."""
def __init__(self, builder):
# type: (Builder) -> None
- writers.Writer.__init__(self)
+ super().__init__()
self.builder = builder
def translate(self):
@@ -57,6 +55,6 @@ class PseudoXMLWriter(writers.Writer):
self.output = self.document.pformat()
def supports(self, format):
- # type: (unicode) -> bool
+ # type: (str) -> bool
"""This writer supports all format-specific elements."""
return True
diff --git a/tests/conftest.py b/tests/conftest.py
index 9f46b1868..363aeeb27 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
pytest config for sphinx/tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,6 @@
import os
import shutil
-import sys
import docutils
import pytest
@@ -22,17 +20,10 @@ pytest_plugins = 'sphinx.testing.fixtures'
# Exclude 'roots' dirs for pytest test collector
collect_ignore = ['roots']
-# Disable Python version-specific
-if sys.version_info < (3,):
- collect_ignore += ['py3']
-
-if sys.version_info < (3, 5):
- collect_ignore += ['py35']
-
@pytest.fixture(scope='session')
def rootdir():
- return path(os.path.dirname(__file__) or '.').abspath() / 'roots'
+ return path(os.path.dirname(__file__)).abspath() / 'roots'
def pytest_report_header(config):
diff --git a/tests/py3/test_util_inspect_py3.py b/tests/py3/test_util_inspect_py3.py
deleted file mode 100644
index 6d02025f9..000000000
--- a/tests/py3/test_util_inspect_py3.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- py3/test_util_inspect
- ~~~~~~~~~~~~~~~~~~~~~
-
- Tests util.inspect functions.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-from sphinx.util import inspect
-
-
-def test_Signature_keyword_only_arguments():
- def func1(arg1, arg2, *, arg3=None, arg4=None):
- pass
-
- def func2(*, arg3, arg4):
- pass
-
- sig = inspect.Signature(func1).format_args()
- assert sig == '(arg1, arg2, *, arg3=None, arg4=None)'
-
- sig = inspect.Signature(func2).format_args()
- assert sig == '(*, arg3, arg4)'
diff --git a/tests/py35/test_autodoc_py35.py b/tests/py35/test_autodoc_py35.py
deleted file mode 100644
index 046fb93b4..000000000
--- a/tests/py35/test_autodoc_py35.py
+++ /dev/null
@@ -1,348 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- test_autodoc
- ~~~~~~~~~~~~
-
- Test the autodoc extension. This tests mainly the Documenters; the auto
- directives are tested in a test source file translated by test_build.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-# "raises" imported for usage by autodoc
-import sys
-
-import pytest
-import six
-from docutils.statemachine import ViewList
-from six import StringIO
-
-from sphinx.ext.autodoc import add_documenter, FunctionDocumenter, ALL, Options # NOQA
-from sphinx.testing.util import SphinxTestApp, Struct
-from sphinx.util import logging
-
-app = None
-
-
-@pytest.fixture(scope='module', autouse=True)
-def setup_module(rootdir, sphinx_test_tempdir):
- global app
- srcdir = sphinx_test_tempdir / 'autodoc-root'
- if not srcdir.exists():
- (rootdir / 'test-root').copytree(srcdir)
- app = SphinxTestApp(srcdir=srcdir)
- app.builder.env.app = app
- app.builder.env.temp_data['docname'] = 'dummy'
- app.connect('autodoc-process-docstring', process_docstring)
- app.connect('autodoc-process-signature', process_signature)
- app.connect('autodoc-skip-member', skip_member)
- yield
- app.cleanup()
-
-
-directive = options = None
-
-
-@pytest.fixture
-def setup_test():
- global options, directive
- global processed_docstrings, processed_signatures
-
- options = Options(
- inherited_members = False,
- undoc_members = False,
- private_members = False,
- special_members = False,
- imported_members = False,
- show_inheritance = False,
- noindex = False,
- annotation = None,
- synopsis = '',
- platform = '',
- deprecated = False,
- members = [],
- member_order = 'alphabetic',
- exclude_members = set(),
- )
-
- directive = Struct(
- env = app.builder.env,
- genopt = options,
- result = ViewList(),
- filename_set = set(),
- )
-
- processed_docstrings = []
- processed_signatures = []
-
-
-processed_docstrings = []
-processed_signatures = []
-
-
-def process_docstring(app, what, name, obj, options, lines):
- processed_docstrings.append((what, name))
- if name == 'bar':
- lines.extend(['42', ''])
-
-
-def process_signature(app, what, name, obj, options, args, retann):
- processed_signatures.append((what, name))
- if name == 'bar':
- return '42', None
-
-
-def skip_member(app, what, name, obj, skip, options):
- if name in ('__special1__', '__special2__'):
- return skip
- if name.startswith('_'):
- return True
- if name == 'skipmeth':
- return True
-
-
-@pytest.mark.usefixtures('setup_test')
-def test_generate():
- logging.setup(app, app._status, app._warning)
-
- def assert_warns(warn_str, objtype, name, **kw):
- inst = app.registry.documenters[objtype](directive, name)
- inst.generate(**kw)
- assert len(directive.result) == 0, directive.result
- assert warn_str in app._warning.getvalue()
- app._warning.truncate(0)
- app._warning.seek(0)
-
- def assert_works(objtype, name, **kw):
- inst = app.registry.documenters[objtype](directive, name)
- inst.generate(**kw)
- assert directive.result
- # print '\n'.join(directive.result)
- assert app._warning.getvalue() == ''
- del directive.result[:]
-
- def assert_processes(items, objtype, name, **kw):
- del processed_docstrings[:]
- del processed_signatures[:]
- assert_works(objtype, name, **kw)
- assert set(processed_docstrings) | set(processed_signatures) == set(items)
-
- def assert_result_contains(item, objtype, name, **kw):
- inst = app.registry.documenters[objtype](directive, name)
- inst.generate(**kw)
- # print '\n'.join(directive.result)
- assert app._warning.getvalue() == ''
- assert item in directive.result
- del directive.result[:]
-
- def assert_order(items, objtype, name, member_order, **kw):
- inst = app.registry.documenters[objtype](directive, name)
- inst.options.member_order = member_order
- inst.generate(**kw)
- assert app._warning.getvalue() == ''
- items = list(reversed(items))
- lineiter = iter(directive.result)
- # for line in directive.result:
- # if line.strip():
- # print repr(line)
- while items:
- item = items.pop()
- for line in lineiter:
- if line == item:
- break
- else: # ran out of items!
- assert False, ('item %r not found in result or not in the '
- ' correct order' % item)
- del directive.result[:]
-
- options.members = []
-
- # no module found?
- assert_warns("import for autodocumenting 'foobar'",
- 'function', 'foobar', more_content=None)
- # importing
- assert_warns("failed to import module 'test_foobar'",
- 'module', 'test_foobar', more_content=None)
- # attributes missing
- assert_warns("failed to import function 'foobar' from module 'util'",
- 'function', 'util.foobar', more_content=None)
- # method missing
- assert_warns("failed to import method 'Class.foobar' from module 'test_autodoc_py35';",
- 'method', 'test_autodoc_py35.Class.foobar', more_content=None)
-
- # test auto and given content mixing
- directive.env.ref_context['py:module'] = 'test_autodoc_py35'
- assert_result_contains(' Function.', 'method', 'Class.meth')
- add_content = ViewList()
- add_content.append('Content.', '', 0)
- assert_result_contains(' Function.', 'method',
- 'Class.meth', more_content=add_content)
- assert_result_contains(' Content.', 'method',
- 'Class.meth', more_content=add_content)
-
- # test check_module
- inst = FunctionDocumenter(directive, 'add_documenter')
- inst.generate(check_module=True)
- assert len(directive.result) == 0
-
- # assert that exceptions can be documented
- assert_works('exception', 'test_autodoc_py35.CustomEx', all_members=True)
- assert_works('exception', 'test_autodoc_py35.CustomEx')
-
- # test diverse inclusion settings for members
- should = [('class', 'test_autodoc_py35.Class')]
- assert_processes(should, 'class', 'Class')
- should.extend([('method', 'test_autodoc_py35.Class.meth')])
- options.members = ['meth']
- options.exclude_members = set(['excludemeth'])
- assert_processes(should, 'class', 'Class')
- should.extend([('attribute', 'test_autodoc_py35.Class.prop'),
- ('attribute', 'test_autodoc_py35.Class.descr'),
- ('attribute', 'test_autodoc_py35.Class.attr'),
- ('attribute', 'test_autodoc_py35.Class.docattr'),
- ('attribute', 'test_autodoc_py35.Class.udocattr'),
- ('attribute', 'test_autodoc_py35.Class.mdocattr'),
- ('attribute', 'test_autodoc_py35.Class.inst_attr_comment'),
- ('attribute', 'test_autodoc_py35.Class.inst_attr_inline'),
- ('attribute', 'test_autodoc_py35.Class.inst_attr_string'),
- ('method', 'test_autodoc_py35.Class.moore'),
- ])
- if six.PY3 and sys.version_info[:2] >= (3, 5):
- should.extend([
- ('method', 'test_autodoc_py35.Class.do_coroutine'),
- ])
- options.members = ALL
- assert_processes(should, 'class', 'Class')
- options.undoc_members = True
- should.extend((('attribute', 'test_autodoc_py35.Class.skipattr'),
- ('method', 'test_autodoc_py35.Class.undocmeth'),
- ('method', 'test_autodoc_py35.Class.roger')))
- assert_processes(should, 'class', 'Class')
- options.inherited_members = True
- should.append(('method', 'test_autodoc_py35.Class.inheritedmeth'))
- assert_processes(should, 'class', 'Class')
-
- # test special members
- options.special_members = ['__special1__']
- should.append(('method', 'test_autodoc_py35.Class.__special1__'))
- assert_processes(should, 'class', 'Class')
- options.special_members = ALL
- should.append(('method', 'test_autodoc_py35.Class.__special2__'))
- assert_processes(should, 'class', 'Class')
- options.special_members = False
-
-
-# --- generate fodder ------------
-__all__ = ['Class']
-
-#: documentation for the integer
-integer = 1
-
-
-class CustomEx(Exception):
- """My custom exception."""
-
- def f(self):
- """Exception method."""
-
-
-class CustomDataDescriptor(object):
- """Descriptor class docstring."""
-
- def __init__(self, doc):
- self.__doc__ = doc
-
- def __get__(self, obj, type=None):
- if obj is None:
- return self
- return 42
-
- def meth(self):
- """Function."""
- return "The Answer"
-
-
-def _funky_classmethod(name, b, c, d, docstring=None):
- """Generates a classmethod for a class from a template by filling out
- some arguments."""
- def template(cls, a, b, c, d=4, e=5, f=6):
- return a, b, c, d, e, f
- from functools import partial
- function = partial(template, b=b, c=c, d=d)
- function.__name__ = name
- function.__doc__ = docstring
- return classmethod(function)
-
-
-class Base(object):
- def inheritedmeth(self):
- """Inherited function."""
-
-
-if six.PY3 and sys.version_info[:2] >= (3, 5):
- async def _other_coro_func():
- return "run"
-
-
-class Class(Base):
- """Class to document."""
-
- descr = CustomDataDescriptor("Descriptor instance docstring.")
-
- def meth(self):
- """Function."""
-
- def undocmeth(self):
- pass
-
- def skipmeth(self):
- """Method that should be skipped."""
-
- def excludemeth(self):
- """Method that should be excluded."""
-
- # should not be documented
- skipattr = 'foo'
-
- #: should be documented -- süß
- attr = 'bar'
-
- @property
- def prop(self):
- """Property."""
-
- docattr = 'baz'
- """should likewise be documented -- süß"""
-
- udocattr = 'quux'
- u"""should be documented as well - süß"""
-
- # initialized to any class imported from another module
- mdocattr = StringIO()
- """should be documented as well - süß"""
-
- roger = _funky_classmethod("roger", 2, 3, 4)
-
- moore = _funky_classmethod("moore", 9, 8, 7,
- docstring="moore(a, e, f) -> happiness")
-
- def __init__(self, arg):
- self.inst_attr_inline = None #: an inline documented instance attr
- #: a documented instance attribute
- self.inst_attr_comment = None
- self.inst_attr_string = None
- """a documented instance attribute"""
-
- def __special1__(self):
- """documented special method"""
-
- def __special2__(self):
- # undocumented special method
- pass
-
- if six.PY3 and sys.version_info[:2] >= (3, 5):
-
- async def do_coroutine(self):
- """A documented coroutine function"""
- attr_coro_result = await _other_coro_func() # NOQA
diff --git a/tests/roots/test-api-set-translator/conf.py b/tests/roots/test-api-set-translator/conf.py
index c1ad24e56..9a7312d65 100644
--- a/tests/roots/test-api-set-translator/conf.py
+++ b/tests/roots/test-api-set-translator/conf.py
@@ -11,7 +11,6 @@ from sphinx.writers.latex import LaTeXTranslator
from sphinx.writers.manpage import ManualPageTranslator
from sphinx.writers.texinfo import TexinfoTranslator
from sphinx.writers.text import TextTranslator
-from sphinx.writers.websupport import WebSupportTranslator
project = 'test'
@@ -54,10 +53,6 @@ class ConfTextTranslator(TextTranslator):
pass
-class ConfWebSupportTranslator(WebSupportTranslator):
- pass
-
-
class ConfXMLTranslator(XMLTranslator):
pass
@@ -76,6 +71,5 @@ def setup(app):
app.set_translator('man', ConfManualPageTranslator)
app.set_translator('texinfo', ConfTexinfoTranslator)
app.set_translator('text', ConfTextTranslator)
- app.set_translator('websupport', ConfWebSupportTranslator)
app.set_translator('xml', ConfXMLTranslator)
app.set_translator('pseudoxml', ConfPseudoXMLTranslator)
diff --git a/tests/roots/test-apidoc-toc/mypackage/main.py b/tests/roots/test-apidoc-toc/mypackage/main.py
index b3fa386fc..813db805e 100755
--- a/tests/roots/test-apidoc-toc/mypackage/main.py
+++ b/tests/roots/test-apidoc-toc/mypackage/main.py
@@ -1,4 +1,5 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
+
import os
import mod_resource
diff --git a/tests/roots/test-autosummary/contents.rst b/tests/roots/test-autosummary/index.rst
index 5ddc4bd40..5ddc4bd40 100644
--- a/tests/roots/test-autosummary/contents.rst
+++ b/tests/roots/test-autosummary/index.rst
diff --git a/tests/roots/test-build-text/conf.py b/tests/roots/test-build-text/conf.py
index 23d0ae840..fd9eefbf6 100644
--- a/tests/roots/test-build-text/conf.py
+++ b/tests/roots/test-build-text/conf.py
@@ -1,3 +1,2 @@
-master_doc = 'contents'
source_suffix = '.txt'
exclude_patterns = ['_build']
diff --git a/tests/roots/test-build-text/contents.txt b/tests/roots/test-build-text/index.txt
index ca9f8dc6c..ca9f8dc6c 100644
--- a/tests/roots/test-build-text/contents.txt
+++ b/tests/roots/test-build-text/index.txt
diff --git a/tests/roots/test-build-text/table.txt b/tests/roots/test-build-text/table.txt
index 84328940f..adc8b371a 100644
--- a/tests/roots/test-build-text/table.txt
+++ b/tests/roots/test-build-text/table.txt
@@ -1,7 +1,7 @@
- +-----+-----+
- | XXX | XXX |
- +-----+-----+
- | | XXX |
- +-----+-----+
- | XXX | |
- +-----+-----+
++-----+-----+
+| XXX | XXX |
++-----+-----+
+| | XXX |
++-----+-----+
+| XXX | |
++-----+-----+
diff --git a/tests/roots/test-build-text/table_colspan.txt b/tests/roots/test-build-text/table_colspan.txt
new file mode 100644
index 000000000..4ae663789
--- /dev/null
+++ b/tests/roots/test-build-text/table_colspan.txt
@@ -0,0 +1,7 @@
++-----+-----+
+| XXX | XXX |
++-----+-----+
+| | XXX |
++-----+ |
+| XXX | |
++-----+-----+
diff --git a/tests/roots/test-build-text/table_colspan_and_rowspan.txt b/tests/roots/test-build-text/table_colspan_and_rowspan.txt
new file mode 100644
index 000000000..82d36070a
--- /dev/null
+++ b/tests/roots/test-build-text/table_colspan_and_rowspan.txt
@@ -0,0 +1,7 @@
++-----------+-----+
+| AAA | BBB |
++-----+-----+ |
+| | XXX | |
+| +-----+-----+
+| DDD | CCC |
++-----+-----------+
diff --git a/tests/roots/test-build-text/table_colspan_left.txt b/tests/roots/test-build-text/table_colspan_left.txt
new file mode 100644
index 000000000..dbfa324c5
--- /dev/null
+++ b/tests/roots/test-build-text/table_colspan_left.txt
@@ -0,0 +1,7 @@
++-----+-----+
+| XXX | XXX |
++-----+-----+
+| | XXX |
+| +-----+
+| XXX | |
++-----+-----+
diff --git a/tests/roots/test-build-text/table_rowspan.txt b/tests/roots/test-build-text/table_rowspan.txt
new file mode 100644
index 000000000..36c30eb79
--- /dev/null
+++ b/tests/roots/test-build-text/table_rowspan.txt
@@ -0,0 +1,7 @@
++-----+-----+
+| XXXXXXXXX |
++-----+-----+
+| | XXX |
++-----+-----+
+| XXX | |
++-----+-----+
diff --git a/tests/roots/test-circular/contents.rst b/tests/roots/test-circular/index.rst
index 294e674dd..294e674dd 100644
--- a/tests/roots/test-circular/contents.rst
+++ b/tests/roots/test-circular/index.rst
diff --git a/tests/roots/test-circular/sub.rst b/tests/roots/test-circular/sub.rst
index 070c39743..cebfd6587 100644
--- a/tests/roots/test-circular/sub.rst
+++ b/tests/roots/test-circular/sub.rst
@@ -1,3 +1,3 @@
.. toctree::
- contents
+ index
diff --git a/tests/roots/test-correct-year/conf.py b/tests/roots/test-correct-year/conf.py
index 4cb2912f5..6aac1743e 100644
--- a/tests/roots/test-correct-year/conf.py
+++ b/tests/roots/test-correct-year/conf.py
@@ -1,2 +1,2 @@
-copyright = u'2006-2009, Author'
+copyright = '2006-2009, Author'
diff --git a/tests/roots/test-correct-year/contents.rst b/tests/roots/test-correct-year/index.rst
index 938dfd503..938dfd503 100644
--- a/tests/roots/test-correct-year/contents.rst
+++ b/tests/roots/test-correct-year/index.rst
diff --git a/tests/roots/test-directive-only/contents.rst b/tests/roots/test-directive-only/index.rst
index 80ec00313..80ec00313 100644
--- a/tests/roots/test-directive-only/contents.rst
+++ b/tests/roots/test-directive-only/index.rst
diff --git a/tests/roots/test-docutilsconf/contents.txt b/tests/roots/test-docutilsconf/index.txt
index b20204e61..b20204e61 100644
--- a/tests/roots/test-docutilsconf/contents.txt
+++ b/tests/roots/test-docutilsconf/index.txt
diff --git a/tests/roots/test-ext-autodoc/contents.rst b/tests/roots/test-ext-autodoc/index.rst
index ce4302204..ce4302204 100644
--- a/tests/roots/test-ext-autodoc/contents.rst
+++ b/tests/roots/test-ext-autodoc/index.rst
diff --git a/tests/roots/test-ext-autodoc/target/__init__.py b/tests/roots/test-ext-autodoc/target/__init__.py
index 9bb50bca9..f30045292 100644
--- a/tests/roots/test-ext-autodoc/target/__init__.py
+++ b/tests/roots/test-ext-autodoc/target/__init__.py
@@ -1,10 +1,9 @@
# -*- coding: utf-8 -*-
import enum
+from io import StringIO
-from six import StringIO, add_metaclass
-
-from sphinx.ext.autodoc import add_documenter # NOQA
+from sphinx.util import save_traceback # NOQA
__all__ = ['Class']
@@ -45,9 +44,9 @@ class CustomDataDescriptorMeta(type):
"""Descriptor metaclass docstring."""
-@add_metaclass(CustomDataDescriptorMeta)
class CustomDataDescriptor2(CustomDataDescriptor):
"""Descriptor class with custom metaclass docstring."""
+ __metaclass__ = CustomDataDescriptorMeta
def _funky_classmethod(name, b, c, d, docstring=None):
@@ -112,7 +111,7 @@ class Class(Base):
"""should likewise be documented -- süß"""
udocattr = 'quux'
- u"""should be documented as well - süß"""
+ """should be documented as well - süß"""
# initialized to any class imported from another module
mdocattr = StringIO()
diff --git a/tests/roots/test-ext-autodoc/target/coroutine.py b/tests/roots/test-ext-autodoc/target/coroutine.py
new file mode 100644
index 000000000..b3223a820
--- /dev/null
+++ b/tests/roots/test-ext-autodoc/target/coroutine.py
@@ -0,0 +1,8 @@
+class AsyncClass:
+ async def do_coroutine(self):
+ """A documented coroutine function"""
+ attr_coro_result = await _other_coro_func() # NOQA
+
+
+async def _other_coro_func():
+ return "run"
diff --git a/tests/roots/test-ext-autodoc/target/enum.py b/tests/roots/test-ext-autodoc/target/enum.py
index 31e7c6ccd..d0a59c71c 100644
--- a/tests/roots/test-ext-autodoc/target/enum.py
+++ b/tests/roots/test-ext-autodoc/target/enum.py
@@ -1,4 +1,3 @@
-from __future__ import absolute_import
import enum
diff --git a/tests/roots/test-root/autodoc_missing_imports.py b/tests/roots/test-ext-autodoc/target/need_mocks.py
index 4f83579a4..4f83579a4 100644
--- a/tests/roots/test-root/autodoc_missing_imports.py
+++ b/tests/roots/test-ext-autodoc/target/need_mocks.py
diff --git a/tests/roots/test-ext-autosummary/contents.rst b/tests/roots/test-ext-autosummary/index.rst
index fc84927bb..fc84927bb 100644
--- a/tests/roots/test-ext-autosummary/contents.rst
+++ b/tests/roots/test-ext-autosummary/index.rst
diff --git a/tests/roots/test-ext-graphviz/index.rst b/tests/roots/test-ext-graphviz/index.rst
index 930ec656d..e67d1d082 100644
--- a/tests/roots/test-ext-graphviz/index.rst
+++ b/tests/roots/test-ext-graphviz/index.rst
@@ -22,7 +22,7 @@ Hello |graph| graphviz world
.. digraph:: bar
:align: right
- :caption: on right
+ :caption: on *right*
foo -> bar
diff --git a/tests/roots/test-ext-viewcode-find/not_a_package/__init__.py b/tests/roots/test-ext-viewcode-find/not_a_package/__init__.py
index 4a1d689e5..8efc933b8 100644
--- a/tests/roots/test-ext-viewcode-find/not_a_package/__init__.py
+++ b/tests/roots/test-ext-viewcode-find/not_a_package/__init__.py
@@ -1,3 +1 @@
-from __future__ import absolute_import
-
from .submodule import func1, Class1 # NOQA
diff --git a/tests/roots/test-ext-viewcode/spam/__init__.py b/tests/roots/test-ext-viewcode/spam/__init__.py
index dffa85b91..2d5ca8239 100644
--- a/tests/roots/test-ext-viewcode/spam/__init__.py
+++ b/tests/roots/test-ext-viewcode/spam/__init__.py
@@ -1,4 +1,2 @@
-from __future__ import absolute_import
-
from .mod1 import func1, Class1 # NOQA
from .mod2 import func2, Class2 # NOQA
diff --git a/tests/roots/test-gettext-template/contents.rst b/tests/roots/test-gettext-template/index.rst
index e69de29bb..e69de29bb 100644
--- a/tests/roots/test-gettext-template/contents.rst
+++ b/tests/roots/test-gettext-template/index.rst
diff --git a/tests/roots/test-inheritance/contents.rst b/tests/roots/test-inheritance/index.rst
index db4fbacb8..db4fbacb8 100644
--- a/tests/roots/test-inheritance/contents.rst
+++ b/tests/roots/test-inheritance/index.rst
diff --git a/tests/roots/test-intl/_templates/index.html b/tests/roots/test-intl/_templates/contents.html
index d730545d1..d730545d1 100644
--- a/tests/roots/test-intl/_templates/index.html
+++ b/tests/roots/test-intl/_templates/contents.html
diff --git a/tests/roots/test-intl/conf.py b/tests/roots/test-intl/conf.py
index aafd9ba79..0306ff38e 100644
--- a/tests/roots/test-intl/conf.py
+++ b/tests/roots/test-intl/conf.py
@@ -4,7 +4,7 @@ project = 'Sphinx intl <Tests>'
source_suffix = '.txt'
keep_warnings = True
templates_path = ['_templates']
-html_additional_pages = {'index': 'index.html'}
+html_additional_pages = {'contents': 'contents.html'}
release = version = '2013.120'
gettext_additional_targets = ['index']
exclude_patterns = ['_build']
diff --git a/tests/roots/test-intl/contents.po b/tests/roots/test-intl/index.po
index 76ef049f0..76ef049f0 100644
--- a/tests/roots/test-intl/contents.po
+++ b/tests/roots/test-intl/index.po
diff --git a/tests/roots/test-intl/contents.txt b/tests/roots/test-intl/index.txt
index b818e99c7..cd63b5ec3 100644
--- a/tests/roots/test-intl/contents.txt
+++ b/tests/roots/test-intl/index.txt
@@ -10,7 +10,7 @@ CONTENTS
:numbered:
:caption: Table of Contents
- subdir/contents
+ subdir/index
bom
warnings
footnote
diff --git a/tests/roots/test-intl/role_xref.po b/tests/roots/test-intl/role_xref.po
index 5b6d114c0..81ee22c6e 100644
--- a/tests/roots/test-intl/role_xref.po
+++ b/tests/roots/test-intl/role_xref.po
@@ -19,8 +19,8 @@ msgstr ""
msgid "i18n role xref"
msgstr "I18N ROCK'N ROLE XREF"
-msgid "link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`contents`."
-msgstr "LINK TO :ref:`i18n-role-xref`, :doc:`contents`, :term:`SOME NEW TERM`."
+msgid "link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`index`."
+msgstr "LINK TO :ref:`i18n-role-xref`, :doc:`index`, :term:`SOME NEW TERM`."
msgid "same type links"
msgstr "SAME TYPE LINKS"
@@ -31,8 +31,8 @@ msgstr "LINK TO :term:`SOME OTHER NEW TERM` AND :term:`SOME NEW TERM`."
msgid "link to :ref:`i18n-role-xref` and :ref:`same-type-links`."
msgstr "LINK TO :ref:`same-type-links` AND :ref:`i18n-role-xref`."
-msgid "link to :doc:`contents` and :doc:`glossary_terms`."
-msgstr "LINK TO :doc:`glossary_terms` AND :doc:`contents`."
+msgid "link to :doc:`index` and :doc:`glossary_terms`."
+msgstr "LINK TO :doc:`glossary_terms` AND :doc:`index`."
msgid "link to :option:`-m` and :option:`--module`."
msgstr "LINK TO :option:`--module` AND :option:`-m`."
diff --git a/tests/roots/test-intl/role_xref.txt b/tests/roots/test-intl/role_xref.txt
index b3d42d127..875af4667 100644
--- a/tests/roots/test-intl/role_xref.txt
+++ b/tests/roots/test-intl/role_xref.txt
@@ -5,7 +5,7 @@
i18n role xref
==============
-link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`contents`.
+link to :term:`Some term`, :ref:`i18n-role-xref`, :doc:`index`.
.. _same-type-links:
@@ -16,7 +16,7 @@ link to :term:`Some term` and :term:`Some other term`.
link to :ref:`i18n-role-xref` and :ref:`same-type-links`.
-link to :doc:`contents` and :doc:`glossary_terms`.
+link to :doc:`index` and :doc:`glossary_terms`.
link to :option:`-m` and :option:`--module`.
diff --git a/tests/roots/test-intl/subdir/contents.txt b/tests/roots/test-intl/subdir/index.txt
index 7578ce387..7578ce387 100644
--- a/tests/roots/test-intl/subdir/contents.txt
+++ b/tests/roots/test-intl/subdir/index.txt
diff --git a/tests/roots/test-metadata/conf.py b/tests/roots/test-metadata/conf.py
new file mode 100644
index 000000000..f81c30bc4
--- /dev/null
+++ b/tests/roots/test-metadata/conf.py
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 -*-
+
+master_doc = 'index'
diff --git a/tests/roots/test-root/metadata.add b/tests/roots/test-metadata/index.rst
index 821816a99..42af665f1 100644
--- a/tests/roots/test-root/metadata.add
+++ b/tests/roots/test-metadata/index.rst
@@ -42,16 +42,5 @@
markup language, containing examples of all basic
constructs and many advanced constructs.
-================================
- reStructuredText Demonstration
-================================
-
-.. Above is the document title, and below is the subtitle.
- They are transformed from section titles after parsing.
-
---------------------------------
- Examples of Syntax Constructs
---------------------------------
-
-.. bibliographic fields (which also require a transform):
-
+test-metadata
+==============
diff --git a/tests/roots/test-numbered-circular/contents.rst b/tests/roots/test-numbered-circular/index.rst
index c3129cd48..c3129cd48 100644
--- a/tests/roots/test-numbered-circular/contents.rst
+++ b/tests/roots/test-numbered-circular/index.rst
diff --git a/tests/roots/test-numbered-circular/sub.rst b/tests/roots/test-numbered-circular/sub.rst
index 070c39743..cebfd6587 100644
--- a/tests/roots/test-numbered-circular/sub.rst
+++ b/tests/roots/test-numbered-circular/sub.rst
@@ -1,3 +1,3 @@
.. toctree::
- contents
+ index
diff --git a/tests/roots/test-root/autodoc.txt b/tests/roots/test-root/autodoc.txt
index 3c83ebf6e..39d9dd5a7 100644
--- a/tests/roots/test-root/autodoc.txt
+++ b/tests/roots/test-root/autodoc.txt
@@ -45,5 +45,3 @@ Just testing a few autodoc possibilities...
:members: ca1, ia1
Specific members (2 total)
-
-.. automodule:: autodoc_missing_imports
diff --git a/tests/roots/test-root/autodoc_target.py b/tests/roots/test-root/autodoc_target.py
index 62ca9f691..9f02a6b21 100644
--- a/tests/roots/test-root/autodoc_target.py
+++ b/tests/roots/test-root/autodoc_target.py
@@ -1,10 +1,7 @@
# -*- coding: utf-8 -*-
import enum
-
-from six import StringIO, add_metaclass
-
-from sphinx.ext.autodoc import add_documenter # NOQA
+from io import StringIO
__all__ = ['Class']
@@ -45,9 +42,9 @@ class CustomDataDescriptorMeta(type):
"""Descriptor metaclass docstring."""
-@add_metaclass(CustomDataDescriptorMeta)
class CustomDataDescriptor2(CustomDataDescriptor):
"""Descriptor class with custom metaclass docstring."""
+ __metaclass__ = CustomDataDescriptorMeta
def _funky_classmethod(name, b, c, d, docstring=None):
@@ -104,7 +101,7 @@ class Class(Base):
"""should likewise be documented -- süß"""
udocattr = 'quux'
- u"""should be documented as well - süß"""
+ """should be documented as well - süß"""
# initialized to any class imported from another module
mdocattr = StringIO()
diff --git a/tests/roots/test-root/conf.py b/tests/roots/test-root/conf.py
index d5029a776..43fe39df1 100644
--- a/tests/roots/test-root/conf.py
+++ b/tests/roots/test-root/conf.py
@@ -18,7 +18,6 @@ jsmath_path = 'dummy.js'
templates_path = ['_templates']
-master_doc = 'contents'
source_suffix = ['.txt', '.add', '.foo']
project = 'Sphinx <Tests>'
@@ -37,48 +36,27 @@ rst_epilog = '.. |subst| replace:: global substitution'
html_sidebars = {'**': ['localtoc.html', 'relations.html', 'sourcelink.html',
'customsb.html', 'searchbox.html'],
- 'contents': ['contentssb.html', 'localtoc.html',
- 'globaltoc.html']}
+ 'index': ['contentssb.html', 'localtoc.html', 'globaltoc.html']}
html_style = 'default.css'
html_last_updated_fmt = '%b %d, %Y'
html_context = {'hckey': 'hcval', 'hckey_co': 'wrong_hcval_co'}
-htmlhelp_basename = 'SphinxTestsdoc'
-
applehelp_bundle_id = 'org.sphinx-doc.Sphinx.help'
applehelp_disable_external_tools = True
latex_documents = [
- ('contents', 'SphinxTests.tex', 'Sphinx Tests Documentation',
+ ('index', 'SphinxTests.tex', 'Sphinx Tests Documentation',
'Georg Brandl \\and someone else', 'manual'),
]
latex_additional_files = ['svgimg.svg']
-texinfo_documents = [
- ('contents', 'SphinxTests', 'Sphinx Tests',
- 'Georg Brandl \\and someone else', 'Sphinx Testing', 'Miscellaneous'),
-]
-
-man_pages = [
- ('contents', 'SphinxTests', 'Sphinx Tests Documentation',
- 'Georg Brandl and someone else', 1),
-]
-
coverage_c_path = ['special/*.h']
coverage_c_regexes = {'function': r'^PyAPI_FUNC\(.*\)\s+([^_][\w_]+)'}
extlinks = {'issue': ('http://bugs.python.org/issue%s', 'issue '),
'pyurl': ('http://python.org/%s', None)}
-autodoc_mock_imports = [
- 'missing_module',
- 'missing_package1',
- 'missing_package2',
- 'missing_package3',
- 'sphinx.missing_module4',
-]
-
# modify tags from conf.py
tags.add('confpytag') # NOQA
diff --git a/tests/roots/test-root/contents.txt b/tests/roots/test-root/index.txt
index d5ff24115..ce0338cf7 100644
--- a/tests/roots/test-root/contents.txt
+++ b/tests/roots/test-root/index.txt
@@ -23,7 +23,6 @@ Contents:
bom
math
autodoc
- metadata
extensions
extensions
footnote
diff --git a/tests/roots/test-setup/doc/contents.txt b/tests/roots/test-setup/doc/index.txt
index 56960f53e..56960f53e 100644
--- a/tests/roots/test-setup/doc/contents.txt
+++ b/tests/roots/test-setup/doc/index.txt
diff --git a/tests/roots/test-templating/contents.txt b/tests/roots/test-templating/index.txt
index 04a40e21c..04a40e21c 100644
--- a/tests/roots/test-templating/contents.txt
+++ b/tests/roots/test-templating/index.txt
diff --git a/tests/test_api_translator.py b/tests/test_api_translator.py
index 4e4230ba3..1ff41bcf6 100644
--- a/tests/test_api_translator.py
+++ b/tests/test_api_translator.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_api_translator
~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_application.py b/tests/test_application.py
index 149f6a958..6a3c84ce2 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_application
~~~~~~~~~~~~~~~~
@@ -42,13 +41,14 @@ def test_events(app, status, warning):
def test_emit_with_nonascii_name_node(app, status, warning):
- node = nodes.section(names=[u'\u65e5\u672c\u8a9e'])
+ node = nodes.section(names=['\u65e5\u672c\u8a9e'])
app.emit('my_event', node)
def test_extensions(app, status, warning):
app.setup_extension('shutil')
- assert strip_escseq(warning.getvalue()).startswith("WARNING: extension 'shutil'")
+ warning = strip_escseq(warning.getvalue())
+ assert "extension 'shutil' has no setup() function" in warning
def test_extension_in_blacklist(app, status, warning):
diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py
index 7e48d92fa..597fa0ea1 100644
--- a/tests/test_autodoc.py
+++ b/tests/test_autodoc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_autodoc
~~~~~~~~~~~~
@@ -17,10 +16,9 @@ from warnings import catch_warnings
import pytest
from docutils.statemachine import ViewList
-from six import PY3
from sphinx.ext.autodoc import (
- AutoDirective, ModuleLevelDocumenter, cut_lines, between, ALL,
+ ModuleLevelDocumenter, cut_lines, between, ALL,
merge_autodoc_default_flags, Options
)
from sphinx.ext.autodoc.directive import DocumenterBridge, process_documenter_options
@@ -30,17 +28,13 @@ from sphinx.util.docutils import LoggingReporter
app = None
-if PY3:
- ROGER_METHOD = ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)'
-else:
- ROGER_METHOD = ' .. py:classmethod:: Class.roger(a, e=5, f=6)'
-
IS_PYPY = platform.python_implementation() == 'PyPy'
def do_autodoc(app, objtype, name, options=None):
if options is None:
options = {}
+ app.env.temp_data.setdefault('docname', 'index') # set dummy docname
doccls = app.registry.documenters[objtype]
docoptions = process_documenter_options(doccls, app.config, options)
bridge = DocumenterBridge(app.env, LoggingReporter(''), docoptions, 1)
@@ -112,7 +106,7 @@ def setup_test():
yield
- AutoDirective._special_attrgetters.clear()
+ app.registry.autodoc_attrgettrs.clear()
processed_docstrings = []
@@ -216,7 +210,7 @@ def test_format_signature():
class D:
pass
- class E(object):
+ class E:
pass
# no signature for classes without __init__
for C in (D, E):
@@ -226,7 +220,7 @@ def test_format_signature():
def __init__(self, a, b=None):
pass
- class G(F, object):
+ class G(F):
pass
for C in (F, G):
assert formatsig('class', 'C', C, None, None) == '(a, b=None)'
@@ -243,7 +237,7 @@ def test_format_signature():
some docstring for __init__.
'''
- class G2(F2, object):
+ class G2(F2):
pass
assert formatsig('class', 'F2', F2, None, None) == \
@@ -291,13 +285,13 @@ def test_format_signature():
@pytest.mark.usefixtures('setup_test')
def test_get_doc():
- def getdocl(objtype, obj, encoding=None):
+ def getdocl(objtype, obj):
inst = app.registry.documenters[objtype](directive, 'tmp')
inst.object = obj
inst.objpath = [obj.__name__]
inst.doc_as_attr = False
inst.format_signature() # handle docstring signatures!
- ds = inst.get_doc(encoding)
+ ds = inst.get_doc()
# for testing purposes, concat them and strip the empty line at the end
res = sum(ds, [])[:-1]
print(res)
@@ -330,12 +324,12 @@ def test_get_doc():
# charset guessing (this module is encoded in utf-8)
def f():
"""Döcstring"""
- assert getdocl('function', f) == [u'Döcstring']
+ assert getdocl('function', f) == ['Döcstring']
# already-unicode docstrings must be taken literally
def f():
- u"""Döcstring"""
- assert getdocl('function', f) == [u'Döcstring']
+ """Döcstring"""
+ assert getdocl('function', f) == ['Döcstring']
# class docstring: depends on config value which one is taken
class C:
@@ -399,7 +393,7 @@ def test_get_doc():
assert getdocl('class', E) == ['Class docstring', '', 'Init docstring']
# class does not have __init__ method
- class F(object):
+ class F:
"""Class docstring"""
# docstring in the __init__ method of base class will be discard
@@ -413,7 +407,7 @@ def test_get_doc():
assert getdocl('class', F) == ['Class docstring']
# class has __init__ method with no docstring
- class G(object):
+ class G:
"""Class docstring"""
def __init__(self):
pass
@@ -566,7 +560,7 @@ def test_attrgetter_using():
getattr_spy.append((obj, name))
return None
return getattr(obj, name, *defargs)
- AutoDirective._special_attrgetters[type] = special_getattr
+ app.add_autodoc_attrgetter(type, special_getattr)
del getattr_spy[:]
inst = app.registry.documenters[objtype](directive, name)
@@ -722,7 +716,7 @@ def test_autodoc_undoc_members(app):
' .. py:method:: Class.meth()',
' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
' .. py:attribute:: Class.prop',
- ROGER_METHOD,
+ ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)',
' .. py:attribute:: Class.skipattr',
' .. py:method:: Class.skipmeth()',
' .. py:attribute:: Class.udocattr',
@@ -752,7 +746,7 @@ def test_autodoc_imported_members(app):
"imported-members": None,
"ignore-module-all": None}
actual = do_autodoc(app, 'module', 'target', options)
- assert '.. py:function:: add_documenter(cls)' in actual
+ assert '.. py:function:: save_traceback(app)' in actual
@pytest.mark.sphinx('html', testroot='ext-autodoc')
@@ -802,7 +796,7 @@ def test_autodoc_special_members(app):
' .. py:method:: Class.meth()',
' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
' .. py:attribute:: Class.prop',
- ROGER_METHOD,
+ ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)',
' .. py:attribute:: Class.skipattr',
' .. py:method:: Class.skipmeth()',
' .. py:attribute:: Class.udocattr',
@@ -875,11 +869,6 @@ def test_autodoc_subclass_of_builtin_class(app):
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_inner_class(app):
- if PY3:
- builtins = ' alias of :class:`builtins.dict`'
- else:
- builtins = ' alias of :class:`__builtin__.dict`'
-
options = {"members": None}
actual = do_autodoc(app, 'class', 'target.Outer', options)
assert list(actual) == [
@@ -905,7 +894,7 @@ def test_autodoc_inner_class(app):
' .. py:attribute:: Outer.factory',
' :module: target',
' ',
- builtins
+ ' alias of :class:`builtins.dict`'
]
actual = do_autodoc(app, 'class', 'target.Outer.Inner', options)
@@ -974,7 +963,7 @@ def test_autodoc_member_order(app):
' .. py:attribute:: Class.docattr',
' .. py:attribute:: Class.udocattr',
' .. py:attribute:: Class.mdocattr',
- ROGER_METHOD,
+ ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)',
' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
' .. py:attribute:: Class.inst_attr_inline',
' .. py:attribute:: Class.inst_attr_comment',
@@ -993,7 +982,7 @@ def test_autodoc_member_order(app):
' .. py:method:: Class.excludemeth()',
' .. py:method:: Class.meth()',
' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
- ROGER_METHOD,
+ ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)',
' .. py:method:: Class.skipmeth()',
' .. py:method:: Class.undocmeth()',
' .. py:attribute:: Class._private_inst_attr',
@@ -1028,7 +1017,7 @@ def test_autodoc_member_order(app):
' .. py:method:: Class.meth()',
' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
' .. py:attribute:: Class.prop',
- ROGER_METHOD,
+ ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)',
' .. py:attribute:: Class.skipattr',
' .. py:method:: Class.skipmeth()',
' .. py:attribute:: Class.udocattr',
@@ -1038,38 +1027,32 @@ def test_autodoc_member_order(app):
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_module_scope(app):
- def convert(s):
- return re.sub('<.*>', '<FILTERED>', s) # for py2/py3
-
app.env.temp_data['autodoc:module'] = 'target'
actual = do_autodoc(app, 'attribute', 'Class.mdocattr')
- assert list(map(convert, actual)) == [
- u'',
- u'.. py:attribute:: Class.mdocattr',
- u' :module: target',
- u' :annotation: = <FILTERED>',
- u'',
- u' should be documented as well - süß',
- u' '
+ assert list(actual) == [
+ '',
+ '.. py:attribute:: Class.mdocattr',
+ ' :module: target',
+ ' :annotation: = <_io.StringIO object>',
+ '',
+ ' should be documented as well - süß',
+ ' '
]
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_autodoc_class_scope(app):
- def convert(s):
- return re.sub('<.*>', '<FILTERED>', s) # for py2/py3
-
app.env.temp_data['autodoc:module'] = 'target'
app.env.temp_data['autodoc:class'] = 'Class'
actual = do_autodoc(app, 'attribute', 'mdocattr')
- assert list(map(convert, actual)) == [
- u'',
- u'.. py:attribute:: Class.mdocattr',
- u' :module: target',
- u' :annotation: = <FILTERED>',
- u'',
- u' should be documented as well - süß',
- u' '
+ assert list(actual) == [
+ '',
+ '.. py:attribute:: Class.mdocattr',
+ ' :module: target',
+ ' :annotation: = <_io.StringIO object>',
+ '',
+ ' should be documented as well - süß',
+ ' '
]
@@ -1118,43 +1101,43 @@ def test_autodoc_docstring_signature(app):
app.config.autodoc_docstring_signature = False
actual = do_autodoc(app, 'class', 'target.DocstringSig', options)
assert list(actual) == [
- u'',
- u'.. py:class:: DocstringSig',
- u' :module: target',
- u'',
- u' ',
- u' .. py:method:: DocstringSig.meth()',
- u' :module: target',
- u' ',
- u' meth(FOO, BAR=1) -> BAZ',
- u' First line of docstring',
- u' ',
- u' rest of docstring',
- u' ',
- u' ',
- u' ',
- u' .. py:method:: DocstringSig.meth2()',
- u' :module: target',
- u' ',
- u' First line, no signature',
- u' Second line followed by indentation::',
- u' ',
- u' indented line',
- u' ',
- u' ',
- u' .. py:attribute:: DocstringSig.prop1',
- u' :module: target',
- u' ',
- u' DocstringSig.prop1(self)',
- u' First line of docstring',
- u' ',
- u' ',
- u' .. py:attribute:: DocstringSig.prop2',
- u' :module: target',
- u' ',
- u' First line of docstring',
- u' Second line of docstring',
- u' '
+ '',
+ '.. py:class:: DocstringSig',
+ ' :module: target',
+ '',
+ ' ',
+ ' .. py:method:: DocstringSig.meth()',
+ ' :module: target',
+ ' ',
+ ' meth(FOO, BAR=1) -> BAZ',
+ ' First line of docstring',
+ ' ',
+ ' rest of docstring',
+ ' ',
+ ' ',
+ ' ',
+ ' .. py:method:: DocstringSig.meth2()',
+ ' :module: target',
+ ' ',
+ ' First line, no signature',
+ ' Second line followed by indentation::',
+ ' ',
+ ' indented line',
+ ' ',
+ ' ',
+ ' .. py:attribute:: DocstringSig.prop1',
+ ' :module: target',
+ ' ',
+ ' DocstringSig.prop1(self)',
+ ' First line of docstring',
+ ' ',
+ ' ',
+ ' .. py:attribute:: DocstringSig.prop2',
+ ' :module: target',
+ ' ',
+ ' First line of docstring',
+ ' Second line of docstring',
+ ' '
]
@@ -1359,47 +1342,56 @@ def test_autofunction_for_callable(app):
]
-@pytest.mark.sphinx('html', testroot='root')
-def test_mocked_module_imports(app):
+@pytest.mark.sphinx('html', testroot='ext-autodoc')
+def test_mocked_module_imports(app, warning):
+ # no autodoc_mock_imports
options = {"members": 'TestAutodoc,decoratedFunction'}
- actual = do_autodoc(app, 'module', 'autodoc_missing_imports', options)
+ actual = do_autodoc(app, 'module', 'target.need_mocks', options)
+ assert list(actual) == []
+ assert "autodoc: failed to import module 'need_mocks'" in warning.getvalue()
+
+ # with autodoc_mock_imports
+ app.config.autodoc_mock_imports = [
+ 'missing_module',
+ 'missing_package1',
+ 'missing_package2',
+ 'missing_package3',
+ 'sphinx.missing_module4',
+ ]
+
+ warning.truncate(0)
+ actual = do_autodoc(app, 'module', 'target.need_mocks', options)
assert list(actual) == [
'',
- '.. py:module:: autodoc_missing_imports',
+ '.. py:module:: target.need_mocks',
'',
'',
'.. py:class:: TestAutodoc',
- ' :module: autodoc_missing_imports',
+ ' :module: target.need_mocks',
'',
' TestAutodoc docstring.',
' ',
' ',
' .. py:method:: TestAutodoc.decoratedMethod()',
- ' :module: autodoc_missing_imports',
+ ' :module: target.need_mocks',
' ',
' TestAutodoc::decoratedMethod docstring',
' ',
'',
'.. py:function:: decoratedFunction()',
- ' :module: autodoc_missing_imports',
+ ' :module: target.need_mocks',
'',
' decoratedFunction docstring',
' '
]
+ assert warning.getvalue() == ''
@pytest.mark.usefixtures('setup_test')
def test_partialfunction():
- def call_autodoc(objtype, name):
- inst = app.registry.documenters[objtype](directive, name)
- inst.generate()
- result = list(directive.result)
- del directive.result[:]
- return result
-
- options.members = ALL
- #options.undoc_members = True
- expected = [
+ options = {"members": None}
+ actual = do_autodoc(app, 'module', 'target.partialfunction', options)
+ assert list(actual) == [
'',
'.. py:module:: target.partialfunction',
'',
@@ -1423,11 +1415,25 @@ def test_partialfunction():
' '
]
- assert call_autodoc('module', 'target.partialfunction') == expected
+
+@pytest.mark.usefixtures('setup_test')
+def test_coroutine():
+ options = {"members": None}
+ actual = do_autodoc(app, 'class', 'target.coroutine.AsyncClass', options)
+ assert list(actual) == [
+ '',
+ '.. py:class:: AsyncClass',
+ ' :module: target.coroutine',
+ '',
+ ' ',
+ ' .. py:method:: AsyncClass.do_coroutine()',
+ ' :module: target.coroutine',
+ ' ',
+ ' A documented coroutine function',
+ ' '
+ ]
-@pytest.mark.skipif(sys.version_info < (3, 4),
- reason='functools.partialmethod is available on py34 or above')
@pytest.mark.sphinx('html', testroot='ext-autodoc')
def test_partialmethod(app):
expected = [
@@ -1566,6 +1572,29 @@ def test_autodoc_default_options_with_values(app):
assert ' .. py:attribute:: EnumCls.val3' not in actual
assert ' .. py:attribute:: EnumCls.val4' not in actual
+ # with :member-order:
+ app.config.autodoc_default_options = {
+ 'members': None,
+ 'member-order': 'bysource',
+ }
+ actual = do_autodoc(app, 'class', 'target.Class')
+ assert list(filter(lambda l: '::' in l, actual)) == [
+ '.. py:class:: Class(arg)',
+ ' .. py:attribute:: Class.descr',
+ ' .. py:method:: Class.meth()',
+ ' .. py:method:: Class.skipmeth()',
+ ' .. py:method:: Class.excludemeth()',
+ ' .. py:attribute:: Class.attr',
+ ' .. py:attribute:: Class.prop',
+ ' .. py:attribute:: Class.docattr',
+ ' .. py:attribute:: Class.udocattr',
+ ' .. py:attribute:: Class.mdocattr',
+ ' .. py:classmethod:: Class.moore(a, e, f) -> happiness',
+ ' .. py:attribute:: Class.inst_attr_inline',
+ ' .. py:attribute:: Class.inst_attr_comment',
+ ' .. py:attribute:: Class.inst_attr_string',
+ ]
+
# with :special-members:
app.config.autodoc_default_options = {
'special-members': '__init__,__iter__',
diff --git a/tests/test_build.py b/tests/test_build.py
index 47d76b2a2..93e4ac346 100644
--- a/tests/test_build.py
+++ b/tests/test_build.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build
~~~~~~~~~~
@@ -31,13 +30,8 @@ def request_session_head(url, **kwargs):
@pytest.fixture
def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
# If supported, build in a non-ASCII source dir
- test_name = u'\u65e5\u672c\u8a9e'
+ test_name = '\u65e5\u672c\u8a9e'
basedir = sphinx_test_tempdir / request.node.originalname
- # Windows with versions prior to 3.2 (I think) doesn't support unicode on system path
- # so we force a non-unicode path in that case
- if (sys.platform == "win32" and
- not (sys.version_info.major >= 3 and sys.version_info.minor >= 2)):
- return basedir / 'all'
try:
srcdir = basedir / test_name
if not srcdir.exists():
@@ -51,8 +45,8 @@ def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
=======================
"""))
- master_doc = srcdir / 'contents.txt'
- master_doc.write_text(master_doc.text() + dedent(u"""
+ master_doc = srcdir / 'index.txt'
+ master_doc.write_text(master_doc.text() + dedent("""
.. toctree::
%(test_name)s/%(test_name)s
@@ -93,10 +87,10 @@ def test_circular_toctree(app, status, warning):
warnings = warning.getvalue()
assert (
'circular toctree references detected, ignoring: '
- 'sub <- contents <- sub') in warnings
+ 'sub <- index <- sub') in warnings
assert (
'circular toctree references detected, ignoring: '
- 'contents <- sub <- contents') in warnings
+ 'index <- sub <- index') in warnings
@pytest.mark.sphinx(buildername='text', testroot='numbered-circular')
@@ -105,10 +99,10 @@ def test_numbered_circular_toctree(app, status, warning):
warnings = warning.getvalue()
assert (
'circular toctree references detected, ignoring: '
- 'sub <- contents <- sub') in warnings
+ 'sub <- index <- sub') in warnings
assert (
'circular toctree references detected, ignoring: '
- 'contents <- sub <- contents') in warnings
+ 'index <- sub <- index') in warnings
@pytest.mark.sphinx(buildername='dummy', testroot='images')
diff --git a/tests/test_build_applehelp.py b/tests/test_build_applehelp.py
index ed0022ce1..f66bd5eb8 100644
--- a/tests/test_build_applehelp.py
+++ b/tests/test_build_applehelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_applehelp
~~~~~~~~~~~~~~~~~~~~
@@ -17,12 +16,6 @@ import pytest
from sphinx.testing.path import path
-# Use plistlib.load in 3.4 and above
-try:
- read_plist = plistlib.load
-except AttributeError:
- read_plist = plistlib.readPlist
-
def check_structure(outdir):
contentsdir = outdir / 'Contents'
@@ -30,7 +23,7 @@ def check_structure(outdir):
assert (contentsdir / 'Info.plist').isfile()
with open(contentsdir / 'Info.plist', 'rb') as f:
- plist = read_plist(f)
+ plist = plistlib.load(f)
assert plist
assert len(plist)
assert plist.get('CFBundleIdentifier', None) == 'org.sphinx-doc.Sphinx.help'
diff --git a/tests/test_build_epub.py b/tests/test_build_epub.py
index f9872f28c..66ff62768 100644
--- a/tests/test_build_epub.py
+++ b/tests/test_build_epub.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_html
~~~~~~~~~~~~~~~
@@ -28,7 +27,7 @@ def runnable(command):
return p.returncode == 0
-class EPUBElementTree(object):
+class EPUBElementTree:
"""Test helper for content.opf and toc.ncx"""
namespaces = {
'idpf': 'http://www.idpf.org/2007/opf',
@@ -189,7 +188,7 @@ def test_nested_toc(app):
navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
- u"Welcome to Sphinx Tests’s documentation!")
+ "Welcome to Sphinx Tests’s documentation!")
assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints
@@ -210,7 +209,7 @@ def test_nested_toc(app):
toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
assert len(toc) == 4
assert navinfo(toc[0]) == ('index.xhtml',
- u"Welcome to Sphinx Tests’s documentation!")
+ "Welcome to Sphinx Tests’s documentation!")
assert toc[0].findall("./xhtml:ol") == []
# nav.xhtml / nested toc
@@ -245,7 +244,7 @@ def test_escaped_toc(app):
navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
assert len(navpoints) == 4
assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
- u"Welcome to Sphinx Tests's documentation!")
+ "Welcome to Sphinx Tests's documentation!")
assert navpoints[0].findall("./ncx:navPoint") == []
# toc.ncx / nested navPoints
@@ -254,7 +253,7 @@ def test_escaped_toc(app):
assert len(navchildren) == 4
assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', '<foo>')
assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
- assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', u'foo “1”')
+ assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo “1”')
assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
# nav.xhtml / nav
@@ -274,7 +273,7 @@ def test_escaped_toc(app):
tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
assert len(tocchildren) == 3
assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
- assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', u'foo “1”')
+ assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo “1”')
assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
diff --git a/tests/test_build_gettext.py b/tests/test_build_gettext.py
index ec09eaf21..5f6e29fd6 100644
--- a/tests/test_build_gettext.py
+++ b/tests/test_build_gettext.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_gettext
~~~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import gettext
import os
@@ -72,7 +70,7 @@ def test_msgfmt(app):
assert mo.isfile(), 'msgfmt failed'
_ = gettext.translation('test_root', app.outdir, languages=['en']).gettext
- assert _("Testing various markup") == u"Testing various markup"
+ assert _("Testing various markup") == "Testing various markup"
@pytest.mark.sphinx(
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index aae53615b..05a4f98ae 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_html
~~~~~~~~~~~~~~~
@@ -16,10 +15,9 @@ from itertools import cycle, chain
import pytest
from html5lib import getTreeBuilder, HTMLParser
-from six import PY3
from sphinx.errors import ConfigError
-from sphinx.testing.util import remove_unicode_literals, strip_escseq
+from sphinx.testing.util import strip_escseq
from sphinx.util.inventory import InventoryFile
@@ -30,10 +28,10 @@ ENV_WARNINGS = """\
%(root)s/autodoc_fodder.py:docstring of autodoc_fodder.MarkupError:\\d+: \
WARNING: Explicit markup ends without a blank line; unexpected unindent.
%(root)s/index.rst:\\d+: WARNING: Encoding 'utf-8-sig' used for reading included \
-file u'%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option
+file '%(root)s/wrongenc.inc' seems to be wrong, try giving an :encoding: option
%(root)s/index.rst:\\d+: WARNING: image file not readable: foo.png
%(root)s/index.rst:\\d+: WARNING: download file not readable: %(root)s/nonexisting.png
-%(root)s/index.rst:\\d+: WARNING: invalid single index entry u''
+%(root)s/index.rst:\\d+: WARNING: invalid single index entry ''
%(root)s/undecodable.rst:\\d+: WARNING: undecodable source characters, replacing \
with "\\?": b?'here: >>>(\\\\|/)xbb<<<((\\\\|/)r)?'
"""
@@ -45,10 +43,6 @@ HTML_WARNINGS = ENV_WARNINGS + """\
%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
"""
-if PY3:
- ENV_WARNINGS = remove_unicode_literals(ENV_WARNINGS)
- HTML_WARNINGS = remove_unicode_literals(HTML_WARNINGS)
-
etree_cache = {}
@@ -158,11 +152,11 @@ def test_html_warnings(app, warning):
(".//pre/span", 'line 2'),
],
'includes.html': [
- (".//pre", u'Max Strauß'),
+ (".//pre", 'Max Strauß'),
(".//a[@class='reference download internal']", ''),
- (".//pre/span", u'"quotes"'),
- (".//pre/span", u"'included'"),
- (".//pre/span[@class='s2']", u'üöä'),
+ (".//pre/span", '"quotes"'),
+ (".//pre/span", "'included'"),
+ (".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
@@ -170,7 +164,7 @@ def test_html_warnings(app, warning):
(".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre",
- u'^foo = "Including Unicode characters: üöä"\\n$'),
+ '^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
@@ -210,7 +204,7 @@ def test_html_warnings(app, warning):
(".//li/strong", r'^program\\n$'),
(".//li/em", r'^dfn\\n$'),
(".//li/kbd", r'^kbd\\n$'),
- (".//li/span", u'File \N{TRIANGULAR BULLET} Close'),
+ (".//li/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/code/span[@class='pre']", '^a/$'),
(".//li/code/em/span[@class='pre']", '^varpart$'),
(".//li/code/em/span[@class='pre']", '^i$'),
@@ -248,7 +242,7 @@ def test_html_warnings(app, warning):
# footnote reference
(".//a[@class='footnote-reference']", r'\[1\]'),
# created by reference lookup
- (".//a[@href='contents.html#ref1']", ''),
+ (".//a[@href='index.html#ref1']", ''),
# ``seealso`` directive
(".//div/p[@class='first admonition-title']", 'See also'),
# a ``hlist`` directive
@@ -271,12 +265,12 @@ def test_html_warnings(app, warning):
# tests for numeric labels
(".//a[@href='#id1'][@class='reference internal']/span", 'Testing various markup'),
# tests for smartypants
- (".//li", u'Smart “quotes” in English ‘text’.'),
- (".//li", u'Smart — long and – short dashes.'),
- (".//li", u'Ellipsis…'),
+ (".//li", 'Smart “quotes” in English ‘text’.'),
+ (".//li", 'Smart — long and – short dashes.'),
+ (".//li", 'Ellipsis…'),
(".//li//code//span[@class='pre']", 'foo--"bar"...'),
- (".//p", u'Этот «абзац» должен использовать „русские“ кавычки.'),
- (".//p", u'Il dit : « C’est “super” ! »'),
+ (".//p", 'Этот «абзац» должен использовать „русские“ кавычки.'),
+ (".//p", 'Il dit : « C’est “super” ! »'),
],
'objects.html': [
(".//dt[@id='mod.Cls.meth1']", ''),
@@ -347,7 +341,7 @@ def test_html_warnings(app, warning):
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'-p'),
],
- 'contents.html': [
+ 'index.html': [
(".//meta[@name='hc'][@content='hcval']", ''),
(".//meta[@name='hc_co'][@content='hcval_co']", ''),
(".//td[@class='label']", r'\[Ref1\]'),
diff --git a/tests/test_build_html5.py b/tests/test_build_html5.py
index e4c51eaea..c2a7bad1c 100644
--- a/tests/test_build_html5.py
+++ b/tests/test_build_html5.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_html5
~~~~~~~~~~~~~~~~
@@ -67,11 +66,11 @@ def cached_etree_parse():
(".//pre/span", 'line 2'),
],
'includes.html': [
- (".//pre", u'Max Strauß'),
+ (".//pre", 'Max Strauß'),
(".//a[@class='reference download internal']", ''),
- (".//pre/span", u'"quotes"'),
- (".//pre/span", u"'included'"),
- (".//pre/span[@class='s2']", u'üöä'),
+ (".//pre/span", '"quotes"'),
+ (".//pre/span", "'included'"),
+ (".//pre/span[@class='s2']", 'üöä'),
(".//div[@class='inc-pyobj1 highlight-text notranslate']//pre",
r'^class Foo:\n pass\n\s*$'),
(".//div[@class='inc-pyobj2 highlight-text notranslate']//pre",
@@ -79,7 +78,7 @@ def cached_etree_parse():
(".//div[@class='inc-lines highlight-text notranslate']//pre",
r'^class Foo:\n pass\nclass Bar:\n$'),
(".//div[@class='inc-startend highlight-text notranslate']//pre",
- u'^foo = "Including Unicode characters: üöä"\\n$'),
+ '^foo = "Including Unicode characters: üöä"\\n$'),
(".//div[@class='inc-preappend highlight-text notranslate']//pre",
r'(?m)^START CODE$'),
(".//div[@class='inc-pyobj-dedent highlight-python notranslate']//span",
@@ -119,7 +118,7 @@ def cached_etree_parse():
(".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'),
(".//li/p/kbd", r'^kbd\\n$'),
- (".//li/p/span", u'File \N{TRIANGULAR BULLET} Close'),
+ (".//li/p/span", 'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'),
(".//li/p/code/em/span[@class='pre']", '^i$'),
@@ -157,7 +156,7 @@ def cached_etree_parse():
# footnote reference
(".//a[@class='footnote-reference brackets']", r'1'),
# created by reference lookup
- (".//a[@href='contents.html#ref1']", ''),
+ (".//a[@href='index.html#ref1']", ''),
# ``seealso`` directive
(".//div/p[@class='admonition-title']", 'See also'),
# a ``hlist`` directive
@@ -249,7 +248,7 @@ def cached_etree_parse():
(".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
'-p'),
],
- 'contents.html': [
+ 'index.html': [
(".//meta[@name='hc'][@content='hcval']", ''),
(".//meta[@name='hc_co'][@content='hcval_co']", ''),
(".//dt[@class='label']/span[@class='brackets']", r'Ref1'),
@@ -354,7 +353,7 @@ def test_html_download(app):
confoverrides={'html_experimental_html5_writer': True})
def test_html_download_role(app, status, warning):
app.build()
- digest = md5((app.srcdir / 'dummy.dat').encode('utf-8')).hexdigest()
+ digest = md5((app.srcdir / 'dummy.dat').encode()).hexdigest()
assert (app.outdir / '_downloads' / digest / 'dummy.dat').exists()
content = (app.outdir / 'index.html').text()
diff --git a/tests/test_build_htmlhelp.py b/tests/test_build_htmlhelp.py
index 99de2a65f..b56d42ee3 100644
--- a/tests/test_build_htmlhelp.py
+++ b/tests/test_build_htmlhelp.py
@@ -1,8 +1,9 @@
-# -*- coding: utf-8 -*-
"""
test_build_htmlhelp
~~~~~~~~~~~~~~~~~~~
+
Test the HTML Help builder and check output against XPath.
+
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
@@ -10,10 +11,29 @@
import re
import pytest
-from six import PY2
from sphinx.builders.htmlhelp import chm_htmlescape
+from sphinx.builders.htmlhelp import default_htmlhelp_basename
+from sphinx.config import Config
+
+
+@pytest.mark.sphinx('htmlhelp', testroot='basic')
+def test_default_htmlhelp_file_suffix(app, warning):
+ assert app.builder.out_suffix == '.html'
+
+
+@pytest.mark.sphinx('htmlhelp', testroot='basic',
+ confoverrides={'htmlhelp_file_suffix': '.htm'})
+def test_htmlhelp_file_suffix(app, warning):
+ assert app.builder.out_suffix == '.htm'
+
+
+def test_default_htmlhelp_basename():
+ config = Config({'project': 'Sphinx Documentation'})
+ config.init_values()
+ assert default_htmlhelp_basename(config) == 'sphinxdoc'
+
@pytest.mark.sphinx('htmlhelp', testroot='build-htmlhelp')
def test_chm(app):
@@ -34,21 +54,10 @@ def test_chm_htmlescape():
assert chm_htmlescape(u'Unicode 文字') == u'Unicode 文字'
assert chm_htmlescape('&#x45') == '&amp;#x45'
- if PY2:
- assert chm_htmlescape('<Hello> "world"') == '&lt;Hello&gt; "world"'
- assert chm_htmlescape('<Hello> "world"', True) == '&lt;Hello&gt; &quot;world&quot;'
- assert chm_htmlescape('<Hello> "world"', False) == '&lt;Hello&gt; "world"'
- else:
- assert chm_htmlescape('<Hello> "world"') == '&lt;Hello&gt; &quot;world&quot;'
- assert chm_htmlescape('<Hello> "world"', True) == '&lt;Hello&gt; &quot;world&quot;'
- assert chm_htmlescape('<Hello> "world"', False) == '&lt;Hello&gt; "world"'
-
- if PY2:
- # single quotes are not escaped on py2 (following the behavior of cgi.escape())
- assert chm_htmlescape("Hello 'world'") == "Hello 'world'"
- assert chm_htmlescape("Hello 'world'", True) == "Hello 'world'"
- assert chm_htmlescape("Hello 'world'", False) == "Hello 'world'"
- else:
- assert chm_htmlescape("Hello 'world'") == "Hello &#39;world&#39;"
- assert chm_htmlescape("Hello 'world'", True) == "Hello &#39;world&#39;"
- assert chm_htmlescape("Hello 'world'", False) == "Hello 'world'"
+ assert chm_htmlescape('<Hello> "world"') == '&lt;Hello&gt; &quot;world&quot;'
+ assert chm_htmlescape('<Hello> "world"', True) == '&lt;Hello&gt; &quot;world&quot;'
+ assert chm_htmlescape('<Hello> "world"', False) == '&lt;Hello&gt; "world"'
+
+ assert chm_htmlescape("Hello 'world'") == "Hello &#39;world&#39;"
+ assert chm_htmlescape("Hello 'world'", True) == "Hello &#39;world&#39;"
+ assert chm_htmlescape("Hello 'world'", False) == "Hello 'world'"
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index 53701afcf..8320fd255 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_latex
~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import re
@@ -17,11 +15,10 @@ from shutil import copyfile
from subprocess import Popen, PIPE
import pytest
-from six import PY3
from test_build_html import ENV_WARNINGS
from sphinx.errors import SphinxError
-from sphinx.testing.util import remove_unicode_literals, strip_escseq
+from sphinx.testing.util import strip_escseq
from sphinx.util import docutils
from sphinx.util.osutil import cd, ensuredir
from sphinx.writers.latex import LaTeXTranslator
@@ -40,9 +37,6 @@ LATEX_WARNINGS = ENV_WARNINGS + """\
%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
"""
-if PY3:
- LATEX_WARNINGS = remove_unicode_literals(LATEX_WARNINGS)
-
# only run latex if all needed packages are there
def kpsetest(*filenames):
@@ -113,6 +107,7 @@ def skip_if_stylefiles_notfound(testfunc):
def test_build_latex_doc(app, status, warning, engine, docclass):
app.config.latex_engine = engine
app.config.latex_documents[0] = app.config.latex_documents[0][:4] + (docclass,)
+ app.builder.init_context()
LaTeXTranslator.ignore_missing_images = True
app.builder.build_all()
@@ -150,6 +145,8 @@ def test_writer(app, status, warning):
'\\label{\\detokenize{markup:id11}}'
'\\end{wrapfigure}' in result)
+ assert 'Footnotes' not in result
+
@pytest.mark.sphinx('latex', testroot='warnings', freshenv=True)
def test_latex_warnings(app, status, warning):
@@ -324,25 +321,25 @@ def test_numref_with_language_ja(app, status, warning):
print(result)
print(status.getvalue())
print(warning.getvalue())
- assert u'\\renewcommand{\\figurename}{\u56f3}' in result # 図
- assert u'\\renewcommand{\\tablename}{\u8868}' in result # 表
- assert u'\\renewcommand{\\literalblockname}{\u30ea\u30b9\u30c8}' in result # リスト
- assert (u'\\hyperref[\\detokenize{index:fig1}]'
- u'{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result
+ assert '\\renewcommand{\\figurename}{\u56f3}' in result # 図
+ assert '\\renewcommand{\\tablename}{\u8868}' in result # 表
+ assert '\\renewcommand{\\literalblockname}{\u30ea\u30b9\u30c8}' in result # リスト
+ assert ('\\hyperref[\\detokenize{index:fig1}]'
+ '{\u56f3 \\ref{\\detokenize{index:fig1}}}') in result
assert ('\\hyperref[\\detokenize{baz:fig22}]'
'{Figure\\ref{\\detokenize{baz:fig22}}}') in result
- assert (u'\\hyperref[\\detokenize{index:table-1}]'
- u'{\u8868 \\ref{\\detokenize{index:table-1}}}') in result
+ assert ('\\hyperref[\\detokenize{index:table-1}]'
+ '{\u8868 \\ref{\\detokenize{index:table-1}}}') in result
assert ('\\hyperref[\\detokenize{baz:table22}]'
'{Table:\\ref{\\detokenize{baz:table22}}}') in result
- assert (u'\\hyperref[\\detokenize{index:code-1}]'
- u'{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result
+ assert ('\\hyperref[\\detokenize{index:code-1}]'
+ '{\u30ea\u30b9\u30c8 \\ref{\\detokenize{index:code-1}}}') in result
assert ('\\hyperref[\\detokenize{baz:code22}]'
'{Code-\\ref{\\detokenize{baz:code22}}}') in result
- assert (u'\\hyperref[\\detokenize{foo:foo}]'
- u'{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result
- assert (u'\\hyperref[\\detokenize{bar:bar-a}]'
- u'{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result
+ assert ('\\hyperref[\\detokenize{foo:foo}]'
+ '{\\ref{\\detokenize{foo:foo}} \u7ae0}') in result
+ assert ('\\hyperref[\\detokenize{bar:bar-a}]'
+ '{\\ref{\\detokenize{bar:bar-a}} \u7ae0}') in result
assert ('\\hyperref[\\detokenize{index:fig1}]{Fig.\\ref{\\detokenize{index:fig1}} '
'\\nameref{\\detokenize{index:fig1}}}') in result
assert ('\\hyperref[\\detokenize{foo:foo}]{Sect.\\ref{\\detokenize{foo:foo}} '
@@ -424,9 +421,13 @@ def test_babel_with_no_language_settings(app, status, warning):
in result)
assert '\\addto\\captionsenglish{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsenglish{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\addto\\extrasenglish{\\def\\pageautorefname{page}}\n' in result
assert '\\shorthandoff' not in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{page}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -445,9 +446,13 @@ def test_babel_with_language_de(app, status, warning):
in result)
assert '\\addto\\captionsngerman{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsngerman{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\addto\\extrasngerman{\\def\\pageautorefname{Seite}}\n' in result
assert '\\shorthandoff{"}' in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{Seite}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -466,10 +471,13 @@ def test_babel_with_language_ru(app, status, warning):
in result)
assert '\\addto\\captionsrussian{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsrussian{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert (u'\\addto\\extrasrussian{\\def\\pageautorefname'
- u'{\u0441\u0442\u0440\u0430\u043d\u0438\u0446\u0430}}\n' in result)
assert '\\shorthandoff{"}' in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{страница}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -488,9 +496,13 @@ def test_babel_with_language_tr(app, status, warning):
in result)
assert '\\addto\\captionsturkish{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsturkish{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\addto\\extrasturkish{\\def\\pageautorefname{sayfa}}\n' in result
assert '\\shorthandoff{=}' in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{sayfa}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -508,9 +520,13 @@ def test_babel_with_language_ja(app, status, warning):
assert '\\renewcommand{\\contentsname}{Table of content}\n' in result
assert '\\renewcommand{\\figurename}{Fig.}\n' in result
assert '\\renewcommand{\\tablename}{Table.}\n' in result
- assert u'\\def\\pageautorefname{ページ}\n' in result
assert '\\shorthandoff' not in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{ページ}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -529,11 +545,15 @@ def test_babel_with_unknown_language(app, status, warning):
in result)
assert '\\addto\\captionsenglish{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsenglish{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\addto\\extrasenglish{\\def\\pageautorefname{page}}\n' in result
assert '\\shorthandoff' in result
assert "WARNING: no Babel option known for language 'unknown'" in warning.getvalue()
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{page}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -553,9 +573,13 @@ def test_polyglossia_with_language_de(app, status, warning):
in result)
assert '\\addto\\captionsgerman{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsgerman{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\def\\pageautorefname{Seite}\n' in result
assert '\\shorthandoff' not in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{Seite}' in result
+
@pytest.mark.sphinx(
'latex', testroot='latex-babel',
@@ -575,9 +599,13 @@ def test_polyglossia_with_language_de_1901(app, status, warning):
in result)
assert '\\addto\\captionsgerman{\\renewcommand{\\figurename}{Fig.}}\n' in result
assert '\\addto\\captionsgerman{\\renewcommand{\\tablename}{Table.}}\n' in result
- assert '\\def\\pageautorefname{page}\n' in result
assert '\\shorthandoff' not in result
+ # sphinxmessages.sty
+ result = (app.outdir / 'sphinxmessages.sty').text(encoding='utf8')
+ print(result)
+ assert r'\def\pageautorefname{page}' in result
+
@pytest.mark.sphinx('latex')
def test_footnote(app, status, warning):
@@ -1274,7 +1302,7 @@ def test_latex_glossary(app, status, warning):
app.builder.build_all()
result = (app.outdir / 'test.tex').text(encoding='utf8')
- assert (u'\\item[{änhlich\\index{änhlich@\\spxentry{änhlich}|spxpagem}'
+ assert ('\\item[{änhlich\\index{änhlich@\\spxentry{änhlich}|spxpagem}'
r'\phantomsection'
r'\label{\detokenize{index:term-anhlich}}}] \leavevmode' in result)
assert (r'\item[{boson\index{boson@\spxentry{boson}|spxpagem}\phantomsection'
@@ -1289,7 +1317,7 @@ def test_latex_glossary(app, status, warning):
r'\label{\detokenize{index:term-myon}}}] \leavevmode'
r'\item[{electron\index{electron@\spxentry{electron}|spxpagem}\phantomsection'
r'\label{\detokenize{index:term-electron}}}] \leavevmode' in result)
- assert (u'\\item[{über\\index{über@\\spxentry{über}|spxpagem}\\phantomsection'
+ assert ('\\item[{über\\index{über@\\spxentry{über}|spxpagem}\\phantomsection'
r'\label{\detokenize{index:term-uber}}}] \leavevmode' in result)
diff --git a/tests/test_build_linkcheck.py b/tests/test_build_linkcheck.py
index 1716a0c42..d6eacf467 100644
--- a/tests/test_build_linkcheck.py
+++ b/tests/test_build_linkcheck.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_linkcheck
~~~~~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
diff --git a/tests/test_build_manpage.py b/tests/test_build_manpage.py
index 3448d6eeb..5861422ef 100644
--- a/tests/test_build_manpage.py
+++ b/tests/test_build_manpage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_manpage
~~~~~~~~~~~~~~~~~~
@@ -8,20 +7,35 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
+from sphinx.builders.manpage import default_man_pages
+from sphinx.config import Config
+
@pytest.mark.sphinx('man')
def test_all(app, status, warning):
app.builder.build_all()
- assert (app.outdir / 'SphinxTests.1').exists()
+ assert (app.outdir / 'sphinxtests.1').exists()
- content = (app.outdir / 'SphinxTests.1').text()
+ content = (app.outdir / 'sphinxtests.1').text()
assert r'\fBprint \fP\fIi\fP\fB\en\fP' in content
assert r'\fBmanpage\en\fP' in content
# term of definition list including nodes.strong
assert '\n.B term1\n' in content
assert '\nterm2 (\\fBstronged partially\\fP)\n' in content
+
+ assert 'Footnotes' not in content
+
+
+def test_default_man_pages():
+ config = Config({'master_doc': 'index',
+ 'project': 'STASI™ Documentation',
+ 'author': "Wolfgang Schäuble & G'Beckstein",
+ 'release': '1.0'})
+ config.init_values()
+ expected = [('index', 'stasi', 'STASI™ Documentation 1.0',
+ ["Wolfgang Schäuble & G'Beckstein"], 1)]
+ assert default_man_pages(config) == expected
diff --git a/tests/test_build_qthelp.py b/tests/test_build_qthelp.py
index f427ec2d8..dfbfee08e 100644
--- a/tests/test_build_qthelp.py
+++ b/tests/test_build_qthelp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_qthelp
~~~~~~~~~~~~~~~~~
diff --git a/tests/test_build_texinfo.py b/tests/test_build_texinfo.py
index 3b7e0c3de..364cdc60f 100644
--- a/tests/test_build_texinfo.py
+++ b/tests/test_build_texinfo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_texinfo
~~~~~~~~~~~~~~~~~~
@@ -8,17 +7,17 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import re
from subprocess import Popen, PIPE
import pytest
-from six import PY3
from test_build_html import ENV_WARNINGS
-from sphinx.testing.util import remove_unicode_literals, strip_escseq
+from sphinx.builders.texinfo import default_texinfo_documents
+from sphinx.config import Config
+from sphinx.testing.util import strip_escseq
from sphinx.writers.texinfo import TexinfoTranslator
@@ -30,9 +29,6 @@ TEXINFO_WARNINGS = ENV_WARNINGS + """\
\\['application/pdf', 'image/svg\\+xml'\\] \\(svgimg.\\*\\)
"""
-if PY3:
- TEXINFO_WARNINGS = remove_unicode_literals(TEXINFO_WARNINGS)
-
@pytest.mark.sphinx('texinfo', testroot='warnings', freshenv=True)
def test_texinfo_warnings(app, status, warning):
@@ -50,16 +46,17 @@ def test_texinfo_warnings(app, status, warning):
def test_texinfo(app, status, warning):
TexinfoTranslator.ignore_missing_images = True
app.builder.build_all()
- result = (app.outdir / 'SphinxTests.texi').text(encoding='utf8')
- assert ('@anchor{markup doc}@anchor{12}'
- '@anchor{markup id1}@anchor{13}'
- '@anchor{markup testing-various-markup}@anchor{14}' in result)
+ result = (app.outdir / 'sphinxtests.texi').text(encoding='utf8')
+ assert ('@anchor{markup doc}@anchor{11}'
+ '@anchor{markup id1}@anchor{12}'
+ '@anchor{markup testing-various-markup}@anchor{13}' in result)
+ assert 'Footnotes' not in result
# now, try to run makeinfo over it
cwd = os.getcwd()
os.chdir(app.outdir)
try:
try:
- p = Popen(['makeinfo', '--no-split', 'SphinxTests.texi'],
+ p = Popen(['makeinfo', '--no-split', 'sphinxtests.texi'],
stdout=PIPE, stderr=PIPE)
except OSError:
raise pytest.skip.Exception # most likely makeinfo was not found
@@ -93,3 +90,14 @@ def test_texinfo_citation(app, status, warning):
'This is a citation\n') in output
assert ('@anchor{index cite2}@anchor{2}@w{(CITE2)} \n'
'This is a multiline citation\n') in output
+
+
+def test_default_texinfo_documents():
+ config = Config({'master_doc': 'index',
+ 'project': 'STASI™ Documentation',
+ 'author': "Wolfgang Schäuble & G'Beckstein"})
+ config.init_values()
+ expected = [('index', 'stasi', 'STASI™ Documentation',
+ "Wolfgang Schäuble & G'Beckstein", 'stasi',
+ 'One line description of project', 'Miscellaneous')]
+ assert default_texinfo_documents(config) == expected
diff --git a/tests/test_build_text.py b/tests/test_build_text.py
index f89187c85..b0755e689 100644
--- a/tests/test_build_text.py
+++ b/tests/test_build_text.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_text
~~~~~~~~~~~~~~~
@@ -12,7 +11,7 @@
import pytest
from docutils.utils import column_width
-from sphinx.writers.text import MAXWIDTH
+from sphinx.writers.text import MAXWIDTH, Table, Cell
def with_text_app(*args, **kw):
@@ -50,12 +49,12 @@ def test_lineblock(app, status, warning):
app.builder.build_update()
result = (app.outdir / 'lineblock.txt').text(encoding='utf-8')
expect = (
- u"* one\n"
- u"\n"
- u" line-block 1\n"
- u" line-block 2\n"
- u"\n"
- u"followed paragraph.\n"
+ "* one\n"
+ "\n"
+ " line-block 1\n"
+ " line-block 2\n"
+ "\n"
+ "followed paragraph.\n"
)
assert result == expect
@@ -87,6 +86,41 @@ def test_nonascii_maxwidth(app, status, warning):
assert max(line_widths) < MAXWIDTH
+def test_table_builder():
+ table = Table([6, 6])
+ table.add_cell(Cell("foo"))
+ table.add_cell(Cell("bar"))
+ table_str = str(table).split("\n")
+ assert table_str[0] == "+--------+--------+"
+ assert table_str[1] == "| foo | bar |"
+ assert table_str[2] == "+--------+--------+"
+ assert repr(table).count("<Cell ") == 2
+
+
+def test_table_separator():
+ table = Table([6, 6])
+ table.add_cell(Cell("foo"))
+ table.add_cell(Cell("bar"))
+ table.set_separator()
+ table.add_row()
+ table.add_cell(Cell("FOO"))
+ table.add_cell(Cell("BAR"))
+ table_str = str(table).split("\n")
+ assert table_str[0] == "+--------+--------+"
+ assert table_str[1] == "| foo | bar |"
+ assert table_str[2] == "|========|========|"
+ assert table_str[3] == "| FOO | BAR |"
+ assert table_str[4] == "+--------+--------+"
+ assert repr(table).count("<Cell ") == 4
+
+
+def test_table_cell():
+ cell = Cell("Foo bar baz")
+ cell.wrap(3)
+ assert "Cell" in repr(cell)
+ assert cell.wrapped == ["Foo", "bar", "baz"]
+
+
@with_text_app()
def test_table_with_empty_cell(app, status, warning):
app.builder.build_update()
@@ -102,6 +136,63 @@ def test_table_with_empty_cell(app, status, warning):
@with_text_app()
+def test_table_with_rowspan(app, status, warning):
+ app.builder.build_update()
+ result = (app.outdir / 'table_rowspan.txt').text(encoding='utf-8')
+ lines = [line.strip() for line in result.splitlines() if line.strip()]
+ assert lines[0] == "+-------+-------+"
+ assert lines[1] == "| XXXXXXXXX |"
+ assert lines[2] == "+-------+-------+"
+ assert lines[3] == "| | XXX |"
+ assert lines[4] == "+-------+-------+"
+ assert lines[5] == "| XXX | |"
+ assert lines[6] == "+-------+-------+"
+
+
+@with_text_app()
+def test_table_with_colspan(app, status, warning):
+ app.builder.build_update()
+ result = (app.outdir / 'table_colspan.txt').text(encoding='utf-8')
+ lines = [line.strip() for line in result.splitlines() if line.strip()]
+ assert lines[0] == "+-------+-------+"
+ assert lines[1] == "| XXX | XXX |"
+ assert lines[2] == "+-------+-------+"
+ assert lines[3] == "| | XXX |"
+ assert lines[4] == "+-------+ |"
+ assert lines[5] == "| XXX | |"
+ assert lines[6] == "+-------+-------+"
+
+
+@with_text_app()
+def test_table_with_colspan_left(app, status, warning):
+ app.builder.build_update()
+ result = (app.outdir / 'table_colspan_left.txt').text(encoding='utf-8')
+ lines = [line.strip() for line in result.splitlines() if line.strip()]
+ assert lines[0] == "+-------+-------+"
+ assert lines[1] == "| XXX | XXX |"
+ assert lines[2] == "+-------+-------+"
+ assert lines[3] == "| XXX | XXX |"
+ assert lines[4] == "| +-------+"
+ assert lines[5] == "| | |"
+ assert lines[6] == "+-------+-------+"
+
+
+@with_text_app()
+def test_table_with_colspan_and_rowspan(app, status, warning):
+ app.builder.build_update()
+ result = (app.outdir / 'table_colspan_and_rowspan.txt').text(encoding='utf-8')
+ lines = [line.strip() for line in result.splitlines() if line.strip()]
+ assert result
+ assert lines[0] == "+-------+-------+-------+"
+ assert lines[1] == "| AAA | BBB |"
+ assert lines[2] == "+-------+-------+ |"
+ assert lines[3] == "| DDD | XXX | |"
+ assert lines[4] == "| +-------+-------+"
+ assert lines[5] == "| | CCC |"
+ assert lines[6] == "+-------+-------+-------+"
+
+
+@with_text_app()
def test_list_items_in_admonition(app, status, warning):
app.builder.build_update()
result = (app.outdir / 'listitems.txt').text(encoding='utf-8')
@@ -116,8 +207,8 @@ def test_list_items_in_admonition(app, status, warning):
@with_text_app()
def test_secnums(app, status, warning):
app.builder.build_all()
- contents = (app.outdir / 'contents.txt').text(encoding='utf8')
- lines = contents.splitlines()
+ index = (app.outdir / 'index.txt').text(encoding='utf8')
+ lines = index.splitlines()
assert lines[0] == "* 1. Section A"
assert lines[1] == ""
assert lines[2] == "* 2. Section B"
@@ -142,8 +233,8 @@ def test_secnums(app, status, warning):
app.config.text_secnumber_suffix = " "
app.builder.build_all()
- contents = (app.outdir / 'contents.txt').text(encoding='utf8')
- lines = contents.splitlines()
+ index = (app.outdir / 'index.txt').text(encoding='utf8')
+ lines = index.splitlines()
assert lines[0] == "* 1 Section A"
assert lines[1] == ""
assert lines[2] == "* 2 Section B"
@@ -168,8 +259,8 @@ def test_secnums(app, status, warning):
app.config.text_add_secnumbers = False
app.builder.build_all()
- contents = (app.outdir / 'contents.txt').text(encoding='utf8')
- lines = contents.splitlines()
+ index = (app.outdir / 'index.txt').text(encoding='utf8')
+ lines = index.splitlines()
assert lines[0] == "* Section A"
assert lines[1] == ""
assert lines[2] == "* Section B"
diff --git a/tests/test_builder.py b/tests/test_builder.py
index d58091e8d..b6f7cb8ea 100644
--- a/tests/test_builder.py
+++ b/tests/test_builder.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_builder
~~~~~~~~
@@ -22,7 +21,7 @@ def test_incremental_reading(app):
# before second reading, add, modify and remove source files
(app.srcdir / 'new.txt').write_text('New file\n========\n')
- app.env.all_docs['contents'] = 0 # mark as modified
+ app.env.all_docs['index'] = 0 # mark as modified
(app.srcdir / 'autodoc.txt').unlink()
# second reading
@@ -31,7 +30,7 @@ def test_incremental_reading(app):
# "includes" and "images" are in there because they contain references
# to nonexisting downloadable or image files, which are given another
# chance to exist
- assert set(updated) == set(['contents', 'new', 'includes', 'images'])
+ assert set(updated) == set(['index', 'new', 'includes', 'images'])
assert 'autodoc' not in app.env.all_docs
assert 'autodoc' not in app.env.found_docs
diff --git a/tests/test_catalogs.py b/tests/test_catalogs.py
index 79e8d6b95..d53430a0d 100644
--- a/tests/test_catalogs.py
+++ b/tests/test_catalogs.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_build_base
~~~~~~~~~~~~~~~
diff --git a/tests/test_config.py b/tests/test_config.py
index 5dd05550c..2e3b6b9ae 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_config
~~~~~~~~~~~
@@ -11,10 +10,9 @@
"""
import mock
import pytest
-from six import PY3
import sphinx
-from sphinx.config import Config, ENUM, string_classes, check_confval_types
+from sphinx.config import Config, ENUM, check_confval_types
from sphinx.errors import ExtensionError, ConfigError, VersionRequirementError
from sphinx.testing.path import path
@@ -78,7 +76,7 @@ def test_extension_values():
config = Config()
# check standard settings
- assert config.master_doc == 'contents'
+ assert config.master_doc == 'index'
# can't override it by add_config_value()
with pytest.raises(ExtensionError) as excinfo:
@@ -122,37 +120,24 @@ def test_overrides():
@mock.patch("sphinx.config.logger")
def test_errors_warnings(logger, tempdir):
# test the error for syntax errors in the config file
- (tempdir / 'conf.py').write_text(u'project = \n', encoding='ascii')
+ (tempdir / 'conf.py').write_text('project = \n', encoding='ascii')
with pytest.raises(ConfigError) as excinfo:
Config.read(tempdir, {}, None)
assert 'conf.py' in str(excinfo.value)
# test the automatic conversion of 2.x only code in configs
(tempdir / 'conf.py').write_text(
- u'# -*- coding: utf-8\n\nproject = u"Jägermeister"\n',
+ '# -*- coding: utf-8\n\nproject = u"Jägermeister"\n',
encoding='utf-8')
cfg = Config.read(tempdir, {}, None)
cfg.init_values()
- assert cfg.project == u'Jägermeister'
+ assert cfg.project == 'Jägermeister'
assert logger.called is False
- # test the warning for bytestrings with non-ascii content
- # bytestrings with non-ascii content are a syntax error in python3 so we
- # skip the test there
- if PY3:
- return
- (tempdir / 'conf.py').write_text(
- u'# -*- coding: latin-1\nproject = "fooä"\n', encoding='latin-1')
- cfg = Config.read(tempdir, {}, None)
-
- assert logger.warning.called is False
- cfg.check_unicode()
- assert logger.warning.called is True
-
def test_errors_if_setup_is_not_callable(tempdir, make_app):
# test the error to call setup() in the config file
- (tempdir / 'conf.py').write_text(u'setup = 1')
+ (tempdir / 'conf.py').write_text('setup = 1')
with pytest.raises(ConfigError) as excinfo:
make_app(srcdir=tempdir)
assert 'callable' in str(excinfo.value)
@@ -198,7 +183,7 @@ def test_config_eol(logger, tempdir):
configfile.write_bytes(b'project = "spam"' + eol)
cfg = Config.read(tempdir, {}, None)
cfg.init_values()
- assert cfg.project == u'spam'
+ assert cfg.project == 'spam'
assert logger.called is False
@@ -218,7 +203,7 @@ def test_builtin_conf(app, status, warning):
# example classes for type checking
-class A(object):
+class A:
pass
@@ -242,12 +227,8 @@ TYPECHECK_WARNINGS = [
('value8', B(), None, C(), False), # sibling type
('value9', None, None, 'foo', False), # no default or no annotations
('value10', None, None, 123, False), # no default or no annotations
- ('value11', None, [str], u'bar', False if PY3 else True), # str vs unicode
- ('value12', 'string', None, u'bar', False), # str vs unicode
- ('value13', None, string_classes, 'bar', False), # string_classes
- ('value14', None, string_classes, u'bar', False), # string_classes
- ('value15', u'unicode', None, 'bar', False), # str vs unicode
- ('value16', u'unicode', None, u'bar', False), # str vs unicode
+ ('value11', None, [str], 'bar', False), # str
+ ('value12', 'string', None, 'bar', False), # str
]
@@ -261,6 +242,27 @@ def test_check_types(logger, name, default, annotation, actual, warned):
assert logger.warning.called == warned
+TYPECHECK_WARNING_MESSAGES = [
+ ('value1', 'string', [str], ['foo', 'bar'],
+ "The config value `value1' has type `list'; expected `str'."),
+ ('value1', 'string', [str, int], ['foo', 'bar'],
+ "The config value `value1' has type `list'; expected `str' or `int'."),
+ ('value1', 'string', [str, int, tuple], ['foo', 'bar'],
+ "The config value `value1' has type `list'; expected `str', `int', or `tuple'."),
+]
+
+
+@mock.patch("sphinx.config.logger")
+@pytest.mark.parametrize("name,default,annotation,actual,message", TYPECHECK_WARNING_MESSAGES)
+def test_conf_warning_message(logger, name, default, annotation, actual, message):
+ config = Config({name: actual})
+ config.add(name, default, False, annotation or ())
+ config.init_values()
+ check_confval_types(None, config)
+ logger.warning.assert_called()
+ assert logger.warning.call_args[0][0] == message
+
+
@mock.patch("sphinx.config.logger")
def test_check_enum(logger):
config = Config()
diff --git a/tests/test_correct_year.py b/tests/test_correct_year.py
index e7501bb6a..9403dcf6f 100644
--- a/tests/test_correct_year.py
+++ b/tests/test_correct_year.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_correct_year
~~~~~~~~~~~~~~~~~
@@ -33,5 +32,5 @@ def expect_date(request, monkeypatch):
@pytest.mark.sphinx('html', testroot='correct-year')
def test_correct_year(expect_date, app):
app.build()
- content = (app.outdir / 'contents.html').text()
+ content = (app.outdir / 'index.html').text()
assert expect_date in content
diff --git a/tests/test_directive_code.py b/tests/test_directive_code.py
index 3592ebe0a..c984c9762 100644
--- a/tests/test_directive_code.py
+++ b/tests/test_directive_code.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_directive_code
~~~~~~~~~~~~~~~~~~~
@@ -95,10 +94,10 @@ def test_LiteralIncludeReader_lines1(literal_inc_path):
options = {'lines': '1-4'}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read()
- assert content == (u"# Literally included file using Python highlighting\n"
- u"# -*- coding: utf-8 -*-\n"
- u"\n"
- u"foo = \"Including Unicode characters: üöä\"\n")
+ assert content == ("# Literally included file using Python highlighting\n"
+ "# -*- coding: utf-8 -*-\n"
+ "\n"
+ "foo = \"Including Unicode characters: üöä\"\n")
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
@@ -106,9 +105,9 @@ def test_LiteralIncludeReader_lines2(literal_inc_path):
options = {'lines': '1,4,6'}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read()
- assert content == (u"# Literally included file using Python highlighting\n"
- u"foo = \"Including Unicode characters: üöä\"\n"
- u"class Foo:\n")
+ assert content == ("# Literally included file using Python highlighting\n"
+ "foo = \"Including Unicode characters: üöä\"\n"
+ "class Foo:\n")
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
@@ -116,9 +115,9 @@ def test_LiteralIncludeReader_lines_and_lineno_match1(literal_inc_path):
options = {'lines': '4-6', 'lineno-match': True}
reader = LiteralIncludeReader(literal_inc_path, options, DUMMY_CONFIG)
content, lines = reader.read()
- assert content == (u"foo = \"Including Unicode characters: üöä\"\n"
- u"\n"
- u"class Foo:\n")
+ assert content == ("foo = \"Including Unicode characters: üöä\"\n"
+ "\n"
+ "class Foo:\n")
assert reader.lineno_start == 4
@@ -312,11 +311,11 @@ def test_code_block(app, status, warning):
def test_code_block_caption_html(app, status, warning):
app.builder.build(['caption'])
html = (app.outdir / 'caption.html').text(encoding='utf-8')
- caption = (u'<div class="code-block-caption">'
- u'<span class="caption-number">Listing 1 </span>'
- u'<span class="caption-text">caption <em>test</em> rb'
- u'</span><a class="headerlink" href="#id1" '
- u'title="Permalink to this code">\xb6</a></div>')
+ caption = ('<div class="code-block-caption">'
+ '<span class="caption-number">Listing 1 </span>'
+ '<span class="caption-text">caption <em>test</em> rb'
+ '</span><a class="headerlink" href="#id1" '
+ 'title="Permalink to this code">\xb6</a></div>')
assert caption in html
@@ -462,11 +461,11 @@ def test_literalinclude_file_whole_of_emptyline(app, status, warning):
def test_literalinclude_caption_html(app, status, warning):
app.builder.build('index')
html = (app.outdir / 'caption.html').text(encoding='utf-8')
- caption = (u'<div class="code-block-caption">'
- u'<span class="caption-number">Listing 2 </span>'
- u'<span class="caption-text">caption <strong>test</strong> py'
- u'</span><a class="headerlink" href="#id2" '
- u'title="Permalink to this code">\xb6</a></div>')
+ caption = ('<div class="code-block-caption">'
+ '<span class="caption-number">Listing 2 </span>'
+ '<span class="caption-text">caption <strong>test</strong> py'
+ '</span><a class="headerlink" href="#id2" '
+ 'title="Permalink to this code">\xb6</a></div>')
assert caption in html
diff --git a/tests/test_directive_only.py b/tests/test_directive_only.py
index 5b55bc370..0d2269366 100644
--- a/tests/test_directive_only.py
+++ b/tests/test_directive_only.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_only_directive
~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_directive_other.py b/tests/test_directive_other.py
index 6eb7a2056..61882dce4 100644
--- a/tests/test_directive_other.py
+++ b/tests/test_directive_other.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_directive_other
~~~~~~~~~~~~~~~~~~~~
@@ -21,9 +20,11 @@ from sphinx.testing.util import assert_node
def parse(app, docname, text):
app.env.temp_data['docname'] = docname
+ parser = RSTParser()
+ parser.set_application(app)
return publish_doctree(text, app.srcdir / docname + '.rst',
reader=SphinxStandaloneReader(app),
- parser=RSTParser(),
+ parser=parser,
settings_overrides={'env': app.env,
'gettext_compact': True})
diff --git a/tests/test_docutilsconf.py b/tests/test_docutilsconf.py
index 989edc6a8..e5b81c046 100644
--- a/tests/test_docutilsconf.py
+++ b/tests/test_docutilsconf.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_docutilsconf
~~~~~~~~~~~~~~~~~
@@ -10,7 +9,6 @@
"""
import re
-import sys
import pytest
@@ -27,7 +25,7 @@ def test_html_with_default_docutilsconf(app, status, warning):
with patch_docutils(app.confdir):
app.builder.build(['contents'])
- result = (app.outdir / 'contents.html').text(encoding='utf-8')
+ result = (app.outdir / 'index.html').text(encoding='utf-8')
assert regex_count(r'<th class="field-name">', result) == 1
assert regex_count(r'<th class="field-name" colspan="2">', result) == 1
@@ -45,7 +43,7 @@ def test_html_with_docutilsconf(app, status, warning):
with patch_docutils(app.confdir):
app.builder.build(['contents'])
- result = (app.outdir / 'contents.html').text(encoding='utf-8')
+ result = (app.outdir / 'index.html').text(encoding='utf-8')
assert regex_count(r'<th class="field-name">', result) == 0
assert regex_count(r'<th class="field-name" colspan="2">', result) == 2
@@ -82,12 +80,9 @@ def test_texinfo(app, status, warning):
@pytest.mark.sphinx('html', testroot='docutilsconf',
docutilsconf='[general]\nsource_link=true\n')
-@pytest.mark.skip(sys.platform == "win32" and
- not (sys.version_info.major >= 3 and sys.version_info.minor >= 2),
- reason="Python < 3.2 on Win32 doesn't handle non-ASCII paths right")
def test_docutils_source_link_with_nonascii_file(app, status, warning):
srcdir = path(app.srcdir)
- mb_name = u'\u65e5\u672c\u8a9e'
+ mb_name = '\u65e5\u672c\u8a9e'
try:
(srcdir / (mb_name + '.txt')).write_text('')
except UnicodeEncodeError:
diff --git a/tests/test_domain_cpp.py b/tests/test_domain_cpp.py
index 2602f73e6..6da91df82 100644
--- a/tests/test_domain_cpp.py
+++ b/tests/test_domain_cpp.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_domain_cpp
~~~~~~~~~~~~~~~
@@ -13,7 +12,6 @@ import re
import sys
import pytest
-from six import text_type
import sphinx.domains.cpp as cppDomain
from sphinx import addnodes
@@ -22,7 +20,7 @@ from sphinx.domains.cpp import Symbol, _max_id, _id_prefix
def parse(name, string):
- class Config(object):
+ class Config:
cpp_id_attributes = ["id_attr"]
cpp_paren_attributes = ["paren_attr"]
parser = DefinitionParser(string, None, Config())
@@ -40,10 +38,10 @@ def check(name, input, idDict, output=None):
if output is None:
output = input
ast = parse(name, input)
- res = text_type(ast)
+ res = str(ast)
if res != output:
print("")
- print("Input: ", text_type(input))
+ print("Input: ", input)
print("Result: ", res)
print("Expected: ", output)
raise DefinitionError("")
@@ -74,19 +72,19 @@ def check(name, input, idDict, output=None):
res.append(idExpected[i] == idActual[i])
if not all(res):
- print("input: %s" % text_type(input).rjust(20))
+ print("input: %s" % input.rjust(20))
for i in range(1, _max_id + 1):
if res[i]:
continue
print("Error in id version %d." % i)
- print("result: %s" % str(idActual[i]))
- print("expected: %s" % str(idExpected[i]))
+ print("result: %s" % idActual[i])
+ print("expected: %s" % idExpected[i])
print(rootSymbol.dump(0))
raise DefinitionError("")
def test_fundamental_types():
- # see http://en.cppreference.com/w/cpp/language/types
+ # see https://en.cppreference.com/w/cpp/language/types
for t, id_v2 in cppDomain._id_fundamental_v2.items():
def makeIdV1():
if t == 'decltype(auto)':
@@ -816,7 +814,7 @@ not found in `{test}`
assert result, expect
return set(result.group('classes').split())
- class RoleClasses(object):
+ class RoleClasses:
"""Collect the classes from the layout that was generated for a given role."""
def __init__(self, role, root, contents):
diff --git a/tests/test_domain_js.py b/tests/test_domain_js.py
index 9f5d70486..4dcfdb77d 100644
--- a/tests/test_domain_js.py
+++ b/tests/test_domain_js.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_domain_js
~~~~~~~~~~~~~~
@@ -39,25 +38,20 @@ def test_domain_js_xrefs(app, status, warning):
doctree = app.env.get_doctree('roles')
refnodes = list(doctree.traverse(addnodes.pending_xref))
- assert_refnode(refnodes[0], None, None, u'TopLevel', u'class')
- assert_refnode(refnodes[1], None, None, u'top_level', u'func')
- assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'func')
- assert_refnode(refnodes[3], None, u'NestedParentA',
- u'NestedChildA.subchild_2', u'func')
- assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'func')
- assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='')
- assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA', u'class')
- assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA',
- u'subchild_2', u'func')
- assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA',
- u'NestedParentA.child_1', u'func')
- assert_refnode(refnodes[9], None, u'NestedParentA',
- u'NestedChildA.subchild_1', u'func')
- assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'func')
- assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB',
- u'class')
- assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA',
- u'class')
+ assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
+ assert_refnode(refnodes[1], None, None, 'top_level', 'func')
+ assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'func')
+ assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'func')
+ assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'func')
+ assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')
+ assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')
+ assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'func')
+ assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',
+ 'NestedParentA.child_1', 'func')
+ assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'func')
+ assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'func')
+ assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')
+ assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')
assert len(refnodes) == 13
doctree = app.env.get_doctree('module')
@@ -118,24 +112,23 @@ def test_domain_js_find_obj(app, status, warning):
app.builder.build_all()
- assert (find_obj(None, None, u'NONEXISTANT', u'class') ==
- (None, None))
- assert (find_obj(None, None, u'NestedParentA', u'class') ==
- (u'NestedParentA', (u'roles', u'class')))
- assert (find_obj(None, None, u'NestedParentA.NestedChildA', u'class') ==
- (u'NestedParentA.NestedChildA', (u'roles', u'class')))
- assert (find_obj(None, 'NestedParentA', u'NestedChildA', u'class') ==
- (u'NestedParentA.NestedChildA', (u'roles', u'class')))
- assert (find_obj(None, None, u'NestedParentA.NestedChildA.subchild_1', u'func') ==
- (u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function')))
- assert (find_obj(None, u'NestedParentA', u'NestedChildA.subchild_1', u'func') ==
- (u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function')))
- assert (find_obj(None, u'NestedParentA.NestedChildA', u'subchild_1', u'func') ==
- (u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'function')))
- assert (find_obj(u'module_a.submodule', u'ModTopLevel', u'mod_child_2', u'meth') ==
- (u'module_a.submodule.ModTopLevel.mod_child_2', (u'module', u'method')))
- assert (find_obj(u'module_b.submodule', u'ModTopLevel', u'module_a.submodule', u'mod') ==
- (u'module_a.submodule', (u'module', u'module')))
+ assert (find_obj(None, None, 'NONEXISTANT', 'class') == (None, None))
+ assert (find_obj(None, None, 'NestedParentA', 'class') ==
+ ('NestedParentA', ('roles', 'class')))
+ assert (find_obj(None, None, 'NestedParentA.NestedChildA', 'class') ==
+ ('NestedParentA.NestedChildA', ('roles', 'class')))
+ assert (find_obj(None, 'NestedParentA', 'NestedChildA', 'class') ==
+ ('NestedParentA.NestedChildA', ('roles', 'class')))
+ assert (find_obj(None, None, 'NestedParentA.NestedChildA.subchild_1', 'func') ==
+ ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
+ assert (find_obj(None, 'NestedParentA', 'NestedChildA.subchild_1', 'func') ==
+ ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
+ assert (find_obj(None, 'NestedParentA.NestedChildA', 'subchild_1', 'func') ==
+ ('NestedParentA.NestedChildA.subchild_1', ('roles', 'function')))
+ assert (find_obj('module_a.submodule', 'ModTopLevel', 'mod_child_2', 'meth') ==
+ ('module_a.submodule.ModTopLevel.mod_child_2', ('module', 'method')))
+ assert (find_obj('module_b.submodule', 'ModTopLevel', 'module_a.submodule', 'mod') ==
+ ('module_a.submodule', ('module', 'module')))
def test_get_full_qualified_name():
diff --git a/tests/test_domain_py.py b/tests/test_domain_py.py
index 44205f539..9d24d138e 100644
--- a/tests/test_domain_py.py
+++ b/tests/test_domain_py.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_domain_py
~~~~~~~~~~~~~~
@@ -12,7 +11,6 @@
import pytest
from docutils import nodes
from mock import Mock
-from six import text_type
from sphinx import addnodes
from sphinx.domains.python import py_sig_re, _pseudo_parse_arglist, PythonDomain
@@ -31,22 +29,22 @@ def parse(sig):
def test_function_signatures():
rv = parse('func(a=1) -> int object')
- assert text_type(rv) == u'a=1'
+ assert rv == 'a=1'
rv = parse('func(a=1, [b=None])')
- assert text_type(rv) == u'a=1, [b=None]'
+ assert rv == 'a=1, [b=None]'
rv = parse('func(a=1[, b=None])')
- assert text_type(rv) == u'a=1, [b=None]'
+ assert rv == 'a=1, [b=None]'
rv = parse("compile(source : string, filename, symbol='file')")
- assert text_type(rv) == u"source : string, filename, symbol='file'"
+ assert rv == "source : string, filename, symbol='file'"
rv = parse('func(a=[], [b=None])')
- assert text_type(rv) == u'a=[], [b=None]'
+ assert rv == 'a=[], [b=None]'
rv = parse('func(a=[][, b=None])')
- assert text_type(rv) == u'a=[], [b=None]'
+ assert rv == 'a=[], [b=None]'
@pytest.mark.sphinx('dummy', testroot='domain-py')
@@ -70,26 +68,20 @@ def test_domain_py_xrefs(app, status, warning):
doctree = app.env.get_doctree('roles')
refnodes = list(doctree.traverse(addnodes.pending_xref))
- assert_refnode(refnodes[0], None, None, u'TopLevel', u'class')
- assert_refnode(refnodes[1], None, None, u'top_level', u'meth')
- assert_refnode(refnodes[2], None, u'NestedParentA', u'child_1', u'meth')
- assert_refnode(refnodes[3], None, u'NestedParentA',
- u'NestedChildA.subchild_2', u'meth')
- assert_refnode(refnodes[4], None, u'NestedParentA', u'child_2', u'meth')
- assert_refnode(refnodes[5], False, u'NestedParentA', u'any_child', domain='')
- assert_refnode(refnodes[6], None, u'NestedParentA', u'NestedChildA',
- u'class')
- assert_refnode(refnodes[7], None, u'NestedParentA.NestedChildA',
- u'subchild_2', u'meth')
- assert_refnode(refnodes[8], None, u'NestedParentA.NestedChildA',
- u'NestedParentA.child_1', u'meth')
- assert_refnode(refnodes[9], None, u'NestedParentA',
- u'NestedChildA.subchild_1', u'meth')
- assert_refnode(refnodes[10], None, u'NestedParentB', u'child_1', u'meth')
- assert_refnode(refnodes[11], None, u'NestedParentB', u'NestedParentB',
- u'class')
- assert_refnode(refnodes[12], None, None, u'NestedParentA.NestedChildA',
- u'class')
+ assert_refnode(refnodes[0], None, None, 'TopLevel', 'class')
+ assert_refnode(refnodes[1], None, None, 'top_level', 'meth')
+ assert_refnode(refnodes[2], None, 'NestedParentA', 'child_1', 'meth')
+ assert_refnode(refnodes[3], None, 'NestedParentA', 'NestedChildA.subchild_2', 'meth')
+ assert_refnode(refnodes[4], None, 'NestedParentA', 'child_2', 'meth')
+ assert_refnode(refnodes[5], False, 'NestedParentA', 'any_child', domain='')
+ assert_refnode(refnodes[6], None, 'NestedParentA', 'NestedChildA', 'class')
+ assert_refnode(refnodes[7], None, 'NestedParentA.NestedChildA', 'subchild_2', 'meth')
+ assert_refnode(refnodes[8], None, 'NestedParentA.NestedChildA',
+ 'NestedParentA.child_1', 'meth')
+ assert_refnode(refnodes[9], None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth')
+ assert_refnode(refnodes[10], None, 'NestedParentB', 'child_1', 'meth')
+ assert_refnode(refnodes[11], None, 'NestedParentB', 'NestedParentB', 'class')
+ assert_refnode(refnodes[12], None, None, 'NestedParentA.NestedChildA', 'class')
assert len(refnodes) == 13
doctree = app.env.get_doctree('module')
@@ -169,20 +161,19 @@ def test_domain_py_find_obj(app, status, warning):
app.builder.build_all()
- assert (find_obj(None, None, u'NONEXISTANT', u'class') ==
- [])
- assert (find_obj(None, None, u'NestedParentA', u'class') ==
- [(u'NestedParentA', (u'roles', u'class'))])
- assert (find_obj(None, None, u'NestedParentA.NestedChildA', u'class') ==
- [(u'NestedParentA.NestedChildA', (u'roles', u'class'))])
- assert (find_obj(None, 'NestedParentA', u'NestedChildA', u'class') ==
- [(u'NestedParentA.NestedChildA', (u'roles', u'class'))])
- assert (find_obj(None, None, u'NestedParentA.NestedChildA.subchild_1', u'meth') ==
- [(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
- assert (find_obj(None, u'NestedParentA', u'NestedChildA.subchild_1', u'meth') ==
- [(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
- assert (find_obj(None, u'NestedParentA.NestedChildA', u'subchild_1', u'meth') ==
- [(u'NestedParentA.NestedChildA.subchild_1', (u'roles', u'method'))])
+ assert (find_obj(None, None, 'NONEXISTANT', 'class') == [])
+ assert (find_obj(None, None, 'NestedParentA', 'class') ==
+ [('NestedParentA', ('roles', 'class'))])
+ assert (find_obj(None, None, 'NestedParentA.NestedChildA', 'class') ==
+ [('NestedParentA.NestedChildA', ('roles', 'class'))])
+ assert (find_obj(None, 'NestedParentA', 'NestedChildA', 'class') ==
+ [('NestedParentA.NestedChildA', ('roles', 'class'))])
+ assert (find_obj(None, None, 'NestedParentA.NestedChildA.subchild_1', 'meth') ==
+ [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
+ assert (find_obj(None, 'NestedParentA', 'NestedChildA.subchild_1', 'meth') ==
+ [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
+ assert (find_obj(None, 'NestedParentA.NestedChildA', 'subchild_1', 'meth') ==
+ [('NestedParentA.NestedChildA.subchild_1', ('roles', 'method'))])
def test_get_full_qualified_name():
diff --git a/tests/test_domain_rst.py b/tests/test_domain_rst.py
index 8cfe7e284..c446308f5 100644
--- a/tests/test_domain_rst.py
+++ b/tests/test_domain_rst.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_rst_domain
~~~~~~~~~~~~~~~
@@ -13,14 +12,14 @@ from sphinx.domains.rst import parse_directive
def test_parse_directive():
- s = parse_directive(u' foö ')
- assert s == (u'foö', '')
+ s = parse_directive(' foö ')
+ assert s == ('foö', '')
- s = parse_directive(u' .. foö :: ')
- assert s == (u'foö', ' ')
+ s = parse_directive(' .. foö :: ')
+ assert s == ('foö', ' ')
- s = parse_directive(u'.. foö:: args1 args2')
- assert s == (u'foö', ' args1 args2')
+ s = parse_directive('.. foö:: args1 args2')
+ assert s == ('foö', ' args1 args2')
s = parse_directive('.. :: bar')
assert s == ('.. :: bar', '')
diff --git a/tests/test_domain_std.py b/tests/test_domain_std.py
index 57d0bf185..b91a196b0 100644
--- a/tests/test_domain_std.py
+++ b/tests/test_domain_std.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_domain_std
~~~~~~~~~~~~~~~
diff --git a/tests/test_environment.py b/tests/test_environment.py
index 1ab60b539..807d2b2e9 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_env
~~~~~~~~
@@ -69,3 +68,47 @@ def test_object_inventory(app):
assert app.env.domains['py'].data is app.env.domaindata['py']
assert app.env.domains['c'].data is app.env.domaindata['c']
+
+
+@pytest.mark.sphinx('dummy', testroot='basic')
+def test_env_relfn2path(app):
+ # relative filename and root document
+ relfn, absfn = app.env.relfn2path('logo.jpg', 'index')
+ assert relfn == 'logo.jpg'
+ assert absfn == app.srcdir / 'logo.jpg'
+
+ # absolute filename and root document
+ relfn, absfn = app.env.relfn2path('/logo.jpg', 'index')
+ assert relfn == 'logo.jpg'
+ assert absfn == app.srcdir / 'logo.jpg'
+
+ # relative filename and a document in subdir
+ relfn, absfn = app.env.relfn2path('logo.jpg', 'subdir/index')
+ assert relfn == 'subdir/logo.jpg'
+ assert absfn == app.srcdir / 'subdir' / 'logo.jpg'
+
+ # absolute filename and a document in subdir
+ relfn, absfn = app.env.relfn2path('/logo.jpg', 'subdir/index')
+ assert relfn == 'logo.jpg'
+ assert absfn == app.srcdir / 'logo.jpg'
+
+ # relative filename having subdir
+ relfn, absfn = app.env.relfn2path('images/logo.jpg', 'index')
+ assert relfn == 'images/logo.jpg'
+ assert absfn == app.srcdir / 'images' / 'logo.jpg'
+
+ # relative path traversal
+ relfn, absfn = app.env.relfn2path('../logo.jpg', 'index')
+ assert relfn == '../logo.jpg'
+ assert absfn == app.srcdir.parent / 'logo.jpg'
+
+ # omit docname (w/ current docname)
+ app.env.temp_data['docname'] = 'subdir/document'
+ relfn, absfn = app.env.relfn2path('images/logo.jpg')
+ assert relfn == 'subdir/images/logo.jpg'
+ assert absfn == app.srcdir / 'subdir' / 'images' / 'logo.jpg'
+
+ # omit docname (w/o current docname)
+ app.env.temp_data.clear()
+ with pytest.raises(KeyError):
+ app.env.relfn2path('images/logo.jpg')
diff --git a/tests/test_environment_indexentries.py b/tests/test_environment_indexentries.py
index c3da93ef1..933bde60f 100644
--- a/tests/test_environment_indexentries.py
+++ b/tests/test_environment_indexentries.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_environment_indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -31,25 +30,25 @@ def test_create_single_index():
('single', 'pip; install', 'id3', '', None),
('single', 'pip; upgrade', 'id4', '', None),
('single', 'Sphinx', 'id5', '', None),
- ('single', u'Ель', 'id6', '', None),
- ('single', u'ёлка', 'id7', '', None),
- ('single', u'‏תירבע‎', 'id8', '', None),
- ('single', u'9-symbol', 'id9', '', None),
- ('single', u'&-symbol', 'id10', '', None),
+ ('single', 'Ель', 'id6', '', None),
+ ('single', 'ёлка', 'id7', '', None),
+ ('single', '‏תירבע‎', 'id8', '', None),
+ ('single', '9-symbol', 'id9', '', None),
+ ('single', '&-symbol', 'id10', '', None),
],
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 6
- assert index[0] == (u'Symbols', [(u'&-symbol', [[('', '#id10')], [], None]),
- (u'9-symbol', [[('', '#id9')], [], None])])
- assert index[1] == (u'D', [(u'docutils', [[('', '#id1')], [], None])])
- assert index[2] == (u'P', [(u'pip', [[], [(u'install', [('', '#id3')]),
- (u'upgrade', [('', '#id4')])], None]),
- (u'Python', [[('', '#id2')], [], None])])
- assert index[3] == (u'S', [(u'Sphinx', [[('', '#id5')], [], None])])
- assert index[4] == (u'Е', [(u'ёлка', [[('', '#id7')], [], None]),
- (u'Ель', [[('', '#id6')], [], None])])
- assert index[5] == (u'ת', [(u'‏תירבע‎', [[('', '#id8')], [], None])])
+ assert index[0] == ('Symbols', [('&-symbol', [[('', '#id10')], [], None]),
+ ('9-symbol', [[('', '#id9')], [], None])])
+ assert index[1] == ('D', [('docutils', [[('', '#id1')], [], None])])
+ assert index[2] == ('P', [('pip', [[], [('install', [('', '#id3')]),
+ ('upgrade', [('', '#id4')])], None]),
+ ('Python', [[('', '#id2')], [], None])])
+ assert index[3] == ('S', [('Sphinx', [[('', '#id5')], [], None])])
+ assert index[4] == ('Е', [('ёлка', [[('', '#id7')], [], None]),
+ ('Ель', [[('', '#id6')], [], None])])
+ assert index[5] == ('ת', [('‏תירבע‎', [[('', '#id8')], [], None])])
def test_create_pair_index():
@@ -63,15 +62,15 @@ def test_create_pair_index():
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 5
- assert index[0] == (u'D',
- [(u'documentation tool', [[], [(u'Sphinx', [('', '#id3')])], None]),
- (u'docutils', [[], [(u'reStructuredText', [('', '#id1')])], None])])
- assert index[1] == (u'I', [(u'interpreter', [[], [(u'Python', [('', '#id2')])], None])])
- assert index[2] == (u'P', [(u'Python', [[], [(u'interpreter', [('', '#id2')])], None])])
- assert index[3] == (u'R',
- [(u'reStructuredText', [[], [(u'docutils', [('', '#id1')])], None])])
- assert index[4] == (u'S',
- [(u'Sphinx', [[], [(u'documentation tool', [('', '#id3')])], None])])
+ assert index[0] == ('D',
+ [('documentation tool', [[], [('Sphinx', [('', '#id3')])], None]),
+ ('docutils', [[], [('reStructuredText', [('', '#id1')])], None])])
+ assert index[1] == ('I', [('interpreter', [[], [('Python', [('', '#id2')])], None])])
+ assert index[2] == ('P', [('Python', [[], [('interpreter', [('', '#id2')])], None])])
+ assert index[3] == ('R',
+ [('reStructuredText', [[], [('docutils', [('', '#id1')])], None])])
+ assert index[4] == ('S',
+ [('Sphinx', [[], [('documentation tool', [('', '#id3')])], None])])
def test_create_triple_index():
@@ -84,12 +83,12 @@ def test_create_triple_index():
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 5
- assert index[0] == (u'B', [(u'bar', [[], [(u'baz, foo', [('', '#id1')])], None]),
- (u'baz', [[], [(u'foo bar', [('', '#id1')])], None])])
- assert index[1] == (u'F', [(u'foo', [[], [(u'bar baz', [('', '#id1')])], None])])
- assert index[2] == (u'P', [(u'Python', [[], [(u'Sphinx reST', [('', '#id2')])], None])])
- assert index[3] == (u'R', [(u'reST', [[], [(u'Python Sphinx', [('', '#id2')])], None])])
- assert index[4] == (u'S', [(u'Sphinx', [[], [(u'reST, Python', [('', '#id2')])], None])])
+ assert index[0] == ('B', [('bar', [[], [('baz, foo', [('', '#id1')])], None]),
+ ('baz', [[], [('foo bar', [('', '#id1')])], None])])
+ assert index[1] == ('F', [('foo', [[], [('bar baz', [('', '#id1')])], None])])
+ assert index[2] == ('P', [('Python', [[], [('Sphinx reST', [('', '#id2')])], None])])
+ assert index[3] == ('R', [('reST', [[], [('Python Sphinx', [('', '#id2')])], None])])
+ assert index[4] == ('S', [('Sphinx', [[], [('reST, Python', [('', '#id2')])], None])])
def test_create_see_index():
@@ -105,9 +104,9 @@ def test_create_see_index():
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3
- assert index[0] == (u'D', [(u'docutils', [[], [(u'see reStructuredText', [])], None])])
- assert index[1] == (u'P', [(u'Python', [[], [(u'see interpreter', [])], None])])
- assert index[2] == (u'S', [(u'Sphinx', [[], [(u'see documentation tool', [])], None])])
+ assert index[0] == ('D', [('docutils', [[], [('see reStructuredText', [])], None])])
+ assert index[1] == ('P', [('Python', [[], [('see interpreter', [])], None])])
+ assert index[2] == ('S', [('Sphinx', [[], [('see documentation tool', [])], None])])
def test_create_seealso_index():
@@ -123,12 +122,9 @@ def test_create_seealso_index():
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3
- assert index[0] == (u'D',
- [(u'docutils', [[], [(u'see also reStructuredText', [])], None])])
- assert index[1] == (u'P',
- [(u'Python', [[], [(u'see also interpreter', [])], None])])
- assert index[2] == (u'S',
- [(u'Sphinx', [[], [(u'see also documentation tool', [])], None])])
+ assert index[0] == ('D', [('docutils', [[], [('see also reStructuredText', [])], None])])
+ assert index[1] == ('P', [('Python', [[], [('see also interpreter', [])], None])])
+ assert index[2] == ('S', [('Sphinx', [[], [('see also documentation tool', [])], None])])
def test_create_index_by_key():
@@ -137,11 +133,11 @@ def test_create_index_by_key():
'index': [
('single', 'docutils', 'id1', '', None),
('single', 'Python', 'id2', '', None),
- ('single', u'スフィンクス', 'id3', '', u'ス'),
+ ('single', 'スフィンクス', 'id3', '', 'ス'),
],
})
index = IndexEntries(env).create_index(dummy_builder)
assert len(index) == 3
- assert index[0] == (u'D', [(u'docutils', [[('', '#id1')], [], None])])
- assert index[1] == (u'P', [(u'Python', [[('', '#id2')], [], None])])
- assert index[2] == (u'ス', [(u'スフィンクス', [[('', '#id3')], [], u'ス'])])
+ assert index[0] == ('D', [('docutils', [[('', '#id1')], [], None])])
+ assert index[1] == ('P', [('Python', [[('', '#id2')], [], None])])
+ assert index[2] == ('ス', [('スフィンクス', [[('', '#id3')], [], 'ス'])])
diff --git a/tests/test_environment_toctree.py b/tests/test_environment_toctree.py
index 618b7afe3..7fbce0475 100644
--- a/tests/test_environment_toctree.py
+++ b/tests/test_environment_toctree.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_environment_toctree
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -37,7 +36,7 @@ def test_process_doc(app):
list_item)])
assert_node(toctree[0][0],
- [compact_paragraph, reference, u"Welcome to Sphinx Tests’s documentation!"])
+ [compact_paragraph, reference, "Welcome to Sphinx Tests’s documentation!"])
assert_node(toctree[0][0][0], reference, anchorname='')
assert_node(toctree[0][1][0], addnodes.toctree,
caption="Table of Contents", glob=False, hidden=False,
@@ -152,7 +151,7 @@ def test_get_toc_for(app):
addnodes.toctree)])],
[list_item, compact_paragraph])]) # [2][0]
assert_node(toctree[0][0],
- [compact_paragraph, reference, u"Welcome to Sphinx Tests’s documentation!"])
+ [compact_paragraph, reference, "Welcome to Sphinx Tests’s documentation!"])
assert_node(toctree[0][1][2],
([compact_paragraph, reference, "subsection"],
[bullet_list, list_item, compact_paragraph, reference, "subsubsection"]))
@@ -179,7 +178,7 @@ def test_get_toc_for_only(app):
addnodes.toctree)])],
[list_item, compact_paragraph])]) # [2][0]
assert_node(toctree[0][0],
- [compact_paragraph, reference, u"Welcome to Sphinx Tests’s documentation!"])
+ [compact_paragraph, reference, "Welcome to Sphinx Tests’s documentation!"])
assert_node(toctree[0][1][1],
([compact_paragraph, reference, "Section for HTML"],
[bullet_list, addnodes.toctree]))
diff --git a/tests/test_ext_apidoc.py b/tests/test_ext_apidoc.py
index 3d1517929..30de0c209 100644
--- a/tests/test_ext_apidoc.py
+++ b/tests/test_ext_apidoc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_apidoc
~~~~~~~~~~~
@@ -9,8 +8,6 @@
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
from collections import namedtuple
import pytest
@@ -268,10 +265,10 @@ def test_excludes_module_should_not_be_skipped(apidoc):
@pytest.mark.apidoc(
coderoot='test-root',
options=[
- '--doc-project', u'プロジェクト名',
- '--doc-author', u'著者名',
- '--doc-version', u'バージョン',
- '--doc-release', u'リリース',
+ '--doc-project', 'プロジェクト名',
+ '--doc-author', '著者名',
+ '--doc-version', 'バージョン',
+ '--doc-release', 'リリース',
],
)
def test_multibyte_parameters(make_app, apidoc):
@@ -282,10 +279,10 @@ def test_multibyte_parameters(make_app, apidoc):
conf_py = (outdir / 'conf.py').text()
conf_py_ = remove_unicode_literals(conf_py)
- assert u"project = 'プロジェクト名'" in conf_py_
- assert u"author = '著者名'" in conf_py_
- assert u"version = 'バージョン'" in conf_py_
- assert u"release = 'リリース'" in conf_py_
+ assert "project = 'プロジェクト名'" in conf_py_
+ assert "author = '著者名'" in conf_py_
+ assert "version = 'バージョン'" in conf_py_
+ assert "release = 'リリース'" in conf_py_
app = make_app('text', srcdir=outdir)
app.build()
diff --git a/tests/test_ext_autodoc.py b/tests/test_ext_autodoc.py
index 44470771d..2ee478d74 100644
--- a/tests/test_ext_autodoc.py
+++ b/tests/test_ext_autodoc.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_autodoc
~~~~~~~~~~~~
@@ -20,7 +19,7 @@ from sphinx import addnodes
def test_autodoc(app, status, warning):
app.builder.build_all()
- content = pickle.loads((app.doctreedir / 'contents.doctree').bytes())
+ content = pickle.loads((app.doctreedir / 'index.doctree').bytes())
assert isinstance(content[3], addnodes.desc)
assert content[3][0].astext() == 'autodoc_dummy_module.test'
assert content[3][1].astext() == 'Dummy function using dummy.*'
diff --git a/tests/test_ext_autodoc_importer.py b/tests/test_ext_autodoc_importer.py
index fe0c9f2bc..3ffb68d38 100644
--- a/tests/test_ext_autodoc_importer.py
+++ b/tests/test_ext_autodoc_importer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_autodoc_importer
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -9,7 +8,11 @@
:license: BSD, see LICENSE for details.
"""
-from sphinx.ext.autodoc.importer import _MockObject
+import sys
+
+import pytest
+
+from sphinx.ext.autodoc.importer import _MockModule, _MockObject, mock
def test_MockObject():
@@ -29,3 +32,31 @@ def test_MockObject():
assert isinstance(obj, SubClass)
assert obj.method() == "string"
assert isinstance(obj.other_method(), SubClass)
+
+
+def test_mock():
+ modname = 'sphinx.unknown'
+ submodule = modname + '.submodule'
+ assert modname not in sys.modules
+ with pytest.raises(ImportError):
+ __import__(modname)
+
+ with mock([modname]):
+ __import__(modname)
+ assert modname in sys.modules
+ assert isinstance(sys.modules[modname], _MockModule)
+
+ # submodules are also mocked
+ __import__(submodule)
+ assert submodule in sys.modules
+ assert isinstance(sys.modules[submodule], _MockModule)
+
+ assert modname not in sys.modules
+ with pytest.raises(ImportError):
+ __import__(modname)
+
+
+def test_mock_does_not_follow_upper_modules():
+ with mock(['sphinx.unknown.module']):
+ with pytest.raises(ImportError):
+ __import__('sphinx.unknown')
diff --git a/tests/test_ext_autosectionlabel.py b/tests/test_ext_autosectionlabel.py
index e26b3f3ed..977dd7eee 100644
--- a/tests/test_ext_autosectionlabel.py
+++ b/tests/test_ext_autosectionlabel.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_autosectionlabel
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -36,10 +35,10 @@ def test_autosectionlabel_html(app, status, warning):
assert re.search(html, content, re.S)
# for smart_quotes (refs: #4027)
- html = (u'<li><a class="reference internal" '
- u'href="#this-one-s-got-an-apostrophe">'
- u'<span class="std std-ref">This one’s got an apostrophe'
- u'</span></a></li>')
+ html = ('<li><a class="reference internal" '
+ 'href="#this-one-s-got-an-apostrophe">'
+ '<span class="std std-ref">This one’s got an apostrophe'
+ '</span></a></li>')
assert re.search(html, content, re.S)
diff --git a/tests/test_ext_autosummary.py b/tests/test_ext_autosummary.py
index 9b98a59d1..d437ff86a 100644
--- a/tests/test_ext_autosummary.py
+++ b/tests/test_ext_autosummary.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_autosummary
~~~~~~~~~~~~~~~~
@@ -9,8 +8,9 @@
:license: BSD, see LICENSE for details.
"""
+from io import StringIO
+
import pytest
-from six import iteritems, StringIO
from sphinx.ext.autosummary import mangle_signature, import_by_name, extract_summary
from sphinx.testing.util import etree_parse
@@ -51,8 +51,8 @@ def test_mangle_signature():
TEST = [[y.strip() for y in x.split("::")] for x in TEST.split("\n")
if '::' in x]
for inp, outp in TEST:
- res = mangle_signature(inp).strip().replace(u"\u00a0", " ")
- assert res == outp, (u"'%s' -> '%s' != '%s'" % (inp, res, outp))
+ res = mangle_signature(inp).strip().replace("\u00a0", " ")
+ assert res == outp, ("'%s' -> '%s' != '%s'" % (inp, res, outp))
def test_extract_summary(capsys):
@@ -140,7 +140,7 @@ def test_get_items_summary(make_app, app_params):
'C.prop_attr2': 'This is a attribute docstring',
'C.C2': 'This is a nested inner class docstring',
}
- for key, expected in iteritems(expected_values):
+ for key, expected in expected_values.items():
assert autosummary_items[key][2] == expected, 'Summary for %s was %r -'\
' expected %r' % (key, autosummary_items[key], expected)
diff --git a/tests/test_ext_coverage.py b/tests/test_ext_coverage.py
index a8f222a00..688f679fd 100644
--- a/tests/test_ext_coverage.py
+++ b/tests/test_ext_coverage.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_coverage
~~~~~~~~~~~~~
diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py
index f7b6ca5f8..585f84d00 100644
--- a/tests/test_ext_doctest.py
+++ b/tests/test_ext_doctest.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_doctest
~~~~~~~~~~~~
@@ -11,10 +10,10 @@
import os
from collections import Counter
+from docutils import nodes
import pytest
from packaging.specifiers import InvalidSpecifier
from packaging.version import InvalidVersion
-from six import PY2
from sphinx.ext.doctest import is_allowed_version
@@ -33,6 +32,23 @@ def test_build(app, status, warning):
assert cleanup_called == 3, 'testcleanup did not get executed enough times'
+@pytest.mark.sphinx('dummy', testroot='ext-doctest')
+def test_highlight_language_default(app, status, warning):
+ app.build()
+ doctree = app.env.get_doctree('doctest')
+ for node in doctree.traverse(nodes.literal_block):
+ assert node['language'] in ('python3', 'pycon3', 'none')
+
+
+@pytest.mark.sphinx('dummy', testroot='ext-doctest',
+ confoverrides={'highlight_language': 'python'})
+def test_highlight_language_python2(app, status, warning):
+ app.build()
+ doctree = app.env.get_doctree('doctest')
+ for node in doctree.traverse(nodes.literal_block):
+ assert node['language'] in ('python', 'pycon', 'none')
+
+
def test_is_allowed_version():
assert is_allowed_version('<3.4', '3.3') is True
assert is_allowed_version('<3.4', '3.3') is True
@@ -112,9 +128,6 @@ def record(directive, part, should_skip):
return 'Recorded {} {} {}'.format(directive, part, should_skip)
-@pytest.mark.xfail(
- PY2, reason='node.source points to document instead of filename',
-)
@pytest.mark.sphinx('doctest', testroot='ext-doctest-with-autodoc')
def test_reporting_with_autodoc(app, status, warning, capfd):
# Patch builder to get a copy of the output
diff --git a/tests/test_ext_githubpages.py b/tests/test_ext_githubpages.py
index 18ee51480..69cb958f7 100644
--- a/tests/test_ext_githubpages.py
+++ b/tests/test_ext_githubpages.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_githubpages
~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_ext_graphviz.py b/tests/test_ext_graphviz.py
index d0358b407..c632d6a20 100644
--- a/tests/test_ext_graphviz.py
+++ b/tests/test_ext_graphviz.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_graphviz
~~~~~~~~~~~~~~~~~
@@ -35,7 +34,7 @@ def test_graphviz_png_html(app, status, warning):
html = (r'<div class="figure align-right" .*?>\s*'
r'<div class="graphviz"><img .*?/></div>\s*<p class="caption">'
- r'<span class="caption-text">on right</span>.*</p>\s*</div>')
+ r'<span class="caption-text">on <em>right</em></span>.*</p>\s*</div>')
assert re.search(html, content, re.S)
html = (r'<div align=\"center\" class=\"align-center\">'
@@ -73,7 +72,7 @@ def test_graphviz_svg_html(app, status, warning):
r'foo -&gt; bar\n'
r'}</p></object></div>\n'
r'<p class=\"caption\"><span class=\"caption-text\">'
- r'on right</span>.*</p>\n'
+ r'on <em>right</em></span>.*</p>\n'
r'</div>')
assert re.search(html, content, re.S)
@@ -102,7 +101,8 @@ def test_graphviz_latex(app, status, warning):
macro = ('\\\\begin{wrapfigure}{r}{0pt}\n\\\\centering\n'
'\\\\sphinxincludegraphics\\[\\]{graphviz-\\w+.pdf}\n'
- '\\\\caption{on right}\\\\label{.*}\\\\end{wrapfigure}')
+ '\\\\caption{on \\\\sphinxstyleemphasis{right}}'
+ '\\\\label{.*}\\\\end{wrapfigure}')
assert re.search(macro, content, re.S)
macro = (r'\{\\hfill'
diff --git a/tests/test_ext_ifconfig.py b/tests/test_ext_ifconfig.py
index b4c941512..df6b5c925 100644
--- a/tests/test_ext_ifconfig.py
+++ b/tests/test_ext_ifconfig.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_ifconfig
~~~~~~~~~~~~~~~~~
diff --git a/tests/test_ext_imgconverter.py b/tests/test_ext_imgconverter.py
index 330c3eeed..520d3624a 100644
--- a/tests/test_ext_imgconverter.py
+++ b/tests/test_ext_imgconverter.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_imgconverter
~~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_ext_inheritance.py b/tests/test_ext_inheritance.py
index 8a8de8369..e8787427d 100644
--- a/tests/test_ext_inheritance.py
+++ b/tests/test_ext_inheritance.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_inheritance
~~~~~~~~~~~~~~~~
diff --git a/tests/test_ext_inheritance_diagram.py b/tests/test_ext_inheritance_diagram.py
index 711e0712c..dc9dd51c9 100644
--- a/tests/test_ext_inheritance_diagram.py
+++ b/tests/test_ext_inheritance_diagram.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_inheritance_diagram
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py
index b9bb8421b..e81101300 100644
--- a/tests/test_ext_intersphinx.py
+++ b/tests/test_ext_intersphinx.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_intersphinx
~~~~~~~~~~~~~~~~
@@ -21,7 +20,7 @@ from test_util_inventory import inventory_v2, inventory_v2_not_having_version
from sphinx import addnodes
from sphinx.ext.intersphinx import (
- load_mappings, missing_reference, _strip_basic_auth,
+ load_mappings, missing_reference, normalize_intersphinx_mapping, _strip_basic_auth,
_get_safe_url, fetch_inventory, INVENTORY_FILENAME, inspect_main
)
from sphinx.ext.intersphinx import setup as intersphinx_setup
@@ -47,7 +46,7 @@ def reference_check(app, *args, **kwds):
@mock.patch('sphinx.ext.intersphinx._read_from_url')
def test_fetch_inventory_redirection(_read_from_url, InventoryFile, app, status, warning):
intersphinx_setup(app)
- _read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode('utf-8')
+ _read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode()
# same uri and inv, not redirected
_read_from_url().url = 'http://hostname/' + INVENTORY_FILENAME
@@ -100,6 +99,7 @@ def test_missing_reference(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
inv = app.env.intersphinx_inventory
@@ -175,6 +175,7 @@ def test_missing_reference_pydomain(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
# no context data
@@ -199,6 +200,7 @@ def test_missing_reference_stddomain(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
# no context data
@@ -230,6 +232,7 @@ def test_missing_reference_cppdomain(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
app.build()
@@ -256,6 +259,7 @@ def test_missing_reference_jsdomain(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
# no context data
@@ -281,6 +285,7 @@ def test_inventory_not_having_version(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
rn = reference_check(app, 'py', 'mod', 'module1', 'foo')
@@ -308,6 +313,7 @@ def test_load_mappings_warnings(tempdir, app, status, warning):
app.config.intersphinx_cache_limit = 0
# load the inventory and check if it's done correctly
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
assert warning.getvalue().count('\n') == 1
@@ -321,6 +327,7 @@ def test_load_mappings_fallback(tempdir, app, status, warning):
app.config.intersphinx_mapping = {
'fallback': ('https://docs.python.org/py3k/', '/invalid/inventory/path'),
}
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
assert "failed to reach any of the inventories" in warning.getvalue()
@@ -336,6 +343,7 @@ def test_load_mappings_fallback(tempdir, app, status, warning):
'fallback': ('https://docs.python.org/py3k/', ('/invalid/inventory/path',
inv_file)),
}
+ normalize_intersphinx_mapping(app, app.config)
load_mappings(app)
assert "encountered some issues with some of the inventories" in status.getvalue()
assert "" == warning.getvalue()
diff --git a/tests/test_ext_math.py b/tests/test_ext_math.py
index 796ed852c..37dc0bd91 100644
--- a/tests/test_ext_math.py
+++ b/tests/test_ext_math.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_math
~~~~~~~~~~~~~
@@ -23,12 +22,10 @@ from sphinx.testing.util import assert_node
def has_binary(binary):
try:
subprocess.check_output([binary])
- except OSError as e:
- if e.errno == errno.ENOENT:
- # handle file not found error.
- return False
- else:
- return True
+ except FileNotFoundError:
+ return False
+ except OSError:
+ pass
return True
@@ -42,14 +39,14 @@ def test_jsmath(app, status, warning):
assert '<div class="math notranslate nohighlight">\na^2 + b^2 = c^2</div>' in content
assert ('<div class="math notranslate nohighlight">\n\\begin{split}a + 1 &lt; '
'b\\end{split}</div>' in content)
- assert (u'<span class="eqno">(1)<a class="headerlink" href="#equation-foo" '
- u'title="Permalink to this equation">\xb6</a></span>'
- u'<div class="math notranslate nohighlight" id="equation-foo">'
+ assert ('<span class="eqno">(1)<a class="headerlink" href="#equation-foo" '
+ 'title="Permalink to this equation">\xb6</a></span>'
+ '<div class="math notranslate nohighlight" id="equation-foo">'
'\ne^{i\\pi} = 1</div>' in content)
- assert (u'<span class="eqno">(2)<a class="headerlink" href="#equation-math-0" '
- u'title="Permalink to this equation">\xb6</a></span>'
- u'<div class="math notranslate nohighlight" id="equation-math-0">\n'
- u'e^{ix} = \\cos x + i\\sin x</div>' in content)
+ assert ('<span class="eqno">(2)<a class="headerlink" href="#equation-math-0" '
+ 'title="Permalink to this equation">\xb6</a></span>'
+ '<div class="math notranslate nohighlight" id="equation-math-0">\n'
+ 'e^{ix} = \\cos x + i\\sin x</div>' in content)
assert '<div class="math notranslate nohighlight">\nn \\in \\mathbb N</div>' in content
assert '<div class="math notranslate nohighlight">\na + 1 &lt; b</div>' in content
diff --git a/tests/test_ext_napoleon.py b/tests/test_ext_napoleon.py
index 31130ad54..b02f9630f 100644
--- a/tests/test_ext_napoleon.py
+++ b/tests/test_ext_napoleon.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_napoleon
~~~~~~~~~~~~~
@@ -37,7 +36,7 @@ def __special_undoc__():
pass
-class SampleClass(object):
+class SampleClass:
def _private_doc(self):
"""SampleClass._private_doc.DOCSTRING"""
pass
diff --git a/tests/test_ext_napoleon_docstring.py b/tests/test_ext_napoleon_docstring.py
index a4d127d0d..422ec0d75 100644
--- a/tests/test_ext_napoleon_docstring.py
+++ b/tests/test_ext_napoleon_docstring.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_napoleon_docstring
~~~~~~~~~~~~~~~~~~~~~~~
@@ -77,6 +76,34 @@ Sample namedtuple subclass
self.assertEqual(expected, actual)
+class InlineAttributeTest(BaseDocstringTest):
+
+ def test_class_data_member(self):
+ config = Config()
+ docstring = """data member description:
+
+- a: b
+"""
+ actual = str(GoogleDocstring(docstring, config=config, app=None,
+ what='attribute', name='some_data', obj=0))
+ expected = """data member description:
+
+- a: b"""
+
+ self.assertEqual(expected, actual)
+
+ def test_class_data_member_inline(self):
+ config = Config()
+ docstring = """b: data member description with :ref:`reference`"""
+ actual = str(GoogleDocstring(docstring, config=config, app=None,
+ what='attribute', name='some_data', obj=0))
+ expected = """data member description with :ref:`reference`
+
+:type: b"""
+
+ self.assertEqual(expected, actual)
+
+
class GoogleDocstringTest(BaseDocstringTest):
docstrings = [(
"""Single line summary""",
@@ -452,8 +479,8 @@ Raises:
""", """
Example Function
-:raises: * :exc:`RuntimeError` -- A setting wasn't specified, or was invalid.
- * :exc:`ValueError` -- Something something value error.
+:raises RuntimeError: A setting wasn't specified, or was invalid.
+:raises ValueError: Something something value error.
"""),
################################
("""
@@ -465,7 +492,7 @@ Raises:
""", """
Example Function
-:raises: :exc:`InvalidDimensionsError`
+:raises InvalidDimensionsError:
"""),
################################
("""
@@ -477,7 +504,7 @@ Raises:
""", """
Example Function
-:raises: Invalid Dimensions Error
+:raises Invalid Dimensions Error:
"""),
################################
("""
@@ -489,7 +516,7 @@ Raises:
""", """
Example Function
-:raises: *Invalid Dimensions Error* -- With description
+:raises Invalid Dimensions Error: With description
"""),
################################
("""
@@ -501,7 +528,7 @@ Raises:
""", """
Example Function
-:raises: :exc:`InvalidDimensionsError` -- If the dimensions couldn't be parsed.
+:raises InvalidDimensionsError: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -513,7 +540,7 @@ Raises:
""", """
Example Function
-:raises: *Invalid Dimensions Error* -- If the dimensions couldn't be parsed.
+:raises Invalid Dimensions Error: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -525,7 +552,7 @@ Raises:
""", """
Example Function
-:raises: If the dimensions couldn't be parsed.
+:raises If the dimensions couldn't be parsed.:
"""),
################################
("""
@@ -537,7 +564,7 @@ Raises:
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError`
+:raises exc.InvalidDimensionsError:
"""),
################################
("""
@@ -549,8 +576,7 @@ Raises:
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError` -- If the dimensions couldn't """
- """be parsed.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -563,9 +589,8 @@ Raises:
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError` -- If the dimensions couldn't """
- """be parsed,
- then a :class:`exc.InvalidDimensionsError` will be raised.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed,
+ then a :class:`exc.InvalidDimensionsError` will be raised.
"""),
################################
("""
@@ -578,9 +603,8 @@ Raises:
""", """
Example Function
-:raises: * :class:`exc.InvalidDimensionsError` -- If the dimensions """
- """couldn't be parsed.
- * :class:`exc.InvalidArgumentsError` -- If the arguments are invalid.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
+:raises exc.InvalidArgumentsError: If the arguments are invalid.
"""),
################################
("""
@@ -593,8 +617,8 @@ Raises:
""", """
Example Function
-:raises: * :class:`exc.InvalidDimensionsError`
- * :class:`exc.InvalidArgumentsError`
+:raises exc.InvalidDimensionsError:
+:raises exc.InvalidArgumentsError:
""")]
for docstring, expected in docstrings:
actual = str(GoogleDocstring(docstring))
@@ -1346,8 +1370,8 @@ Raises
""", """
Example Function
-:raises: * :exc:`RuntimeError` -- A setting wasn't specified, or was invalid.
- * :exc:`ValueError` -- Something something value error.
+:raises RuntimeError: A setting wasn't specified, or was invalid.
+:raises ValueError: Something something value error.
"""),
################################
("""
@@ -1360,7 +1384,7 @@ InvalidDimensionsError
""", """
Example Function
-:raises: :exc:`InvalidDimensionsError`
+:raises InvalidDimensionsError:
"""),
################################
("""
@@ -1373,7 +1397,7 @@ Invalid Dimensions Error
""", """
Example Function
-:raises: Invalid Dimensions Error
+:raises Invalid Dimensions Error:
"""),
################################
("""
@@ -1387,7 +1411,7 @@ Invalid Dimensions Error
""", """
Example Function
-:raises: *Invalid Dimensions Error* -- With description
+:raises Invalid Dimensions Error: With description
"""),
################################
("""
@@ -1401,7 +1425,7 @@ InvalidDimensionsError
""", """
Example Function
-:raises: :exc:`InvalidDimensionsError` -- If the dimensions couldn't be parsed.
+:raises InvalidDimensionsError: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -1415,7 +1439,7 @@ Invalid Dimensions Error
""", """
Example Function
-:raises: *Invalid Dimensions Error* -- If the dimensions couldn't be parsed.
+:raises Invalid Dimensions Error: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -1428,7 +1452,7 @@ If the dimensions couldn't be parsed.
""", """
Example Function
-:raises: If the dimensions couldn't be parsed.
+:raises If the dimensions couldn't be parsed.:
"""),
################################
("""
@@ -1441,7 +1465,7 @@ Raises
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError`
+:raises exc.InvalidDimensionsError:
"""),
################################
("""
@@ -1455,8 +1479,7 @@ Raises
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError` -- If the dimensions couldn't """
- """be parsed.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
"""),
################################
("""
@@ -1471,9 +1494,8 @@ Raises
""", """
Example Function
-:raises: :class:`exc.InvalidDimensionsError` -- If the dimensions couldn't """
- """be parsed,
- then a :class:`exc.InvalidDimensionsError` will be raised.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed,
+ then a :class:`exc.InvalidDimensionsError` will be raised.
"""),
################################
("""
@@ -1489,10 +1511,8 @@ Raises
""", """
Example Function
-:raises: * :class:`exc.InvalidDimensionsError` -- If the dimensions """
- """couldn't be parsed.
- * :class:`exc.InvalidArgumentsError` -- If the arguments """
- """are invalid.
+:raises exc.InvalidDimensionsError: If the dimensions couldn't be parsed.
+:raises exc.InvalidArgumentsError: If the arguments are invalid.
"""),
################################
("""
@@ -1506,8 +1526,8 @@ Raises
""", """
Example Function
-:raises: * :class:`exc.InvalidDimensionsError`
- * :class:`exc.InvalidArgumentsError`
+:raises exc.InvalidDimensionsError:
+:raises exc.InvalidArgumentsError:
""")]
for docstring, expected in docstrings:
config = Config()
diff --git a/tests/test_ext_napoleon_iterators.py b/tests/test_ext_napoleon_iterators.py
index d02f76897..fd3255869 100644
--- a/tests/test_ext_napoleon_iterators.py
+++ b/tests/test_ext_napoleon_iterators.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_napoleon_iterators
~~~~~~~~~~~~~~~~~~~~~~~
@@ -341,7 +340,7 @@ class ModifyIterTest(BaseIteratorsTest):
self.assertEqual(expected, [i for i in it])
def test_modifier_rstrip_unicode(self):
- a = [u'', u' ', u' a ', u'b ', u' c', u' ', u'']
+ a = ['', ' ', ' a ', 'b ', ' c', ' ', '']
it = modify_iter(a, modifier=lambda s: s.rstrip())
- expected = [u'', u'', u' a', u'b', u' c', u'', u'']
+ expected = ['', '', ' a', 'b', ' c', '', '']
self.assertEqual(expected, [i for i in it])
diff --git a/tests/test_ext_todo.py b/tests/test_ext_todo.py
index cd3f9b3d4..5e7ade9f6 100644
--- a/tests/test_ext_todo.py
+++ b/tests/test_ext_todo.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_todo
~~~~~~~~~~~~~
diff --git a/tests/test_ext_viewcode.py b/tests/test_ext_viewcode.py
index 4676f488f..40fe888ae 100644
--- a/tests/test_ext_viewcode.py
+++ b/tests/test_ext_viewcode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_ext_viewcode
~~~~~~~~~~~~~~~~~
@@ -68,10 +67,10 @@ def test_local_source_files(app, status, warning):
if modname == 'not_a_package':
source = (app.srcdir / 'not_a_package/__init__.py').text()
tags = {
- 'func1': ('def', 3, 3),
- 'Class1': ('class', 3, 3),
- 'not_a_package.submodule.func1': ('def', 3, 3),
- 'not_a_package.submodule.Class1': ('class', 3, 3),
+ 'func1': ('def', 1, 1),
+ 'Class1': ('class', 1, 1),
+ 'not_a_package.submodule.func1': ('def', 1, 1),
+ 'not_a_package.submodule.Class1': ('class', 1, 1),
}
else:
source = (app.srcdir / 'not_a_package/submodule.py').text()
diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py
index 29a4c043a..1571adbce 100644
--- a/tests/test_highlighting.py
+++ b/tests/test_highlighting.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_highlighting
~~~~~~~~~~~~~~~~~
diff --git a/tests/test_intl.py b/tests/test_intl.py
index aed323eed..4961d2550 100644
--- a/tests/test_intl.py
+++ b/tests/test_intl.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_intl
~~~~~~~~~
@@ -9,7 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import os
import pickle
@@ -18,7 +16,6 @@ import re
import pytest
from babel.messages import pofile, mofile
from docutils import nodes
-from six import string_types
from sphinx.testing.util import (
path, etree_parse, strip_escseq,
@@ -78,20 +75,7 @@ def _info(app):
def elem_gettexts(elem):
- def itertext(self):
- # this function copied from Python-2.7 'ElementTree.itertext'.
- # for compatibility to Python-2.6
- tag = self.tag
- if not isinstance(tag, string_types) and tag is not None:
- return
- if self.text:
- yield self.text
- for e in self:
- for s in itertext(e):
- yield s
- if e.tail:
- yield e.tail
- return [_f for _f in [s.strip() for s in itertext(elem)] if _f]
+ return [_f for _f in [s.strip() for s in elem.itertext()] if _f]
def elem_getref(elem):
@@ -120,8 +104,8 @@ def assert_count(expected_expr, result, count):
@pytest.mark.test_params(shared_result='test_intl_basic')
def test_text_toctree(app):
app.build()
- result = (app.outdir / 'contents.txt').text(encoding='utf-8')
- assert_startswith(result, u"CONTENTS\n********\n\nTABLE OF CONTENTS\n")
+ result = (app.outdir / 'index.txt').text(encoding='utf-8')
+ assert_startswith(result, "CONTENTS\n********\n\nTABLE OF CONTENTS\n")
@sphinx_intl
@@ -143,9 +127,9 @@ def test_text_warning_node(app):
app.build()
# test warnings in translation
result = (app.outdir / 'warnings.txt').text(encoding='utf-8')
- expect = (u"3. I18N WITH REST WARNINGS"
- u"\n**************************\n"
- u"\nLINE OF >>``<<BROKEN LITERAL MARKUP.\n")
+ expect = ("3. I18N WITH REST WARNINGS"
+ "\n**************************\n"
+ "\nLINE OF >>``<<BROKEN LITERAL MARKUP.\n")
assert result == expect
@@ -157,9 +141,9 @@ def test_text_title_underline(app):
app.build()
# --- simple translation; check title underlines
result = (app.outdir / 'bom.txt').text(encoding='utf-8')
- expect = (u"2. Datei mit UTF-8"
- u"\n******************\n" # underline matches new translation
- u"\nThis file has umlauts: äöü.\n")
+ expect = ("2. Datei mit UTF-8"
+ "\n******************\n" # underline matches new translation
+ "\nThis file has umlauts: äöü.\n")
assert result == expect
@@ -169,8 +153,8 @@ def test_text_title_underline(app):
def test_text_subdirs(app):
app.build()
# --- check translation in subdirs
- result = (app.outdir / 'subdir' / 'contents.txt').text(encoding='utf-8')
- assert_startswith(result, u"1. subdir contents\n******************\n")
+ result = (app.outdir / 'subdir' / 'index.txt').text(encoding='utf-8')
+ assert_startswith(result, "1. subdir contents\n******************\n")
@sphinx_intl
@@ -180,46 +164,46 @@ def test_text_inconsistency_warnings(app, warning):
app.build()
# --- check warnings for inconsistency in number of references
result = (app.outdir / 'refs_inconsistency.txt').text(encoding='utf-8')
- expect = (u"8. I18N WITH REFS INCONSISTENCY"
- u"\n*******************************\n"
- u"\n* FOR CITATION [ref3].\n"
- u"\n* reference FOR reference.\n"
- u"\n* ORPHAN REFERENCE: I18N WITH REFS INCONSISTENCY.\n"
- u"\n[1] THIS IS A AUTO NUMBERED FOOTNOTE.\n"
- u"\n[ref2] THIS IS A CITATION.\n"
- u"\n[100] THIS IS A NUMBERED FOOTNOTE.\n")
+ expect = ("8. I18N WITH REFS INCONSISTENCY"
+ "\n*******************************\n"
+ "\n* FOR CITATION [ref3].\n"
+ "\n* reference FOR reference.\n"
+ "\n* ORPHAN REFERENCE: I18N WITH REFS INCONSISTENCY.\n"
+ "\n[1] THIS IS A AUTO NUMBERED FOOTNOTE.\n"
+ "\n[ref2] THIS IS A CITATION.\n"
+ "\n[100] THIS IS A NUMBERED FOOTNOTE.\n")
assert result == expect
warnings = getwarning(warning)
- warning_fmt = u'.*/refs_inconsistency.txt:\\d+: ' \
- u'WARNING: inconsistent %(reftype)s in translated message.' \
- u' original: %(original)s, translated: %(translated)s\n'
+ warning_fmt = ('.*/refs_inconsistency.txt:\\d+: '
+ 'WARNING: inconsistent %(reftype)s in translated message.'
+ ' original: %(original)s, translated: %(translated)s\n')
expected_warning_expr = (
warning_fmt % {
- u'reftype': u'footnote references',
- u'original': u"\\[u?'\\[#\\]_'\\]",
- u'translated': u"\\[\\]"
+ 'reftype': 'footnote references',
+ 'original': "\\['\\[#\\]_'\\]",
+ 'translated': "\\[\\]"
} +
warning_fmt % {
- u'reftype': u'footnote references',
- u'original': u"\\[u?'\\[100\\]_'\\]",
- u'translated': u"\\[\\]"
+ 'reftype': 'footnote references',
+ 'original': "\\['\\[100\\]_'\\]",
+ 'translated': "\\[\\]"
} +
warning_fmt % {
- u'reftype': u'references',
- u'original': u"\\[u?'reference_'\\]",
- u'translated': u"\\[u?'reference_', u?'reference_'\\]"
+ 'reftype': 'references',
+ 'original': "\\['reference_'\\]",
+ 'translated': "\\['reference_', 'reference_'\\]"
} +
warning_fmt % {
- u'reftype': u'references',
- u'original': u"\\[\\]",
- u'translated': u"\\[u?'`I18N WITH REFS INCONSISTENCY`_'\\]"
+ 'reftype': 'references',
+ 'original': "\\[\\]",
+ 'translated': "\\['`I18N WITH REFS INCONSISTENCY`_'\\]"
})
assert_re_search(expected_warning_expr, warnings)
expected_citation_warning_expr = (
- u'.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.\n' +
- u'.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3')
+ '.*/refs_inconsistency.txt:\\d+: WARNING: Citation \\[ref2\\] is not referenced.\n' +
+ '.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3')
assert_re_search(expected_citation_warning_expr, warnings)
@@ -230,18 +214,18 @@ def test_text_literalblock_warnings(app, warning):
app.build()
# --- check warning for literal block
result = (app.outdir / 'literalblock.txt').text(encoding='utf-8')
- expect = (u"9. I18N WITH LITERAL BLOCK"
- u"\n**************************\n"
- u"\nCORRECT LITERAL BLOCK:\n"
- u"\n this is"
- u"\n literal block\n"
- u"\nMISSING LITERAL BLOCK:\n"
- u"\n<SYSTEM MESSAGE:")
+ expect = ("9. I18N WITH LITERAL BLOCK"
+ "\n**************************\n"
+ "\nCORRECT LITERAL BLOCK:\n"
+ "\n this is"
+ "\n literal block\n"
+ "\nMISSING LITERAL BLOCK:\n"
+ "\n<SYSTEM MESSAGE:")
assert_startswith(result, expect)
warnings = getwarning(warning)
- expected_warning_expr = u'.*/literalblock.txt:\\d+: ' \
- u'WARNING: Literal block expected; none found.'
+ expected_warning_expr = ('.*/literalblock.txt:\\d+: '
+ 'WARNING: Literal block expected; none found.')
assert_re_search(expected_warning_expr, warnings)
@@ -252,17 +236,16 @@ def test_text_definition_terms(app):
app.build()
# --- definition terms: regression test for #975, #2198, #2205
result = (app.outdir / 'definition_terms.txt').text(encoding='utf-8')
- expect = (u"13. I18N WITH DEFINITION TERMS"
- u"\n******************************\n"
- u"\nSOME TERM"
- u"\n THE CORRESPONDING DEFINITION\n"
- u"\nSOME *TERM* WITH LINK"
- u"\n THE CORRESPONDING DEFINITION #2\n"
- u"\nSOME **TERM** WITH : CLASSIFIER1 : CLASSIFIER2"
- u"\n THE CORRESPONDING DEFINITION\n"
- u"\nSOME TERM WITH : CLASSIFIER[]"
- u"\n THE CORRESPONDING DEFINITION\n"
- )
+ expect = ("13. I18N WITH DEFINITION TERMS"
+ "\n******************************\n"
+ "\nSOME TERM"
+ "\n THE CORRESPONDING DEFINITION\n"
+ "\nSOME *TERM* WITH LINK"
+ "\n THE CORRESPONDING DEFINITION #2\n"
+ "\nSOME **TERM** WITH : CLASSIFIER1 : CLASSIFIER2"
+ "\n THE CORRESPONDING DEFINITION\n"
+ "\nSOME TERM WITH : CLASSIFIER[]"
+ "\n THE CORRESPONDING DEFINITION\n")
assert result == expect
@@ -273,13 +256,13 @@ def test_text_glossary_term(app, warning):
app.build()
# --- glossary terms: regression test for #1090
result = (app.outdir / 'glossary_terms.txt').text(encoding='utf-8')
- expect = (u"18. I18N WITH GLOSSARY TERMS"
- u"\n****************************\n"
- u"\nSOME NEW TERM"
- u"\n THE CORRESPONDING GLOSSARY\n"
- u"\nSOME OTHER NEW TERM"
- u"\n THE CORRESPONDING GLOSSARY #2\n"
- u"\nLINK TO *SOME NEW TERM*.\n")
+ expect = ("18. I18N WITH GLOSSARY TERMS"
+ "\n****************************\n"
+ "\nSOME NEW TERM"
+ "\n THE CORRESPONDING GLOSSARY\n"
+ "\nSOME OTHER NEW TERM"
+ "\n THE CORRESPONDING GLOSSARY #2\n"
+ "\nLINK TO *SOME NEW TERM*.\n")
assert result == expect
warnings = getwarning(warning)
assert 'term not in glossary' not in warnings
@@ -292,17 +275,17 @@ def test_text_glossary_term_inconsistencies(app, warning):
app.build()
# --- glossary term inconsistencies: regression test for #1090
result = (app.outdir / 'glossary_terms_inconsistency.txt').text(encoding='utf-8')
- expect = (u"19. I18N WITH GLOSSARY TERMS INCONSISTENCY"
- u"\n******************************************\n"
- u"\n1. LINK TO *SOME NEW TERM*.\n")
+ expect = ("19. I18N WITH GLOSSARY TERMS INCONSISTENCY"
+ "\n******************************************\n"
+ "\n1. LINK TO *SOME NEW TERM*.\n")
assert result == expect
warnings = getwarning(warning)
expected_warning_expr = (
- u'.*/glossary_terms_inconsistency.txt:\\d+: '
- u'WARNING: inconsistent term references in translated message.'
- u" original: \\[u?':term:`Some term`', u?':term:`Some other term`'\\],"
- u" translated: \\[u?':term:`SOME NEW TERM`'\\]\n")
+ '.*/glossary_terms_inconsistency.txt:\\d+: '
+ 'WARNING: inconsistent term references in translated message.'
+ " original: \\[':term:`Some term`', ':term:`Some other term`'\\],"
+ " translated: \\[':term:`SOME NEW TERM`'\\]\n")
assert_re_search(expected_warning_expr, warnings)
@@ -337,12 +320,12 @@ def test_text_seealso(app):
app.build()
# --- seealso
result = (app.outdir / 'seealso.txt').text(encoding='utf-8')
- expect = (u"12. I18N WITH SEEALSO"
- u"\n*********************\n"
- u"\nSee also: SHORT TEXT 1\n"
- u"\nSee also: LONG TEXT 1\n"
- u"\nSee also: SHORT TEXT 2\n"
- u"\n LONG TEXT 2\n")
+ expect = ("12. I18N WITH SEEALSO"
+ "\n*********************\n"
+ "\nSee also: SHORT TEXT 1\n"
+ "\nSee also: LONG TEXT 1\n"
+ "\nSee also: SHORT TEXT 2\n"
+ "\n LONG TEXT 2\n")
assert result == expect
@@ -353,39 +336,38 @@ def test_text_figure_captions(app):
app.build()
# --- figure captions: regression test for #940
result = (app.outdir / 'figure.txt').text(encoding='utf-8')
- expect = (u"14. I18N WITH FIGURE CAPTION"
- u"\n****************************\n"
- u"\n [image]MY CAPTION OF THE FIGURE\n"
- u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
- u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
- u"\n"
- u"\n14.1. FIGURE IN THE BLOCK"
- u"\n=========================\n"
- u"\nBLOCK\n"
- u"\n [image]MY CAPTION OF THE FIGURE\n"
- u"\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
- u"\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
- u"\n"
- u"\n"
- u"14.2. IMAGE URL AND ALT\n"
- u"=======================\n"
- u"\n"
- u"[image: i18n][image]\n"
- u"\n"
- u" [image: img][image]\n"
- u"\n"
- u"\n"
- u"14.3. IMAGE ON SUBSTITUTION\n"
- u"===========================\n"
- u"\n"
- u"\n"
- u"14.4. IMAGE UNDER NOTE\n"
- u"======================\n"
- u"\n"
- u"Note: [image: i18n under note][image]\n"
- u"\n"
- u" [image: img under note][image]\n"
- )
+ expect = ("14. I18N WITH FIGURE CAPTION"
+ "\n****************************\n"
+ "\n [image]MY CAPTION OF THE FIGURE\n"
+ "\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
+ "\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
+ "\n"
+ "\n14.1. FIGURE IN THE BLOCK"
+ "\n=========================\n"
+ "\nBLOCK\n"
+ "\n [image]MY CAPTION OF THE FIGURE\n"
+ "\n MY DESCRIPTION PARAGRAPH1 OF THE FIGURE.\n"
+ "\n MY DESCRIPTION PARAGRAPH2 OF THE FIGURE.\n"
+ "\n"
+ "\n"
+ "14.2. IMAGE URL AND ALT\n"
+ "=======================\n"
+ "\n"
+ "[image: i18n][image]\n"
+ "\n"
+ " [image: img][image]\n"
+ "\n"
+ "\n"
+ "14.3. IMAGE ON SUBSTITUTION\n"
+ "===========================\n"
+ "\n"
+ "\n"
+ "14.4. IMAGE UNDER NOTE\n"
+ "======================\n"
+ "\n"
+ "Note: [image: i18n under note][image]\n"
+ "\n"
+ " [image: img under note][image]\n")
assert result == expect
@@ -396,14 +378,14 @@ def test_text_rubric(app):
app.build()
# --- rubric: regression test for pull request #190
result = (app.outdir / 'rubric.txt').text(encoding='utf-8')
- expect = (u"I18N WITH RUBRIC"
- u"\n****************\n"
- u"\n-[ RUBRIC TITLE ]-\n"
- u"\n"
- u"\nRUBRIC IN THE BLOCK"
- u"\n===================\n"
- u"\nBLOCK\n"
- u"\n -[ RUBRIC TITLE ]-\n")
+ expect = ("I18N WITH RUBRIC"
+ "\n****************\n"
+ "\n-[ RUBRIC TITLE ]-\n"
+ "\n"
+ "\nRUBRIC IN THE BLOCK"
+ "\n===================\n"
+ "\nBLOCK\n"
+ "\n -[ RUBRIC TITLE ]-\n")
assert result == expect
@@ -414,25 +396,25 @@ def test_text_docfields(app):
app.build()
# --- docfields
result = (app.outdir / 'docfields.txt').text(encoding='utf-8')
- expect = (u"21. I18N WITH DOCFIELDS"
- u"\n***********************\n"
- u"\nclass Cls1\n"
- u"\n Parameters:"
- u"\n **param** -- DESCRIPTION OF PARAMETER param\n"
- u"\nclass Cls2\n"
- u"\n Parameters:"
- u"\n * **foo** -- DESCRIPTION OF PARAMETER foo\n"
- u"\n * **bar** -- DESCRIPTION OF PARAMETER bar\n"
- u"\nclass Cls3(values)\n"
- u"\n Raises:"
- u"\n **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
- u"\nclass Cls4(values)\n"
- u"\n Raises:"
- u"\n * **TypeError** -- IF THE VALUES ARE NOT VALID\n"
- u"\n * **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
- u"\nclass Cls5\n"
- u"\n Returns:"
- u'\n A NEW "Cls3" INSTANCE\n')
+ expect = ("21. I18N WITH DOCFIELDS"
+ "\n***********************\n"
+ "\nclass Cls1\n"
+ "\n Parameters:"
+ "\n **param** -- DESCRIPTION OF PARAMETER param\n"
+ "\nclass Cls2\n"
+ "\n Parameters:"
+ "\n * **foo** -- DESCRIPTION OF PARAMETER foo\n"
+ "\n * **bar** -- DESCRIPTION OF PARAMETER bar\n"
+ "\nclass Cls3(values)\n"
+ "\n Raises:"
+ "\n **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
+ "\nclass Cls4(values)\n"
+ "\n Raises:"
+ "\n * **TypeError** -- IF THE VALUES ARE NOT VALID\n"
+ "\n * **ValueError** -- IF THE VALUES ARE OUT OF RANGE\n"
+ "\nclass Cls5\n"
+ "\n Returns:"
+ '\n A NEW "Cls3" INSTANCE\n')
assert result == expect
@@ -462,8 +444,8 @@ def test_text_admonitions(app):
def test_gettext_toctree(app):
app.build()
# --- toctree
- expect = read_po(app.srcdir / 'contents.po')
- actual = read_po(app.outdir / 'contents.pot')
+ expect = read_po(app.srcdir / 'index.po')
+ actual = read_po(app.outdir / 'index.pot')
for expect_msg in [m for m in expect if m.id]:
assert expect_msg.id in [m.id for m in actual if m.id]
@@ -629,7 +611,7 @@ def test_gettext_dont_rebuild_mo(make_app, app_params, build_mo):
def test_html_meta(app):
app.build()
# --- test for meta
- result = (app.outdir / 'contents.html').text(encoding='utf-8')
+ result = (app.outdir / 'index.html').text(encoding='utf-8')
expected_expr = '<meta content="TESTDATA FOR I18N" name="description" />'
assert expected_expr in result
expected_expr = '<meta content="I18N, SPHINX, MARKUP" name="keywords" />'
@@ -723,21 +705,21 @@ def test_html_versionchanges(app):
return ''
expect1 = (
- u"""<p><span class="versionmodified">Deprecated since version 1.0: </span>"""
- u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF DEPRECATED.</p>\n"""
- u"""<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF DEPRECATED.</p>\n""")
+ """<p><span class="versionmodified deprecated">Deprecated since version 1.0: </span>"""
+ """THIS IS THE <em>FIRST</em> PARAGRAPH OF DEPRECATED.</p>\n"""
+ """<p>THIS IS THE <em>SECOND</em> PARAGRAPH OF DEPRECATED.</p>\n""")
matched_content = get_content(result, "deprecated")
assert expect1 == matched_content
expect2 = (
- u"""<p><span class="versionmodified">New in version 1.0: </span>"""
- u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONADDED.</p>\n""")
+ """<p><span class="versionmodified added">New in version 1.0: </span>"""
+ """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONADDED.</p>\n""")
matched_content = get_content(result, "versionadded")
assert expect2 == matched_content
expect3 = (
- u"""<p><span class="versionmodified">Changed in version 1.0: </span>"""
- u"""THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONCHANGED.</p>\n""")
+ """<p><span class="versionmodified changed">Changed in version 1.0: </span>"""
+ """THIS IS THE <em>FIRST</em> PARAGRAPH OF VERSIONCHANGED.</p>\n""")
matched_content = get_content(result, "versionchanged")
assert expect3 == matched_content
@@ -758,7 +740,7 @@ def test_html_docfields(app):
def test_html_template(app):
app.build()
# --- gettext template
- result = (app.outdir / 'index.html').text(encoding='utf-8')
+ result = (app.outdir / 'contents.html').text(encoding='utf-8')
assert "WELCOME" in result
assert "SPHINX 2013.120" in result
@@ -831,7 +813,7 @@ def test_xml_footnotes(app, warning):
['ref'])
warnings = getwarning(warning)
- warning_expr = u'.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
+ warning_expr = '.*/footnote.xml:\\d*: SEVERE: Duplicate ID: ".*".\n'
assert_not_re_search(warning_expr, warnings)
@@ -945,7 +927,7 @@ def test_xml_role_xref(app):
para1,
['LINK TO', "I18N ROCK'N ROLE XREF", ',', 'CONTENTS', ',',
'SOME NEW TERM', '.'],
- ['i18n-role-xref', 'contents',
+ ['i18n-role-xref', 'index',
'glossary_terms#term-some-term'])
para2 = sec2.findall('paragraph')
@@ -962,7 +944,7 @@ def test_xml_role_xref(app):
assert_elem(
para2[2],
['LINK TO', 'I18N WITH GLOSSARY TERMS', 'AND', 'CONTENTS', '.'],
- ['glossary_terms', 'contents'])
+ ['glossary_terms', 'index'])
assert_elem(
para2[3],
['LINK TO', '--module', 'AND', '-m', '.'],
@@ -1192,7 +1174,7 @@ def test_text_references(app, warning):
app.builder.build_specific([app.srcdir / 'refs.txt'])
warnings = warning.getvalue().replace(os.sep, '/')
- warning_expr = u'refs.txt:\\d+: ERROR: Unknown target name:'
+ warning_expr = 'refs.txt:\\d+: ERROR: Unknown target name:'
assert_count(warning_expr, warnings, 0)
@@ -1246,7 +1228,7 @@ def test_image_glob_intl(app):
srcdir='test_intl_images',
confoverrides={
'language': 'xx',
- 'figure_language_filename': u'{root}{ext}.{language}',
+ 'figure_language_filename': '{root}{ext}.{language}',
}
)
@pytest.mark.xfail(os.name != 'posix', reason="Not working on windows")
diff --git a/tests/test_io.py b/tests/test_io.py
index 1c8fee86b..ae7a162ae 100644
--- a/tests/test_io.py
+++ b/tests/test_io.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_sphinx_io
~~~~~~~~~~~~~~
@@ -9,8 +8,9 @@
:license: BSD, see LICENSE for details.
"""
+from io import StringIO
+
import pytest
-from six import StringIO
from sphinx.io import SphinxRSTFileInput
diff --git a/tests/test_locale.py b/tests/test_locale.py
index 9b1921bd6..bab4dc91a 100644
--- a/tests/test_locale.py
+++ b/tests/test_locale.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_locale
~~~~~~~~~~
diff --git a/tests/test_markup.py b/tests/test_markup.py
index 54a6e54be..e17e647c7 100644
--- a/tests/test_markup.py
+++ b/tests/test_markup.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_markup
~~~~~~~~~~~
@@ -18,6 +17,7 @@ from docutils.parsers.rst import Parser as RstParser
from docutils.transforms.universal import SmartQuotes
from sphinx import addnodes
+from sphinx.builders.latex import LaTeXBuilder
from sphinx.testing.util import assert_node
from sphinx.util import texescape
from sphinx.util.docutils import sphinx_domains
@@ -34,6 +34,7 @@ def settings(app):
settings.smart_quotes = True
settings.env = app.builder.env
settings.env.temp_data['docname'] = 'dummy'
+ settings.contentsname = 'dummy'
domain_context = sphinx_domains(settings.env)
domain_context.enable()
yield settings
@@ -87,6 +88,9 @@ def verify_re_html(app, parse):
def verify_re_latex(app, parse):
def verify(rst, latex_expected):
document = parse(rst)
+ app.builder = LaTeXBuilder(app)
+ app.builder.set_environment(app.env)
+ app.builder.init_context()
latex_translator = ForgivingLaTeXTranslator(document, app.builder)
latex_translator.first_document = -1 # don't write \begin{document}
document.walkabout(latex_translator)
@@ -158,14 +162,14 @@ def get_verifier(verify, verify_re):
# interpolation of arrows in menuselection
'verify',
':menuselection:`a --> b`',
- (u'<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
+ ('<p><span class="menuselection">a \N{TRIANGULAR BULLET} b</span></p>'),
'\\sphinxmenuselection{a \\(\\rightarrow\\) b}',
),
(
# interpolation of ampersands in menuselection
'verify',
':menuselection:`&Foo -&&- &Bar`',
- (u'<p><span class="menuselection"><span class="accelerator">F</span>oo '
+ ('<p><span class="menuselection"><span class="accelerator">F</span>oo '
'-&amp;- <span class="accelerator">B</span>ar</span></p>'),
r'\sphinxmenuselection{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}',
),
@@ -173,7 +177,7 @@ def get_verifier(verify, verify_re):
# interpolation of ampersands in guilabel
'verify',
':guilabel:`&Foo -&&- &Bar`',
- (u'<p><span class="guilabel"><span class="accelerator">F</span>oo '
+ ('<p><span class="guilabel"><span class="accelerator">F</span>oo '
'-&amp;- <span class="accelerator">B</span>ar</span></p>'),
r'\sphinxguilabel{\sphinxaccelerator{F}oo -\&- \sphinxaccelerator{B}ar}',
),
@@ -189,8 +193,8 @@ def get_verifier(verify, verify_re):
# verify smarty-pants quotes
'verify',
'"John"',
- u'<p>“John”</p>',
- u"“John”",
+ '<p>“John”</p>',
+ "“John”",
),
(
# ... but not in literal text
@@ -210,23 +214,23 @@ def get_verifier(verify, verify_re):
(
# correct escaping in normal mode
'verify',
- u'Γ\\\\∞$',
+ 'Γ\\\\∞$',
None,
- r'\(\Gamma\)\textbackslash{}\(\infty\)\$',
+ 'Γ\\textbackslash{}\\(\\infty\\)\\$',
),
(
# in verbatim code fragments
'verify',
- u'::\n\n @Γ\\∞${}',
+ '::\n\n @Γ\\∞${}',
None,
- (u'\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
- u'@\\(\\Gamma\\)\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
- u'\\end{sphinxVerbatim}'),
+ ('\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]\n'
+ '@Γ\\PYGZbs{}\\(\\infty\\)\\PYGZdl{}\\PYGZob{}\\PYGZcb{}\n'
+ '\\end{sphinxVerbatim}'),
),
(
# in URIs
'verify_re',
- u'`test <http://example.com/~me/>`_',
+ '`test <http://example.com/~me/>`_',
None,
r'\\sphinxhref{http://example.com/~me/}{test}.*',
),
diff --git a/tests/test_metadata.py b/tests/test_metadata.py
index a00d76f87..435fb0876 100644
--- a/tests/test_metadata.py
+++ b/tests/test_metadata.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_metadata
~~~~~~~~~~~~~
@@ -15,7 +14,7 @@
import pytest
-@pytest.mark.sphinx('pseudoxml')
+@pytest.mark.sphinx('dummy', testroot='metadata')
def test_docinfo(app, status, warning):
"""
Inspect the 'docinfo' metadata stored in the first node of the document.
@@ -24,31 +23,29 @@ def test_docinfo(app, status, warning):
'dedication' blocks, or the 'meta' role. Doing otherwise is probably more
messing with the internals of sphinx than this rare use case merits.
"""
- app.builder.build(['metadata'])
- env = app.env
- exampledocinfo = env.metadata['metadata']
+ app.build()
expecteddocinfo = {
- 'author': u'David Goodger',
- 'authors': [u'Me', u'Myself', u'I'],
- 'address': u'123 Example Street\nExample, EX Canada\nA1B 2C3',
- 'field name': u'This is a generic bibliographic field.',
- 'field name 2': (u'Generic bibliographic fields may contain multiple '
- u'body elements.\n\nLike this.'),
- 'status': u'This is a “work in progress”',
- 'version': u'1',
- 'copyright': (u'This document has been placed in the public domain. '
- u'You\nmay do with it as you wish. You may copy, modify,'
- u'\nredistribute, reattribute, sell, buy, rent, lease,\n'
- u'destroy, or improve it, quote it at length, excerpt,\n'
- u'incorporate, collate, fold, staple, or mutilate it, or '
- u'do\nanything else to it that your or anyone else’s '
- u'heart\ndesires.'),
- 'contact': u'goodger@python.org',
- 'date': u'2006-05-21',
- 'organization': u'humankind',
- 'revision': u'4564',
+ 'author': 'David Goodger',
+ 'authors': ['Me', 'Myself', 'I'],
+ 'address': '123 Example Street\nExample, EX Canada\nA1B 2C3',
+ 'field name': 'This is a generic bibliographic field.',
+ 'field name 2': ('Generic bibliographic fields may contain multiple '
+ 'body elements.\n\nLike this.'),
+ 'status': 'This is a “work in progress”',
+ 'version': '1',
+ 'copyright': ('This document has been placed in the public domain. '
+ 'You\nmay do with it as you wish. You may copy, modify,'
+ '\nredistribute, reattribute, sell, buy, rent, lease,\n'
+ 'destroy, or improve it, quote it at length, excerpt,\n'
+ 'incorporate, collate, fold, staple, or mutilate it, or '
+ 'do\nanything else to it that your or anyone else’s '
+ 'heart\ndesires.'),
+ 'contact': 'goodger@python.org',
+ 'date': '2006-05-21',
+ 'organization': 'humankind',
+ 'revision': '4564',
'tocdepth': 1,
- 'orphan': u'',
- 'nocomments': u'',
+ 'orphan': '',
+ 'nocomments': '',
}
- assert exampledocinfo == expecteddocinfo
+ assert app.env.metadata['index'] == expecteddocinfo
diff --git a/tests/test_parser.py b/tests/test_parser.py
new file mode 100644
index 000000000..6a1db4a6b
--- /dev/null
+++ b/tests/test_parser.py
@@ -0,0 +1,65 @@
+"""
+ test_sphinx_parsers
+ ~~~~~~~~~~~~~~~~~~~
+
+ Tests parsers module.
+
+ :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from unittest.mock import Mock, patch
+
+import pytest
+
+from sphinx.parsers import RSTParser
+from sphinx.util.docutils import new_document
+
+
+@pytest.mark.sphinx(testroot='basic')
+@patch('docutils.parsers.rst.states.RSTStateMachine')
+def test_RSTParser_prolog_epilog(RSTStateMachine, app):
+ document = new_document('dummy.rst')
+ document.settings = Mock(tab_width=8, language_code='')
+ parser = RSTParser()
+ parser.set_application(app)
+
+ # normal case
+ text = ('hello Sphinx world\n'
+ 'Sphinx is a document generator')
+ parser.parse(text, document)
+ (content, _), _ = RSTStateMachine().run.call_args
+
+ assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
+
+ # with rst_prolog
+ app.env.config.rst_prolog = 'this is rst_prolog\nhello reST!'
+ parser.parse(text, document)
+ (content, _), _ = RSTStateMachine().run.call_args
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
+
+ # with rst_epilog
+ app.env.config.rst_prolog = None
+ app.env.config.rst_epilog = 'this is rst_epilog\ngood-bye reST!'
+ parser.parse(text, document)
+ (content, _), _ = RSTStateMachine().run.call_args
+ assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator'),
+ ('<generated>', 0, ''),
+ ('<rst_epilog>', 0, 'this is rst_epilog'),
+ ('<rst_epilog>', 1, 'good-bye reST!')]
+
+ # expandtabs / convert whitespaces
+ app.env.config.rst_prolog = None
+ app.env.config.rst_epilog = None
+ text = ('\thello Sphinx world\n'
+ '\v\fSphinx is a document generator')
+ parser.parse(text, document)
+ (content, _), _ = RSTStateMachine().run.call_args
+ assert list(content.xitems()) == [('dummy.rst', 0, ' hello Sphinx world'),
+ ('dummy.rst', 1, ' Sphinx is a document generator')]
diff --git a/tests/test_project.py b/tests/test_project.py
new file mode 100644
index 000000000..97bcb78ae
--- /dev/null
+++ b/tests/test_project.py
@@ -0,0 +1,83 @@
+"""
+ test_project
+ ~~~~~~~~~~~~
+
+ Tests project module.
+
+ :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from collections import OrderedDict
+
+import pytest
+
+from sphinx.project import Project
+
+
+def test_project_discover(rootdir):
+ project = Project(rootdir / 'test-root', {})
+
+ docnames = {'autodoc', 'bom', 'extapi', 'extensions', 'footnote', 'images',
+ 'includes', 'index', 'lists', 'markup', 'math', 'objects',
+ 'subdir/excluded', 'subdir/images', 'subdir/includes'}
+ subdir_docnames = {'subdir/excluded', 'subdir/images', 'subdir/includes'}
+
+ # basic case
+ project.source_suffix = ['.txt']
+ assert project.discover() == docnames
+
+ # exclude_paths option
+ assert project.discover(['subdir/*']) == docnames - subdir_docnames
+
+ # exclude_patterns
+ assert project.discover(['.txt', 'subdir/*']) == docnames - subdir_docnames
+
+ # multiple source_suffixes
+ project.source_suffix = ['.txt', '.foo']
+ assert project.discover() == docnames | {'otherext'}
+
+ # complicated source_suffix
+ project.source_suffix = ['.foo.png']
+ assert project.discover() == {'img'}
+
+ # templates_path
+ project.source_suffix = ['.html']
+ assert project.discover() == {'_templates/layout',
+ '_templates/customsb',
+ '_templates/contentssb'}
+
+ assert project.discover(['_templates']) == set()
+
+
+@pytest.mark.sphinx(testroot='basic')
+def test_project_path2doc(app):
+ project = Project(app.srcdir, app.config.source_suffix)
+ assert project.path2doc('index.rst') == 'index'
+ assert project.path2doc('index.foo') is None # unknown extension
+ assert project.path2doc('index.foo.rst') == 'index.foo'
+ assert project.path2doc('index') is None
+ assert project.path2doc('/path/to/index.rst') == '/path/to/index'
+ assert project.path2doc(app.srcdir / '/to/index.rst') == '/to/index'
+
+
+@pytest.mark.sphinx(srcdir='project_doc2path', testroot='basic')
+def test_project_doc2path(app):
+ source_suffix = OrderedDict([('.rst', 'restructuredtext'),
+ ('.txt', 'restructuredtext')])
+
+ project = Project(app.srcdir, source_suffix)
+ assert project.doc2path('index') == (app.srcdir / 'index.rst')
+
+ # first source_suffix is used for missing file
+ assert project.doc2path('foo') == (app.srcdir / 'foo.rst')
+
+ # matched source_suffix is used if exists
+ (app.srcdir / 'foo.txt').write_text('')
+ assert project.doc2path('foo') == (app.srcdir / 'foo.txt')
+
+ # absolute path
+ assert project.doc2path('index', basedir=True) == (app.srcdir / 'index.rst')
+
+ # relative path
+ assert project.doc2path('index', basedir=False) == 'index.rst'
diff --git a/tests/test_pycode.py b/tests/test_pycode.py
index 2eab456bc..5eccad0db 100644
--- a/tests/test_pycode.py
+++ b/tests/test_pycode.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_pycode
~~~~~~~~~~~
@@ -12,8 +11,6 @@
import os
import sys
-from six import PY2
-
import sphinx
from sphinx.pycode import ModuleAnalyzer
@@ -24,20 +21,14 @@ def test_ModuleAnalyzer_for_string():
analyzer = ModuleAnalyzer.for_string('print("Hello world")', 'module_name')
assert analyzer.modname == 'module_name'
assert analyzer.srcname == '<string>'
- if PY2:
- assert analyzer.encoding == 'ascii'
- else:
- assert analyzer.encoding is None
+ assert analyzer.encoding is None
def test_ModuleAnalyzer_for_file():
analyzer = ModuleAnalyzer.for_string(SPHINX_MODULE_PATH, 'sphinx')
assert analyzer.modname == 'sphinx'
assert analyzer.srcname == '<string>'
- if PY2:
- assert analyzer.encoding == 'ascii'
- else:
- assert analyzer.encoding is None
+ assert analyzer.encoding is None
def test_ModuleAnalyzer_for_module():
diff --git a/tests/test_pycode_parser.py b/tests/test_pycode_parser.py
index 0875329a4..403c918dc 100644
--- a/tests/test_pycode_parser.py
+++ b/tests/test_pycode_parser.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_pycode_parser
~~~~~~~~~~~~~~~~~~
@@ -12,7 +11,6 @@
import sys
import pytest
-from six import PY2
from sphinx.pycode.parser import Parser
@@ -135,7 +133,6 @@ def test_complex_assignment():
assert parser.definitions == {}
-@pytest.mark.skipif(PY2, reason='tests for py3 syntax')
def test_complex_assignment_py3():
source = ('a, *b, c = (1, 2, 3, 4) #: unpack assignment\n'
'd, *self.attr = (5, 6, 7) #: unpack assignment2\n'
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index 7889299b7..0085f257e 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_quickstart
~~~~~~~~~~~~~~~
@@ -9,12 +8,10 @@
:license: BSD, see LICENSE for details.
"""
-import sys
import time
+from io import StringIO
import pytest
-from six import PY2, text_type, StringIO
-from six.moves import input
from sphinx import application
from sphinx.cmd import quickstart as qs
@@ -37,12 +34,6 @@ def mock_input(answers, needanswer=False):
raise AssertionError('answer for %r missing and no default '
'present' % prompt)
called.add(prompt)
- if PY2:
- prompt = str(prompt) # Python2.x raw_input emulation
- # `raw_input` encode `prompt` by default encoding to print.
- else:
- prompt = text_type(prompt) # Python3.x input emulation
- # `input` decode prompt by default encoding before print.
for question in answers:
if prompt.startswith(qs.PROMPT_PREFIX + question):
return answers[question]
@@ -57,7 +48,6 @@ real_input = input
def teardown_module():
qs.term_input = real_input
- qs.TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
coloron()
@@ -97,16 +87,11 @@ def test_do_prompt_inputstrip():
def test_do_prompt_with_nonascii():
answers = {
- 'Q1': u'\u30c9\u30a4\u30c4',
+ 'Q1': '\u30c9\u30a4\u30c4',
}
qs.term_input = mock_input(answers)
- try:
- result = qs.do_prompt('Q1', default=u'\u65e5\u672c')
- except UnicodeEncodeError:
- raise pytest.skip.Exception(
- 'non-ASCII console input not supported on this encoding: %s',
- qs.TERM_ENCODING)
- assert result == u'\u30c9\u30a4\u30c4'
+ result = qs.do_prompt('Q1', default='\u65e5\u672c')
+ assert result == '\u30c9\u30a4\u30c4'
def test_quickstart_defaults(tempdir):
@@ -150,8 +135,8 @@ def test_quickstart_all_answers(tempdir):
'Root path': tempdir,
'Separate source and build': 'y',
'Name prefix for templates': '.',
- 'Project name': u'STASI™'.encode('utf-8'),
- 'Author name': u'Wolfgang Schäuble & G\'Beckstein'.encode('utf-8'),
+ 'Project name': 'STASI™',
+ 'Author name': 'Wolfgang Schäuble & G\'Beckstein',
'Project version': '2.0',
'Project release': '2.0.1',
'Project language': 'de',
@@ -172,7 +157,6 @@ def test_quickstart_all_answers(tempdir):
'Do you want to use the epub builder': 'yes',
}
qs.term_input = mock_input(answers, needanswer=True)
- qs.TERM_ENCODING = 'utf-8'
d = {}
qs.ask_user(d)
qs.generate(d)
@@ -187,23 +171,16 @@ def test_quickstart_all_answers(tempdir):
assert ns['templates_path'] == ['.templates']
assert ns['source_suffix'] == '.txt'
assert ns['master_doc'] == 'contents'
- assert ns['project'] == u'STASI™'
- assert ns['copyright'] == u'%s, Wolfgang Schäuble & G\'Beckstein' % \
+ assert ns['project'] == 'STASI™'
+ assert ns['copyright'] == '%s, Wolfgang Schäuble & G\'Beckstein' % \
time.strftime('%Y')
assert ns['version'] == '2.0'
assert ns['release'] == '2.0.1'
assert ns['todo_include_todos'] is True
assert ns['html_static_path'] == ['.static']
assert ns['latex_documents'] == [
- ('contents', 'STASI.tex', u'STASI™ Documentation',
- u'Wolfgang Schäuble \\& G\'Beckstein', 'manual')]
- assert ns['man_pages'] == [
- ('contents', 'stasi', u'STASI™ Documentation',
- [u'Wolfgang Schäuble & G\'Beckstein'], 1)]
- assert ns['texinfo_documents'] == [
- ('contents', 'STASI', u'STASI™ Documentation',
- u'Wolfgang Schäuble & G\'Beckstein', 'STASI',
- 'One line description of project.', 'Miscellaneous')]
+ ('contents', 'STASI.tex', 'STASI™ Documentation',
+ 'Wolfgang Schäuble \\& G\'Beckstein', 'manual')]
assert (tempdir / 'build').isdir()
assert (tempdir / 'source' / '.static').isdir()
@@ -234,7 +211,7 @@ def test_generated_files_eol(tempdir):
def test_quickstart_and_build(tempdir):
answers = {
'Root path': tempdir,
- 'Project name': u'Fullwidth characters: \u30c9\u30a4\u30c4',
+ 'Project name': 'Fullwidth characters: \u30c9\u30a4\u30c4',
'Author name': 'Georg Brandl',
'Project version': '0.1',
}
@@ -259,7 +236,7 @@ def test_quickstart_and_build(tempdir):
def test_default_filename(tempdir):
answers = {
'Root path': tempdir,
- 'Project name': u'\u30c9\u30a4\u30c4', # Fullwidth characters only
+ 'Project name': '\u30c9\u30a4\u30c4', # Fullwidth characters only
'Author name': 'Georg Brandl',
'Project version': '0.1',
}
@@ -273,8 +250,6 @@ def test_default_filename(tempdir):
ns = {}
execfile_(conffile, ns)
assert ns['latex_documents'][0][1] == 'sphinx.tex'
- assert ns['man_pages'][0][1] == 'sphinx'
- assert ns['texinfo_documents'][0][1] == 'sphinx'
def test_extensions(tempdir):
diff --git a/tests/test_roles.py b/tests/test_roles.py
index 13b4e194d..27a300c47 100644
--- a/tests/test_roles.py
+++ b/tests/test_roles.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_roles
~~~~~~~~~~
@@ -20,62 +19,62 @@ def test_samp():
# normal case
text = 'print 1+{variable}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, ("print 1+",
- [nodes.emphasis, "variable"])],))
+ assert_node(ret[0], [nodes.literal, ("print 1+",
+ [nodes.emphasis, "variable"])])
assert msg == []
# two emphasis items
text = 'print {1}+{variable}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, ("print ",
- [nodes.emphasis, "1"],
- "+",
- [nodes.emphasis, "variable"])],))
+ assert_node(ret[0], [nodes.literal, ("print ",
+ [nodes.emphasis, "1"],
+ "+",
+ [nodes.emphasis, "variable"])])
assert msg == []
# empty curly brace
text = 'print 1+{}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, "print 1+{}"],))
+ assert_node(ret[0], [nodes.literal, "print 1+{}"])
assert msg == []
# half-opened variable
text = 'print 1+{variable'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, "print 1+{variable"],))
+ assert_node(ret[0], [nodes.literal, "print 1+{variable"])
assert msg == []
# nested
text = 'print 1+{{variable}}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, ("print 1+",
- [nodes.emphasis, "{variable"],
- "}")],))
+ assert_node(ret[0], [nodes.literal, ("print 1+",
+ [nodes.emphasis, "{variable"],
+ "}")])
assert msg == []
# emphasized item only
text = '{variable}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, nodes.emphasis, "variable"],))
+ assert_node(ret[0], [nodes.literal, nodes.emphasis, "variable"])
assert msg == []
# escaping
text = r'print 1+\{variable}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, "print 1+{variable}"],))
+ assert_node(ret[0], [nodes.literal, "print 1+{variable}"])
assert msg == []
# escaping (2)
text = r'print 1+\{{variable}\}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, ("print 1+{",
- [nodes.emphasis, "variable"],
- "}")],))
+ assert_node(ret[0], [nodes.literal, ("print 1+{",
+ [nodes.emphasis, "variable"],
+ "}")])
assert msg == []
# escape a backslash
text = r'print 1+\\{variable}'
ret, msg = emph_literal_role('samp', text, text, 0, Mock())
- assert_node(ret, ([nodes.literal, ("print 1+\\",
- [nodes.emphasis, "variable"])],))
+ assert_node(ret[0], [nodes.literal, ("print 1+\\",
+ [nodes.emphasis, "variable"])])
assert msg == []
diff --git a/tests/test_search.py b/tests/test_search.py
index 4c7eb8b21..4ae3d460f 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_search
~~~~~~~~~~~
@@ -9,12 +8,12 @@
:license: BSD, see LICENSE for details.
"""
+from io import BytesIO
from collections import namedtuple
import pytest
from docutils import frontend, utils
from docutils.parsers import rst
-from six import BytesIO
from sphinx.search import IndexBuilder
from sphinx.util import jsdump
@@ -22,7 +21,7 @@ from sphinx.util import jsdump
DummyEnvironment = namedtuple('DummyEnvironment', ['version', 'domains'])
-class DummyDomain(object):
+class DummyDomain:
def __init__(self, data):
self.data = data
self.object_types = {}
@@ -44,8 +43,9 @@ def setup_module():
def jsload(path):
searchindex = path.text()
assert searchindex.startswith('Search.setIndex(')
+ assert searchindex.endswith(')')
- return jsdump.loads(searchindex[16:-2])
+ return jsdump.loads(searchindex[16:-1])
def is_registered_term(index, keyword):
@@ -65,10 +65,7 @@ test that non-comments are indexed: fermion
@pytest.mark.sphinx(testroot='ext-viewcode')
def test_objects_are_escaped(app, status, warning):
app.builder.build_all()
- searchindex = (app.outdir / 'searchindex.js').text()
- assert searchindex.startswith('Search.setIndex(')
-
- index = jsdump.loads(searchindex[16:-2])
+ index = jsload(app.outdir / 'searchindex.js')
assert 'n::Array&lt;T, d&gt;' in index.get('objects').get('') # n::Array<T,d> is escaped
diff --git a/tests/test_setup_command.py b/tests/test_setup_command.py
index cd1f89c0c..ed2fd4ffa 100644
--- a/tests/test_setup_command.py
+++ b/tests/test_setup_command.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_setup_command
~~~~~~~~~~~~~~~~~~~
@@ -54,8 +53,8 @@ def setup_command(request, tempdir, rootdir):
def test_build_sphinx(setup_command):
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode == 0
@@ -63,8 +62,8 @@ def test_build_sphinx(setup_command):
def test_build_sphinx_multiple_builders(setup_command):
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode == 0
@@ -72,14 +71,14 @@ def test_build_sphinx_multiple_builders(setup_command):
def test_build_sphinx_multiple_invalid_builders(setup_command):
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode == 1
@pytest.fixture
def nonascii_srcdir(request, setup_command):
- mb_name = u'\u65e5\u672c\u8a9e'
+ mb_name = '\u65e5\u672c\u8a9e'
srcdir = (setup_command.pkgroot / 'doc')
try:
(srcdir / mb_name).makedirs()
@@ -94,20 +93,20 @@ def nonascii_srcdir(request, setup_command):
==========================
"""))
- master_doc = srcdir / 'contents.txt'
+ master_doc = srcdir / 'index.txt'
master_doc.write_bytes((master_doc.text() + dedent("""
.. toctree::
%(mb_name)s/%(mb_name)s
- """ % locals())).encode('utf-8'))
+ """ % locals())).encode())
@pytest.mark.usefixtures('nonascii_srcdir')
def test_build_sphinx_with_nonascii_path(setup_command):
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode == 0
@@ -118,8 +117,8 @@ def test_build_sphinx_return_nonzero_status(setup_command):
'http://localhost.unexistentdomain/index.html')
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode != 0, 'expect non-zero status for setup.py'
@@ -129,8 +128,8 @@ def test_build_sphinx_warning_return_zero_status(setup_command):
'See :ref:`unexisting-reference-label`')
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode == 0
@@ -141,6 +140,6 @@ def test_build_sphinx_warning_is_error_return_nonzero_status(setup_command):
'See :ref:`unexisting-reference-label`')
proc = setup_command.proc
out, err = proc.communicate()
- print(out)
- print(err)
+ print(out.decode())
+ print(err.decode())
assert proc.returncode != 0, 'expect non-zero status for setup.py'
diff --git a/tests/test_smartquotes.py b/tests/test_smartquotes.py
index c66a1af56..c610e5b2d 100644
--- a/tests/test_smartquotes.py
+++ b/tests/test_smartquotes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_smartquotes
~~~~~~~~~~~~~~~~
@@ -19,7 +18,7 @@ def test_basic(app, status, warning):
app.build()
content = (app.outdir / 'index.html').text()
- assert u'<p>– “Sphinx” is a tool that makes it easy …</p>' in content
+ assert '<p>– “Sphinx” is a tool that makes it easy …</p>' in content
@pytest.mark.sphinx(buildername='text', testroot='smartquotes', freshenv=True)
@@ -27,7 +26,7 @@ def test_text_builder(app, status, warning):
app.build()
content = (app.outdir / 'index.txt').text()
- assert u'-- "Sphinx" is a tool that makes it easy ...' in content
+ assert '-- "Sphinx" is a tool that makes it easy ...' in content
@pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True)
@@ -35,7 +34,7 @@ def test_man_builder(app, status, warning):
app.build()
content = (app.outdir / 'python.1').text()
- assert u'\\-\\- "Sphinx" is a tool that makes it easy ...' in content
+ assert '\\-\\- "Sphinx" is a tool that makes it easy ...' in content
@pytest.mark.sphinx(buildername='latex', testroot='smartquotes', freshenv=True)
@@ -43,7 +42,7 @@ def test_latex_builder(app, status, warning):
app.build()
content = (app.outdir / 'test.tex').text()
- assert u'\\textendash{} “Sphinx” is a tool that makes it easy …' in content
+ assert '\\textendash{} “Sphinx” is a tool that makes it easy …' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@@ -52,7 +51,7 @@ def test_ja_html_builder(app, status, warning):
app.build()
content = (app.outdir / 'index.html').text()
- assert u'<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
+ assert '<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@@ -61,7 +60,7 @@ def test_smartquotes_disabled(app, status, warning):
app.build()
content = (app.outdir / 'index.html').text()
- assert u'<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
+ assert '<p>-- &quot;Sphinx&quot; is a tool that makes it easy ...</p>' in content
@pytest.mark.skipif(docutils.__version_info__ < (0, 14),
@@ -72,7 +71,7 @@ def test_smartquotes_action(app, status, warning):
app.build()
content = (app.outdir / 'index.html').text()
- assert u'<p>-- “Sphinx” is a tool that makes it easy ...</p>' in content
+ assert '<p>-- “Sphinx” is a tool that makes it easy ...</p>' in content
@pytest.mark.sphinx(buildername='html', testroot='smartquotes', freshenv=True,
@@ -81,7 +80,7 @@ def test_smartquotes_excludes_language(app, status, warning):
app.build()
content = (app.outdir / 'index.html').text()
- assert u'<p>– 「Sphinx」 is a tool that makes it easy …</p>' in content
+ assert '<p>– 「Sphinx」 is a tool that makes it easy …</p>' in content
@pytest.mark.sphinx(buildername='man', testroot='smartquotes', freshenv=True,
@@ -90,4 +89,4 @@ def test_smartquotes_excludes_builders(app, status, warning):
app.build()
content = (app.outdir / 'python.1').text()
- assert u'– “Sphinx” is a tool that makes it easy …' in content
+ assert '– “Sphinx” is a tool that makes it easy …' in content
diff --git a/tests/test_templating.py b/tests/test_templating.py
index 8eed1fdf8..5b786c52c 100644
--- a/tests/test_templating.py
+++ b/tests/test_templating.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_templating
~~~~~~~~~~~~~~~~
@@ -21,7 +20,7 @@ def test_layout_overloading(make_app, app_params):
setup_documenters(app)
app.builder.build_update()
- result = (app.outdir / 'contents.html').text(encoding='utf-8')
+ result = (app.outdir / 'index.html').text(encoding='utf-8')
assert '<!-- layout overloading -->' in result
diff --git a/tests/test_theming.py b/tests/test_theming.py
index 43e97473d..3814bb599 100644
--- a/tests/test_theming.py
+++ b/tests/test_theming.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_theming
~~~~~~~~~~~~
diff --git a/tests/test_toctree.py b/tests/test_toctree.py
index e37862ae0..7453797b7 100644
--- a/tests/test_toctree.py
+++ b/tests/test_toctree.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_toctree
~~~~~~~~~~~~
diff --git a/tests/test_transforms_post_transforms_code.py b/tests/test_transforms_post_transforms_code.py
index e1c45ce65..1e7f81270 100644
--- a/tests/test_transforms_post_transforms_code.py
+++ b/tests/test_transforms_post_transforms_code.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_transforms_post_transforms_code
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_util.py b/tests/test_util.py
index c6645220e..43fcd978d 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util
~~~~~~~~~~~~~~~
@@ -9,36 +8,51 @@
:license: BSD, see LICENSE for details.
"""
+import os
+import tempfile
+
import pytest
from mock import patch
-from six import PY2
-
import sphinx
from sphinx.errors import PycodeError
from sphinx.testing.util import strip_escseq
from sphinx.util import (
- display_chunk, encode_uri, get_module_source, parselinenos, status_iterator,
+ display_chunk, encode_uri, ensuredir, get_module_source, parselinenos, status_iterator,
xmlname_checker
)
from sphinx.util import logging
def test_encode_uri():
- expected = (u'https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
- u'%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_'
- u'%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85')
- uri = (u'https://ru.wikipedia.org/wiki'
- u'/Система_управления_базами_данных')
+ expected = ('https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
+ '%D1%83%D0%BF%D1%80%D0%B0%D0%B2%D0%BB%D0%B5%D0%BD%D0%B8%D1%8F_'
+ '%D0%B1%D0%B0%D0%B7%D0%B0%D0%BC%D0%B8_%D0%B4%D0%B0%D0%BD%D0%BD%D1%8B%D1%85')
+ uri = ('https://ru.wikipedia.org/wiki'
+ '/Система_управления_базами_данных')
assert expected == encode_uri(uri)
- expected = (u'https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A'
- u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
- uri = (u'https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A'
- u'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
+ expected = ('https://github.com/search?utf8=%E2%9C%93&q=is%3Aissue+is%3Aopen+is%3A'
+ 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
+ uri = ('https://github.com/search?utf8=✓&q=is%3Aissue+is%3Aopen+is%3A'
+ 'sprint-friendly+user%3Ajupyter&type=Issues&ref=searchresults')
assert expected == encode_uri(uri)
+def test_ensuredir():
+ with tempfile.TemporaryDirectory() as tmp_path:
+ # Does not raise an exception for an existing directory.
+ ensuredir(tmp_path)
+
+ path = os.path.join(tmp_path, 'a', 'b', 'c')
+ ensuredir(path)
+ assert os.path.isdir(path)
+
+ with tempfile.NamedTemporaryFile() as tmp:
+ with pytest.raises(OSError):
+ ensuredir(tmp.name)
+
+
def test_display_chunk():
assert display_chunk('hello') == 'hello'
assert display_chunk(['hello']) == 'hello'
@@ -48,10 +62,7 @@ def test_display_chunk():
def test_get_module_source():
- if PY2:
- assert get_module_source('sphinx') == ('file', sphinx.__file__.replace('.pyc', '.py'))
- else:
- assert get_module_source('sphinx') == ('file', sphinx.__file__)
+ assert get_module_source('sphinx') == ('file', sphinx.__file__)
# failed to obtain source information from builtin modules
with pytest.raises(PycodeError):
diff --git a/tests/test_util_docstrings.py b/tests/test_util_docstrings.py
index 1bdda1021..bfd5b58b4 100644
--- a/tests/test_util_docstrings.py
+++ b/tests/test_util_docstrings.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_docstrings
~~~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_util_docutils.py b/tests/test_util_docutils.py
index 9319863e0..9b71568dd 100644
--- a/tests/test_util_docutils.py
+++ b/tests/test_util_docutils.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_docutils
~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_util_fileutil.py b/tests/test_util_fileutil.py
index 7b54d5dc1..7d219c4c8 100644
--- a/tests/test_util_fileutil.py
+++ b/tests/test_util_fileutil.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_fileutil
~~~~~~~~~~~~~~~~~~
@@ -17,7 +16,7 @@ from sphinx.util.fileutil import copy_asset, copy_asset_file
class DummyTemplateLoader(BuiltinTemplateLoader):
def __init__(self):
- BuiltinTemplateLoader.__init__(self)
+ super().__init__()
builder = mock.Mock()
builder.config.templates_path = []
builder.app.translater = None
diff --git a/tests/test_util_i18n.py b/tests/test_util_i18n.py
index 63496bccb..86df387ad 100644
--- a/tests/test_util_i18n.py
+++ b/tests/test_util_i18n.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_i18n
~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import datetime
import os
@@ -178,7 +176,7 @@ def test_format_date():
assert i18n.format_date(format, date=date, language='') == 'February 07, 2016'
assert i18n.format_date(format, date=date, language='unknown') == 'February 07, 2016'
assert i18n.format_date(format, date=date, language='en') == 'February 07, 2016'
- assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016'
+ assert i18n.format_date(format, date=date, language='ja') == '2月 07, 2016'
assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016'
# raw string
diff --git a/tests/test_util_images.py b/tests/test_util_images.py
index 079cdb8cb..37f426b7b 100644
--- a/tests/test_util_images.py
+++ b/tests/test_util_images.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_images
~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
@@ -19,7 +17,7 @@ from sphinx.util.images import (
GIF_FILENAME = 'img.gif'
PNG_FILENAME = 'img.png'
PDF_FILENAME = 'img.pdf'
-TXT_FILENAME = 'contents.txt'
+TXT_FILENAME = 'index.txt'
@pytest.fixture(scope='module')
diff --git a/tests/test_util_inspect.py b/tests/test_util_inspect.py
index 397560d94..9746a3e2a 100644
--- a/tests/test_util_inspect.py
+++ b/tests/test_util_inspect.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_inspect
~~~~~~~~~~~~~~~
@@ -13,7 +12,6 @@ import sys
from textwrap import dedent
import pytest
-from six import PY3
from sphinx.util import inspect
@@ -25,15 +23,11 @@ def test_getargspec():
spec = inspect.getargspec(func)
assert spec.args == ['a', 'b', 'c', 'd']
assert spec.varargs == 'e'
- if PY3:
- assert spec.varkw == 'f'
- assert spec.defaults == (1, 2)
- assert spec.kwonlyargs == []
- assert spec.kwonlydefaults is None
- assert spec.annotations == {}
- else:
- assert spec.keywords == 'f'
- assert spec.defaults == [1, 2]
+ assert spec.varkw == 'f'
+ assert spec.defaults == (1, 2)
+ assert spec.kwonlyargs == []
+ assert spec.kwonlydefaults is None
+ assert spec.annotations == {}
def test_getargspec_partial():
@@ -42,19 +36,13 @@ def test_getargspec_partial():
partial = functools.partial(func1, 10, c=11)
spec = inspect.getargspec(partial)
- if PY3:
- assert spec.args == ['b']
- assert spec.varargs is None
- assert spec.varkw == 'f'
- assert spec.defaults is None
- assert spec.kwonlyargs == ['c', 'd']
- assert spec.kwonlydefaults == {'c': 11, 'd': 2}
- assert spec.annotations == {}
- else:
- assert spec.args == ['b', 'd']
- assert spec.varargs == 'e'
- assert spec.keywords == 'f'
- assert spec.defaults == [2]
+ assert spec.args == ['b']
+ assert spec.varargs is None
+ assert spec.varkw == 'f'
+ assert spec.defaults is None
+ assert spec.kwonlyargs == ['c', 'd']
+ assert spec.kwonlydefaults == {'c': 11, 'd': 2}
+ assert spec.annotations == {}
def test_getargspec_partial2():
@@ -62,19 +50,8 @@ def test_getargspec_partial2():
pass
p = functools.partial(fun, 10, c=11)
- if PY3:
- # Python 3's partial is rather cleverer than Python 2's, and we
- # have to jump through some hoops to define an equivalent function
- # in a way that won't confuse Python 2's parser:
- ns = {}
- exec(dedent("""
- def f_expected(b, *, c=11, d=2):
- pass
- """), ns)
- f_expected = ns["f_expected"]
- else:
- def f_expected(b, d=2):
- pass
+ def f_expected(b, *, c=11, d=2):
+ pass
expected = inspect.getargspec(f_expected)
assert expected == inspect.getargspec(p)
@@ -105,13 +82,8 @@ def test_getargspec_bound_methods():
pass
assert expected_unbound == inspect.getargspec(Foo.method)
- if PY3 and sys.version_info >= (3, 4, 4):
- # On py2, the inspect functions don't properly handle bound
- # methods (they include a spurious 'self' argument)
- assert expected_bound == inspect.getargspec(bound_method)
- # On py2, the inspect functions can't properly handle wrapped
- # functions (no __wrapped__ support)
- assert expected_bound == inspect.getargspec(wrapped_bound_method)
+ assert expected_bound == inspect.getargspec(bound_method)
+ assert expected_bound == inspect.getargspec(wrapped_bound_method)
def test_Signature():
@@ -143,10 +115,7 @@ def test_Signature_partial():
p = functools.partial(fun, 10, c=11)
sig = inspect.Signature(p).format_args()
- if sys.version_info < (3,):
- assert sig == '(b, d=2)'
- else:
- assert sig == '(b, *, c=11, d=2)'
+ assert sig == '(b, *, c=11, d=2)'
def test_Signature_methods():
@@ -193,20 +162,13 @@ def test_Signature_methods():
# wrapped bound method
sig = inspect.Signature(wrapped_bound_method).format_args()
- if sys.version_info < (3,):
- assert sig == '(*args, **kwargs)'
- elif sys.version_info < (3, 4, 4):
- assert sig == '(self, arg1, **kwargs)'
- else:
- assert sig == '(arg1, **kwargs)'
+ assert sig == '(arg1, **kwargs)'
-@pytest.mark.skipif(sys.version_info < (3, 4),
- reason='functools.partialmethod is available on py34 or above')
def test_Signature_partialmethod():
from functools import partialmethod
- class Foo(object):
+ class Foo:
def meth1(self, arg1, arg2, arg3=None, arg4=None):
pass
@@ -228,11 +190,9 @@ def test_Signature_partialmethod():
assert sig == '()'
-@pytest.mark.skipif(sys.version_info < (3, 4),
- reason='type annotation test is available on py34 or above')
def test_Signature_annotations():
- from typing_test_data import (
- f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, Node)
+ from typing_test_data import (f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10,
+ f11, f12, f13, f14, f15, f16, f17, Node)
# Class annotations
sig = inspect.Signature(f0).format_args()
@@ -301,6 +261,14 @@ def test_Signature_annotations():
sig = inspect.Signature(f15).format_args()
assert sig == '(x: Unknown, y: int) -> Any'
+ # keyword only arguments (1)
+ sig = inspect.Signature(f16).format_args()
+ assert sig == '(arg1, arg2, *, arg3=None, arg4=None)'
+
+ # keyword only arguments (2)
+ sig = inspect.Signature(f17).format_args()
+ assert sig == '(*, arg3, arg4)'
+
# type hints by string
sig = inspect.Signature(Node.children).format_args()
if (3, 5, 0) <= sys.version_info < (3, 5, 3):
@@ -313,7 +281,7 @@ def test_Signature_annotations():
def test_safe_getattr_with_default():
- class Foo(object):
+ class Foo:
def __getattr__(self, item):
raise Exception
@@ -325,7 +293,7 @@ def test_safe_getattr_with_default():
def test_safe_getattr_with_exception():
- class Foo(object):
+ class Foo:
def __getattr__(self, item):
raise Exception
@@ -340,7 +308,7 @@ def test_safe_getattr_with_exception():
def test_safe_getattr_with_property_exception():
- class Foo(object):
+ class Foo:
@property
def bar(self):
raise Exception
@@ -356,7 +324,7 @@ def test_safe_getattr_with_property_exception():
def test_safe_getattr_with___dict___override():
- class Foo(object):
+ class Foo:
@property
def __dict__(self):
raise Exception
@@ -380,23 +348,29 @@ def test_dictionary_sorting():
def test_set_sorting():
set_ = set("gfedcba")
description = inspect.object_description(set_)
- if PY3:
- assert description == "{'a', 'b', 'c', 'd', 'e', 'f', 'g'}"
- else:
- assert description == "set(['a', 'b', 'c', 'd', 'e', 'f', 'g'])"
+ assert description == "{'a', 'b', 'c', 'd', 'e', 'f', 'g'}"
def test_set_sorting_fallback():
set_ = set((None, 1))
description = inspect.object_description(set_)
- if PY3:
- assert description in ("{1, None}", "{None, 1}")
- else:
- assert description in ("set([1, None])", "set([None, 1])")
+ assert description in ("{1, None}", "{None, 1}")
+
+
+def test_frozenset_sorting():
+ frozenset_ = frozenset("gfedcba")
+ description = inspect.object_description(frozenset_)
+ assert description == "frozenset({'a', 'b', 'c', 'd', 'e', 'f', 'g'})"
+
+
+def test_frozenset_sorting_fallback():
+ frozenset_ = frozenset((None, 1))
+ description = inspect.object_description(frozenset_)
+ assert description in ("frozenset({1, None})", "frozenset({None, 1})")
def test_dict_customtype():
- class CustomType(object):
+ class CustomType:
def __init__(self, value):
self._value = value
@@ -423,10 +397,5 @@ def test_isstaticmethod():
assert inspect.isstaticmethod(Foo.method1, Foo, 'method1') is True
assert inspect.isstaticmethod(Foo.method2, Foo, 'method2') is False
-
- if sys.version_info < (3, 0):
- assert inspect.isstaticmethod(Bar.method1, Bar, 'method1') is False
- assert inspect.isstaticmethod(Bar.method2, Bar, 'method2') is False
- else:
- assert inspect.isstaticmethod(Bar.method1, Bar, 'method1') is True
- assert inspect.isstaticmethod(Bar.method2, Bar, 'method2') is False
+ assert inspect.isstaticmethod(Bar.method1, Bar, 'method1') is True
+ assert inspect.isstaticmethod(Bar.method2, Bar, 'method2') is False
diff --git a/tests/test_util_inventory.py b/tests/test_util_inventory.py
index af3a819cd..7491b217d 100644
--- a/tests/test_util_inventory.py
+++ b/tests/test_util_inventory.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_inventory
~~~~~~~~~~~~~~~~~~~
@@ -11,8 +10,7 @@
import posixpath
import zlib
-
-from six import BytesIO
+from io import BytesIO
from sphinx.ext.intersphinx import InventoryFile
@@ -22,14 +20,14 @@ inventory_v1 = '''\
# Version: 1.0
module mod foo.html
module.cls class foo.html
-'''.encode('utf-8')
+'''.encode()
inventory_v2 = '''\
# Sphinx inventory version 2
# Project: foo
# Version: 2.0
# The remainder of this file is compressed with zlib.
-'''.encode('utf-8') + zlib.compress('''\
+'''.encode() + zlib.compress('''\
module1 py:module 0 foo.html#module-module1 Long Module desc
module2 py:module 0 foo.html#module-$ -
module1.func py:function 1 sub/foo.html#$ -
@@ -48,16 +46,16 @@ foo.bar js:class 1 index.html#foo.bar -
foo.bar.baz js:method 1 index.html#foo.bar.baz -
foo.bar.qux js:data 1 index.html#foo.bar.qux -
a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
-'''.encode('utf-8'))
+'''.encode())
inventory_v2_not_having_version = '''\
# Sphinx inventory version 2
# Project: foo
-# Version:
+# Version:
# The remainder of this file is compressed with zlib.
-'''.encode('utf-8') + zlib.compress('''\
+'''.encode() + zlib.compress('''\
module1 py:module 0 foo.html#module-module1 Long Module desc
-'''.encode('utf-8'))
+'''.encode())
def test_read_inventory_v1():
diff --git a/tests/test_util_jsdump.py b/tests/test_util_jsdump.py
index 8f98c79ac..d93c6ecd0 100644
--- a/tests/test_util_jsdump.py
+++ b/tests/test_util_jsdump.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
from sphinx.util.jsdump import dumps, loads
@@ -11,7 +10,7 @@ def test_jsdump():
assert dumps(data) == '{a1:1}'
assert data == loads(dumps(data))
- data = {u'a\xe8': 1}
+ data = {'a\xe8': 1}
assert dumps(data) == '{"a\\u00e8":1}'
assert data == loads(dumps(data))
diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py
index 98affa886..41983c62f 100644
--- a/tests/test_util_logging.py
+++ b/tests/test_util_logging.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_logging
~~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import codecs
import os
@@ -20,7 +18,7 @@ from sphinx.errors import SphinxWarning
from sphinx.testing.util import strip_escseq
from sphinx.util import logging
from sphinx.util.console import colorize
-from sphinx.util.logging import is_suppressed_warning
+from sphinx.util.logging import is_suppressed_warning, prefixed_warnings
from sphinx.util.parallel import ParallelTasks
@@ -304,7 +302,7 @@ def test_output_with_unencodable_char(app, status, warning):
# info with UnicodeEncodeError
status.truncate(0)
status.seek(0)
- logger.info(u"unicode \u206d...")
+ logger.info("unicode \u206d...")
assert status.getvalue() == "unicode ?...\n"
@@ -330,3 +328,22 @@ def test_skip_warningiserror(app, status, warning):
with logging.pending_warnings():
with logging.skip_warningiserror(False):
logger.warning('message')
+
+
+def test_prefixed_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1')
+ with prefixed_warnings('PREFIX:'):
+ logger.warning('message2')
+ with prefixed_warnings('Another PREFIX:'):
+ logger.warning('message3')
+ logger.warning('message4')
+ logger.warning('message5')
+
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: PREFIX: message2' in warning.getvalue()
+ assert 'WARNING: Another PREFIX: message3' in warning.getvalue()
+ assert 'WARNING: PREFIX: message4' in warning.getvalue()
+ assert 'WARNING: message5' in warning.getvalue()
diff --git a/tests/test_util_matching.py b/tests/test_util_matching.py
index fc38470d3..37f818f8c 100644
--- a/tests/test_util_matching.py
+++ b/tests/test_util_matching.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_matching
~~~~~~~~~~~~~~~~~~
diff --git a/tests/test_util_nodes.py b/tests/test_util_nodes.py
index d20b4b892..839334cd8 100644
--- a/tests/test_util_nodes.py
+++ b/tests/test_util_nodes.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_nodes
~~~~~~~~~~~~~~~
@@ -9,6 +8,7 @@
:license: BSD, see LICENSE for details.
"""
from textwrap import dedent
+from typing import Any
import pytest
from docutils import frontend
@@ -17,7 +17,7 @@ from docutils.parsers import rst
from docutils.utils import new_document
from sphinx.transforms import ApplySourceWorkaround
-from sphinx.util.nodes import extract_messages, clean_astext
+from sphinx.util.nodes import NodeMatcher, extract_messages, clean_astext
def _transform(doctree):
@@ -50,6 +50,42 @@ def assert_node_count(messages, node_type, expect_count):
% (node_type, node_list, count, expect_count))
+def test_NodeMatcher():
+ doctree = nodes.document(None, None)
+ doctree += nodes.paragraph('', 'Hello')
+ doctree += nodes.paragraph('', 'Sphinx', block=1)
+ doctree += nodes.paragraph('', 'World', block=2)
+ doctree += nodes.literal_block('', 'blah blah blah', block=3)
+
+ # search by node class
+ matcher = NodeMatcher(nodes.paragraph)
+ assert len(doctree.traverse(matcher)) == 3
+
+ # search by multiple node classes
+ matcher = NodeMatcher(nodes.paragraph, nodes.literal_block)
+ assert len(doctree.traverse(matcher)) == 4
+
+ # search by node attribute
+ matcher = NodeMatcher(block=1)
+ assert len(doctree.traverse(matcher)) == 1
+
+ # search by node attribute (Any)
+ matcher = NodeMatcher(block=Any)
+ assert len(doctree.traverse(matcher)) == 3
+
+ # search by both class and attribute
+ matcher = NodeMatcher(nodes.paragraph, block=Any)
+ assert len(doctree.traverse(matcher)) == 2
+
+ # mismatched
+ matcher = NodeMatcher(nodes.title)
+ assert len(doctree.traverse(matcher)) == 0
+
+ # search with Any does not match to Text node
+ matcher = NodeMatcher(blah=Any)
+ assert len(doctree.traverse(matcher)) == 0
+
+
@pytest.mark.parametrize(
'rst,node_cls,count',
[
diff --git a/tests/test_util_pycompat.py b/tests/test_util_pycompat.py
new file mode 100644
index 000000000..8dc8a9e34
--- /dev/null
+++ b/tests/test_util_pycompat.py
@@ -0,0 +1,42 @@
+"""
+ test_util_pycompat
+ ~~~~~~~~~~~~~~~~~~
+
+ Tests sphinx.util.pycompat functions.
+
+ :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import tempfile
+
+from sphinx.testing.util import strip_escseq
+from sphinx.util import logging
+from sphinx.util.pycompat import execfile_
+
+
+def test_execfile_python2(capsys, app, status, warning):
+ logging.setup(app, status, warning)
+
+ ns = {}
+ with tempfile.NamedTemporaryFile() as tmp:
+ tmp.write(b'print "hello"\n')
+ tmp.flush()
+ execfile_(tmp.name, ns)
+ msg = (
+ 'Support for evaluating Python 2 syntax is deprecated '
+ 'and will be removed in Sphinx 4.0. '
+ 'Convert %s to Python 3 syntax.\n' % tmp.name)
+ assert msg in strip_escseq(warning.getvalue())
+ captured = capsys.readouterr()
+ assert captured.out == 'hello\n'
+
+
+def test_execfile(capsys):
+ ns = {}
+ with tempfile.NamedTemporaryFile() as tmp:
+ tmp.write(b'print("hello")\n')
+ tmp.flush()
+ execfile_(tmp.name, ns)
+ captured = capsys.readouterr()
+ assert captured.out == 'hello\n'
diff --git a/tests/test_util_rst.py b/tests/test_util_rst.py
index 07b9174cc..8c3627439 100644
--- a/tests/test_util_rst.py
+++ b/tests/test_util_rst.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_util_rst
~~~~~~~~~~~~~~~
@@ -8,7 +7,10 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from sphinx.util.rst import escape
+
+from docutils.statemachine import StringList
+
+from sphinx.util.rst import append_epilog, escape, prepend_prolog
def test_escape():
@@ -16,3 +18,68 @@ def test_escape():
assert escape('footnote [#]_') == r'footnote \[\#\]\_'
assert escape('sphinx.application') == r'sphinx.application'
assert escape('.. toctree::') == r'\.. toctree\:\:'
+
+
+def test_append_epilog(app):
+ epilog = 'this is rst_epilog\ngood-bye reST!'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ append_epilog(content, epilog)
+
+ assert list(content.xitems()) == [('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator'),
+ ('<generated>', 0, ''),
+ ('<rst_epilog>', 0, 'this is rst_epilog'),
+ ('<rst_epilog>', 1, 'good-bye reST!')]
+
+
+def test_prepend_prolog(app):
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList([':title: test of SphinxFileInput',
+ ':author: Sphinx team',
+ '',
+ 'hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('dummy.rst', 0, ':title: test of SphinxFileInput'),
+ ('dummy.rst', 1, ':author: Sphinx team'),
+ ('<generated>', 0, ''),
+ ('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 2, ''),
+ ('dummy.rst', 3, 'hello Sphinx world'),
+ ('dummy.rst', 4, 'Sphinx is a document generator')]
+
+
+def test_prepend_prolog_with_CR(app):
+ # prolog having CR at tail
+ prolog = 'this is rst_prolog\nhello reST!\n'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
+
+
+def test_prepend_prolog_without_CR(app):
+ # prolog not having CR at tail
+ prolog = 'this is rst_prolog\nhello reST!'
+ content = StringList(['hello Sphinx world',
+ 'Sphinx is a document generator'],
+ 'dummy.rst')
+ prepend_prolog(content, prolog)
+
+ assert list(content.xitems()) == [('<rst_prolog>', 0, 'this is rst_prolog'),
+ ('<rst_prolog>', 1, 'hello reST!'),
+ ('<generated>', 0, ''),
+ ('dummy.rst', 0, 'hello Sphinx world'),
+ ('dummy.rst', 1, 'Sphinx is a document generator')]
diff --git a/tests/test_versioning.py b/tests/test_versioning.py
index 240f293dc..009af98aa 100644
--- a/tests/test_versioning.py
+++ b/tests/test_versioning.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_versioning
~~~~~~~~~~~~~~~
@@ -128,6 +127,6 @@ def test_insert_similar():
new_nodes = list(merge_doctrees(original, insert_similar, is_paragraph))
uids = [n.uid for n in insert_similar.traverse(is_paragraph)]
assert len(new_nodes) == 1
- assert new_nodes[0].rawsource == u'Anyway I need more'
+ assert new_nodes[0].rawsource == 'Anyway I need more'
assert original_uids[0] == uids[0]
assert original_uids[1:] == uids[2:]
diff --git a/tests/test_websupport.py b/tests/test_websupport.py
deleted file mode 100644
index bf12cbade..000000000
--- a/tests/test_websupport.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- test_websupport
- ~~~~~~~~~~~~~~~
-
- Test the Web Support Package
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import pytest
-
-from sphinx.websupport import WebSupport
-try:
- sqlalchemy_missing = False
- import sqlalchemy # NOQA
-except ImportError:
- sqlalchemy_missing = True
-
-
-@pytest.mark.skipif(sqlalchemy_missing, reason='needs sqlalchemy')
-def test_build(request, rootdir, sphinx_test_tempdir):
- settings = {
- 'srcdir': rootdir / 'test-basic',
- # to use same directory for 'builddir' in each 'support' fixture, using
- # 'sphinx_test_tempdir' (static) value instead of 'tempdir' fixture value.
- # each test expect result of db value at previous test case.
- 'builddir': sphinx_test_tempdir / 'websupport'
- }
- marker = request.node.get_marker('support')
- if marker:
- settings.update(marker.kwargs)
-
- support = WebSupport(**settings)
- support.build()
diff --git a/tests/test_writer_latex.py b/tests/test_writer_latex.py
index b0a84e944..ce6351111 100644
--- a/tests/test_writer_latex.py
+++ b/tests/test_writer_latex.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
test_writer_latex
~~~~~~~~~~~~~~~~
@@ -8,7 +7,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
import pytest
diff --git a/tests/typing_test_data.py b/tests/typing_test_data.py
index fb87b645c..2e0e27ced 100644
--- a/tests/typing_test_data.py
+++ b/tests/typing_test_data.py
@@ -80,6 +80,14 @@ def f15(x: "Unknown", y: "int") -> Any:
pass
+def f16(arg1, arg2, *, arg3=None, arg4=None):
+ pass
+
+
+def f17(*, arg3, arg4):
+ pass
+
+
class Node:
def __init__(self, parent: Optional['Node']) -> None:
pass
diff --git a/tox.ini b/tox.ini
index f189a778c..20f8cfdc8 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,27 +1,21 @@
[tox]
-minversion = 2.0
-envlist = docs,flake8,mypy,coverage,py{27,34,35,36,37,38,py},du{11,12,13,14}
+minversion = 2.4.0
+envlist = docs,flake8,mypy,coverage,py{35,36,37,38},du{12,13,14}
[testenv]
usedevelop = True
passenv =
https_proxy http_proxy no_proxy PERL PERL5LIB PYTEST_ADDOPTS EPUBCHECK_PATH
description =
- py{27,34,35,36,37,38,py}: Run unit tests against {envname}.
- du{11,12,13,14}: Run unit tests with the given version of docutils.
-
-# TODO(stephenfin) Replace this with the 'extras' config option when tox 2.4 is
-# widely available, likely some time after the Ubuntu 18.04 release
-#
-# https://tox.readthedocs.io/en/latest/config.html#confval-extras=MULTI-LINE-LIST
+ py{35,36,37,38}: Run unit tests against {envname}.
+ du{12,13,14}: Run unit tests with the given version of docutils.
deps =
- .[test,websupport]
- du11: docutils==0.11
du12: docutils==0.12
du13: docutils==0.13.1
du14: docutils==0.14
-# This entry should be also removed after moving 'extras' option
-install_command = pip install {opts} {packages}
+extras =
+ test
+ websupport
setenv =
PYTHONWARNINGS = all,ignore::ImportWarning:pkgutil,ignore::ImportWarning:importlib._bootstrap,ignore::ImportWarning:importlib._bootstrap_external,ignore::ImportWarning:pytest_cov.plugin,ignore::DeprecationWarning:site,ignore::DeprecationWarning:_pytest.assertion.rewrite,ignore::DeprecationWarning:_pytest.fixtures,ignore::DeprecationWarning:distutils
SPHINX_TEST_TEMPDIR = {envdir}/testbuild
@@ -61,6 +55,7 @@ description =
Run type checks.
deps =
mypy
+ docutils-stubs
commands=
mypy sphinx/
@@ -68,5 +63,7 @@ commands=
basepython = python3
description =
Build documentation.
+deps =
+ sphinxcontrib-websupport
commands =
python setup.py build_sphinx {posargs}
diff --git a/utils/bump_version.py b/utils/bump_version.py
index 444eefe07..6491a38e3 100755
--- a/utils/bump_version.py
+++ b/utils/bump_version.py
@@ -1,6 +1,4 @@
-#!/usr/bin/env python
-
-from __future__ import print_function
+#!/usr/bin/env python3
import argparse
import os
@@ -84,7 +82,7 @@ def processing(message):
print('done')
-class Changes(object):
+class Changes:
def __init__(self, path):
self.path = path
self.fetch_version()
diff --git a/utils/checks.py b/utils/checks.py
index 3d421b385..cf60a4f6e 100644
--- a/utils/checks.py
+++ b/utils/checks.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
"""
utils.checks
~~~~~~~~~~~~
@@ -60,7 +59,7 @@ def sphinx_has_header(physical_line, filename, lines, line_number):
# line number correction
offset = 1
- if lines[0:1] == ['#!/usr/bin/env python\n']:
+ if lines[0:1] == ['#!/usr/bin/env python3\n']:
lines = lines[1:]
offset = 2
@@ -70,9 +69,6 @@ def sphinx_has_header(physical_line, filename, lines, line_number):
for lno, line in enumerate(lines):
llist.append(line)
if lno == 0:
- if line != '# -*- coding: utf-8 -*-\n':
- return 0, 'X101 missing coding declaration'
- elif lno == 1:
if line != '"""\n' and line != 'r"""\n':
return 0, 'X101 missing docstring begin (""")'
else:
@@ -80,20 +76,20 @@ def sphinx_has_header(physical_line, filename, lines, line_number):
elif doc_open:
if line == '"""\n':
# end of docstring
- if lno <= 4:
+ if lno <= 3:
return 0, 'X101 missing module name in docstring'
break
if line != '\n' and line[:4] != ' ' and doc_open:
return 0, 'X101 missing correct docstring indentation'
- if lno == 2:
+ if lno == 1:
mod_name_len = len(line.strip())
if line.strip() != mod_name:
- return 4, 'X101 wrong module name in docstring heading'
- elif lno == 3:
+ return 2, 'X101 wrong module name in docstring heading'
+ elif lno == 2:
if line.strip() != mod_name_len * '~':
- return (4, 'X101 wrong module name underline, should be '
+ return (3, 'X101 wrong module name underline, should be '
'~~~...~')
else:
return 0, 'X101 missing end and/or start of docstring...'
diff --git a/utils/jssplitter_generator.py b/utils/jssplitter_generator.py
index 684749973..c7bf162ef 100644
--- a/utils/jssplitter_generator.py
+++ b/utils/jssplitter_generator.py
@@ -1,11 +1,8 @@
-# -*- coding: utf-8 -*-
import json
import re
import subprocess
import sys
-import six
-
# find char codes they are matched with Python's (?u)\\w
match = re.compile(r'(?u)\w')
@@ -16,7 +13,7 @@ singles = []
for i in range(65536):
# 0xd800-0xdfff is surrogate pair area. skip this.
- if not match.match(six.unichr(i)) and not (0xd800 <= i <= 0xdfff):
+ if not match.match(chr(i)) and not (0xd800 <= i <= 0xdfff):
if begin == -1:
begin = i
elif begin != -1:
@@ -114,7 +111,7 @@ console.log(' ... ok\\n')
''' % js_src
-python_src = '''# -*- coding: utf-8 -*-
+python_src = '''\
"""
sphinx.search.jssplitter
~~~~~~~~~~~~~~~~~~~~~~~~
@@ -136,7 +133,7 @@ with open('../sphinx/search/jssplitter.py', 'w') as f:
f.write(python_src)
with open('./regression_test.js', 'w') as f:
- f.write(js_test_src.encode('utf-8'))
+ f.write(js_test_src.encode())
print("starting test...")
result = subprocess.call(['node', './regression_test.js'])