summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/ISSUE_TEMPLATE.md27
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md18
-rw-r--r--.travis.yml18
-rw-r--r--AUTHORS2
-rw-r--r--CHANGES154
-rw-r--r--CONTRIBUTING.rst3
-rw-r--r--EXAMPLES36
-rw-r--r--Makefile34
-rw-r--r--doc/_static/conf.py.txt7
-rw-r--r--doc/builders.rst29
-rw-r--r--doc/config.rst152
-rw-r--r--doc/contents.rst1
-rw-r--r--doc/develop.rst8
-rw-r--r--doc/domains.rst14
-rw-r--r--doc/ext/doctest.rst15
-rw-r--r--doc/ext/example_google.py2
-rw-r--r--doc/ext/example_numpy.py2
-rw-r--r--doc/ext/graphviz.rst17
-rw-r--r--doc/extdev/appapi.rst20
-rw-r--r--doc/extdev/collectorapi.rst9
-rw-r--r--doc/extdev/index.rst2
-rw-r--r--doc/extdev/logging.rst77
-rw-r--r--doc/extdev/nodes.rst1
-rw-r--r--doc/extdev/tutorial.rst25
-rw-r--r--doc/faq.rst2
-rw-r--r--doc/intl.rst2
-rw-r--r--doc/invocation.rst2
-rw-r--r--doc/latex.rst32
-rw-r--r--doc/markup/code.rst18
-rw-r--r--doc/markup/misc.rst91
-rw-r--r--doc/markup/toctree.rst2
-rw-r--r--doc/setuptools.rst178
-rw-r--r--doc/templating.rst64
-rw-r--r--doc/tutorial.rst1
-rw-r--r--mypy.ini8
-rw-r--r--setup.cfg3
-rw-r--r--setup.py1
-rw-r--r--sphinx/__init__.py29
-rw-r--r--sphinx/addnodes.py28
-rw-r--r--sphinx/apidoc.py35
-rw-r--r--sphinx/application.py698
-rw-r--r--sphinx/builders/__init__.py206
-rw-r--r--sphinx/builders/applehelp.py59
-rw-r--r--sphinx/builders/changes.py45
-rw-r--r--sphinx/builders/devhelp.py25
-rw-r--r--sphinx/builders/dummy.py13
-rw-r--r--sphinx/builders/epub.py409
-rw-r--r--sphinx/builders/epub3.py293
-rw-r--r--sphinx/builders/gettext.py85
-rw-r--r--sphinx/builders/html.py366
-rw-r--r--sphinx/builders/htmlhelp.py38
-rw-r--r--sphinx/builders/latex.py143
-rw-r--r--sphinx/builders/linkcheck.py71
-rw-r--r--sphinx/builders/manpage.py34
-rw-r--r--sphinx/builders/qthelp.py50
-rw-r--r--sphinx/builders/texinfo.py64
-rw-r--r--sphinx/builders/text.py22
-rw-r--r--sphinx/builders/websupport.py25
-rw-r--r--sphinx/builders/xml.py19
-rw-r--r--sphinx/cmdline.py40
-rw-r--r--sphinx/config.py109
-rw-r--r--sphinx/deprecation.py10
-rw-r--r--sphinx/directives/__init__.py26
-rw-r--r--sphinx/directives/code.py456
-rw-r--r--sphinx/directives/other.py38
-rw-r--r--sphinx/directives/patches.py8
-rw-r--r--sphinx/domains/__init__.py73
-rw-r--r--sphinx/domains/c.py35
-rw-r--r--sphinx/domains/cpp.py742
-rw-r--r--sphinx/domains/javascript.py23
-rw-r--r--sphinx/domains/python.py101
-rw-r--r--sphinx/domains/rst.py23
-rw-r--r--sphinx/domains/std.py137
-rw-r--r--sphinx/environment/__init__.py768
-rw-r--r--sphinx/environment/adapters/__init__.py10
-rw-r--r--sphinx/environment/adapters/indexentries.py (renamed from sphinx/environment/managers/indexentries.py)64
-rw-r--r--sphinx/environment/adapters/toctree.py336
-rw-r--r--sphinx/environment/collectors/__init__.py85
-rw-r--r--sphinx/environment/collectors/asset.py154
-rw-r--r--sphinx/environment/collectors/dependencies.py66
-rw-r--r--sphinx/environment/collectors/indexentries.py66
-rw-r--r--sphinx/environment/collectors/metadata.py79
-rw-r--r--sphinx/environment/collectors/title.py72
-rw-r--r--sphinx/environment/collectors/toctree.py294
-rw-r--r--sphinx/errors.py10
-rw-r--r--sphinx/events.py86
-rw-r--r--sphinx/ext/autodoc.py229
-rw-r--r--sphinx/ext/autosectionlabel.py8
-rw-r--r--sphinx/ext/autosummary/__init__.py81
-rw-r--r--sphinx/ext/autosummary/generate.py77
-rw-r--r--sphinx/ext/coverage.py63
-rw-r--r--sphinx/ext/doctest.py171
-rw-r--r--sphinx/ext/graphviz.py64
-rw-r--r--sphinx/ext/ifconfig.py16
-rw-r--r--sphinx/ext/imgmath.py42
-rw-r--r--sphinx/ext/inheritance_diagram.py41
-rw-r--r--sphinx/ext/intersphinx.py212
-rw-r--r--sphinx/ext/linkcode.py9
-rw-r--r--sphinx/ext/mathbase.py39
-rw-r--r--sphinx/ext/napoleon/__init__.py12
-rw-r--r--sphinx/ext/napoleon/docstring.py169
-rw-r--r--sphinx/ext/napoleon/iterators.py21
-rw-r--r--sphinx/ext/pngmath.py43
-rw-r--r--sphinx/ext/todo.py37
-rw-r--r--sphinx/ext/viewcode.py52
-rw-r--r--sphinx/extension.py120
-rw-r--r--sphinx/highlighting.py49
-rw-r--r--sphinx/io.py42
-rw-r--r--sphinx/jinja2glue.py27
-rw-r--r--sphinx/locale/__init__.py84
-rw-r--r--sphinx/make_mode.py61
-rw-r--r--sphinx/parsers.py5
-rw-r--r--sphinx/pycode/__init__.py47
-rw-r--r--sphinx/pycode/nodes.py10
-rw-r--r--sphinx/pycode/pgen2/grammar.py18
-rw-r--r--sphinx/pycode/pgen2/parse.py15
-rw-r--r--sphinx/pycode/pgen2/pgen.py25
-rw-r--r--sphinx/pycode/pgen2/tokenize.py16
-rw-r--r--sphinx/quickstart.py34
-rw-r--r--sphinx/roles.py42
-rw-r--r--sphinx/search/__init__.py100
-rw-r--r--sphinx/search/en.py31
-rw-r--r--sphinx/search/ja.py25
-rw-r--r--sphinx/search/ro.py8
-rw-r--r--sphinx/search/tr.py8
-rw-r--r--sphinx/search/zh.py39
-rw-r--r--sphinx/setup_command.py84
-rw-r--r--sphinx/templates/epub2/container.xml6
-rw-r--r--sphinx/templates/epub2/content.opf_t37
-rw-r--r--sphinx/templates/epub2/mimetype1
-rw-r--r--sphinx/templates/epub2/toc.ncx_t15
-rw-r--r--sphinx/templates/epub3/container.xml6
-rw-r--r--sphinx/templates/epub3/content.opf_t46
-rw-r--r--sphinx/templates/epub3/mimetype1
-rw-r--r--sphinx/templates/epub3/nav.xhtml_t26
-rw-r--r--sphinx/templates/epub3/toc.ncx_t24
-rw-r--r--sphinx/templates/latex/latex.tex_t (renamed from sphinx/templates/latex/content.tex_t)9
-rw-r--r--sphinx/templates/latex/longtable.tex_t32
-rw-r--r--sphinx/templates/latex/tabular.tex_t29
-rw-r--r--sphinx/templates/latex/tabulary.tex_t29
-rw-r--r--sphinx/templates/quickstart/Makefile.new_t2
-rw-r--r--sphinx/templates/quickstart/Makefile_t2
-rw-r--r--sphinx/templates/quickstart/make.bat.new_t10
-rw-r--r--sphinx/templates/quickstart/make.bat_t27
-rw-r--r--sphinx/texinputs/Makefile_t92
-rw-r--r--sphinx/texinputs/footnotehyper-sphinx.sty372
-rw-r--r--sphinx/texinputs/latexmkjarc7
-rw-r--r--sphinx/texinputs/latexmkrc9
-rw-r--r--sphinx/texinputs/needspace.sty35
-rw-r--r--sphinx/texinputs/sphinx.sty189
-rw-r--r--sphinx/texinputs/sphinxhowto.cls11
-rw-r--r--sphinx/texinputs/sphinxmanual.cls11
-rw-r--r--sphinx/texinputs/sphinxmulticell.sty317
-rw-r--r--sphinx/themes/basic/layout.html8
-rw-r--r--sphinx/themes/basic/static/basic.css_t7
-rw-r--r--sphinx/theming.py39
-rw-r--r--sphinx/transforms/__init__.py129
-rw-r--r--sphinx/transforms/compact_bullet_list.py13
-rw-r--r--sphinx/transforms/i18n.py83
-rw-r--r--sphinx/transforms/post_transforms.py175
-rw-r--r--sphinx/util/__init__.py151
-rw-r--r--sphinx/util/compat.py50
-rw-r--r--sphinx/util/console.py18
-rw-r--r--sphinx/util/docfields.py67
-rw-r--r--sphinx/util/docstrings.py6
-rw-r--r--sphinx/util/docutils.py61
-rw-r--r--sphinx/util/fileutil.py14
-rw-r--r--sphinx/util/i18n.py104
-rw-r--r--sphinx/util/images.py8
-rw-r--r--sphinx/util/inspect.py22
-rw-r--r--sphinx/util/inventory.py164
-rw-r--r--sphinx/util/jsdump.py22
-rw-r--r--sphinx/util/jsonimpl.py11
-rw-r--r--sphinx/util/logging.py402
-rw-r--r--sphinx/util/matching.py15
-rw-r--r--sphinx/util/nodes.py69
-rw-r--r--sphinx/util/osutil.py38
-rw-r--r--sphinx/util/parallel.py55
-rw-r--r--sphinx/util/png.py2
-rw-r--r--sphinx/util/pycompat.py130
-rw-r--r--sphinx/util/requests.py18
-rw-r--r--sphinx/util/rst.py5
-rw-r--r--sphinx/util/smartypants.py14
-rw-r--r--sphinx/util/stemmer/__init__.py51
-rw-r--r--sphinx/util/stemmer/porter.py (renamed from sphinx/util/stemmer.py)25
-rw-r--r--sphinx/util/tags.py27
-rw-r--r--sphinx/util/template.py26
-rw-r--r--sphinx/util/texescape.py1
-rw-r--r--sphinx/util/typing.py24
-rw-r--r--sphinx/util/websupport.py5
-rw-r--r--sphinx/versioning.py13
-rw-r--r--sphinx/websupport/__init__.py10
-rw-r--r--sphinx/websupport/storage/sqlalchemy_db.py12
-rw-r--r--sphinx/websupport/storage/sqlalchemystorage.py2
-rw-r--r--sphinx/writers/html.py202
-rw-r--r--sphinx/writers/html5.py923
-rw-r--r--sphinx/writers/latex.py861
-rw-r--r--sphinx/writers/manpage.py113
-rw-r--r--sphinx/writers/texinfo.py354
-rw-r--r--sphinx/writers/text.py269
-rw-r--r--sphinx/writers/xml.py12
-rw-r--r--test-reqs.txt1
-rw-r--r--tests/etree13/ElementPath.py226
-rw-r--r--tests/etree13/ElementTree.py1553
-rw-r--r--tests/root/Makefile7
-rw-r--r--tests/root/conf.py2
-rw-r--r--tests/root/footnote.txt8
-rw-r--r--tests/roots/test-build-html-translator/conf.py19
-rw-r--r--tests/roots/test-build-html-translator/index.rst24
-rw-r--r--tests/roots/test-directive-code/dedent.rst35
-rw-r--r--tests/roots/test-directive-code/dedent_code.rst53
-rw-r--r--tests/roots/test-directive-code/lineno_match.rst26
-rw-r--r--tests/roots/test-directive-code/lineno_start.rst6
-rw-r--r--tests/roots/test-directive-code/linenos.rst12
-rw-r--r--tests/roots/test-directive-code/literal-diff.inc14
-rw-r--r--tests/roots/test-directive-code/py-decorators.inc16
-rw-r--r--tests/roots/test-directive-code/py-decorators.rst17
-rw-r--r--tests/roots/test-directive-code/target.py5
-rw-r--r--tests/roots/test-doctest/doctest.txt16
-rw-r--r--tests/roots/test-ext-viewcode/spam/mod1.py5
-rw-r--r--tests/roots/test-ext-viewcode/spam/mod2.py5
-rw-r--r--tests/roots/test-latex-table/complex.rst35
-rw-r--r--tests/roots/test-latex-table/conf.py7
-rw-r--r--tests/roots/test-latex-table/index.rst8
-rw-r--r--tests/roots/test-latex-table/longtable.rst132
-rw-r--r--tests/roots/test-latex-table/tabular.rst135
-rwxr-xr-xtests/run.py1
-rw-r--r--tests/test_api_translator.py24
-rw-r--r--tests/test_apidoc.py13
-rw-r--r--tests/test_application.py34
-rw-r--r--tests/test_build_epub.py247
-rw-r--r--tests/test_build_html.py52
-rw-r--r--tests/test_build_html5.py330
-rw-r--r--tests/test_build_latex.py251
-rw-r--r--tests/test_config.py21
-rw-r--r--tests/test_directive_code.py432
-rw-r--r--tests/test_domain_cpp.py14
-rw-r--r--tests/test_environment.py28
-rw-r--r--tests/test_environment_indexentries.py2
-rw-r--r--tests/test_ext_doctest.py16
-rw-r--r--tests/test_ext_graphviz.py16
-rw-r--r--tests/test_ext_inheritance_diagram.py2
-rw-r--r--tests/test_ext_intersphinx.py88
-rw-r--r--tests/test_ext_math.py8
-rw-r--r--tests/test_ext_napoleon_docstring.py4
-rw-r--r--tests/test_ext_viewcode.py1
-rw-r--r--tests/test_highlighting.py15
-rw-r--r--tests/test_intl.py11
-rw-r--r--tests/test_pycode.py90
-rw-r--r--tests/test_setup_command.py18
-rw-r--r--tests/test_util.py51
-rw-r--r--tests/test_util_i18n.py20
-rw-r--r--tests/test_util_inventory.py67
-rw-r--r--tests/test_util_logging.py245
-rw-r--r--tests/test_util_rst.py4
-rw-r--r--tox.ini14
-rwxr-xr-xutils/bump_version.py16
-rwxr-xr-xutils/check_sources.py3
258 files changed, 15807 insertions, 4894 deletions
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 000000000..4fbf6ff2e
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,27 @@
+Subject: <what happens when you do what, on which documentation project>
+
+### Problem
+- <Detail of problem>
+
+#### Procedure to reproduce the problem
+```
+<Paste the command line here which causes the problem>
+```
+
+#### Error logs / results
+```
+<Paste your error log here>
+```
+- <public link of unexpected result if you have>
+
+#### Expected results
+<Describe what you expected to happen instead>
+
+### Reproducible project / your project
+- <link to your project, or attach zipped small project sample>
+
+### Environment info
+- OS: <Unix/Linux/Mac/Win/other with version>
+- Python version:
+- Sphinx version:
+- <Extra tools e.g.: Browser, tex or something else>
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..073a57795
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,18 @@
+Subject: <short purpose of this pull request>
+
+### Feature or Bugfix
+<!-- please choose -->
+- Feature
+- Bugfix
+
+### Purpose
+- <long purpose of this pull request>
+- <Environment if this PR depends on>
+
+### Detail
+- <feature1 or bug1>
+- <feature2 or bug2>
+
+### Relates
+- <URL or Ticket>
+
diff --git a/.travis.yml b/.travis.yml
index 33c93038f..3557095dd 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,14 +1,15 @@
language: python
sudo: false
+dist: trusty
cache:
directories:
- $HOME/.cache/pip
python:
- - "pypy"
- - "2.7"
- - "3.4"
- - "3.5"
+ - "pypy-5.4.1"
- "3.6"
+ - "3.5"
+ - "3.4"
+ - "2.7"
- "nightly"
env:
global:
@@ -24,9 +25,11 @@ matrix:
env: DOCUTILS=0.12
- python: "3.5"
env: DOCUTILS=0.12
+ - python: "3.6"
+ env: DOCUTILS=0.12
- python: nightly
env: DOCUTILS=0.12
- - python: pypy
+ - python: "pypy-5.4.1"
env: DOCUTILS=0.12
addons:
apt:
@@ -36,13 +39,16 @@ addons:
- texlive-latex-extra
- texlive-fonts-recommended
- texlive-fonts-extra
+ - texlive-luatex
- texlive-xetex
- lmodern
+ - latex-xcolor
install:
- pip install -U pip setuptools
- pip install docutils==$DOCUTILS
- pip install -r test-reqs.txt
+ - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then python3.6 -m pip install mypy typed-ast; fi
script:
- flake8
- - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then make style-check test-async; fi
+ - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then make style-check type-check test-async; fi
- if [[ $TRAVIS_PYTHON_VERSION != '3.6' ]]; then make test; fi
diff --git a/AUTHORS b/AUTHORS
index 24897985e..1303e5b75 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -21,6 +21,7 @@ Other contributors, listed alphabetically, are:
* Henrique Bastos -- SVG support for graphviz extension
* Daniel Bültmann -- todo extension
* Jean-François Burnol -- LaTeX improvements
+* Marco Buttu -- doctest extension (pyversion option)
* Etienne Desautels -- apidoc module
* Michael Droettboom -- inheritance_diagram extension
* Charles Duffy -- original graphviz extension
@@ -67,6 +68,7 @@ Other contributors, listed alphabetically, are:
* Michael Wilson -- Intersphinx HTTP basic auth support
* Joel Wurtz -- cellspanning support in LaTeX
* Hong Xu -- svg support in imgmath extension and various bug fixes
+* Stephen Finucane -- setup command improvements and documentation
Many thanks for all contributions!
diff --git a/CHANGES b/CHANGES
index 4749337e6..23400160c 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,144 @@
+Release 1.6 (in development)
+============================
+
+Dependencies
+------------
+
+* (updated) latex output is tested with Ubuntu trusty's texlive packages (Feb.
+ 2014) and earlier tex installations may not be fully compliant, particularly
+ regarding Unicode engines xelatex and lualatex
+* (added) latexmk is requirement for ``make latexpdf`` on Unix-like platforms
+ (refs: #3082)
+
+Incompatible changes
+--------------------
+
+* #1061, #2336, #3235: Now generation of autosummary doesn't contain imported
+ members by default. Thanks to Luc Saffre.
+* LaTeX ``\includegraphics`` command isn't overloaded: only ``\sphinxincludegraphics``
+ has the custom code to fit image to available width if oversized.
+* The subclasses of ``sphinx.domains.Index`` should override ``generate()``
+ method. The default implementation raises NotImplementedError
+* LaTeX positioned long tables horizontally centered, and short ones
+ flushed left (no text flow around table.) The position now defaults to center in
+ both cases, and it will obey Docutils 0.13 ``:align:`` option (refs #3415, #3377)
+* option directive also allows all punctuations for the option name (refs: #3366)
+* #3413: if :rst:dir:`literalinclude`'s ``:start-after:`` is used, make ``:lines:``
+ relative (refs #3412)
+* ``literalinclude`` directive does not allow the combination of ``:diff:``
+ option and other options (refs: #3416)
+* LuaLaTeX engine uses ``fontspec`` like XeLaTeX. It is advised ``latex_engine
+ = 'lualatex'`` be used only on up-to-date TeX installs (refs #3070, #3466)
+* :confval:`latex_keep_old_macro_names` default value has been changed from
+ ``True`` to ``False``. This means that some LaTeX macros for styling are
+ by default defined only with ``\sphinx..`` prefixed names. (refs: #3429)
+* Footer "Continued on next page" of LaTeX longtable's now not framed (refs: #3497)
+* #3529: The arguments of ``BuildEnvironment.__init__`` are changed
+* #3082: Use latexmk for pdf (and dvi) targets (Unix-like platforms only)
+* latex made available (non documented) colour macros from a file distributed
+ with pdftex engine for Plain TeX. This is removed in order to provide better
+ support for multiple TeX engines. Only interface from ``color`` or
+ ``xcolor`` packages should be used by extensions of Sphinx latex writer.
+ (refs #3550)
+* ``Builder.env`` is not filled at instantiation
+
+Features removed
+----------------
+
+* Configuration variables
+
+ - epub3_contributor
+ - epub3_description
+ - epub3_page_progression_direction
+ - html_translator_class
+ - html_use_modindex
+ - latex_font_size
+ - latex_paper_size
+ - latex_preamble
+ - latex_use_modindex
+ - latex_use_parts
+
+* ``termsep`` node
+* defindex.html template
+* LDML format support in `today`, `today_fmt` and `html_last_updated_fmt`
+* ``:inline:`` option for the directives of sphinx.ext.graphviz extension
+* sphinx.ext.pngmath extension
+* ``sphinx.util.compat.make_admonition()``
+
+Features added
+--------------
+
+* #3136: Add ``:name:`` option to the directives in ``sphinx.ext.graphviz``
+* #2336: Add ``imported_members`` option to ``sphinx-autogen`` command to document
+ imported members.
+* C++, add ``:tparam-line-spec:`` option to templated declarations.
+ When specified, each template parameter will be rendered on a separate line.
+* #3359: Allow sphinx.js in a user locale dir to override sphinx.js from Sphinx
+* #3303: Add ``:pyversion:`` option to the doctest directive.
+* #3378: (latex) support for ``:widths:`` option of table directives
+ (refs: #3379, #3381)
+* #3402: Allow to suppress "download file not readable" warnings using
+ :confval:`suppress_warnings`.
+* #3377: latex: Add support for Docutils 0.13 ``:align:`` option for tables
+ (but does not implement text flow around table).
+* latex: footnotes from inside tables are hyperlinked (except from captions or
+ headers) (refs: #3422)
+* Emit warning if over-dedent has been detected on ``literalinclude`` directive
+ (refs: #3416)
+* Use for LuaLaTeX same default settings as for XeLaTeX (i.e. ``fontspec`` and
+ ``polyglossia``). (refs: #3070, #3466)
+* Make ``'extraclassoptions'`` key of ``latex_elements`` public (refs #3480)
+* #3463: Add warning messages for required EPUB3 metadata. Add default value to
+ ``epub_description`` to avoid warning like other settings.
+* #3476: setuptools: Support multiple builders
+* latex: merged cells in LaTeX tables allow code-blocks, lists, blockquotes...
+ as do normal cells (refs: #3435)
+* HTML builder uses experimental HTML5 writer if ``html_experimental_html5_builder`` is True
+  and docutils 0.13 or newer is installed.
+* LaTeX macros to customize space before and after tables in PDF output (refs #3504)
+* #3348: Show decorators in literalinclude and viewcode directives
+
+Bugs fixed
+----------
+
+* ``literalinclude`` directive expands tabs after dedent-ing (refs: #3416)
+* #1574: Paragraphs in table cells don't work in LaTeX output
+* #3288: Table with merged headers not wrapping text
+* #3491: Inconsistent vertical space around table and longtable in PDF
+* #3506: Depart functions for all admonitions in HTML writer now properly pass ``node`` to ``depart_admonition``.
+* #2693: Sphinx latex style file wrongly inhibits colours for section headings
+ for latex+dvi(ps,pdf,pdfmx)
+* C++, properly look up ``any`` references.
+
+Deprecated
+----------
+
+* ``sphinx.util.compat.Directive`` class is now deprecated. Please use instead
+  ``docutils.parsers.rst.Directive``
+* ``sphinx.util.compat.docutils_version`` is now deprecated
+* #2367: ``Sphinx.warn()``, ``Sphinx.info()`` and other logging methods are now
+ deprecated. Please use ``sphinx.util.logging`` (:ref:`logging-api`) instead.
+* #3318: ``notice`` is now deprecated as LaTeX environment name and will be
+ removed at Sphinx 1.7. Extension authors please use ``sphinxadmonition``
+ instead (as Sphinx does since 1.5.)
+* ``Sphinx.status_iterator()`` and ``Sphinx.old_status_iterator()`` are now
+  deprecated. Please use ``sphinx.util:status_iterator()`` instead.
+* ``BuildEnvironment.set_warnfunc()`` is now deprecated
+* The following methods of ``BuildEnvironment`` are now deprecated.
+
+ - ``BuildEnvironment.note_toctree()``
+ - ``BuildEnvironment.get_toc_for()``
+ - ``BuildEnvironment.get_toctree_for()``
+ - ``BuildEnvironment.create_index()``
+
+ Please use ``sphinx.environment.adapters`` modules instead.
+* latex package ``footnote`` is not loaded anymore by its bundled replacement
+ ``footnotehyper-sphinx``. The redefined macros keep the same names as in the
+ original package.
+* #3429: deprecate config setting ``latex_keep_old_macro_names``. It will be
+ removed at 1.7, and already its default value has changed from ``True`` to
+ ``False``.
+
Release 1.5.4 (in development)
==============================
@@ -49,6 +190,8 @@ Features added
* Support requests-2.0.0 (experimental) (refs: #3367)
* (latex) PDF page margin dimensions may be customized (refs: #3387)
+* ``literalinclude`` directive allows combination of ``:pyobject:`` and
+ ``:lines:`` options (refs: #3416)
* #3400: make-mode doesn't use subprocess on building docs
Bugs fixed
@@ -207,7 +350,7 @@ Incompatible changes
* Fix ``genindex.html``, Sphinx's document template, link address to itself to satisfy xhtml standard.
* Use epub3 builder by default. And the old epub builder is renamed to epub2.
* Fix ``epub`` and ``epub3`` builders that contained links to ``genindex`` even if ``epub_use_index = False``.
-* `html_translator_class` is now deprecated.
+* ``html_translator_class`` is now deprecated.
Use `Sphinx.set_translator()` API instead.
* Drop python 2.6 and 3.3 support
* Drop epub3 builder's ``epub3_page_progression_direction`` option (use ``epub3_writing_mode``).
@@ -227,6 +370,8 @@ Incompatible changes
The non-modified package is used.
* #3057: By default, footnote marks in latex PDF output are not preceded by a
space anymore, ``\sphinxBeforeFootnote`` allows user customization if needed.
+* LaTeX target requires that option ``hyperfootnotes`` of package ``hyperref``
+ be left unchanged to its default (i.e. ``true``) (refs: #3022)
1.5 final
@@ -376,7 +521,7 @@ Bugs fixed
* `sphinx.ext.autodoc` crashes if target code imports * from mock modules
by `autodoc_mock_imports`.
* #1953: ``Sphinx.add_node`` does not add handlers the translator installed by
- `html_translator_class`
+ ``html_translator_class``
* #1797: text builder inserts blank line on top
* #2894: quickstart main() doesn't use argv argument
* #2874: gettext builder could not extract all text under the ``only``
@@ -783,7 +928,8 @@ Incompatible changes
``"MMMM dd, YYYY"`` is default format for `today_fmt` and `html_last_updated_fmt`.
However strftime format like ``"%B %d, %Y"`` is also supported for backward
compatibility until Sphinx-1.5. Later format will be disabled from Sphinx-1.5.
-* #2327: `latex_use_parts` is deprecated now. Use `latex_toplevel_sectioning` instead.
+* #2327: ``latex_use_parts`` is deprecated now. Use `latex_toplevel_sectioning`
+ instead.
* #2337: Use ``\url{URL}`` macro instead of ``\href{URL}{URL}`` in LaTeX writer.
* #1498: manpage writer: don't make whole of item in definition list bold if it includes strong node.
* #582: Remove hint message from quick search box for html output.
@@ -1346,7 +1492,7 @@ Features added
for the ids defined on the node. Thanks to Olivier Heurtier.
* PR#229: Allow registration of other translators. Thanks to Russell Sim.
* Add app.set_translator() API to register or override a Docutils translator
- class like `html_translator_class`.
+ class like ``html_translator_class``.
* PR#267, #1134: add 'diff' parameter to literalinclude. Thanks to Richard Wall
and WAKAYAMA shirou.
* PR#272: Added 'bizstyle' theme. Thanks to Shoji KUMAGAI.
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index e3a68b58e..274bc994a 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -129,8 +129,7 @@ These are the basic steps needed to start developing on Sphinx.
cd doc
make clean html latexpdf
- * Run code style checks and type checks (type checks require ``mypy`` and are
- currently done for commits to ``master`` only)::
+ * Run code style checks and type checks (type checks require mypy)::
make style-check
make type-check
diff --git a/EXAMPLES b/EXAMPLES
index d6770436b..805f0edba 100644
--- a/EXAMPLES
+++ b/EXAMPLES
@@ -12,58 +12,43 @@ interesting examples.
Documentation using the alabaster theme
---------------------------------------
+* CodePy: https://documen.tician.de/codepy/
+* MeshPy: https://documen.tician.de/meshpy/
+* PyCuda: https://documen.tician.de/pycuda/
* PyLangAcq: http://pylangacq.org/
Documentation using the classic theme
-------------------------------------
-* APSW: http://apidoc.apsw.googlecode.com/hg/index.html
-* ASE: https://wiki.fysik.dtu.dk/ase/
+* APSW: https://rogerbinns.github.io/apsw/
* Calibre: http://manual.calibre-ebook.com/
-* CodePy: https://documen.tician.de/codepy/
* Cython: http://docs.cython.org/
* Cormoran: http://cormoran.nhopkg.org/docs/
* Director: http://pythonhosted.org/director/
-* Dirigible: http://www.projectdirigible.com/
* F2py: http://f2py.sourceforge.net/docs/
-* GeoDjango: https://docs.djangoproject.com/en/dev/ref/contrib/gis/
* Genomedata:
http://noble.gs.washington.edu/proj/genomedata/doc/1.2.2/genomedata.html
-* gevent: http://www.gevent.org/
-* Google Wave API:
- http://wave-robot-python-client.googlecode.com/svn/trunk/pydocs/index.html
* GSL Shell: http://www.nongnu.org/gsl-shell/
-* Heapkeeper: http://heapkeeper.org/
* Hands-on Python Tutorial:
http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/
* Hedge: https://documen.tician.de/hedge/
* Leo: http://leoeditor.com/
* Lino: http://www.lino-framework.org/
-* MeshPy: https://documen.tician.de/meshpy/
-* mpmath: http://mpmath.googlecode.com/svn/trunk/doc/build/index.html
+* mpmath: http://mpmath.org/doc/current/
* OpenEXR: http://excamera.com/articles/26/doc/index.html
* OpenGDA: http://www.opengda.org/gdadoc/html/
* openWNS: http://docs.openwns.org/
-* Paste: http://pythonpaste.org/script/
-* Paver: http://paver.github.io/paver/
* Pioneers and Prominent Men of Utah: http://pioneers.rstebbing.com/
* PyCantonese: http://pycantonese.org/
-* Pyccuracy: https://github.com/heynemann/pyccuracy/wiki/
-* PyCuda: https://documen.tician.de/pycuda/
* Pyevolve: http://pyevolve.sourceforge.net/
-* Pylo: https://documen.tician.de/pylo/
* PyMQI: http://pythonhosted.org/pymqi/
-* PyPubSub: http://pubsub.sourceforge.net/
* pySPACE: http://pyspace.github.io/pyspace/
* Python: https://docs.python.org/3/
* python-apt: http://apt.alioth.debian.org/python-apt-doc/
* PyUblas: https://documen.tician.de/pyublas/
-* Quex: http://quex.sourceforge.net/doc/html/main.html
* Ring programming language: http://ring-lang.sourceforge.net/doc/index.html
* Scapy: http://www.secdev.org/projects/scapy/doc/
-* Seaborn: https://stanford.edu/~mwaskom/software/seaborn/
* Segway: http://noble.gs.washington.edu/proj/segway/doc/1.1.0/segway.html
-* SimPy: http://simpy.readthedocs.org/en/latest/
* SymPy: http://docs.sympy.org/
* WTForms: http://wtforms.simplecodes.com/docs/
* z3c: http://www.ibiblio.org/paulcarduner/z3ctutorial/
@@ -129,6 +114,7 @@ Documentation using the sphinxdoc theme
Documentation using another builtin theme
-----------------------------------------
+* ASE: https://wiki.fysik.dtu.dk/ase/ (sphinx_rtd_theme)
* C/C++ Development with Eclipse: http://eclipsebook.in/ (agogo)
* ESWP3 (http://eswp3.org) (sphinx_rtd_theme)
* Jinja: http://jinja.pocoo.org/ (scrolls)
@@ -137,13 +123,17 @@ Documentation using another builtin theme
* Linguistica: http://linguistica-uchicago.github.io/lxa5/ (sphinx_rtd_theme)
* MoinMoin: https://moin-20.readthedocs.io/en/latest/ (sphinx_rtd_theme)
* MPipe: http://vmlaker.github.io/mpipe/ (sphinx13)
+* Paver: http://paver.readthedocs.io/en/latest/
* pip: https://pip.pypa.io/en/latest/ (sphinx_rtd_theme)
-* Pyramid web framework:
- http://docs.pylonsproject.org/projects/pyramid/en/latest/ (pyramid)
* Programmieren mit PyGTK und Glade (German):
http://www.florian-diesch.de/doc/python-und-glade/online/ (agogo)
+* PyPubSub: http://pypubsub.readthedocs.io/ (bizstyle)
+* Pyramid web framework:
+ http://docs.pylonsproject.org/projects/pyramid/en/latest/ (pyramid)
+* Quex: http://quex.sourceforge.net/doc/html/main.html
* Satchmo: http://docs.satchmoproject.com/en/latest/ (sphinx_rtd_theme)
* Setuptools: http://pythonhosted.org/setuptools/ (nature)
+* SimPy: http://simpy.readthedocs.org/en/latest/
* Spring Python: http://docs.spring.io/spring-python/1.2.x/sphinx/html/ (nature)
* sqlparse: http://python-sqlparse.googlecode.com/svn/docs/api/index.html
(agogo)
@@ -169,6 +159,7 @@ Documentation using a custom theme/integrated in a site
* Flask-OpenID: http://pythonhosted.org/Flask-OpenID/
* Gameduino: http://excamera.com/sphinx/gameduino/
* GeoServer: http://docs.geoserver.org/
+* gevent: http://www.gevent.org/
* GHC - Glasgow Haskell Compiler: http://downloads.haskell.org/~ghc/master/users-guide/
* Glashammer: http://glashammer.org/
* Istihza (Turkish Python documentation project): http://belgeler.istihza.com/py2/
@@ -194,6 +185,7 @@ Documentation using a custom theme/integrated in a site
* QGIS: http://qgis.org/en/docs/index.html
* qooxdoo: http://manual.qooxdoo.org/current/
* Roundup: http://www.roundup-tracker.org/
+* Seaborn: https://stanford.edu/~mwaskom/software/seaborn/
* Selenium: http://docs.seleniumhq.org/docs/
* Self: http://www.selflanguage.org/
* Substance D: http://docs.pylonsproject.org/projects/substanced/en/latest/
diff --git a/Makefile b/Makefile
index 101cfca2c..28a33c8a0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,17 +1,14 @@
PYTHON ?= python
-.PHONY: all style-check clean clean-pyc clean-patchfiles clean-backupfiles \
+.PHONY: all style-check type-check clean clean-pyc clean-patchfiles clean-backupfiles \
clean-generated pylint reindent test covertest build
-DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \
- -i sphinx/pycode/pgen2 -i sphinx/util/smartypants.py \
- -i .ropeproject -i doc/_build -i tests/path.py \
- -i utils/convert.py \
- -i tests/typing_test_data.py \
- -i tests/test_autodoc_py35.py \
- -i tests/roots/test-warnings/undecodable.rst \
- -i tests/build \
- -i tests/roots/test-warnings/undecodable.rst \
+DONT_CHECK = -i .ropeproject \
+ -i .tox \
+ -i build \
+ -i dist \
+ -i doc/_build \
+ -i sphinx/pycode/pgen2 \
-i sphinx/search/da.py \
-i sphinx/search/de.py \
-i sphinx/search/en.py \
@@ -28,12 +25,23 @@ DONT_CHECK = -i build -i dist -i sphinx/style/jquery.js \
-i sphinx/search/ru.py \
-i sphinx/search/sv.py \
-i sphinx/search/tr.py \
- -i .tox
+ -i sphinx/style/jquery.js \
+ -i sphinx/util/smartypants.py \
+ -i tests/build \
+ -i tests/path.py \
+ -i tests/roots/test-directive-code/target.py \
+ -i tests/roots/test-warnings/undecodable.rst \
+ -i tests/test_autodoc_py35.py \
+ -i tests/typing_test_data.py \
+ -i utils/convert.py
-all: clean-pyc clean-backupfiles style-check test
+all: clean-pyc clean-backupfiles style-check type-check test
style-check:
- @$(PYTHON) utils/check_sources.py $(DONT_CHECK) .
+ @PYTHONWARNINGS=all $(PYTHON) utils/check_sources.py $(DONT_CHECK) .
+
+type-check:
+ mypy sphinx/
clean: clean-pyc clean-pycache clean-patchfiles clean-backupfiles clean-generated clean-testfiles clean-buildfiles clean-mypyfiles
diff --git a/doc/_static/conf.py.txt b/doc/_static/conf.py.txt
index ab54f15b8..be0c846db 100644
--- a/doc/_static/conf.py.txt
+++ b/doc/_static/conf.py.txt
@@ -79,7 +79,7 @@ language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-# This patterns also effect to html_static_path and html_extra_path
+# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
@@ -268,11 +268,6 @@ latex_documents = [
#
# latex_logo = None
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#
-# latex_use_parts = False
-
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
diff --git a/doc/builders.rst b/doc/builders.rst
index f3bacb139..2d17abc7d 100644
--- a/doc/builders.rst
+++ b/doc/builders.rst
@@ -178,12 +178,29 @@ The builder's "name" must be given to the **-b** command-line option of
.. note::
The produced LaTeX file uses several LaTeX packages that may not be
- present in a "minimal" TeX distribution installation. For TeXLive,
- the following packages need to be installed:
-
- * latex-recommended
- * latex-extra
- * fonts-recommended
+ present in a "minimal" TeX distribution installation. For example, on
+ Ubuntu, the following packages need to be installed for successful PDF
+ builds:
+
+ * texlive-latex-recommended
+ * texlive-fonts-recommended
+ * texlive-latex-extra
+ * latexmk (for ``make latexpdf``)
+
+ Sphinx will use ``xcolor.sty`` if present: recent Ubuntu distributions
+ have ``xcolor.sty`` included in latex-recommended, earlier ones have it
+ in latex-xcolor. Unicode engines will need texlive-luatex or
+ texlive-xetex.
+
+ The testing of Sphinx LaTeX is done on Ubuntu trusty with the above
+ mentioned packages, which are from a TeXLive 2013 snapshot dated
+ February 2014.
+
+ .. versionchanged:: 1.6
+ Formerly, testing had been done for some years on Ubuntu precise
+ (based on TeXLive 2009).
+ .. versionchanged:: 1.6
+ Use of ``latexmk`` on GNU/Linux or Mac OS X.
.. autoattribute:: name
diff --git a/doc/config.rst b/doc/config.rst
index 5098c2fca..15a9a339b 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -226,8 +226,10 @@ General configuration
* app.add_role
* app.add_generic_role
* app.add_source_parser
+ * download.not_readable
* image.data_uri
* image.nonlocal_uri
+ * image.not_readable
* ref.term
* ref.ref
* ref.numref
@@ -382,18 +384,6 @@ Project information
%Y'`` (or, if translation is enabled with :confval:`language`, an equivalent
format for the selected locale).
- .. versionchanged:: 1.4
-
- Format specification was changed from strftime to Locale Data Markup
- Language. strftime format is also supported for backward compatibility
- until Sphinx-1.5.
-
- .. versionchanged:: 1.4.1
-
- Format specification was changed again from Locale Data Markup Language
- to strftime. LDML format is also supported for backward compatibility
- until Sphinx-1.5.
-
.. confval:: highlight_language
The default language to highlight source code in. The default is
@@ -769,19 +759,6 @@ that use Sphinx's HTMLWriter class.
The empty string is equivalent to ``'%b %d, %Y'`` (or a
locale-dependent equivalent).
- .. versionchanged:: 1.4
-
- Format specification was changed from strftime to Locale Data Markup
- Language. strftime format is also supported for backward compatibility
- until Sphinx-1.5.
-
- .. versionchanged:: 1.4.1
-
- Format specification was changed again from Locale Data Markup Language
- to strftime. LDML format is also supported for backward compatibility
- until Sphinx-1.5.
-
-
.. confval:: html_use_smartypants
If true, `SmartyPants <http://daringfireball.net/projects/smartypants/>`_
@@ -881,13 +858,6 @@ that use Sphinx's HTMLWriter class.
.. versionadded:: 1.0
-.. confval:: html_use_modindex
-
- If true, add a module index to the HTML documents. Default is ``True``.
-
- .. deprecated:: 1.0
- Use :confval:`html_domain_indices`.
-
.. confval:: html_use_index
If true, add an index to the HTML documents. Default is ``True``.
@@ -950,20 +920,6 @@ that use Sphinx's HTMLWriter class.
.. versionadded:: 0.6
-.. confval:: html_translator_class
-
- A string with the fully-qualified name of a HTML Translator class, that is, a
- subclass of Sphinx's :class:`~sphinx.writers.html.HTMLTranslator`, that is
- used to translate document trees to HTML. Default is ``None`` (use the
- builtin translator).
-
- .. seealso:: :meth:`~sphinx.application.Sphinx.set_translator`
-
- .. deprecated:: 1.5
-
- Implement your translator as extension and use `Sphinx.set_translator`
- instead.
-
.. confval:: html_show_copyright
If true, "(C) Copyright ..." is shown in the HTML footer. Default is
@@ -1133,6 +1089,11 @@ that use Sphinx's HTMLWriter class.
Output file base name for HTML help builder. Default is ``'pydoc'``.
+.. confval:: html_experimental_html5_writer
+
+ Output is processed with HTML5 writer. This feature needs docutils 0.13 or newer. Default is ``False``.
+
+ .. versionadded:: 1.6
.. _applehelp-options:
@@ -1334,7 +1295,7 @@ the `Dublin Core metadata <http://dublincore.org/>`_.
.. confval:: epub_description
- The description of the document. The default value is ``''``.
+ The description of the document. The default value is ``'unknown'``.
.. versionadded:: 1.4
@@ -1540,20 +1501,6 @@ the `Dublin Core metadata <http://dublincore.org/>`_.
.. [#] https://developer.mozilla.org/en-US/docs/Web/CSS/writing-mode
-.. confval:: epub3_page_progression_direction
-
- The global direction in which the content flows.
- Allowed values are ``'ltr'`` (left-to-right), ``'rtl'`` (right-to-left) and
- ``'default'``. The default value is ``'ltr'``.
-
- When the ``'default'`` value is specified, the Author is expressing no
- preference and the Reading System may chose the rendering direction.
-
- .. versionadded:: 1.4
-
- .. deprecated:: 1.5
- Use ``epub_writing_mode`` instead.
-
.. _latex-options:
Options for LaTeX output
@@ -1625,16 +1572,6 @@ These options influence LaTeX output. See further :doc:`latex`.
.. versionadded:: 1.4
-.. confval:: latex_use_parts
-
- If true, the topmost sectioning unit is parts, else it is chapters. Default:
- ``False``.
-
- .. versionadded:: 0.3
-
- .. deprecated:: 1.4
- Use :confval:`latex_toplevel_sectioning`.
-
.. confval:: latex_appendices
A list of document names to append as an appendix to all manuals.
@@ -1650,13 +1587,6 @@ These options influence LaTeX output. See further :doc:`latex`.
.. versionadded:: 1.0
-.. confval:: latex_use_modindex
-
- If true, add a module index to LaTeX documents. Default is ``True``.
-
- .. deprecated:: 1.0
- Use :confval:`latex_domain_indices`.
-
.. confval:: latex_show_pagerefs
If true, add page references after internal references. This is very useful
@@ -1681,18 +1611,39 @@ These options influence LaTeX output. See further :doc:`latex`.
.. confval:: latex_keep_old_macro_names
- If ``True`` (default) the ``\strong``, ``\code``, ``\bfcode``, ``\email``,
+ If ``True`` the ``\strong``, ``\code``, ``\bfcode``, ``\email``,
``\tablecontinued``, ``\titleref``, ``\menuselection``, ``\accelerator``,
``\crossref``, ``\termref``, and ``\optional`` text styling macros are
pre-defined by Sphinx and may be user-customized by some
``\renewcommand``'s inserted either via ``'preamble'`` key or :dudir:`raw
<raw-data-pass-through>` directive. If ``False``, only ``\sphinxstrong``,
- etc... macros are defined (and may be redefined by user). Setting to
- ``False`` may help solve macro name conflicts caused by user-added latex
- packages.
+ etc... macros are defined (and may be redefined by user).
- .. versionadded:: 1.4.5
+ The default is ``False`` as it prevents macro name conflicts caused by
+ latex packages. For example (``lualatex`` or ``xelatex``) ``fontspec v2.6``
+ has its own ``\strong`` macro.
+ .. versionadded:: 1.4.5
+ .. versionchanged:: 1.6
+ Default was changed from ``True`` to ``False``.
+ .. deprecated:: 1.6
+ This setting will be removed at Sphinx 1.7.
+
+.. confval:: latex_use_latex_multicolumn
+
+ If ``False`` (default), the LaTeX writer uses for merged cells in grid
+ tables Sphinx's own macros. They have the advantage to allow the same
+ contents as in non-merged cells (inclusive of literal blocks, lists,
+ blockquotes, ...). But they assume that the columns are separated by the
+ standard vertical rule. Further, in case the :rst:dir:`tabularcolumns`
+ directive was employed to inject more macros (using LaTeX's mark-up of the
+ type ``>{..}``, ``<{..}``, ``@{..}``) the multicolumn cannot ignore these
+ extra macros, contrarily to LaTeX's own ``\multicolumn``; but Sphinx's
+ version does arrange for ignoring ``\columncolor`` like the standard
+ ``\multicolumn`` does. Setting to ``True`` means to use LaTeX's standard
+ ``\multicolumn`` macro.
+
+ .. versionadded:: 1.6
.. confval:: latex_elements
@@ -1741,6 +1692,8 @@ These options influence LaTeX output. See further :doc:`latex`.
.. versionchanged:: 1.5
For :confval:`latex_engine` set to ``'xelatex'``, the default
is ``'\\usepackage{polyglossia}\n\\setmainlanguage{<language>}'``.
+ .. versionchanged:: 1.6
+ ``'lualatex'`` uses same default setting as ``'xelatex'``
``'fontpkg'``
Font package inclusion, default ``'\\usepackage{times}'`` (which uses
Times and Helvetica). You can set this to ``''`` to use the Computer
@@ -1751,6 +1704,8 @@ These options influence LaTeX output. See further :doc:`latex`.
script.
.. versionchanged:: 1.5
Defaults to ``''`` when :confval:`latex_engine` is ``'xelatex'``.
+ .. versionchanged:: 1.6
+ Defaults to ``''`` also with ``'lualatex'``.
``'fncychap'``
Inclusion of the "fncychap" package (which makes fancy chapter titles),
default ``'\\usepackage[Bjarne]{fncychap}'`` for English documentation
@@ -1781,6 +1736,14 @@ These options influence LaTeX output. See further :doc:`latex`.
* Keys that don't need to be overridden unless in special cases are:
+ ``'extraclassoptions'``
+ The default is the empty string. Example: ``'extraclassoptions':
+ 'openany'`` will allow chapters (for documents of the ``'manual'``
+ type) to start on any page.
+
+ .. versionadded:: 1.2
+ .. versionchanged:: 1.6
+ Added this documentation.
``'maxlistdepth'``
LaTeX allows by default at most 6 levels for nesting list and
quote-like environments, with at most 4 enumerated lists, and 4 bullet
@@ -1816,6 +1779,8 @@ These options influence LaTeX output. See further :doc:`latex`.
.. versionchanged:: 1.5
Defaults to ``'\\usepackage{fontspec}'`` when
:confval:`latex_engine` is ``'xelatex'``.
+ .. versionchanged:: 1.6
+ ``'lualatex'`` also uses ``fontspec`` per default.
``'geometry'``
"geometry" package inclusion, the default definition is:
@@ -1935,27 +1900,6 @@ These options influence LaTeX output. See further :doc:`latex`.
.. versionchanged:: 1.2
This overrides the files which is provided from Sphinx such as sphinx.sty.
-.. confval:: latex_preamble
-
- Additional LaTeX markup for the preamble.
-
- .. deprecated:: 0.5
- Use the ``'preamble'`` key in the :confval:`latex_elements` value.
-
-.. confval:: latex_paper_size
-
- The output paper size (``'letter'`` or ``'a4'``). Default is ``'letter'``.
-
- .. deprecated:: 0.5
- Use the ``'papersize'`` key in the :confval:`latex_elements` value.
-
-.. confval:: latex_font_size
-
- The font size ('10pt', '11pt' or '12pt'). Default is ``'10pt'``.
-
- .. deprecated:: 0.5
- Use the ``'pointsize'`` key in the :confval:`latex_elements` value.
-
.. _text-options:
diff --git a/doc/contents.rst b/doc/contents.rst
index 92fb13fcb..36eed649e 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -17,6 +17,7 @@ Sphinx documentation contents
config
intl
theming
+ setuptools
templating
latex
markdown
diff --git a/doc/develop.rst b/doc/develop.rst
index f2f336cfa..2f14c945f 100644
--- a/doc/develop.rst
+++ b/doc/develop.rst
@@ -55,17 +55,17 @@ This is the current list of contributed extensions in that repository:
- hyphenator: client-side hyphenation of HTML using hyphenator_
- inlinesyntaxhighlight_: inline syntax highlighting
- lassodomain: a domain for documenting Lasso_ source code
-- libreoffice: an extension to include any drawing supported by LibreOffice (e.g. odg, vsd...).
+- libreoffice: an extension to include any drawing supported by LibreOffice (e.g. odg, vsd, ...).
- lilypond: an extension inserting music scripts from Lilypond_ in PNG format.
- makedomain_: a domain for `GNU Make`_
- matlabdomain: document MATLAB_ code.
- mockautodoc: mock imports.
- mscgen: embed mscgen-formatted MSC (Message Sequence Chart)s.
- napoleon: supports `Google style`_ and `NumPy style`_ docstrings.
-- nicoviceo: embed videos from nicovideo
+- nicovideo: embed videos from nicovideo
- nwdiag: embed network diagrams by using nwdiag_
- omegat: support tools to collaborate with OmegaT_ (Sphinx 1.1 needed)
-- osaka: convert standard Japanese doc to Osaka dialect (it is joke extension)
+- osaka: convert standard Japanese doc to Osaka dialect (this is a joke extension)
- paverutils: an alternate integration of Sphinx with Paver_.
- phpdomain: an extension for PHP support
- plantuml: embed UML diagram by using PlantUML_
@@ -113,7 +113,7 @@ own extensions.
.. _Google Analytics: http://www.google.com/analytics/
.. _Google Chart: https://developers.google.com/chart/
.. _Google Maps: https://www.google.com/maps
-.. _Google style: http://google-styleguide.googlecode.com/svn/trunk/pyguide.html
+.. _Google style: https://google.github.io/styleguide/pyguide.html
.. _NumPy style: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
.. _hyphenator: https://github.com/mnater/hyphenator
.. _exceltable: http://pythonhosted.org/sphinxcontrib-exceltable/
diff --git a/doc/domains.rst b/doc/domains.rst
index 3eaaf758d..1b0dfac8c 100644
--- a/doc/domains.rst
+++ b/doc/domains.rst
@@ -546,6 +546,10 @@ The C++ Domain
The C++ domain (name **cpp**) supports documenting C++ projects.
+
+Directives
+~~~~~~~~~~
+
The following directives are available. All declarations can start with
a visibility statement (``public``, ``private`` or ``protected``).
@@ -741,6 +745,16 @@ a visibility statement (``public``, ``private`` or ``protected``).
Holder of elements, to which it can provide access via
:cpp:concept:`Iterator` s.
+Options
+.......
+
+Some directives support options:
+
+- ``:noindex:``, see :ref:`basic-domain-markup`.
+- ``:tparam-line-spec:``, for templated declarations.
+ If specified, each template parameter will be rendered on a separate line.
+
+
Constrained Templates
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/ext/doctest.rst b/doc/ext/doctest.rst
index 818b86007..d1cb3c31d 100644
--- a/doc/ext/doctest.rst
+++ b/doc/ext/doctest.rst
@@ -63,7 +63,7 @@ a comma-separated list of group names.
default set of flags is specified by the :confval:`doctest_default_flags`
configuration variable.
- This directive supports two options:
+ This directive supports three options:
* ``hide``, a flag option, hides the doctest block in other builders. By
default it is shown as a highlighted doctest block.
@@ -73,6 +73,19 @@ a comma-separated list of group names.
explicit flags per example, with doctest comments, but they will show up in
other builders too.)
+ * ``pyversion``, a string option, can be used to specify the required Python
+ version for the example to be tested. For instance, in the following case
+ the example will be tested only for Python versions greater than 3.3::
+
+ .. doctest::
+ :pyversion: > 3.3
+
+ The supported operators are ``<``, ``<=``, ``==``, ``>=``, ``>``, and
+ comparison is performed by `distutils.version.LooseVersion
+ <https://www.python.org/dev/peps/pep-0386/#distutils>`__.
+
+ .. versionadded:: 1.6
+
Note that like with standard doctests, you have to use ``<BLANKLINE>`` to
signal a blank line in the expected output. The ``<BLANKLINE>`` is removed
when building presentation output (HTML, LaTeX etc.).
diff --git a/doc/ext/example_google.py b/doc/ext/example_google.py
index 34a720e36..5b4fa58df 100644
--- a/doc/ext/example_google.py
+++ b/doc/ext/example_google.py
@@ -83,7 +83,7 @@ def module_level_function(param1, param2=None, *args, **kwargs):
of each parameter is required. The type and description of each parameter
is optional, but should be included if not obvious.
- If \*args or \*\*kwargs are accepted,
+ If ``*args`` or ``**kwargs`` are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
diff --git a/doc/ext/example_numpy.py b/doc/ext/example_numpy.py
index 7a2db94cc..dbee080c3 100644
--- a/doc/ext/example_numpy.py
+++ b/doc/ext/example_numpy.py
@@ -106,7 +106,7 @@ def module_level_function(param1, param2=None, *args, **kwargs):
The name of each parameter is required. The type and description of each
parameter is optional, but should be included if not obvious.
- If \*args or \*\*kwargs are accepted,
+ If ``*args`` or ``**kwargs`` are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
diff --git a/doc/ext/graphviz.rst b/doc/ext/graphviz.rst
index 0994c932a..ef0483da7 100644
--- a/doc/ext/graphviz.rst
+++ b/doc/ext/graphviz.rst
@@ -77,20 +77,8 @@ It adds these directives:
the graphviz code.
.. versionadded:: 1.1
- All three directives support an ``inline`` flag that controls paragraph
- breaks in the output. When set, the graph is inserted into the current
- paragraph. If the flag is not given, paragraph breaks are introduced before
- and after the image (the default).
-
-.. versionadded:: 1.1
All three directives support a ``caption`` option that can be used to give a
- caption to the diagram. Naturally, diagrams marked as "inline" cannot have a
- caption.
-
-.. deprecated:: 1.4
- ``inline`` option is deprecated.
- All three directives generate inline node by default. If ``caption`` is given,
- these generate block node instead.
+ caption to the diagram.
.. versionchanged:: 1.4
All three directives support a ``graphviz_dot`` option that can be switch the
@@ -100,6 +88,9 @@ It adds these directives:
All three directives support a ``align`` option to align the graph horizontal.
The values "left", "center", "right" are allowed.
+.. versionadded:: 1.6
+ All three directives support a ``name`` option to set the label of the graph.
+
There are also these new config values:
.. confval:: graphviz_dot
diff --git a/doc/extdev/appapi.rst b/doc/extdev/appapi.rst
index b3ffb7af0..1df85907a 100644
--- a/doc/extdev/appapi.rst
+++ b/doc/extdev/appapi.rst
@@ -89,11 +89,6 @@ package.
This allows extensions to use custom translator and define custom
nodes for the translator (see :meth:`add_node`).
- This is a API version of :confval:`html_translator_class` for all other
- builders. Note that if :confval:`html_translator_class` is specified and
- this API is called for html related builders, API overriding takes
- precedence.
-
.. versionadded:: 1.3
.. method:: Sphinx.add_node(node, **kwds)
@@ -293,6 +288,11 @@ package.
Add the standard docutils :class:`Transform` subclass *transform* to the list
of transforms that are applied after Sphinx parses a reST document.
+.. method:: Sphinx.add_post_transform(transform)
+
+ Add the standard docutils :class:`Transform` subclass *transform* to the list
+ of transforms that are applied before Sphinx writes a document.
+
.. method:: Sphinx.add_javascript(filename)
Add *filename* to the list of JavaScript files that the default HTML template
@@ -364,6 +364,12 @@ package.
.. versionadded:: 1.4
+.. method:: Sphinx.add_env_collector(collector)
+
+ Register an environment collector class (refs: :ref:`collector-api`)
+
+ .. versionadded:: 1.6
+
.. method:: Sphinx.require_sphinx(version)
Compare *version* (which must be a ``major.minor`` version string,
@@ -420,6 +426,10 @@ The application object also provides support for emitting leveled messages.
the build; just raise an exception (:exc:`sphinx.errors.SphinxError` or a
custom subclass) to do that.
+.. deprecated:: 1.6
+
+ Please use :ref:`logging-api` instead.
+
.. automethod:: Sphinx.warn
.. automethod:: Sphinx.info
diff --git a/doc/extdev/collectorapi.rst b/doc/extdev/collectorapi.rst
new file mode 100644
index 000000000..cb4c30bf3
--- /dev/null
+++ b/doc/extdev/collectorapi.rst
@@ -0,0 +1,9 @@
+.. _collector-api:
+
+Environment Collector API
+-------------------------
+
+.. module:: sphinx.environment.collectors
+
+.. autoclass:: EnvironmentCollector
+ :members:
diff --git a/doc/extdev/index.rst b/doc/extdev/index.rst
index b27db4b2d..85172abb6 100644
--- a/doc/extdev/index.rst
+++ b/doc/extdev/index.rst
@@ -50,7 +50,9 @@ APIs used for writing extensions
appapi
envapi
builderapi
+ collectorapi
markupapi
domainapi
parserapi
nodes
+ logging
diff --git a/doc/extdev/logging.rst b/doc/extdev/logging.rst
new file mode 100644
index 000000000..50110c2a4
--- /dev/null
+++ b/doc/extdev/logging.rst
@@ -0,0 +1,77 @@
+.. _logging-api:
+
+Logging API
+===========
+
+.. function:: sphinx.util.logging.getLogger(name)
+
+ Returns a logger wrapped by :class:`SphinxLoggerAdapter` with the specified *name*.
+
+ Example usage::
+
+ from sphinx.util import logging # Load on top of python's logging module
+
+ logger = logging.getLogger(__name__)
+ logger.info('Hello, this is an extension!')
+
+.. class:: SphinxLoggerAdapter(logging.LoggerAdapter)
+
+ .. method:: SphinxLoggerAdapter.error(level, msg, *args, **kwargs)
+ .. method:: SphinxLoggerAdapter.critical(level, msg, *args, **kwargs)
+ .. method:: SphinxLoggerAdapter.warning(level, msg, *args, **kwargs)
+
+ Logs a message on this logger with the specified level.
+ Basically, the arguments are as with python's logging module.
+
+ In addition, Sphinx logger supports the following keyword arguments:
+
+ **type**, **subtype**
+ Categories of warning logs. It is used to suppress
+ warnings by :confval:`suppress_warnings` setting.
+
+ **location**
+ Where the warning happened. It is used to include
+ the path and line number in each log. It allows docname,
+ tuple of docname and line number and nodes::
+
+ logger = sphinx.util.logging.getLogger(__name__)
+ logger.warning('Warning happened!', location='index')
+ logger.warning('Warning happened!', location=('chapter1/index', 10))
+ logger.warning('Warning happened!', location=some_node)
+
+ **color**
+ The color of logs. By default, warning level logs are
+ colored as ``"darkred"``. The others are not colored.
+
+ .. method:: SphinxLoggerAdapter.log(level, msg, *args, **kwargs)
+ .. method:: SphinxLoggerAdapter.info(level, msg, *args, **kwargs)
+ .. method:: SphinxLoggerAdapter.verbose(level, msg, *args, **kwargs)
+ .. method:: SphinxLoggerAdapter.debug(level, msg, *args, **kwargs)
+
+ Logs a message to this logger with the specified level.
+ Basically, the arguments are as with python's logging module.
+
+ In addition, Sphinx logger supports the following keyword arguments:
+
+ **nonl**
+ If true, the logger does not fold lines at the end of the log message.
+ The default is ``False``.
+
+ **color**
+ The color of logs. By default, debug level logs are
+ colored as ``"darkgray"``, and debug2 level ones are ``"lightgray"``.
+ The others are not colored.
+
+.. function:: pending_logging()
+
+ Marks all logs as pending::
+
+ with pending_logging():
+ logger.warning('Warning message!') # not flushed yet
+ some_long_process()
+
+ # the warning is flushed here
+
+.. function:: pending_warnings()
+
+ Marks warning logs as pending. Similar to :func:`pending_logging`.
diff --git a/doc/extdev/nodes.rst b/doc/extdev/nodes.rst
index 359410e25..5d8272eae 100644
--- a/doc/extdev/nodes.rst
+++ b/doc/extdev/nodes.rst
@@ -55,4 +55,3 @@ You should not need to generate the nodes below in extensions.
.. autoclass:: start_of_file
.. autoclass:: productionlist
.. autoclass:: production
-.. autoclass:: termsep
diff --git a/doc/extdev/tutorial.rst b/doc/extdev/tutorial.rst
index 10a14fab7..33b45035e 100644
--- a/doc/extdev/tutorial.rst
+++ b/doc/extdev/tutorial.rst
@@ -98,8 +98,9 @@ in which a Sphinx project is built: this works in several phases.
Now that the metadata and cross-reference data of all existing documents is
known, all temporary nodes are replaced by nodes that can be converted into
- output. For example, links are created for object references that exist, and
- simple literal nodes are created for those that don't.
+ output using components called transforms. For example, links are created for
+ object references that exist, and simple literal nodes are created for those
+ that don't.
**Phase 4: Writing**
@@ -246,7 +247,6 @@ todolist directive has neither content nor arguments that need to be handled.
The ``todo`` directive function looks like this::
- from sphinx.util.compat import make_admonition
from sphinx.locale import _
class TodoDirective(Directive):
@@ -260,20 +260,20 @@ The ``todo`` directive function looks like this::
targetid = "todo-%d" % env.new_serialno('todo')
targetnode = nodes.target('', '', ids=[targetid])
- ad = make_admonition(todo, self.name, [_('Todo')], self.options,
- self.content, self.lineno, self.content_offset,
- self.block_text, self.state, self.state_machine)
+ todo_node = todo('\n'.join(self.content))
+ todo_node += nodes.title(_('Todo'), _('Todo'))
+ self.state.nested_parse(self.content, self.content_offset, todo_node)
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
env.todo_all_todos.append({
'docname': env.docname,
'lineno': self.lineno,
- 'todo': ad[0].deepcopy(),
+ 'todo': todo_node.deepcopy(),
'target': targetnode,
})
- return [targetnode] + ad
+ return [targetnode, todo_node]
Several important things are covered here. First, as you can see, you can refer
to the build environment instance using ``self.state.document.settings.env``.
@@ -285,11 +285,10 @@ returns a new unique integer on each call and therefore leads to unique target
names. The target node is instantiated without any text (the first two
arguments).
-An admonition is created using a standard docutils function (wrapped in Sphinx
-for docutils cross-version compatibility). The first argument gives the node
-class, in our case ``todo``. The third argument gives the admonition title (use
-``arguments`` here to let the user specify the title). A list of nodes is
-returned from ``make_admonition``.
+When creating the admonition node, the content body of the directive is parsed using
+``self.state.nested_parse``. The first argument gives the content body, and
+the second one gives content offset. The third argument gives the parent node
+of parsed result, in our case the ``todo`` node.
Then, the todo node is added to the environment. This is needed to be able to
create a list of all todo entries throughout the documentation, in the place
diff --git a/doc/faq.rst b/doc/faq.rst
index eaa663d92..5924f5f68 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -201,7 +201,7 @@ The following list gives some hints for the creation of epub files:
Error(prcgen):E24011: TOC section scope is not included in the parent chapter:(title)
Error(prcgen):E24001: The table of content could not be built.
-.. _Epubcheck: https://code.google.com/archive/p/epubcheck
+.. _Epubcheck: https://github.com/IDPF/epubcheck
.. _Calibre: http://calibre-ebook.com/
.. _FBreader: https://fbreader.org/
.. _Bookworm: http://www.oreilly.com/bookworm/index.html
diff --git a/doc/intl.rst b/doc/intl.rst
index 97f0e013e..dacced65b 100644
--- a/doc/intl.rst
+++ b/doc/intl.rst
@@ -74,7 +74,7 @@ Quick guide
^^^^^^^^^^^
`sphinx-intl`_ is a useful tool to work with Sphinx translation flow.
-This section describe a easy way to translate with sphinx-intl.
+This section describes an easy way to translate with sphinx-intl.
#. Install `sphinx-intl`_ by :command:`pip install sphinx-intl` or
:command:`easy_install sphinx-intl`.
diff --git a/doc/invocation.rst b/doc/invocation.rst
index 639b6880d..6cb16e919 100644
--- a/doc/invocation.rst
+++ b/doc/invocation.rst
@@ -404,7 +404,7 @@ variables to customize behavior:
.. describe:: PAPER
- The value for :confval:`latex_paper_size`.
+ The value for the ``'papersize'`` key of :confval:`latex_elements`.
.. describe:: SPHINXBUILD
diff --git a/doc/latex.rst b/doc/latex.rst
index ebc14765e..e2c518058 100644
--- a/doc/latex.rst
+++ b/doc/latex.rst
@@ -179,12 +179,8 @@ Here are the currently available options together with their default values.
This is due to the way the LaTeX class ``jsbook`` handles the
pointsize.
- Or, one uses regular units but with ``nomag`` as document class option.
- This can be achieved for example via::
-
- 'pointsize': 'nomag,12pt',
-
- in the :confval:`latex_elements` configuration variable.
+ Or, one uses regular units but with ``nomag`` as extra document class
+ option (cf. ``'extraclassoptions'`` key of :confval:`latex_elements`.)
.. versionadded:: 1.5.3
@@ -418,16 +414,20 @@ Let us now list some macros from the package file
- text styling commands (they have one argument): ``\sphinx<foo>`` with
``<foo>`` being one of ``strong``, ``bfcode``, ``email``, ``tablecontinued``,
``titleref``, ``menuselection``, ``accelerator``, ``crossref``, ``termref``,
- ``optional``. By default and for backwards compatibility the ``\sphinx<foo>``
- expands to ``\<foo>`` hence the user can choose to customize rather the latter
- (the non-prefixed macros will be left undefined if option
- :confval:`latex_keep_old_macro_names` is set to ``False`` in :file:`conf.py`.)
-
- .. versionchanged:: 1.4.5
- use of ``\sphinx`` prefixed macro names to limit possibilities of conflict
- with user added packages: if
- :confval:`latex_keep_old_macro_names` is set to ``False`` in
- :file:`conf.py` only the prefixed names are defined.
+ ``optional``. The non-prefixed macros will still be defined if option
+ :confval:`latex_keep_old_macro_names` has been set to ``True`` (default is
+ ``False``), in which case the prefixed macros expand to the
+ non-prefixed ones.
+
+ .. versionadded:: 1.4.5
+ Use of ``\sphinx`` prefixed macro names to limit possibilities of conflict
+ with LaTeX packages.
+ .. versionchanged:: 1.6
+ The default value of :confval:`latex_keep_old_macro_names` changes to
+ ``False``, and even if set to ``True``, if a non-prefixed macro
+ already exists at ``sphinx.sty`` loading time, only the ``\sphinx``
+ prefixed one will be defined. The setting will be removed at 1.7.
+
- more text styling commands: ``\sphinxstyle<bar>`` with ``<bar>`` one of
``indexentry``, ``indexextra``, ``indexpageref``, ``topictitle``,
``sidebartitle``, ``othertitle``, ``sidebarsubtitle``, ``thead``,
diff --git a/doc/markup/code.rst b/doc/markup/code.rst
index c7cb0f911..cb3e55bdc 100644
--- a/doc/markup/code.rst
+++ b/doc/markup/code.rst
@@ -184,9 +184,17 @@ Includes
string option, only lines that precede the first lines containing that string
are included.
+ With lines selected using ``start-after`` it is still possible to use
+ ``lines``, the first allowed line having by convention the line number ``1``.
+
+ When lines have been selected in any of the ways described above, the
+ line numbers in ``emphasize-lines`` also refer to the selection, with the
+ first selected line having number ``1``.
+
When specifying particular parts of a file to display, it can be useful to
- display exactly which lines are being presented.
- This can be done using the ``lineno-match`` option.
+ display the original line numbers. This can be done using the
+ ``lineno-match`` option, which is however allowed only when the selection
+ consists of contiguous lines.
You can prepend and/or append a line to the included code, using the
``prepend`` and ``append`` option, respectively. This is useful e.g. for
@@ -212,7 +220,9 @@ Includes
.. versionadded:: 1.3
The ``diff`` option.
The ``lineno-match`` option.
-
+ .. versionchanged:: 1.6
+ With both ``start-after`` and ``lines`` in use, the first line as per
+ ``start-after`` is considered to be with line number ``1`` for ``lines``.
Caption and name
^^^^^^^^^^^^^^^^
@@ -232,7 +242,7 @@ For example::
:rst:dir:`literalinclude` also supports the ``caption`` and ``name`` option.
-``caption`` has a additional feature that if you leave the value empty, the shown
+``caption`` has an additional feature that if you leave the value empty, the shown
filename will be exactly the one given as an argument.
diff --git a/doc/markup/misc.rst b/doc/markup/misc.rst
index 0e1d104d6..c4b64e39c 100644
--- a/doc/markup/misc.rst
+++ b/doc/markup/misc.rst
@@ -201,6 +201,11 @@ Including content based on tags
.. versionchanged:: 1.2
Added the name of the builder and the prefixes.
+ .. warning::
+
+ This directive is designed to control only the content of the document.
+ It cannot control sections, labels and so on.
+
Tables
------
@@ -237,32 +242,76 @@ following directive exists:
in proportion to the observed shares in a first pass where the table cells
are rendered at their natural "horizontal" widths.
- By default, Sphinx uses a table layout with ``L`` for every column.
-
- .. hint::
-
- For columns which are known to be much narrower than the others it is
- recommended to use the lowercase specifiers. For more information, check
- the ``tabulary`` manual.
+ By default, Sphinx uses a table layout with ``J`` for every column.
.. versionadded:: 0.3
-.. warning::
-
- Tables with more than 30 rows are rendered using ``longtable``, not
- ``tabulary``, in order to allow pagebreaks.
+ .. versionchanged:: 1.6
+ Merged cells may now contain multiple paragraphs and are much better
+ handled, thanks to custom Sphinx LaTeX macros. This novel situation
+ motivated the switch to the ``J`` specifier as the default instead of ``L``.
- Tables that contain list-like elements such as object descriptions,
- blockquotes or any kind of lists cannot be set out of the box with
- ``tabulary``. They are therefore set with the standard LaTeX ``tabular``
- environment if you don't give a ``tabularcolumns`` directive. If you do, the
- table will be set with ``tabulary``, but you must use the ``p{width}``
- construct for the columns that contain these elements.
+ .. hint::
- Literal blocks do not work with ``tabulary`` at all, so tables containing a
- literal block are always set with ``tabular``. Also, the verbatim
- environment used for literal blocks only works in ``p{width}`` columns, which
- means that by default, Sphinx generates such column specs for such tables.
+ Sphinx actually uses the ``T`` specifier, having done ``\newcolumntype{T}{J}``.
+ To revert to the previous default, insert ``\newcolumntype{T}{L}`` in the
+ LaTeX preamble (see :confval:`latex_elements`).
+
+ A frequent issue with tabulary is that columns with little content are
+ "squeezed". The minimal column width is a tabulary parameter called
+ ``\tymin``. You may set it globally in the LaTeX preamble via
+ ``\setlength{\tymin}{40pt}`` for example.
+
+ Else, use the :rst:dir:`tabularcolumns` directive with an explicit
+ ``p{40pt}`` (for example) for that column. You may also use the ``l``
+ specifier, but this makes the task of setting column widths more difficult
+ if some merged cell intersects that column.
+
+ .. warning::
+
+ Tables with more than 30 rows are rendered using ``longtable``, not
+ ``tabulary``, in order to allow pagebreaks. The ``L``, ``R``, ... specifiers
+ do not work for these tables.
+
+ Tables that contain list-like elements such as object descriptions,
+ blockquotes or any kind of lists cannot be set out of the box with
+ ``tabulary``. They are therefore set with the standard LaTeX ``tabular`` (or
+ ``longtable``) environment if you don't give a ``tabularcolumns`` directive.
+ If you do, the table will be set with ``tabulary`` but you must use the
+ ``p{width}`` construct (or Sphinx's ``\X`` and ``\Y`` specifiers described
+ below) for the columns containing these elements.
+
+ Literal blocks do not work with ``tabulary`` at all, so tables containing
+ a literal block are always set with ``tabular``. The verbatim environment
+ used for literal blocks only works in ``p{width}`` (and ``\X`` or ``\Y``)
+ columns, hence Sphinx generates such column specs for tables containing
+ literal blocks.
+
+ Since Sphinx 1.5, the ``\X{a}{b}`` specifier is used (there *is* a backslash
+ in the specifier letter). It is like ``p{width}`` with the width set to a
+ fraction ``a/b`` of the current line width. You can use it in the
+ :rst:dir:`tabularcolumns` directive (it is not a problem if some LaTeX
+ macro is also called ``\X``).
+
+ It is *not* needed for ``b`` to be the total number of columns, nor for the
+ sum of the fractions of the ``\X`` specifiers to add up to one. For example
+ ``|\X{2}{5}|\X{1}{5}|\X{1}{5}|`` is legitimate and the table will occupy
+ 80% of the line width, the first of its three columns having the same width
+ as the sum of the next two.
+
+ This is used by the ``:widths:`` option of the :dudir:`table` directive.
+
+ Since Sphinx 1.6, there is also the ``\Y{f}`` specifier which admits a
+ decimal argument, such as ``\Y{0.15}``: this would have the same effect as
+ ``\X{3}{20}``.
+
+ .. versionchanged:: 1.6
+
+ Merged cells from complex grid tables (either multi-row, multi-column, or
+ both) now allow blockquotes, lists, literal blocks, ... as do regular cells.
+
+ Sphinx's merged cells interact well with ``p{width}``, ``\X{a}{b}``, ``\Y{f}``
+ and tabulary's columns.
.. rubric:: Footnotes
diff --git a/doc/markup/toctree.rst b/doc/markup/toctree.rst
index a0161ee3c..78a72c1b4 100644
--- a/doc/markup/toctree.rst
+++ b/doc/markup/toctree.rst
@@ -41,7 +41,7 @@ tables of contents. The ``toctree`` directive is the central element.
* Tables of contents from all those documents are inserted, with a maximum
depth of two, that means one nested heading. ``toctree`` directives in
those documents are also taken into account.
- * Sphinx knows that the relative order of the documents ``intro``,
+ * Sphinx knows the relative order of the documents ``intro``,
``strings`` and so forth, and it knows that they are children of the shown
document, the library index. From this information it generates "next
chapter", "previous chapter" and "parent chapter" links.
diff --git a/doc/setuptools.rst b/doc/setuptools.rst
new file mode 100644
index 000000000..a8666a0bd
--- /dev/null
+++ b/doc/setuptools.rst
@@ -0,0 +1,178 @@
+.. _setuptools:
+
+Setuptools integration
+======================
+
+Sphinx supports integration with setuptools and distutils through a custom
+command - :class:`~sphinx.setup_command.BuildDoc`.
+
+Using setuptools integration
+----------------------------
+
+The Sphinx build can then be triggered from distutils, and some Sphinx
+options can be set in ``setup.py`` or ``setup.cfg`` instead of Sphinx's own
+configuration file.
+
+For instance, from ``setup.py``::
+
+ # this is only necessary when not using setuptools/distribute
+ from sphinx.setup_command import BuildDoc
+ cmdclass = {'build_sphinx': BuildDoc}
+
+ name = 'My project'
+ version = '1.2'
+ release = '1.2.0'
+ setup(
+ name=name,
+ author='Bernard Montgomery',
+ version=release,
+ cmdclass=cmdclass,
+ # these are optional and override conf.py settings
+ command_options={
+ 'build_sphinx': {
+ 'project': ('setup.py', name),
+ 'version': ('setup.py', version),
+ 'release': ('setup.py', release)}},
+ )
+
+Or add this section in ``setup.cfg``::
+
+ [build_sphinx]
+ project = 'My project'
+ version = 1.2
+ release = 1.2.0
+
+Once configured, invoke this by running the relevant command on ``setup.py``::
+
+ $ python setup.py build_sphinx
+
+Options for setuptools integration
+----------------------------------
+
+.. confval:: fresh-env
+
+ A boolean that determines whether the saved environment should be discarded
+ on build. Default is false.
+
+ This can also be set by passing the `-E` flag to ``setup.py``.
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -E
+
+.. confval:: all-files
+
+ A boolean that determines whether all files should be built from scratch.
+ Default is false.
+
+ This can also be set by passing the `-a` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -a
+
+.. confval:: source-dir
+
+ The target source directory. This can be relative to the ``setup.py`` or
+ ``setup.cfg`` file, or it can be absolute. Default is ``''``.
+
+ This can also be set by passing the `-s` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -s $SOURCE_DIR
+
+.. confval:: build-dir
+
+ The target build directory. This can be relative to the ``setup.py`` or
+ ``setup.cfg`` file, or it can be absolute. Default is ``''``.
+
+.. confval:: config-dir
+
+ Location of the configuration directory. This can be relative to the
+ ``setup.py`` or ``setup.cfg`` file, or it can be absolute. Default is
+ ``''``.
+
+ This can also be set by passing the `-c` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -c $CONFIG_DIR
+
+ .. versionadded:: 1.0
+
+.. confval:: builder
+
+ The builder or list of builders to use. Default is ``html``.
+
+ This can also be set by passing the `-b` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -b $BUILDER
+
+ .. versionchanged:: 1.6
+ This can now be a comma- or space-separated list of builders.
+
+.. confval:: warning-is-error
+
+ A boolean that ensures Sphinx warnings will result in a failed build.
+ Default is false.
+
+ This can also be set by passing the `-W` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -W
+
+ .. versionadded:: 1.5
+
+.. confval:: project
+
+ The documented project's name. Default is ``''``.
+
+ .. versionadded:: 1.0
+
+.. confval:: version
+
+ The short X.Y version. Default is ``''``.
+
+ .. versionadded:: 1.0
+
+.. confval:: release
+
+ The full version, including alpha/beta/rc tags. Default is ``''``.
+
+ .. versionadded:: 1.0
+
+.. confval:: today
+
+ How to format the current date, used as the replacement for ``|today|``.
+ Default is ``''``.
+
+ .. versionadded:: 1.0
+
+.. confval:: link-index
+
+ A boolean that ensures index.html will be linked to the master doc. Default
+ is false.
+
+ This can also be set by passing the `-i` flag to ``setup.py``:
+
+ .. code-block:: bash
+
+ $ python setup.py build_sphinx -i
+
+ .. versionadded:: 1.0
+
+.. confval:: copyright
+
+ The copyright string. Default is ``''``.
+
+ .. versionadded:: 1.3
+
+.. confval:: pdb
+
+ A boolean to configure ``pdb`` on exception. Default is false.
+
+ .. versionadded:: 1.5
diff --git a/doc/templating.rst b/doc/templating.rst
index b9f9410de..41acea91b 100644
--- a/doc/templating.rst
+++ b/doc/templating.rst
@@ -291,31 +291,12 @@ in the future.
The value of :confval:`master_doc`, for usage with :func:`pathto`.
-.. data:: next
-
- The next document for the navigation. This variable is either false or has
- two attributes `link` and `title`. The title contains HTML markup. For
- example, to generate a link to the next page, you can use this snippet::
-
- {% if next %}
- <a href="{{ next.link|e }}">{{ next.title }}</a>
- {% endif %}
-
.. data:: pagename
The "page name" of the current file, i.e. either the document name if the
file is generated from a reST source, or the equivalent hierarchical name
relative to the output directory (``[directory/]filename_without_extension``).
-.. data:: parents
-
- A list of parent documents for navigation, structured like the :data:`next`
- item.
-
-.. data:: prev
-
- Like :data:`next`, but for the previous page.
-
.. data:: project
The value of :confval:`project`.
@@ -369,16 +350,58 @@ In documents that are created from source files (as opposed to
automatically-generated files like the module index, or documents that already
are in HTML form), these variables are also available:
+.. data:: body
+
+ A string containing the content of the page in HTML form as produced by the HTML builder,
+ before the theme is applied.
+
+.. data:: display_toc
+
+ A boolean that is True if the toc contains more than one entry.
+
.. data:: meta
Document metadata (a dictionary), see :ref:`metadata`.
+.. data:: metatags
+
+ A string containing the page's HTML :dudir:`meta` tags.
+
+.. data:: next
+
+ The next document for the navigation. This variable is either false or has
+ two attributes `link` and `title`. The title contains HTML markup. For
+ example, to generate a link to the next page, you can use this snippet::
+
+ {% if next %}
+ <a href="{{ next.link|e }}">{{ next.title }}</a>
+ {% endif %}
+
+
+.. data:: page_source_suffix
+
+ The suffix of the file that was rendered. Since we support a list of :confval:`source_suffix`,
+ this will allow you to properly link to the original source file.
+
+.. data:: parents
+
+ A list of parent documents for navigation, structured like the :data:`next`
+ item.
+
+.. data:: prev
+
+ Like :data:`next`, but for the previous page.
+
.. data:: sourcename
The name of the copied source file for the current document. This is only
nonempty if the :confval:`html_copy_source` value is ``True``.
This has empty value on creating automatically-generated files.
+.. data:: title
+
+ The page title.
+
.. data:: toc
The local table of contents for the current page, rendered as HTML bullet
@@ -401,7 +424,4 @@ are in HTML form), these variables are also available:
* ``includehidden`` (``False`` by default): if true, the TOC tree will also
contain hidden entries.
-.. data:: page_source_suffix
- The suffix of the file that was rendered. Since we support a list of :confval:`source_suffix`,
- this will allow you to properly link to the original source file.
diff --git a/doc/tutorial.rst b/doc/tutorial.rst
index bced21ade..ffabbd93e 100644
--- a/doc/tutorial.rst
+++ b/doc/tutorial.rst
@@ -308,6 +308,7 @@ More topics to be covered
* ...
- Static files
- :doc:`Selecting a theme <theming>`
+- :doc:`setuptools`
- :ref:`Templating <templating>`
- Using extensions
- :ref:`Writing extensions <dev-extensions>`
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 000000000..8fbf24f30
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+python_version = 2.7
+ignore_missing_imports = True
+follow_imports = skip
+fast_parser = True
+incremental = True
+check_untyped_defs = True
+warn_unused_ignores = True
diff --git a/setup.cfg b/setup.cfg
index e341d921a..729d06312 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -27,3 +27,6 @@ universal = 1
max-line-length = 95
ignore = E116,E241,E251
exclude = .git,.tox,tests/*,build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py
+
+[build_sphinx]
+warning-is-error = 1
diff --git a/setup.py b/setup.py
index fafb2f34a..a118b7bcb 100644
--- a/setup.py
+++ b/setup.py
@@ -51,6 +51,7 @@ requires = [
'alabaster>=0.7,<0.8',
'imagesize',
'requests>=2.0.0',
+ 'typing',
]
extras_require = {
# Environment Marker works for wheel 0.24 or later
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index 130a8354a..10114fce2 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -21,6 +21,10 @@ from os import path
from .deprecation import RemovedInNextVersionWarning
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
# by default, all DeprecationWarning under sphinx package will be emit.
# Users can avoid this by using environment variable: PYTHONWARNINGS=
if 'PYTHONWARNINGS' not in os.environ:
@@ -30,19 +34,19 @@ if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
-__version__ = '1.5.4+'
-__released__ = '1.5.4' # used when Sphinx builds its own docs
+__version__ = '1.6'
+__released__ = '1.6+' # used when Sphinx builds its own docs
# version info for better programmatic use
# possible values for 3rd element: 'alpha', 'beta', 'rc', 'final'
# 'final' has 0 as the last element
-version_info = (1, 5, 4, 'beta', 0)
+version_info = (1, 6, 0, 'beta', 1)
package_dir = path.abspath(path.dirname(__file__))
__display_version__ = __version__ # used for command line version
if __version__.endswith('+'):
- # try to find out the changeset hash if checked out from hg, and append
+ # try to find out the commit hash if checked out from git, and append
# it to __version__ (since we use this value from setup.py, it gets
# automatically propagated to an installed copy as well)
__display_version__ = __version__
@@ -54,19 +58,21 @@ if __version__.endswith('+'):
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if out:
- __display_version__ += '/' + out.decode().strip()
+ __display_version__ += '/' + out.decode().strip() # type: ignore
except Exception:
pass
def main(argv=sys.argv):
+ # type: (List[str]) -> int
if sys.argv[1:2] == ['-M']:
- sys.exit(make_main(argv))
+ return make_main(argv)
else:
- sys.exit(build_main(argv))
+ return build_main(argv)
def build_main(argv=sys.argv):
+ # type: (List[str]) -> int
"""Sphinx build "main" command-line entry."""
if (sys.version_info[:3] < (2, 7, 0) or
(3, 0, 0) <= sys.version_info[:3] < (3, 4, 0)):
@@ -99,18 +105,19 @@ def build_main(argv=sys.argv):
return 1
raise
- from sphinx.util.compat import docutils_version
- if docutils_version < (0, 10):
+ import sphinx.util.docutils
+ if sphinx.util.docutils.__version_info__ < (0, 10):
sys.stderr.write('Error: Sphinx requires at least Docutils 0.10 to '
'run.\n')
return 1
- return cmdline.main(argv)
+ return cmdline.main(argv) # type: ignore
def make_main(argv=sys.argv):
+ # type: (List[str]) -> int
"""Sphinx build "make mode" entry."""
from sphinx import make_mode
- return make_mode.run_make_mode(argv[2:])
+ return make_mode.run_make_mode(argv[2:]) # type: ignore
if __name__ == '__main__':
diff --git a/sphinx/addnodes.py b/sphinx/addnodes.py
index 32fb3b8bc..e1168a8ff 100644
--- a/sphinx/addnodes.py
+++ b/sphinx/addnodes.py
@@ -9,10 +9,11 @@
:license: BSD, see LICENSE for details.
"""
-import warnings
-
from docutils import nodes
-from sphinx.deprecation import RemovedInSphinx16Warning
+
+if False:
+ # For type annotation
+ from typing import List, Sequence # NOQA
class translatable(object):
@@ -30,14 +31,17 @@ class translatable(object):
"""
def preserve_original_messages(self):
+ # type: () -> None
"""Preserve original translatable messages."""
raise NotImplementedError
def apply_translated_message(self, original_message, translated_message):
+ # type: (unicode, unicode) -> None
"""Apply translated message."""
raise NotImplementedError
def extract_original_messages(self):
+ # type: () -> Sequence[unicode]
"""Extract translation messages.
:returns: list of extracted messages or messages generator
@@ -49,14 +53,17 @@ class toctree(nodes.General, nodes.Element, translatable):
"""Node for inserting a "TOC tree"."""
def preserve_original_messages(self):
+ # type: () -> None
if self.get('caption'):
self['rawcaption'] = self['caption']
def apply_translated_message(self, original_message, translated_message):
+ # type: (unicode, unicode) -> None
if self.get('rawcaption') == original_message:
self['caption'] = translated_message
def extract_original_messages(self):
+ # type: () -> List[unicode]
if 'rawcaption' in self:
return [self['rawcaption']]
else:
@@ -109,6 +116,7 @@ class desc_type(nodes.Part, nodes.Inline, nodes.TextElement):
class desc_returns(desc_type):
"""Node for a "returns" annotation (a la -> in Python)."""
def astext(self):
+ # type: () -> unicode
return ' -> ' + nodes.TextElement.astext(self)
@@ -130,6 +138,7 @@ class desc_optional(nodes.Part, nodes.Inline, nodes.TextElement):
child_text_separator = ', '
def astext(self):
+ # type: () -> unicode
return '[' + nodes.TextElement.astext(self) + ']'
@@ -273,19 +282,6 @@ class abbreviation(nodes.Inline, nodes.TextElement):
"""Node for abbreviations with explanations."""
-class termsep(nodes.Structural, nodes.Element):
- """Separates two terms within a <term> node.
-
- .. versionchanged:: 1.4
- sphinx.addnodes.termsep is deprecated. It will be removed at Sphinx-1.6.
- """
-
- def __init__(self, *args, **kw):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6',
- RemovedInSphinx16Warning, stacklevel=2)
- super(termsep, self).__init__(*args, **kw)
-
-
class manpage(nodes.Inline, nodes.TextElement):
"""Node for references to manpages."""
diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py
index 1efa8d33a..24ed874b0 100644
--- a/sphinx/apidoc.py
+++ b/sphinx/apidoc.py
@@ -23,9 +23,14 @@ from os import path
from six import binary_type
from fnmatch import fnmatch
-from sphinx.util.osutil import FileAvoidWrite, walk
from sphinx import __display_version__
+from sphinx.quickstart import EXTENSIONS
from sphinx.util import rst
+from sphinx.util.osutil import FileAvoidWrite, walk
+
+if False:
+ # For type annotation
+ from typing import Any, List, Tuple # NOQA
# automodule options
if 'SPHINX_APIDOC_OPTIONS' in os.environ:
@@ -43,6 +48,7 @@ PY_SUFFIXES = set(['.py', '.pyx'])
def makename(package, module):
+ # type: (unicode, unicode) -> unicode
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
@@ -55,6 +61,7 @@ def makename(package, module):
def write_file(name, text, opts):
+ # type: (unicode, unicode, Any) -> None
"""Write the output file for module/package <name>."""
fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
if opts.dryrun:
@@ -69,6 +76,7 @@ def write_file(name, text, opts):
def format_heading(level, text, escape=True):
+ # type: (int, unicode, bool) -> unicode
"""Create a heading of <level> [1, 2 or 3 supported]."""
if escape:
text = rst.escape(text)
@@ -77,6 +85,7 @@ def format_heading(level, text, escape=True):
def format_directive(module, package=None):
+ # type: (unicode, unicode) -> unicode
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
@@ -85,6 +94,7 @@ def format_directive(module, package=None):
def create_module_file(package, module, opts):
+ # type: (unicode, unicode, Any) -> None
"""Build the text of the file and write the file."""
if not opts.noheadings:
text = format_heading(1, '%s module' % module)
@@ -96,6 +106,7 @@ def create_module_file(package, module, opts):
def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace):
+ # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool) -> None
"""Build the text of the file and write the file."""
text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
% makename(master_package, subroot))
@@ -151,13 +162,14 @@ def create_package_file(root, master_package, subroot, py_files, opts, subs, is_
def create_modules_toc_file(modules, opts, name='modules'):
+ # type: (List[unicode], Any, unicode) -> None
"""Create the module's index."""
text = format_heading(1, '%s' % opts.header, escape=False)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
- prev_module = ''
+ prev_module = '' # type: unicode
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
@@ -169,6 +181,7 @@ def create_modules_toc_file(modules, opts, name='modules'):
def shall_skip(module, opts):
+ # type: (unicode, Any) -> bool
"""Check if we want to skip this module."""
# skip if the file doesn't exist and not using implicit namespaces
if not opts.implicit_namespaces and not path.exists(module):
@@ -187,6 +200,7 @@ def shall_skip(module, opts):
def recurse_tree(rootpath, excludes, opts):
+ # type: (unicode, List[unicode], Any) -> List[unicode]
"""
Look for every file in the directory tree and create the corresponding
ReST files.
@@ -220,7 +234,7 @@ def recurse_tree(rootpath, excludes, opts):
# remove hidden ('.') and private ('_') directories, as well as
# excluded dirs
if includeprivate:
- exclude_prefixes = ('.',)
+ exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
else:
exclude_prefixes = ('.', '_')
subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
@@ -250,11 +264,13 @@ def recurse_tree(rootpath, excludes, opts):
def normalize_excludes(rootpath, excludes):
+ # type: (unicode, List[unicode]) -> List[unicode]
"""Normalize the excluded directory list."""
return [path.abspath(exclude) for exclude in excludes]
def is_excluded(root, excludes):
+ # type: (unicode, List[unicode]) -> bool
"""Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, like
@@ -267,6 +283,7 @@ def is_excluded(root, excludes):
def main(argv=sys.argv):
+ # type: (List[str]) -> int
"""Parse and check the command line arguments."""
parser = optparse.OptionParser(
usage="""\
@@ -333,6 +350,11 @@ Note: By default this script will not overwrite already created files.""")
'defaults to --doc-version')
parser.add_option('--version', action='store_true', dest='show_version',
help='Show version information and exit')
+ group = parser.add_option_group('Extension options')
+ for ext in EXTENSIONS:
+ group.add_option('--ext-' + ext, action='store_true',
+ dest='ext_' + ext, default=False,
+ help='enable %s extension' % ext)
(opts, args) = parser.parse_args(argv[1:])
@@ -362,7 +384,7 @@ Note: By default this script will not overwrite already created files.""")
if opts.full:
from sphinx import quickstart as qs
modules.sort()
- prev_module = ''
+ prev_module = '' # type: unicode
text = ''
for module in modules:
if module.startswith(prev_module + '.'):
@@ -391,6 +413,10 @@ Note: By default this script will not overwrite already created files.""")
module_path = rootpath,
append_syspath = opts.append_syspath,
)
+ enabled_exts = {'ext_' + ext: getattr(opts, 'ext_' + ext)
+ for ext in EXTENSIONS if getattr(opts, 'ext_' + ext)}
+ d.update(enabled_exts)
+
if isinstance(opts.header, binary_type):
d['project'] = d['project'].decode('utf-8')
if isinstance(opts.author, binary_type):
@@ -404,6 +430,7 @@ Note: By default this script will not overwrite already created files.""")
qs.generate(d, silent=True, overwrite=opts.force)
elif not opts.notoc:
create_modules_toc_file(modules, opts)
+ return 0
# So program can be started with "python -m sphinx.apidoc ..."
diff --git a/sphinx/application.py b/sphinx/application.py
index 56b669d1b..e735b8530 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -15,52 +15,52 @@ from __future__ import print_function
import os
import sys
import types
+import warnings
import posixpath
-import traceback
from os import path
from collections import deque
-from six import iteritems, itervalues, text_type
+from six import iteritems
from six.moves import cStringIO
+
from docutils import nodes
from docutils.parsers.rst import convert_directive_function, \
directives, roles
import sphinx
from sphinx import package_dir, locale
-from sphinx.roles import XRefRole
from sphinx.config import Config
-from sphinx.errors import SphinxError, SphinxWarning, ExtensionError, \
- VersionRequirementError, ConfigError
+from sphinx.errors import SphinxError, ExtensionError, VersionRequirementError, \
+ ConfigError
from sphinx.domains import ObjType
from sphinx.domains.std import GenericObject, Target, StandardDomain
+from sphinx.deprecation import RemovedInSphinx17Warning, RemovedInSphinx20Warning
from sphinx.environment import BuildEnvironment
+from sphinx.events import EventManager
+from sphinx.extension import load_extension, verify_required_extensions
from sphinx.io import SphinxStandaloneReader
+from sphinx.locale import _
+from sphinx.roles import XRefRole
from sphinx.util import pycompat # noqa: F401
from sphinx.util import import_object
+from sphinx.util import logging
+from sphinx.util import status_iterator, old_status_iterator, display_chunk
from sphinx.util.tags import Tags
from sphinx.util.osutil import ENOENT
-from sphinx.util.logging import is_suppressed_warning
-from sphinx.util.console import bold, lightgray, darkgray, darkred, darkgreen, \
- term_width_line
+from sphinx.util.console import bold, darkgreen # type: ignore
+from sphinx.util.docutils import is_html5_writer_available
from sphinx.util.i18n import find_catalog_source_files
-# List of all known core events. Maps name to arguments description.
-events = {
- 'builder-inited': '',
- 'env-get-outdated': 'env, added, changed, removed',
- 'env-purge-doc': 'env, docname',
- 'env-before-read-docs': 'env, docnames',
- 'source-read': 'docname, source text',
- 'doctree-read': 'the doctree before being pickled',
- 'env-merge-info': 'env, read docnames, other env instance',
- 'missing-reference': 'env, node, contnode',
- 'doctree-resolved': 'doctree, docname',
- 'env-updated': 'env',
- 'html-collect-pages': 'builder',
- 'html-page-context': 'pagename, context, doctree or None',
- 'build-finished': 'exception',
-}
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Tuple, Type, Union # NOQA
+ from docutils.parsers import Parser # NOQA
+ from docutils.transform import Transform # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.domains import Domain, Index # NOQA
+ from sphinx.environment.collectors import EnvironmentCollector # NOQA
+ from sphinx.extension import Extension # NOQA
+
builtin_extensions = (
'sphinx.builders.applehelp',
'sphinx.builders.changes',
@@ -90,14 +90,23 @@ builtin_extensions = (
'sphinx.directives.other',
'sphinx.directives.patches',
'sphinx.roles',
-)
+ 'sphinx.transforms.post_transforms',
+ # collectors should be loaded by specific order
+ 'sphinx.environment.collectors.dependencies',
+ 'sphinx.environment.collectors.asset',
+ 'sphinx.environment.collectors.metadata',
+ 'sphinx.environment.collectors.title',
+ 'sphinx.environment.collectors.toctree',
+ 'sphinx.environment.collectors.indexentries',
+ # Strictly, alabaster theme is not a builtin extension,
+ # but it is loaded automatically to use it as default theme.
+ 'alabaster',
+) # type: Tuple[unicode, ...]
CONFIG_FILENAME = 'conf.py'
ENV_PICKLE_FILENAME = 'environment.pickle'
-# list of deprecated extensions. Keys are extension name.
-# Values are Sphinx version that merge the extension.
-EXTENSION_BLACKLIST = {"sphinxjp.themecore": "1.2"}
+logger = logging.getLogger(__name__)
class Sphinx(object):
@@ -106,19 +115,17 @@ class Sphinx(object):
confoverrides=None, status=sys.stdout, warning=sys.stderr,
freshenv=False, warningiserror=False, tags=None, verbosity=0,
parallel=0):
+ # type: (unicode, unicode, unicode, unicode, unicode, Dict, IO, IO, bool, bool, List[unicode], int, int) -> None # NOQA
self.verbosity = verbosity
- self.next_listener_id = 0
- self._extensions = {}
- self._extension_metadata = {}
- self._additional_source_parsers = {}
- self._listeners = {}
- self._setting_up_extension = ['?']
- self.domains = {}
- self.buildername = buildername
- self.builderclasses = {}
- self.builder = None
- self.env = None
- self.enumerable_nodes = {}
+ self.extensions = {} # type: Dict[unicode, Extension]
+ self._additional_source_parsers = {} # type: Dict[unicode, Parser]
+ self._setting_up_extension = ['?'] # type: List[unicode]
+ self.domains = {} # type: Dict[unicode, Type[Domain]]
+ self.builderclasses = {} # type: Dict[unicode, Type[Builder]]
+ self.builder = None # type: Builder
+ self.env = None # type: BuildEnvironment
+ self.enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, Callable]] # NOQA
+ self.post_transforms = [] # type: List[Transform]
self.srcdir = srcdir
self.confdir = confdir
@@ -128,56 +135,55 @@ class Sphinx(object):
self.parallel = parallel
if status is None:
- self._status = cStringIO()
+ self._status = cStringIO() # type: IO
self.quiet = True
else:
self._status = status
self.quiet = False
if warning is None:
- self._warning = cStringIO()
+ self._warning = cStringIO() # type: IO
else:
self._warning = warning
self._warncount = 0
self.warningiserror = warningiserror
+ logging.setup(self, self._status, self._warning)
- self._events = events.copy()
- self._translators = {}
+ self.events = EventManager()
+ self._translators = {} # type: Dict[unicode, nodes.GenericNodeVisitor]
# keep last few messages for traceback
- self.messagelog = deque(maxlen=10)
+ self.messagelog = deque(maxlen=10) # type: deque
# say hello to the world
- self.info(bold('Running Sphinx v%s' % sphinx.__display_version__))
+ logger.info(bold('Running Sphinx v%s' % sphinx.__display_version__))
# status code for command-line application
self.statuscode = 0
if not path.isdir(outdir):
- self.info('making output directory...')
+ logger.info('making output directory...')
os.makedirs(outdir)
# read config
self.tags = Tags(tags)
self.config = Config(confdir, CONFIG_FILENAME,
confoverrides or {}, self.tags)
- self.config.check_unicode(self.warn)
+ self.config.check_unicode()
# defer checking types until i18n has been initialized
- # initialize some limited config variables before loading extensions
- self.config.pre_init_values(self.warn)
+ # initialize some limited config variables before initialize i18n and loading
+ # extensions
+ self.config.pre_init_values()
+
+ # set up translation infrastructure
+ self._init_i18n()
# check the Sphinx version if requested
if self.config.needs_sphinx and self.config.needs_sphinx > sphinx.__display_version__:
raise VersionRequirementError(
- 'This project needs at least Sphinx v%s and therefore cannot '
- 'be built with this version.' % self.config.needs_sphinx)
-
- # force preload html_translator_class
- if self.config.html_translator_class:
- translator_class = self.import_object(self.config.html_translator_class,
- 'html_translator_class setting')
- self.set_translator('html', translator_class)
+ _('This project needs at least Sphinx v%s and therefore cannot '
+ 'be built with this version.') % self.config.needs_sphinx)
# set confdir to srcdir if -C given (!= no confdir); a few pieces
# of code expect a confdir to be set
@@ -188,12 +194,6 @@ class Sphinx(object):
for extension in builtin_extensions:
self.setup_extension(extension)
- # extension loading support for alabaster theme
- # self.config.html_theme is not set from conf.py at here
- # for now, sphinx always load a 'alabaster' extension.
- if 'alabaster' not in self.config.extensions:
- self.config.extensions.append('alabaster')
-
# load all user-given extension modules
for extension in self.config.extensions:
self.setup_extension(extension)
@@ -205,60 +205,50 @@ class Sphinx(object):
self.config.setup(self)
else:
raise ConfigError(
- "'setup' that is specified in the conf.py has not been " +
- "callable. Please provide a callable `setup` function " +
- "in order to behave as a sphinx extension conf.py itself."
+ _("'setup' that is specified in the conf.py has not been "
+ "callable. Please provide a callable `setup` function "
+ "in order to behave as a sphinx extension conf.py itself.")
)
# now that we know all config values, collect them from conf.py
- self.config.init_values(self.warn)
+ self.config.init_values()
# check extension versions if requested
- if self.config.needs_extensions:
- for extname, needs_ver in self.config.needs_extensions.items():
- if extname not in self._extensions:
- self.warn('needs_extensions config value specifies a '
- 'version requirement for extension %s, but it is '
- 'not loaded' % extname)
- continue
- has_ver = self._extension_metadata[extname]['version']
- if has_ver == 'unknown version' or needs_ver > has_ver:
- raise VersionRequirementError(
- 'This project needs the extension %s at least in '
- 'version %s and therefore cannot be built with the '
- 'loaded version (%s).' % (extname, needs_ver, has_ver))
+ verify_required_extensions(self, self.config.needs_extensions)
# check primary_domain if requested
if self.config.primary_domain and self.config.primary_domain not in self.domains:
- self.warn('primary_domain %r not found, ignored.' % self.config.primary_domain)
+ logger.warning(_('primary_domain %r not found, ignored.'),
+ self.config.primary_domain)
- # set up translation infrastructure
- self._init_i18n()
+ # create the builder
+ self.builder = self.create_builder(buildername)
# check all configuration values for permissible types
- self.config.check_types(self.warn)
+ self.config.check_types()
# set up source_parsers
self._init_source_parsers()
# set up the build environment
self._init_env(freshenv)
# set up the builder
- self._init_builder(self.buildername)
+ self._init_builder()
# set up the enumerable nodes
self._init_enumerable_nodes()
def _init_i18n(self):
+ # type: () -> None
"""Load translated strings from the configured localedirs if enabled in
the configuration.
"""
if self.config.language is not None:
- self.info(bold('loading translations [%s]... ' %
- self.config.language), nonl=True)
+ logger.info(bold('loading translations [%s]... ' % self.config.language),
+ nonl=True)
user_locale_dirs = [
path.join(self.srcdir, x) for x in self.config.locale_dirs]
# compile mo files if sphinx.po file in user locale directories are updated
for catinfo in find_catalog_source_files(
user_locale_dirs, self.config.language, domains=['sphinx'],
charset=self.config.source_encoding):
- catinfo.write_mo(self.config.language, self.warn)
+ catinfo.write_mo(self.config.language)
locale_dirs = [None, path.join(package_dir, 'locale')] + user_locale_dirs
else:
locale_dirs = []
@@ -266,11 +256,12 @@ class Sphinx(object):
if self.config.language is not None:
if has_translation or self.config.language == 'en':
# "en" never needs to be translated
- self.info('done')
+ logger.info(_('done'))
else:
- self.info('not available for built-in messages')
+ logger.info('not available for built-in messages')
def _init_source_parsers(self):
+ # type: () -> None
for suffix, parser in iteritems(self._additional_source_parsers):
if suffix not in self.config.source_suffix:
self.config.source_suffix.append(suffix)
@@ -278,49 +269,62 @@ class Sphinx(object):
self.config.source_parsers[suffix] = parser
def _init_env(self, freshenv):
+ # type: (bool) -> None
if freshenv:
- self.env = BuildEnvironment(self.srcdir, self.doctreedir, self.config)
- self.env.set_warnfunc(self.warn)
- self.env.find_files(self.config, self.buildername)
+ self.env = BuildEnvironment(self)
+ self.env.find_files(self.config, self.builder)
for domain in self.domains.keys():
self.env.domains[domain] = self.domains[domain](self.env)
else:
try:
- self.info(bold('loading pickled environment... '), nonl=True)
- self.env = BuildEnvironment.frompickle(
- self.srcdir, self.config, path.join(self.doctreedir, ENV_PICKLE_FILENAME))
- self.env.set_warnfunc(self.warn)
- self.env.init_managers()
+ logger.info(bold(_('loading pickled environment... ')), nonl=True)
+ filename = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
+ self.env = BuildEnvironment.frompickle(filename, self)
self.env.domains = {}
for domain in self.domains.keys():
# this can raise if the data version doesn't fit
self.env.domains[domain] = self.domains[domain](self.env)
- self.info('done')
+ logger.info(_('done'))
except Exception as err:
if isinstance(err, IOError) and err.errno == ENOENT:
- self.info('not yet created')
+ logger.info(_('not yet created'))
else:
- self.info('failed: %s' % err)
+ logger.info(_('failed: %s'), err)
self._init_env(freshenv=True)
- def _init_builder(self, buildername):
+ def create_builder(self, buildername):
+ # type: (unicode) -> Builder
if buildername is None:
- print('No builder selected, using default: html', file=self._status)
+ logger.info(_('No builder selected, using default: html'))
buildername = 'html'
if buildername not in self.builderclasses:
- raise SphinxError('Builder name %s not registered' % buildername)
+ raise SphinxError(_('Builder name %s not registered') % buildername)
builderclass = self.builderclasses[buildername]
- self.builder = builderclass(self)
+ return builderclass(self)
+
+ def _init_builder(self):
+ # type: () -> None
+ self.builder.set_environment(self.env)
+ self.builder.init()
self.emit('builder-inited')
def _init_enumerable_nodes(self):
+ # type: () -> None
for node, settings in iteritems(self.enumerable_nodes):
- self.env.domains['std'].enumerable_nodes[node] = settings
+ self.env.get_domain('std').enumerable_nodes[node] = settings # type: ignore
+
+ @property
+ def buildername(self):
+ # type: () -> unicode
+ warnings.warn('app.buildername is deprecated. Please use app.builder.name instead',
+ RemovedInSphinx17Warning)
+ return self.builder.name
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
+ # type: (bool, List[unicode]) -> None
try:
if force_all:
self.builder.compile_all_catalogs()
@@ -333,13 +337,13 @@ class Sphinx(object):
self.builder.build_update()
status = (self.statuscode == 0 and
- 'succeeded' or 'finished with problems')
+ _('succeeded') or _('finished with problems'))
if self._warncount:
- self.info(bold('build %s, %s warning%s.' %
- (status, self._warncount,
- self._warncount != 1 and 's' or '')))
+ logger.info(bold(_('build %s, %s warning%s.') %
+ (status, self._warncount,
+ self._warncount != 1 and 's' or '')))
else:
- self.info(bold('build %s.' % status))
+ logger.info(bold(_('build %s.') % status))
except Exception as err:
# delete the saved env to force a fresh build next time
envfile = path.join(self.doctreedir, ENV_PICKLE_FILENAME)
@@ -352,23 +356,9 @@ class Sphinx(object):
self.builder.cleanup()
# ---- logging handling ----------------------------------------------------
-
- def _log(self, message, wfile, nonl=False):
- try:
- wfile.write(message)
- except UnicodeEncodeError:
- encoding = getattr(wfile, 'encoding', 'ascii') or 'ascii'
- # wfile.write accept only str, not bytes.So, we encode and replace
- # non-encodable characters, then decode them.
- wfile.write(message.encode(encoding, 'replace').decode(encoding))
- if not nonl:
- wfile.write('\n')
- if hasattr(wfile, 'flush'):
- wfile.flush()
- self.messagelog.append(message)
-
- def warn(self, message, location=None, prefix='WARNING: ',
- type=None, subtype=None, colorfunc=darkred):
+ def warn(self, message, location=None, prefix=None,
+ type=None, subtype=None, colorfunc=None):
+ # type: (unicode, unicode, unicode, unicode, unicode, Callable) -> None
"""Emit a warning.
If *location* is given, it should either be a tuple of (docname, lineno)
@@ -384,382 +374,306 @@ class Sphinx(object):
:meth:`.BuildEnvironment.warn` since that will collect all
warnings during parsing for later output.
"""
- if is_suppressed_warning(type, subtype, self.config.suppress_warnings):
- return
+ if prefix:
+ warnings.warn('prefix option of warn() is now deprecated.',
+ RemovedInSphinx17Warning)
+ if colorfunc:
+ warnings.warn('colorfunc option of warn() is now deprecated.',
+ RemovedInSphinx17Warning)
- if isinstance(location, tuple):
- docname, lineno = location
- if docname:
- location = '%s:%s' % (self.env.doc2path(docname), lineno or '')
- else:
- location = None
- warntext = location and '%s: %s%s\n' % (location, prefix, message) or \
- '%s%s\n' % (prefix, message)
- if self.warningiserror:
- raise SphinxWarning(warntext)
- self._warncount += 1
- self._log(colorfunc(warntext), self._warning, True)
+ warnings.warn('app.warning() is now deprecated. Use sphinx.util.logging instead.',
+ RemovedInSphinx20Warning)
+ logger.warning(message, type=type, subtype=subtype, location=location)
def info(self, message='', nonl=False):
+ # type: (unicode, bool) -> None
"""Emit an informational message.
If *nonl* is true, don't emit a newline at the end (which implies that
more info output will follow soon.)
"""
- self._log(message, self._status, nonl)
+ warnings.warn('app.info() is now deprecated. Use sphinx.util.logging instead.',
+ RemovedInSphinx20Warning)
+ logger.info(message, nonl=nonl)
def verbose(self, message, *args, **kwargs):
- """Emit a verbose informational message.
-
- The message will only be emitted for verbosity levels >= 1 (i.e. at
- least one ``-v`` option was given).
-
- The message can contain %-style interpolation placeholders, which is
- formatted with either the ``*args`` or ``**kwargs`` when output.
- """
- if self.verbosity < 1:
- return
- if args or kwargs:
- message = message % (args or kwargs)
- self._log(message, self._status)
+ # type: (unicode, Any, Any) -> None
+ """Emit a verbose informational message."""
+ warnings.warn('app.verbose() is now deprecated. Use sphinx.util.logging instead.',
+ RemovedInSphinx20Warning)
+ logger.verbose(message, *args, **kwargs)
def debug(self, message, *args, **kwargs):
- """Emit a debug-level informational message.
-
- The message will only be emitted for verbosity levels >= 2 (i.e. at
- least two ``-v`` options were given).
-
- The message can contain %-style interpolation placeholders, which is
- formatted with either the ``*args`` or ``**kwargs`` when output.
- """
- if self.verbosity < 2:
- return
- if args or kwargs:
- message = message % (args or kwargs)
- self._log(darkgray(message), self._status)
+ # type: (unicode, Any, Any) -> None
+ """Emit a debug-level informational message."""
+ warnings.warn('app.debug() is now deprecated. Use sphinx.util.logging instead.',
+ RemovedInSphinx20Warning)
+ logger.debug(message, *args, **kwargs)
def debug2(self, message, *args, **kwargs):
- """Emit a lowlevel debug-level informational message.
-
- The message will only be emitted for verbosity level 3 (i.e. three
- ``-v`` options were given).
-
- The message can contain %-style interpolation placeholders, which is
- formatted with either the ``*args`` or ``**kwargs`` when output.
- """
- if self.verbosity < 3:
- return
- if args or kwargs:
- message = message % (args or kwargs)
- self._log(lightgray(message), self._status)
+ # type: (unicode, Any, Any) -> None
+ """Emit a lowlevel debug-level informational message."""
+ warnings.warn('app.debug2() is now deprecated. Use debug() instead.',
+ RemovedInSphinx20Warning)
+ logger.debug(message, *args, **kwargs)
def _display_chunk(chunk):
- if isinstance(chunk, (list, tuple)):
- if len(chunk) == 1:
- return text_type(chunk[0])
- return '%s .. %s' % (chunk[0], chunk[-1])
- return text_type(chunk)
+ # type: (Any) -> unicode
+ warnings.warn('app._display_chunk() is now deprecated. '
+ 'Use sphinx.util.display_chunk() instead.',
+ RemovedInSphinx17Warning)
+ return display_chunk(chunk)
def old_status_iterator(self, iterable, summary, colorfunc=darkgreen,
- stringify_func=_display_chunk):
- l = 0
- for item in iterable:
- if l == 0:
- self.info(bold(summary), nonl=True)
- l = 1
- self.info(colorfunc(stringify_func(item)) + ' ', nonl=True)
+ stringify_func=display_chunk):
+ # type: (Iterable, unicode, Callable, Callable[[Any], unicode]) -> Iterator
+ warnings.warn('app.old_status_iterator() is now deprecated. '
+ 'Use sphinx.util.status_iterator() instead.',
+ RemovedInSphinx17Warning)
+ for item in old_status_iterator(iterable, summary,
+ color="darkgreen", stringify_func=stringify_func):
yield item
- if l == 1:
- self.info()
# new version with progress info
def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0,
stringify_func=_display_chunk):
- if length == 0:
- for item in self.old_status_iterator(iterable, summary, colorfunc,
- stringify_func):
- yield item
- return
- l = 0
- summary = bold(summary)
- for item in iterable:
- l += 1
- s = '%s[%3d%%] %s' % (summary, 100 * l / length,
- colorfunc(stringify_func(item)))
- if self.verbosity:
- s += '\n'
- else:
- s = term_width_line(s)
- self.info(s, nonl=True)
+ # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable
+ warnings.warn('app.status_iterator() is now deprecated. '
+ 'Use sphinx.util.status_iterator() instead.',
+ RemovedInSphinx17Warning)
+ for item in status_iterator(iterable, summary, length=length, verbosity=self.verbosity,
+ color="darkgreen", stringify_func=stringify_func):
yield item
- if l > 0:
- self.info()
# ---- general extensibility interface -------------------------------------
- def setup_extension(self, extension):
+ def setup_extension(self, extname):
+ # type: (unicode) -> None
"""Import and setup a Sphinx extension module. No-op if called twice."""
- self.debug('[app] setting up extension: %r', extension)
- if extension in self._extensions:
- return
- if extension in EXTENSION_BLACKLIST:
- self.warn('the extension %r was already merged with Sphinx since version %s; '
- 'this extension is ignored.' % (
- extension, EXTENSION_BLACKLIST[extension]))
- return
- self._setting_up_extension.append(extension)
- try:
- mod = __import__(extension, None, None, ['setup'])
- except ImportError as err:
- self.verbose('Original exception:\n' + traceback.format_exc())
- raise ExtensionError('Could not import extension %s' % extension,
- err)
- if not hasattr(mod, 'setup'):
- self.warn('extension %r has no setup() function; is it really '
- 'a Sphinx extension module?' % extension)
- ext_meta = None
- else:
- try:
- ext_meta = mod.setup(self)
- except VersionRequirementError as err:
- # add the extension name to the version required
- raise VersionRequirementError(
- 'The %s extension used by this project needs at least '
- 'Sphinx v%s; it therefore cannot be built with this '
- 'version.' % (extension, err))
- if ext_meta is None:
- ext_meta = {}
- # special-case for compatibility
- if extension == 'rst2pdf.pdfbuilder':
- ext_meta = {'parallel_read_safe': True}
- try:
- if not ext_meta.get('version'):
- ext_meta['version'] = 'unknown version'
- except Exception:
- self.warn('extension %r returned an unsupported object from '
- 'its setup() function; it should return None or a '
- 'metadata dictionary' % extension)
- ext_meta = {'version': 'unknown version'}
- self._extensions[extension] = mod
- self._extension_metadata[extension] = ext_meta
- self._setting_up_extension.pop()
+ logger.debug('[app] setting up extension: %r', extname)
+ load_extension(self, extname)
def require_sphinx(self, version):
+ # type: (unicode) -> None
# check the Sphinx version if requested
if version > sphinx.__display_version__[:3]:
raise VersionRequirementError(version)
def import_object(self, objname, source=None):
+ # type: (str, unicode) -> Any
"""Import an object from a 'module.name' string."""
return import_object(objname, source=None)
# event interface
-
- def _validate_event(self, event):
- if event not in self._events:
- raise ExtensionError('Unknown event name: %s' % event)
-
def connect(self, event, callback):
- self._validate_event(event)
- listener_id = self.next_listener_id
- if event not in self._listeners:
- self._listeners[event] = {listener_id: callback}
- else:
- self._listeners[event][listener_id] = callback
- self.next_listener_id += 1
- self.debug('[app] connecting event %r: %r [id=%s]',
- event, callback, listener_id)
+ # type: (unicode, Callable) -> int
+ listener_id = self.events.connect(event, callback)
+ logger.debug('[app] connecting event %r: %r', event, callback, listener_id)
return listener_id
def disconnect(self, listener_id):
- self.debug('[app] disconnecting event: [id=%s]', listener_id)
- for event in itervalues(self._listeners):
- event.pop(listener_id, None)
+ # type: (int) -> None
+ logger.debug('[app] disconnecting event: [id=%s]', listener_id)
+ self.events.disconnect(listener_id)
def emit(self, event, *args):
+ # type: (unicode, Any) -> List
try:
- self.debug2('[app] emitting event: %r%s', event, repr(args)[:100])
+ logger.debug('[app] emitting event: %r%s', event, repr(args)[:100])
except Exception:
# not every object likes to be repr()'d (think
# random stuff coming via autodoc)
pass
- results = []
- if event in self._listeners:
- for _, callback in iteritems(self._listeners[event]):
- results.append(callback(self, *args))
- return results
+ return self.events.emit(event, self, *args)
def emit_firstresult(self, event, *args):
- for result in self.emit(event, *args):
- if result is not None:
- return result
- return None
+ # type: (unicode, Any) -> Any
+ return self.events.emit_firstresult(event, self, *args)
# registering addon parts
def add_builder(self, builder):
- self.debug('[app] adding builder: %r', builder)
+ # type: (Type[Builder]) -> None
+ logger.debug('[app] adding builder: %r', builder)
if not hasattr(builder, 'name'):
- raise ExtensionError('Builder class %s has no "name" attribute'
+ raise ExtensionError(_('Builder class %s has no "name" attribute')
% builder)
if builder.name in self.builderclasses:
- raise ExtensionError(
- 'Builder %r already exists (in module %s)' % (
- builder.name, self.builderclasses[builder.name].__module__))
+ raise ExtensionError(_('Builder %r already exists (in module %s)') %
+ (builder.name, self.builderclasses[builder.name].__module__))
self.builderclasses[builder.name] = builder
def add_config_value(self, name, default, rebuild, types=()):
- self.debug('[app] adding config value: %r',
- (name, default, rebuild) + ((types,) if types else ()))
- if name in self.config.values:
- raise ExtensionError('Config value %r already present' % name)
+ # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ logger.debug('[app] adding config value: %r',
+ (name, default, rebuild) + ((types,) if types else ())) # type: ignore
+ if name in self.config:
+ raise ExtensionError(_('Config value %r already present') % name)
if rebuild in (False, True):
rebuild = rebuild and 'env' or ''
- self.config.values[name] = (default, rebuild, types)
+ self.config.add(name, default, rebuild, types)
def add_event(self, name):
- self.debug('[app] adding event: %r', name)
- if name in self._events:
- raise ExtensionError('Event %r already present' % name)
- self._events[name] = ''
+ # type: (unicode) -> None
+ logger.debug('[app] adding event: %r', name)
+ self.events.add(name)
def set_translator(self, name, translator_class):
- self.info(bold('A Translator for the %s builder is changed.' % name))
+ # type: (unicode, Any) -> None
+ logger.info(bold(_('A Translator for the %s builder is changed.') % name))
self._translators[name] = translator_class
def add_node(self, node, **kwds):
- self.debug('[app] adding node: %r', (node, kwds))
+ # type: (nodes.Node, Any) -> None
+ logger.debug('[app] adding node: %r', (node, kwds))
if not kwds.pop('override', False) and \
hasattr(nodes.GenericNodeVisitor, 'visit_' + node.__name__):
- self.warn('while setting up extension %s: node class %r is '
- 'already registered, its visitors will be overridden' %
- (self._setting_up_extension, node.__name__),
- type='app', subtype='add_node')
+ logger.warning(_('while setting up extension %s: node class %r is '
+ 'already registered, its visitors will be overridden'),
+ self._setting_up_extension, node.__name__,
+ type='app', subtype='add_node')
nodes._add_node_class_names([node.__name__])
for key, val in iteritems(kwds):
try:
visit, depart = val
except ValueError:
- raise ExtensionError('Value for key %r must be a '
- '(visit, depart) function tuple' % key)
+ raise ExtensionError(_('Value for key %r must be a '
+ '(visit, depart) function tuple') % key)
translator = self._translators.get(key)
+ translators = []
if translator is not None:
- pass
+ translators.append(translator)
elif key == 'html':
- from sphinx.writers.html import HTMLTranslator as translator
+ from sphinx.writers.html import HTMLTranslator
+ translators.append(HTMLTranslator)
+ if is_html5_writer_available():
+ from sphinx.writers.html5 import HTML5Translator
+ translators.append(HTML5Translator)
elif key == 'latex':
- from sphinx.writers.latex import LaTeXTranslator as translator
+ from sphinx.writers.latex import LaTeXTranslator
+ translators.append(LaTeXTranslator)
elif key == 'text':
- from sphinx.writers.text import TextTranslator as translator
+ from sphinx.writers.text import TextTranslator
+ translators.append(TextTranslator)
elif key == 'man':
- from sphinx.writers.manpage import ManualPageTranslator \
- as translator
+ from sphinx.writers.manpage import ManualPageTranslator
+ translators.append(ManualPageTranslator)
elif key == 'texinfo':
- from sphinx.writers.texinfo import TexinfoTranslator \
- as translator
- else:
- # ignore invalid keys for compatibility
- continue
- setattr(translator, 'visit_' + node.__name__, visit)
- if depart:
- setattr(translator, 'depart_' + node.__name__, depart)
+ from sphinx.writers.texinfo import TexinfoTranslator
+ translators.append(TexinfoTranslator)
+
+ for translator in translators:
+ setattr(translator, 'visit_' + node.__name__, visit)
+ if depart:
+ setattr(translator, 'depart_' + node.__name__, depart)
def add_enumerable_node(self, node, figtype, title_getter=None, **kwds):
+ # type: (nodes.Node, unicode, Callable, Any) -> None
self.enumerable_nodes[node] = (figtype, title_getter)
self.add_node(node, **kwds)
def _directive_helper(self, obj, content=None, arguments=None, **options):
+ # type: (Any, unicode, Any, Any) -> Any
if isinstance(obj, (types.FunctionType, types.MethodType)):
- obj.content = content
- obj.arguments = arguments or (0, 0, False)
- obj.options = options
+ obj.content = content # type: ignore
+ obj.arguments = arguments or (0, 0, False) # type: ignore
+ obj.options = options # type: ignore
return convert_directive_function(obj)
else:
if content or arguments or options:
- raise ExtensionError('when adding directive classes, no '
- 'additional arguments may be given')
+ raise ExtensionError(_('when adding directive classes, no '
+ 'additional arguments may be given'))
return obj
def add_directive(self, name, obj, content=None, arguments=None, **options):
- self.debug('[app] adding directive: %r',
- (name, obj, content, arguments, options))
+ # type: (unicode, Any, unicode, Any, Any) -> None
+ logger.debug('[app] adding directive: %r',
+ (name, obj, content, arguments, options))
if name in directives._directives:
- self.warn('while setting up extension %s: directive %r is '
- 'already registered, it will be overridden' %
- (self._setting_up_extension[-1], name),
- type='app', subtype='add_directive')
+ logger.warning(_('while setting up extension %s: directive %r is '
+ 'already registered, it will be overridden'),
+ self._setting_up_extension[-1], name,
+ type='app', subtype='add_directive')
directives.register_directive(
name, self._directive_helper(obj, content, arguments, **options))
def add_role(self, name, role):
- self.debug('[app] adding role: %r', (name, role))
+ # type: (unicode, Any) -> None
+ logger.debug('[app] adding role: %r', (name, role))
if name in roles._roles:
- self.warn('while setting up extension %s: role %r is '
- 'already registered, it will be overridden' %
- (self._setting_up_extension[-1], name),
- type='app', subtype='add_role')
+ logger.warning(_('while setting up extension %s: role %r is '
+ 'already registered, it will be overridden'),
+ self._setting_up_extension[-1], name,
+ type='app', subtype='add_role')
roles.register_local_role(name, role)
def add_generic_role(self, name, nodeclass):
+ # type: (unicode, Any) -> None
# don't use roles.register_generic_role because it uses
# register_canonical_role
- self.debug('[app] adding generic role: %r', (name, nodeclass))
+ logger.debug('[app] adding generic role: %r', (name, nodeclass))
if name in roles._roles:
- self.warn('while setting up extension %s: role %r is '
- 'already registered, it will be overridden' %
- (self._setting_up_extension[-1], name),
- type='app', subtype='add_generic_role')
+ logger.warning(_('while setting up extension %s: role %r is '
+ 'already registered, it will be overridden'),
+ self._setting_up_extension[-1], name,
+ type='app', subtype='add_generic_role')
role = roles.GenericRole(name, nodeclass)
roles.register_local_role(name, role)
def add_domain(self, domain):
- self.debug('[app] adding domain: %r', domain)
+ # type: (Type[Domain]) -> None
+ logger.debug('[app] adding domain: %r', domain)
if domain.name in self.domains:
- raise ExtensionError('domain %s already registered' % domain.name)
+ raise ExtensionError(_('domain %s already registered') % domain.name)
self.domains[domain.name] = domain
def override_domain(self, domain):
- self.debug('[app] overriding domain: %r', domain)
+ # type: (Type[Domain]) -> None
+ logger.debug('[app] overriding domain: %r', domain)
if domain.name not in self.domains:
- raise ExtensionError('domain %s not yet registered' % domain.name)
+ raise ExtensionError(_('domain %s not yet registered') % domain.name)
if not issubclass(domain, self.domains[domain.name]):
- raise ExtensionError('new domain not a subclass of registered %s '
- 'domain' % domain.name)
+ raise ExtensionError(_('new domain not a subclass of registered %s '
+ 'domain') % domain.name)
self.domains[domain.name] = domain
def add_directive_to_domain(self, domain, name, obj,
content=None, arguments=None, **options):
- self.debug('[app] adding directive to domain: %r',
- (domain, name, obj, content, arguments, options))
+ # type: (unicode, unicode, Any, unicode, Any, Any) -> None
+ logger.debug('[app] adding directive to domain: %r',
+ (domain, name, obj, content, arguments, options))
if domain not in self.domains:
- raise ExtensionError('domain %s not yet registered' % domain)
+ raise ExtensionError(_('domain %s not yet registered') % domain)
self.domains[domain].directives[name] = \
self._directive_helper(obj, content, arguments, **options)
def add_role_to_domain(self, domain, name, role):
- self.debug('[app] adding role to domain: %r', (domain, name, role))
+ # type: (unicode, unicode, Any) -> None
+ logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
- raise ExtensionError('domain %s not yet registered' % domain)
+ raise ExtensionError(_('domain %s not yet registered') % domain)
self.domains[domain].roles[name] = role
def add_index_to_domain(self, domain, index):
- self.debug('[app] adding index to domain: %r', (domain, index))
+ # type: (unicode, Type[Index]) -> None
+ logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
- raise ExtensionError('domain %s not yet registered' % domain)
+ raise ExtensionError(_('domain %s not yet registered') % domain)
self.domains[domain].indices.append(index)
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[]):
- self.debug('[app] adding object type: %r',
- (directivename, rolename, indextemplate, parse_node,
- ref_nodeclass, objname, doc_field_types))
+ # type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None
+ logger.debug('[app] adding object type: %r',
+ (directivename, rolename, indextemplate, parse_node,
+ ref_nodeclass, objname, doc_field_types))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of GenericObject as the new directive
- new_directive = type(directivename, (GenericObject, object),
+ new_directive = type(directivename, (GenericObject, object), # type: ignore
{'indextemplate': indextemplate,
- 'parse_node': staticmethod(parse_node),
+ 'parse_node': staticmethod(parse_node), # type: ignore
'doc_field_types': doc_field_types})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
@@ -770,24 +684,32 @@ class Sphinx(object):
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname=''):
- self.debug('[app] adding crossref type: %r',
- (directivename, rolename, indextemplate, ref_nodeclass,
- objname))
+ # type: (unicode, unicode, unicode, nodes.Node, unicode) -> None
+ logger.debug('[app] adding crossref type: %r',
+ (directivename, rolename, indextemplate, ref_nodeclass,
+ objname))
StandardDomain.object_types[directivename] = \
ObjType(objname or directivename, rolename)
# create a subclass of Target as the new directive
- new_directive = type(directivename, (Target, object),
+ new_directive = type(directivename, (Target, object), # type: ignore
{'indextemplate': indextemplate})
StandardDomain.directives[directivename] = new_directive
# XXX support more options?
StandardDomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
def add_transform(self, transform):
- self.debug('[app] adding transform: %r', transform)
+ # type: (Type[Transform]) -> None
+ logger.debug('[app] adding transform: %r', transform)
SphinxStandaloneReader.transforms.append(transform)
+ def add_post_transform(self, transform):
+ # type: (Type[Transform]) -> None
+ logger.debug('[app] adding post transform: %r', transform)
+ self.post_transforms.append(transform)
+
def add_javascript(self, filename):
- self.debug('[app] adding javascript: %r', filename)
+ # type: (unicode) -> None
+ logger.debug('[app] adding javascript: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.script_files.append(filename)
@@ -796,7 +718,8 @@ class Sphinx(object):
posixpath.join('_static', filename))
def add_stylesheet(self, filename):
- self.debug('[app] adding stylesheet: %r', filename)
+ # type: (unicode) -> None
+ logger.debug('[app] adding stylesheet: %r', filename)
from sphinx.builders.html import StandaloneHTMLBuilder
if '://' in filename:
StandaloneHTMLBuilder.css_files.append(filename)
@@ -805,43 +728,54 @@ class Sphinx(object):
posixpath.join('_static', filename))
def add_latex_package(self, packagename, options=None):
- self.debug('[app] adding latex package: %r', packagename)
+ # type: (unicode, unicode) -> None
+ logger.debug('[app] adding latex package: %r', packagename)
if hasattr(self.builder, 'usepackages'): # only for LaTeX builder
- self.builder.usepackages.append((packagename, options))
+ self.builder.usepackages.append((packagename, options)) # type: ignore
def add_lexer(self, alias, lexer):
- self.debug('[app] adding lexer: %r', (alias, lexer))
+ # type: (unicode, Any) -> None
+ logger.debug('[app] adding lexer: %r', (alias, lexer))
from sphinx.highlighting import lexers
if lexers is None:
return
lexers[alias] = lexer
def add_autodocumenter(self, cls):
- self.debug('[app] adding autodocumenter: %r', cls)
+ # type: (Any) -> None
+ logger.debug('[app] adding autodocumenter: %r', cls)
from sphinx.ext import autodoc
autodoc.add_documenter(cls)
self.add_directive('auto' + cls.objtype, autodoc.AutoDirective)
def add_autodoc_attrgetter(self, type, getter):
- self.debug('[app] adding autodoc attrgetter: %r', (type, getter))
+ # type: (Any, Callable) -> None
+ logger.debug('[app] adding autodoc attrgetter: %r', (type, getter))
from sphinx.ext import autodoc
autodoc.AutoDirective._special_attrgetters[type] = getter
def add_search_language(self, cls):
- self.debug('[app] adding search language: %r', cls)
+ # type: (Any) -> None
+ logger.debug('[app] adding search language: %r', cls)
from sphinx.search import languages, SearchLanguage
assert issubclass(cls, SearchLanguage)
languages[cls.lang] = cls
def add_source_parser(self, suffix, parser):
- self.debug('[app] adding search source_parser: %r, %r', suffix, parser)
+ # type: (unicode, Parser) -> None
+ logger.debug('[app] adding search source_parser: %r, %r', suffix, parser)
if suffix in self._additional_source_parsers:
- self.warn('while setting up extension %s: source_parser for %r is '
- 'already registered, it will be overridden' %
- (self._setting_up_extension[-1], suffix),
- type='app', subtype='add_source_parser')
+ logger.warning(_('while setting up extension %s: source_parser for %r is '
+ 'already registered, it will be overridden'),
+ self._setting_up_extension[-1], suffix,
+ type='app', subtype='add_source_parser')
self._additional_source_parsers[suffix] = parser
+ def add_env_collector(self, collector):
+ # type: (Type[EnvironmentCollector]) -> None
+ logger.debug('[app] adding environment collector: %r', collector)
+ collector().enable(self)
+
class TemplateBridge(object):
"""
@@ -850,6 +784,7 @@ class TemplateBridge(object):
"""
def init(self, builder, theme=None, dirs=None):
+ # type: (Builder, unicode, List[unicode]) -> None
"""Called by the builder to initialize the template system.
*builder* is the builder object; you'll probably want to look at the
@@ -861,6 +796,7 @@ class TemplateBridge(object):
raise NotImplementedError('must be implemented in subclasses')
def newest_template_mtime(self):
+ # type: () -> float
"""Called by the builder to determine if output files are outdated
because of template changes. Return the mtime of the newest template
file that was changed. The default implementation returns ``0``.
@@ -868,12 +804,14 @@ class TemplateBridge(object):
return 0
def render(self, template, context):
+ # type: (unicode, Dict) -> None
"""Called by the builder to render a template given as a filename with
a specified context (a Python dictionary).
"""
raise NotImplementedError('must be implemented in subclasses')
def render_string(self, template, context):
+ # type: (unicode, Dict) -> unicode
"""Called by the builder to render a template given as a string with a
specified context (a Python dictionary).
"""
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index b62fa496b..72d897a96 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -17,12 +17,13 @@ try:
except ImportError:
multiprocessing = None
+from six import itervalues
from docutils import nodes
-from sphinx.util import i18n, path_stabilize
+from sphinx.util import i18n, path_stabilize, logging, status_iterator
from sphinx.util.osutil import SEP, relative_uri
from sphinx.util.i18n import find_catalog
-from sphinx.util.console import bold, darkgreen
+from sphinx.util.console import bold # type: ignore
from sphinx.util.parallel import ParallelTasks, SerialTasks, make_chunks, \
parallel_available
@@ -30,6 +31,18 @@ from sphinx.util.parallel import ParallelTasks, SerialTasks, make_chunks, \
from sphinx import roles # noqa
from sphinx import directives # noqa
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterable, List, Sequence, Set, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.i18n import CatalogInfo # NOQA
+ from sphinx.util.tags import Tags # NOQA
+
+
+logger = logging.getLogger(__name__)
+
class Builder(object):
"""
@@ -37,19 +50,19 @@ class Builder(object):
"""
# builder's name, for the -b command line options
- name = ''
+ name = '' # type: unicode
# builder's output format, or '' if no document output is produced
- format = ''
+ format = '' # type: unicode
# doctree versioning method
- versioning_method = 'none'
+ versioning_method = 'none' # type: unicode
versioning_compare = False
# allow parallel write_doc() calls
allow_parallel = False
+ # support translation
+ use_message_catalog = True
def __init__(self, app):
- self.env = app.env
- self.env.set_versioning_method(self.versioning_method,
- self.versioning_compare)
+ # type: (Sphinx) -> None
self.srcdir = app.srcdir
self.confdir = app.confdir
self.outdir = app.outdir
@@ -57,11 +70,12 @@ class Builder(object):
if not path.isdir(self.doctreedir):
os.makedirs(self.doctreedir)
- self.app = app
- self.warn = app.warn
- self.info = app.info
- self.config = app.config
- self.tags = app.tags
+ self.app = app # type: Sphinx
+ self.env = None # type: BuildEnvironment
+ self.warn = app.warn # type: Callable
+ self.info = app.info # type: Callable
+ self.config = app.config # type: Config
+ self.tags = app.tags # type: Tags
self.tags.add(self.format)
self.tags.add(self.name)
self.tags.add("format_%s" % self.format)
@@ -71,29 +85,36 @@ class Builder(object):
self.old_status_iterator = app.old_status_iterator
# images that need to be copied over (source -> dest)
- self.images = {}
+ self.images = {} # type: Dict[unicode, unicode]
# basename of images directory
self.imagedir = ""
# relative path to image directory from current docname (used at writing docs)
- self.imgpath = ""
+ self.imgpath = "" # type: unicode
# these get set later
self.parallel_ok = False
- self.finish_tasks = None
+ self.finish_tasks = None # type: Any
# load default translator class
self.translator_class = app._translators.get(self.name)
- self.init()
+ def set_environment(self, env):
+ # type: (BuildEnvironment) -> None
+ """Store BuildEnvironment object."""
+ self.env = env
+ self.env.set_versioning_method(self.versioning_method,
+ self.versioning_compare)
# helper methods
def init(self):
+ # type: () -> None
"""Load necessary templates and perform initialization. The default
implementation does nothing.
"""
pass
def create_template_bridge(self):
+ # type: () -> None
"""Return the template bridge configured."""
if self.config.template_bridge:
self.templates = self.app.import_object(
@@ -103,6 +124,7 @@ class Builder(object):
self.templates = BuiltinTemplateLoader()
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
"""Return the target URI for a document name.
*typ* can be used to qualify the link characteristic for individual
@@ -111,6 +133,7 @@ class Builder(object):
raise NotImplementedError
def get_relative_uri(self, from_, to, typ=None):
+ # type: (unicode, unicode, unicode) -> unicode
"""Return a relative URI between two source filenames.
May raise environment.NoUri if there's no way to return a sensible URI.
@@ -119,6 +142,7 @@ class Builder(object):
self.get_target_uri(to, typ))
def get_outdated_docs(self):
+ # type: () -> Union[unicode, Iterable[unicode]]
"""Return an iterable of output files that are outdated, or a string
describing what an update build will build.
@@ -128,9 +152,15 @@ class Builder(object):
"""
raise NotImplementedError
- supported_image_types = []
+ def get_asset_paths(self):
+ # type: () -> List[unicode]
+ """Return list of paths for assets (ex. templates, CSS, etc.)."""
+ return []
+
+ supported_image_types = [] # type: List[unicode]
def post_process_images(self, doctree):
+ # type: (nodes.Node) -> None
"""Pick the best candidate for all image URIs."""
for node in doctree.traverse(nodes.image):
if '?' in node['candidates']:
@@ -142,9 +172,8 @@ class Builder(object):
if candidate:
break
else:
- self.warn(
- 'no matching candidate for image URI %r' % node['uri'],
- '%s:%s' % (node.source, getattr(node, 'line', '')))
+ logger.warning('no matching candidate for image URI %r', node['uri'],
+ location=node)
continue
node['uri'] = candidate
else:
@@ -157,19 +186,22 @@ class Builder(object):
# compile po methods
def compile_catalogs(self, catalogs, message):
+ # type: (Set[CatalogInfo], unicode) -> None
if not self.config.gettext_auto_build:
return
def cat2relpath(cat):
+ # type: (CatalogInfo) -> unicode
return path.relpath(cat.mo_path, self.env.srcdir).replace(path.sep, SEP)
- self.info(bold('building [mo]: ') + message)
- for catalog in self.app.status_iterator(
- catalogs, 'writing output... ', darkgreen, len(catalogs),
- cat2relpath):
- catalog.write_mo(self.config.language, self.warn)
+ logger.info(bold('building [mo]: ') + message)
+ for catalog in status_iterator(catalogs, 'writing output... ', "darkgreen",
+ len(catalogs), self.app.verbosity,
+ stringify_func=cat2relpath):
+ catalog.write_mo(self.config.language)
def compile_all_catalogs(self):
+ # type: () -> None
catalogs = i18n.find_catalog_source_files(
[path.join(self.srcdir, x) for x in self.config.locale_dirs],
self.config.language,
@@ -180,7 +212,9 @@ class Builder(object):
self.compile_catalogs(catalogs, message)
def compile_specific_catalogs(self, specified_files):
+ # type: (List[unicode]) -> None
def to_domain(fpath):
+ # type: (unicode) -> unicode
docname, _ = path.splitext(path_stabilize(fpath))
dom = find_catalog(docname, self.config.gettext_compact)
return dom
@@ -196,6 +230,7 @@ class Builder(object):
self.compile_catalogs(catalogs, message)
def compile_update_catalogs(self):
+ # type: () -> None
catalogs = i18n.find_catalog_source_files(
[path.join(self.srcdir, x) for x in self.config.locale_dirs],
self.config.language,
@@ -207,26 +242,29 @@ class Builder(object):
# build methods
def build_all(self):
+ # type: () -> None
"""Build all source files."""
self.build(None, summary='all source files', method='all')
def build_specific(self, filenames):
+ # type: (List[unicode]) -> None
"""Only rebuild as much as needed for changes in the *filenames*."""
# bring the filenames to the canonical format, that is,
# relative to the source directory and without source_suffix.
dirlen = len(self.srcdir) + 1
to_write = []
- suffixes = tuple(self.config.source_suffix)
+ suffixes = None # type: Tuple[unicode]
+ suffixes = tuple(self.config.source_suffix) # type: ignore
for filename in filenames:
filename = path.normpath(path.abspath(filename))
if not filename.startswith(self.srcdir):
- self.warn('file %r given on command line is not under the '
- 'source directory, ignoring' % filename)
+ logger.warning('file %r given on command line is not under the '
+ 'source directory, ignoring', filename)
continue
if not (path.isfile(filename) or
any(path.isfile(filename + suffix) for suffix in suffixes)):
- self.warn('file %r given on command line does not exist, '
- 'ignoring' % filename)
+ logger.warning('file %r given on command line does not exist, '
+ 'ignoring', filename)
continue
filename = filename[dirlen:]
for suffix in suffixes:
@@ -240,6 +278,7 @@ class Builder(object):
'line' % len(to_write))
def build_update(self):
+ # type: () -> None
"""Only rebuild what was changed or added since last build."""
to_build = self.get_outdated_docs()
if isinstance(to_build, str):
@@ -251,46 +290,42 @@ class Builder(object):
'out of date' % len(to_build))
def build(self, docnames, summary=None, method='update'):
+ # type: (Iterable[unicode], unicode, unicode) -> None
"""Main build method.
First updates the environment, and then calls :meth:`write`.
"""
if summary:
- self.info(bold('building [%s]' % self.name) + ': ' + summary)
+ logger.info(bold('building [%s]' % self.name) + ': ' + summary)
# while reading, collect all warnings from docutils
- warnings = []
- self.env.set_warnfunc(lambda *args, **kwargs: warnings.append((args, kwargs)))
- updated_docnames = set(self.env.update(self.config, self.srcdir,
- self.doctreedir, self.app))
- self.env.set_warnfunc(self.warn)
- for warning, kwargs in warnings:
- self.warn(*warning, **kwargs)
+ with logging.pending_warnings():
+ updated_docnames = set(self.env.update(self.config, self.srcdir, self.doctreedir))
doccount = len(updated_docnames)
- self.info(bold('looking for now-outdated files... '), nonl=1)
- for docname in self.env.check_dependents(updated_docnames):
+ logger.info(bold('looking for now-outdated files... '), nonl=1)
+ for docname in self.env.check_dependents(self.app, updated_docnames):
updated_docnames.add(docname)
outdated = len(updated_docnames) - doccount
if outdated:
- self.info('%d found' % outdated)
+ logger.info('%d found', outdated)
else:
- self.info('none found')
+ logger.info('none found')
if updated_docnames:
# save the environment
from sphinx.application import ENV_PICKLE_FILENAME
- self.info(bold('pickling environment... '), nonl=True)
+ logger.info(bold('pickling environment... '), nonl=True)
self.env.topickle(path.join(self.doctreedir, ENV_PICKLE_FILENAME))
- self.info('done')
+ logger.info('done')
# global actions
- self.info(bold('checking consistency... '), nonl=True)
+ logger.info(bold('checking consistency... '), nonl=True)
self.env.check_consistency()
- self.info('done')
+ logger.info('done')
else:
if method == 'update' and not docnames:
- self.info(bold('no targets are out of date.'))
+ logger.info(bold('no targets are out of date.'))
return
# filter "docnames" (list of outdated files) by the updated
@@ -303,11 +338,10 @@ class Builder(object):
self.parallel_ok = False
if parallel_available and self.app.parallel > 1 and self.allow_parallel:
self.parallel_ok = True
- for extname, md in self.app._extension_metadata.items():
- par_ok = md.get('parallel_write_safe', True)
- if not par_ok:
- self.app.warn('the %s extension is not safe for parallel '
- 'writing, doing serial write' % extname)
+ for extension in itervalues(self.app.extensions):
+ if not extension.parallel_write_safe:
+ logger.warning('the %s extension is not safe for parallel '
+ 'writing, doing serial write', extension.name)
self.parallel_ok = False
break
@@ -328,6 +362,7 @@ class Builder(object):
self.finish_tasks.join()
def write(self, build_docnames, updated_docnames, method='update'):
+ # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
if build_docnames is None or build_docnames == ['__all__']:
# build_all
build_docnames = self.env.found_docs
@@ -336,52 +371,42 @@ class Builder(object):
docnames = set(build_docnames) | set(updated_docnames)
else:
docnames = set(build_docnames)
- self.app.debug('docnames to write: %s', ', '.join(sorted(docnames)))
+ logger.debug('docnames to write: %s', ', '.join(sorted(docnames)))
# add all toctree-containing files that may have changed
for docname in list(docnames):
- for tocdocname in self.env.files_to_rebuild.get(docname, []):
+ for tocdocname in self.env.files_to_rebuild.get(docname, set()):
if tocdocname in self.env.found_docs:
docnames.add(tocdocname)
docnames.add(self.config.master_doc)
- self.info(bold('preparing documents... '), nonl=True)
+ logger.info(bold('preparing documents... '), nonl=True)
self.prepare_writing(docnames)
- self.info('done')
+ logger.info('done')
- warnings = []
- self.env.set_warnfunc(lambda *args, **kwargs: warnings.append((args, kwargs)))
if self.parallel_ok:
# number of subprocesses is parallel-1 because the main process
# is busy loading doctrees and doing write_doc_serialized()
- self._write_parallel(sorted(docnames), warnings,
+ self._write_parallel(sorted(docnames),
nproc=self.app.parallel - 1)
else:
- self._write_serial(sorted(docnames), warnings)
- self.env.set_warnfunc(self.warn)
-
- def _write_serial(self, docnames, warnings):
- for docname in self.app.status_iterator(
- docnames, 'writing output... ', darkgreen, len(docnames)):
- doctree = self.env.get_and_resolve_doctree(docname, self)
- self.write_doc_serialized(docname, doctree)
- self.write_doc(docname, doctree)
- for warning, kwargs in warnings:
- self.warn(*warning, **kwargs)
-
- def _write_parallel(self, docnames, warnings, nproc):
- def write_process(docs):
- local_warnings = []
+ self._write_serial(sorted(docnames))
- def warnfunc(*args, **kwargs):
- local_warnings.append((args, kwargs))
- self.env.set_warnfunc(warnfunc)
- for docname, doctree in docs:
+ def _write_serial(self, docnames):
+ # type: (Sequence[unicode]) -> None
+ with logging.pending_warnings():
+ for docname in status_iterator(docnames, 'writing output... ', "darkgreen",
+ len(docnames), self.app.verbosity):
+ doctree = self.env.get_and_resolve_doctree(docname, self)
+ self.write_doc_serialized(docname, doctree)
self.write_doc(docname, doctree)
- return local_warnings
- def add_warnings(docs, wlist):
- warnings.extend(wlist)
+ def _write_parallel(self, docnames, nproc):
+ # type: (Sequence[unicode], int) -> None
+ def write_process(docs):
+ # type: (List[Tuple[unicode, nodes.Node]]) -> None
+ for docname, doctree in docs:
+ self.write_doc(docname, doctree)
# warm up caches/compile templates using the first document
firstname, docnames = docnames[0], docnames[1:]
@@ -392,37 +417,38 @@ class Builder(object):
tasks = ParallelTasks(nproc)
chunks = make_chunks(docnames, nproc)
- for chunk in self.app.status_iterator(
- chunks, 'writing output... ', darkgreen, len(chunks)):
+ for chunk in status_iterator(chunks, 'writing output... ', "darkgreen",
+ len(chunks), self.app.verbosity):
arg = []
for i, docname in enumerate(chunk):
doctree = self.env.get_and_resolve_doctree(docname, self)
self.write_doc_serialized(docname, doctree)
arg.append((docname, doctree))
- tasks.add_task(write_process, arg, add_warnings)
+ tasks.add_task(write_process, arg)
# make sure all threads have finished
- self.info(bold('waiting for workers...'))
+ logger.info(bold('waiting for workers...'))
tasks.join()
- for warning, kwargs in warnings:
- self.warn(*warning, **kwargs)
-
def prepare_writing(self, docnames):
+ # type: (Set[unicode]) -> None
"""A place where you can add logic before :meth:`write_doc` is run"""
raise NotImplementedError
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
"""Where you actually write something to the filesystem."""
raise NotImplementedError
def write_doc_serialized(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
"""Handle parts of write_doc that must be called in the main process
if parallel build is active.
"""
pass
def finish(self):
+ # type: () -> None
"""Finish the building process.
The default implementation does nothing.
@@ -430,6 +456,7 @@ class Builder(object):
pass
def cleanup(self):
+ # type: () -> None
"""Cleanup any resources.
The default implementation does nothing.
@@ -437,6 +464,7 @@ class Builder(object):
pass
def get_builder_config(self, option, default):
+ # type: (unicode, unicode) -> Any
"""Return a builder specific option.
This method allows customization of common builder settings by
diff --git a/sphinx/builders/applehelp.py b/sphinx/builders/applehelp.py
index c963b2473..fa47429e2 100644
--- a/sphinx/builders/applehelp.py
+++ b/sphinx/builders/applehelp.py
@@ -18,8 +18,9 @@ import shlex
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.config import string_classes
+from sphinx.util import logging
from sphinx.util.osutil import copyfile, ensuredir, make_filename
-from sphinx.util.console import bold
+from sphinx.util.console import bold # type: ignore
from sphinx.util.fileutil import copy_asset
from sphinx.util.pycompat import htmlescape
from sphinx.util.matching import Matcher
@@ -28,10 +29,17 @@ from sphinx.errors import SphinxError
import plistlib
import subprocess
+if False:
+ # For type annotation
+ from typing import Any, Dict # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
# Use plistlib.dump in 3.4 and above
try:
- write_plist = plistlib.dump
+ write_plist = plistlib.dump # type: ignore
except AttributeError:
write_plist = plistlib.writePlist
@@ -83,6 +91,7 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
search = False
def init(self):
+ # type: () -> None
super(AppleHelpBuilder, self).init()
# the output files for HTML help must be .html only
self.out_suffix = '.html'
@@ -101,25 +110,28 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
self.config.applehelp_locale + '.lproj')
def handle_finish(self):
+ # type: () -> None
super(AppleHelpBuilder, self).handle_finish()
self.finish_tasks.add_task(self.copy_localized_files)
self.finish_tasks.add_task(self.build_helpbook)
def copy_localized_files(self):
+ # type: () -> None
source_dir = path.join(self.confdir, self.config.applehelp_locale + '.lproj')
target_dir = self.outdir
if path.isdir(source_dir):
- self.info(bold('copying localized files... '), nonl=True)
+ logger.info(bold('copying localized files... '), nonl=True)
excluded = Matcher(self.config.exclude_patterns + ['**/.*'])
copy_asset(source_dir, target_dir, excluded,
context=self.globalcontext, renderer=self.templates)
- self.info('done')
+ logger.info('done')
def build_helpbook(self):
+ # type: () -> None
contents_dir = path.join(self.bundle_path, 'Contents')
resources_dir = path.join(contents_dir, 'Resources')
language_dir = path.join(resources_dir,
@@ -157,37 +169,36 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
if self.config.applehelp_remote_url is not None:
info_plist['HPDBookRemoteURL'] = self.config.applehelp_remote_url
- self.info(bold('writing Info.plist... '), nonl=True)
+ logger.info(bold('writing Info.plist... '), nonl=True)
with open(path.join(contents_dir, 'Info.plist'), 'wb') as f:
write_plist(info_plist, f)
- self.info('done')
+ logger.info('done')
# Copy the icon, if one is supplied
if self.config.applehelp_icon:
- self.info(bold('copying icon... '), nonl=True)
+ logger.info(bold('copying icon... '), nonl=True)
try:
copyfile(path.join(self.srcdir, self.config.applehelp_icon),
path.join(resources_dir, info_plist['HPDBookIconPath']))
- self.info('done')
+ logger.info('done')
except Exception as err:
- self.warn('cannot copy icon file %r: %s' %
- (path.join(self.srcdir, self.config.applehelp_icon),
- err))
+ logger.warning('cannot copy icon file %r: %s',
+ path.join(self.srcdir, self.config.applehelp_icon), err)
del info_plist['HPDBookIconPath']
# Build the access page
- self.info(bold('building access page...'), nonl=True)
+ logger.info(bold('building access page...'), nonl=True)
with codecs.open(path.join(language_dir, '_access.html'), 'w') as f:
f.write(access_page_template % {
'toc': htmlescape(toc, quote=True),
'title': htmlescape(self.config.applehelp_title)
})
- self.info('done')
+ logger.info('done')
# Generate the help index
- self.info(bold('generating help index... '), nonl=True)
+ logger.info(bold('generating help index... '), nonl=True)
args = [
self.config.applehelp_indexer_path,
@@ -209,10 +220,10 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
args += ['-l', self.config.applehelp_locale]
if self.config.applehelp_disable_external_tools:
- self.info('skipping')
+ logger.info('skipping')
- self.warn('you will need to index this help book with:\n %s'
- % (' '.join([pipes.quote(arg) for arg in args])))
+ logger.warning('you will need to index this help book with:\n %s',
+ ' '.join([pipes.quote(arg) for arg in args]))
else:
try:
p = subprocess.Popen(args,
@@ -224,13 +235,13 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
if p.returncode != 0:
raise AppleHelpIndexerFailed(output)
else:
- self.info('done')
+ logger.info('done')
except OSError:
raise AppleHelpIndexerFailed('Command not found: %s' % args[0])
# If we've been asked to, sign the bundle
if self.config.applehelp_codesign_identity:
- self.info(bold('signing help book... '), nonl=True)
+ logger.info(bold('signing help book... '), nonl=True)
args = [
self.config.applehelp_codesign_path,
@@ -243,10 +254,9 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
args.append(self.bundle_path)
if self.config.applehelp_disable_external_tools:
- self.info('skipping')
-
- self.warn('you will need to sign this help book with:\n %s'
- % (' '.join([pipes.quote(arg) for arg in args])))
+ logger.info('skipping')
+ logger.warning('you will need to sign this help book with:\n %s',
+ ' '.join([pipes.quote(arg) for arg in args]))
else:
try:
p = subprocess.Popen(args,
@@ -258,12 +268,13 @@ class AppleHelpBuilder(StandaloneHTMLBuilder):
if p.returncode != 0:
raise AppleHelpCodeSigningFailed(output)
else:
- self.info('done')
+ logger.info('done')
except OSError:
raise AppleHelpCodeSigningFailed('Command not found: %s' % args[0])
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(AppleHelpBuilder)
diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py
index ac6fa1506..ddb2ed042 100644
--- a/sphinx/builders/changes.py
+++ b/sphinx/builders/changes.py
@@ -18,11 +18,20 @@ from sphinx import package_dir
from sphinx.locale import _
from sphinx.theming import Theme
from sphinx.builders import Builder
+from sphinx.util import logging
from sphinx.util.osutil import ensuredir, os_path
-from sphinx.util.console import bold
+from sphinx.util.console import bold # type: ignore
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.pycompat import htmlescape
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
class ChangesBuilder(Builder):
"""
@@ -31,30 +40,32 @@ class ChangesBuilder(Builder):
name = 'changes'
def init(self):
+ # type: () -> None
self.create_template_bridge()
- Theme.init_themes(self.confdir, self.config.html_theme_path,
- warn=self.warn)
+ Theme.init_themes(self.confdir, self.config.html_theme_path)
self.theme = Theme('default')
self.templates.init(self, self.theme)
def get_outdated_docs(self):
+ # type: () -> unicode
return self.outdir
typemap = {
'versionadded': 'added',
'versionchanged': 'changed',
'deprecated': 'deprecated',
- }
+ } # type: Dict[unicode, unicode]
def write(self, *ignored):
+ # type: (Any) -> None
version = self.config.version
- libchanges = {}
- apichanges = []
- otherchanges = {}
+ libchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int]]]
+ apichanges = [] # type: List[Tuple[unicode, unicode, int]]
+ otherchanges = {} # type: Dict[Tuple[unicode, unicode], List[Tuple[unicode, unicode, int]]] # NOQA
if version not in self.env.versionchanges:
- self.info(bold('no changes in version %s.' % version))
+ logger.info(bold('no changes in version %s.' % version))
return
- self.info(bold('writing summary file...'))
+ logger.info(bold('writing summary file...'))
for type, docname, lineno, module, descname, content in \
self.env.versionchanges[version]:
if isinstance(descname, tuple):
@@ -101,9 +112,9 @@ class ChangesBuilder(Builder):
'show_copyright': self.config.html_show_copyright,
'show_sphinx': self.config.html_show_sphinx,
}
- with codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8') as f:
+ with codecs.open(path.join(self.outdir, 'index.html'), 'w', 'utf8') as f: # type: ignore # NOQA
f.write(self.templates.render('changes/frameset.html', ctx))
- with codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8') as f:
+ with codecs.open(path.join(self.outdir, 'changes.html'), 'w', 'utf8') as f: # type: ignore # NOQA
f.write(self.templates.render('changes/versionchanges.html', ctx))
hltext = ['.. versionadded:: %s' % version,
@@ -111,6 +122,7 @@ class ChangesBuilder(Builder):
'.. deprecated:: %s' % version]
def hl(no, line):
+ # type: (int, unicode) -> unicode
line = '<a name="L%s"> </a>' % no + htmlescape(line)
for x in hltext:
if x in line:
@@ -118,18 +130,18 @@ class ChangesBuilder(Builder):
break
return line
- self.info(bold('copying source files...'))
+ logger.info(bold('copying source files...'))
for docname in self.env.all_docs:
- with codecs.open(self.env.doc2path(docname), 'r',
+ with codecs.open(self.env.doc2path(docname), 'r', # type: ignore
self.env.config.source_encoding) as f:
try:
lines = f.readlines()
except UnicodeDecodeError:
- self.warn('could not read %r for changelog creation' % docname)
+ logger.warning('could not read %r for changelog creation', docname)
continue
targetfn = path.join(self.outdir, 'rst', os_path(docname)) + '.html'
ensuredir(path.dirname(targetfn))
- with codecs.open(targetfn, 'w', 'utf-8') as f:
+ with codecs.open(targetfn, 'w', 'utf-8') as f: # type: ignore
text = ''.join(hl(i + 1, line) for (i, line) in enumerate(lines))
ctx = {
'filename': self.env.doc2path(docname, None),
@@ -144,6 +156,7 @@ class ChangesBuilder(Builder):
self.outdir)
def hl(self, text, version):
+ # type: (unicode, unicode) -> unicode
text = htmlescape(text)
for directive in ['versionchanged', 'versionadded', 'deprecated']:
text = text.replace('.. %s:: %s' % (directive, version),
@@ -151,10 +164,12 @@ class ChangesBuilder(Builder):
return text
def finish(self):
+ # type: () -> None
pass
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(ChangesBuilder)
return {
diff --git a/sphinx/builders/devhelp.py b/sphinx/builders/devhelp.py
index ce00f9164..9dbbf3c17 100644
--- a/sphinx/builders/devhelp.py
+++ b/sphinx/builders/devhelp.py
@@ -19,13 +19,23 @@ from os import path
from docutils import nodes
from sphinx import addnodes
+from sphinx.util import logging
from sphinx.util.osutil import make_filename
from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.environment.adapters.indexentries import IndexEntries
try:
import xml.etree.ElementTree as etree
except ImportError:
- import lxml.etree as etree
+ import lxml.etree as etree # type: ignore
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
class DevhelpBuilder(StandaloneHTMLBuilder):
@@ -44,15 +54,18 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
embedded = True
def init(self):
+ # type: () -> None
StandaloneHTMLBuilder.init(self)
self.out_suffix = '.html'
self.link_suffix = '.html'
def handle_finish(self):
+ # type: () -> None
self.build_devhelp(self.outdir, self.config.devhelp_basename)
def build_devhelp(self, outdir, outname):
- self.info('dumping devhelp index...')
+ # type: (unicode, unicode) -> None
+ logger.info('dumping devhelp index...')
# Basic info
root = etree.Element('book',
@@ -69,6 +82,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
self.config.master_doc, self, prune_toctrees=False)
def write_toc(node, parent):
+ # type: (nodes.Node, nodes.Node) -> None
if isinstance(node, addnodes.compact_paragraph) or \
isinstance(node, nodes.bullet_list):
for subnode in node:
@@ -82,6 +96,7 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
parent.attrib['name'] = node.astext()
def istoctree(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
@@ -90,9 +105,10 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
# Index
functions = etree.SubElement(root, 'functions')
- index = self.env.create_index(self)
+ index = IndexEntries(self.env).create_index(self)
def write_index(title, refs, subitems):
+ # type: (unicode, List[Any], Any) -> None
if len(refs) == 0:
pass
elif len(refs) == 1:
@@ -116,11 +132,12 @@ class DevhelpBuilder(StandaloneHTMLBuilder):
# Dump the XML file
xmlfile = path.join(outdir, outname + '.devhelp.gz')
- with gzip.open(xmlfile, 'w') as f:
+ with gzip.open(xmlfile, 'w') as f: # type: ignore
tree.write(f, 'utf-8')
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(DevhelpBuilder)
diff --git a/sphinx/builders/dummy.py b/sphinx/builders/dummy.py
index 2fb146ecf..74a3d4187 100644
--- a/sphinx/builders/dummy.py
+++ b/sphinx/builders/dummy.py
@@ -12,31 +12,44 @@
from sphinx.builders import Builder
+if False:
+ # For type annotation
+ from typing import Any, Dict, Set # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+
class DummyBuilder(Builder):
name = 'dummy'
allow_parallel = True
def init(self):
+ # type: () -> None
pass
def get_outdated_docs(self):
+ # type: () -> Set[unicode]
return self.env.found_docs
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return ''
def prepare_writing(self, docnames):
+ # type: (Set[unicode]) -> None
pass
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
pass
def finish(self):
+ # type: () -> None
pass
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(DummyBuilder)
return {
diff --git a/sphinx/builders/epub.py b/sphinx/builders/epub.py
index 152401447..ea604f75b 100644
--- a/sphinx/builders/epub.py
+++ b/sphinx/builders/epub.py
@@ -12,10 +12,10 @@
import os
import re
-import codecs
-import zipfile
from os import path
+from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
from datetime import datetime
+from collections import namedtuple
try:
from PIL import Image
@@ -28,107 +28,31 @@ except ImportError:
from docutils import nodes
from sphinx import addnodes
+from sphinx import package_dir
from sphinx.builders.html import StandaloneHTMLBuilder
-from sphinx.util.osutil import ensuredir, copyfile, make_filename, EEXIST
+from sphinx.util import logging
+from sphinx.util import status_iterator
+from sphinx.util.osutil import ensuredir, copyfile, make_filename
+from sphinx.util.fileutil import copy_asset_file
from sphinx.util.smartypants import sphinx_smarty_pants as ssp
-from sphinx.util.console import brown
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
-# (Fragment) templates from which the metainfo files content.opf, toc.ncx,
-# mimetype, and META-INF/container.xml are created.
+
+logger = logging.getLogger(__name__)
+
+
+# (Fragment) templates from which the metainfo files content.opf and
+# toc.ncx are created.
# This template section also defines strings that are embedded in the html
# output but that may be customized by (re-)setting module attributes,
# e.g. from conf.py.
-MIMETYPE_TEMPLATE = 'application/epub+zip' # no EOL!
-
-CONTAINER_TEMPLATE = u'''\
-<?xml version="1.0" encoding="UTF-8"?>
-<container version="1.0"
- xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
- <rootfiles>
- <rootfile full-path="content.opf"
- media-type="application/oebps-package+xml"/>
- </rootfiles>
-</container>
-'''
-
-TOC_TEMPLATE = u'''\
-<?xml version="1.0"?>
-<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
- <head>
- <meta name="dtb:uid" content="%(uid)s"/>
- <meta name="dtb:depth" content="%(level)d"/>
- <meta name="dtb:totalPageCount" content="0"/>
- <meta name="dtb:maxPageNumber" content="0"/>
- </head>
- <docTitle>
- <text>%(title)s</text>
- </docTitle>
- <navMap>
-%(navpoints)s
- </navMap>
-</ncx>
-'''
-
-NAVPOINT_TEMPLATE = u'''\
-%(indent)s <navPoint id="%(navpoint)s" playOrder="%(playorder)d">
-%(indent)s <navLabel>
-%(indent)s <text>%(text)s</text>
-%(indent)s </navLabel>
-%(indent)s <content src="%(refuri)s" />
-%(indent)s </navPoint>'''
-
-NAVPOINT_INDENT = ' '
-NODE_NAVPOINT_TEMPLATE = 'navPoint%d'
-
-CONTENT_TEMPLATE = u'''\
-<?xml version="1.0" encoding="UTF-8"?>
-<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
- unique-identifier="%(uid)s">
- <metadata xmlns:opf="http://www.idpf.org/2007/opf"
- xmlns:dc="http://purl.org/dc/elements/1.1/">
- <dc:language>%(lang)s</dc:language>
- <dc:title>%(title)s</dc:title>
- <dc:creator opf:role="aut">%(author)s</dc:creator>
- <dc:publisher>%(publisher)s</dc:publisher>
- <dc:rights>%(copyright)s</dc:rights>
- <dc:identifier id="%(uid)s" opf:scheme="%(scheme)s">%(id)s</dc:identifier>
- <dc:date>%(date)s</dc:date>
- </metadata>
- <manifest>
- <item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
-%(files)s
- </manifest>
- <spine toc="ncx">
-%(spine)s
- </spine>
- <guide>
-%(guide)s
- </guide>
-</package>
-'''
-
-COVER_TEMPLATE = u'''\
- <meta name="cover" content="%(cover)s"/>
-'''
-
COVERPAGE_NAME = u'epub-cover.xhtml'
-FILE_TEMPLATE = u'''\
- <item id="%(id)s"
- href="%(href)s"
- media-type="%(media_type)s" />'''
-
-SPINE_TEMPLATE = u'''\
- <itemref idref="%(idref)s" />'''
-
-NO_LINEAR_SPINE_TEMPLATE = u'''\
- <itemref idref="%(idref)s" linear="no" />'''
-
-GUIDE_TEMPLATE = u'''\
- <reference type="%(type)s" title="%(title)s" href="%(uri)s" />'''
-
TOCTREE_TEMPLATE = u'toctree-l%d'
DOCTYPE = u'''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
@@ -159,7 +83,7 @@ MEDIA_TYPES = {
'.otf': 'application/x-font-otf',
'.ttf': 'application/x-font-ttf',
'.woff': 'application/font-woff',
-}
+} # type: Dict[unicode, unicode]
VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
@@ -169,6 +93,12 @@ VECTOR_GRAPHICS_EXTENSIONS = ('.svg',)
REFURI_RE = re.compile("([^#:]*#)(.*)")
+ManifestItem = namedtuple('ManifestItem', ['href', 'id', 'media_type'])
+Spine = namedtuple('Spine', ['idref', 'linear'])
+Guide = namedtuple('Guide', ['type', 'title', 'uri'])
+NavPoint = namedtuple('NavPoint', ['navpoint', 'playorder', 'text', 'refuri', 'children'])
+
+
# The epub publisher
class EpubBuilder(StandaloneHTMLBuilder):
@@ -181,6 +111,8 @@ class EpubBuilder(StandaloneHTMLBuilder):
"""
name = 'epub2'
+ template_dir = path.join(package_dir, 'templates', 'epub2')
+
# don't copy the reST source
copysource = False
supported_image_types = ['image/svg+xml', 'image/png', 'image/gif',
@@ -199,19 +131,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
# don't generate search index or include search page
search = False
- mimetype_template = MIMETYPE_TEMPLATE
- container_template = CONTAINER_TEMPLATE
- toc_template = TOC_TEMPLATE
- navpoint_template = NAVPOINT_TEMPLATE
- navpoint_indent = NAVPOINT_INDENT
- node_navpoint_template = NODE_NAVPOINT_TEMPLATE
- content_template = CONTENT_TEMPLATE
- cover_template = COVER_TEMPLATE
coverpage_name = COVERPAGE_NAME
- file_template = FILE_TEMPLATE
- spine_template = SPINE_TEMPLATE
- no_linear_spine_template = NO_LINEAR_SPINE_TEMPLATE
- guide_template = GUIDE_TEMPLATE
toctree_template = TOCTREE_TEMPLATE
doctype = DOCTYPE
link_target_template = LINK_TARGET_TEMPLATE
@@ -221,28 +141,33 @@ class EpubBuilder(StandaloneHTMLBuilder):
refuri_re = REFURI_RE
def init(self):
+ # type: () -> None
StandaloneHTMLBuilder.init(self)
# the output files for epub must be .html only
self.out_suffix = '.xhtml'
self.link_suffix = '.xhtml'
self.playorder = 0
self.tocid = 0
+ self.id_cache = {} # type: Dict[unicode, unicode]
self.use_index = self.get_builder_config('use_index', 'epub')
def get_theme_config(self):
+ # type: () -> Tuple[unicode, Dict]
return self.config.epub_theme, self.config.epub_theme_options
# generic support functions
- def make_id(self, name, id_cache={}):
+ def make_id(self, name):
+ # type: (unicode) -> unicode
# id_cache is intentionally mutable
"""Return a unique id for name."""
- id = id_cache.get(name)
+ id = self.id_cache.get(name)
if not id:
id = 'epub-%d' % self.env.new_serialno('epub')
- id_cache[name] = id
+ self.id_cache[name] = id
return id
def esc(self, name):
+ # type: (unicode) -> unicode
"""Replace all characters not allowed in text an attribute values."""
# Like cgi.escape, but also replace apostrophe
name = name.replace('&', '&amp;')
@@ -253,6 +178,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return name
def get_refnodes(self, doctree, result):
+ # type: (nodes.Node, List[Dict[unicode, Any]]) -> List[Dict[unicode, Any]]
"""Collect section titles, their depth in the toc and the refuri."""
# XXX: is there a better way than checking the attribute
# toctree-l[1-8] on the parent node?
@@ -276,6 +202,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return result
def get_toc(self):
+ # type: () -> None
"""Get the total table of contents, containing the master_doc
and pre and post files not managed by sphinx.
"""
@@ -291,6 +218,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.toc_add_files(self.refnodes)
def toc_add_files(self, refnodes):
+ # type: (List[nodes.Node]) -> None
"""Add the master_doc, pre and post files to a list of refnodes.
"""
refnodes.insert(0, {
@@ -313,10 +241,12 @@ class EpubBuilder(StandaloneHTMLBuilder):
})
def fix_fragment(self, prefix, fragment):
+ # type: (unicode, unicode) -> unicode
"""Return a href/id attribute with colons replaced by hyphens."""
return prefix + fragment.replace(':', '-')
def fix_ids(self, tree):
+ # type: (nodes.Node) -> None
"""Replace colons with hyphens in href and id attributes.
Some readers crash because they interpret the part as a
@@ -337,9 +267,11 @@ class EpubBuilder(StandaloneHTMLBuilder):
node.attributes['ids'] = newids
def add_visible_links(self, tree, show_urls='inline'):
+ # type: (nodes.Node, unicode) -> None
"""Add visible link targets for external links"""
def make_footnote_ref(doc, label):
+ # type: (nodes.Node, unicode) -> nodes.footnote_reference
"""Create a footnote_reference node with children"""
footnote_ref = nodes.footnote_reference('[#]_')
footnote_ref.append(nodes.Text(label))
@@ -347,6 +279,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote_ref
def make_footnote(doc, label, uri):
+ # type: (nodes.Node, unicode, unicode) -> nodes.footnote
"""Create a footnote node with children"""
footnote = nodes.footnote(uri)
para = nodes.paragraph()
@@ -357,6 +290,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
return footnote
def footnote_spot(tree):
+ # type: (nodes.Node) -> Tuple[nodes.Node, int]
"""Find or create a spot to place footnotes.
The function returns the tuple (parent, index)."""
@@ -406,6 +340,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
fn_idx += 1
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
"""Write one document file.
This method is overwritten in order to fix fragment identifiers
@@ -416,6 +351,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def fix_genindex(self, tree):
+ # type: (nodes.Node) -> None
"""Fix href attributes for genindex pages."""
# XXX: modifies tree inline
# Logic modeled from themes/basic/genindex.html
@@ -434,31 +370,33 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.fix_fragment(m.group(1), m.group(2)))
def is_vector_graphics(self, filename):
+ # type: (unicode) -> bool
"""Does the filename extension indicate a vector graphic format?"""
ext = path.splitext(filename)[-1]
return ext in VECTOR_GRAPHICS_EXTENSIONS
def copy_image_files_pil(self):
+ # type: () -> None
"""Copy images using the PIL.
The method tries to read and write the files with the PIL,
converting the format and resizing the image if necessary/possible.
"""
ensuredir(path.join(self.outdir, self.imagedir))
- for src in self.app.status_iterator(self.images, 'copying images... ',
- brown, len(self.images)):
+ for src in status_iterator(self.images, 'copying images... ', "brown",
+ len(self.images), self.app.verbosity):
dest = self.images[src]
try:
img = Image.open(path.join(self.srcdir, src))
except IOError:
if not self.is_vector_graphics(src):
- self.warn('cannot read image file %r: copying it instead' %
- (path.join(self.srcdir, src), ))
+ logger.warning('cannot read image file %r: copying it instead',
+ path.join(self.srcdir, src))
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except (IOError, OSError) as err:
- self.warn('cannot copy image file %r: %s' %
- (path.join(self.srcdir, src), err))
+ logger.warning('cannot copy image file %r: %s',
+ path.join(self.srcdir, src), err)
continue
if self.config.epub_fix_images:
if img.mode in ('P',):
@@ -473,17 +411,18 @@ class EpubBuilder(StandaloneHTMLBuilder):
try:
img.save(path.join(self.outdir, self.imagedir, dest))
except (IOError, OSError) as err:
- self.warn('cannot write image file %r: %s' %
- (path.join(self.srcdir, src), err))
+ logger.warning('cannot write image file %r: %s',
+ path.join(self.srcdir, src), err)
def copy_image_files(self):
+ # type: () -> None
"""Copy image files to destination directory.
This overwritten method can use the PIL to convert image files.
"""
if self.images:
if self.config.epub_fix_images or self.config.epub_max_image_width:
if not Image:
- self.warn('PIL not found - copying image files')
+ logger.warning('PIL not found - copying image files')
super(EpubBuilder, self).copy_image_files()
else:
self.copy_image_files_pil()
@@ -491,10 +430,12 @@ class EpubBuilder(StandaloneHTMLBuilder):
super(EpubBuilder, self).copy_image_files()
def copy_download_files(self):
+ # type: () -> None
pass
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
+ # type: (unicode, Dict, unicode, unicode, Any) -> None
"""Create a rendered page.
This method is overwritten for genindex pages in order to fix href link
@@ -510,6 +451,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
# Finish by building the epub file
def handle_finish(self):
+ # type: () -> None
"""Create the metainfo files and finally the epub."""
self.get_toc()
self.build_mimetype(self.outdir, 'mimetype')
@@ -519,28 +461,26 @@ class EpubBuilder(StandaloneHTMLBuilder):
self.build_epub(self.outdir, self.config.epub_basename + '.epub')
def build_mimetype(self, outdir, outname):
+ # type: (unicode, unicode) -> None
"""Write the metainfo file mimetype."""
- self.info('writing %s file...' % outname)
- with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f:
- f.write(self.mimetype_template)
+ logger.info('writing %s file...', outname)
+ copy_asset_file(path.join(self.template_dir, 'mimetype'),
+ path.join(outdir, outname))
def build_container(self, outdir, outname):
- """Write the metainfo file META-INF/cointainer.xml."""
- self.info('writing %s file...' % outname)
- fn = path.join(outdir, outname)
- try:
- os.mkdir(path.dirname(fn))
- except OSError as err:
- if err.errno != EEXIST:
- raise
- with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f:
- f.write(self.container_template)
-
- def content_metadata(self, files, spine, guide):
+ # type: (unicode, unicode) -> None
+ """Write the metainfo file META-INF/container.xml."""
+ logger.info('writing %s file...', outname)
+ filename = path.join(outdir, outname)
+ ensuredir(path.dirname(filename))
+ copy_asset_file(path.join(self.template_dir, 'container.xml'), filename)
+
+ def content_metadata(self):
+ # type: () -> Dict[unicode, Any]
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
- metadata = {}
+ metadata = {} # type: Dict[unicode, Any]
metadata['title'] = self.esc(self.config.epub_title)
metadata['author'] = self.esc(self.config.epub_author)
metadata['uid'] = self.esc(self.config.epub_uid)
@@ -550,23 +490,24 @@ class EpubBuilder(StandaloneHTMLBuilder):
metadata['scheme'] = self.esc(self.config.epub_scheme)
metadata['id'] = self.esc(self.config.epub_identifier)
metadata['date'] = self.esc(datetime.utcnow().strftime("%Y-%m-%d"))
- metadata['files'] = files
- metadata['spine'] = spine
- metadata['guide'] = guide
+ metadata['manifest_items'] = []
+ metadata['spines'] = []
+ metadata['guides'] = []
return metadata
def build_content(self, outdir, outname):
+ # type: (unicode, unicode) -> None
"""Write the metainfo file content.opf It contains bibliographic data,
a file list and the spine (the reading order).
"""
- self.info('writing %s file...' % outname)
+ logger.info('writing %s file...', outname)
+ metadata = self.content_metadata()
# files
if not outdir.endswith(os.sep):
outdir += os.sep
olen = len(outdir)
- projectfiles = []
- self.files = []
+ self.files = [] # type: List[unicode]
self.ignored_files = ['.buildinfo', 'mimetype', 'content.opf',
'toc.ncx', 'META-INF/container.xml',
'Thumbs.db', 'ehthumbs.db', '.DS_Store',
@@ -575,7 +516,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
if not self.use_index:
self.ignored_files.append('genindex' + self.out_suffix)
for root, dirs, files in os.walk(outdir):
- for fn in files:
+ for fn in sorted(files):
filename = path.join(root, fn)[olen:]
if filename in self.ignored_files:
continue
@@ -584,74 +525,61 @@ class EpubBuilder(StandaloneHTMLBuilder):
# we always have JS and potentially OpenSearch files, don't
# always warn about them
if ext not in ('.js', '.xml'):
- self.warn('unknown mimetype for %s, ignoring' % filename,
- type='epub', subtype='unknown_project_files')
+ logger.warning('unknown mimetype for %s, ignoring', filename,
+ type='epub', subtype='unknown_project_files')
continue
filename = filename.replace(os.sep, '/')
- projectfiles.append(self.file_template % {
- 'href': self.esc(filename),
- 'id': self.esc(self.make_id(filename)),
- 'media_type': self.esc(self.media_types[ext])
- })
+ item = ManifestItem(self.esc(filename),
+ self.esc(self.make_id(filename)),
+ self.esc(self.media_types[ext]))
+ metadata['manifest_items'].append(item)
self.files.append(filename)
# spine
- spine = []
spinefiles = set()
- for item in self.refnodes:
- if '#' in item['refuri']:
+ for refnode in self.refnodes:
+ if '#' in refnode['refuri']:
continue
- if item['refuri'] in self.ignored_files:
+ if refnode['refuri'] in self.ignored_files:
continue
- spine.append(self.spine_template % {
- 'idref': self.esc(self.make_id(item['refuri']))
- })
- spinefiles.add(item['refuri'])
+ spine = Spine(self.esc(self.make_id(refnode['refuri'])), True)
+ metadata['spines'].append(spine)
+ spinefiles.add(refnode['refuri'])
for info in self.domain_indices:
- spine.append(self.spine_template % {
- 'idref': self.esc(self.make_id(info[0] + self.out_suffix))
- })
+ spine = Spine(self.esc(self.make_id(info[0] + self.out_suffix)), True)
+ metadata['spines'].append(spine)
spinefiles.add(info[0] + self.out_suffix)
if self.use_index:
- spine.append(self.spine_template % {
- 'idref': self.esc(self.make_id('genindex' + self.out_suffix))
- })
+ spine = Spine(self.esc(self.make_id('genindex' + self.out_suffix)), True)
+ metadata['spines'].append(spine)
spinefiles.add('genindex' + self.out_suffix)
# add auto generated files
for name in self.files:
if name not in spinefiles and name.endswith(self.out_suffix):
- spine.append(self.no_linear_spine_template % {
- 'idref': self.esc(self.make_id(name))
- })
+ spine = Spine(self.esc(self.make_id(name)), False)
+ metadata['spines'].append(spine)
# add the optional cover
- content_tmpl = self.content_template
html_tmpl = None
if self.config.epub_cover:
image, html_tmpl = self.config.epub_cover
image = image.replace(os.sep, '/')
- mpos = content_tmpl.rfind('</metadata>')
- cpos = content_tmpl.rfind('\n', 0, mpos) + 1
- content_tmpl = content_tmpl[:cpos] + \
- COVER_TEMPLATE % {'cover': self.esc(self.make_id(image))} + \
- content_tmpl[cpos:]
+ metadata['cover'] = self.esc(self.make_id(image))
if html_tmpl:
- spine.insert(0, self.spine_template % {
- 'idref': self.esc(self.make_id(self.coverpage_name))})
+ spine = Spine(self.esc(self.make_id(self.coverpage_name)), True)
+ metadata['spines'].insert(0, spine)
if self.coverpage_name not in self.files:
ext = path.splitext(self.coverpage_name)[-1]
self.files.append(self.coverpage_name)
- projectfiles.append(self.file_template % {
- 'href': self.esc(self.coverpage_name),
- 'id': self.esc(self.make_id(self.coverpage_name)),
- 'media_type': self.esc(self.media_types[ext])
- })
+ item = ManifestItem(self.esc(filename),
+ self.esc(self.make_id(filename)),
+ self.esc(self.media_types[ext]))
+ metadata['manifest_items'].append(item)
ctx = {'image': self.esc(image), 'title': self.config.project}
self.handle_page(
path.splitext(self.coverpage_name)[0], ctx, html_tmpl)
spinefiles.add(self.coverpage_name)
- guide = []
auto_add_cover = True
auto_add_toc = True
if self.config.epub_guide:
@@ -663,61 +591,43 @@ class EpubBuilder(StandaloneHTMLBuilder):
auto_add_cover = False
if type == 'toc':
auto_add_toc = False
- guide.append(self.guide_template % {
- 'type': self.esc(type),
- 'title': self.esc(title),
- 'uri': self.esc(uri)
- })
+ metadata['guides'].append(Guide(self.esc(type),
+ self.esc(title),
+ self.esc(uri)))
if auto_add_cover and html_tmpl:
- guide.append(self.guide_template % {
- 'type': 'cover',
- 'title': self.guide_titles['cover'],
- 'uri': self.esc(self.coverpage_name)
- })
+ metadata['guides'].append(Guide('cover',
+ self.guide_titles['cover'],
+ self.esc(self.coverpage_name)))
if auto_add_toc and self.refnodes:
- guide.append(self.guide_template % {
- 'type': 'toc',
- 'title': self.guide_titles['toc'],
- 'uri': self.esc(self.refnodes[0]['refuri'])
- })
- projectfiles = '\n'.join(projectfiles)
- spine = '\n'.join(spine)
- guide = '\n'.join(guide)
+ metadata['guides'].append(Guide('toc',
+ self.guide_titles['toc'],
+ self.esc(self.refnodes[0]['refuri'])))
# write the project file
- with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f:
- f.write(content_tmpl %
- self.content_metadata(projectfiles, spine, guide))
+ copy_asset_file(path.join(self.template_dir, 'content.opf_t'),
+ path.join(outdir, outname),
+ metadata)
def new_navpoint(self, node, level, incr=True):
+ # type: (nodes.Node, int, bool) -> NavPoint
"""Create a new entry in the toc from the node at given level."""
# XXX Modifies the node
if incr:
self.playorder += 1
self.tocid += 1
- node['indent'] = self.navpoint_indent * level
- node['navpoint'] = self.esc(self.node_navpoint_template % self.tocid)
- node['playorder'] = self.playorder
- return self.navpoint_template % node
-
- def insert_subnav(self, node, subnav):
- """Insert nested navpoints for given node.
-
- The node and subnav are already rendered to text.
- """
- nlist = node.rsplit('\n', 1)
- nlist.insert(-1, subnav)
- return '\n'.join(nlist)
+ return NavPoint(self.esc('navPoint%d' % self.tocid), self.playorder,
+ node['text'], node['refuri'], [])
def build_navpoints(self, nodes):
+ # type: (nodes.Node) -> List[NavPoint]
"""Create the toc navigation structure.
Subelements of a node are nested inside the navpoint. For nested nodes
the parent node is reinserted in the subnav.
"""
- navstack = []
- navlist = []
- level = 1
+ navstack = [] # type: List[NavPoint]
+ navstack.append(NavPoint('dummy', '', '', '', []))
+ level = 0
lastnode = None
for node in nodes:
if not node['text']:
@@ -728,35 +638,37 @@ class EpubBuilder(StandaloneHTMLBuilder):
if node['level'] > self.config.epub_tocdepth:
continue
if node['level'] == level:
- navlist.append(self.new_navpoint(node, level))
+ navpoint = self.new_navpoint(node, level)
+ navstack.pop()
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
elif node['level'] == level + 1:
- navstack.append(navlist)
- navlist = []
level += 1
if lastnode and self.config.epub_tocdup:
# Insert starting point in subtoc with same playOrder
- navlist.append(self.new_navpoint(lastnode, level, False))
- navlist.append(self.new_navpoint(node, level))
+ navstack[-1].children.append(self.new_navpoint(lastnode, level, False))
+ navpoint = self.new_navpoint(node, level)
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
+ elif node['level'] < level:
+ while node['level'] < len(navstack):
+ navstack.pop()
+ level = node['level']
+ navpoint = self.new_navpoint(node, level)
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
else:
- while node['level'] < level:
- subnav = '\n'.join(navlist)
- navlist = navstack.pop()
- navlist[-1] = self.insert_subnav(navlist[-1], subnav)
- level -= 1
- navlist.append(self.new_navpoint(node, level))
+ raise
lastnode = node
- while level != 1:
- subnav = '\n'.join(navlist)
- navlist = navstack.pop()
- navlist[-1] = self.insert_subnav(navlist[-1], subnav)
- level -= 1
- return '\n'.join(navlist)
+
+ return navstack[0].children
def toc_metadata(self, level, navpoints):
+ # type: (int, List[NavPoint]) -> Dict[unicode, Any]
"""Create a dictionary with all metadata for the toc.ncx file
properly escaped.
"""
- metadata = {}
+ metadata = {} # type: Dict[unicode, Any]
metadata['uid'] = self.config.epub_uid
metadata['title'] = self.config.epub_title
metadata['level'] = level
@@ -764,8 +676,9 @@ class EpubBuilder(StandaloneHTMLBuilder):
return metadata
def build_toc(self, outdir, outname):
+ # type: (unicode, unicode) -> None
"""Write the metainfo file toc.ncx."""
- self.info('writing %s file...' % outname)
+ logger.info('writing %s file...', outname)
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(self.config.master_doc,
@@ -779,29 +692,29 @@ class EpubBuilder(StandaloneHTMLBuilder):
navpoints = self.build_navpoints(refnodes)
level = max(item['level'] for item in self.refnodes)
level = min(level, self.config.epub_tocdepth)
- with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f:
- f.write(self.toc_template % self.toc_metadata(level, navpoints))
+ copy_asset_file(path.join(self.template_dir, 'toc.ncx_t'),
+ path.join(outdir, outname),
+ self.toc_metadata(level, navpoints))
def build_epub(self, outdir, outname):
+ # type: (unicode, unicode) -> None
"""Write the epub file.
It is a zip file with the mimetype file stored uncompressed as the first
entry.
"""
- self.info('writing %s file...' % outname)
- projectfiles = ['META-INF/container.xml', 'content.opf', 'toc.ncx'] \
- + self.files
- epub = zipfile.ZipFile(path.join(outdir, outname), 'w',
- zipfile.ZIP_DEFLATED)
- epub.write(path.join(outdir, 'mimetype'), 'mimetype',
- zipfile.ZIP_STORED)
- for file in projectfiles:
- fp = path.join(outdir, file)
- epub.write(fp, file, zipfile.ZIP_DEFLATED)
- epub.close()
+ logger.info('writing %s file...', outname)
+ epub_filename = path.join(outdir, outname)
+ with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub: # type: ignore
+ epub.write(path.join(outdir, 'mimetype'), 'mimetype', ZIP_STORED) # type: ignore
+ for filename in [u'META-INF/container.xml', u'content.opf', u'toc.ncx']:
+ epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED) # type: ignore
+ for filename in self.files:
+ epub.write(path.join(outdir, filename), filename, ZIP_DEFLATED) # type: ignore
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(EpubBuilder)
diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py
index 5e0663a08..8d5118a6d 100644
--- a/sphinx/builders/epub3.py
+++ b/sphinx/builders/epub3.py
@@ -10,87 +10,43 @@
:license: BSD, see LICENSE for details.
"""
-import codecs
from os import path
from datetime import datetime
+from collections import namedtuple
-from sphinx.config import string_classes
+from sphinx import package_dir
+from sphinx.config import string_classes, ENUM
from sphinx.builders.epub import EpubBuilder
-
-
-# (Fragment) templates from which the metainfo files content.opf, toc.ncx,
-# mimetype, and META-INF/container.xml are created.
-# This template section also defines strings that are embedded in the html
-# output but that may be customized by (re-)setting module attributes,
-# e.g. from conf.py.
-
-NAVIGATION_DOC_TEMPLATE = u'''\
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE html>
-<html xmlns="http://www.w3.org/1999/xhtml"\
- xmlns:epub="http://www.idpf.org/2007/ops" lang="%(lang)s" xml:lang="%(lang)s">
- <head>
- <title>%(toc_locale)s</title>
- </head>
- <body>
- <nav epub:type="toc">
- <h1>%(toc_locale)s</h1>
- <ol>
-%(navlist)s
- </ol>
- </nav>
- </body>
-</html>
-'''
-
-NAVLIST_TEMPLATE = u'''%(indent)s <li><a href="%(refuri)s">%(text)s</a></li>'''
-NAVLIST_TEMPLATE_HAS_CHILD = u'''%(indent)s <li><a href="%(refuri)s">%(text)s</a>'''
-NAVLIST_TEMPLATE_BEGIN_BLOCK = u'''%(indent)s <ol>'''
-NAVLIST_TEMPLATE_END_BLOCK = u'''%(indent)s </ol>
-%(indent)s </li>'''
-NAVLIST_INDENT = ' '
-
-PACKAGE_DOC_TEMPLATE = u'''\
-<?xml version="1.0" encoding="UTF-8"?>
-<package xmlns="http://www.idpf.org/2007/opf" version="3.0" xml:lang="%(lang)s"
- unique-identifier="%(uid)s"
- prefix="ibooks: http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/">
- <metadata xmlns:opf="http://www.idpf.org/2007/opf"
- xmlns:dc="http://purl.org/dc/elements/1.1/">
- <dc:language>%(lang)s</dc:language>
- <dc:title>%(title)s</dc:title>
- <dc:description>%(description)s</dc:description>
- <dc:creator>%(author)s</dc:creator>
- <dc:contributor>%(contributor)s</dc:contributor>
- <dc:publisher>%(publisher)s</dc:publisher>
- <dc:rights>%(copyright)s</dc:rights>
- <dc:identifier id="%(uid)s">%(id)s</dc:identifier>
- <dc:date>%(date)s</dc:date>
- <meta property="dcterms:modified">%(date)s</meta>
- <meta property="ibooks:version">%(version)s</meta>
- <meta property="ibooks:specified-fonts">true</meta>
- <meta property="ibooks:binding">true</meta>
- <meta property="ibooks:scroll-axis">%(ibook_scroll_axis)s</meta>
- </metadata>
- <manifest>
- <item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
- <item id="nav" href="nav.xhtml"\
- media-type="application/xhtml+xml" properties="nav"/>
-%(files)s
- </manifest>
- <spine toc="ncx" page-progression-direction="%(page_progression_direction)s">
-%(spine)s
- </spine>
- <guide>
-%(guide)s
- </guide>
-</package>
-'''
+from sphinx.util import logging
+from sphinx.util.fileutil import copy_asset_file
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, List # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+NavPoint = namedtuple('NavPoint', ['text', 'refuri', 'children'])
+
+# writing modes
+PAGE_PROGRESSION_DIRECTIONS = {
+ 'horizontal': 'ltr',
+ 'vertical': 'rtl',
+}
+IBOOK_SCROLL_AXIS = {
+ 'horizontal': 'vertical',
+ 'vertical': 'horizontal',
+}
+THEME_WRITING_MODES = {
+ 'vertical': 'vertical-rl',
+ 'horizontal': 'horizontal-tb',
+}
DOCTYPE = u'''<!DOCTYPE html>'''
-# The epub3 publisher
-
class Epub3Builder(EpubBuilder):
"""
@@ -102,18 +58,14 @@ class Epub3Builder(EpubBuilder):
"""
name = 'epub'
- navigation_doc_template = NAVIGATION_DOC_TEMPLATE
- navlist_template = NAVLIST_TEMPLATE
- navlist_template_has_child = NAVLIST_TEMPLATE_HAS_CHILD
- navlist_template_begin_block = NAVLIST_TEMPLATE_BEGIN_BLOCK
- navlist_template_end_block = NAVLIST_TEMPLATE_END_BLOCK
- navlist_indent = NAVLIST_INDENT
- content_template = PACKAGE_DOC_TEMPLATE
+ template_dir = path.join(package_dir, 'templates', 'epub3')
doctype = DOCTYPE
# Finish by building the epub file
def handle_finish(self):
+ # type: () -> None
"""Create the metainfo files and finally the epub."""
+ self.validate_config_value()
self.get_toc()
self.build_mimetype(self.outdir, 'mimetype')
self.build_container(self.outdir, 'META-INF/container.xml')
@@ -122,68 +74,69 @@ class Epub3Builder(EpubBuilder):
self.build_toc(self.outdir, 'toc.ncx')
self.build_epub(self.outdir, self.config.epub_basename + '.epub')
- def content_metadata(self, files, spine, guide):
+ def validate_config_value(self):
+ # <package> lang attribute, dc:language
+ if not self.app.config.epub_language:
+ self.app.warn(
+ 'conf value "epub_language" (or "language") '
+ 'should not be empty for EPUB3')
+ # <package> unique-identifier attribute
+ if not self.app.config.epub_uid:
+ self.app.warn('conf value "epub_uid" should not be empty for EPUB3')
+ # dc:title
+ if not self.app.config.epub_title:
+ self.app.warn(
+ 'conf value "epub_title" (or "html_title") '
+ 'should not be empty for EPUB3')
+ # dc:creator
+ if not self.app.config.epub_author:
+ self.app.warn('conf value "epub_author" should not be empty for EPUB3')
+ # dc:contributor
+ if not self.app.config.epub_contributor:
+ self.app.warn('conf value "epub_contributor" should not be empty for EPUB3')
+ # dc:description
+ if not self.app.config.epub_description:
+ self.app.warn('conf value "epub_description" should not be empty for EPUB3')
+ # dc:publisher
+ if not self.app.config.epub_publisher:
+ self.app.warn('conf value "epub_publisher" should not be empty for EPUB3')
+ # dc:rights
+ if not self.app.config.epub_copyright:
+ self.app.warn(
+ 'conf value "epub_copyright" (or "copyright")'
+ 'should not be empty for EPUB3')
+ # dc:identifier
+ if not self.app.config.epub_identifier:
+ self.app.warn('conf value "epub_identifier" should not be empty for EPUB3')
+ # meta ibooks:version
+ if not self.app.config.version:
+ self.app.warn('conf value "version" should not be empty for EPUB3')
+
+ def content_metadata(self):
+ # type: () -> Dict
"""Create a dictionary with all metadata for the content.opf
file properly escaped.
"""
- metadata = super(Epub3Builder, self).content_metadata(
- files, spine, guide)
+ writing_mode = self.config.epub_writing_mode
+
+ metadata = super(Epub3Builder, self).content_metadata()
metadata['description'] = self.esc(self.config.epub_description)
metadata['contributor'] = self.esc(self.config.epub_contributor)
- metadata['page_progression_direction'] = self._page_progression_direction()
- metadata['ibook_scroll_axis'] = self._ibook_scroll_axis()
+ metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode)
+ metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode)
metadata['date'] = self.esc(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
metadata['version'] = self.esc(self.config.version)
return metadata
- def _page_progression_direction(self):
- if self.config.epub_writing_mode == 'horizontal':
- page_progression_direction = 'ltr'
- elif self.config.epub_writing_mode == 'vertical':
- page_progression_direction = 'rtl'
- else:
- page_progression_direction = 'default'
- return page_progression_direction
-
- def _ibook_scroll_axis(self):
- if self.config.epub_writing_mode == 'horizontal':
- scroll_axis = 'vertical'
- elif self.config.epub_writing_mode == 'vertical':
- scroll_axis = 'horizontal'
- else:
- scroll_axis = 'default'
- return scroll_axis
-
- def _css_writing_mode(self):
- if self.config.epub_writing_mode == 'vertical':
- editing_mode = 'vertical-rl'
- else:
- editing_mode = 'horizontal-tb'
- return editing_mode
-
def prepare_writing(self, docnames):
+ # type: (Iterable[unicode]) -> None
super(Epub3Builder, self).prepare_writing(docnames)
- self.globalcontext['theme_writing_mode'] = self._css_writing_mode()
-
- def new_navlist(self, node, level, has_child):
- """Create a new entry in the toc from the node at given level."""
- # XXX Modifies the node
- self.tocid += 1
- node['indent'] = self.navlist_indent * level
- if has_child:
- return self.navlist_template_has_child % node
- else:
- return self.navlist_template % node
-
- def begin_navlist_block(self, level):
- return self.navlist_template_begin_block % {
- "indent": self.navlist_indent * level
- }
- def end_navlist_block(self, level):
- return self.navlist_template_end_block % {"indent": self.navlist_indent * level}
+ writing_mode = self.config.epub_writing_mode
+ self.globalcontext['theme_writing_mode'] = THEME_WRITING_MODES.get(writing_mode)
- def build_navlist(self, nodes):
+ def build_navlist(self, navnodes):
+ # type: (List[nodes.Node]) -> List[NavPoint]
"""Create the toc navigation structure.
This method is almost same as build_navpoints method in epub.py.
@@ -193,10 +146,10 @@ class Epub3Builder(EpubBuilder):
The difference from build_navpoints method is templates which are used
when generating navigation documents.
"""
- navlist = []
- level = 1
- usenodes = []
- for node in nodes:
+ navstack = [] # type: List[NavPoint]
+ navstack.append(NavPoint('', '', []))
+ level = 0
+ for node in navnodes:
if not node['text']:
continue
file = node['refuri'].split('#')[0]
@@ -204,38 +157,42 @@ class Epub3Builder(EpubBuilder):
continue
if node['level'] > self.config.epub_tocdepth:
continue
- usenodes.append(node)
- for i, node in enumerate(usenodes):
- curlevel = node['level']
- if curlevel == level + 1:
- navlist.append(self.begin_navlist_block(level))
- while curlevel < level:
- level -= 1
- navlist.append(self.end_navlist_block(level))
- level = curlevel
- if i != len(usenodes) - 1 and usenodes[i + 1]['level'] > level:
- has_child = True
+
+ navpoint = NavPoint(node['text'], node['refuri'], [])
+ if node['level'] == level:
+ navstack.pop()
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
+ elif node['level'] == level + 1:
+ level += 1
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
+ elif node['level'] < level:
+ while node['level'] < len(navstack):
+ navstack.pop()
+ level = node['level']
+ navstack[-1].children.append(navpoint)
+ navstack.append(navpoint)
else:
- has_child = False
- navlist.append(self.new_navlist(node, level, has_child))
- while level != 1:
- level -= 1
- navlist.append(self.end_navlist_block(level))
- return '\n'.join(navlist)
+ raise
+
+ return navstack[0].children
def navigation_doc_metadata(self, navlist):
+ # type: (List[NavPoint]) -> Dict
"""Create a dictionary with all metadata for the nav.xhtml file
properly escaped.
"""
- metadata = {}
+ metadata = {} # type: Dict
metadata['lang'] = self.esc(self.config.epub_language)
metadata['toc_locale'] = self.esc(self.guide_titles['toc'])
metadata['navlist'] = navlist
return metadata
def build_navigation_doc(self, outdir, outname):
+ # type: (unicode, unicode) -> None
"""Write the metainfo file nav.xhtml."""
- self.info('writing %s file...' % outname)
+ logger.info('writing %s file...', outname)
if self.config.epub_tocscope == 'default':
doctree = self.env.get_and_resolve_doctree(
@@ -247,40 +204,24 @@ class Epub3Builder(EpubBuilder):
# 'includehidden'
refnodes = self.refnodes
navlist = self.build_navlist(refnodes)
- with codecs.open(path.join(outdir, outname), 'w', 'utf-8') as f:
- f.write(self.navigation_doc_template %
- self.navigation_doc_metadata(navlist))
+ copy_asset_file(path.join(self.template_dir, 'nav.xhtml_t'),
+ path.join(outdir, outname),
+ self.navigation_doc_metadata(navlist))
# Add nav.xhtml to epub file
if outname not in self.files:
self.files.append(outname)
-def validate_config_values(app):
- if app.config.epub3_description is not None:
- app.warn('epub3_description is deprecated. Use epub_description instead.')
- app.config.epub_description = app.config.epub3_description
-
- if app.config.epub3_contributor is not None:
- app.warn('epub3_contributor is deprecated. Use epub_contributor instead.')
- app.config.epub_contributor = app.config.epub3_contributor
-
- if app.config.epub3_page_progression_direction is not None:
- app.warn('epub3_page_progression_direction option is deprecated'
- ' from 1.5. Use epub_writing_mode instead.')
-
-
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.epub')
app.add_builder(Epub3Builder)
- app.connect('builder-inited', validate_config_values)
- app.add_config_value('epub_description', '', 'epub3', string_classes)
+ app.add_config_value('epub_description', 'unknown', 'epub3', string_classes)
app.add_config_value('epub_contributor', 'unknown', 'epub3', string_classes)
- app.add_config_value('epub_writing_mode', 'horizontal', 'epub3', string_classes)
- app.add_config_value('epub3_description', None, 'epub3', string_classes)
- app.add_config_value('epub3_contributor', None, 'epub3', string_classes)
- app.add_config_value('epub3_page_progression_direction', None, 'epub3', string_classes)
+ app.add_config_value('epub_writing_mode', 'horizontal', 'epub3',
+ ENUM('horizontal', 'vertical'))
return {
'version': 'builtin',
diff --git a/sphinx/builders/gettext.py b/sphinx/builders/gettext.py
index 0eab06f8b..7535101a8 100644
--- a/sphinx/builders/gettext.py
+++ b/sphinx/builders/gettext.py
@@ -21,14 +21,24 @@ from uuid import uuid4
from six import iteritems, StringIO
from sphinx.builders import Builder
-from sphinx.util import split_index_msg
+from sphinx.util import split_index_msg, logging, status_iterator
from sphinx.util.tags import Tags
from sphinx.util.nodes import extract_messages, traverse_translatable_index
from sphinx.util.osutil import safe_relpath, ensuredir, canon_path
from sphinx.util.i18n import find_catalog
-from sphinx.util.console import darkgreen, purple, bold
+from sphinx.util.console import bold # type: ignore
from sphinx.locale import pairindextypes
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, List, Set, Tuple # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.util.i18n import CatalogInfo # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
POHEADER = r"""
# SOME DESCRIPTIVE TITLE.
# Copyright (C) %(copyright)s
@@ -55,10 +65,14 @@ class Catalog(object):
"""Catalog of translatable messages."""
def __init__(self):
- self.messages = [] # retain insertion order, a la OrderedDict
- self.metadata = {} # msgid -> file, line, uid
+ # type: () -> None
+ self.messages = [] # type: List[unicode]
+ # retain insertion order, a la OrderedDict
+ self.metadata = {} # type: Dict[unicode, List[Tuple[unicode, int, unicode]]]
+ # msgid -> file, line, uid
def add(self, msg, origin):
+ # type: (unicode, MsgOrigin) -> None
if not hasattr(origin, 'uid'):
# Nodes that are replicated like todo don't have a uid,
# however i18n is also unnecessary.
@@ -75,6 +89,7 @@ class MsgOrigin(object):
"""
def __init__(self, source, line):
+ # type: (unicode, int) -> None
self.source = source
self.line = line
self.uid = uuid4().hex
@@ -87,6 +102,7 @@ class I18nTags(Tags):
always returns True value even if no tags are defined.
"""
def eval_condition(self, condition):
+ # type: (Any) -> bool
return True
@@ -96,30 +112,36 @@ class I18nBuilder(Builder):
"""
name = 'i18n'
versioning_method = 'text'
- versioning_compare = None # be set by `gettext_uuid`
-
- def __init__(self, app):
- self.versioning_compare = app.env.config.gettext_uuid
- super(I18nBuilder, self).__init__(app)
+ versioning_compare = None # type: bool
+ # be set by `gettext_uuid`
+ use_message_catalog = False
def init(self):
+ # type: () -> None
Builder.init(self)
+ self.env.set_versioning_method(self.versioning_method,
+ self.env.config.gettext_uuid)
self.tags = I18nTags()
- self.catalogs = defaultdict(Catalog)
+ self.catalogs = defaultdict(Catalog) # type: defaultdict[unicode, Catalog]
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
+ # type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
+ # type: (Set[unicode]) -> None
return
def compile_catalogs(self, catalogs, message):
+ # type: (Set[CatalogInfo], unicode) -> None
return
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
catalog = self.catalogs[find_catalog(docname,
self.config.gettext_compact)]
@@ -153,13 +175,16 @@ if source_date_epoch is not None:
class LocalTimeZone(tzinfo):
def __init__(self, *args, **kw):
- super(LocalTimeZone, self).__init__(*args, **kw)
+ # type: (Any, Any) -> None
+ super(LocalTimeZone, self).__init__(*args, **kw) # type: ignore
self.tzdelta = tzdelta
def utcoffset(self, dt):
+ # type: (datetime) -> timedelta
return self.tzdelta
def dst(self, dt):
+ # type: (datetime) -> timedelta
return timedelta(0)
@@ -169,7 +194,7 @@ ltz = LocalTimeZone()
def should_write(filepath, new_content):
if not path.exists(filepath):
return True
- with open(filepath, 'r', encoding='utf-8') as oldpot:
+ with open(filepath, 'r', encoding='utf-8') as oldpot: # type: ignore
old_content = oldpot.read()
old_header_index = old_content.index('"POT-Creation-Date:')
new_header_index = old_content.index('"POT-Creation-Date:')
@@ -187,11 +212,13 @@ class MessageCatalogBuilder(I18nBuilder):
name = 'gettext'
def init(self):
+ # type: () -> None
I18nBuilder.init(self)
self.create_template_bridge()
self.templates.init(self)
def _collect_templates(self):
+ # type: () -> Set[unicode]
template_files = set()
for template_path in self.config.templates_path:
tmpl_abs_path = path.join(self.app.srcdir, template_path)
@@ -203,25 +230,28 @@ class MessageCatalogBuilder(I18nBuilder):
return template_files
def _extract_from_template(self):
+ # type: () -> None
files = self._collect_templates()
- self.info(bold('building [%s]: ' % self.name), nonl=1)
- self.info('targets for %d template files' % len(files))
+ logger.info(bold('building [%s]: ' % self.name), nonl=1)
+ logger.info('targets for %d template files', len(files))
extract_translations = self.templates.environment.extract_translations
- for template in self.app.status_iterator(
- files, 'reading templates... ', purple, len(files)):
- with open(template, 'r', encoding='utf-8') as f:
+ for template in status_iterator(files, 'reading templates... ', "purple", # type: ignore # NOQA
+ len(files), self.app.verbosity):
+ with open(template, 'r', encoding='utf-8') as f: # type: ignore
context = f.read()
for line, meth, msg in extract_translations(context):
origin = MsgOrigin(template, line)
self.catalogs['sphinx'].add(msg, origin)
def build(self, docnames, summary=None, method='update'):
+ # type: (Iterable[unicode], unicode, unicode) -> None
self._extract_from_template()
I18nBuilder.build(self, docnames, summary, method)
def finish(self):
+ # type: () -> None
I18nBuilder.finish(self)
data = dict(
version = self.config.version,
@@ -230,46 +260,47 @@ class MessageCatalogBuilder(I18nBuilder):
ctime = datetime.fromtimestamp(
timestamp, ltz).strftime('%Y-%m-%d %H:%M%z'),
)
- for textdomain, catalog in self.app.status_iterator(
- iteritems(self.catalogs), "writing message catalogs... ",
- darkgreen, len(self.catalogs),
- lambda textdomain__: textdomain__[0]):
+ for textdomain, catalog in status_iterator(iteritems(self.catalogs), # type: ignore
+ "writing message catalogs... ",
+ "darkgreen", len(self.catalogs),
+ self.app.verbosity,
+ lambda textdomain__: textdomain__[0]):
# noop if config.gettext_compact is set
ensuredir(path.join(self.outdir, path.dirname(textdomain)))
pofn = path.join(self.outdir, textdomain + '.pot')
-
output = StringIO()
- output.write(POHEADER % data)
+ output.write(POHEADER % data) # type: ignore
for message in catalog.messages:
positions = catalog.metadata[message]
if self.config.gettext_location:
# generate "#: file1:line1\n#: file2:line2 ..."
- output.write("#: %s\n" % "\n#: ".join(
+ output.write("#: %s\n" % "\n#: ".join( # type: ignore
"%s:%s" % (canon_path(
safe_relpath(source, self.outdir)), line)
for source, line, _ in positions))
if self.config.gettext_uuid:
# generate "# uuid1\n# uuid2\n ..."
- output.write("# %s\n" % "\n# ".join(
+ output.write("# %s\n" % "\n# ".join( # type: ignore
uid for _, _, uid in positions))
# message contains *one* line of text ready for translation
message = message.replace('\\', r'\\'). \
replace('"', r'\"'). \
replace('\n', '\\n"\n"')
- output.write('msgid "%s"\nmsgstr ""\n\n' % message)
+ output.write('msgid "%s"\nmsgstr ""\n\n' % message) # type: ignore
content = output.getvalue()
if should_write(pofn, content):
- with open(pofn, 'w', encoding='utf-8') as pofile:
+ with open(pofn, 'w', encoding='utf-8') as pofile: # type: ignore
pofile.write(content)
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(MessageCatalogBuilder)
app.add_config_value('gettext_compact', True, 'gettext')
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 93d7a3519..77122749f 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -12,7 +12,6 @@
import os
import re
import sys
-import zlib
import codecs
import posixpath
from os import path
@@ -20,6 +19,7 @@ from hashlib import md5
from six import iteritems, text_type, string_types
from six.moves import cPickle as pickle
+
from docutils import nodes
from docutils.io import DocTreeInput, StringOutput
from docutils.core import Publisher
@@ -28,11 +28,13 @@ from docutils.frontend import OptionParser
from docutils.readers.doctree import Reader as DoctreeReader
from sphinx import package_dir, __display_version__
-from sphinx.util import jsonimpl
+from sphinx.util import jsonimpl, logging, status_iterator
from sphinx.util.i18n import format_date
+from sphinx.util.inventory import InventoryFile
from sphinx.util.osutil import SEP, os_path, relative_uri, ensuredir, \
movefile, copyfile
from sphinx.util.nodes import inline_all_toctrees
+from sphinx.util.docutils import is_html5_writer_available, __version_info__
from sphinx.util.fileutil import copy_asset
from sphinx.util.matching import patmatch, Matcher, DOTFILES
from sphinx.config import string_classes
@@ -42,19 +44,36 @@ from sphinx.theming import Theme
from sphinx.builders import Builder
from sphinx.application import ENV_PICKLE_FILENAME
from sphinx.highlighting import PygmentsBridge
-from sphinx.util.console import bold, darkgreen, brown
+from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.writers.html import HTMLWriter, HTMLTranslator, \
SmartyPantsHTMLTranslator
+from sphinx.environment.adapters.toctree import TocTree
+from sphinx.environment.adapters.indexentries import IndexEntries
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, Iterator, List, Type, Tuple, Union # NOQA
+ from sphinx.domains import Domain, Index # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+# Experimental HTML5 Writer
+if is_html5_writer_available():
+ from sphinx.writers.html5 import HTML5Translator, SmartyPantsHTML5Translator
+ html5_ready = True
+else:
+ html5_ready = False
#: the filename for the inventory of objects
INVENTORY_FILENAME = 'objects.inv'
#: the filename for the "last build" file (for serializing builders)
LAST_BUILD_FILENAME = 'last_build'
+logger = logging.getLogger(__name__)
return_codes_re = re.compile('[\r\n]+')
def get_stable_hash(obj):
+ # type: (Any) -> unicode
"""
Return a stable hash for a Python data structure. We can't just use
the md5 of str(obj) since for example dictionary items are enumerated
@@ -77,7 +96,7 @@ class StandaloneHTMLBuilder(Builder):
allow_parallel = True
out_suffix = '.html'
link_suffix = '.html' # defaults to matching out_suffix
- indexer_format = js_index
+ indexer_format = js_index # type: Any
indexer_dumps_unicode = True
# create links to original images from images [True/False]
html_scaled_image_link = True
@@ -88,13 +107,17 @@ class StandaloneHTMLBuilder(Builder):
allow_sharp_as_current_path = True
embedded = False # for things like HTML help or Qt help: suppresses sidebar
search = True # for things like HTML help and Apple help: suppress search
+ use_index = False
download_support = True # enable download role
# This is a class attribute because it is mutated by Sphinx.add_javascript.
script_files = ['_static/jquery.js', '_static/underscore.js',
- '_static/doctools.js']
+ '_static/doctools.js'] # type: List[unicode]
# Dito for this one.
- css_files = []
+ css_files = [] # type: List[unicode]
+
+ imgpath = None # type: unicode
+ domain_indices = [] # type: List[Tuple[unicode, Type[Index], List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool]] # NOQA
default_sidebars = ['localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
@@ -103,15 +126,16 @@ class StandaloneHTMLBuilder(Builder):
_publisher = None
def init(self):
+ # type: () -> None
# a hash of all config values that, if changed, cause a full rebuild
- self.config_hash = ''
- self.tags_hash = ''
+ self.config_hash = '' # type: unicode
+ self.tags_hash = '' # type: unicode
# basename of images directory
self.imagedir = '_images'
# section numbers for headings in the currently visited document
- self.secnumbers = {}
+ self.secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
# currently written docname
- self.current_docname = None
+ self.current_docname = None # type: unicode
self.init_templates()
self.init_highlighter()
@@ -129,32 +153,42 @@ class StandaloneHTMLBuilder(Builder):
self.script_files.append('_static/translations.js')
self.use_index = self.get_builder_config('use_index', 'html')
+ if self.config.html_experimental_html5_writer and not html5_ready:
+ self.app.warn(' '.join((
+ 'html_experimental_html5_writer is set, but current version is old.',
+ 'Docutils\' version should be or newer than 0.13, but %s.',
+ )) % '.'.join(map(str, __version_info__)))
+
def _get_translations_js(self):
- candidates = [path.join(package_dir, 'locale', self.config.language,
+ # type: () -> unicode
+ candidates = [path.join(dir, self.config.language,
+ 'LC_MESSAGES', 'sphinx.js')
+ for dir in self.config.locale_dirs] + \
+ [path.join(package_dir, 'locale', self.config.language,
'LC_MESSAGES', 'sphinx.js'),
path.join(sys.prefix, 'share/sphinx/locale',
- self.config.language, 'sphinx.js')] + \
- [path.join(dir, self.config.language,
- 'LC_MESSAGES', 'sphinx.js')
- for dir in self.config.locale_dirs]
+ self.config.language, 'sphinx.js')]
+
for jsfile in candidates:
if path.isfile(jsfile):
return jsfile
return None
def get_theme_config(self):
+ # type: () -> Tuple[unicode, Dict]
return self.config.html_theme, self.config.html_theme_options
def init_templates(self):
- Theme.init_themes(self.confdir, self.config.html_theme_path,
- warn=self.warn)
+ # type: () -> None
+ Theme.init_themes(self.confdir, self.config.html_theme_path)
themename, themeoptions = self.get_theme_config()
- self.theme = Theme(themename, warn=self.warn)
+ self.theme = Theme(themename)
self.theme_options = themeoptions.copy()
self.create_template_bridge()
self.templates.init(self, self.theme)
def init_highlighter(self):
+ # type: () -> None
# determine Pygments style and create the highlighter
if self.config.pygments_style is not None:
style = self.config.pygments_style
@@ -166,18 +200,24 @@ class StandaloneHTMLBuilder(Builder):
self.config.trim_doctest_flags)
def init_translator_class(self):
+ # type: () -> None
if self.translator_class is None:
- if self.config.html_use_smartypants:
- self.translator_class = SmartyPantsHTMLTranslator
+ if self.config.html_experimental_html5_writer and html5_ready:
+ if self.config.html_use_smartypants:
+ self.translator_class = SmartyPantsHTML5Translator
+ else:
+ self.translator_class = HTML5Translator
else:
- self.translator_class = HTMLTranslator
+ if self.config.html_use_smartypants:
+ self.translator_class = SmartyPantsHTMLTranslator
+ else:
+ self.translator_class = HTMLTranslator
def get_outdated_docs(self):
- cfgdict = dict((name, self.config[name])
- for (name, desc) in iteritems(self.config.values)
- if desc[1] == 'html')
+ # type: () -> Iterator[unicode]
+ cfgdict = dict((confval.name, confval.value) for confval in self.config.filter('html'))
self.config_hash = get_stable_hash(cfgdict)
- self.tags_hash = get_stable_hash(sorted(self.tags))
+ self.tags_hash = get_stable_hash(sorted(self.tags)) # type: ignore
old_config_hash = old_tags_hash = ''
try:
with open(path.join(self.outdir, '.buildinfo')) as fp:
@@ -192,8 +232,8 @@ class StandaloneHTMLBuilder(Builder):
if tag != 'tags':
raise ValueError
except ValueError:
- self.warn('unsupported build info format in %r, building all' %
- path.join(self.outdir, '.buildinfo'))
+ logger.warning('unsupported build info format in %r, building all',
+ path.join(self.outdir, '.buildinfo'))
except Exception:
pass
if old_config_hash != self.config_hash or \
@@ -224,7 +264,12 @@ class StandaloneHTMLBuilder(Builder):
# source doesn't exist anymore
pass
+ def get_asset_paths(self):
+ # type: () -> List[unicode]
+ return self.config.html_extra_path
+
def render_partial(self, node):
+ # type: (nodes.Nodes) -> Dict[unicode, unicode]
"""Utility: Render a lone doctree node."""
if node is None:
return {'fragment': ''}
@@ -250,6 +295,7 @@ class StandaloneHTMLBuilder(Builder):
return pub.writer.parts
def prepare_writing(self, docnames):
+ # type: (Iterable[unicode]) -> nodes.Node
# create the search indexer
self.indexer = None
if self.search:
@@ -275,16 +321,13 @@ class StandaloneHTMLBuilder(Builder):
indices_config = self.config.html_domain_indices
if indices_config:
for domain_name in sorted(self.env.domains):
+ domain = None # type: Domain
domain = self.env.domains[domain_name]
for indexcls in domain.indices:
- indexname = '%s-%s' % (domain.name, indexcls.name)
+ indexname = '%s-%s' % (domain.name, indexcls.name) # type: unicode
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
- # deprecated config value
- if indexname == 'py-modindex' and \
- not self.config.html_use_modindex:
- continue
content, collapse = indexcls(domain).generate()
if content:
self.domain_indices.append(
@@ -295,8 +338,7 @@ class StandaloneHTMLBuilder(Builder):
lufmt = self.config.html_last_updated_fmt
if lufmt is not None:
self.last_updated = format_date(lufmt or _('%b %d, %Y'),
- language=self.config.language,
- warn=self.warn)
+ language=self.config.language)
else:
self.last_updated = None
@@ -306,14 +348,14 @@ class StandaloneHTMLBuilder(Builder):
favicon = self.config.html_favicon and \
path.basename(self.config.html_favicon) or ''
if favicon and os.path.splitext(favicon)[1] != '.ico':
- self.warn('html_favicon is not an .ico file')
+ logger.warning('html_favicon is not an .ico file')
if not isinstance(self.config.html_use_opensearch, string_types):
- self.warn('html_use_opensearch config value must now be a string')
+ logger.warning('html_use_opensearch config value must now be a string')
self.relations = self.env.collect_relations()
- rellinks = []
+ rellinks = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
if self.use_index:
rellinks.append(('genindex', _('General Index'), 'I', _('index')))
for indexname, indexcls, content, collapse in self.domain_indices:
@@ -356,7 +398,8 @@ class StandaloneHTMLBuilder(Builder):
parents = [],
logo = logo,
favicon = favicon,
- )
+ html5_doctype = self.config.html_experimental_html5_writer and html5_ready,
+ ) # type: Dict[unicode, Any]
if self.theme:
self.globalcontext.update(
('theme_' + key, val) for (key, val) in
@@ -364,6 +407,7 @@ class StandaloneHTMLBuilder(Builder):
self.globalcontext.update(self.config.html_context)
def get_doc_context(self, docname, body, metatags):
+ # type: (unicode, unicode, Dict) -> Dict[unicode, Any]
"""Collect items for the template context of a page."""
# find out relations
prev = next = None
@@ -424,7 +468,7 @@ class StandaloneHTMLBuilder(Builder):
meta = self.env.metadata.get(docname)
# local TOC and global TOC tree
- self_toc = self.env.get_toc_for(docname, self)
+ self_toc = TocTree(self.env).get_toc_for(docname, self)
toc = self.render_partial(self_toc)['fragment']
return dict(
@@ -444,6 +488,7 @@ class StandaloneHTMLBuilder(Builder):
)
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
@@ -461,6 +506,7 @@ class StandaloneHTMLBuilder(Builder):
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
self.imgpath = relative_uri(self.get_target_uri(docname), self.imagedir)
self.post_process_images(doctree)
title = self.env.longtitles.get(docname)
@@ -468,6 +514,7 @@ class StandaloneHTMLBuilder(Builder):
self.index_page(docname, doctree, title)
def finish(self):
+ # type: () -> None
self.finish_tasks.add_task(self.gen_indices)
self.finish_tasks.add_task(self.gen_additional_pages)
self.finish_tasks.add_task(self.copy_image_files)
@@ -480,7 +527,8 @@ class StandaloneHTMLBuilder(Builder):
self.handle_finish()
def gen_indices(self):
- self.info(bold('generating indices...'), nonl=1)
+ # type: () -> None
+ logger.info(bold('generating indices...'), nonl=1)
# the global general index
if self.use_index:
@@ -489,15 +537,16 @@ class StandaloneHTMLBuilder(Builder):
# the global domain-specific indices
self.write_domain_indices()
- self.info()
+ logger.info('')
def gen_additional_pages(self):
+ # type: () -> None
# pages from extensions
for pagelist in self.app.emit('html-collect-pages'):
for pagename, context, template in pagelist:
self.handle_page(pagename, context, template)
- self.info(bold('writing additional pages...'), nonl=1)
+ logger.info(bold('writing additional pages...'), nonl=1)
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
@@ -506,21 +555,22 @@ class StandaloneHTMLBuilder(Builder):
# the search page
if self.search:
- self.info(' search', nonl=1)
+ logger.info(' search', nonl=1)
self.handle_page('search', {}, 'search.html')
# the opensearch xml file
if self.config.html_use_opensearch and self.search:
- self.info(' opensearch', nonl=1)
+ logger.info(' opensearch', nonl=1)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
- self.info()
+ logger.info('')
def write_genindex(self):
+ # type: () -> None
# the total count of lines for each index letter, used to distribute
# the entries into two columns
- genindex = self.env.create_index(self)
+ genindex = IndexEntries(self.env).create_index(self)
indexcounts = []
for _k, entries in genindex:
indexcounts.append(sum(1 + len(subitems)
@@ -531,7 +581,7 @@ class StandaloneHTMLBuilder(Builder):
genindexcounts = indexcounts,
split_index = self.config.html_split_index,
)
- self.info(' genindex', nonl=1)
+ logger.info(' genindex', nonl=1)
if self.config.html_split_index:
self.handle_page('genindex', genindexcontext,
@@ -547,54 +597,58 @@ class StandaloneHTMLBuilder(Builder):
self.handle_page('genindex', genindexcontext, 'genindex.html')
def write_domain_indices(self):
+ # type: () -> None
for indexname, indexcls, content, collapse in self.domain_indices:
indexcontext = dict(
indextitle = indexcls.localname,
content = content,
collapse_index = collapse,
)
- self.info(' ' + indexname, nonl=1)
+ logger.info(' ' + indexname, nonl=1)
self.handle_page(indexname, indexcontext, 'domainindex.html')
def copy_image_files(self):
+ # type: () -> None
# copy image files
if self.images:
ensuredir(path.join(self.outdir, self.imagedir))
- for src in self.app.status_iterator(self.images, 'copying images... ',
- brown, len(self.images)):
+ for src in status_iterator(self.images, 'copying images... ', "brown",
+ len(self.images), self.app.verbosity):
dest = self.images[src]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, self.imagedir, dest))
except Exception as err:
- self.warn('cannot copy image file %r: %s' %
- (path.join(self.srcdir, src), err))
+ logger.warning('cannot copy image file %r: %s',
+ path.join(self.srcdir, src), err)
def copy_download_files(self):
+ # type: () -> None
def to_relpath(f):
+ # type: (unicode) -> unicode
return relative_path(self.srcdir, f)
# copy downloadable files
if self.env.dlfiles:
ensuredir(path.join(self.outdir, '_downloads'))
- for src in self.app.status_iterator(self.env.dlfiles,
- 'copying downloadable files... ',
- brown, len(self.env.dlfiles),
- stringify_func=to_relpath):
+ for src in status_iterator(self.env.dlfiles, 'copying downloadable files... ',
+ "brown", len(self.env.dlfiles), self.app.verbosity,
+ stringify_func=to_relpath):
dest = self.env.dlfiles[src][1]
try:
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, '_downloads', dest))
except Exception as err:
- self.warn('cannot copy downloadable file %r: %s' %
- (path.join(self.srcdir, src), err))
+ logger.warning('cannot copy downloadable file %r: %s',
+ path.join(self.srcdir, src), err)
def copy_static_files(self):
+ # type: () -> None
# copy static files
- self.info(bold('copying static files... '), nonl=True)
+ logger.info(bold('copying static files... '), nonl=True)
ensuredir(path.join(self.outdir, '_static'))
# first, create pygments style file
with open(path.join(self.outdir, '_static', 'pygments.css'), 'w') as f:
- f.write(self.highlighter.get_stylesheet())
+ f.write(self.highlighter.get_stylesheet()) # type: ignore
# then, copy translations JavaScript file
if self.config.language is not None:
jsfile = self._get_translations_js()
@@ -625,7 +679,7 @@ class StandaloneHTMLBuilder(Builder):
for static_path in self.config.html_static_path:
entry = path.join(self.confdir, static_path)
if not path.exists(entry):
- self.warn('html_static_path entry %r does not exist' % entry)
+ logger.warning('html_static_path entry %r does not exist', entry)
continue
copy_asset(entry, path.join(self.outdir, '_static'), excluded,
context=ctx, renderer=self.templates)
@@ -634,7 +688,7 @@ class StandaloneHTMLBuilder(Builder):
logobase = path.basename(self.config.html_logo)
logotarget = path.join(self.outdir, '_static', logobase)
if not path.isfile(path.join(self.confdir, self.config.html_logo)):
- self.warn('logo file %r does not exist' % self.config.html_logo)
+ logger.warning('logo file %r does not exist', self.config.html_logo)
elif not path.isfile(logotarget):
copyfile(path.join(self.confdir, self.config.html_logo),
logotarget)
@@ -642,27 +696,29 @@ class StandaloneHTMLBuilder(Builder):
iconbase = path.basename(self.config.html_favicon)
icontarget = path.join(self.outdir, '_static', iconbase)
if not path.isfile(path.join(self.confdir, self.config.html_favicon)):
- self.warn('favicon file %r does not exist' % self.config.html_favicon)
+ logger.warning('favicon file %r does not exist', self.config.html_favicon)
elif not path.isfile(icontarget):
copyfile(path.join(self.confdir, self.config.html_favicon),
icontarget)
- self.info('done')
+ logger.info('done')
def copy_extra_files(self):
+ # type: () -> None
# copy html_extra_path files
- self.info(bold('copying extra files... '), nonl=True)
+ logger.info(bold('copying extra files... '), nonl=True)
excluded = Matcher(self.config.exclude_patterns)
for extra_path in self.config.html_extra_path:
entry = path.join(self.confdir, extra_path)
if not path.exists(entry):
- self.warn('html_extra_path entry %r does not exist' % entry)
+ logger.warning('html_extra_path entry %r does not exist', entry)
continue
copy_asset(entry, self.outdir, excluded)
- self.info('done')
+ logger.info('done')
def write_buildinfo(self):
+ # type: () -> None
# write build info file
with open(path.join(self.outdir, '.buildinfo'), 'w') as fp:
fp.write('# Sphinx build info version 1\n'
@@ -672,11 +728,13 @@ class StandaloneHTMLBuilder(Builder):
(self.config_hash, self.tags_hash))
def cleanup(self):
+ # type: () -> None
# clean up theme stuff
if self.theme:
self.theme.cleanup()
def post_process_images(self, doctree):
+ # type: (nodes.Node) -> None
"""Pick the best candidate for an image and link down-scaled images to
their high res version.
"""
@@ -702,24 +760,26 @@ class StandaloneHTMLBuilder(Builder):
reference.append(node)
def load_indexer(self, docnames):
+ # type: (Iterable[unicode]) -> None
keep = set(self.env.all_docs) - set(docnames)
try:
searchindexfn = path.join(self.outdir, self.searchindex_filename)
if self.indexer_dumps_unicode:
- f = codecs.open(searchindexfn, 'r', encoding='utf-8')
+ f = codecs.open(searchindexfn, 'r', encoding='utf-8') # type: ignore
else:
- f = open(searchindexfn, 'rb')
+ f = open(searchindexfn, 'rb') # type: ignore
with f:
- self.indexer.load(f, self.indexer_format)
+ self.indexer.load(f, self.indexer_format) # type: ignore
except (IOError, OSError, ValueError):
if keep:
- self.warn('search index couldn\'t be loaded, but not all '
- 'documents will be built: the index will be '
- 'incomplete.')
+ logger.warning('search index couldn\'t be loaded, but not all '
+ 'documents will be built: the index will be '
+ 'incomplete.')
# delete all entries for files that will be rebuilt
self.indexer.prune(keep)
def index_page(self, pagename, doctree, title):
+ # type: (unicode, nodes.Node, unicode) -> None
# only index pages with title
if self.indexer is not None and title:
filename = self.env.doc2path(pagename, base=None)
@@ -727,19 +787,23 @@ class StandaloneHTMLBuilder(Builder):
self.indexer.feed(pagename, filename, title, doctree)
except TypeError:
# fallback for old search-adapters
- self.indexer.feed(pagename, title, doctree)
+ self.indexer.feed(pagename, title, doctree) # type: ignore
def _get_local_toctree(self, docname, collapse=True, **kwds):
+ # type: (unicode, bool, Any) -> unicode
if 'includehidden' not in kwds:
kwds['includehidden'] = False
- return self.render_partial(self.env.get_toctree_for(
+ return self.render_partial(TocTree(self.env).get_toctree_for(
docname, self, collapse, **kwds))['fragment']
def get_outfilename(self, pagename):
+ # type: (unicode) -> unicode
return path.join(self.outdir, os_path(pagename) + self.out_suffix)
def add_sidebars(self, pagename, ctx):
+ # type: (unicode, Dict) -> None
def has_wildcard(pattern):
+ # type: (unicode) -> bool
return any(char in pattern for char in '*?[')
sidebars = None
matched = None
@@ -750,9 +814,9 @@ class StandaloneHTMLBuilder(Builder):
if has_wildcard(pattern):
# warn if both patterns contain wildcards
if has_wildcard(matched):
- self.warn('page %s matches two patterns in '
- 'html_sidebars: %r and %r' %
- (pagename, matched, pattern))
+ logger.warning('page %s matches two patterns in '
+ 'html_sidebars: %r and %r',
+ pagename, matched, pattern)
# else the already matched pattern is more specific
# than the present one, because it contains no wildcard
continue
@@ -771,20 +835,24 @@ class StandaloneHTMLBuilder(Builder):
# --------- these are overwritten by the serialization builder
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return docname + self.link_suffix
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
+ # type: (unicode, Dict, unicode, unicode, Any) -> None
ctx = self.globalcontext.copy()
ctx['warn'] = self.warn
# current_page_name is backwards compatibility
ctx['pagename'] = ctx['current_page_name'] = pagename
+ ctx['encoding'] = self.config.html_output_encoding
default_baseuri = self.get_target_uri(pagename)
# in the singlehtml builder, default_baseuri still contains an #anchor
# part, which relative_uri doesn't really like...
default_baseuri = default_baseuri.rsplit('#', 1)[0]
def pathto(otheruri, resource=False, baseuri=default_baseuri):
+ # type: (unicode, bool, unicode) -> unicode
if resource and '://' in otheruri:
# allow non-local resources given by scheme
return otheruri
@@ -797,6 +865,7 @@ class StandaloneHTMLBuilder(Builder):
ctx['pathto'] = pathto
def hasdoc(name):
+ # type: (unicode) -> bool
if name in self.env.all_docs:
return True
elif name == 'search' and self.search:
@@ -806,14 +875,11 @@ class StandaloneHTMLBuilder(Builder):
return False
ctx['hasdoc'] = hasdoc
- if self.name != 'htmlhelp':
- ctx['encoding'] = encoding = self.config.html_output_encoding
- else:
- ctx['encoding'] = encoding = self.encoding
ctx['toctree'] = lambda **kw: self._get_local_toctree(pagename, **kw)
self.add_sidebars(pagename, ctx)
ctx.update(addctx)
+ self.update_page_context(pagename, templatename, ctx, event_arg)
newtmpl = self.app.emit_firstresult('html-page-context', pagename,
templatename, ctx, event_arg)
if newtmpl:
@@ -822,9 +888,9 @@ class StandaloneHTMLBuilder(Builder):
try:
output = self.templates.render(templatename, ctx)
except UnicodeError:
- self.warn("a Unicode error occurred when rendering the page %s. "
- "Please make sure all config values that contain "
- "non-ASCII content are Unicode strings." % pagename)
+ logger.warning("a Unicode error occurred when rendering the page %s. "
+ "Please make sure all config values that contain "
+ "non-ASCII content are Unicode strings.", pagename)
return
if not outfilename:
@@ -832,10 +898,10 @@ class StandaloneHTMLBuilder(Builder):
# outfilename's path is in general different from self.outdir
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', encoding, 'xmlcharrefreplace') as f:
+ with codecs.open(outfilename, 'w', ctx['encoding'], 'xmlcharrefreplace') as f: # type: ignore # NOQA
f.write(output)
except (IOError, OSError) as err:
- self.warn("error writing file %s: %s" % (outfilename, err))
+ logger.warning("error writing file %s: %s", outfilename, err)
if self.copysource and ctx.get('sourcename'):
# copy the source file for the "show source" link
source_name = path.join(self.outdir, '_sources',
@@ -843,43 +909,25 @@ class StandaloneHTMLBuilder(Builder):
ensuredir(path.dirname(source_name))
copyfile(self.env.doc2path(pagename), source_name)
+ def update_page_context(self, pagename, templatename, ctx, event_arg):
+ # type: (unicode, unicode, Dict, Any) -> None
+ pass
+
def handle_finish(self):
+ # type: () -> None
if self.indexer:
self.finish_tasks.add_task(self.dump_search_index)
self.finish_tasks.add_task(self.dump_inventory)
def dump_inventory(self):
- def safe_name(string):
- return re.sub("\s+", " ", string)
-
- self.info(bold('dumping object inventory... '), nonl=True)
- with open(path.join(self.outdir, INVENTORY_FILENAME), 'wb') as f:
- f.write((u'# Sphinx inventory version 2\n'
- u'# Project: %s\n'
- u'# Version: %s\n'
- u'# The remainder of this file is compressed using zlib.\n'
- % (safe_name(self.config.project),
- safe_name(self.config.version))).encode('utf-8'))
- compressor = zlib.compressobj(9)
- for domainname, domain in sorted(self.env.domains.items()):
- for name, dispname, type, docname, anchor, prio in \
- sorted(domain.get_objects()):
- if anchor.endswith(name):
- # this can shorten the inventory by as much as 25%
- anchor = anchor[:-len(name)] + '$'
- uri = self.get_target_uri(docname)
- if anchor:
- uri += '#' + anchor
- if dispname == name:
- dispname = u'-'
- f.write(compressor.compress(
- (u'%s %s:%s %s %s %s\n' % (name, domainname, type,
- prio, uri, dispname)).encode('utf-8')))
- f.write(compressor.flush())
- self.info('done')
+ # type: () -> None
+ logger.info(bold('dumping object inventory... '), nonl=True)
+ InventoryFile.dump(path.join(self.outdir, INVENTORY_FILENAME), self.env, self)
+ logger.info('done')
def dump_search_index(self):
- self.info(
+ # type: () -> None
+ logger.info(
bold('dumping search index in %s ... ' % self.indexer.label()),
nonl=True)
self.indexer.prune(self.env.all_docs)
@@ -887,13 +935,13 @@ class StandaloneHTMLBuilder(Builder):
# first write to a temporary file, so that if dumping fails,
# the existing index won't be overwritten
if self.indexer_dumps_unicode:
- f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8')
+ f = codecs.open(searchindexfn + '.tmp', 'w', encoding='utf-8') # type: ignore
else:
- f = open(searchindexfn + '.tmp', 'wb')
+ f = open(searchindexfn + '.tmp', 'wb') # type: ignore
with f:
- self.indexer.dump(f, self.indexer_format)
+ self.indexer.dump(f, self.indexer_format) # type: ignore
movefile(searchindexfn + '.tmp', searchindexfn)
- self.info('done')
+ logger.info('done')
class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
@@ -905,6 +953,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
name = 'dirhtml'
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -912,6 +961,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def get_outfilename(self, pagename):
+ # type: (unicode) -> unicode
if pagename == 'index' or pagename.endswith(SEP + 'index'):
outfilename = path.join(self.outdir, os_path(pagename) +
self.out_suffix)
@@ -922,6 +972,7 @@ class DirectoryHTMLBuilder(StandaloneHTMLBuilder):
return outfilename
def prepare_writing(self, docnames):
+ # type: (Iterable[unicode]) -> None
StandaloneHTMLBuilder.prepare_writing(self, docnames)
self.globalcontext['no_search_suffix'] = True
@@ -934,10 +985,12 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
name = 'singlehtml'
copysource = False
- def get_outdated_docs(self):
+ def get_outdated_docs(self): # type: ignore
+ # type: () -> Union[unicode, List[unicode]]
return 'all documents'
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if docname in self.env.all_docs:
# all references are on the same page...
return self.config.master_doc + self.out_suffix + \
@@ -947,10 +1000,12 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return docname + self.out_suffix
def get_relative_uri(self, from_, to, typ=None):
+ # type: (unicode, unicode, unicode) -> unicode
# ignore source
return self.get_target_uri(to, typ)
def fix_refuris(self, tree):
+ # type: (nodes.Node) -> None
# fix refuris with double anchor
fname = self.config.master_doc + self.out_suffix
for refnode in tree.traverse(nodes.reference):
@@ -965,13 +1020,15 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
refnode['refuri'] = fname + refuri[hashindex:]
def _get_local_toctree(self, docname, collapse=True, **kwds):
+ # type: (unicode, bool, Any) -> unicode
if 'includehidden' not in kwds:
kwds['includehidden'] = False
- toctree = self.env.get_toctree_for(docname, self, collapse, **kwds)
+ toctree = TocTree(self.env).get_toctree_for(docname, self, collapse, **kwds)
self.fix_refuris(toctree)
return self.render_partial(toctree)['fragment']
def assemble_doctree(self):
+ # type: () -> nodes.Node
master = self.config.master_doc
tree = self.env.get_doctree(master)
tree = inline_all_toctrees(self, set(), master, tree, darkgreen, [master])
@@ -981,6 +1038,7 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
return tree
def assemble_toc_secnumbers(self):
+ # type: () -> Dict[unicode, Dict[unicode, Tuple[int, ...]]]
# Assemble toc_secnumbers to resolve section numbers on SingleHTML.
# Merge all secnumbers to single secnumber.
#
@@ -990,14 +1048,16 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_secnumber().
- new_secnumbers = {}
+ new_secnumbers = {} # type: Dict[unicode, Tuple[int, ...]]
for docname, secnums in iteritems(self.env.toc_secnumbers):
for id, secnum in iteritems(secnums):
- new_secnumbers[(docname, id)] = secnum
+ alias = "%s/%s" % (docname, id)
+ new_secnumbers[alias] = secnum
return {self.config.master_doc: new_secnumbers}
def assemble_toc_fignumbers(self):
+ # type: () -> Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
# Assemble toc_fignumbers to resolve figure numbers on SingleHTML.
# Merge all fignumbers to single fignumber.
#
@@ -1007,19 +1067,22 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
#
# There are related codes in inline_all_toctres() and
# HTMLTranslter#add_fignumber().
- new_fignumbers = {}
+ new_fignumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
# {u'foo': {'figure': {'id2': (2,), 'id1': (1,)}}, u'bar': {'figure': {'id1': (3,)}}}
for docname, fignumlist in iteritems(self.env.toc_fignumbers):
for figtype, fignums in iteritems(fignumlist):
- new_fignumbers.setdefault((docname, figtype), {})
+ alias = "%s/%s" % (docname, figtype)
+ new_fignumbers.setdefault(alias, {})
for id, fignum in iteritems(fignums):
- new_fignumbers[(docname, figtype)][id] = fignum
+ new_fignumbers[alias][id] = fignum
return {self.config.master_doc: new_fignumbers}
def get_doc_context(self, docname, body, metatags):
+ # type: (unicode, unicode, Dict) -> Dict
# no relation links...
- toc = self.env.get_toctree_for(self.config.master_doc, self, False)
+ toc = TocTree(self.env).get_toctree_for(self.config.master_doc,
+ self, False)
# if there is no toctree, toc is None
if toc:
self.fix_refuris(toc)
@@ -1044,25 +1107,27 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
)
def write(self, *ignored):
+ # type: (Any) -> None
docnames = self.env.all_docs
- self.info(bold('preparing documents... '), nonl=True)
+ logger.info(bold('preparing documents... '), nonl=True)
self.prepare_writing(docnames)
- self.info('done')
+ logger.info('done')
- self.info(bold('assembling single document... '), nonl=True)
+ logger.info(bold('assembling single document... '), nonl=True)
doctree = self.assemble_doctree()
self.env.toc_secnumbers = self.assemble_toc_secnumbers()
self.env.toc_fignumbers = self.assemble_toc_fignumbers()
- self.info()
- self.info(bold('writing... '), nonl=True)
+ logger.info('')
+ logger.info(bold('writing... '), nonl=True)
self.write_doc_serialized(self.config.master_doc, doctree)
self.write_doc(self.config.master_doc, doctree)
- self.info('done')
+ logger.info('done')
def finish(self):
+ # type: () -> None
# no indices or search pages are supported
- self.info(bold('writing additional files...'), nonl=1)
+ logger.info(bold('writing additional files...'), nonl=1)
# additional pages from conf.py
for pagename, template in self.config.html_additional_pages.items():
@@ -1070,11 +1135,11 @@ class SingleFileHTMLBuilder(StandaloneHTMLBuilder):
self.handle_page(pagename, {}, template)
if self.config.html_use_opensearch:
- self.info(' opensearch', nonl=1)
+ logger.info(' opensearch', nonl=1)
fn = path.join(self.outdir, '_static', 'opensearch.xml')
self.handle_page('opensearch', {}, 'opensearch.xml', outfilename=fn)
- self.info()
+ logger.info('')
self.copy_image_files()
self.copy_download_files()
@@ -1091,18 +1156,19 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
#: the serializing implementation to use. Set this to a module that
#: implements a `dump`, `load`, `dumps` and `loads` functions
#: (pickle, simplejson etc.)
- implementation = None
+ implementation = None # type: Any
implementation_dumps_unicode = False
#: additional arguments for dump()
- additional_dump_args = ()
+ additional_dump_args = () # type: Tuple
#: the filename for the global context file
- globalcontext_filename = None
+ globalcontext_filename = None # type: unicode
supported_image_types = ['image/svg+xml', 'image/png',
'image/gif', 'image/jpeg']
def init(self):
+ # type: () -> None
self.config_hash = ''
self.tags_hash = ''
self.imagedir = '_images'
@@ -1115,6 +1181,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
self.use_index = self.get_builder_config('use_index', 'html')
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if docname == 'index':
return ''
if docname.endswith(SEP + 'index'):
@@ -1122,15 +1189,17 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
return docname + SEP
def dump_context(self, context, filename):
+ # type: (Dict, unicode) -> None
if self.implementation_dumps_unicode:
- f = codecs.open(filename, 'w', encoding='utf-8')
+ f = codecs.open(filename, 'w', encoding='utf-8') # type: ignore
else:
- f = open(filename, 'wb')
+ f = open(filename, 'wb') # type: ignore
with f:
self.implementation.dump(context, f, *self.additional_dump_args)
def handle_page(self, pagename, ctx, templatename='page.html',
outfilename=None, event_arg=None):
+ # type: (unicode, Dict, unicode, unicode, Any) -> None
ctx['current_page_name'] = pagename
self.add_sidebars(pagename, ctx)
@@ -1154,6 +1223,7 @@ class SerializingHTMLBuilder(StandaloneHTMLBuilder):
copyfile(self.env.doc2path(pagename), source_name)
def handle_finish(self):
+ # type: () -> None
# dump the global context
outfilename = path.join(self.outdir, self.globalcontext_filename)
self.dump_context(self.globalcontext, outfilename)
@@ -1204,16 +1274,12 @@ class JSONHTMLBuilder(SerializingHTMLBuilder):
searchindex_filename = 'searchindex.json'
def init(self):
+ # type: () -> None
SerializingHTMLBuilder.init(self)
-def validate_config_values(app):
- if app.config.html_translator_class:
- app.warn('html_translator_class is deprecated. '
- 'Use Sphinx.set_translator() API instead.')
-
-
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
# builders
app.add_builder(StandaloneHTMLBuilder)
app.add_builder(DirectoryHTMLBuilder)
@@ -1221,8 +1287,6 @@ def setup(app):
app.add_builder(PickleHTMLBuilder)
app.add_builder(JSONHTMLBuilder)
- app.connect('builder-inited', validate_config_values)
-
# config values
app.add_config_value('html_theme', 'alabaster', 'html')
app.add_config_value('html_theme_path', [], 'html')
@@ -1240,7 +1304,6 @@ def setup(app):
app.add_config_value('html_use_smartypants', True, 'html')
app.add_config_value('html_sidebars', {}, 'html')
app.add_config_value('html_additional_pages', {}, 'html')
- app.add_config_value('html_use_modindex', True, 'html') # deprecated
app.add_config_value('html_domain_indices', True, 'html', [list])
app.add_config_value('html_add_permalinks', u'\u00B6', 'html')
app.add_config_value('html_use_index', True, 'html')
@@ -1261,6 +1324,7 @@ def setup(app):
app.add_config_value('html_search_options', {}, 'html')
app.add_config_value('html_search_scorer', '', None)
app.add_config_value('html_scaled_image_link', True, 'html')
+ app.add_config_value('html_experimental_html5_writer', False, 'html')
return {
'version': 'builtin',
diff --git a/sphinx/builders/htmlhelp.py b/sphinx/builders/htmlhelp.py
index e192a4eaf..764b01aae 100644
--- a/sphinx/builders/htmlhelp.py
+++ b/sphinx/builders/htmlhelp.py
@@ -18,10 +18,20 @@ from os import path
from docutils import nodes
from sphinx import addnodes
-from sphinx.util.osutil import make_filename
from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.environment.adapters.indexentries import IndexEntries
+from sphinx.util import logging
+from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import htmlescape
+if False:
+ # For type annotation
+ from typing import Any, Dict, IO, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
# Project file (*.hhp) template. 'outname' is the file basename (like
# the pythlp in pythlp.hhp); 'version' is the doc version number (like
@@ -181,6 +191,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
encoding = 'cp1252'
def init(self):
+ # type: () -> None
StandaloneHTMLBuilder.init(self)
# the output files for HTML help must be .html only
self.out_suffix = '.html'
@@ -191,14 +202,21 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.lcid, self.encoding = locale
def open_file(self, outdir, basename, mode='w'):
+ # type: (unicode, unicode, unicode) -> IO
# open a file with the correct encoding for the selected language
- return codecs.open(path.join(outdir, basename), mode,
+ return codecs.open(path.join(outdir, basename), mode, # type: ignore
self.encoding, 'xmlcharrefreplace')
+ def update_page_context(self, pagename, templatename, ctx, event_arg):
+ # type: (unicode, unicode, Dict, unicode) -> None
+ ctx['encoding'] = self.encoding
+
def handle_finish(self):
+ # type: () -> None
self.build_hhx(self.outdir, self.config.htmlhelp_basename)
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
for node in doctree.traverse(nodes.reference):
# add ``target=_blank`` attributes to external links
if node.get('internal') is None and 'refuri' in node:
@@ -207,12 +225,13 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
StandaloneHTMLBuilder.write_doc(self, docname, doctree)
def build_hhx(self, outdir, outname):
- self.info('dumping stopword list...')
+ # type: (unicode, unicode) -> None
+ logger.info('dumping stopword list...')
with self.open_file(outdir, outname + '.stp') as f:
for word in sorted(stopwords):
print(word, file=f)
- self.info('writing project file...')
+ logger.info('writing project file...')
with self.open_file(outdir, outname + '.hhp') as f:
f.write(project_template % {
'outname': outname,
@@ -233,7 +252,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
print(path.join(root, fn)[olen:].replace(os.sep, '\\'),
file=f)
- self.info('writing TOC file...')
+ logger.info('writing TOC file...')
with self.open_file(outdir, outname + '.hhc') as f:
f.write(contents_header)
# special books
@@ -247,6 +266,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
self.config.master_doc, self, prune_toctrees=False)
def write_toc(node, ullevel=0):
+ # type: (nodes.Node, int) -> None
if isinstance(node, nodes.list_item):
f.write('<LI> ')
for subnode in node:
@@ -267,19 +287,22 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
write_toc(subnode, ullevel)
def istoctree(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
for node in tocdoc.traverse(istoctree):
write_toc(node)
f.write(contents_footer)
- self.info('writing index file...')
- index = self.env.create_index(self)
+ logger.info('writing index file...')
+ index = IndexEntries(self.env).create_index(self)
with self.open_file(outdir, outname + '.hhk') as f:
f.write('<UL>\n')
def write_index(title, refs, subitems):
+ # type: (unicode, List[Tuple[unicode, unicode]], List[Tuple[unicode, List[Tuple[unicode, unicode]]]]) -> None # NOQA
def write_param(name, value):
+ # type: (unicode, unicode) -> None
item = ' <param name="%s" value="%s">\n' % \
(name, value)
f.write(item)
@@ -308,6 +331,7 @@ class HTMLHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(HTMLHelpBuilder)
diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py
index 13e379bab..a57105c08 100644
--- a/sphinx/builders/latex.py
+++ b/sphinx/builders/latex.py
@@ -10,16 +10,19 @@
"""
import os
+import warnings
from os import path
from six import iteritems
+
from docutils import nodes
from docutils.io import FileOutput
from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx import package_dir, addnodes, highlighting
-from sphinx.util import texescape
+from sphinx.deprecation import RemovedInSphinx17Warning
+from sphinx.util import texescape, logging
from sphinx.config import string_classes, ENUM
from sphinx.errors import SphinxError
from sphinx.locale import _
@@ -28,9 +31,18 @@ from sphinx.environment import NoUri
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.osutil import SEP, make_filename
-from sphinx.util.console import bold, darkgreen
+from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.writers.latex import LaTeXWriter
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+
+
+logger = logging.getLogger(__name__)
+
class LaTeXBuilder(Builder):
"""
@@ -41,44 +53,50 @@ class LaTeXBuilder(Builder):
supported_image_types = ['application/pdf', 'image/png', 'image/jpeg']
def init(self):
- self.docnames = []
- self.document_data = []
- self.usepackages = []
+ # type: () -> None
+ self.docnames = [] # type: Iterable[unicode]
+ self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
+ self.usepackages = [] # type: List[unicode]
texescape.init()
def get_outdated_docs(self):
+ # type: () -> Union[unicode, List[unicode]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
+ # type: (unicode, unicode, unicode) -> unicode
# ignore source path
return self.get_target_uri(to, typ)
def init_document_data(self):
+ # type: () -> None
preliminary_document_data = [list(x) for x in self.config.latex_documents]
if not preliminary_document_data:
- self.warn('no "latex_documents" config value found; no documents '
- 'will be written')
+ logger.warning('no "latex_documents" config value found; no documents '
+ 'will be written')
return
# assign subdirs to titles
- self.titles = []
+ self.titles = [] # type: List[Tuple[unicode, unicode]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
- self.warn('"latex_documents" config value references unknown '
- 'document %s' % docname)
+ logger.warning('"latex_documents" config value references unknown '
+ 'document %s', docname)
continue
- self.document_data.append(entry)
+ self.document_data.append(entry) # type: ignore
if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
def write_stylesheet(self):
+ # type: () -> None
highlighter = highlighting.PygmentsBridge(
'latex', self.config.pygments_style, self.config.trim_doctest_flags)
stylesheet = path.join(self.outdir, 'sphinxhighlight.sty')
@@ -86,9 +104,10 @@ class LaTeXBuilder(Builder):
f.write('\\NeedsTeXFormat{LaTeX2e}[1995/12/01]\n')
f.write('\\ProvidesPackage{sphinxhighlight}'
'[2016/05/29 stylesheet for highlighting with pygments]\n\n')
- f.write(highlighter.get_stylesheet())
+ f.write(highlighter.get_stylesheet()) # type: ignore
def write(self, *ignored):
+ # type: (Any) -> None
docwriter = LaTeXWriter(self)
docsettings = OptionParser(
defaults=self.env.settings,
@@ -106,7 +125,7 @@ class LaTeXBuilder(Builder):
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
- self.info("processing " + targetname + "... ", nonl=1)
+ logger.info("processing %s...", targetname, nonl=1)
toctrees = self.env.get_doctree(docname).traverse(addnodes.toctree)
if toctrees:
if toctrees[0].get('maxdepth') > 0:
@@ -120,7 +139,7 @@ class LaTeXBuilder(Builder):
appendices=((docclass != 'howto') and self.config.latex_appendices or []))
doctree['tocdepth'] = tocdepth
self.post_process_images(doctree)
- self.info("writing... ", nonl=1)
+ logger.info("writing... ", nonl=1)
doctree.settings = docsettings
doctree.settings.author = author
doctree.settings.title = title
@@ -128,9 +147,10 @@ class LaTeXBuilder(Builder):
doctree.settings.docname = docname
doctree.settings.docclass = docclass
docwriter.write(doctree, destination)
- self.info("done")
+ logger.info("done")
def get_contentsname(self, indexfile):
+ # type: (unicode) -> unicode
tree = self.env.get_doctree(indexfile)
contentsname = None
for toctree in tree.traverse(addnodes.toctree):
@@ -141,8 +161,9 @@ class LaTeXBuilder(Builder):
return contentsname
def assemble_doctree(self, indexfile, toctree_only, appendices):
+ # type: (unicode, bool, List[unicode]) -> nodes.Node
self.docnames = set([indexfile] + appendices)
- self.info(darkgreen(indexfile) + " ", nonl=1)
+ logger.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
@@ -163,8 +184,8 @@ class LaTeXBuilder(Builder):
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
- self.info()
- self.info("resolving references...")
+ logger.info('')
+ logger.info("resolving references...")
self.env.resolve_references(largetree, indexfile, self)
# resolve :ref:s to distant tex files -- we can't add a cross-reference,
# but append the document name
@@ -184,18 +205,19 @@ class LaTeXBuilder(Builder):
return largetree
def finish(self):
+ # type: () -> None
# copy image files
if self.images:
- self.info(bold('copying images...'), nonl=1)
+ logger.info(bold('copying images...'), nonl=1)
for src, dest in iteritems(self.images):
- self.info(' ' + src, nonl=1)
+ logger.info(' ' + src, nonl=1)
copy_asset_file(path.join(self.srcdir, src),
path.join(self.outdir, dest))
- self.info()
+ logger.info('')
# copy TeX support files from texinputs
context = {'latex_engine': self.config.latex_engine}
- self.info(bold('copying TeX support files...'))
+ logger.info(bold('copying TeX support files...'))
staticdirname = path.join(package_dir, 'texinputs')
for filename in os.listdir(staticdirname):
if not filename.startswith('.'):
@@ -204,11 +226,11 @@ class LaTeXBuilder(Builder):
# copy additional files
if self.config.latex_additional_files:
- self.info(bold('copying additional files...'), nonl=1)
+ logger.info(bold('copying additional files...'), nonl=1)
for filename in self.config.latex_additional_files:
- self.info(' ' + filename, nonl=1)
+ logger.info(' ' + filename, nonl=1)
copy_asset_file(path.join(self.confdir, filename), self.outdir)
- self.info()
+ logger.info('')
# the logo is handled differently
if self.config.latex_logo:
@@ -216,59 +238,34 @@ class LaTeXBuilder(Builder):
raise SphinxError('logo file %r does not exist' % self.config.latex_logo)
else:
copy_asset_file(path.join(self.confdir, self.config.latex_logo), self.outdir)
- self.info('done')
+ logger.info('done')
def validate_config_values(app):
+ # type: (Sphinx) -> None
if app.config.latex_toplevel_sectioning not in (None, 'part', 'chapter', 'section'):
- app.warn('invalid latex_toplevel_sectioning, ignored: %s' %
- app.config.latex_toplevel_sectioning)
- app.config.latex_toplevel_sectioning = None
-
- if app.config.latex_use_parts:
- if app.config.latex_toplevel_sectioning:
- app.warn('latex_use_parts conflicts with latex_toplevel_sectioning, ignored.')
- else:
- app.warn('latex_use_parts is deprecated. Use latex_toplevel_sectioning instead.')
- app.config.latex_toplevel_sectioning = 'part'
-
- if app.config.latex_use_modindex is not True: # changed by user
- app.warn('latex_use_modindex is deprecated. Use latex_domain_indices instead.')
-
- if app.config.latex_preamble:
- if app.config.latex_elements.get('preamble'):
- app.warn("latex_preamble conflicts with latex_elements['preamble'], ignored.")
- else:
- app.warn("latex_preamble is deprecated. Use latex_elements['preamble'] instead.")
- app.config.latex_elements['preamble'] = app.config.latex_preamble
-
- if app.config.latex_paper_size != 'letter':
- if app.config.latex_elements.get('papersize'):
- app.warn("latex_paper_size conflicts with latex_elements['papersize'], ignored.")
- else:
- app.warn("latex_paper_size is deprecated. "
- "Use latex_elements['papersize'] instead.")
- if app.config.latex_paper_size:
- app.config.latex_elements['papersize'] = app.config.latex_paper_size + 'paper'
-
- if app.config.latex_font_size != '10pt':
- if app.config.latex_elements.get('pointsize'):
- app.warn("latex_font_size conflicts with latex_elements['pointsize'], ignored.")
- else:
- app.warn("latex_font_size is deprecated. Use latex_elements['pointsize'] instead.")
- app.config.latex_elements['pointsize'] = app.config.latex_font_size
+ logger.warning('invalid latex_toplevel_sectioning, ignored: %s',
+ app.config.latex_toplevel_sectioning)
+ app.config.latex_toplevel_sectioning = None # type: ignore
if 'footer' in app.config.latex_elements:
if 'postamble' in app.config.latex_elements:
- app.warn("latex_elements['footer'] conflicts with "
- "latex_elements['postamble'], ignored.")
+ logger.warning("latex_elements['footer'] conflicts with "
+ "latex_elements['postamble'], ignored.")
else:
- app.warn("latex_elements['footer'] is deprecated. "
- "Use latex_elements['preamble'] instead.")
+ warnings.warn("latex_elements['footer'] is deprecated. "
+ "Use latex_elements['preamble'] instead.",
+ RemovedInSphinx17Warning)
app.config.latex_elements['postamble'] = app.config.latex_elements['footer']
+ if app.config.latex_keep_old_macro_names:
+ warnings.warn("latex_keep_old_macro_names is deprecated. "
+ "LaTeX markup since Sphinx 1.4.5 uses only prefixed macro names.",
+ RemovedInSphinx17Warning)
+
def default_latex_engine(config):
+ # type: (Config) -> unicode
""" Better default latex_engine settings for specific languages. """
if config.language == 'ja':
return 'platex'
@@ -277,6 +274,7 @@ def default_latex_engine(config):
def default_latex_docclass(config):
+ # type: (Config) -> Dict[unicode, unicode]
""" Better default latex_docclass settings for specific languages. """
if config.language == 'ja':
return {'manual': 'jsbook',
@@ -286,6 +284,7 @@ def default_latex_docclass(config):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(LaTeXBuilder)
app.connect('builder-inited', validate_config_values)
@@ -297,24 +296,16 @@ def setup(app):
None)
app.add_config_value('latex_logo', None, None, string_classes)
app.add_config_value('latex_appendices', [], None)
- app.add_config_value('latex_keep_old_macro_names', True, None)
- # now deprecated - use latex_toplevel_sectioning
- app.add_config_value('latex_use_parts', False, None)
+ app.add_config_value('latex_keep_old_macro_names', False, None)
+ app.add_config_value('latex_use_latex_multicolumn', False, None)
app.add_config_value('latex_toplevel_sectioning', None, None, [str])
- app.add_config_value('latex_use_modindex', True, None) # deprecated
app.add_config_value('latex_domain_indices', True, None, [list])
app.add_config_value('latex_show_urls', 'no', None)
app.add_config_value('latex_show_pagerefs', False, None)
- # paper_size and font_size are still separate values
- # so that you can give them easily on the command line
- app.add_config_value('latex_paper_size', 'letter', None)
- app.add_config_value('latex_font_size', '10pt', None)
app.add_config_value('latex_elements', {}, None)
app.add_config_value('latex_additional_files', [], None)
app.add_config_value('latex_docclass', default_latex_docclass, None)
- # now deprecated - use latex_elements
- app.add_config_value('latex_preamble', '', None)
return {
'version': 'builtin',
diff --git a/sphinx/builders/linkcheck.py b/sphinx/builders/linkcheck.py
index 4035e6fe1..722041d5b 100644
--- a/sphinx/builders/linkcheck.py
+++ b/sphinx/builders/linkcheck.py
@@ -16,7 +16,7 @@ import threading
from os import path
from requests.exceptions import HTTPError
-from six.moves import queue, html_parser
+from six.moves import queue, html_parser # type: ignore
from six.moves.urllib.parse import unquote
from docutils import nodes
@@ -25,22 +25,33 @@ from docutils import nodes
# going to just remove it. If it doesn't exist, define an exception that will
# never be caught but leaves the code in check_anchor() intact.
try:
- from six.moves.html_parser import HTMLParseError
+ from six.moves.html_parser import HTMLParseError # type: ignore
except ImportError:
- class HTMLParseError(Exception):
+ class HTMLParseError(Exception): # type: ignore
pass
from sphinx.builders import Builder
-from sphinx.util import encode_uri, requests
-from sphinx.util.console import purple, red, darkgreen, darkgray, \
- darkred, turquoise
+from sphinx.util import encode_uri, requests, logging
+from sphinx.util.console import ( # type: ignore
+ purple, red, darkgreen, darkgray, darkred, turquoise
+)
from sphinx.util.requests import is_ssl_error
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Set, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.util.requests.requests import Response # NOQA
+
+
+logger = logging.getLogger(__name__)
+
class AnchorCheckParser(html_parser.HTMLParser):
"""Specialized HTML parser that looks for a specific anchor."""
def __init__(self, search_anchor):
+ # type: (unicode) -> None
html_parser.HTMLParser.__init__(self)
self.search_anchor = search_anchor
@@ -54,6 +65,7 @@ class AnchorCheckParser(html_parser.HTMLParser):
def check_anchor(response, anchor):
+ # type: (Response, unicode) -> bool
"""Reads HTML data from a response object `response` searching for `anchor`.
Returns True if anchor was found, False otherwise.
"""
@@ -80,12 +92,13 @@ class CheckExternalLinksBuilder(Builder):
name = 'linkcheck'
def init(self):
+ # type: () -> None
self.to_ignore = [re.compile(x) for x in self.app.config.linkcheck_ignore]
self.anchors_ignore = [re.compile(x)
for x in self.app.config.linkcheck_anchors_ignore]
- self.good = set()
- self.broken = {}
- self.redirected = {}
+ self.good = set() # type: Set[unicode]
+ self.broken = {} # type: Dict[unicode, unicode]
+ self.redirected = {} # type: Dict[unicode, Tuple[unicode, int]]
# set a timeout for non-responding servers
socket.setdefaulttimeout(5.0)
# create output file
@@ -94,7 +107,7 @@ class CheckExternalLinksBuilder(Builder):
# create queues and worker threads
self.wqueue = queue.Queue()
self.rqueue = queue.Queue()
- self.workers = []
+ self.workers = [] # type: List[threading.Thread]
for i in range(self.app.config.linkcheck_workers):
thread = threading.Thread(target=self.check_thread)
thread.setDaemon(True)
@@ -102,6 +115,7 @@ class CheckExternalLinksBuilder(Builder):
self.workers.append(thread)
def check_thread(self):
+ # type: () -> None
kwargs = {}
if self.app.config.linkcheck_timeout:
kwargs['timeout'] = self.app.config.linkcheck_timeout
@@ -109,6 +123,7 @@ class CheckExternalLinksBuilder(Builder):
kwargs['allow_redirects'] = True
def check_uri():
+ # type: () -> Tuple[unicode, unicode, int]
# split off anchor
if '#' in uri:
req_url, anchor = uri.split('#', 1)
@@ -172,6 +187,7 @@ class CheckExternalLinksBuilder(Builder):
return 'redirected', new_url, 0
def check():
+ # type: () -> Tuple[unicode, unicode, int]
# check for various conditions without bothering the network
if len(uri) == 0 or uri.startswith(('#', 'mailto:', 'ftp:')):
return 'unchecked', '', 0
@@ -210,30 +226,31 @@ class CheckExternalLinksBuilder(Builder):
self.rqueue.put((uri, docname, lineno, status, info, code))
def process_result(self, result):
+ # type: (Tuple[unicode, unicode, int, unicode, unicode, int]) -> None
uri, docname, lineno, status, info, code = result
if status == 'unchecked':
return
if status == 'working' and info == 'old':
return
if lineno:
- self.info('(line %4d) ' % lineno, nonl=1)
+ logger.info('(line %4d) ', lineno, nonl=1)
if status == 'ignored':
if info:
- self.info(darkgray('-ignored- ') + uri + ': ' + info)
+ logger.info(darkgray('-ignored- ') + uri + ': ' + info)
else:
- self.info(darkgray('-ignored- ') + uri)
+ logger.info(darkgray('-ignored- ') + uri)
elif status == 'local':
- self.info(darkgray('-local- ') + uri)
+ logger.info(darkgray('-local- ') + uri)
self.write_entry('local', docname, lineno, uri)
elif status == 'working':
- self.info(darkgreen('ok ') + uri + info)
+ logger.info(darkgreen('ok ') + uri + info)
elif status == 'broken':
self.write_entry('broken', docname, lineno, uri + ': ' + info)
if self.app.quiet or self.app.warningiserror:
- self.warn('broken link: %s (%s)' % (uri, info),
- '%s:%s' % (self.env.doc2path(docname), lineno))
+ logger.warning('broken link: %s (%s)', uri, info,
+ location=(self.env.doc2path(docname), lineno))
else:
- self.info(red('broken ') + uri + red(' - ' + info))
+ logger.info(red('broken ') + uri + red(' - ' + info))
elif status == 'redirected':
text, color = {
301: ('permanently', darkred),
@@ -244,19 +261,23 @@ class CheckExternalLinksBuilder(Builder):
}[code]
self.write_entry('redirected ' + text, docname, lineno,
uri + ' to ' + info)
- self.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
+ logger.info(color('redirect ') + uri + color(' - ' + text + ' to ' + info))
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
+ # type: () -> Set[unicode]
return self.env.found_docs
def prepare_writing(self, docnames):
+ # type: (nodes.Node) -> None
return
def write_doc(self, docname, doctree):
- self.info()
+ # type: (unicode, nodes.Node) -> None
+ logger.info('')
n = 0
for node in doctree.traverse(nodes.reference):
if 'refuri' not in node:
@@ -279,17 +300,19 @@ class CheckExternalLinksBuilder(Builder):
self.app.statuscode = 1
def write_entry(self, what, docname, line, uri):
- output = codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8')
- output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
- line, what, uri))
- output.close()
+ # type: (unicode, unicode, int, unicode) -> None
+ with codecs.open(path.join(self.outdir, 'output.txt'), 'a', 'utf-8') as output: # type: ignore # NOQA
+ output.write("%s:%s: [%s] %s\n" % (self.env.doc2path(docname, None),
+ line, what, uri))
def finish(self):
+ # type: () -> None
for worker in self.workers:
self.wqueue.put((None, None, None), False)
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(CheckExternalLinksBuilder)
app.add_config_value('linkcheck_ignore', [], None)
diff --git a/sphinx/builders/manpage.py b/sphinx/builders/manpage.py
index 83c179431..7ee2957be 100644
--- a/sphinx/builders/manpage.py
+++ b/sphinx/builders/manpage.py
@@ -12,17 +12,27 @@
from os import path
from six import string_types
+
from docutils.io import FileOutput
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.builders import Builder
from sphinx.environment import NoUri
+from sphinx.util import logging
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import make_filename
-from sphinx.util.console import bold, darkgreen
+from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.writers.manpage import ManualPageWriter
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Set, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
class ManualPageBuilder(Builder):
"""
@@ -30,29 +40,33 @@ class ManualPageBuilder(Builder):
"""
name = 'man'
format = 'man'
- supported_image_types = []
+ supported_image_types = [] # type: List[unicode]
def init(self):
+ # type: () -> None
if not self.config.man_pages:
- self.warn('no "man_pages" config value found; no manual pages '
- 'will be written')
+ logger.warning('no "man_pages" config value found; no manual pages '
+ 'will be written')
def get_outdated_docs(self):
+ # type: () -> Union[unicode, List[unicode]]
return 'all manpages' # for now
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if typ == 'token':
return ''
raise NoUri
def write(self, *ignored):
+ # type: (Any) -> None
docwriter = ManualPageWriter(self)
docsettings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
read_config_files=True).get_default_values()
- self.info(bold('writing... '), nonl=True)
+ logger.info(bold('writing... '), nonl=True)
for info in self.config.man_pages:
docname, name, description, authors, section = info
@@ -63,16 +77,16 @@ class ManualPageBuilder(Builder):
authors = []
targetname = '%s.%s' % (name, section)
- self.info(darkgreen(targetname) + ' { ', nonl=True)
+ logger.info(darkgreen(targetname) + ' { ', nonl=True)
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
tree = self.env.get_doctree(docname)
- docnames = set()
+ docnames = set() # type: Set[unicode]
largetree = inline_all_toctrees(self, docnames, docname, tree,
darkgreen, [docname])
- self.info('} ', nonl=True)
+ logger.info('} ', nonl=True)
self.env.resolve_references(largetree, docname, self)
# remove pending_xref nodes
for pendingnode in largetree.traverse(addnodes.pending_xref):
@@ -85,13 +99,15 @@ class ManualPageBuilder(Builder):
largetree.settings.section = section
docwriter.write(largetree, destination)
- self.info()
+ logger.info('')
def finish(self):
+ # type: () -> None
pass
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(ManualPageBuilder)
app.add_config_value('man_pages',
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index ae2da030e..ac3117733 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -16,14 +16,24 @@ import posixpath
from os import path
from six import text_type
+
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
-from sphinx.util import force_decode
+from sphinx.environment.adapters.indexentries import IndexEntries
+from sphinx.util import force_decode, logging
from sphinx.util.osutil import make_filename
from sphinx.util.pycompat import htmlescape
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
_idpattern = re.compile(
r'(?P<title>.+) (\((class in )?(?P<id>[\w\.]+)( (?P<descr>\w+))?\))$')
@@ -115,6 +125,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
search = False
def init(self):
+ # type: () -> None
StandaloneHTMLBuilder.init(self)
# the output files for HTML help must be .html only
self.out_suffix = '.html'
@@ -122,19 +133,23 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
# self.config.html_style = 'traditional.css'
def get_theme_config(self):
+ # type: () -> Tuple[unicode, Dict]
return self.config.qthelp_theme, self.config.qthelp_theme_options
def handle_finish(self):
+ # type: () -> None
self.build_qhp(self.outdir, self.config.qthelp_basename)
def build_qhp(self, outdir, outname):
- self.info('writing project file...')
+ # type: (unicode, unicode) -> None
+ logger.info('writing project file...')
# sections
tocdoc = self.env.get_and_resolve_doctree(self.config.master_doc, self,
prune_toctrees=False)
def istoctree(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, addnodes.compact_paragraph) and \
'toctree' in node
sections = []
@@ -153,15 +168,15 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
new_sections.append(force_decode(section, None))
else:
new_sections.append(section)
- sections = u'\n'.join(new_sections)
+ sections = u'\n'.join(new_sections) # type: ignore
# keywords
keywords = []
- index = self.env.create_index(self, group_entries=False)
+ index = IndexEntries(self.env).create_index(self, group_entries=False)
for (key, group) in index:
for title, (refs, subitems, key_) in group:
keywords.extend(self.build_keywords(title, refs, subitems))
- keywords = u'\n'.join(keywords)
+ keywords = u'\n'.join(keywords) # type: ignore
# files
if not outdir.endswith(os.sep):
@@ -179,7 +194,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
filename = path.join(root, fn)[olen:]
projectfiles.append(file_template %
{'filename': htmlescape(filename)})
- projectfiles = '\n'.join(projectfiles)
+ projectfiles = '\n'.join(projectfiles) # type: ignore
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
@@ -190,8 +205,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
nspace = nspace.lower()
# write the project file
- with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f:
- f.write(project_template % {
+ with codecs.open(path.join(outdir, outname + '.qhp'), 'w', 'utf-8') as f: # type: ignore # NOQA
+ f.write(project_template % { # type: ignore
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_title),
'version': htmlescape(self.config.version),
@@ -206,15 +221,16 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
nspace, 'doc', self.get_target_uri(self.config.master_doc))
startpage = 'qthelp://' + posixpath.join(nspace, 'doc', 'index.html')
- self.info('writing collection project file...')
- with codecs.open(path.join(outdir, outname + '.qhcp'), 'w', 'utf-8') as f:
- f.write(collection_template % {
+ logger.info('writing collection project file...')
+ with codecs.open(path.join(outdir, outname + '.qhcp'), 'w', 'utf-8') as f: # type: ignore # NOQA
+ f.write(collection_template % { # type: ignore
'outname': htmlescape(outname),
'title': htmlescape(self.config.html_short_title),
'homepage': htmlescape(homepage),
'startpage': htmlescape(startpage)})
def isdocnode(self, node):
+ # type: (nodes.Node) -> bool
if not isinstance(node, nodes.list_item):
return False
if len(node.children) != 2:
@@ -228,8 +244,9 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return True
def write_toc(self, node, indentlevel=4):
+ # type: (nodes.Node, int) -> List[unicode]
# XXX this should return a Unicode string, not a bytestring
- parts = []
+ parts = [] # type: List[unicode]
if self.isdocnode(node):
refnode = node.children[0][0]
link = refnode['refuri']
@@ -247,7 +264,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
link = node['refuri']
title = htmlescape(node.astext()).replace('"', '&quot;')
item = section_template % {'title': title, 'ref': link}
- item = u' ' * 4 * indentlevel + item
+ item = u' ' * 4 * indentlevel + item # type: ignore
parts.append(item.encode('ascii', 'xmlcharrefreplace'))
elif isinstance(node, nodes.bullet_list):
for subnode in node:
@@ -259,7 +276,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return parts
def keyword_item(self, name, ref):
- matchobj = _idpattern.match(name)
+ # type: (unicode, Any) -> unicode
+ matchobj = _idpattern.match(name) # type: ignore
if matchobj:
groupdict = matchobj.groupdict()
shortname = groupdict['title']
@@ -280,7 +298,8 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
return item
def build_keywords(self, title, refs, subitems):
- keywords = []
+ # type: (unicode, List[Any], Any) -> List[unicode]
+ keywords = [] # type: List[unicode]
title = htmlescape(title)
# if len(refs) == 0: # XXX
@@ -304,6 +323,7 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.builders.html')
app.add_builder(QtHelpBuilder)
diff --git a/sphinx/builders/texinfo.py b/sphinx/builders/texinfo.py
index f0216a32f..823290255 100644
--- a/sphinx/builders/texinfo.py
+++ b/sphinx/builders/texinfo.py
@@ -12,6 +12,7 @@
from os import path
from six import iteritems
+
from docutils import nodes
from docutils.io import FileOutput
from docutils.utils import new_document
@@ -21,11 +22,19 @@ from sphinx import addnodes
from sphinx.locale import _
from sphinx.builders import Builder
from sphinx.environment import NoUri
+from sphinx.util import logging
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, copyfile, make_filename
-from sphinx.util.console import bold, darkgreen
+from sphinx.util.console import bold, darkgreen # type: ignore
from sphinx.writers.texinfo import TexinfoWriter
+if False:
+ # For type annotation
+ from sphinx.application import Sphinx # NOQA
+ from typing import Any, Dict, Iterable, List, Tuple, Union # NOQA
+
+
+logger = logging.getLogger(__name__)
TEXINFO_MAKEFILE = '''\
# Makefile for Sphinx Texinfo output
@@ -91,47 +100,53 @@ class TexinfoBuilder(Builder):
'image/gif']
def init(self):
- self.docnames = []
- self.document_data = []
+ # type: () -> None
+ self.docnames = [] # type: Iterable[unicode]
+ self.document_data = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode, unicode, unicode, bool]] # NOQA
def get_outdated_docs(self):
+ # type: () -> Union[unicode, List[unicode]]
return 'all documents' # for now
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
if docname not in self.docnames:
raise NoUri
else:
return '%' + docname
def get_relative_uri(self, from_, to, typ=None):
+ # type: (unicode, unicode, unicode) -> unicode
# ignore source path
return self.get_target_uri(to, typ)
def init_document_data(self):
+ # type: () -> None
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
- self.warn('no "texinfo_documents" config value found; no documents '
- 'will be written')
+ logger.warning('no "texinfo_documents" config value found; no documents '
+ 'will be written')
return
# assign subdirs to titles
- self.titles = []
+ self.titles = [] # type: List[Tuple[unicode, unicode]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
- self.warn('"texinfo_documents" config value references unknown '
- 'document %s' % docname)
+ logger.warning('"texinfo_documents" config value references unknown '
+ 'document %s', docname)
continue
- self.document_data.append(entry)
+ self.document_data.append(entry) # type: ignore
if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
def write(self, *ignored):
+ # type: (Any) -> None
self.init_document_data()
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
- direntry = description = category = ''
+ direntry = description = category = '' # type: unicode
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
@@ -140,11 +155,11 @@ class TexinfoBuilder(Builder):
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
- self.info("processing " + targetname + "... ", nonl=1)
+ logger.info("processing " + targetname + "... ", nonl=1)
doctree = self.assemble_doctree(
docname, toctree_only,
appendices=(self.config.texinfo_appendices or []))
- self.info("writing... ", nonl=1)
+ logger.info("writing... ", nonl=1)
self.post_process_images(doctree)
docwriter = TexinfoWriter(self)
settings = OptionParser(
@@ -161,11 +176,12 @@ class TexinfoBuilder(Builder):
settings.docname = docname
doctree.settings = settings
docwriter.write(doctree, destination)
- self.info("done")
+ logger.info("done")
def assemble_doctree(self, indexfile, toctree_only, appendices):
+ # type: (unicode, bool, List[unicode]) -> nodes.Node
self.docnames = set([indexfile] + appendices)
- self.info(darkgreen(indexfile) + " ", nonl=1)
+ logger.info(darkgreen(indexfile) + " ", nonl=1)
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
@@ -186,8 +202,8 @@ class TexinfoBuilder(Builder):
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
- self.info()
- self.info("resolving references...")
+ logger.info('')
+ logger.info("resolving references...")
self.env.resolve_references(largetree, indexfile, self)
# TODO: add support for external :ref:s
for pendingnode in largetree.traverse(addnodes.pending_xref):
@@ -206,28 +222,30 @@ class TexinfoBuilder(Builder):
return largetree
def finish(self):
+ # type: () -> None
# copy image files
if self.images:
- self.info(bold('copying images...'), nonl=1)
+ logger.info(bold('copying images...'), nonl=1)
for src, dest in iteritems(self.images):
- self.info(' ' + src, nonl=1)
+ logger.info(' ' + src, nonl=1)
copyfile(path.join(self.srcdir, src),
path.join(self.outdir, dest))
- self.info()
+ logger.info('')
- self.info(bold('copying Texinfo support files... '), nonl=True)
+ logger.info(bold('copying Texinfo support files... '), nonl=True)
# copy Makefile
fn = path.join(self.outdir, 'Makefile')
- self.info(fn, nonl=1)
+ logger.info(fn, nonl=1)
try:
with open(fn, 'w') as mkfile:
mkfile.write(TEXINFO_MAKEFILE)
except (IOError, OSError) as err:
- self.warn("error writing file %s: %s" % (fn, err))
- self.info(' done')
+ logger.warning("error writing file %s: %s", fn, err)
+ logger.info(' done')
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(TexinfoBuilder)
app.add_config_value('texinfo_documents',
diff --git a/sphinx/builders/text.py b/sphinx/builders/text.py
index 353e71868..4541e7bee 100644
--- a/sphinx/builders/text.py
+++ b/sphinx/builders/text.py
@@ -15,9 +15,18 @@ from os import path
from docutils.io import StringOutput
from sphinx.builders import Builder
+from sphinx.util import logging
from sphinx.util.osutil import ensuredir, os_path
from sphinx.writers.text import TextWriter
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterator, Set # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
+
class TextBuilder(Builder):
name = 'text'
@@ -25,10 +34,14 @@ class TextBuilder(Builder):
out_suffix = '.txt'
allow_parallel = True
+ current_docname = None # type: unicode
+
def init(self):
+ # type: () -> None
pass
def get_outdated_docs(self):
+ # type: () -> Iterator[unicode]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@@ -48,28 +61,33 @@ class TextBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return ''
def prepare_writing(self, docnames):
+ # type: (Set[unicode]) -> None
self.writer = TextWriter(self)
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
self.current_docname = docname
destination = StringOutput(encoding='utf-8')
self.writer.write(doctree, destination)
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', 'utf-8') as f:
+ with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore
f.write(self.writer.output)
except (IOError, OSError) as err:
- self.warn("error writing file %s: %s" % (outfilename, err))
+ logger.warning("error writing file %s: %s", outfilename, err)
def finish(self):
+ # type: () -> None
pass
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(TextBuilder)
app.add_config_value('text_sectionchars', '*=-~"+`', 'env')
diff --git a/sphinx/builders/websupport.py b/sphinx/builders/websupport.py
index 394dafb3a..c2918edfa 100644
--- a/sphinx/builders/websupport.py
+++ b/sphinx/builders/websupport.py
@@ -20,6 +20,12 @@ from sphinx.util.osutil import os_path, relative_uri, ensuredir, copyfile
from sphinx.builders.html import PickleHTMLBuilder
from sphinx.writers.websupport import WebSupportTranslator
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, Tuple # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+
class WebSupportBuilder(PickleHTMLBuilder):
"""
@@ -30,6 +36,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
versioning_compare = True # for commentable node's uuid stability.
def init(self):
+ # type: () -> None
PickleHTMLBuilder.init(self)
# templates are needed for this builder, but the serializing
# builder does not initialize them
@@ -41,20 +48,24 @@ class WebSupportBuilder(PickleHTMLBuilder):
self.script_files.append('_static/websupport.js')
def set_webinfo(self, staticdir, virtual_staticdir, search, storage):
+ # type: (unicode, unicode, Any, unicode) -> None
self.staticdir = staticdir
self.virtual_staticdir = virtual_staticdir
self.search = search
self.storage = storage
def init_translator_class(self):
+ # type: () -> None
if self.translator_class is None:
self.translator_class = WebSupportTranslator
def prepare_writing(self, docnames):
+ # type: (Iterable[unicode]) -> None
PickleHTMLBuilder.prepare_writing(self, docnames)
self.globalcontext['no_search_suffix'] = True
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
destination = StringOutput(encoding='utf-8')
doctree.settings = self.docsettings
@@ -72,6 +83,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
self.handle_page(docname, ctx, event_arg=doctree)
def write_doc_serialized(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
self.imgpath = '/' + posixpath.join(self.virtual_staticdir, self.imagedir)
self.post_process_images(doctree)
title = self.env.longtitles.get(docname)
@@ -79,10 +91,12 @@ class WebSupportBuilder(PickleHTMLBuilder):
self.index_page(docname, doctree, title)
def load_indexer(self, docnames):
- self.indexer = self.search
- self.indexer.init_indexing(changed=docnames)
+ # type: (Iterable[unicode]) -> None
+ self.indexer = self.search # type: ignore
+ self.indexer.init_indexing(changed=docnames) # type: ignore
def _render_page(self, pagename, addctx, templatename, event_arg=None):
+ # type: (unicode, Dict, unicode, unicode) -> Tuple[Dict, Dict]
# This is mostly copied from StandaloneHTMLBuilder. However, instead
# of rendering the template and saving the html, create a context
# dict and pickle it.
@@ -91,6 +105,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
def pathto(otheruri, resource=False,
baseuri=self.get_target_uri(pagename)):
+ # type: (unicode, bool, unicode) -> unicode
if resource and '://' in otheruri:
return otheruri
elif not resource:
@@ -128,6 +143,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
def handle_page(self, pagename, addctx, templatename='page.html',
outfilename=None, event_arg=None):
+ # type: (unicode, Dict, unicode, unicode, unicode) -> None
ctx, doc_ctx = self._render_page(pagename, addctx,
templatename, event_arg)
@@ -146,6 +162,7 @@ class WebSupportBuilder(PickleHTMLBuilder):
copyfile(self.env.doc2path(pagename), source_name)
def handle_finish(self):
+ # type: () -> None
# get global values for css and script files
_, doc_ctx = self._render_page('tmp', {}, 'page.html')
self.globalcontext['css'] = doc_ctx['css']
@@ -164,10 +181,12 @@ class WebSupportBuilder(PickleHTMLBuilder):
shutil.move(src, dst)
def dump_search_index(self):
- self.indexer.finish_indexing()
+ # type: () -> None
+ self.indexer.finish_indexing() # type: ignore
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(WebSupportBuilder)
return {
diff --git a/sphinx/builders/xml.py b/sphinx/builders/xml.py
index 9d6b232df..ba1a4850e 100644
--- a/sphinx/builders/xml.py
+++ b/sphinx/builders/xml.py
@@ -16,9 +16,17 @@ from docutils import nodes
from docutils.io import StringOutput
from sphinx.builders import Builder
+from sphinx.util import logging
from sphinx.util.osutil import ensuredir, os_path
from sphinx.writers.xml import XMLWriter, PseudoXMLWriter
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterator, Set # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
+
class XMLBuilder(Builder):
"""
@@ -32,9 +40,11 @@ class XMLBuilder(Builder):
_writer_class = XMLWriter
def init(self):
+ # type: () -> None
pass
def get_outdated_docs(self):
+ # type: () -> Iterator[unicode]
for docname in self.env.found_docs:
if docname not in self.env.all_docs:
yield docname
@@ -54,12 +64,15 @@ class XMLBuilder(Builder):
pass
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return docname
def prepare_writing(self, docnames):
+ # type: (Set[unicode]) -> None
self.writer = self._writer_class(self)
def write_doc(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
# work around multiple string % tuple issues in docutils;
# replace tuples in attribute values with lists
doctree = doctree.deepcopy()
@@ -77,12 +90,13 @@ class XMLBuilder(Builder):
outfilename = path.join(self.outdir, os_path(docname) + self.out_suffix)
ensuredir(path.dirname(outfilename))
try:
- with codecs.open(outfilename, 'w', 'utf-8') as f:
+ with codecs.open(outfilename, 'w', 'utf-8') as f: # type: ignore
f.write(self.writer.output)
except (IOError, OSError) as err:
- self.warn("error writing file %s: %s" % (outfilename, err))
+ logger.warning("error writing file %s: %s", outfilename, err)
def finish(self):
+ # type: () -> None
pass
@@ -98,6 +112,7 @@ class PseudoXMLBuilder(XMLBuilder):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(XMLBuilder)
app.add_builder(PseudoXMLBuilder)
diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py
index 9ad38b317..650f88b44 100644
--- a/sphinx/cmdline.py
+++ b/sphinx/cmdline.py
@@ -16,17 +16,22 @@ import traceback
from os import path
from six import text_type, binary_type
+
from docutils.utils import SystemMessage
from sphinx import __display_version__
from sphinx.errors import SphinxError
from sphinx.application import Sphinx
from sphinx.util import Tee, format_exception_cut_frames, save_traceback
-from sphinx.util.console import red, nocolor, color_terminal
+from sphinx.util.console import red, nocolor, color_terminal # type: ignore
from sphinx.util.docutils import docutils_namespace
from sphinx.util.osutil import abspath, fs_encoding
from sphinx.util.pycompat import terminal_safe
+if False:
+ # For type annotation
+ from typing import Any, IO, List, Union # NOQA
+
USAGE = """\
Sphinx v%s
@@ -45,18 +50,21 @@ For more information, visit <http://sphinx-doc.org/>.
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage):
+ # type: (Any) -> Any
return usage
def format_help(self, formatter):
- result = []
- if self.description:
+ # type: (Any) -> unicode
+ result = [] # type: List[unicode]
+ if self.description: # type: ignore
result.append(self.format_description(formatter))
- if self.option_list:
- result.append(self.format_option_help(formatter))
+ if self.option_list: # type: ignore
+ result.append(self.format_option_help(formatter)) # type: ignore
return "\n".join(result)
def handle_exception(app, opts, exception, stderr=sys.stderr):
+ # type: (Sphinx, Any, Union[Exception, KeyboardInterrupt], IO) -> None
if opts.pdb:
import pdb
print(red('Exception occurred while building, starting debugger:'),
@@ -107,9 +115,7 @@ def handle_exception(app, opts, exception, stderr=sys.stderr):
def main(argv):
- if not color_terminal():
- nocolor()
-
+ # type: (List[unicode]) -> int
parser = optparse.OptionParser(USAGE, epilog=EPILOG, formatter=MyFormatter())
parser.add_option('--version', action='store_true', dest='version',
help='show version information and exit')
@@ -158,8 +164,12 @@ def main(argv):
help='no output on stdout, just warnings on stderr')
group.add_option('-Q', action='store_true', dest='really_quiet',
help='no output at all, not even warnings')
- group.add_option('-N', action='store_true', dest='nocolor',
- help='do not emit colored output')
+ group.add_option('--color', dest='color',
+ action='store_const', const='yes', default='auto',
+ help='Do emit colored output (default: auto-detect)')
+ group.add_option('-N', '--no-color', dest='color',
+ action='store_const', const='no',
+ help='Do not emit colored output (default: auto-detect)')
group.add_option('-w', metavar='FILE', dest='warnfile',
help='write warnings (and errors) to given file')
group.add_option('-W', action='store_true', dest='warningiserror',
@@ -210,12 +220,12 @@ def main(argv):
# handle remaining filename arguments
filenames = args[2:]
- err = 0
+ errored = False
for filename in filenames:
if not path.isfile(filename):
print('Error: Cannot find file %r.' % filename, file=sys.stderr)
- err = 1
- if err:
+ errored = True
+ if errored:
return 1
# likely encoding used for command-line arguments
@@ -229,7 +239,7 @@ def main(argv):
print('Error: Cannot combine -a option and filenames.', file=sys.stderr)
return 1
- if opts.nocolor:
+ if opts.color == 'no' or (opts.color == 'auto' and not color_terminal()):
nocolor()
doctreedir = abspath(opts.doctreedir or path.join(outdir, '.doctrees'))
@@ -249,7 +259,7 @@ def main(argv):
print('Error: Cannot open warning file %r: %s' %
(opts.warnfile, exc), file=sys.stderr)
sys.exit(1)
- warning = Tee(warning, warnfp)
+ warning = Tee(warning, warnfp) # type: ignore
error = warning
confoverrides = {}
diff --git a/sphinx/config.py b/sphinx/config.py
index 4222dbac9..451aa8b0d 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -13,12 +13,21 @@ import re
from os import path, getenv
from six import PY2, PY3, iteritems, string_types, binary_type, text_type, integer_types
+from typing import Any, NamedTuple, Union
from sphinx.errors import ConfigError
-from sphinx.locale import l_
+from sphinx.locale import l_, _
+from sphinx.util import logging
+from sphinx.util.i18n import format_date
from sphinx.util.osutil import cd
from sphinx.util.pycompat import execfile_, NoneType
-from sphinx.util.i18n import format_date
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple # NOQA
+ from sphinx.util.tags import Tags # NOQA
+
+logger = logging.getLogger(__name__)
nonascii_re = re.compile(br'[\x80-\xff]')
copyright_year_re = re.compile(r'^((\d{4}-)?)(\d{4})(?=[ ,])')
@@ -35,6 +44,13 @@ CONFIG_PERMITTED_TYPE_WARNING = "The config value `{name}' has type `{current.__
CONFIG_TYPE_WARNING = "The config value `{name}' has type `{current.__name__}', " \
"defaults to `{default.__name__}'."
+if PY3:
+ unicode = str # special alias for static typing...
+
+ConfigValue = NamedTuple('ConfigValue', [('name', str),
+ ('value', Any),
+ ('rebuild', Union[bool, unicode])])
+
class ENUM(object):
"""represents the config value should be a one of candidates.
@@ -43,13 +59,15 @@ class ENUM(object):
app.add_config_value('latex_show_urls', 'no', None, ENUM('no', 'footnote', 'inline'))
"""
def __init__(self, *candidates):
+ # type: (unicode) -> None
self.candidates = candidates
def match(self, value):
+ # type: (unicode) -> bool
return value in self.candidates
-string_classes = [text_type]
+string_classes = [text_type] # type: List
if PY2:
string_classes.append(binary_type) # => [str, unicode]
@@ -114,15 +132,13 @@ class Config(object):
tls_verify = (True, 'env'),
tls_cacerts = (None, 'env'),
-
- # pre-initialized confval for HTML builder
- html_translator_class = (None, 'html', string_classes),
- )
+ ) # type: Dict[unicode, Tuple]
def __init__(self, dirname, filename, overrides, tags):
+ # type: (unicode, unicode, Dict, Tags) -> None
self.overrides = overrides
self.values = Config.config_values.copy()
- config = {}
+ config = {} # type: Dict[unicode, Any]
if dirname is not None:
config_file = path.join(dirname, filename)
config['__file__'] = config_file
@@ -140,14 +156,14 @@ class Config(object):
self._raw_config = config
# these two must be preinitialized because extensions can add their
# own config values
- self.setup = config.get('setup', None)
+ self.setup = config.get('setup', None) # type: Callable
if 'extensions' in overrides:
if isinstance(overrides['extensions'], string_types):
config['extensions'] = overrides.pop('extensions').split(',')
else:
config['extensions'] = overrides.pop('extensions')
- self.extensions = config.get('extensions', [])
+ self.extensions = config.get('extensions', []) # type: List[unicode]
# correct values of copyright year that are not coherent with
# the SOURCE_DATE_EPOCH environment variable (if set)
@@ -155,10 +171,11 @@ class Config(object):
if getenv('SOURCE_DATE_EPOCH') is not None:
for k in ('copyright', 'epub_copyright'):
if k in config:
- config[k] = copyright_year_re.sub('\g<1>%s' % format_date('%Y'),
+ config[k] = copyright_year_re.sub(r'\g<1>%s' % format_date('%Y'),
config[k])
- def check_types(self, warn):
+ def check_types(self):
+ # type: () -> None
# check all values for deviation from the default value's type, since
# that can result in TypeErrors all over the place
# NB. since config values might use l_() we have to wait with calling
@@ -177,7 +194,7 @@ class Config(object):
current = self[name]
if isinstance(permitted, ENUM):
if not permitted.match(current):
- warn(CONFIG_ENUM_WARNING.format(
+ logger.warning(CONFIG_ENUM_WARNING.format(
name=name, current=current, candidates=permitted.candidates))
else:
if type(current) is type(default):
@@ -192,30 +209,32 @@ class Config(object):
continue # at least we share a non-trivial base class
if permitted:
- warn(CONFIG_PERMITTED_TYPE_WARNING.format(
+ logger.warning(CONFIG_PERMITTED_TYPE_WARNING.format(
name=name, current=type(current),
permitted=str([cls.__name__ for cls in permitted])))
else:
- warn(CONFIG_TYPE_WARNING.format(
+ logger.warning(CONFIG_TYPE_WARNING.format(
name=name, current=type(current), default=type(default)))
- def check_unicode(self, warn):
+ def check_unicode(self):
+ # type: () -> None
# check all string values for non-ASCII characters in bytestrings,
# since that can result in UnicodeErrors all over the place
for name, value in iteritems(self._raw_config):
if isinstance(value, binary_type) and nonascii_re.search(value):
- warn('the config value %r is set to a string with non-ASCII '
- 'characters; this can lead to Unicode errors occurring. '
- 'Please use Unicode strings, e.g. %r.' % (name, u'Content'))
+ logger.warning('the config value %r is set to a string with non-ASCII '
+ 'characters; this can lead to Unicode errors occurring. '
+ 'Please use Unicode strings, e.g. %r.', name, u'Content')
def convert_overrides(self, name, value):
+ # type: (unicode, Any) -> Any
if not isinstance(value, string_types):
return value
else:
defvalue = self.values[name][0]
if isinstance(defvalue, dict):
- raise ValueError('cannot override dictionary config setting %r, '
- 'ignoring (use %r to set individual elements)' %
+ raise ValueError(_('cannot override dictionary config setting %r, '
+ 'ignoring (use %r to set individual elements)') %
(name, name + '.key=value'))
elif isinstance(defvalue, list):
return value.split(',')
@@ -223,19 +242,22 @@ class Config(object):
try:
return int(value)
except ValueError:
- raise ValueError('invalid number %r for config value %r, ignoring' %
+ raise ValueError(_('invalid number %r for config value %r, ignoring') %
(value, name))
elif hasattr(defvalue, '__call__'):
return value
elif defvalue is not None and not isinstance(defvalue, string_types):
- raise ValueError('cannot override config setting %r with unsupported '
- 'type, ignoring' % name)
+ raise ValueError(_('cannot override config setting %r with unsupported '
+ 'type, ignoring') % name)
else:
return value
- def pre_init_values(self, warn):
- """Initialize some limited config variables before loading extensions"""
- variables = ['needs_sphinx', 'suppress_warnings', 'html_translator_class']
+ def pre_init_values(self):
+ # type: () -> None
+ """
+ Initialize some limited config variables before initializing i18n and loading extensions
+ """
+ variables = ['needs_sphinx', 'suppress_warnings', 'language', 'locale_dirs']
for name in variables:
try:
if name in self.overrides:
@@ -243,9 +265,10 @@ class Config(object):
elif name in self._raw_config:
self.__dict__[name] = self._raw_config[name]
except ValueError as exc:
- warn(exc)
+ logger.warning("%s", exc)
- def init_values(self, warn):
+ def init_values(self):
+ # type: () -> None
config = self._raw_config
for valname, value in iteritems(self.overrides):
try:
@@ -254,38 +277,56 @@ class Config(object):
config.setdefault(realvalname, {})[key] = value
continue
elif valname not in self.values:
- warn('unknown config value %r in override, ignoring' % valname)
+ logger.warning(_('unknown config value %r in override, ignoring'), valname)
continue
if isinstance(value, string_types):
config[valname] = self.convert_overrides(valname, value)
else:
config[valname] = value
except ValueError as exc:
- warn(exc)
+ logger.warning("%s", exc)
for name in config:
if name in self.values:
self.__dict__[name] = config[name]
- if isinstance(self.source_suffix, string_types):
- self.source_suffix = [self.source_suffix]
+ if isinstance(self.source_suffix, string_types): # type: ignore
+ self.source_suffix = [self.source_suffix] # type: ignore
def __getattr__(self, name):
+ # type: (unicode) -> Any
if name.startswith('_'):
raise AttributeError(name)
if name not in self.values:
- raise AttributeError('No such config value: %s' % name)
+ raise AttributeError(_('No such config value: %s') % name)
default = self.values[name][0]
if hasattr(default, '__call__'):
return default(self)
return default
def __getitem__(self, name):
+ # type: (unicode) -> unicode
return getattr(self, name)
def __setitem__(self, name, value):
+ # type: (unicode, Any) -> None
setattr(self, name, value)
def __delitem__(self, name):
+ # type: (unicode) -> None
delattr(self, name)
def __contains__(self, name):
+ # type: (unicode) -> bool
return name in self.values
+
+ def __iter__(self):
+ # type: () -> Iterable[ConfigValue]
+ for name, value in iteritems(self.values):
+ yield ConfigValue(name, getattr(self, name), value[1]) # type: ignore
+
+ def add(self, name, default, rebuild, types):
+ # type: (unicode, Any, Union[bool, unicode], Any) -> None
+ self.values[name] = (default, rebuild, types)
+
+ def filter(self, rebuild):
+ # type: (str) -> Iterator[ConfigValue]
+ return (value for value in self if value.rebuild == rebuild) # type: ignore
diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py
index 317ad5062..608b23c1b 100644
--- a/sphinx/deprecation.py
+++ b/sphinx/deprecation.py
@@ -10,12 +10,16 @@
"""
-class RemovedInSphinx16Warning(DeprecationWarning):
+class RemovedInSphinx17Warning(DeprecationWarning):
pass
-class RemovedInSphinx17Warning(PendingDeprecationWarning):
+class RemovedInSphinx18Warning(PendingDeprecationWarning):
pass
-RemovedInNextVersionWarning = RemovedInSphinx16Warning
+class RemovedInSphinx20Warning(PendingDeprecationWarning):
+ pass
+
+
+RemovedInNextVersionWarning = RemovedInSphinx17Warning
diff --git a/sphinx/directives/__init__.py b/sphinx/directives/__init__.py
index aac757497..dc0cc4f6c 100644
--- a/sphinx/directives/__init__.py
+++ b/sphinx/directives/__init__.py
@@ -29,6 +29,12 @@ from sphinx.directives.patches import ( # noqa
Figure, Meta
)
+if False:
+ # For type annotation
+ from typing import Any, Dict, List # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
# RE to strip backslash escapes
nl_escape_re = re.compile(r'\\\n')
@@ -51,9 +57,13 @@ class ObjectDescription(Directive):
}
# types of doc fields that this directive handles, see sphinx.util.docfields
- doc_field_types = []
+ doc_field_types = [] # type: List[Any]
+ domain = None # type: unicode
+ objtype = None # type: unicode
+ indexnode = None # type: addnodes.index
def get_signatures(self):
+ # type: () -> List[unicode]
"""
Retrieve the signatures to document from the directive arguments. By
default, signatures are given as arguments, one per line.
@@ -65,6 +75,7 @@ class ObjectDescription(Directive):
return [strip_backslash_re.sub(r'\1', line.strip()) for line in lines]
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> Any
"""
Parse the signature *sig* into individual nodes and append them to
*signode*. If ValueError is raised, parsing is aborted and the whole
@@ -77,6 +88,7 @@ class ObjectDescription(Directive):
raise ValueError
def add_target_and_index(self, name, sig, signode):
+ # type: (Any, unicode, addnodes.desc_signature) -> None
"""
Add cross-reference IDs and entries to self.indexnode, if applicable.
@@ -85,6 +97,7 @@ class ObjectDescription(Directive):
return # do nothing by default
def before_content(self):
+ # type: () -> None
"""
Called before parsing content. Used to set information about the current
directive context on the build environment.
@@ -92,6 +105,7 @@ class ObjectDescription(Directive):
pass
def after_content(self):
+ # type: () -> None
"""
Called after parsing content. Used to reset information about the
current directive context on the build environment.
@@ -99,6 +113,7 @@ class ObjectDescription(Directive):
pass
def run(self):
+ # type: () -> List[nodes.Node]
"""
Main directive entry function, called by docutils upon encountering the
directive.
@@ -120,7 +135,7 @@ class ObjectDescription(Directive):
self.domain, self.objtype = self.name.split(':', 1)
else:
self.domain, self.objtype = '', self.name
- self.env = self.state.document.settings.env
+ self.env = self.state.document.settings.env # type: BuildEnvironment
self.indexnode = addnodes.index(entries=[])
node = addnodes.desc()
@@ -130,7 +145,7 @@ class ObjectDescription(Directive):
node['objtype'] = node['desctype'] = self.objtype
node['noindex'] = noindex = ('noindex' in self.options)
- self.names = []
+ self.names = [] # type: List[unicode]
signatures = self.get_signatures()
for i, sig in enumerate(signatures):
# add a signature node for each signature in the current unit
@@ -181,6 +196,7 @@ class DefaultRole(Directive):
final_argument_whitespace = False
def run(self):
+ # type: () -> List[nodes.Node]
if not self.arguments:
if '' in roles._roles:
# restore the "default" default role
@@ -209,9 +225,10 @@ class DefaultDomain(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
domain_name = self.arguments[0].lower()
# if domain_name not in env.domains:
@@ -225,6 +242,7 @@ class DefaultDomain(Directive):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
directives.register_directive('default-role', DefaultRole)
directives.register_directive('default-domain', DefaultDomain)
directives.register_directive('describe', ObjectDescription)
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 58c881bbd..7ae95b131 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -15,13 +15,20 @@ from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
-from six import string_types
-
from sphinx import addnodes
from sphinx.locale import _
+from sphinx.util import logging
from sphinx.util import parselinenos
from sphinx.util.nodes import set_source_info
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+
+logger = logging.getLogger(__name__)
+
class Highlight(Directive):
"""
@@ -38,6 +45,7 @@ class Highlight(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
if 'linenothreshold' in self.options:
try:
linenothreshold = int(self.options['linenothreshold'])
@@ -49,10 +57,14 @@ class Highlight(Directive):
linenothreshold=linenothreshold)]
-def dedent_lines(lines, dedent):
+def dedent_lines(lines, dedent, location=None):
+ # type: (List[unicode], int, Any) -> List[unicode]
if not dedent:
return lines
+ if any(s[:dedent].strip() for s in lines):
+ logger.warning(_('Over dedent detected'), location=location)
+
new_lines = []
for line in lines:
new_line = line[dedent:]
@@ -64,13 +76,15 @@ def dedent_lines(lines, dedent):
def container_wrapper(directive, literal_node, caption):
+ # type: (Directive, nodes.Node, unicode) -> nodes.container
container_node = nodes.container('', literal_block=True,
classes=['literal-block-wrapper'])
parsed = nodes.Element()
directive.state.nested_parse(ViewList([caption], source=''),
directive.content_offset, parsed)
if isinstance(parsed[0], nodes.system_message):
- raise ValueError(parsed[0])
+ msg = _('Invalid caption: %s') % parsed[0].astext()
+ raise ValueError(msg)
caption_node = nodes.caption(parsed[0].rawsource, '',
*parsed[0].children)
caption_node.source = literal_node.source
@@ -101,22 +115,31 @@ class CodeBlock(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
+ document = self.state.document
code = u'\n'.join(self.content)
+ location = self.state_machine.get_source_and_line(self.lineno)
linespec = self.options.get('emphasize-lines')
if linespec:
try:
nlines = len(self.content)
- hl_lines = [x + 1 for x in parselinenos(linespec, nlines)]
+ hl_lines = parselinenos(linespec, nlines)
+ if any(i >= nlines for i in hl_lines):
+ logger.warning('line number spec is out of range(1-%d): %r' %
+ (nlines, self.options['emphasize-lines']),
+ location=location)
+
+ hl_lines = [x + 1 for x in hl_lines if x < nlines]
except ValueError as err:
- document = self.state.document
return [document.reporter.warning(str(err), line=self.lineno)]
else:
hl_lines = None
if 'dedent' in self.options:
+ location = self.state_machine.get_source_and_line(self.lineno)
lines = code.split('\n')
- lines = dedent_lines(lines, self.options['dedent'])
+ lines = dedent_lines(lines, self.options['dedent'], location=location)
code = '\n'.join(lines)
literal = nodes.literal_block(code, code)
@@ -136,9 +159,7 @@ class CodeBlock(Directive):
try:
literal = container_wrapper(self, literal, caption)
except ValueError as exc:
- document = self.state.document
- errmsg = _('Invalid caption: %s' % exc[0][0].astext())
- return [document.reporter.warning(errmsg, line=self.lineno)]
+ return [document.reporter.warning(str(exc), line=self.lineno)]
# literal will be note_implicit_target that is linked from caption and numref.
# when options['name'] is provided, it should be primary ID.
@@ -147,6 +168,196 @@ class CodeBlock(Directive):
return [literal]
+class LiteralIncludeReader(object):
+ INVALID_OPTIONS_PAIR = [
+ ('lineno-match', 'lineno-start'),
+ ('lineno-match', 'append'),
+ ('lineno-match', 'prepend'),
+ ('start-after', 'start-at'),
+ ('end-before', 'end-at'),
+ ('diff', 'pyobject'),
+ ('diff', 'lineno-start'),
+ ('diff', 'lineno-match'),
+ ('diff', 'lines'),
+ ('diff', 'start-after'),
+ ('diff', 'end-before'),
+ ('diff', 'start-at'),
+ ('diff', 'end-at'),
+ ]
+
+ def __init__(self, filename, options, config):
+ # type: (unicode, Dict, Config) -> None
+ self.filename = filename
+ self.options = options
+ self.encoding = options.get('encoding', config.source_encoding)
+ self.lineno_start = self.options.get('lineno-start', 1)
+
+ self.parse_options()
+
+ def parse_options(self):
+ # type: () -> None
+ for option1, option2 in self.INVALID_OPTIONS_PAIR:
+ if option1 in self.options and option2 in self.options:
+ raise ValueError(_('Cannot use both "%s" and "%s" options') %
+ (option1, option2))
+
+ def read_file(self, filename, location=None):
+ # type: (unicode, Any) -> List[unicode]
+ try:
+ with codecs.open(filename, 'r', self.encoding, errors='strict') as f: # type: ignore # NOQA
+ text = f.read() # type: unicode
+ if 'tab-width' in self.options:
+ text = text.expandtabs(self.options['tab-width'])
+
+ lines = text.splitlines(True)
+ if 'dedent' in self.options:
+ return dedent_lines(lines, self.options.get('dedent'), location=location)
+ else:
+ return lines
+ except (IOError, OSError):
+ raise IOError(_('Include file %r not found or reading it failed') % filename)
+ except UnicodeError:
+ raise UnicodeError(_('Encoding %r used for reading included file %r seems to '
+ 'be wrong, try giving an :encoding: option') %
+ (self.encoding, filename))
+
+ def read(self, location=None):
+ # type: (Any) -> Tuple[unicode, int]
+ if 'diff' in self.options:
+ lines = self.show_diff()
+ else:
+ filters = [self.pyobject_filter,
+ self.start_filter,
+ self.end_filter,
+ self.lines_filter,
+ self.prepend_filter,
+ self.append_filter]
+ lines = self.read_file(self.filename, location=location)
+ for func in filters:
+ lines = func(lines, location=location)
+
+ return ''.join(lines), len(lines)
+
+ def show_diff(self, location=None):
+ # type: (Any) -> List[unicode]
+ new_lines = self.read_file(self.filename)
+ old_filename = self.options.get('diff')
+ old_lines = self.read_file(old_filename)
+ diff = unified_diff(old_lines, new_lines, old_filename, self.filename) # type: ignore
+ return list(diff)
+
+ def pyobject_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ pyobject = self.options.get('pyobject')
+ if pyobject:
+ from sphinx.pycode import ModuleAnalyzer
+ analyzer = ModuleAnalyzer.for_file(self.filename, '')
+ tags = analyzer.find_tags()
+ if pyobject not in tags:
+ raise ValueError(_('Object named %r not found in include file %r') %
+ (pyobject, self.filename))
+ else:
+ start = tags[pyobject][1]
+ end = tags[pyobject][2]
+ lines = lines[start - 1:end - 1]
+ if 'lineno-match' in self.options:
+ self.lineno_start = start
+
+ return lines
+
+ def lines_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ linespec = self.options.get('lines')
+ if linespec:
+ linelist = parselinenos(linespec, len(lines))
+ if any(i >= len(lines) for i in linelist):
+ logger.warning('line number spec is out of range(1-%d): %r' %
+ (len(lines), linespec), location=location)
+
+ if 'lineno-match' in self.options:
+ # make sure the line list is not "disjoint".
+ first = linelist[0]
+ if all(first + i == n for i, n in enumerate(linelist)):
+ self.lineno_start += linelist[0]
+ else:
+ raise ValueError(_('Cannot use "lineno-match" with a disjoint '
+ 'set of "lines"'))
+
+ lines = [lines[n] for n in linelist if n < len(lines)]
+ if lines == []:
+ raise ValueError(_('Line spec %r: no lines pulled from include file %r') %
+ (linespec, self.filename))
+
+ return lines
+
+ def start_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ if 'start-at' in self.options:
+ start = self.options.get('start-at')
+ inclusive = False
+ elif 'start-after' in self.options:
+ start = self.options.get('start-after')
+ inclusive = True
+ else:
+ start = None
+
+ if start:
+ for lineno, line in enumerate(lines):
+ if start in line:
+ if inclusive:
+ if 'lineno-match' in self.options:
+ self.lineno_start += lineno + 1
+
+ return lines[lineno + 1:]
+ else:
+ if 'lineno-match' in self.options:
+ self.lineno_start += lineno
+
+ return lines[lineno:]
+
+ return lines
+
+ def end_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ if 'end-at' in self.options:
+ end = self.options.get('end-at')
+ inclusive = True
+ elif 'end-before' in self.options:
+ end = self.options.get('end-before')
+ inclusive = False
+ else:
+ end = None
+
+ if end:
+ for lineno, line in enumerate(lines):
+ if end in line:
+ if inclusive:
+ return lines[:lineno + 1]
+ else:
+ if lineno == 0:
+ return []
+ else:
+ return lines[:lineno]
+
+ return lines
+
+ def prepend_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ prepend = self.options.get('prepend')
+ if prepend:
+ lines.insert(0, prepend + '\n')
+
+ return lines
+
+ def append_filter(self, lines, location=None):
+ # type: (List[unicode], Any) -> List[unicode]
+ append = self.options.get('append')
+ if append:
+ lines.append(append + '\n')
+
+ return lines
+
+
class LiteralInclude(Directive):
"""
Like ``.. include:: :literal:``, but only warns if the include file is
@@ -181,203 +392,62 @@ class LiteralInclude(Directive):
'diff': directives.unchanged_required,
}
- def read_with_encoding(self, filename, document, codec_info, encoding):
- try:
- with codecs.StreamReaderWriter(open(filename, 'rb'), codec_info[2],
- codec_info[3], 'strict') as f:
- lines = f.readlines()
- lines = dedent_lines(lines, self.options.get('dedent'))
- return lines
- except (IOError, OSError):
- return [document.reporter.warning(
- 'Include file %r not found or reading it failed' % filename,
- line=self.lineno)]
- except UnicodeError:
- return [document.reporter.warning(
- 'Encoding %r used for reading included file %r seems to '
- 'be wrong, try giving an :encoding: option' %
- (encoding, filename))]
-
def run(self):
+ # type: () -> List[nodes.Node]
document = self.state.document
if not document.settings.file_insertion_enabled:
return [document.reporter.warning('File insertion disabled',
line=self.lineno)]
env = document.settings.env
- rel_filename, filename = env.relfn2path(self.arguments[0])
-
- if 'pyobject' in self.options and 'lines' in self.options:
- return [document.reporter.warning(
- 'Cannot use both "pyobject" and "lines" options',
- line=self.lineno)]
-
- if 'lineno-match' in self.options and 'lineno-start' in self.options:
- return [document.reporter.warning(
- 'Cannot use both "lineno-match" and "lineno-start"',
- line=self.lineno)]
-
- if 'lineno-match' in self.options and \
- (set(['append', 'prepend']) & set(self.options.keys())):
- return [document.reporter.warning(
- 'Cannot use "lineno-match" and "append" or "prepend"',
- line=self.lineno)]
-
- if 'start-after' in self.options and 'start-at' in self.options:
- return [document.reporter.warning(
- 'Cannot use both "start-after" and "start-at" options',
- line=self.lineno)]
-
- if 'end-before' in self.options and 'end-at' in self.options:
- return [document.reporter.warning(
- 'Cannot use both "end-before" and "end-at" options',
- line=self.lineno)]
-
- encoding = self.options.get('encoding', env.config.source_encoding)
- codec_info = codecs.lookup(encoding)
-
- lines = self.read_with_encoding(filename, document,
- codec_info, encoding)
- if lines and not isinstance(lines[0], string_types):
- return lines
-
- diffsource = self.options.get('diff')
- if diffsource is not None:
- tmp, fulldiffsource = env.relfn2path(diffsource)
-
- difflines = self.read_with_encoding(fulldiffsource, document,
- codec_info, encoding)
- if not isinstance(difflines[0], string_types):
- return difflines
- diff = unified_diff(
- difflines,
- lines,
- diffsource,
- self.arguments[0])
- lines = list(diff)
-
- linenostart = self.options.get('lineno-start', 1)
- objectname = self.options.get('pyobject')
- if objectname is not None:
- from sphinx.pycode import ModuleAnalyzer
- analyzer = ModuleAnalyzer.for_file(filename, '')
- tags = analyzer.find_tags()
- if objectname not in tags:
- return [document.reporter.warning(
- 'Object named %r not found in include file %r' %
- (objectname, filename), line=self.lineno)]
- else:
- lines = lines[tags[objectname][1] - 1: tags[objectname][2] - 1]
- if 'lineno-match' in self.options:
- linenostart = tags[objectname][1]
- linespec = self.options.get('lines')
- if linespec:
- try:
- linelist = parselinenos(linespec, len(lines))
- except ValueError as err:
- return [document.reporter.warning(str(err), line=self.lineno)]
+ # convert options['diff'] to absolute path
+ if 'diff' in self.options:
+ _, path = env.relfn2path(self.options['diff'])
+ self.options['diff'] = path
- if 'lineno-match' in self.options:
- # make sure the line list is not "disjoint".
- previous = linelist[0]
- for line_number in linelist[1:]:
- if line_number == previous + 1:
- previous = line_number
- continue
- return [document.reporter.warning(
- 'Cannot use "lineno-match" with a disjoint set of '
- '"lines"', line=self.lineno)]
- linenostart = linelist[0] + 1
- # just ignore non-existing lines
- lines = [lines[i] for i in linelist if i < len(lines)]
- if not lines:
- return [document.reporter.warning(
- 'Line spec %r: no lines pulled from include file %r' %
- (linespec, filename), line=self.lineno)]
-
- linespec = self.options.get('emphasize-lines')
- if linespec:
- try:
- hl_lines = [x + 1 for x in parselinenos(linespec, len(lines))]
- except ValueError as err:
- return [document.reporter.warning(str(err), line=self.lineno)]
- else:
- hl_lines = None
-
- start_str = self.options.get('start-after')
- start_inclusive = False
- if self.options.get('start-at') is not None:
- start_str = self.options.get('start-at')
- start_inclusive = True
- end_str = self.options.get('end-before')
- end_inclusive = False
- if self.options.get('end-at') is not None:
- end_str = self.options.get('end-at')
- end_inclusive = True
- if start_str is not None or end_str is not None:
- use = not start_str
- res = []
- for line_number, line in enumerate(lines):
- if not use and start_str and start_str in line:
- if 'lineno-match' in self.options:
- linenostart += line_number + 1
- use = True
- if start_inclusive:
- res.append(line)
- elif use and end_str and end_str in line:
- if end_inclusive:
- res.append(line)
- break
- elif use:
- res.append(line)
- lines = res
-
- prepend = self.options.get('prepend')
- if prepend:
- lines.insert(0, prepend + '\n')
-
- append = self.options.get('append')
- if append:
- lines.append(append + '\n')
-
- text = ''.join(lines)
- if self.options.get('tab-width'):
- text = text.expandtabs(self.options['tab-width'])
- retnode = nodes.literal_block(text, text, source=filename)
- set_source_info(self, retnode)
- if diffsource: # if diff is set, set udiff
- retnode['language'] = 'udiff'
- if 'language' in self.options:
- retnode['language'] = self.options['language']
- retnode['linenos'] = 'linenos' in self.options or \
- 'lineno-start' in self.options or \
- 'lineno-match' in self.options
- retnode['classes'] += self.options.get('class', [])
- extra_args = retnode['highlight_args'] = {}
- if hl_lines is not None:
- extra_args['hl_lines'] = hl_lines
- extra_args['linenostart'] = linenostart
- env.note_dependency(rel_filename)
-
- caption = self.options.get('caption')
- if caption is not None:
- if not caption:
- caption = self.arguments[0]
- try:
+ try:
+ location = self.state_machine.get_source_and_line(self.lineno)
+ rel_filename, filename = env.relfn2path(self.arguments[0])
+ env.note_dependency(rel_filename)
+
+ reader = LiteralIncludeReader(filename, self.options, env.config)
+ text, lines = reader.read(location=location)
+
+ retnode = nodes.literal_block(text, text, source=filename)
+ set_source_info(self, retnode)
+ if self.options.get('diff'): # if diff is set, set udiff
+ retnode['language'] = 'udiff'
+ elif 'language' in self.options:
+ retnode['language'] = self.options['language']
+ retnode['linenos'] = ('linenos' in self.options or
+ 'lineno-start' in self.options or
+ 'lineno-match' in self.options)
+ retnode['classes'] += self.options.get('class', [])
+ extra_args = retnode['highlight_args'] = {}
+ if 'emphasize-lines' in self.options:
+ hl_lines = parselinenos(self.options['emphasize-lines'], lines)
+ if any(i >= lines for i in hl_lines):
+ logger.warning('line number spec is out of range(1-%d): %r' %
+ (lines, self.options['emphasize-lines']),
+ location=location)
+ extra_args['hl_lines'] = [x + 1 for x in hl_lines if x < lines]
+ extra_args['linenostart'] = reader.lineno_start
+
+ if 'caption' in self.options:
+ caption = self.options['caption'] or self.arguments[0]
retnode = container_wrapper(self, retnode, caption)
- except ValueError as exc:
- document = self.state.document
- errmsg = _('Invalid caption: %s' % exc[0][0].astext())
- return [document.reporter.warning(errmsg, line=self.lineno)]
- # retnode will be note_implicit_target that is linked from caption and numref.
- # when options['name'] is provided, it should be primary ID.
- self.add_name(retnode)
+ # retnode will be note_implicit_target that is linked from caption and numref.
+ # when options['name'] is provided, it should be primary ID.
+ self.add_name(retnode)
- return [retnode]
+ return [retnode]
+ except Exception as exc:
+ return [document.reporter.warning(str(exc), line=self.lineno)]
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
directives.register_directive('highlight', Highlight)
directives.register_directive('highlightlang', Highlight) # old
directives.register_directive('code-block', CodeBlock)
diff --git a/sphinx/directives/other.py b/sphinx/directives/other.py
index 9c9e1cd16..626218ca2 100644
--- a/sphinx/directives/other.py
+++ b/sphinx/directives/other.py
@@ -8,6 +8,7 @@
"""
from six.moves import range
+
from docutils import nodes
from docutils.parsers.rst import Directive, directives
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
@@ -21,8 +22,14 @@ from sphinx.util.nodes import explicit_title_re, set_source_info, \
process_index_entry
from sphinx.util.matching import patfilter
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
def int_or_nothing(argument):
+ # type: (unicode) -> int
if not argument:
return 999
return int(argument)
@@ -50,6 +57,7 @@ class TocTree(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
suffixes = env.config.source_suffix
glob = 'glob' in self.options
@@ -57,7 +65,7 @@ class TocTree(Directive):
ret = []
# (title, ref) pairs, where ref may be a document, or an external link,
# and title may be None if the document's title is to be used
- entries = []
+ entries = [] # type: List[Tuple[unicode, unicode]]
includefiles = []
all_docnames = env.found_docs.copy()
# don't add the currently visited file in catch-all patterns
@@ -136,9 +144,10 @@ class Author(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
if not env.config.show_authors:
return []
@@ -168,20 +177,21 @@ class Index(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
arguments = self.arguments[0].split('\n')
env = self.state.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
targetnode = nodes.target('', '', ids=[targetid])
self.state.document.note_explicit_target(targetnode)
indexnode = addnodes.index()
- indexnode['entries'] = ne = []
+ indexnode['entries'] = []
indexnode['inline'] = False
set_source_info(self, indexnode)
for entry in arguments:
- ne.extend(process_index_entry(entry, targetid))
+ indexnode['entries'].extend(process_index_entry(entry, targetid))
return [indexnode, targetnode]
@@ -193,9 +203,10 @@ class VersionChange(Directive):
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
node = addnodes.versionmodified()
node.document = self.state.document
set_source_info(self, node)
@@ -248,9 +259,10 @@ class TabularColumns(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
node = addnodes.tabular_col_spec()
node['spec'] = self.arguments[0]
set_source_info(self, node)
@@ -265,9 +277,10 @@ class Centered(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
if not self.arguments:
return []
subnode = addnodes.centered()
@@ -285,9 +298,10 @@ class Acks(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
node = addnodes.acks()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
@@ -311,6 +325,7 @@ class HList(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
ncolumns = self.options.get('columns', 2)
node = nodes.paragraph()
node.document = self.state.document
@@ -342,9 +357,10 @@ class Only(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
node = addnodes.only()
node.document = self.state.document
set_source_info(self, node)
@@ -398,6 +414,7 @@ class Include(BaseInclude):
"""
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
if self.arguments[0].startswith('<') and \
self.arguments[0].endswith('>'):
@@ -410,6 +427,7 @@ class Include(BaseInclude):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
directives.register_directive('toctree', TocTree)
directives.register_directive('sectionauthor', Author)
directives.register_directive('moduleauthor', Author)
diff --git a/sphinx/directives/patches.py b/sphinx/directives/patches.py
index 134e10442..880377ff7 100644
--- a/sphinx/directives/patches.py
+++ b/sphinx/directives/patches.py
@@ -14,6 +14,11 @@ from docutils.parsers.rst.directives import images, html, tables
from sphinx import addnodes
from sphinx.util.nodes import set_source_info
+if False:
+ # For type annotation
+ from typing import Dict, List # NOQA
+ from sphinx.application import Sphinx # NOQA
+
class Figure(images.Figure):
"""The figure directive which applies `:name:` option to the figure node
@@ -21,6 +26,7 @@ class Figure(images.Figure):
"""
def run(self):
+ # type: () -> List[nodes.Node]
name = self.options.pop('name', None)
result = images.Figure.run(self)
if len(result) == 2 or isinstance(result[0], nodes.system_message):
@@ -40,6 +46,7 @@ class Figure(images.Figure):
class Meta(html.Meta):
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
result = html.Meta.run(self)
for node in result:
@@ -96,6 +103,7 @@ class ListTable(tables.ListTable):
def setup(app):
+ # type: (Sphinx) -> Dict
directives.register_directive('figure', Figure)
directives.register_directive('meta', Meta)
directives.register_directive('table', RSTTable)
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index 0388250ee..43b6bac04 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -17,6 +17,16 @@ from six import iteritems
from sphinx.errors import SphinxError
from sphinx.locale import _
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterable, List, Tuple, Type, Union # NOQA
+ from docutils import nodes # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.roles import XRefRole # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
+
class ObjType(object):
"""
@@ -38,9 +48,10 @@ class ObjType(object):
}
def __init__(self, lname, *roles, **attrs):
- self.lname = lname
- self.roles = roles
- self.attrs = self.known_attrs.copy()
+ # type: (unicode, Any, Any) -> None
+ self.lname = lname # type: unicode
+ self.roles = roles # type: Tuple
+ self.attrs = self.known_attrs.copy() # type: Dict
self.attrs.update(attrs)
@@ -59,17 +70,19 @@ class Index(object):
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
- name = None
- localname = None
- shortname = None
+ name = None # type: unicode
+ localname = None # type: unicode
+ shortname = None # type: unicode
def __init__(self, domain):
+ # type: (Domain) -> None
if self.name is None or self.localname is None:
raise SphinxError('Index subclass %s has no valid name or localname'
% self.__class__.__name__)
self.domain = domain
def generate(self, docnames=None):
+ # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
@@ -97,7 +110,7 @@ class Index(object):
Qualifier and description are not rendered e.g. in LaTeX output.
"""
- return []
+ raise NotImplementedError
class Domain(object):
@@ -128,23 +141,26 @@ class Domain(object):
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
- object_types = {}
+ object_types = {} # type: Dict[unicode, Any]
#: directive name -> directive class
- directives = {}
+ directives = {} # type: Dict[unicode, Any]
#: role name -> role callable
- roles = {}
+ roles = {} # type: Dict[unicode, Union[RoleFunction, XRefRole]]
#: a list of Index subclasses
- indices = []
+ indices = [] # type: List[Type[Index]]
#: role name -> a warning message if reference is missing
- dangling_warnings = {}
+ dangling_warnings = {} # type: Dict[unicode, unicode]
#: data value for a fresh environment
- initial_data = {}
+ initial_data = {} # type: Dict
+ #: data value
+ data = None # type: Dict
#: data version, bump this when the format of `self.data` changes
data_version = 0
def __init__(self, env):
- self.env = env
+ # type: (BuildEnvironment) -> None
+ self.env = env # type: BuildEnvironment
if self.name not in env.domaindata:
assert isinstance(self.initial_data, dict)
new_data = copy.deepcopy(self.initial_data)
@@ -154,18 +170,19 @@ class Domain(object):
self.data = env.domaindata[self.name]
if self.data['version'] != self.data_version:
raise IOError('data of %r domain out of date' % self.label)
- self._role_cache = {}
- self._directive_cache = {}
- self._role2type = {}
- self._type2role = {}
+ self._role_cache = {} # type: Dict[unicode, Callable]
+ self._directive_cache = {} # type: Dict[unicode, Callable]
+ self._role2type = {} # type: Dict[unicode, List[unicode]]
+ self._type2role = {} # type: Dict[unicode, unicode]
for name, obj in iteritems(self.object_types):
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self._type2role[name] = obj.roles[0] if obj.roles else ''
- self.objtypes_for_role = self._role2type.get
- self.role_for_objtype = self._type2role.get
+ self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
+ self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
def role(self, name):
+ # type: (unicode) -> Callable
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
@@ -175,14 +192,15 @@ class Domain(object):
return None
fullname = '%s:%s' % (self.name, name)
- def role_adapter(typ, rawtext, text, lineno, inliner,
- options={}, content=[]):
+ def role_adapter(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> nodes.Node # NOQA
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
+ # type: (unicode) -> Callable
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
@@ -193,8 +211,9 @@ class Domain(object):
fullname = '%s:%s' % (self.name, name)
BaseDirective = self.directives[name]
- class DirectiveAdapter(BaseDirective):
+ class DirectiveAdapter(BaseDirective): # type: ignore
def run(self):
+ # type: () -> List[nodes.Node]
self.name = fullname
return BaseDirective.run(self)
self._directive_cache[name] = DirectiveAdapter
@@ -203,10 +222,12 @@ class Domain(object):
# methods that should be overwritten
def clear_doc(self, docname):
+ # type: (unicode) -> None
"""Remove traces of a document in the domain-specific inventories."""
pass
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
"""Merge in data regarding *docnames* from a different domaindata
inventory (coming from a subprocess in parallel builds).
"""
@@ -215,10 +236,12 @@ class Domain(object):
self.__class__)
def process_doc(self, env, docname, document):
+ # type: (BuildEnvironment, unicode, nodes.Node) -> None
"""Process a document after it is read by the environment."""
pass
def process_field_xref(self, pnode):
+ # type: (nodes.Node) -> None
"""Process a pending xref created in a doc field.
For example, attach information about the current scope.
"""
@@ -226,6 +249,7 @@ class Domain(object):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
@@ -242,6 +266,7 @@ class Domain(object):
pass
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
"""Resolve the pending_xref *node* with the given *target*.
The reference comes from an "any" or similar role, which means that we
@@ -258,6 +283,7 @@ class Domain(object):
raise NotImplementedError
def get_objects(self):
+ # type: () -> Iterable[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
"""Return an iterable of "object descriptions", which are tuples with
five items:
@@ -277,6 +303,7 @@ class Domain(object):
return []
def get_type_name(self, type, primary=False):
+ # type: (ObjType, bool) -> unicode
"""Return full name for given ObjType."""
if primary:
return type.lname
diff --git a/sphinx/domains/c.py b/sphinx/domains/c.py
index 8d1eecbe5..b9afd10c2 100644
--- a/sphinx/domains/c.py
+++ b/sphinx/domains/c.py
@@ -22,6 +22,13 @@ from sphinx.directives import ObjectDescription
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, TypedField
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterator, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
# RE to split at word boundaries
wsplit_re = re.compile(r'(\W+)')
@@ -74,8 +81,9 @@ class CObject(ObjectDescription):
))
def _parse_type(self, node, ctype):
+ # type: (nodes.Node, unicode) -> None
# add cross-ref nodes for all words
- for part in [_f for _f in wsplit_re.split(ctype) if _f]:
+ for part in [_f for _f in wsplit_re.split(ctype) if _f]: # type: ignore
tnode = nodes.Text(part, part)
if part[0] in string.ascii_letters + '_' and \
part not in self.stopwords:
@@ -88,11 +96,12 @@ class CObject(ObjectDescription):
node += tnode
def _parse_arglist(self, arglist):
+ # type: (unicode) -> Iterator[unicode]
while True:
- m = c_funcptr_arg_sig_re.match(arglist)
+ m = c_funcptr_arg_sig_re.match(arglist) # type: ignore
if m:
yield m.group()
- arglist = c_funcptr_arg_sig_re.sub('', arglist)
+ arglist = c_funcptr_arg_sig_re.sub('', arglist) # type: ignore
if ',' in arglist:
_, arglist = arglist.split(',', 1)
else:
@@ -106,11 +115,12 @@ class CObject(ObjectDescription):
break
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> unicode
"""Transform a C signature into RST nodes."""
# first try the function pointer signature regex, it's more specific
- m = c_funcptr_sig_re.match(sig)
+ m = c_funcptr_sig_re.match(sig) # type: ignore
if m is None:
- m = c_sig_re.match(sig)
+ m = c_sig_re.match(sig) # type: ignore
if m is None:
raise ValueError('no match')
rettype, name, arglist, const = m.groups()
@@ -151,7 +161,7 @@ class CObject(ObjectDescription):
arg = arg.strip()
param = addnodes.desc_parameter('', '', noemph=True)
try:
- m = c_funcptr_arg_sig_re.match(arg)
+ m = c_funcptr_arg_sig_re.match(arg) # type: ignore
if m:
self._parse_type(param, m.group(1) + '(')
param += nodes.emphasis(m.group(2), m.group(2))
@@ -173,6 +183,7 @@ class CObject(ObjectDescription):
return fullname
def get_index_text(self, name):
+ # type: (unicode) -> unicode
if self.objtype == 'function':
return _('%s (C function)') % name
elif self.objtype == 'member':
@@ -187,6 +198,7 @@ class CObject(ObjectDescription):
return ''
def add_target_and_index(self, name, sig, signode):
+ # type: (unicode, unicode, addnodes.desc_signature) -> None
# for C API items we add a prefix since names are usually not qualified
# by a module name and so easily clash with e.g. section titles
targetname = 'c.' + name
@@ -209,6 +221,7 @@ class CObject(ObjectDescription):
targetname, '', None))
def before_content(self):
+ # type: () -> None
self.typename_set = False
if self.name == 'c:type':
if self.names:
@@ -216,12 +229,14 @@ class CObject(ObjectDescription):
self.typename_set = True
def after_content(self):
+ # type: () -> None
if self.typename_set:
self.env.ref_context.pop('c:type', None)
class CXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
if not has_explicit_title:
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
@@ -262,14 +277,16 @@ class CDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- }
+ } # type: Dict[unicode, Dict[unicode, Tuple[unicode, Any]]]
def clear_doc(self, docname):
+ # type: (unicode) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -277,6 +294,7 @@ class CDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
# becase TypedField can generate xrefs
@@ -290,6 +308,7 @@ class CDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
# strip pointer asterisk
target = target.rstrip(' *')
if target not in self.data['objects']:
@@ -300,11 +319,13 @@ class CDomain(Domain):
contnode, target))]
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield (refname, refname, type, docname, 'c.' + refname, 1)
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(CDomain)
return {
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index 7d0478336..2fceeded3 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -13,18 +13,30 @@ import re
from copy import deepcopy
from six import iteritems, text_type
+
from docutils import nodes
+from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
+from sphinx.util import logging
from sphinx.util.nodes import make_refnode
-from sphinx.util.compat import Directive
from sphinx.util.pycompat import UnicodeMixin
from sphinx.util.docfields import Field, GroupedField
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Match, Pattern, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.config import Config # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
"""
Important note on ids
----------------------------------------------------------------------------
@@ -41,13 +53,17 @@ from sphinx.util.docfields import Field, GroupedField
the index. All of the versions should work as permalinks.
- Tagnames
+ Signature Nodes and Tagnames
----------------------------------------------------------------------------
- Each desc_signature node will have the attribute 'sphinx_cpp_tagname' set to
- - 'templateParams', if the line is on the form 'template<...>',
- - 'templateIntroduction, if the line is on the form 'conceptName{...}'
+ Each signature is in a desc_signature node, where all children are
+ desc_signature_line nodes. Each of these lines will have the attribute
+ 'sphinx_cpp_tagname' set to one of the following (prioritized):
- 'declarator', if the line contains the name of the declared object.
+ - 'templateParams', if the line starts a template parameter list,
+ - 'templateParams', if the line has template parameters
+ Note: such lines might get a new tag in the future.
+ - 'templateIntroduction', if the line is of the form 'conceptName{...}'
No other desc_signature nodes should exist (so far).
@@ -86,9 +102,9 @@ from sphinx.util.docfields import Field, GroupedField
attribute-specifier-seq[opt] decl-specifier-seq[opt]
init-declarator-list[opt] ;
# Drop the semi-colon. For now: drop the attributes (TODO).
- # Use at most 1 init-declerator.
- -> decl-specifier-seq init-declerator
- -> decl-specifier-seq declerator initializer
+ # Use at most 1 init-declarator.
+ -> decl-specifier-seq init-declarator
+ -> decl-specifier-seq declarator initializer
decl-specifier ->
storage-class-specifier ->
@@ -149,22 +165,22 @@ from sphinx.util.docfields import Field, GroupedField
| template-argument-list "," template-argument "..."[opt]
template-argument ->
constant-expression
- | type-specifier-seq abstract-declerator
+ | type-specifier-seq abstract-declarator
| id-expression
- declerator ->
- ptr-declerator
+ declarator ->
+ ptr-declarator
| noptr-declarator parameters-and-qualifiers trailing-return-type
(TODO: for now we don't support trailing-eturn-type)
- ptr-declerator ->
- noptr-declerator
+ ptr-declarator ->
+ noptr-declarator
| ptr-operator ptr-declarator
- noptr-declerator ->
+ noptr-declarator ->
declarator-id attribute-specifier-seq[opt] ->
"..."[opt] id-expression
| rest-of-trailing
- | noptr-declerator parameters-and-qualifiers
+ | noptr-declarator parameters-and-qualifiers
| noptr-declarator "[" constant-expression[opt] "]"
attribute-specifier-seq[opt]
| "(" ptr-declarator ")"
@@ -226,20 +242,20 @@ from sphinx.util.docfields import Field, GroupedField
# Drop the attributes
-> decl-specifier-seq abstract-declarator[opt]
grammar, typedef-like: no initilizer
- decl-specifier-seq declerator
+ decl-specifier-seq declarator
Can start with a templateDeclPrefix.
member_object:
- goal: as a type_object which must have a declerator, and optionally
+ goal: as a type_object which must have a declarator, and optionally
with a initializer
grammar:
- decl-specifier-seq declerator initializer
+ decl-specifier-seq declarator initializer
Can start with a templateDeclPrefix.
function_object:
goal: a function declaration, TODO: what about templates? for now: skip
grammar: no initializer
- decl-specifier-seq declerator
+ decl-specifier-seq declarator
Can start with a templateDeclPrefix.
class_object:
@@ -317,7 +333,7 @@ _id_fundamental_v1 = {
'signed long': 'l',
'unsigned long': 'L',
'bool': 'b'
-}
+} # type: Dict[unicode, unicode]
_id_shorthands_v1 = {
'std::string': 'ss',
'std::ostream': 'os',
@@ -325,7 +341,7 @@ _id_shorthands_v1 = {
'std::iostream': 'ios',
'std::vector': 'v',
'std::map': 'm'
-}
+} # type: Dict[unicode, unicode]
_id_operator_v1 = {
'new': 'new-operator',
'new[]': 'new-array-operator',
@@ -374,7 +390,7 @@ _id_operator_v1 = {
'->': 'pointer-operator',
'()': 'call-operator',
'[]': 'subscript-operator'
-}
+} # type: Dict[unicode, unicode]
# ------------------------------------------------------------------------------
# Id v2 constants
@@ -420,7 +436,7 @@ _id_fundamental_v2 = {
'auto': 'Da',
'decltype(auto)': 'Dc',
'std::nullptr_t': 'Dn'
-}
+} # type: Dict[unicode, unicode]
_id_operator_v2 = {
'new': 'nw',
'new[]': 'na',
@@ -469,43 +485,50 @@ _id_operator_v2 = {
'->': 'pt',
'()': 'cl',
'[]': 'ix'
-}
+} # type: Dict[unicode, unicode]
class NoOldIdError(UnicodeMixin, Exception):
# Used to avoid implementing unneeded id generation for old id schmes.
def __init__(self, description=""):
+ # type: (unicode) -> None
self.description = description
def __unicode__(self):
+ # type: () -> unicode
return self.description
class DefinitionError(UnicodeMixin, Exception):
def __init__(self, description):
+ # type: (unicode) -> None
self.description = description
def __unicode__(self):
+ # type: () -> unicode
return self.description
class _DuplicateSymbolError(UnicodeMixin, Exception):
def __init__(self, symbol, candSymbol):
+ # type: (Symbol, Symbol) -> None
assert symbol
assert candSymbol
self.symbol = symbol
self.candSymbol = candSymbol
def __unicode__(self):
+ # type: () -> unicode
return "Internal C++ duplicate symbol error:\n%s" % self.symbol.dump(0)
class ASTBase(UnicodeMixin):
def __eq__(self, other):
+ # type: (Any) -> bool
if type(self) is not type(other):
return False
try:
- for key, value in iteritems(self.__dict__):
+ for key, value in iteritems(self.__dict__): # type: ignore
if value != getattr(other, key):
return False
except AttributeError:
@@ -513,23 +536,28 @@ class ASTBase(UnicodeMixin):
return True
def __ne__(self, other):
+ # type: (Any) -> bool
return not self.__eq__(other)
- __hash__ = None
+ __hash__ = None # type: Callable[[], int]
def clone(self):
+ # type: () -> ASTBase
"""Clone a definition expression node."""
return deepcopy(self)
def get_id_v1(self):
+ # type: () -> unicode
"""Return the v1 id for the node."""
raise NotImplementedError(repr(self))
def get_id_v2(self):
+ # type: () -> unicode
"""Return the v2 id for the node."""
raise NotImplementedError(repr(self))
def get_name(self):
+ # type: () -> unicode
"""Return the name.
Returns either `None` or a node with a name you might call
@@ -538,10 +566,12 @@ class ASTBase(UnicodeMixin):
raise NotImplementedError(repr(self))
def prefix_nested_name(self, prefix):
+ # type: (unicode) -> unicode
"""Prefix a name node (a node returned by :meth:`get_name`)."""
raise NotImplementedError(repr(self))
def __unicode__(self):
+ # type: () -> unicode
raise NotImplementedError(repr(self))
def __repr__(self):
@@ -549,29 +579,35 @@ class ASTBase(UnicodeMixin):
def _verify_description_mode(mode):
+ # type: (unicode) -> None
if mode not in ('lastIsName', 'noneIsName', 'markType', 'param'):
raise Exception("Description mode '%s' is invalid." % mode)
class ASTCPPAttribute(ASTBase):
def __init__(self, arg):
+ # type: (unicode) -> None
self.arg = arg
def __unicode__(self):
+ # type: () -> unicode
return "[[" + self.arg + "]]"
def describe_signature(self, signode):
+ # type: (addnodes.desc_signature) -> None
txt = text_type(self)
signode.append(nodes.Text(txt, txt))
class ASTGnuAttribute(ASTBase):
def __init__(self, name, args):
+ # type: (unicode, Any) -> None
self.name = name
self.args = args
def __unicode__(self):
- res = [self.name]
+ # type: () -> unicode
+ res = [self.name] # type: List[unicode]
if self.args:
res.append('(')
res.append(text_type(self.args))
@@ -581,10 +617,12 @@ class ASTGnuAttribute(ASTBase):
class ASTGnuAttributeList(ASTBase):
def __init__(self, attrs):
+ # type: (List[Any]) -> None
self.attrs = attrs
def __unicode__(self):
- res = ['__attribute__((']
+ # type: () -> unicode
+ res = ['__attribute__(('] # type: List[unicode]
first = True
for attr in self.attrs:
if not first:
@@ -595,6 +633,7 @@ class ASTGnuAttributeList(ASTBase):
return ''.join(res)
def describe_signature(self, signode):
+ # type: (addnodes.desc_signature) -> None
txt = text_type(self)
signode.append(nodes.Text(txt, txt))
@@ -603,12 +642,15 @@ class ASTIdAttribute(ASTBase):
"""For simple attributes defined by the user."""
def __init__(self, id):
+ # type: (unicode) -> None
self.id = id
def __unicode__(self):
+ # type: () -> unicode
return self.id
def describe_signature(self, signode):
+ # type: (addnodes.desc_signature) -> None
signode.append(nodes.Text(self.id, self.id))
@@ -616,29 +658,35 @@ class ASTParenAttribute(ASTBase):
"""For paren attributes defined by the user."""
def __init__(self, id, arg):
+ # type: (unicode, unicode) -> None
self.id = id
self.arg = arg
def __unicode__(self):
+ # type: () -> unicode
return self.id + '(' + self.arg + ')'
def describe_signature(self, signode):
+ # type: (addnodes.desc_signature) -> None
txt = text_type(self)
signode.append(nodes.Text(txt, txt))
class ASTIdentifier(ASTBase):
def __init__(self, identifier):
+ # type: (unicode) -> None
assert identifier is not None
self.identifier = identifier
def get_id_v1(self):
+ # type: () -> unicode
if self.identifier == 'size_t':
return 's'
else:
return self.identifier
def get_id_v2(self):
+ # type: () -> unicode
if self.identifier == "std":
return 'St'
elif self.identifier[0] == "~":
@@ -648,9 +696,11 @@ class ASTIdentifier(ASTBase):
return text_type(len(self.identifier)) + self.identifier
def __unicode__(self):
+ # type: () -> unicode
return self.identifier
def describe_signature(self, signode, mode, env, prefix, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
_verify_description_mode(mode)
if mode == 'markType':
targetText = prefix + self.identifier
@@ -673,6 +723,7 @@ class ASTIdentifier(ASTBase):
class ASTTemplateKeyParamPackIdDefault(ASTBase):
def __init__(self, key, identifier, parameterPack, default):
+ # type: (unicode, Any, bool, Any) -> None
assert key
if parameterPack:
assert default is None
@@ -682,9 +733,11 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
self.default = default
def get_identifier(self):
+ # type: () -> unicode
return self.identifier
def get_id_v2(self):
+ # type: () -> unicode
# this is not part of the normal name mangling in C++
res = []
if self.parameterPack:
@@ -694,7 +747,8 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def __unicode__(self):
- res = [self.key]
+ # type: () -> unicode
+ res = [self.key] # type: List[unicode]
if self.parameterPack:
if self.identifier:
res.append(' ')
@@ -709,6 +763,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
signode += nodes.Text(self.key)
if self.parameterPack:
if self.identifier:
@@ -725,18 +780,22 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
class ASTTemplateParamType(ASTBase):
def __init__(self, data):
+ # type: (Any) -> None
assert data
self.data = data
@property
def name(self):
+ # type: () -> ASTNestedName
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
def get_identifier(self):
+ # type: () -> unicode
return self.data.get_identifier()
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -745,14 +804,17 @@ class ASTTemplateParamType(ASTBase):
return self.data.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
return text_type(self.data)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
self.data.describe_signature(signode, mode, env, symbol)
class ASTTemplateParamTemplateType(ASTBase):
def __init__(self, nestedParams, data):
+ # type: (Any, Any) -> None
assert nestedParams
assert data
self.nestedParams = nestedParams
@@ -760,13 +822,16 @@ class ASTTemplateParamTemplateType(ASTBase):
@property
def name(self):
+ # type: () -> ASTNestedName
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
def get_identifier(self):
+ # type: () -> unicode
return self.data.get_identifier()
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -775,9 +840,11 @@ class ASTTemplateParamTemplateType(ASTBase):
return self.nestedParams.get_id_v2() + self.data.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
return text_type(self.nestedParams) + text_type(self.data)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
self.nestedParams.describe_signature(signode, 'noneIsName', env, symbol)
signode += nodes.Text(' ')
self.data.describe_signature(signode, mode, env, symbol)
@@ -785,15 +852,18 @@ class ASTTemplateParamTemplateType(ASTBase):
class ASTTemplateParamNonType(ASTBase):
def __init__(self, param):
+ # type: (Any) -> None
assert param
self.param = param
@property
def name(self):
+ # type: () -> ASTNestedName
id = self.get_identifier()
return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
def get_identifier(self):
+ # type: () -> unicode
name = self.param.name
if name:
assert len(name.names) == 1
@@ -804,6 +874,7 @@ class ASTTemplateParamNonType(ASTBase):
return None
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -812,18 +883,23 @@ class ASTTemplateParamNonType(ASTBase):
return '_' + self.param.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
return text_type(self.param)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
self.param.describe_signature(signode, mode, env, symbol)
class ASTTemplateParams(ASTBase):
def __init__(self, params):
+ # type: (Any) -> None
assert params is not None
self.params = params
+ self.isNested = False # whether it's a template template param
def get_id_v2(self):
+ # type: () -> unicode
res = []
res.append("I")
for param in self.params:
@@ -832,33 +908,51 @@ class ASTTemplateParams(ASTBase):
return ''.join(res)
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(u"template<")
res.append(u", ".join(text_type(a) for a in self.params))
res.append(u"> ")
return ''.join(res)
- def describe_signature(self, signode, mode, env, symbol):
- signode.sphinx_cpp_tagname = 'templateParams'
- signode += nodes.Text("template<")
+ def describe_signature(self, parentNode, mode, env, symbol, lineSpec=None):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+    # 'lineSpec' is defaulted because of template template parameters
+ def makeLine(parentNode=parentNode):
+ signode = addnodes.desc_signature_line()
+ parentNode += signode
+ signode.sphinx_cpp_tagname = 'templateParams'
+ return signode
+ if self.isNested:
+ lineNode = parentNode
+ else:
+ lineNode = makeLine()
+ lineNode += nodes.Text("template<")
first = True
for param in self.params:
if not first:
- signode += nodes.Text(", ")
+ lineNode += nodes.Text(", ")
first = False
- param.describe_signature(signode, mode, env, symbol)
- signode += nodes.Text(">")
+ if lineSpec:
+ lineNode = makeLine()
+ param.describe_signature(lineNode, mode, env, symbol)
+ if lineSpec and not first:
+ lineNode = makeLine()
+ lineNode += nodes.Text(">")
class ASTTemplateIntroductionParameter(ASTBase):
def __init__(self, identifier, parameterPack):
+ # type: (Any, Any) -> None
self.identifier = identifier
self.parameterPack = parameterPack
def get_identifier(self):
+ # type: () -> unicode
return self.identifier
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
@@ -870,6 +964,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
return '0' # we need to put something
def get_id_v2_as_arg(self):
+ # type: () -> unicode
# used for the implicit requires clause
res = self.identifier.get_id_v2()
if self.parameterPack:
@@ -878,13 +973,15 @@ class ASTTemplateIntroductionParameter(ASTBase):
return res
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.parameterPack:
res.append('...')
res.append(text_type(self.identifier))
return ''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
if self.parameterPack:
signode += nodes.Text('...')
self.identifier.describe_signature(signode, mode, env, '', symbol)
@@ -892,6 +989,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
class ASTTemplateIntroduction(ASTBase):
def __init__(self, concept, params):
+ # type: (Any, List[Any]) -> None
assert len(params) > 0
self.concept = concept
self.params = params
@@ -899,6 +997,7 @@ class ASTTemplateIntroduction(ASTBase):
# id_v1 does not exist
def get_id_v2(self):
+ # type: () -> unicode
# first do the same as a normal template parameter list
res = []
res.append("I")
@@ -916,6 +1015,7 @@ class ASTTemplateIntroduction(ASTBase):
return ''.join(res)
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.concept))
res.append('{')
@@ -923,7 +1023,11 @@ class ASTTemplateIntroduction(ASTBase):
res.append('} ')
return ''.join(res)
- def describe_signature(self, signode, mode, env, symbol):
+ def describe_signature(self, parentNode, mode, env, symbol, lineSpec):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
+ # Note: 'lineSpec' has no effect on template introductions.
+ signode = addnodes.desc_signature_line()
+ parentNode += signode
signode.sphinx_cpp_tagname = 'templateIntroduction'
self.concept.describe_signature(signode, 'markType', env, symbol)
signode += nodes.Text('{')
@@ -938,6 +1042,7 @@ class ASTTemplateIntroduction(ASTBase):
class ASTTemplateDeclarationPrefix(ASTBase):
def __init__(self, templates):
+ # type: (List[Any]) -> None
assert templates is not None
assert len(templates) > 0
self.templates = templates
@@ -945,6 +1050,7 @@ class ASTTemplateDeclarationPrefix(ASTBase):
# id_v1 does not exist
def get_id_v2(self):
+ # type: () -> unicode
# this is not part of a normal name mangling system
res = []
for t in self.templates:
@@ -952,45 +1058,51 @@ class ASTTemplateDeclarationPrefix(ASTBase):
return u''.join(res)
def __unicode__(self):
+ # type: () -> unicode
res = []
for t in self.templates:
res.append(text_type(t))
return u''.join(res)
- def describe_signature(self, signode, mode, env, symbol):
+ def describe_signature(self, signode, mode, env, symbol, lineSpec):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol, bool) -> None
_verify_description_mode(mode)
for t in self.templates:
- templateNode = addnodes.desc_signature_line()
- t.describe_signature(templateNode, 'lastIsName', env, symbol)
- signode += templateNode
+ t.describe_signature(signode, 'lastIsName', env, symbol, lineSpec)
class ASTOperatorBuildIn(ASTBase):
def __init__(self, op):
+ # type: (unicode) -> None
self.op = op
def is_operator(self):
+ # type: () -> bool
return True
def get_id_v1(self):
+ # type: () -> unicode
if self.op not in _id_operator_v1:
raise Exception('Internal error: Build-in operator "%s" can not '
'be mapped to an id.' % self.op)
return _id_operator_v1[self.op]
def get_id_v2(self):
+ # type: () -> unicode
if self.op not in _id_operator_v2:
raise Exception('Internal error: Build-in operator "%s" can not '
'be mapped to an id.' % self.op)
return _id_operator_v2[self.op]
def __unicode__(self):
+ # type: () -> unicode
if self.op in ('new', 'new[]', 'delete', 'delete[]'):
return u'operator ' + self.op
else:
return u'operator' + self.op
def describe_signature(self, signode, mode, env, prefix, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1001,24 +1113,31 @@ class ASTOperatorBuildIn(ASTBase):
class ASTOperatorType(ASTBase):
def __init__(self, type):
+ # type: (Any) -> None
self.type = type
def is_operator(self):
+ # type: () -> bool
return True
def get_id_v1(self):
+ # type: () -> unicode
return u'castto-%s-operator' % self.type.get_id_v1()
def get_id_v2(self):
+ # type: () -> unicode
return u'cv' + self.type.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
return u''.join(['operator ', text_type(self.type)])
def get_name_no_template(self):
+ # type: () -> unicode
return text_type(self)
def describe_signature(self, signode, mode, env, prefix, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1029,21 +1148,27 @@ class ASTOperatorType(ASTBase):
class ASTOperatorLiteral(ASTBase):
def __init__(self, identifier):
+ # type: (Any) -> None
self.identifier = identifier
def is_operator(self):
+ # type: () -> bool
return True
def get_id_v1(self):
+ # type: () -> unicode
raise NoOldIdError()
def get_id_v2(self):
+ # type: () -> unicode
return u'li' + self.identifier.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
return u'operator""' + text_type(self.identifier)
def describe_signature(self, signode, mode, env, prefix, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1054,38 +1179,46 @@ class ASTOperatorLiteral(ASTBase):
class ASTTemplateArgConstant(ASTBase):
def __init__(self, value):
+ # type: (Any) -> None
self.value = value
def __unicode__(self):
+ # type: () -> unicode
return text_type(self.value)
def get_id_v1(self):
+ # type: () -> unicode
return text_type(self).replace(u' ', u'-')
def get_id_v2(self):
+ # type: () -> unicode
# TODO: doing this properly needs parsing of expressions, let's just
# juse it verbatim for now
return u'X' + text_type(self) + u'E'
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text(text_type(self))
class ASTTemplateArgs(ASTBase):
def __init__(self, args):
+ # type: (List[Any]) -> None
assert args is not None
assert len(args) > 0
self.args = args
def get_id_v1(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
res.append(':')
res.append(u'.'.join(a.get_id_v1() for a in self.args))
res.append(':')
return u''.join(res)
def get_id_v2(self):
+ # type: () -> unicode
res = []
res.append('I')
for a in self.args:
@@ -1094,10 +1227,12 @@ class ASTTemplateArgs(ASTBase):
return u''.join(res)
def __unicode__(self):
+ # type: () -> unicode
res = ', '.join(text_type(a) for a in self.args)
return '<' + res + '>'
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('<')
first = True
@@ -1111,31 +1246,37 @@ class ASTTemplateArgs(ASTBase):
class ASTNestedNameElement(ASTBase):
def __init__(self, identifier, templateArgs):
+ # type: (Any, Any) -> None
self.identifier = identifier
self.templateArgs = templateArgs
def is_operator(self):
+ # type: () -> bool
return False
def get_id_v1(self):
+ # type: () -> unicode
res = self.identifier.get_id_v1()
if self.templateArgs:
res += self.templateArgs.get_id_v1()
return res
def get_id_v2(self):
+ # type: () -> unicode
res = self.identifier.get_id_v2()
if self.templateArgs:
res += self.templateArgs.get_id_v2()
return res
def __unicode__(self):
+ # type: () -> unicode
res = text_type(self.identifier)
if self.templateArgs:
res += text_type(self.templateArgs)
return res
def describe_signature(self, signode, mode, env, prefix, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
self.identifier.describe_signature(signode, mode, env, prefix, symbol)
if self.templateArgs:
self.templateArgs.describe_signature(signode, mode, env, symbol)
@@ -1143,15 +1284,18 @@ class ASTNestedNameElement(ASTBase):
class ASTNestedName(ASTBase):
def __init__(self, names, rooted):
+ # type: (List[Any], bool) -> None
assert len(names) > 0
self.names = names
self.rooted = rooted
@property
def name(self):
+ # type: () -> ASTNestedName
return self
def num_templates(self):
+ # type: () -> int
count = 0
for n in self.names:
if n.is_operator():
@@ -1161,6 +1305,7 @@ class ASTNestedName(ASTBase):
return count
def get_id_v1(self):
+ # type: () -> unicode
tt = text_type(self)
if tt in _id_shorthands_v1:
return _id_shorthands_v1[tt]
@@ -1168,7 +1313,8 @@ class ASTNestedName(ASTBase):
return u'::'.join(n.get_id_v1() for n in self.names)
def get_id_v2(self, modifiers=""):
- res = []
+ # type: (unicode) -> unicode
+ res = [] # type: List[unicode]
if len(self.names) > 1 or len(modifiers) > 0:
res.append('N')
res.append(modifiers)
@@ -1179,7 +1325,8 @@ class ASTNestedName(ASTBase):
return u''.join(res)
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.rooted:
res.append('')
for n in self.names:
@@ -1187,15 +1334,16 @@ class ASTNestedName(ASTBase):
return '::'.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# just print the name part, with template args, not template params
if mode == 'lastIsName':
- addname = []
+ addname = [] # type: List[unicode]
if self.rooted:
addname.append('')
for n in self.names[:-1]:
addname.append(text_type(n))
- addname = '::'.join(addname)
+ addname = '::'.join(addname) # type: ignore
if len(self.names) > 1:
addname += '::'
signode += addnodes.desc_addname(addname, addname)
@@ -1209,7 +1357,7 @@ class ASTNestedName(ASTBase):
# each element should be a pending xref targeting the complete
# prefix. however, only the identifier part should be a link, such
# that template args can be a link as well.
- prefix = ''
+ prefix = '' # type: unicode
first = True
for name in self.names:
if not first:
@@ -1217,7 +1365,7 @@ class ASTNestedName(ASTBase):
prefix += '::'
first = False
if name != '':
- name.describe_signature(signode, mode, env, prefix, symbol)
+ name.describe_signature(signode, mode, env, prefix, symbol) # type: ignore
prefix += text_type(name)
else:
raise Exception('Unknown description mode: %s' % mode)
@@ -1225,12 +1373,15 @@ class ASTNestedName(ASTBase):
class ASTTrailingTypeSpecFundamental(ASTBase):
def __init__(self, name):
+ # type: (unicode) -> None
self.name = name
def __unicode__(self):
+ # type: () -> unicode
return self.name
def get_id_v1(self):
+ # type: () -> unicode
res = []
for a in self.name.split(' '):
if a in _id_fundamental_v1:
@@ -1240,6 +1391,7 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
return u'-'.join(res)
def get_id_v2(self):
+ # type: () -> unicode
if self.name not in _id_fundamental_v2:
raise Exception(
'Semi-internal error: Fundamental type "%s" can not be mapped '
@@ -1248,26 +1400,32 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
return _id_fundamental_v2[self.name]
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
signode += nodes.Text(text_type(self.name))
class ASTTrailingTypeSpecName(ASTBase):
def __init__(self, prefix, nestedName):
+ # type: (unicode, Any) -> None
self.prefix = prefix
self.nestedName = nestedName
@property
def name(self):
+ # type: () -> Any
return self.nestedName
def get_id_v1(self):
+ # type: () -> unicode
return self.nestedName.get_id_v1()
def get_id_v2(self):
+ # type: () -> unicode
return self.nestedName.get_id_v2()
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.prefix:
res.append(self.prefix)
res.append(' ')
@@ -1275,36 +1433,42 @@ class ASTTrailingTypeSpecName(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
if self.prefix:
signode += addnodes.desc_annotation(self.prefix, self.prefix)
signode += nodes.Text(' ')
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
-class ASTFunctinoParameter(ASTBase):
+class ASTFunctionParameter(ASTBase):
def __init__(self, arg, ellipsis=False):
+ # type: (Any, bool) -> None
self.arg = arg
self.ellipsis = ellipsis
def get_id_v1(self):
+ # type: () -> unicode
if self.ellipsis:
return 'z'
else:
return self.arg.get_id_v1()
def get_id_v2(self):
+ # type: () -> unicode
if self.ellipsis:
return 'z'
else:
return self.arg.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
if self.ellipsis:
return '...'
else:
return text_type(self.arg)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.ellipsis:
signode += nodes.Text('...')
@@ -1315,6 +1479,7 @@ class ASTFunctinoParameter(ASTBase):
class ASTParametersQualifiers(ASTBase):
def __init__(self, args, volatile, const, refQual, exceptionSpec, override,
final, initializer):
+ # type: (List[Any], bool, bool, unicode, unicode, bool, bool, unicode) -> None
self.args = args
self.volatile = volatile
self.const = const
@@ -1327,6 +1492,7 @@ class ASTParametersQualifiers(ASTBase):
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
res = []
if self.volatile:
res.append('V')
@@ -1339,6 +1505,7 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(res)
def get_param_id_v1(self):
+ # type: () -> unicode
if len(self.args) == 0:
return ''
else:
@@ -1347,6 +1514,7 @@ class ASTParametersQualifiers(ASTBase):
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
res = []
if self.volatile:
res.append('V')
@@ -1359,13 +1527,15 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(res)
def get_param_id_v2(self):
+ # type: () -> unicode
if len(self.args) == 0:
return 'v'
else:
return u''.join(a.get_id_v2() for a in self.args)
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
res.append('(')
first = True
for a in self.args:
@@ -1394,6 +1564,7 @@ class ASTParametersQualifiers(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
paramlist = addnodes.desc_parameterlist()
for arg in self.args:
@@ -1431,6 +1602,7 @@ class ASTParametersQualifiers(ASTBase):
class ASTDeclSpecsSimple(ASTBase):
def __init__(self, storage, threadLocal, inline, virtual, explicit,
constexpr, volatile, const, friend, attrs):
+ # type: (unicode, bool, bool, bool, bool, bool, bool, bool, bool, List[Any]) -> None
self.storage = storage
self.threadLocal = threadLocal
self.inline = inline
@@ -1443,6 +1615,7 @@ class ASTDeclSpecsSimple(ASTBase):
self.attrs = attrs
def mergeWith(self, other):
+ # type: (ASTDeclSpecsSimple) -> ASTDeclSpecsSimple
if not other:
return self
return ASTDeclSpecsSimple(self.storage or other.storage,
@@ -1457,7 +1630,8 @@ class ASTDeclSpecsSimple(ASTBase):
self.attrs + other.attrs)
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
res.extend(text_type(attr) for attr in self.attrs)
if self.storage:
res.append(self.storage)
@@ -1480,6 +1654,7 @@ class ASTDeclSpecsSimple(ASTBase):
return u' '.join(res)
def describe_signature(self, modifiers):
+ # type: (List[nodes.Node]) -> None
def _add(modifiers, text):
if len(modifiers) > 0:
modifiers.append(nodes.Text(' '))
@@ -1520,9 +1695,11 @@ class ASTDeclSpecs(ASTBase):
@property
def name(self):
+ # type: () -> unicode
return self.trailingTypeSpec.name
def get_id_v1(self):
+ # type: () -> unicode
res = []
res.append(self.trailingTypeSpec.get_id_v1())
if self.allSpecs.volatile:
@@ -1532,6 +1709,7 @@ class ASTDeclSpecs(ASTBase):
return u''.join(res)
def get_id_v2(self):
+ # type: () -> unicode
res = []
if self.leftSpecs.volatile or self.rightSpecs.volatile:
res.append('V')
@@ -1541,7 +1719,8 @@ class ASTDeclSpecs(ASTBase):
return u''.join(res)
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
l = text_type(self.leftSpecs)
if len(l) > 0:
if len(res) > 0:
@@ -1559,8 +1738,9 @@ class ASTDeclSpecs(ASTBase):
return "".join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
- modifiers = []
+ modifiers = [] # type: List[nodes.Node]
def _add(modifiers, text):
if len(modifiers) > 0:
@@ -1586,15 +1766,19 @@ class ASTDeclSpecs(ASTBase):
class ASTArray(ASTBase):
def __init__(self, size):
+ # type: (unicode) -> None
self.size = size
def __unicode__(self):
+ # type: () -> unicode
return u''.join(['[', text_type(self.size), ']'])
def get_id_v1(self):
+ # type: () -> unicode
return u'A'
def get_id_v2(self):
+ # type: () -> unicode
# TODO: this should maybe be done differently
return u'A' + text_type(self.size) + u'_'
@@ -1605,6 +1789,7 @@ class ASTArray(ASTBase):
class ASTDeclaratorPtr(ASTBase):
def __init__(self, next, volatile, const):
+ # type: (Any, bool, bool) -> None
assert next
self.next = next
self.volatile = volatile
@@ -1612,14 +1797,17 @@ class ASTDeclaratorPtr(ASTBase):
@property
def name(self):
+ # type: () -> unicode
return self.next.name
def require_space_after_declSpecs(self):
+ # type: () -> bool
# TODO: if has paramPack, then False ?
return True
def __unicode__(self):
- res = ['*']
+ # type: () -> unicode
+ res = ['*'] # type: List[unicode]
if self.volatile:
res.append('volatile')
if self.const:
@@ -1635,12 +1823,15 @@ class ASTDeclaratorPtr(ASTBase):
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v1()
def get_param_id_v1(self):
+ # type: () -> unicode
return self.next.get_param_id_v1()
def get_ptr_suffix_id_v1(self):
+ # type: () -> unicode
res = 'P'
if self.volatile:
res += 'V'
@@ -1651,13 +1842,16 @@ class ASTDeclaratorPtr(ASTBase):
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v2()
def get_param_id_v2(self):
+ # type: () -> unicode
return self.next.get_param_id_v2()
def get_ptr_suffix_id_v2(self):
- res = [self.next.get_ptr_suffix_id_v2()]
+ # type: () -> unicode
+ res = [self.next.get_ptr_suffix_id_v2()] # type: List[unicode]
res.append('P')
if self.volatile:
res.append('V')
@@ -1666,8 +1860,9 @@ class ASTDeclaratorPtr(ASTBase):
return u''.join(res)
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
# ReturnType *next, so we are part of the return type of 'next
- res = ['P']
+ res = ['P'] # type: List[unicode]
if self.volatile:
res.append('V')
if self.const:
@@ -1678,9 +1873,11 @@ class ASTDeclaratorPtr(ASTBase):
# ------------------------------------------------------------------------
def is_function_type(self):
+ # type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("*")
@@ -1700,51 +1897,64 @@ class ASTDeclaratorPtr(ASTBase):
class ASTDeclaratorRef(ASTBase):
def __init__(self, next):
+ # type: (Any) -> None
assert next
self.next = next
@property
def name(self):
+ # type: () -> unicode
return self.next.name
def require_space_after_declSpecs(self):
+ # type: () -> bool
return self.next.require_space_after_declSpecs()
def __unicode__(self):
+ # type: () -> unicode
return '&' + text_type(self.next)
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v1()
def get_param_id_v1(self): # only the parameters (if any)
+ # type: () -> unicode
return self.next.get_param_id_v1()
def get_ptr_suffix_id_v1(self):
+ # type: () -> unicode
return u'R' + self.next.get_ptr_suffix_id_v1()
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v2()
def get_param_id_v2(self): # only the parameters (if any)
+ # type: () -> unicode
return self.next.get_param_id_v2()
def get_ptr_suffix_id_v2(self):
+ # type: () -> unicode
return self.next.get_ptr_suffix_id_v2() + u'R'
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
# ReturnType &next, so we are part of the return type of 'next
return self.next.get_type_id_v2(returnTypeId=u'R' + returnTypeId)
# ------------------------------------------------------------------------
def is_function_type(self):
+ # type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("&")
self.next.describe_signature(signode, mode, env, symbol)
@@ -1752,17 +1962,21 @@ class ASTDeclaratorRef(ASTBase):
class ASTDeclaratorParamPack(ASTBase):
def __init__(self, next):
+ # type: (Any) -> None
assert next
self.next = next
@property
def name(self):
+ # type: () -> unicode
return self.next.name
def require_space_after_declSpecs(self):
+ # type: () -> bool
return False
def __unicode__(self):
+ # type: () -> unicode
res = text_type(self.next)
if self.next.name:
res = ' ' + res
@@ -1771,35 +1985,43 @@ class ASTDeclaratorParamPack(ASTBase):
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v1()
def get_param_id_v1(self): # only the parameters (if any)
+ # type: () -> unicode
return self.next.get_param_id_v1()
def get_ptr_suffix_id_v1(self):
+ # type: () -> unicode
return 'Dp' + self.next.get_ptr_suffix_id_v2()
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v2()
def get_param_id_v2(self): # only the parameters (if any)
return self.next.get_param_id_v2()
def get_ptr_suffix_id_v2(self):
+ # type: () -> unicode
return self.next.get_ptr_suffix_id_v2() + u'Dp'
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
# ReturnType... next, so we are part of the return type of 'next
return self.next.get_type_id_v2(returnTypeId=u'Dp' + returnTypeId)
# ------------------------------------------------------------------------
def is_function_type(self):
+ # type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text("...")
if self.next.name:
@@ -1809,6 +2031,7 @@ class ASTDeclaratorParamPack(ASTBase):
class ASTDeclaratorMemPtr(ASTBase):
def __init__(self, className, const, volatile, next):
+ # type: (Any, bool, bool, Any) -> None
assert className
assert next
self.className = className
@@ -1818,12 +2041,15 @@ class ASTDeclaratorMemPtr(ASTBase):
@property
def name(self):
+ # type: () -> unicode
return self.next.name
def require_space_after_declSpecs(self):
+ # type: () -> bool
return True
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.className))
res.append('::*')
@@ -1839,29 +2065,36 @@ class ASTDeclaratorMemPtr(ASTBase):
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
raise NoOldIdError()
def get_param_id_v1(self): # only the parameters (if any)
+ # type: () -> unicode
raise NoOldIdError()
def get_ptr_suffix_id_v1(self):
+ # type: () -> unicode
raise NoOldIdError()
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
return self.next.get_modifiers_id_v2()
def get_param_id_v2(self): # only the parameters (if any)
+ # type: () -> unicode
return self.next.get_param_id_v2()
def get_ptr_suffix_id_v2(self):
+ # type: () -> unicode
raise NotImplementedError()
return self.next.get_ptr_suffix_id_v2() + u'Dp'
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
# ReturnType name::* next, so we are part of the return type of next
- nextReturnTypeId = ''
+ nextReturnTypeId = '' # type: unicode
if self.volatile:
nextReturnTypeId += 'V'
if self.const:
@@ -1874,9 +2107,11 @@ class ASTDeclaratorMemPtr(ASTBase):
# ------------------------------------------------------------------------
def is_function_type(self):
+ # type: () -> bool
return self.next.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.className.describe_signature(signode, mode, env, symbol)
signode += nodes.Text('::*')
@@ -1897,6 +2132,7 @@ class ASTDeclaratorMemPtr(ASTBase):
class ASTDeclaratorParen(ASTBase):
def __init__(self, inner, next):
+ # type: (Any, Any) -> None
assert inner
assert next
self.inner = inner
@@ -1905,13 +2141,16 @@ class ASTDeclaratorParen(ASTBase):
@property
def name(self):
+ # type: () -> unicode
return self.inner.name
def require_space_after_declSpecs(self):
+ # type: () -> bool
return True
def __unicode__(self):
- res = ['(']
+ # type: () -> unicode
+ res = ['('] # type: List[unicode]
res.append(text_type(self.inner))
res.append(')')
res.append(text_type(self.next))
@@ -1920,12 +2159,15 @@ class ASTDeclaratorParen(ASTBase):
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self):
+ # type: () -> unicode
return self.inner.get_modifiers_id_v1()
def get_param_id_v1(self): # only the parameters (if any)
+ # type: () -> unicode
return self.inner.get_param_id_v1()
def get_ptr_suffix_id_v1(self):
+ # type: () -> unicode
raise NoOldIdError() # TODO: was this implemented before?
return self.next.get_ptr_suffix_id_v2() + \
self.inner.get_ptr_suffix_id_v2()
@@ -1933,16 +2175,20 @@ class ASTDeclaratorParen(ASTBase):
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self):
+ # type: () -> unicode
return self.inner.get_modifiers_id_v2()
def get_param_id_v2(self): # only the parameters (if any)
+ # type: () -> unicode
return self.inner.get_param_id_v2()
def get_ptr_suffix_id_v2(self):
+ # type: () -> unicode
return self.inner.get_ptr_suffix_id_v2() + \
self.next.get_ptr_suffix_id_v2()
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
# ReturnType (inner)next, so 'inner' returns everything outside
nextId = self.next.get_type_id_v2(returnTypeId)
return self.inner.get_type_id_v2(returnTypeId=nextId)
@@ -1950,9 +2196,11 @@ class ASTDeclaratorParen(ASTBase):
# ------------------------------------------------------------------------
def is_function_type(self):
+ # type: () -> bool
return self.inner.is_function_type()
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
signode += nodes.Text('(')
self.inner.describe_signature(signode, mode, env, symbol)
@@ -1960,19 +2208,22 @@ class ASTDeclaratorParen(ASTBase):
self.next.describe_signature(signode, "noneIsName", env, symbol)
-class ASTDecleratorNameParamQual(ASTBase):
+class ASTDeclaratorNameParamQual(ASTBase):
def __init__(self, declId, arrayOps, paramQual):
+ # type: (Any, List[Any], Any) -> None
self.declId = declId
self.arrayOps = arrayOps
self.paramQual = paramQual
@property
def name(self):
+ # type: () -> unicode
return self.declId
# Id v1 ------------------------------------------------------------------
def get_modifiers_id_v1(self): # only the modifiers for a function, e.g.,
+ # type: () -> unicode
# cv-qualifiers
if self.paramQual:
return self.paramQual.get_modifiers_id_v1()
@@ -1980,17 +2231,20 @@ class ASTDecleratorNameParamQual(ASTBase):
"This should only be called on a function: %s" % text_type(self))
def get_param_id_v1(self): # only the parameters (if any)
+ # type: () -> unicode
if self.paramQual:
return self.paramQual.get_param_id_v1()
else:
return ''
def get_ptr_suffix_id_v1(self): # only the array specifiers
+ # type: () -> unicode
return u''.join(a.get_id_v1() for a in self.arrayOps)
# Id v2 ------------------------------------------------------------------
def get_modifiers_id_v2(self): # only the modifiers for a function, e.g.,
+ # type: () -> unicode
# cv-qualifiers
if self.paramQual:
return self.paramQual.get_modifiers_id_v2()
@@ -1998,15 +2252,18 @@ class ASTDecleratorNameParamQual(ASTBase):
"This should only be called on a function: %s" % text_type(self))
def get_param_id_v2(self): # only the parameters (if any)
+ # type: () -> unicode
if self.paramQual:
return self.paramQual.get_param_id_v2()
else:
return ''
def get_ptr_suffix_id_v2(self): # only the array specifiers
+ # type: () -> unicode
return u''.join(a.get_id_v2() for a in self.arrayOps)
def get_type_id_v2(self, returnTypeId):
+ # type: (unicode) -> unicode
res = []
# TOOD: can we actually have both array ops and paramQual?
res.append(self.get_ptr_suffix_id_v2())
@@ -2023,12 +2280,15 @@ class ASTDecleratorNameParamQual(ASTBase):
# ------------------------------------------------------------------------
def require_space_after_declSpecs(self):
+ # type: () -> bool
return self.declId is not None
def is_function_type(self):
+ # type: () -> bool
return self.paramQual is not None
def __unicode__(self):
+ # type: () -> unicode
res = []
if self.declId:
res.append(text_type(self.declId))
@@ -2039,6 +2299,7 @@ class ASTDecleratorNameParamQual(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
@@ -2050,18 +2311,22 @@ class ASTDecleratorNameParamQual(ASTBase):
class ASTInitializer(ASTBase):
def __init__(self, value):
+ # type: (unicode) -> None
self.value = value
def __unicode__(self):
+ # type: () -> unicode
return u''.join([' = ', text_type(self.value)])
def describe_signature(self, signode, mode):
+ # type: (addnodes.desc_signature, unicode) -> None
_verify_description_mode(mode)
signode += nodes.Text(text_type(self))
class ASTType(ASTBase):
def __init__(self, declSpecs, decl):
+ # type: (Any, Any) -> None
assert declSpecs
assert decl
self.declSpecs = declSpecs
@@ -2069,10 +2334,12 @@ class ASTType(ASTBase):
@property
def name(self):
+ # type: () -> unicode
name = self.decl.name
return name
def get_id_v1(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
@@ -2097,6 +2364,7 @@ class ASTType(ASTBase):
return u''.join(res)
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
@@ -2117,6 +2385,7 @@ class ASTType(ASTBase):
return u''.join(res)
def __unicode__(self):
+ # type: () -> unicode
res = []
declSpecs = text_type(self.declSpecs)
res.append(declSpecs)
@@ -2126,12 +2395,14 @@ class ASTType(ASTBase):
return u''.join(res)
def get_type_declaration_prefix(self):
+ # type: () -> unicode
if self.declSpecs.trailingTypeSpec:
return 'typedef'
else:
return 'type'
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.declSpecs.describe_signature(signode, 'markType', env, symbol)
if (self.decl.require_space_after_declSpecs() and
@@ -2142,14 +2413,17 @@ class ASTType(ASTBase):
class ASTTypeWithInit(ASTBase):
def __init__(self, type, init):
+ # type: (Any, Any) -> None
self.type = type
self.init = init
@property
def name(self):
+ # type: () -> unicode
return self.type.name
def get_id_v1(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
if objectType == 'member':
return symbol.get_full_nested_name().get_id_v1() + u'__' \
+ self.type.get_id_v1()
@@ -2157,12 +2431,14 @@ class ASTTypeWithInit(ASTBase):
return self.type.get_id_v1(objectType)
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
if objectType == 'member':
return symbol.get_full_nested_name().get_id_v2()
else:
return self.type.get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.type))
if self.init:
@@ -2170,6 +2446,7 @@ class ASTTypeWithInit(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.type.describe_signature(signode, mode, env, symbol=symbol)
if self.init:
@@ -2178,16 +2455,20 @@ class ASTTypeWithInit(ASTBase):
class ASTTypeUsing(ASTBase):
def __init__(self, name, type):
+ # type: (Any, Any) -> None
self.name = name
self.type = type
def get_id_v1(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
raise NoOldIdError()
def get_id_v2(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.name))
if self.type:
@@ -2196,9 +2477,11 @@ class ASTTypeUsing(ASTBase):
return u''.join(res)
def get_type_declaration_prefix(self):
+ # type: () -> unicode
return 'using'
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.type:
@@ -2208,21 +2491,26 @@ class ASTTypeUsing(ASTBase):
class ASTConcept(ASTBase):
def __init__(self, nestedName, isFunction, initializer):
+ # type: (Any, bool, Any) -> None
self.nestedName = nestedName
self.isFunction = isFunction # otherwise it's a variable concept
self.initializer = initializer
@property
def name(self):
+ # type: () -> unicode
return self.nestedName
def get_id_v1(self, objectType=None, symbol=None):
+ # type: (unicode, Symbol) -> unicode
raise NoOldIdError()
- def get_id_v2(self, objectType, symbol):
+ def get_id_v2(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
res = text_type(self.nestedName)
if self.isFunction:
res += "()"
@@ -2231,6 +2519,7 @@ class ASTConcept(ASTBase):
return res
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
signode += nodes.Text(text_type("bool "))
self.nestedName.describe_signature(signode, mode, env, symbol)
if self.isFunction:
@@ -2241,13 +2530,15 @@ class ASTConcept(ASTBase):
class ASTBaseClass(ASTBase):
def __init__(self, name, visibility, virtual, pack):
+ # type: (Any, unicode, bool, bool) -> None
self.name = name
self.visibility = visibility
self.virtual = virtual
self.pack = pack
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.visibility != 'private':
res.append(self.visibility)
res.append(' ')
@@ -2259,6 +2550,7 @@ class ASTBaseClass(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
if self.visibility != 'private':
signode += addnodes.desc_annotation(self.visibility,
@@ -2274,17 +2566,21 @@ class ASTBaseClass(ASTBase):
class ASTClass(ASTBase):
def __init__(self, name, final, bases):
+ # type: (Any, bool, List[Any]) -> None
self.name = name
self.final = final
self.bases = bases
- def get_id_v1(self, objectType, symbol):
+ def get_id_v1(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v1()
- def get_id_v2(self, objectType, symbol):
+ def get_id_v2(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.name))
if self.final:
@@ -2300,6 +2596,7 @@ class ASTClass(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.final:
@@ -2315,18 +2612,22 @@ class ASTClass(ASTBase):
class ASTEnum(ASTBase):
def __init__(self, name, scoped, underlyingType):
+ # type: (Any, unicode, Any) -> None
self.name = name
self.scoped = scoped
self.underlyingType = underlyingType
- def get_id_v1(self, objectType, symbol):
+ def get_id_v1(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
raise NoOldIdError()
- def get_id_v2(self, objectType, symbol):
+ def get_id_v2(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v2()
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.scoped:
res.append(self.scoped)
res.append(' ')
@@ -2337,6 +2638,7 @@ class ASTEnum(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# self.scoped has been done by the CPPEnumObject
self.name.describe_signature(signode, mode, env, symbol=symbol)
@@ -2348,16 +2650,20 @@ class ASTEnum(ASTBase):
class ASTEnumerator(ASTBase):
def __init__(self, name, init):
+ # type: (Any, Any) -> None
self.name = name
self.init = init
- def get_id_v1(self, objectType, symbol):
+ def get_id_v1(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
raise NoOldIdError()
- def get_id_v2(self, objectType, symbol):
+ def get_id_v2(self, objectType, symbol): # type: ignore
+ # type: (unicode, Symbol) -> unicode
return symbol.get_full_nested_name().get_id_v2()
def __unicode__(self):
+ # type: () -> unicode
res = []
res.append(text_type(self.name))
if self.init:
@@ -2365,6 +2671,7 @@ class ASTEnumerator(ASTBase):
return u''.join(res)
def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
if self.init:
@@ -2373,16 +2680,18 @@ class ASTEnumerator(ASTBase):
class ASTDeclaration(ASTBase):
def __init__(self, objectType, visibility, templatePrefix, declaration):
+ # type: (unicode, unicode, Any, Any) -> None
self.objectType = objectType
self.visibility = visibility
self.templatePrefix = templatePrefix
self.declaration = declaration
- self.symbol = None
+ self.symbol = None # type: Symbol
# set by CPPObject._add_enumerator_to_parent
- self.enumeratorScopedSymbol = None
+ self.enumeratorScopedSymbol = None # type: Any
def clone(self):
+ # type: () -> ASTDeclaration
if self.templatePrefix:
templatePrefixClone = self.templatePrefix.clone()
else:
@@ -2393,9 +2702,11 @@ class ASTDeclaration(ASTBase):
@property
def name(self):
+ # type: () -> unicode
return self.declaration.name
def get_id_v1(self):
+ # type: () -> unicode
if self.templatePrefix:
raise NoOldIdError()
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
@@ -2403,6 +2714,7 @@ class ASTDeclaration(ASTBase):
return self.declaration.get_id_v1(self.objectType, self.symbol)
def get_id_v2(self, prefixed=True):
+ # type: (bool) -> unicode
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
return self.enumeratorScopedSymbol.declaration.get_id_v2(prefixed)
if prefixed:
@@ -2415,10 +2727,12 @@ class ASTDeclaration(ASTBase):
return u''.join(res)
def get_newest_id(self):
+ # type: () -> unicode
return self.get_id_v2()
def __unicode__(self):
- res = []
+ # type: () -> unicode
+ res = [] # type: List[unicode]
if self.visibility and self.visibility != "public":
res.append(self.visibility)
res.append(u' ')
@@ -2427,7 +2741,8 @@ class ASTDeclaration(ASTBase):
res.append(text_type(self.declaration))
return u''.join(res)
- def describe_signature(self, signode, mode, env):
+ def describe_signature(self, signode, mode, env, options):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Dict) -> None
_verify_description_mode(mode)
# The caller of the domain added a desc_signature node.
# Always enable multiline:
@@ -2440,7 +2755,8 @@ class ASTDeclaration(ASTBase):
assert self.symbol
if self.templatePrefix:
self.templatePrefix.describe_signature(signode, mode, env,
- symbol=self.symbol)
+ symbol=self.symbol,
+ lineSpec=options.get('tparam-line-spec'))
signode += mainDeclNode
if self.visibility and self.visibility != "public":
mainDeclNode += addnodes.desc_annotation(self.visibility + " ",
@@ -2459,8 +2775,8 @@ class ASTDeclaration(ASTBase):
mainDeclNode += addnodes.desc_annotation('class ', 'class ')
elif self.objectType == 'enum':
prefix = 'enum '
- if self.scoped:
- prefix += self.scoped
+ if self.scoped: # type: ignore
+ prefix += self.scoped # type: ignore
prefix += ' '
mainDeclNode += addnodes.desc_annotation(prefix, prefix)
elif self.objectType == 'enumerator':
@@ -2473,12 +2789,14 @@ class ASTDeclaration(ASTBase):
class ASTNamespace(ASTBase):
def __init__(self, nestedName, templatePrefix):
+ # type: (Any, Any) -> None
self.nestedName = nestedName
self.templatePrefix = templatePrefix
class Symbol(object):
def _assert_invariants(self):
+ # type: () -> None
if not self.parent:
# parent == None means global scope, so declaration means a parent
assert not self.identifier
@@ -2495,6 +2813,7 @@ class Symbol(object):
def __init__(self, parent, identifier,
templateParams, templateArgs, declaration, docname):
+ # type: (Any, Any, Any, Any, Any, unicode) -> None
self.parent = parent
self.identifier = identifier
self.templateParams = templateParams # template<templateParams>
@@ -2503,7 +2822,7 @@ class Symbol(object):
self.docname = docname
self._assert_invariants()
- self.children = []
+ self.children = [] # type: List[Any]
if self.parent:
self.parent.children.append(self)
if self.declaration:
@@ -2524,6 +2843,7 @@ class Symbol(object):
self._add_symbols(nn, [], decl, docname)
def _fill_empty(self, declaration, docname):
+ # type: (Any, unicode) -> None
self._assert_invariants()
assert not self.declaration
assert not self.docname
@@ -2535,6 +2855,7 @@ class Symbol(object):
self._assert_invariants()
def clear_doc(self, docname):
+ # type: (unicode) -> None
newChildren = []
for sChild in self.children:
sChild.clear_doc(docname)
@@ -2550,12 +2871,14 @@ class Symbol(object):
self.children = newChildren
def get_all_symbols(self):
+ # type: () -> Iterator[Any]
yield self
for sChild in self.children:
for s in sChild.get_all_symbols():
yield s
def get_lookup_key(self):
+ # type: () -> List[Tuple[ASTNestedNameElement, Any]]
if not self.parent:
# specialise for the root
return None
@@ -2576,6 +2899,7 @@ class Symbol(object):
return key
def get_full_nested_name(self):
+ # type: () -> ASTNestedName
names = []
for nne, templateParams in self.get_lookup_key():
names.append(nne)
@@ -2584,6 +2908,7 @@ class Symbol(object):
def _find_named_symbol(self, identifier, templateParams,
templateArgs, operator,
templateShorthand, matchSelf):
+ # type: (Any, Any, Any, Any, Any, bool) -> Symbol
assert (identifier is None) != (operator is None)
def matches(s):
@@ -2624,6 +2949,7 @@ class Symbol(object):
return None
def _add_symbols(self, nestedName, templateDecls, declaration, docname):
+ # type: (Any, List[Any], Any, unicode) -> Symbol
# This condition should be checked at the parser level.
# Each template argument list must have a template parameter list.
# But to declare a template there must be an additional template parameter list.
@@ -2722,6 +3048,7 @@ class Symbol(object):
return symbol
def merge_with(self, other, docnames, env):
+ # type: (Any, List[unicode], BuildEnvironment) -> None
assert other is not None
for otherChild in other.children:
if not otherChild.identifier:
@@ -2756,7 +3083,7 @@ class Symbol(object):
msg = "Duplicate declaration, also defined in '%s'.\n"
msg += "Declaration is '%s'."
msg = msg % (ourChild.docname, name)
- env.warn(otherChild.docname, msg)
+ logger.warning(msg, location=otherChild.docname)
else:
# Both have declarations, and in the same docname.
# This can apparently happen, it should be safe to
@@ -2765,6 +3092,7 @@ class Symbol(object):
ourChild.merge_with(otherChild, docnames, env)
def add_name(self, nestedName, templatePrefix=None):
+ # type: (unicode, Any) -> Symbol
if templatePrefix:
templateDecls = templatePrefix.templates
else:
@@ -2773,6 +3101,7 @@ class Symbol(object):
declaration=None, docname=None)
def add_declaration(self, declaration, docname):
+ # type: (Any, unicode) -> Symbol
assert declaration
assert docname
nestedName = declaration.name
@@ -2783,6 +3112,7 @@ class Symbol(object):
return self._add_symbols(nestedName, templateDecls, declaration, docname)
def find_identifier(self, identifier, matchSelf):
+ # type: (Any, bool) -> Symbol
if matchSelf and self.identifier and self.identifier == identifier:
return self
for s in self.children:
@@ -2791,6 +3121,7 @@ class Symbol(object):
return None
def direct_lookup(self, key):
+ # type: (List[Tuple[Any, Any]]) -> Symbol
s = self
for name, templateParams in key:
if name.is_operator():
@@ -2810,6 +3141,7 @@ class Symbol(object):
return s
def find_name(self, nestedName, templateDecls, templateShorthand, matchSelf):
+ # type: (Any, Any, Any, bool) -> Symbol
# templateShorthand: missing template parameter lists for templates is ok
# TODO: unify this with the _add_symbols
@@ -2885,7 +3217,8 @@ class Symbol(object):
assert False # should have returned in the loop
def to_string(self, indent):
- res = ['\t' * indent]
+ # type: (int) -> unicode
+ res = ['\t' * indent] # type: List[unicode]
if not self.parent:
res.append('::')
else:
@@ -2910,6 +3243,7 @@ class Symbol(object):
return ''.join(res)
def dump(self, indent):
+ # type: (int) -> unicode
res = [self.to_string(indent)]
for c in self.children:
res.append(c.dump(indent + 1))
@@ -2927,16 +3261,18 @@ class DefinitionParser(object):
_prefix_keys = ('class', 'struct', 'enum', 'union', 'typename')
def __init__(self, definition, warnEnv, config):
+ # type: (Any, Any, Config) -> None
self.definition = definition.strip()
self.pos = 0
self.end = len(self.definition)
- self.last_match = None
- self._previous_state = (0, None)
+ self.last_match = None # type: Match
+ self._previous_state = (0, None) # type: Tuple[int, Match]
self.warnEnv = warnEnv
self.config = config
def _make_multi_error(self, errors, header):
+ # type: (List[Any], unicode) -> DefinitionError
if len(errors) == 1:
return DefinitionError(header + '\n' + errors[0][0].description)
result = [header, '\n']
@@ -2956,23 +3292,27 @@ class DefinitionParser(object):
return DefinitionError(''.join(result))
def status(self, msg):
+ # type: (unicode) -> None
# for debugging
indicator = '-' * self.pos + '^'
print("%s\n%s\n%s" % (msg, self.definition, indicator))
def fail(self, msg):
+ # type: (unicode) -> None
indicator = '-' * self.pos + '^'
raise DefinitionError(
'Invalid definition: %s [error at %d]\n %s\n %s' %
(msg, self.pos, self.definition, indicator))
def warn(self, msg):
+ # type: (unicode) -> None
if self.warnEnv:
self.warnEnv.warn(msg)
else:
print("Warning: %s" % msg)
def match(self, regex):
+ # type: (Pattern) -> bool
match = regex.match(self.definition, self.pos)
if match is not None:
self._previous_state = (self.pos, self.last_match)
@@ -2982,9 +3322,11 @@ class DefinitionParser(object):
return False
def backout(self):
+ # type: () -> None
self.pos, self.last_match = self._previous_state
def skip_string(self, string):
+ # type: (unicode) -> bool
strlen = len(string)
if self.definition[self.pos:self.pos + strlen] == string:
self.pos += strlen
@@ -2992,18 +3334,22 @@ class DefinitionParser(object):
return False
def skip_word(self, word):
+ # type: (unicode) -> bool
return self.match(re.compile(r'\b%s\b' % re.escape(word)))
def skip_ws(self):
+ # type: () -> bool
return self.match(_whitespace_re)
def skip_word_and_ws(self, word):
+ # type: (unicode) -> bool
if self.skip_word(word):
self.skip_ws()
return True
return False
def skip_string_and_ws(self, string):
+ # type: (unicode) -> bool
if self.skip_string(string):
self.skip_ws()
return True
@@ -3011,10 +3357,12 @@ class DefinitionParser(object):
@property
def eof(self):
+ # type: () -> bool
return self.pos >= self.end
@property
def current_char(self):
+ # type: () -> unicode
try:
return self.definition[self.pos]
except IndexError:
@@ -3022,24 +3370,30 @@ class DefinitionParser(object):
@property
def matched_text(self):
+ # type: () -> unicode
if self.last_match is not None:
return self.last_match.group()
+ else:
+ return None
def read_rest(self):
+ # type: () -> unicode
rv = self.definition[self.pos:]
self.pos = self.end
return rv
def assert_end(self):
+ # type: () -> None
self.skip_ws()
if not self.eof:
self.fail('Expected end of definition.')
def _parse_balanced_token_seq(self, end):
+ # type: (List[unicode]) -> unicode
# TODO: add handling of string literals and similar
- brackets = {'(': ')', '[': ']', '{': '}'}
+ brackets = {'(': ')', '[': ']', '{': '}'} # type: Dict[unicode, unicode]
startPos = self.pos
- symbols = []
+ symbols = [] # type: List[unicode]
while not self.eof:
if len(symbols) == 0 and self.current_char in end:
break
@@ -3056,6 +3410,7 @@ class DefinitionParser(object):
return self.definition[startPos:self.pos]
def _parse_attribute(self):
+ # type: () -> Any
self.skip_ws()
# try C++11 style
startPos = self.pos
@@ -3115,6 +3470,7 @@ class DefinitionParser(object):
return None
def _parse_expression(self, end):
+ # type: (List[unicode]) -> unicode
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
assert end
@@ -3124,8 +3480,8 @@ class DefinitionParser(object):
value = self.matched_text
else:
# TODO: add handling of more bracket-like things, and quote handling
- brackets = {'(': ')', '[': ']', '<': '>'}
- symbols = []
+ brackets = {'(': ')', '[': ']', '<': '>'} # type: Dict[unicode, unicode]
+ symbols = [] # type: List[unicode]
while not self.eof:
if (len(symbols) == 0 and self.current_char in end):
break
@@ -3141,6 +3497,7 @@ class DefinitionParser(object):
return value.strip()
def _parse_operator(self):
+ # type: () -> Any
self.skip_ws()
# adapted from the old code
# thank god, a regular operator definition
@@ -3173,11 +3530,12 @@ class DefinitionParser(object):
return ASTOperatorType(type)
def _parse_template_argument_list(self):
+ # type: () -> ASTTemplateArgs
self.skip_ws()
if not self.skip_string('<'):
return None
prevErrors = []
- templateArgs = []
+ templateArgs = [] # type: List
while 1:
pos = self.pos
parsedComma = False
@@ -3216,6 +3574,7 @@ class DefinitionParser(object):
return ASTTemplateArgs(templateArgs)
def _parse_nested_name(self, memberPointer=False):
+ # type: (bool) -> ASTNestedName
names = []
self.skip_ws()
@@ -3240,7 +3599,7 @@ class DefinitionParser(object):
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
templateArgs = self._parse_template_argument_list()
- identifier = ASTIdentifier(identifier)
+ identifier = ASTIdentifier(identifier) # type: ignore
names.append(ASTNestedNameElement(identifier, templateArgs))
self.skip_ws()
@@ -3251,6 +3610,7 @@ class DefinitionParser(object):
return ASTNestedName(names, rooted)
def _parse_trailing_type_spec(self):
+ # type: () -> Any
# fundemental types
self.skip_ws()
for t in self._simple_fundemental_types:
@@ -3296,6 +3656,7 @@ class DefinitionParser(object):
return ASTTrailingTypeSpecName(prefix, nestedName)
def _parse_parameters_and_qualifiers(self, paramMode):
+ # type: (unicode) -> ASTParametersQualifiers
self.skip_ws()
if not self.skip_string('('):
if paramMode == 'function':
@@ -3308,7 +3669,7 @@ class DefinitionParser(object):
while 1:
self.skip_ws()
if self.skip_string('...'):
- args.append(ASTFunctinoParameter(None, True))
+ args.append(ASTFunctionParameter(None, True))
self.skip_ws()
if not self.skip_string(')'):
self.fail('Expected ")" after "..." in '
@@ -3318,7 +3679,7 @@ class DefinitionParser(object):
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
- args.append(ASTFunctinoParameter(arg))
+ args.append(ASTFunctionParameter(arg))
self.skip_ws()
if self.skip_string(','):
@@ -3385,6 +3746,7 @@ class DefinitionParser(object):
initializer)
def _parse_decl_specs_simple(self, outer, typed):
+ # type: (unicode, bool) -> ASTDeclSpecsSimple
"""Just parse the simple ones."""
storage = None
threadLocal = None
@@ -3459,6 +3821,7 @@ class DefinitionParser(object):
friend, attrs)
def _parse_decl_specs(self, outer, typed=True):
+ # type: (unicode, bool) -> ASTDeclSpecs
if outer:
if outer not in ('type', 'member', 'function', 'templateParam'):
raise Exception('Internal error, unknown outer "%s".' % outer)
@@ -3486,6 +3849,7 @@ class DefinitionParser(object):
return ASTDeclSpecs(outer, leftSpecs, rightSpecs, trailing)
def _parse_declarator_name_param_qual(self, named, paramMode, typed):
+ # type: (Union[bool, unicode], unicode, bool) -> ASTDeclaratorNameParamQual
# now we should parse the name, and then suffixes
if named == 'maybe':
pos = self.pos
@@ -3521,10 +3885,11 @@ class DefinitionParser(object):
else:
break
paramQual = self._parse_parameters_and_qualifiers(paramMode)
- return ASTDecleratorNameParamQual(declId=declId, arrayOps=arrayOps,
+ return ASTDeclaratorNameParamQual(declId=declId, arrayOps=arrayOps,
paramQual=paramQual)
- def _parse_declerator(self, named, paramMode, typed=True):
+ def _parse_declarator(self, named, paramMode, typed=True):
+ # type: (Union[bool, unicode], unicode, bool) -> Any
# 'typed' here means 'parse return type stuff'
if paramMode not in ('type', 'function', 'operatorCast'):
raise Exception(
@@ -3545,14 +3910,14 @@ class DefinitionParser(object):
if const:
continue
break
- next = self._parse_declerator(named, paramMode, typed)
+ next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorPtr(next=next, volatile=volatile, const=const)
# TODO: shouldn't we parse an R-value ref here first?
if typed and self.skip_string("&"):
- next = self._parse_declerator(named, paramMode, typed)
+ next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorRef(next=next)
if typed and self.skip_string("..."):
- next = self._parse_declerator(named, paramMode, False)
+ next = self._parse_declarator(named, paramMode, False)
return ASTDeclaratorParamPack(next=next)
if typed: # pointer to member
pos = self.pos
@@ -3578,13 +3943,13 @@ class DefinitionParser(object):
if const:
continue
break
- next = self._parse_declerator(named, paramMode, typed)
+ next = self._parse_declarator(named, paramMode, typed)
return ASTDeclaratorMemPtr(name, const, volatile, next=next)
if typed and self.current_char == '(': # note: peeking, not skipping
if paramMode == "operatorCast":
# TODO: we should be able to parse cast operators which return
# function pointers. For now, just hax it and ignore.
- return ASTDecleratorNameParamQual(declId=None, arrayOps=[],
+ return ASTDeclaratorNameParamQual(declId=None, arrayOps=[],
paramQual=None)
# maybe this is the beginning of params and quals,try that first,
# otherwise assume it's noptr->declarator > ( ptr-declarator )
@@ -3603,10 +3968,10 @@ class DefinitionParser(object):
# TODO: hmm, if there is a name, it must be in inner, right?
# TODO: hmm, if there must be parameters, they must b
# inside, right?
- inner = self._parse_declerator(named, paramMode, typed)
+ inner = self._parse_declarator(named, paramMode, typed)
if not self.skip_string(')'):
self.fail("Expected ')' in \"( ptr-declarator )\"")
- next = self._parse_declerator(named=False,
+ next = self._parse_declarator(named=False,
paramMode="type",
typed=typed)
return ASTDeclaratorParen(inner=inner, next=next)
@@ -3625,13 +3990,14 @@ class DefinitionParser(object):
raise self._make_multi_error(prevErrors, header)
def _parse_initializer(self, outer=None):
+ # type: (unicode) -> ASTInitializer
self.skip_ws()
# TODO: support paren and brace initialization for memberObject
if not self.skip_string('='):
return None
else:
if outer == 'member':
- value = self.read_rest().strip()
+ value = self.read_rest().strip() # type: unicode
elif outer == 'templateParam':
value = self._parse_expression(end=[',', '>'])
elif outer is None: # function parameter
@@ -3642,6 +4008,7 @@ class DefinitionParser(object):
return ASTInitializer(value)
def _parse_type(self, named, outer=None):
+ # type: (Union[bool, unicode], unicode) -> ASTType
"""
named=False|'maybe'|True: 'maybe' is e.g., for function objects which
doesn't need to name the arguments
@@ -3664,7 +4031,7 @@ class DefinitionParser(object):
# first try without the type
try:
declSpecs = self._parse_decl_specs(outer=outer, typed=False)
- decl = self._parse_declerator(named=True, paramMode=outer,
+ decl = self._parse_declarator(named=True, paramMode=outer,
typed=False)
self.assert_end()
except DefinitionError as exUntyped:
@@ -3678,7 +4045,7 @@ class DefinitionParser(object):
self.pos = startPos
try:
declSpecs = self._parse_decl_specs(outer=outer)
- decl = self._parse_declerator(named=True, paramMode=outer)
+ decl = self._parse_declarator(named=True, paramMode=outer)
except DefinitionError as exTyped:
self.pos = startPos
if outer == 'type':
@@ -3709,7 +4076,7 @@ class DefinitionParser(object):
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
- decl = self._parse_declerator(named=True, paramMode=outer,
+ decl = self._parse_declarator(named=True, paramMode=outer,
typed=typed)
else:
paramMode = 'type'
@@ -3721,10 +4088,11 @@ class DefinitionParser(object):
elif outer == 'templateParam':
named = 'single'
declSpecs = self._parse_decl_specs(outer=outer)
- decl = self._parse_declerator(named=named, paramMode=paramMode)
+ decl = self._parse_declarator(named=named, paramMode=paramMode)
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named, outer):
+ # type: (Union[bool, unicode], unicode) -> ASTTypeWithInit
if outer:
assert outer in ('type', 'member', 'function', 'templateParam')
type = self._parse_type(outer=outer, named=named)
@@ -3732,6 +4100,7 @@ class DefinitionParser(object):
return ASTTypeWithInit(type, init)
def _parse_type_using(self):
+ # type: () -> ASTTypeUsing
name = self._parse_nested_name()
self.skip_ws()
if not self.skip_string('='):
@@ -3740,6 +4109,7 @@ class DefinitionParser(object):
return ASTTypeUsing(name, type)
def _parse_concept(self):
+ # type: () -> ASTConcept
nestedName = self._parse_nested_name()
isFunction = False
@@ -3757,6 +4127,7 @@ class DefinitionParser(object):
return ASTConcept(nestedName, isFunction, initializer)
def _parse_class(self):
+ # type: () -> ASTClass
name = self._parse_nested_name()
self.skip_ws()
final = self.skip_word_and_ws('final')
@@ -3765,7 +4136,7 @@ class DefinitionParser(object):
if self.skip_string(':'):
while 1:
self.skip_ws()
- visibility = 'private'
+ visibility = 'private' # type: unicode
virtual = False
pack = False
if self.skip_word_and_ws('virtual'):
@@ -3787,7 +4158,8 @@ class DefinitionParser(object):
return ASTClass(name, final, bases)
def _parse_enum(self):
- scoped = None # is set by CPPEnumObject
+ # type: () -> ASTEnum
+ scoped = None # type: unicode # is set by CPPEnumObject
self.skip_ws()
name = self._parse_nested_name()
self.skip_ws()
@@ -3797,6 +4169,7 @@ class DefinitionParser(object):
return ASTEnum(name, scoped, underlyingType)
def _parse_enumerator(self):
+ # type: () -> ASTEnumerator
name = self._parse_nested_name()
self.skip_ws()
init = None
@@ -3806,9 +4179,10 @@ class DefinitionParser(object):
return ASTEnumerator(name, init)
def _parse_template_parameter_list(self):
+ # type: () -> ASTTemplateParams
# only: '<' parameter-list '>'
# we assume that 'template' has just been parsed
- templateParams = []
+ templateParams = [] # type: List
self.skip_ws()
if not self.skip_string("<"):
self.fail("Expected '<' after 'template'")
@@ -3818,6 +4192,7 @@ class DefinitionParser(object):
if self.skip_word('template'):
# declare a tenplate template parameter
nestedParams = self._parse_template_parameter_list()
+ nestedParams.isNested = True
else:
nestedParams = None
self.skip_ws()
@@ -3847,7 +4222,7 @@ class DefinitionParser(object):
parameterPack, default)
if nestedParams:
# template type
- param = ASTTemplateParamTemplateType(nestedParams, data)
+ param = ASTTemplateParamTemplateType(nestedParams, data) # type: Any
else:
# type
param = ASTTemplateParamType(data)
@@ -3875,6 +4250,7 @@ class DefinitionParser(object):
raise self._make_multi_error(prevErrors, header)
def _parse_template_introduction(self):
+ # type: () -> ASTTemplateIntroduction
pos = self.pos
try:
concept = self._parse_nested_name()
@@ -3899,7 +4275,7 @@ class DefinitionParser(object):
if identifier in _keywords:
self.fail("Expected identifier in template introduction list, "
"got keyword: %s" % identifier)
- identifier = ASTIdentifier(identifier)
+ identifier = ASTIdentifier(identifier) # type: ignore
params.append(ASTTemplateIntroductionParameter(identifier, parameterPack))
self.skip_ws()
@@ -3913,13 +4289,14 @@ class DefinitionParser(object):
return ASTTemplateIntroduction(concept, params)
def _parse_template_declaration_prefix(self, objectType):
- templates = []
+ # type: (unicode) -> ASTTemplateDeclarationPrefix
+ templates = [] # type: List
while 1:
self.skip_ws()
# the saved position is only used to provide a better error message
pos = self.pos
if self.skip_word("template"):
- params = self._parse_template_parameter_list()
+ params = self._parse_template_parameter_list() # type: Any
else:
params = self._parse_template_introduction()
if not params:
@@ -3937,6 +4314,7 @@ class DefinitionParser(object):
def _check_template_consistency(self, nestedName, templatePrefix,
fullSpecShorthand):
+ # type: (Any, Any, bool) -> ASTTemplateDeclarationPrefix
numArgs = nestedName.num_templates()
if not templatePrefix:
numParams = 0
@@ -3952,7 +4330,7 @@ class DefinitionParser(object):
msg = "Too many template argument lists compared to parameter" \
" lists. Argument lists: %d, Parameter lists: %d," \
" Extra empty parameters lists prepended: %d." \
- % (numArgs, numParams, numExtra)
+ % (numArgs, numParams, numExtra) # type: unicode
msg += " Declaration:\n\t"
if templatePrefix:
msg += "%s\n\t" % text_type(templatePrefix)
@@ -3968,12 +4346,13 @@ class DefinitionParser(object):
return templatePrefix
def parse_declaration(self, objectType):
+ # type: (unicode) -> ASTDeclaration
if objectType not in ('type', 'concept', 'member',
'function', 'class', 'enum', 'enumerator'):
raise Exception('Internal error, unknown objectType "%s".' % objectType)
visibility = None
templatePrefix = None
- declaration = None
+ declaration = None # type: Any
self.skip_ws()
if self.match(_visibility_re):
@@ -4021,15 +4400,17 @@ class DefinitionParser(object):
templatePrefix, declaration)
def parse_namespace_object(self):
+ # type: () -> ASTNamespace
templatePrefix = self._parse_template_declaration_prefix(objectType="namespace")
name = self._parse_nested_name()
templatePrefix = self._check_template_consistency(name, templatePrefix,
fullSpecShorthand=False)
res = ASTNamespace(name, templatePrefix)
- res.objectType = 'namespace'
+ res.objectType = 'namespace' # type: ignore
return res
def parse_xref_object(self):
+ # type: () -> ASTNamespace
templatePrefix = self._parse_template_declaration_prefix(objectType="xref")
name = self._parse_nested_name()
# if there are '()' left, just skip them
@@ -4038,11 +4419,12 @@ class DefinitionParser(object):
templatePrefix = self._check_template_consistency(name, templatePrefix,
fullSpecShorthand=True)
res = ASTNamespace(name, templatePrefix)
- res.objectType = 'xref'
+ res.objectType = 'xref' # type: ignore
return res
def _make_phony_error_name():
+ # type: () -> ASTNestedName
nne = ASTNestedNameElement(ASTIdentifier("PhonyNameDueToError"), None)
return ASTNestedName([nne], rooted=False)
@@ -4064,10 +4446,15 @@ class CPPObject(ObjectDescription):
names=('returns', 'return')),
]
+ option_spec = dict(ObjectDescription.option_spec)
+ option_spec['tparam-line-spec'] = directives.flag
+
def warn(self, msg):
+ # type: (unicode) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def _add_enumerator_to_parent(self, ast):
+ # type: (Any) -> None
assert ast.objectType == 'enumerator'
# find the parent, if it exists && is an enum
# && it's unscoped,
@@ -4109,6 +4496,7 @@ class CPPObject(ObjectDescription):
docname=self.env.docname)
def add_target_and_index(self, ast, sig, signode):
+ # type: (Any, unicode, addnodes.desc_signature) -> None
# general note: name must be lstrip(':')'ed, to remove "::"
try:
id_v1 = ast.get_id_v1()
@@ -4155,16 +4543,20 @@ class CPPObject(ObjectDescription):
self.state.document.note_explicit_target(signode)
def parse_definition(self, parser):
+ # type: (Any) -> Any
raise NotImplementedError()
- def describe_signature(self, signode, ast, parentScope):
- raise NotImplementedError()
+ def describe_signature(self, signode, ast, options):
+ # type: (addnodes.desc_signature, Any, Dict) -> None
+ ast.describe_signature(signode, 'lastIsName', self.env, options)
def handle_signature(self, sig, signode):
- if 'cpp:parent_symbol' not in self.env.ref_context:
+ # type: (unicode, addnodes.desc_signature) -> Any
+ if 'cpp:parent_symbol' not in self.env.temp_data:
root = self.env.domaindata['cpp']['root_symbol']
- self.env.ref_context['cpp:parent_symbol'] = root
- parentSymbol = self.env.ref_context['cpp:parent_symbol']
+ self.env.temp_data['cpp:parent_symbol'] = root
+ self.env.ref_context['cpp:parent_key'] = root.get_lookup_key()
+ parentSymbol = self.env.temp_data['cpp:parent_symbol']
parser = DefinitionParser(sig, self, self.env.config)
try:
@@ -4176,93 +4568,96 @@ class CPPObject(ObjectDescription):
# the possibly inner declarations.
name = _make_phony_error_name()
symbol = parentSymbol.add_name(name)
- self.env.ref_context['cpp:last_symbol'] = symbol
+ self.env.temp_data['cpp:last_symbol'] = symbol
raise ValueError
try:
symbol = parentSymbol.add_declaration(ast, docname=self.env.docname)
- self.env.ref_context['cpp:last_symbol'] = symbol
+ self.env.temp_data['cpp:last_symbol'] = symbol
except _DuplicateSymbolError as e:
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
- self.env.ref_context['cpp:last_symbol'] = e.symbol
+ self.env.temp_data['cpp:last_symbol'] = e.symbol
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
- self.describe_signature(signode, ast)
+ self.options['tparam-line-spec'] = 'tparam-line-spec' in self.options
+ self.describe_signature(signode, ast, self.options)
return ast
def before_content(self):
- lastSymbol = self.env.ref_context['cpp:last_symbol']
+ # type: () -> None
+ lastSymbol = self.env.temp_data['cpp:last_symbol']
assert lastSymbol
- self.oldParentSymbol = self.env.ref_context['cpp:parent_symbol']
- self.env.ref_context['cpp:parent_symbol'] = lastSymbol
+ self.oldParentSymbol = self.env.temp_data['cpp:parent_symbol']
+ self.oldParentKey = self.env.ref_context['cpp:parent_key']
+ self.env.temp_data['cpp:parent_symbol'] = lastSymbol
+ self.env.ref_context['cpp:parent_key'] = lastSymbol.get_lookup_key()
def after_content(self):
- self.env.ref_context['cpp:parent_symbol'] = self.oldParentSymbol
+ # type: () -> None
+ self.env.temp_data['cpp:parent_symbol'] = self.oldParentSymbol
+ self.env.ref_context['cpp:parent_key'] = self.oldParentKey
class CPPTypeObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ type)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("type")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPConceptObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ concept)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("concept")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPMemberObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ member)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("member")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPFunctionObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ function)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("function")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPClassObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ class)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("class")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPEnumObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ enum)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
ast = parser.parse_declaration("enum")
# self.objtype is set by ObjectDescription in run()
if self.objtype == "enum":
@@ -4275,20 +4670,16 @@ class CPPEnumObject(CPPObject):
assert False
return ast
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPEnumeratorObject(CPPObject):
def get_index_text(self, name):
+ # type: (unicode) -> unicode
return _('%s (C++ enumerator)') % name
def parse_definition(self, parser):
+ # type: (Any) -> Any
return parser.parse_declaration("enumerator")
- def describe_signature(self, signode, ast):
- ast.describe_signature(signode, 'lastIsName', self.env)
-
class CPPNamespaceObject(Directive):
"""
@@ -4300,17 +4691,19 @@ class CPPNamespaceObject(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def warn(self, msg):
+ # type: (unicode) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
rootSymbol = env.domaindata['cpp']['root_symbol']
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
symbol = rootSymbol
- stack = []
+ stack = [] # type: List[Symbol]
else:
parser = DefinitionParser(self.arguments[0], self, env.config)
try:
@@ -4322,8 +4715,9 @@ class CPPNamespaceObject(Directive):
ast = ASTNamespace(name, None)
symbol = rootSymbol.add_name(ast.nestedName, ast.templatePrefix)
stack = [symbol]
- env.ref_context['cpp:parent_symbol'] = symbol
+ env.temp_data['cpp:parent_symbol'] = symbol
env.temp_data['cpp:namespace_stack'] = stack
+ env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
@@ -4332,15 +4726,17 @@ class CPPNamespacePushObject(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def warn(self, msg):
+ # type: (unicode) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
if self.arguments[0].strip() in ('NULL', '0', 'nullptr'):
- return
+ return []
parser = DefinitionParser(self.arguments[0], self, env.config)
try:
ast = parser.parse_namespace_object()
@@ -4349,14 +4745,15 @@ class CPPNamespacePushObject(Directive):
self.warn(e.description)
name = _make_phony_error_name()
ast = ASTNamespace(name, None)
- oldParent = env.ref_context.get('cpp:parent_symbol', None)
+ oldParent = env.temp_data.get('cpp:parent_symbol', None)
if not oldParent:
oldParent = env.domaindata['cpp']['root_symbol']
symbol = oldParent.add_name(ast.nestedName, ast.templatePrefix)
stack = env.temp_data.get('cpp:namespace_stack', [])
stack.append(symbol)
- env.ref_context['cpp:parent_symbol'] = symbol
+ env.temp_data['cpp:parent_symbol'] = symbol
env.temp_data['cpp:namespace_stack'] = stack
+ env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
@@ -4365,12 +4762,14 @@ class CPPNamespacePopObject(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def warn(self, msg):
+ # type: (unicode) -> None
self.state_machine.reporter.warning(msg, line=self.lineno)
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
stack = env.temp_data.get('cpp:namespace_stack', None)
if not stack or len(stack) == 0:
@@ -4382,16 +4781,16 @@ class CPPNamespacePopObject(Directive):
symbol = stack[-1]
else:
symbol = env.domaindata['cpp']['root_symbol']
- env.ref_context['cpp:parent_symbol'] = symbol
+ env.temp_data['cpp:parent_symbol'] = symbol
env.temp_data['cpp:namespace_stack'] = stack
+ env.ref_context['cpp:parent_key'] = symbol.get_lookup_key()
return []
class CPPXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
- parent = env.ref_context.get('cpp:parent_symbol', None)
- if parent:
- refnode['cpp:parent_key'] = parent.get_lookup_key()
+ # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
+ refnode.attributes.update(env.ref_context)
if refnode['reftype'] == 'any':
# Assume the removal part of fix_parens for :any: refs.
# The addition part is done with the reference is resolved.
@@ -4458,6 +4857,7 @@ class CPPDomain(Domain):
}
def clear_doc(self, docname):
+ # type: (unicode) -> None
rootSymbol = self.data['root_symbol']
rootSymbol.clear_doc(docname)
for name, nDocname in list(self.data['names'].items()):
@@ -4465,18 +4865,18 @@ class CPPDomain(Domain):
del self.data['names'][name]
def process_doc(self, env, docname, document):
+ # type: (BuildEnvironment, unicode, nodes.Node) -> None
# just for debugging
# print(docname)
# print(self.data['root_symbol'].dump(0))
pass
def process_field_xref(self, pnode):
- symbol = self.env.ref_context['cpp:parent_symbol']
- key = symbol.get_lookup_key()
- assert key
- pnode['cpp:parent_key'] = key
+ # type: (nodes.Node) -> None
+ pnode.attributes.update(self.env.ref_context)
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
self.data['root_symbol'].merge_with(otherdata['root_symbol'],
docnames, self.env)
ourNames = self.data['names']
@@ -4486,16 +4886,17 @@ class CPPDomain(Domain):
msg = "Duplicate declaration, also defined in '%s'.\n"
msg += "Name of declaration is '%s'."
msg = msg % (ourNames[name], name)
- self.env.warn(docname, msg)
+ logger.warning(msg, docname)
else:
ourNames[name] = docname
def _resolve_xref_inner(self, env, fromdocname, builder, typ,
target, node, contnode, emitWarnings=True):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node, bool) -> nodes.Node # NOQA
class Warner(object):
def warn(self, msg):
if emitWarnings:
- env.warn_node(msg, node)
+ logger.warning(msg, location=node)
warner = Warner()
# add parens again for those that could be functions
if typ == 'any' or typ == 'func':
@@ -4601,19 +5002,25 @@ class CPPDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
return self._resolve_xref_inner(env, fromdocname, builder, typ,
target, node, contnode)[0]
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
node, objtype = self._resolve_xref_inner(env, fromdocname, builder,
'any', target, node, contnode,
emitWarnings=False)
if node:
- return [('cpp:' + self.role_for_objtype(objtype), node)]
+ if objtype == 'templateParam':
+ return [('cpp:templateParam', node)]
+ else:
+ return [('cpp:' + self.role_for_objtype(objtype), node)]
return []
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
rootSymbol = self.data['root_symbol']
for symbol in rootSymbol.get_all_symbols():
if symbol.declaration is None:
@@ -4627,6 +5034,7 @@ class CPPDomain(Domain):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(CPPDomain)
app.add_config_value("cpp_index_common_prefix", [], 'env')
app.add_config_value("cpp_id_attributes", [], 'env')
diff --git a/sphinx/domains/javascript.py b/sphinx/domains/javascript.py
index 363b2103e..b100f528e 100644
--- a/sphinx/domains/javascript.py
+++ b/sphinx/domains/javascript.py
@@ -18,6 +18,14 @@ from sphinx.domains.python import _pseudo_parse_arglist
from sphinx.util.nodes import make_refnode
from sphinx.util.docfields import Field, GroupedField, TypedField
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterator, List, Tuple # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
class JSObject(ObjectDescription):
"""
@@ -28,9 +36,10 @@ class JSObject(ObjectDescription):
has_arguments = False
#: what is displayed right before the documentation entry
- display_prefix = None
+ display_prefix = None # type: unicode
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
sig = sig.strip()
if '(' in sig and sig[-1:] == ')':
prefix, arglist = sig.split('(', 1)
@@ -76,6 +85,7 @@ class JSObject(ObjectDescription):
return fullname, nameprefix
def add_target_and_index(self, name_obj, sig, signode):
+ # type: (Tuple[unicode, unicode], unicode, addnodes.desc_signature) -> None
objectname = self.options.get(
'object', self.env.ref_context.get('js:object'))
fullname = name_obj[0]
@@ -100,6 +110,7 @@ class JSObject(ObjectDescription):
'', None))
def get_index_text(self, objectname, name_obj):
+ # type: (unicode, Tuple[unicode, unicode]) -> unicode
name, obj = name_obj
if self.objtype == 'function':
if not obj:
@@ -139,6 +150,7 @@ class JSConstructor(JSCallable):
class JSXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
# basically what sphinx.domains.python.PyXRefRole does
refnode['js:object'] = env.ref_context.get('js:object')
if not has_explicit_title:
@@ -180,20 +192,23 @@ class JavaScriptDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- }
+ } # type: Dict[unicode, Dict[unicode, Tuple[unicode, unicode]]]
def clear_doc(self, docname):
+ # type: (unicode) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
# XXX check duplicates
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.data['objects'][fullname] = (fn, objtype)
def find_obj(self, env, obj, name, typ, searchorder=0):
+ # type: (BuildEnvironment, unicode, unicode, unicode, int) -> Tuple[unicode, Tuple[unicode, unicode]] # NOQA
if name[-2:] == '()':
name = name[:-2]
objects = self.data['objects']
@@ -212,6 +227,7 @@ class JavaScriptDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
objectname = node.get('js:object')
searchorder = node.hasattr('refspecific') and 1 or 0
name, obj = self.find_obj(env, objectname, target, typ, searchorder)
@@ -222,6 +238,7 @@ class JavaScriptDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target, node,
contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
objectname = node.get('js:object')
name, obj = self.find_obj(env, objectname, target, None, 1)
if not obj:
@@ -231,12 +248,14 @@ class JavaScriptDomain(Domain):
name.replace('$', '_S_'), contnode, name))]
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
for refname, (docname, type) in list(self.data['objects'].items()):
yield refname, refname, type, docname, \
refname.replace('$', '_S_'), 1
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(JavaScriptDomain)
return {
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index a6c223d89..b1a4c716c 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -12,18 +12,28 @@
import re
from six import iteritems
+
from docutils import nodes
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
+from sphinx.util import logging
from sphinx.util.nodes import make_refnode
-from sphinx.util.compat import Directive
from sphinx.util.docfields import Field, GroupedField, TypedField
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, Iterator, List, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
# REs for Python signatures
py_sig_re = re.compile(
@@ -36,6 +46,7 @@ py_sig_re = re.compile(
def _pseudo_parse_arglist(signode, arglist):
+ # type: (addnodes.desc_signature, unicode) -> None
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
@@ -85,9 +96,16 @@ def _pseudo_parse_arglist(signode, arglist):
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin(object):
- def make_xref(self, rolename, domain, target, innernode=nodes.emphasis,
- contnode=None, env=None):
- result = super(PyXrefMixin, self).make_xref(rolename, domain, target,
+ def make_xref(self,
+ rolename, # type: unicode
+ domain, # type: unicode
+ target, # type: unicode
+ innernode=nodes.emphasis, # type: nodes.Node
+ contnode=None, # type: nodes.Node
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> nodes.Node
+ result = super(PyXrefMixin, self).make_xref(rolename, domain, target, # type: ignore
innernode, contnode, env)
result['refspecific'] = True
if target.startswith(('.', '~')):
@@ -101,9 +119,16 @@ class PyXrefMixin(object):
break
return result
- def make_xrefs(self, rolename, domain, target, innernode=nodes.emphasis,
- contnode=None, env=None):
- delims = '(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+)'
+ def make_xrefs(self,
+ rolename, # type: unicode
+ domain, # type: unicode
+ target, # type: unicode
+ innernode=nodes.emphasis, # type: nodes.Node
+ contnode=None, # type: nodes.Node
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> List[nodes.Node]
+ delims = r'(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+)'
delims_re = re.compile(delims)
sub_targets = re.split(delims, target)
@@ -114,7 +139,7 @@ class PyXrefMixin(object):
if split_contnode:
contnode = nodes.Text(sub_target)
- if delims_re.match(sub_target):
+ if delims_re.match(sub_target): # type: ignore
results.append(contnode or innernode(sub_target, sub_target))
else:
results.append(self.make_xref(rolename, domain, sub_target,
@@ -170,18 +195,21 @@ class PyObject(ObjectDescription):
allow_nesting = False
def get_signature_prefix(self, sig):
+ # type: (unicode) -> unicode
"""May return a prefix to put before the object name in the
signature.
"""
return ''
def needs_arglist(self):
+ # type: () -> bool
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
@@ -190,7 +218,7 @@ class PyObject(ObjectDescription):
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
- m = py_sig_re.match(sig)
+ m = py_sig_re.match(sig) # type: ignore
if m is None:
raise ValueError
name_prefix, name, arglist, retann = m.groups()
@@ -261,10 +289,12 @@ class PyObject(ObjectDescription):
return fullname, name_prefix
def get_index_text(self, modname, name):
+ # type: (unicode, unicode) -> unicode
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls, sig, signode):
+ # type: (unicode, unicode, addnodes.desc_signature) -> None
modname = self.options.get(
'module', self.env.ref_context.get('py:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
@@ -347,9 +377,11 @@ class PyModulelevel(PyObject):
"""
def needs_arglist(self):
+ # type: () -> bool
return self.objtype == 'function'
def get_index_text(self, modname, name_cls):
+ # type: (unicode, unicode) -> unicode
if self.objtype == 'function':
if not modname:
return _('%s() (built-in function)') % name_cls[0]
@@ -370,9 +402,11 @@ class PyClasslike(PyObject):
allow_nesting = True
def get_signature_prefix(self, sig):
+ # type: (unicode) -> unicode
return self.objtype + ' '
def get_index_text(self, modname, name_cls):
+ # type: (unicode, unicode) -> unicode
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
@@ -389,9 +423,11 @@ class PyClassmember(PyObject):
"""
def needs_arglist(self):
+ # type: () -> bool
return self.objtype.endswith('method')
def get_signature_prefix(self, sig):
+ # type: (unicode) -> unicode
if self.objtype == 'staticmethod':
return 'static '
elif self.objtype == 'classmethod':
@@ -399,6 +435,7 @@ class PyClassmember(PyObject):
return ''
def get_index_text(self, modname, name_cls):
+ # type: (unicode, unicode) -> unicode
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype == 'method':
@@ -460,11 +497,13 @@ class PyDecoratorMixin(object):
Mixin for decorator directives.
"""
def handle_signature(self, sig, signode):
- ret = super(PyDecoratorMixin, self).handle_signature(sig, signode)
+ # type: (unicode, addnodes.desc_signature) -> Tuple[unicode, unicode]
+ ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self):
+ # type: () -> bool
return False
@@ -473,6 +512,7 @@ class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
Directive to mark functions meant to be used as decorators.
"""
def run(self):
+ # type: () -> List[nodes.Node]
# a decorator function is a function after all
self.name = 'py:function'
return PyModulelevel.run(self)
@@ -483,6 +523,7 @@ class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
Directive to mark methods meant to be used as decorators.
"""
def run(self):
+ # type: () -> List[nodes.Node]
self.name = 'py:method'
return PyClassmember.run(self)
@@ -504,6 +545,7 @@ class PyModule(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
@@ -539,9 +581,10 @@ class PyCurrentModule(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
modname = self.arguments[0].strip()
if modname == 'None':
@@ -553,6 +596,7 @@ class PyCurrentModule(Directive):
class PyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
@@ -583,9 +627,11 @@ class PythonModuleIndex(Index):
shortname = l_('modules')
def generate(self, docnames=None):
- content = {}
+ # type: (Iterable[unicode]) -> Tuple[List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool] # NOQA
+ content = {} # type: Dict[unicode, List]
# list of prefixes to ignore
- ignores = self.domain.env.config['modindex_common_prefix']
+ ignores = None # type: List[unicode]
+ ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
modules = sorted(iteritems(self.domain.data['modules']),
@@ -638,9 +684,9 @@ class PythonModuleIndex(Index):
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
- content = sorted(iteritems(content))
+ sorted_content = sorted(iteritems(content))
- return content, collapse
+ return sorted_content, collapse
class PythonDomain(Domain):
@@ -657,7 +703,7 @@ class PythonDomain(Domain):
'staticmethod': ObjType(l_('static method'), 'meth', 'obj'),
'attribute': ObjType(l_('attribute'), 'attr', 'obj'),
'module': ObjType(l_('module'), 'mod', 'obj'),
- }
+ } # type: Dict[unicode, ObjType]
directives = {
'function': PyModulelevel,
@@ -687,12 +733,13 @@ class PythonDomain(Domain):
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
- }
+ } # type: Dict[unicode, Dict[unicode, Tuple[Any]]]
indices = [
PythonModuleIndex,
]
def clear_doc(self, docname):
+ # type: (unicode) -> None
for fullname, (fn, _l) in list(self.data['objects'].items()):
if fn == docname:
del self.data['objects'][fullname]
@@ -701,6 +748,7 @@ class PythonDomain(Domain):
del self.data['modules'][modname]
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
# XXX check duplicates?
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
@@ -710,6 +758,7 @@ class PythonDomain(Domain):
self.data['modules'][modname] = data
def find_obj(self, env, modname, classname, name, type, searchmode=0):
+ # type: (BuildEnvironment, unicode, unicode, unicode, unicode, int) -> List[Tuple[unicode, Any]] # NOQA
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
@@ -721,7 +770,7 @@ class PythonDomain(Domain):
return []
objects = self.data['objects']
- matches = []
+ matches = [] # type: List[Tuple[unicode, Any]]
newname = None
if searchmode == 1:
@@ -774,6 +823,7 @@ class PythonDomain(Domain):
def resolve_xref(self, env, fromdocname, builder,
type, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = node.hasattr('refspecific') and 1 or 0
@@ -782,10 +832,9 @@ class PythonDomain(Domain):
if not matches:
return None
elif len(matches) > 1:
- env.warn_node(
- 'more than one target found for cross-reference '
- '%r: %s' % (target, ', '.join(match[0] for match in matches)),
- node)
+ logger.warning('more than one target found for cross-reference %r: %s',
+ target, ', '.join(match[0] for match in matches),
+ location=node)
name, obj = matches[0]
if obj[1] == 'module':
@@ -797,9 +846,10 @@ class PythonDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
modname = node.get('py:module')
clsname = node.get('py:class')
- results = []
+ results = [] # type: List[Tuple[unicode, nodes.Node]]
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
@@ -815,6 +865,7 @@ class PythonDomain(Domain):
return results
def _make_module_refnode(self, builder, fromdocname, name, contnode):
+ # type: (Builder, unicode, unicode, nodes.Node) -> nodes.Node
# get additional info for modules
docname, synopsis, platform, deprecated = self.data['modules'][name]
title = name
@@ -828,6 +879,7 @@ class PythonDomain(Domain):
'module-' + name, contnode, title)
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
for modname, info in iteritems(self.data['modules']):
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type) in iteritems(self.data['objects']):
@@ -836,6 +888,7 @@ class PythonDomain(Domain):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(PythonDomain)
return {
diff --git a/sphinx/domains/rst.py b/sphinx/domains/rst.py
index 94c9795e3..2a7dffc4d 100644
--- a/sphinx/domains/rst.py
+++ b/sphinx/domains/rst.py
@@ -20,6 +20,14 @@ from sphinx.directives import ObjectDescription
from sphinx.roles import XRefRole
from sphinx.util.nodes import make_refnode
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterator, List, Tuple # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
dir_sig_re = re.compile(r'\.\. (.+?)::(.*)$')
@@ -30,6 +38,7 @@ class ReSTMarkup(ObjectDescription):
"""
def add_target_and_index(self, name, sig, signode):
+ # type: (unicode, unicode, addnodes.desc_signature) -> None
targetname = self.objtype + '-' + name
if targetname not in self.state.document.ids:
signode['names'].append(targetname)
@@ -51,6 +60,7 @@ class ReSTMarkup(ObjectDescription):
targetname, '', None))
def get_index_text(self, objectname, name):
+ # type: (unicode, unicode) -> unicode
if self.objtype == 'directive':
return _('%s (directive)') % name
elif self.objtype == 'role':
@@ -59,6 +69,7 @@ class ReSTMarkup(ObjectDescription):
def parse_directive(d):
+ # type: (unicode) -> Tuple[unicode, unicode]
"""Parse a directive signature.
Returns (directive, arguments) string tuple. If no arguments are given,
@@ -68,7 +79,7 @@ def parse_directive(d):
if not dir.startswith('.'):
# Assume it is a directive without syntax
return (dir, '')
- m = dir_sig_re.match(dir)
+ m = dir_sig_re.match(dir) # type: ignore
if not m:
return (dir, '')
parsed_dir, parsed_args = m.groups()
@@ -80,6 +91,7 @@ class ReSTDirective(ReSTMarkup):
Description of a reST directive.
"""
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> unicode
name, args = parse_directive(sig)
desc_name = '.. %s::' % name
signode += addnodes.desc_name(desc_name, desc_name)
@@ -93,6 +105,7 @@ class ReSTRole(ReSTMarkup):
Description of a reST role.
"""
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> unicode
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
@@ -116,14 +129,16 @@ class ReSTDomain(Domain):
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
- }
+ } # type: Dict[unicode, Dict[unicode, Tuple[unicode, ObjType]]]
def clear_doc(self, docname):
+ # type: (unicode) -> None
for (typ, name), doc in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][typ, name]
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
# XXX check duplicates
for (typ, name), doc in otherdata['objects'].items():
if doc in docnames:
@@ -131,6 +146,7 @@ class ReSTDomain(Domain):
def resolve_xref(self, env, fromdocname, builder, typ, target, node,
contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
objects = self.data['objects']
objtypes = self.objtypes_for_role(typ)
for objtype in objtypes:
@@ -142,6 +158,7 @@ class ReSTDomain(Domain):
def resolve_any_xref(self, env, fromdocname, builder, target,
node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA
objects = self.data['objects']
results = []
for objtype in self.object_types:
@@ -154,11 +171,13 @@ class ReSTDomain(Domain):
return results
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
for (typ, name), docname in iteritems(self.data['objects']):
yield name, name, typ, docname, typ + '-' + name, 1
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(ReSTDomain)
return {
diff --git a/sphinx/domains/std.py b/sphinx/domains/std.py
index c00ff3856..f74c66f3c 100644
--- a/sphinx/domains/std.py
+++ b/sphinx/domains/std.py
@@ -13,8 +13,9 @@ import re
import unicodedata
from six import iteritems
+
from docutils import nodes
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
from sphinx import addnodes
@@ -22,25 +23,35 @@ from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
from sphinx.directives import ObjectDescription
-from sphinx.util import ws_re
+from sphinx.util import ws_re, logging, docname_join
from sphinx.util.nodes import clean_astext, make_refnode
-from sphinx.util.compat import Directive
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Tuple, Type, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
+
+logger = logging.getLogger(__name__)
# RE for option descriptions
-option_desc_re = re.compile(r'((?:/|--|-|\+)?[-+\.?@#_a-zA-Z0-9]+)(=?\s*.*)')
+option_desc_re = re.compile(r'((?:/|--|-|\+)?[^\s=]+)(=?\s*.*)')
# RE for grammar tokens
-token_re = re.compile('`(\w+)`', re.U)
+token_re = re.compile(r'`(\w+)`', re.U)
class GenericObject(ObjectDescription):
"""
A generic x-ref directive registered with Sphinx.add_object_type().
"""
- indextemplate = ''
- parse_node = None
+ indextemplate = '' # type: unicode
+ parse_node = None # type: Callable[[GenericObject, BuildEnvironment, unicode, addnodes.desc_signature], unicode] # NOQA
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> unicode
if self.parse_node:
name = self.parse_node(self.env, sig, signode)
else:
@@ -51,6 +62,7 @@ class GenericObject(ObjectDescription):
return name
def add_target_and_index(self, name, sig, signode):
+ # type: (unicode, unicode, addnodes.desc_signature) -> None
targetname = '%s-%s' % (self.objtype, name)
signode['ids'].append(targetname)
self.state.document.note_explicit_target(signode)
@@ -78,6 +90,7 @@ class EnvVarXRefRole(XRefRole):
"""
def result_nodes(self, document, env, node, is_ref):
+ # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
if not is_ref:
return [node], []
varname = node['reftarget']
@@ -102,9 +115,10 @@ class Target(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
# normalize whitespace in fullname like XRefRole does
fullname = ws_re.sub(' ', self.arguments[0].strip())
@@ -136,19 +150,18 @@ class Cmdoption(ObjectDescription):
"""
def handle_signature(self, sig, signode):
+ # type: (unicode, addnodes.desc_signature) -> unicode
"""Transform an option description into RST nodes."""
count = 0
firstname = ''
for potential_option in sig.split(', '):
potential_option = potential_option.strip()
- m = option_desc_re.match(potential_option)
+ m = option_desc_re.match(potential_option) # type: ignore
if not m:
- self.env.warn(
- self.env.docname,
- 'Malformed option description %r, should '
- 'look like "opt", "-opt args", "--opt args", '
- '"/opt args" or "+opt args"' % potential_option,
- self.lineno)
+ logger.warning('Malformed option description %r, should '
+ 'look like "opt", "-opt args", "--opt args", '
+ '"/opt args" or "+opt args"', potential_option,
+ location=(self.env.docname, self.lineno))
continue
optname, args = m.groups()
if count:
@@ -166,6 +179,7 @@ class Cmdoption(ObjectDescription):
return firstname
def add_target_and_index(self, firstname, sig, signode):
+ # type: (unicode, unicode, addnodes.desc_signature) -> None
currprogram = self.env.ref_context.get('std:program')
for optname in signode.get('allnames', []):
targetname = optname.replace('/', '-')
@@ -197,9 +211,10 @@ class Program(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
program = ws_re.sub('-', self.arguments[0].strip())
if program == 'None':
@@ -211,17 +226,20 @@ class Program(Directive):
class OptionXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.Node, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
refnode['std:program'] = env.ref_context.get('std:program')
return title, target
def split_term_classifiers(line):
+ # type: (unicode) -> List[Union[unicode, None]]
# split line into a term and classifiers. if no classifier, None is used..
parts = re.split(' +: +', line) + [None]
return parts
def make_glossary_term(env, textnodes, index_key, source, lineno, new_id=None):
+ # type: (BuildEnvironment, List[nodes.Node], unicode, unicode, int, unicode) -> nodes.term
# get a text-only representation of the term and register it
# as a cross-reference target
term = nodes.term('', '', *textnodes)
@@ -265,6 +283,7 @@ class Glossary(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
node = addnodes.glossary()
node.document = self.state.document
@@ -275,7 +294,7 @@ class Glossary(Directive):
# be* a definition list.
# first, collect single entries
- entries = []
+ entries = [] # type: List[Tuple[List[Tuple[unicode, unicode, int]], ViewList]]
in_definition = True
was_empty = True
messages = []
@@ -329,7 +348,7 @@ class Glossary(Directive):
for terms, definition in entries:
termtexts = []
termnodes = []
- system_messages = []
+ system_messages = [] # type: List[unicode]
for line, source, lineno in terms:
parts = split_term_classifiers(line)
# parse the term with inline markup
@@ -365,9 +384,10 @@ class Glossary(Directive):
def token_xrefs(text):
+ # type: (unicode) -> List[nodes.Node]
retnodes = []
pos = 0
- for m in token_re.finditer(text):
+ for m in token_re.finditer(text): # type: ignore
if m.start() > pos:
txt = text[pos:m.start()]
retnodes.append(nodes.Text(txt, txt))
@@ -390,13 +410,14 @@ class ProductionList(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
env = self.state.document.settings.env
objects = env.domaindata['std']['objects']
node = addnodes.productionlist()
- messages = []
+ messages = [] # type: List[nodes.Node]
i = 0
for rule in self.arguments[0].split('\n'):
@@ -437,7 +458,8 @@ class StandardDomain(Domain):
searchprio=-1),
'envvar': ObjType(l_('environment variable'), 'envvar'),
'cmdoption': ObjType(l_('program option'), 'option'),
- }
+ 'doc': ObjType(l_('document'), 'doc', searchprio=-1)
+ } # type: Dict[unicode, ObjType]
directives = {
'program': Program,
@@ -446,7 +468,7 @@ class StandardDomain(Domain):
'envvar': EnvVar,
'glossary': Glossary,
'productionlist': ProductionList,
- }
+ } # type: Dict[unicode, Type[Directive]]
roles = {
'option': OptionXRefRole(warn_dangling=True),
'envvar': EnvVarXRefRole(),
@@ -463,7 +485,9 @@ class StandardDomain(Domain):
warn_dangling=True),
# links to labels, without a different title
'keyword': XRefRole(warn_dangling=True),
- }
+ # links to documents
+ 'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
+ } # type: Dict[unicode, Union[RoleFunction, XRefRole]]
initial_data = {
'progoptions': {}, # (program, name) -> docname, labelid
@@ -487,6 +511,7 @@ class StandardDomain(Domain):
'the label must precede a section header)',
'numref': 'undefined label: %(target)s',
'keyword': 'unknown keyword: %(target)s',
+ 'doc': 'unknown document: %(target)s',
'option': 'unknown option: %(target)s',
'citation': 'citation not found: %(target)s',
}
@@ -495,9 +520,10 @@ class StandardDomain(Domain):
nodes.figure: ('figure', None),
nodes.table: ('table', None),
nodes.container: ('code-block', None),
- }
+ } # type: Dict[nodes.Node, Tuple[unicode, Callable]]
def clear_doc(self, docname):
+ # type: (unicode) -> None
for key, (fn, _l) in list(self.data['progoptions'].items()):
if fn == docname:
del self.data['progoptions'][key]
@@ -515,6 +541,7 @@ class StandardDomain(Domain):
del self.data['anonlabels'][key]
def merge_domaindata(self, docnames, otherdata):
+ # type: (List[unicode], Dict) -> None
# XXX duplicates?
for key, data in otherdata['progoptions'].items():
if data[0] in docnames:
@@ -533,19 +560,22 @@ class StandardDomain(Domain):
self.data['anonlabels'][key] = data
def process_doc(self, env, docname, document):
+ # type: (BuildEnvironment, unicode, nodes.Node) -> None
self.note_citations(env, docname, document)
self.note_labels(env, docname, document)
def note_citations(self, env, docname, document):
+ # type: (BuildEnvironment, unicode, nodes.Node) -> None
for node in document.traverse(nodes.citation):
label = node[0].astext()
if label in self.data['citations']:
path = env.doc2path(self.data['citations'][label][0])
- env.warn_node('duplicate citation %s, other instance in %s' %
- (label, path), node)
+ logger.warning('duplicate citation %s, other instance in %s', label, path,
+ location=node)
self.data['citations'][label] = (docname, node['ids'][0])
def note_labels(self, env, docname, document):
+ # type: (BuildEnvironment, unicode, nodes.Node) -> None
labels, anonlabels = self.data['labels'], self.data['anonlabels']
for name, explicit in iteritems(document.nametypes):
if not explicit:
@@ -563,8 +593,9 @@ class StandardDomain(Domain):
# link and object descriptions
continue
if name in labels:
- env.warn_node('duplicate label %s, ' % name + 'other instance '
- 'in ' + env.doc2path(labels[name][0]), node)
+ logger.warning('duplicate label %s, ' % name + 'other instance '
+ 'in ' + env.doc2path(labels[name][0]),
+ location=node)
anonlabels[name] = docname, labelid
if node.tagname in ('section', 'rubric'):
sectname = clean_astext(node[0]) # node[0] == title node
@@ -585,6 +616,7 @@ class StandardDomain(Domain):
def build_reference_node(self, fromdocname, builder, docname, labelid,
sectname, rolename, **options):
+ # type: (unicode, Builder, unicode, unicode, unicode, unicode, Any) -> nodes.Node
nodeclass = options.pop('nodeclass', nodes.reference)
newnode = nodeclass('', '', internal=True, **options)
innernode = nodes.inline(sectname, sectname)
@@ -608,12 +640,15 @@ class StandardDomain(Domain):
return newnode
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
if typ == 'ref':
resolver = self._resolve_ref_xref
elif typ == 'numref':
resolver = self._resolve_numref_xref
elif typ == 'keyword':
resolver = self._resolve_keyword_xref
+ elif typ == 'doc':
+ resolver = self._resolve_doc_xref
elif typ == 'option':
resolver = self._resolve_option_xref
elif typ == 'citation':
@@ -624,6 +659,7 @@ class StandardDomain(Domain):
return resolver(env, fromdocname, builder, typ, target, node, contnode)
def _resolve_ref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
if node['refexplicit']:
# reference to anonymous label; the reference uses
# the supplied link caption
@@ -641,6 +677,7 @@ class StandardDomain(Domain):
docname, labelid, sectname, 'ref')
def _resolve_numref_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
if target in self.data['labels']:
docname, labelid, figname = self.data['labels'].get(target, ('', '', ''))
else:
@@ -651,7 +688,7 @@ class StandardDomain(Domain):
return None
if env.config.numfig is False:
- env.warn_node('numfig is disabled. :numref: is ignored.', node)
+ logger.warning('numfig is disabled. :numref: is ignored.', location=node)
return contnode
target_node = env.get_doctree(docname).ids.get(labelid)
@@ -664,7 +701,8 @@ class StandardDomain(Domain):
if fignumber is None:
return contnode
except ValueError:
- env.warn_node("no number is assigned for %s: %s" % (figtype, labelid), node)
+ logger.warning("no number is assigned for %s: %s", figtype, labelid,
+ location=node)
return contnode
try:
@@ -674,7 +712,7 @@ class StandardDomain(Domain):
title = env.config.numfig_format.get(figtype, '')
if figname is None and '{name}' in title:
- env.warn_node('the link has no caption: %s' % title, node)
+ logger.warning('the link has no caption: %s', title, location=node)
return contnode
else:
fignum = '.'.join(map(str, fignumber))
@@ -688,10 +726,10 @@ class StandardDomain(Domain):
# old style format (cf. "Fig.%s")
newtitle = title % fignum
except KeyError as exc:
- env.warn_node('invalid numfig_format: %s (%r)' % (title, exc), node)
+ logger.warning('invalid numfig_format: %s (%r)', title, exc, location=node)
return contnode
except TypeError:
- env.warn_node('invalid numfig_format: %s' % title, node)
+ logger.warning('invalid numfig_format: %s', title, location=node)
return contnode
return self.build_reference_node(fromdocname, builder,
@@ -700,6 +738,7 @@ class StandardDomain(Domain):
title=title)
def _resolve_keyword_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
# keywords are oddballs: they are referenced by named labels
docname, labelid, _ = self.data['labels'].get(target, ('', '', ''))
if not docname:
@@ -707,7 +746,24 @@ class StandardDomain(Domain):
return make_refnode(builder, fromdocname, docname,
labelid, contnode)
+ def _resolve_doc_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
+ # directly reference to document by source name; can be absolute or relative
+ refdoc = node.get('refdoc', fromdocname)
+ docname = docname_join(refdoc, node['reftarget'])
+ if docname not in env.all_docs:
+ return None
+ else:
+ if node['refexplicit']:
+ # reference with explicit title
+ caption = node.astext()
+ else:
+ caption = clean_astext(env.titles[docname])
+ innernode = nodes.inline(caption, caption, classes=['doc'])
+ return make_refnode(builder, fromdocname, docname, None, innernode)
+
def _resolve_option_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
progname = node.get('std:program')
target = target.strip()
docname, labelid = self.data['progoptions'].get((progname, target), ('', ''))
@@ -729,6 +785,7 @@ class StandardDomain(Domain):
labelid, contnode)
def _resolve_citation_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
from sphinx.environment import NoUri
docname, labelid = self.data['citations'].get(target, ('', ''))
@@ -751,6 +808,7 @@ class StandardDomain(Domain):
raise
def _resolve_obj_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
objtypes = self.objtypes_for_role(typ) or []
for objtype in objtypes:
if (objtype, target) in self.data['objects']:
@@ -764,7 +822,8 @@ class StandardDomain(Domain):
labelid, contnode)
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
- results = []
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[Tuple[unicode, nodes.Node]] # NOQA
+ results = [] # type: List[Tuple[unicode, nodes.Node]]
ltarget = target.lower() # :ref: lowercases its target automatically
for role in ('ref', 'option'): # do not try "keyword"
res = self.resolve_xref(env, fromdocname, builder, role,
@@ -785,6 +844,7 @@ class StandardDomain(Domain):
return results
def get_objects(self):
+ # type: () -> Iterator[Tuple[unicode, unicode, unicode, unicode, unicode, int]]
# handle the special 'doc' reference here
for doc in self.env.all_docs:
yield (doc, clean_astext(self.env.titles[doc]), 'doc', doc, '', -1)
@@ -802,13 +862,16 @@ class StandardDomain(Domain):
yield (name, name, 'label', info[0], info[1], -1)
def get_type_name(self, type, primary=False):
+ # type: (ObjType, bool) -> unicode
# never prepend "Default"
return type.lname
def is_enumerable_node(self, node):
+ # type: (nodes.Node) -> bool
return node.__class__ in self.enumerable_nodes
def get_numfig_title(self, node):
+ # type: (nodes.Node) -> unicode
"""Get the title of enumerable nodes to refer them using its title"""
if self.is_enumerable_node(node):
_, title_getter = self.enumerable_nodes.get(node.__class__, (None, None))
@@ -822,8 +885,10 @@ class StandardDomain(Domain):
return None
def get_figtype(self, node):
+ # type: (nodes.Node) -> unicode
"""Get figure type of nodes."""
def has_child(node, cls):
+ # type: (nodes.Node, Type) -> bool
return any(isinstance(child, cls) for child in node)
if isinstance(node, nodes.section):
@@ -838,6 +903,7 @@ class StandardDomain(Domain):
return figtype
def get_fignumber(self, env, builder, figtype, docname, target_node):
+ # type: (BuildEnvironment, Builder, unicode, unicode, nodes.Node) -> Tuple[int, ...]
if figtype == 'section':
if builder.name == 'latex':
return tuple()
@@ -861,6 +927,7 @@ class StandardDomain(Domain):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_domain(StandardDomain)
return {
diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py
index 129600380..00d730592 100644
--- a/sphinx/environment/__init__.py
+++ b/sphinx/environment/__init__.py
@@ -16,39 +16,50 @@ import time
import types
import codecs
import fnmatch
+import warnings
from os import path
-from glob import glob
+from collections import defaultdict
-from six import iteritems, itervalues, class_types, next
+from six import StringIO, itervalues, class_types, next
from six.moves import cPickle as pickle
-from docutils import nodes
+
from docutils.io import NullOutput
from docutils.core import Publisher
-from docutils.utils import Reporter, relative_path, get_source_line
+from docutils.utils import Reporter, get_source_line
from docutils.parsers.rst import roles
from docutils.parsers.rst.languages import en as english
from docutils.frontend import OptionParser
from sphinx import addnodes
from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput
-from sphinx.util import get_matching_docs, docname_join, FilenameUniqDict
-from sphinx.util.nodes import clean_astext, WarningStream, is_translatable, \
- process_only_nodes
-from sphinx.util.osutil import SEP, getcwd, fs_encoding, ensuredir
-from sphinx.util.images import guess_mimetype
-from sphinx.util.i18n import find_catalog_files, get_image_filename_for_language, \
- search_image_for_language
-from sphinx.util.console import bold, purple
+from sphinx.util import logging
+from sphinx.util import get_matching_docs, FilenameUniqDict, status_iterator
+from sphinx.util.nodes import WarningStream, is_translatable
+from sphinx.util.osutil import SEP, ensuredir
+from sphinx.util.i18n import find_catalog_files
+from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import sphinx_domains
from sphinx.util.matching import compile_matchers
from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks
from sphinx.util.websupport import is_commentable
from sphinx.errors import SphinxError, ExtensionError
+from sphinx.locale import _
+from sphinx.transforms import SphinxTransformer
from sphinx.versioning import add_uids, merge_doctrees
-from sphinx.transforms import SphinxContentsFilter
-from sphinx.environment.managers.indexentries import IndexEntries
-from sphinx.environment.managers.toctree import Toctree
+from sphinx.deprecation import RemovedInSphinx17Warning, RemovedInSphinx20Warning
+from sphinx.environment.adapters.indexentries import IndexEntries
+from sphinx.environment.adapters.toctree import TocTree
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, IO, Iterator, List, Pattern, Set, Tuple, Type, Union # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.config import Config # NOQA
+ from sphinx.domains import Domain # NOQA
+logger = logging.getLogger(__name__)
default_settings = {
'embed_stylesheet': False,
@@ -66,7 +77,7 @@ default_settings = {
# or changed to properly invalidate pickle files.
#
# NOTE: increase base version by 2 to have distinct numbers for Py2 and 3
-ENV_VERSION = 50 + (sys.version_info[0] - 2)
+ENV_VERSION = 51 + (sys.version_info[0] - 2)
dummy_reporter = Reporter('', 4, 4)
@@ -75,7 +86,7 @@ versioning_conditions = {
'none': False,
'text': is_translatable,
'commentable': is_commentable,
-}
+} # type: Dict[unicode, Union[bool, Callable]]
class NoUri(Exception):
@@ -90,56 +101,82 @@ class BuildEnvironment(object):
transformations to resolve links to them.
"""
+ domains = None # type: Dict[unicode, Domain]
+
# --------- ENVIRONMENT PERSISTENCE ----------------------------------------
@staticmethod
- def frompickle(srcdir, config, filename):
- with open(filename, 'rb') as picklefile:
- env = pickle.load(picklefile)
+ def load(f, app=None):
+ # type: (IO, Sphinx) -> BuildEnvironment
+ env = pickle.load(f)
if env.version != ENV_VERSION:
raise IOError('build environment version not current')
- if env.srcdir != srcdir:
- raise IOError('source directory has changed')
- env.config.values = config.values
+ if app:
+ env.app = app
+ env.config.values = app.config.values
+ if env.srcdir != app.srcdir:
+ raise IOError('source directory has changed')
return env
- def topickle(self, filename):
+ @classmethod
+ def loads(cls, string, app=None):
+ # type: (unicode, Sphinx) -> BuildEnvironment
+ io = StringIO(string)
+ return cls.load(io, app)
+
+ @classmethod
+ def frompickle(cls, filename, app):
+ # type: (unicode, Sphinx) -> BuildEnvironment
+ with open(filename, 'rb') as f:
+ return cls.load(f, app)
+
+ @staticmethod
+ def dump(env, f):
+ # type: (BuildEnvironment, IO) -> None
# remove unpicklable attributes
- warnfunc = self._warnfunc
- self.set_warnfunc(None)
- values = self.config.values
- del self.config.values
- domains = self.domains
- del self.domains
- managers = self.detach_managers()
+ app = env.app
+ del env.app
+ values = env.config.values
+ del env.config.values
+ domains = env.domains
+ del env.domains
# remove potentially pickling-problematic values from config
- for key, val in list(vars(self.config).items()):
+ for key, val in list(vars(env.config).items()):
if key.startswith('_') or \
isinstance(val, types.ModuleType) or \
isinstance(val, types.FunctionType) or \
isinstance(val, class_types):
- del self.config[key]
- with open(filename, 'wb') as picklefile:
- pickle.dump(self, picklefile, pickle.HIGHEST_PROTOCOL)
+ del env.config[key]
+ pickle.dump(env, f, pickle.HIGHEST_PROTOCOL)
# reset attributes
- self.attach_managers(managers)
- self.domains = domains
- self.config.values = values
- self.set_warnfunc(warnfunc)
+ env.domains = domains
+ env.config.values = values
+ env.app = app
+
+ @classmethod
+ def dumps(cls, env):
+ # type: (BuildEnvironment) -> unicode
+ io = StringIO()
+ cls.dump(env, io)
+ return io.getvalue()
+
+ def topickle(self, filename):
+ # type: (unicode) -> None
+ with open(filename, 'wb') as f:
+ self.dump(self, f)
# --------- ENVIRONMENT INITIALIZATION -------------------------------------
- def __init__(self, srcdir, doctreedir, config):
- self.doctreedir = doctreedir
- self.srcdir = srcdir
- self.config = config
+ def __init__(self, app):
+ # type: (Sphinx) -> None
+ self.app = app
+ self.doctreedir = app.doctreedir
+ self.srcdir = app.srcdir
+ self.config = app.config
# the method of doctree versioning; see set_versioning_method
- self.versioning_condition = None
- self.versioning_compare = None
-
- # the application object; only set while update() runs
- self.app = None
+ self.versioning_condition = None # type: Union[bool, Callable]
+ self.versioning_compare = None # type: bool
# all the registered domains, set by the application
self.domains = {}
@@ -149,7 +186,7 @@ class BuildEnvironment(object):
self.settings['env'] = self
# the function to write warning messages with
- self._warnfunc = None
+ self._warnfunc = None # type: Callable
# this is to invalidate old pickles
self.version = ENV_VERSION
@@ -157,43 +194,63 @@ class BuildEnvironment(object):
# All "docnames" here are /-separated and relative and exclude
# the source suffix.
- self.found_docs = set() # contains all existing docnames
- self.all_docs = {} # docname -> mtime at the time of reading
+ self.found_docs = set() # type: Set[unicode]
+ # contains all existing docnames
+ self.all_docs = {} # type: Dict[unicode, float]
+ # docname -> mtime at the time of reading
# contains all read docnames
- self.dependencies = {} # docname -> set of dependent file
+ self.dependencies = defaultdict(set) # type: Dict[unicode, Set[unicode]]
+ # docname -> set of dependent file
# names, relative to documentation root
- self.included = set() # docnames included from other documents
- self.reread_always = set() # docnames to re-read unconditionally on
+ self.included = set() # type: Set[unicode]
+ # docnames included from other documents
+ self.reread_always = set() # type: Set[unicode]
+ # docnames to re-read unconditionally on
# next build
# File metadata
- self.metadata = {} # docname -> dict of metadata items
+ self.metadata = defaultdict(dict) # type: Dict[unicode, Dict[unicode, Any]]
+ # docname -> dict of metadata items
# TOC inventory
- self.titles = {} # docname -> title node
- self.longtitles = {} # docname -> title node; only different if
+ self.titles = {} # type: Dict[unicode, nodes.Node]
+ # docname -> title node
+ self.longtitles = {} # type: Dict[unicode, nodes.Node]
+ # docname -> title node; only different if
# set differently with title directive
- self.tocs = {} # docname -> table of contents nodetree
- self.toc_num_entries = {} # docname -> number of real entries
+ self.tocs = {} # type: Dict[unicode, nodes.Node]
+ # docname -> table of contents nodetree
+ self.toc_num_entries = {} # type: Dict[unicode, int]
+ # docname -> number of real entries
+
# used to determine when to show the TOC
# in a sidebar (don't show if it's only one item)
- self.toc_secnumbers = {} # docname -> dict of sectionid -> number
- self.toc_fignumbers = {} # docname -> dict of figtype ->
+ self.toc_secnumbers = {} # type: Dict[unicode, Dict[unicode, Tuple[int, ...]]]
+ # docname -> dict of sectionid -> number
+ self.toc_fignumbers = {} # type: Dict[unicode, Dict[unicode, Dict[unicode, Tuple[int, ...]]]] # NOQA
+ # docname -> dict of figtype ->
# dict of figureid -> number
- self.toctree_includes = {} # docname -> list of toctree includefiles
- self.files_to_rebuild = {} # docname -> set of files
+ self.toctree_includes = {} # type: Dict[unicode, List[unicode]]
+ # docname -> list of toctree includefiles
+ self.files_to_rebuild = {} # type: Dict[unicode, Set[unicode]]
+ # docname -> set of files
# (containing its TOCs) to rebuild too
- self.glob_toctrees = set() # docnames that have :glob: toctrees
- self.numbered_toctrees = set() # docnames that have :numbered: toctrees
+ self.glob_toctrees = set() # type: Set[unicode]
+ # docnames that have :glob: toctrees
+ self.numbered_toctrees = set() # type: Set[unicode]
+ # docnames that have :numbered: toctrees
# domain-specific inventories, here to be pickled
- self.domaindata = {} # domainname -> domain-specific dict
+ self.domaindata = {} # type: Dict[unicode, Dict]
+ # domainname -> domain-specific dict
# Other inventories
- self.indexentries = {} # docname -> list of
- # (type, string, target, aliasname)
- self.versionchanges = {} # version -> list of (type, docname,
+ self.indexentries = {} # type: Dict[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]] # NOQA
+ # docname -> list of
+ # (type, string, target, aliasname)
+ self.versionchanges = {} # type: Dict[unicode, List[Tuple[unicode, unicode, int, unicode, unicode, unicode]]] # NOQA
+ # version -> list of (type, docname,
# lineno, module, descname, content)
# these map absolute path -> (docnames, unique filename)
@@ -201,38 +258,19 @@ class BuildEnvironment(object):
self.dlfiles = FilenameUniqDict()
# temporary data storage while reading a document
- self.temp_data = {}
+ self.temp_data = {} # type: Dict[unicode, Any]
# context for cross-references (e.g. current module or class)
# this is similar to temp_data, but will for example be copied to
# attributes of "any" cross references
- self.ref_context = {}
-
- self.managers = {}
- self.init_managers()
-
- def init_managers(self):
- managers = {}
- for manager_class in [IndexEntries, Toctree]:
- managers[manager_class.name] = manager_class(self)
- self.attach_managers(managers)
-
- def attach_managers(self, managers):
- for name, manager in iteritems(managers):
- self.managers[name] = manager
- manager.attach(self)
-
- def detach_managers(self):
- managers = self.managers
- self.managers = {}
- for _, manager in iteritems(managers):
- manager.detach(self)
- return managers
+ self.ref_context = {} # type: Dict[unicode, Any]
def set_warnfunc(self, func):
- self._warnfunc = func
- self.settings['warning_stream'] = WarningStream(func)
+ # type: (Callable) -> None
+ warnings.warn('env.set_warnfunc() is now deprecated. Use sphinx.util.logging instead.',
+ RemovedInSphinx20Warning)
def set_versioning_method(self, method, compare):
+ # type: (unicode, bool) -> None
"""This sets the doctree versioning method for this environment.
Versioning methods are a builder property; only builders with the same
@@ -251,6 +289,7 @@ class BuildEnvironment(object):
self.versioning_compare = compare
def warn(self, docname, msg, lineno=None, **kwargs):
+ # type: (unicode, unicode, int, Any) -> None
"""Emit a warning.
This differs from using ``app.warn()`` in that the warning may not
@@ -261,62 +300,47 @@ class BuildEnvironment(object):
self._warnfunc(msg, (docname, lineno), **kwargs)
def warn_node(self, msg, node, **kwargs):
+ # type: (unicode, nodes.Node, Any) -> None
"""Like :meth:`warn`, but with source information taken from *node*."""
self._warnfunc(msg, '%s:%s' % get_source_line(node), **kwargs)
def clear_doc(self, docname):
+ # type: (unicode) -> None
"""Remove all traces of a source file in the inventory."""
if docname in self.all_docs:
self.all_docs.pop(docname, None)
self.reread_always.discard(docname)
- self.metadata.pop(docname, None)
- self.dependencies.pop(docname, None)
- self.titles.pop(docname, None)
- self.longtitles.pop(docname, None)
- self.images.purge_doc(docname)
- self.dlfiles.purge_doc(docname)
for version, changes in self.versionchanges.items():
new = [change for change in changes if change[1] != docname]
changes[:] = new
- for manager in itervalues(self.managers):
- manager.clear_doc(docname)
-
for domain in self.domains.values():
domain.clear_doc(docname)
def merge_info_from(self, docnames, other, app):
+ # type: (List[unicode], BuildEnvironment, Sphinx) -> None
"""Merge global information gathered about *docnames* while reading them
from the *other* environment.
This possibly comes from a parallel build process.
"""
- docnames = set(docnames)
+ docnames = set(docnames) # type: ignore
for docname in docnames:
self.all_docs[docname] = other.all_docs[docname]
if docname in other.reread_always:
self.reread_always.add(docname)
- self.metadata[docname] = other.metadata[docname]
- if docname in other.dependencies:
- self.dependencies[docname] = other.dependencies[docname]
- self.titles[docname] = other.titles[docname]
- self.longtitles[docname] = other.longtitles[docname]
-
- self.images.merge_other(docnames, other.images)
- self.dlfiles.merge_other(docnames, other.dlfiles)
for version, changes in other.versionchanges.items():
self.versionchanges.setdefault(version, []).extend(
change for change in changes if change[1] in docnames)
- for manager in itervalues(self.managers):
- manager.merge_other(docnames, other)
for domainname, domain in self.domains.items():
domain.merge_domaindata(docnames, other.domaindata[domainname])
app.emit('env-merge-info', self, docnames, other)
def path2doc(self, filename):
+ # type: (unicode) -> unicode
"""Return the docname for the filename if the file is document.
*filename* should be absolute or relative to the source directory.
@@ -331,6 +355,7 @@ class BuildEnvironment(object):
return None
def doc2path(self, docname, base=True, suffix=None):
+ # type: (unicode, Union[bool, unicode], unicode) -> unicode
"""Return the filename for the document name.
If *base* is True, return absolute path under self.srcdir.
@@ -340,6 +365,7 @@ class BuildEnvironment(object):
"""
docname = docname.replace(SEP, path.sep)
if suffix is None:
+ candidate_suffix = None # type: unicode
for candidate_suffix in self.config.source_suffix:
if path.isfile(path.join(self.srcdir, docname) +
candidate_suffix):
@@ -353,9 +379,10 @@ class BuildEnvironment(object):
elif base is None:
return docname + suffix
else:
- return path.join(base, docname) + suffix
+ return path.join(base, docname) + suffix # type: ignore
def relfn2path(self, filename, docname=None):
+ # type: (unicode, unicode) -> Tuple[unicode, unicode]
"""Return paths to a file referenced from a document, relative to
documentation root and absolute.
@@ -379,23 +406,24 @@ class BuildEnvironment(object):
enc_rel_fn = rel_fn.encode(sys.getfilesystemencoding())
return rel_fn, path.abspath(path.join(self.srcdir, enc_rel_fn))
- def find_files(self, config, buildername=None):
+ def find_files(self, config, builder):
+ # type: (Config, Builder) -> None
"""Find all source files in the source dir and put them in
self.found_docs.
"""
matchers = compile_matchers(
config.exclude_patterns[:] +
config.templates_path +
- config.html_extra_path +
+ builder.get_asset_paths() +
['**/_sources', '.#*', '**/.#*', '*.lproj/**']
)
self.found_docs = set()
- for docname in get_matching_docs(self.srcdir, config.source_suffix,
+ for docname in get_matching_docs(self.srcdir, config.source_suffix, # type: ignore
exclude_matchers=matchers):
if os.access(self.doc2path(docname), os.R_OK):
self.found_docs.add(docname)
else:
- self.warn(docname, "document not readable. Ignored.")
+ logger.warning("document not readable. Ignored.", location=docname)
# Current implementation is applying translated messages in the reading
# phase.Therefore, in order to apply the updated message catalog, it is
@@ -403,7 +431,7 @@ class BuildEnvironment(object):
# is set for the doc source and the mo file, it is processed again from
# the reading phase when mo is updated. In the future, we would like to
# move i18n process into the writing phase, and remove these lines.
- if buildername != 'gettext':
+ if builder.use_message_catalog:
# add catalog mo file dependency
for docname in self.found_docs:
catalog_files = find_catalog_files(
@@ -413,15 +441,16 @@ class BuildEnvironment(object):
self.config.language,
self.config.gettext_compact)
for filename in catalog_files:
- self.dependencies.setdefault(docname, set()).add(filename)
+ self.dependencies[docname].add(filename)
def get_outdated_files(self, config_changed):
+ # type: (bool) -> Tuple[Set[unicode], Set[unicode], Set[unicode]]
"""Return (added, changed, removed) sets."""
# clear all files no longer present
removed = set(self.all_docs) - self.found_docs
- added = set()
- changed = set()
+ added = set() # type: Set[unicode]
+ changed = set() # type: Set[unicode]
if config_changed:
# config values affect e.g. substitutions
@@ -447,7 +476,7 @@ class BuildEnvironment(object):
changed.add(docname)
continue
# finally, check the mtime of dependencies
- for dep in self.dependencies.get(docname, ()):
+ for dep in self.dependencies[docname]:
try:
# this will do the right thing when dep is absolute too
deppath = path.join(self.srcdir, dep)
@@ -465,7 +494,8 @@ class BuildEnvironment(object):
return added, changed, removed
- def update(self, config, srcdir, doctreedir, app):
+ def update(self, config, srcdir, doctreedir):
+ # type: (Config, unicode, unicode) -> List[unicode]
"""(Re-)read all files new or changed since last update.
Store all environment docnames in the canonical format (ie using SEP as
@@ -478,10 +508,8 @@ class BuildEnvironment(object):
else:
# check if a config value was changed that affects how
# doctrees are read
- for key, descr in iteritems(config.values):
- if descr[1] != 'env':
- continue
- if self.config[key] != config[key]:
+ for confval in config.filter('env'):
+ if self.config[confval.name] != confval.value:
msg = '[config changed] '
config_changed = True
break
@@ -495,18 +523,18 @@ class BuildEnvironment(object):
# the source and doctree directories may have been relocated
self.srcdir = srcdir
self.doctreedir = doctreedir
- self.find_files(config, app.buildername)
+ self.find_files(config, self.app.builder)
self.config = config
# this cache also needs to be updated every time
self._nitpick_ignore = set(self.config.nitpick_ignore)
- app.info(bold('updating environment: '), nonl=True)
+ logger.info(bold('updating environment: '), nonl=True)
added, changed, removed = self.get_outdated_files(config_changed)
# allow user intervention as well
- for docs in app.emit('env-get-outdated', self, added, changed, removed):
+ for docs in self.app.emit('env-get-outdated', self, added, changed, removed):
changed.update(set(docs) & self.found_docs)
# if files were added or removed, all documents with globbed toctrees
@@ -517,106 +545,95 @@ class BuildEnvironment(object):
msg += '%s added, %s changed, %s removed' % (len(added), len(changed),
len(removed))
- app.info(msg)
-
- self.app = app
+ logger.info(msg)
# clear all files no longer present
for docname in removed:
- app.emit('env-purge-doc', self, docname)
+ self.app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
# read all new and changed files
docnames = sorted(added | changed)
# allow changing and reordering the list of docs to read
- app.emit('env-before-read-docs', self, docnames)
+ self.app.emit('env-before-read-docs', self, docnames)
# check if we should do parallel or serial read
par_ok = False
- if parallel_available and len(docnames) > 5 and app.parallel > 1:
- par_ok = True
- for extname, md in app._extension_metadata.items():
- ext_ok = md.get('parallel_read_safe')
- if ext_ok:
- continue
- if ext_ok is None:
- app.warn('the %s extension does not declare if it '
- 'is safe for parallel reading, assuming it '
- 'isn\'t - please ask the extension author to '
- 'check and make it explicit' % extname)
- app.warn('doing serial read')
- else:
- app.warn('the %s extension is not safe for parallel '
- 'reading, doing serial read' % extname)
- par_ok = False
- break
+ if parallel_available and len(docnames) > 5 and self.app.parallel > 1:
+ for ext in itervalues(self.app.extensions):
+ if ext.parallel_read_safe is None:
+ logger.warning(_('the %s extension does not declare if it is safe '
+ 'for parallel reading, assuming it isn\'t - please '
+ 'ask the extension author to check and make it '
+ 'explicit'), ext.name)
+ logger.warning('doing serial read')
+ break
+ elif ext.parallel_read_safe is False:
+ break
+ else:
+ # all extensions support parallel-read
+ par_ok = True
+
if par_ok:
- self._read_parallel(docnames, app, nproc=app.parallel)
+ self._read_parallel(docnames, self.app, nproc=self.app.parallel)
else:
- self._read_serial(docnames, app)
+ self._read_serial(docnames, self.app)
if config.master_doc not in self.all_docs:
raise SphinxError('master file %s not found' %
self.doc2path(config.master_doc))
- self.app = None
-
- for retval in app.emit('env-updated', self):
+ for retval in self.app.emit('env-updated', self):
if retval is not None:
docnames.extend(retval)
return sorted(docnames)
def _read_serial(self, docnames, app):
- for docname in app.status_iterator(docnames, 'reading sources... ',
- purple, len(docnames)):
+ # type: (List[unicode], Sphinx) -> None
+ for docname in status_iterator(docnames, 'reading sources... ', "purple",
+ len(docnames), self.app.verbosity):
# remove all inventory entries for that file
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
self.read_doc(docname, app)
def _read_parallel(self, docnames, app, nproc):
+ # type: (List[unicode], Sphinx, int) -> None
# clear all outdated docs at once
for docname in docnames:
app.emit('env-purge-doc', self, docname)
self.clear_doc(docname)
def read_process(docs):
+ # type: (List[unicode]) -> unicode
self.app = app
- self.warnings = []
- self.set_warnfunc(lambda *args, **kwargs: self.warnings.append((args, kwargs)))
for docname in docs:
self.read_doc(docname, app)
# allow pickling self to send it back
- self.set_warnfunc(None)
- del self.app
- del self.domains
- del self.config.values
- del self.config
- return self
+ return BuildEnvironment.dumps(self)
def merge(docs, otherenv):
- warnings.extend(otherenv.warnings)
- self.merge_info_from(docs, otherenv, app)
+ # type: (List[unicode], unicode) -> None
+ env = BuildEnvironment.loads(otherenv)
+ self.merge_info_from(docs, env, app)
tasks = ParallelTasks(nproc)
chunks = make_chunks(docnames, nproc)
- warnings = []
- for chunk in app.status_iterator(
- chunks, 'reading sources... ', purple, len(chunks)):
+ for chunk in status_iterator(chunks, 'reading sources... ', "purple",
+ len(chunks), self.app.verbosity):
tasks.add_task(read_process, chunk, merge)
# make sure all threads have finished
- app.info(bold('waiting for workers...'))
+ logger.info(bold('waiting for workers...'))
tasks.join()
- for warning, kwargs in warnings:
- self._warnfunc(*warning, **kwargs)
-
- def check_dependents(self, already):
- to_rewrite = (self.toctree.assign_section_numbers() +
- self.toctree.assign_figure_numbers())
+ def check_dependents(self, app, already):
+ # type: (Sphinx, Set[unicode]) -> Iterator[unicode]
+ to_rewrite = [] # type: List[unicode]
+ for docnames in app.emit('env-get-updated', self):
+ to_rewrite.extend(docnames)
for docname in set(to_rewrite):
if docname not in already:
yield docname
@@ -624,20 +641,22 @@ class BuildEnvironment(object):
# --------- SINGLE FILE READING --------------------------------------------
def warn_and_replace(self, error):
+ # type: (Any) -> Tuple
"""Custom decoding error handler that warns and replaces."""
linestart = error.object.rfind(b'\n', 0, error.start)
lineend = error.object.find(b'\n', error.start)
if lineend == -1:
lineend = len(error.object)
lineno = error.object.count(b'\n', 0, error.start) + 1
- self.warn(self.docname, 'undecodable source characters, '
- 'replacing with "?": %r' %
- (error.object[linestart + 1:error.start] + b'>>>' +
- error.object[error.start:error.end] + b'<<<' +
- error.object[error.end:lineend]), lineno)
+ logger.warning('undecodable source characters, replacing with "?": %r',
+ (error.object[linestart + 1:error.start] + b'>>>' +
+ error.object[error.start:error.end] + b'<<<' +
+ error.object[error.end:lineend]),
+ location=(self.docname, lineno))
return (u'?', error.end)
def read_doc(self, docname, app=None):
+ # type: (unicode, Sphinx) -> None
"""Parse a file and add/update inventory entries for the doctree."""
self.temp_data['docname'] = docname
@@ -663,10 +682,10 @@ class BuildEnvironment(object):
if role_fn:
roles._roles[''] = role_fn
else:
- self.warn(docname, 'default role %s not found' %
- self.config.default_role)
+ logger.warning('default role %s not found', self.config.default_role,
+ location=docname)
- codecs.register_error('sphinx', self.warn_and_replace)
+ codecs.register_error('sphinx', self.warn_and_replace) # type: ignore
# publish manually
reader = SphinxStandaloneReader(self.app, parsers=self.config.source_parsers)
@@ -685,13 +704,6 @@ class BuildEnvironment(object):
doctree = pub.document
# post-processing
- self.process_dependencies(docname, doctree)
- self.process_images(docname, doctree)
- self.process_downloads(docname, doctree)
- self.process_metadata(docname, doctree)
- self.create_title_from(docname, doctree)
- for manager in itervalues(self.managers):
- manager.process_doc(docname, doctree)
for domain in itervalues(self.domains):
domain.process_doc(self, docname, doctree)
@@ -747,24 +759,30 @@ class BuildEnvironment(object):
@property
def docname(self):
+ # type: () -> unicode
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
@property
def currmodule(self):
+ # type: () -> None
"""Backwards compatible alias. Will be removed."""
- self.warn(self.docname, 'env.currmodule is being referenced by an '
- 'extension; this API will be removed in the future')
+ warnings.warn('env.currmodule is deprecated. '
+ 'Use env.ref_context["py:module"] instead.',
+ RemovedInSphinx17Warning)
return self.ref_context.get('py:module')
@property
def currclass(self):
+ # type: () -> None
"""Backwards compatible alias. Will be removed."""
- self.warn(self.docname, 'env.currclass is being referenced by an '
- 'extension; this API will be removed in the future')
+ warnings.warn('env.currclass is deprecated. '
+ 'Use env.ref_context["py:class"] instead.',
+ RemovedInSphinx17Warning)
return self.ref_context.get('py:class')
def new_serialno(self, category=''):
+ # type: (unicode) -> int
"""Return a serial number, e.g. for index entry targets.
The number is guaranteed to be unique in the current document.
@@ -775,15 +793,17 @@ class BuildEnvironment(object):
return cur
def note_dependency(self, filename):
+ # type: (unicode) -> None
"""Add *filename* as a dependency of the current document.
This means that the document will be rebuilt if this file changes.
*filename* should be absolute or relative to the source directory.
"""
- self.dependencies.setdefault(self.docname, set()).add(filename)
+ self.dependencies[self.docname].add(filename)
def note_included(self, filename):
+ # type: (unicode) -> None
"""Add *filename* as a included from other document.
This means the document is not orphaned.
@@ -793,184 +813,47 @@ class BuildEnvironment(object):
self.included.add(self.path2doc(filename))
def note_reread(self):
+ # type: () -> None
"""Add the current document to the list of documents that will
automatically be re-read at the next build.
"""
self.reread_always.add(self.docname)
def note_versionchange(self, type, version, node, lineno):
+ # type: (unicode, unicode, nodes.Node, int) -> None
self.versionchanges.setdefault(version, []).append(
(type, self.temp_data['docname'], lineno,
self.ref_context.get('py:module'),
self.temp_data.get('object'), node.astext()))
- # post-processing of read doctrees
-
- def process_dependencies(self, docname, doctree):
- """Process docutils-generated dependency info."""
- cwd = getcwd()
- frompath = path.join(path.normpath(self.srcdir), 'dummy')
- deps = doctree.settings.record_dependencies
- if not deps:
- return
- for dep in deps.list:
- # the dependency path is relative to the working dir, so get
- # one relative to the srcdir
- if isinstance(dep, bytes):
- dep = dep.decode(fs_encoding)
- relpath = relative_path(frompath,
- path.normpath(path.join(cwd, dep)))
- self.dependencies.setdefault(docname, set()).add(relpath)
-
- def process_downloads(self, docname, doctree):
- """Process downloadable file paths. """
- for node in doctree.traverse(addnodes.download_reference):
- targetname = node['reftarget']
- rel_filename, filename = self.relfn2path(targetname, docname)
- self.dependencies.setdefault(docname, set()).add(rel_filename)
- if not os.access(filename, os.R_OK):
- self.warn_node('download file not readable: %s' % filename,
- node)
- continue
- uniquename = self.dlfiles.add_file(docname, filename)
- node['filename'] = uniquename
-
- def process_images(self, docname, doctree):
- """Process and rewrite image URIs."""
- def collect_candidates(imgpath, candidates):
- globbed = {}
- for filename in glob(imgpath):
- new_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
- filename)
- try:
- mimetype = guess_mimetype(filename)
- if mimetype not in candidates:
- globbed.setdefault(mimetype, []).append(new_imgpath)
- except (OSError, IOError) as err:
- self.warn_node('image file %s not readable: %s' %
- (filename, err), node)
- for key, files in iteritems(globbed):
- candidates[key] = sorted(files, key=len)[0] # select by similarity
-
- for node in doctree.traverse(nodes.image):
- # Map the mimetype to the corresponding image. The writer may
- # choose the best image from these candidates. The special key * is
- # set if there is only single candidate to be used by a writer.
- # The special key ? is set for nonlocal URIs.
- node['candidates'] = candidates = {}
- imguri = node['uri']
- if imguri.startswith('data:'):
- self.warn_node('image data URI found. some builders might not support', node,
- type='image', subtype='data_uri')
- candidates['?'] = imguri
- continue
- elif imguri.find('://') != -1:
- self.warn_node('nonlocal image URI found: %s' % imguri, node,
- type='image', subtype='nonlocal_uri')
- candidates['?'] = imguri
- continue
- rel_imgpath, full_imgpath = self.relfn2path(imguri, docname)
- if self.config.language:
- # substitute figures (ex. foo.png -> foo.en.png)
- i18n_full_imgpath = search_image_for_language(full_imgpath, self)
- if i18n_full_imgpath != full_imgpath:
- full_imgpath = i18n_full_imgpath
- rel_imgpath = relative_path(path.join(self.srcdir, 'dummy'),
- i18n_full_imgpath)
- # set imgpath as default URI
- node['uri'] = rel_imgpath
- if rel_imgpath.endswith(os.extsep + '*'):
- if self.config.language:
- # Search language-specific figures at first
- i18n_imguri = get_image_filename_for_language(imguri, self)
- _, full_i18n_imgpath = self.relfn2path(i18n_imguri, docname)
- collect_candidates(full_i18n_imgpath, candidates)
-
- collect_candidates(full_imgpath, candidates)
- else:
- candidates['*'] = rel_imgpath
-
- # map image paths to unique image names (so that they can be put
- # into a single directory)
- for imgpath in itervalues(candidates):
- self.dependencies.setdefault(docname, set()).add(imgpath)
- if not os.access(path.join(self.srcdir, imgpath), os.R_OK):
- self.warn_node('image file not readable: %s' % imgpath,
- node)
- continue
- self.images.add_file(docname, imgpath)
-
- def process_metadata(self, docname, doctree):
- """Process the docinfo part of the doctree as metadata.
-
- Keep processing minimal -- just return what docutils says.
- """
- self.metadata[docname] = md = {}
- try:
- docinfo = doctree[0]
- except IndexError:
- # probably an empty document
- return
- if docinfo.__class__ is not nodes.docinfo:
- # nothing to see here
- return
- for node in docinfo:
- # nodes are multiply inherited...
- if isinstance(node, nodes.authors):
- md['authors'] = [author.astext() for author in node]
- elif isinstance(node, nodes.TextElement): # e.g. author
- md[node.__class__.__name__] = node.astext()
- else:
- name, body = node
- md[name.astext()] = body.astext()
- for name, value in md.items():
- if name in ('tocdepth',):
- try:
- value = int(value)
- except ValueError:
- value = 0
- md[name] = value
-
- del doctree[0]
-
- def create_title_from(self, docname, document):
- """Add a title node to the document (just copy the first section title),
- and store that title in the environment.
- """
- titlenode = nodes.title()
- longtitlenode = titlenode
- # explicit title set with title directive; use this only for
- # the <title> tag in HTML output
- if 'title' in document:
- longtitlenode = nodes.title()
- longtitlenode += nodes.Text(document['title'])
- # look for first section title and use that as the title
- for node in document.traverse(nodes.section):
- visitor = SphinxContentsFilter(document)
- node[0].walkabout(visitor)
- titlenode += visitor.get_entry_text()
- break
- else:
- # document has no title
- titlenode += nodes.Text('<no title>')
- self.titles[docname] = titlenode
- self.longtitles[docname] = longtitlenode
-
def note_toctree(self, docname, toctreenode):
+ # type: (unicode, addnodes.toctree) -> None
"""Note a TOC tree directive in a document and gather information about
file relations from it.
"""
- self.toctree.note_toctree(docname, toctreenode)
+ warnings.warn('env.note_toctree() is deprecated. '
+ 'Use sphinx.environment.adapters.toctree.TocTree instead.',
+ RemovedInSphinx20Warning)
+ TocTree(self).note(docname, toctreenode)
def get_toc_for(self, docname, builder):
+ # type: (unicode, Builder) -> Dict[unicode, nodes.Node]
"""Return a TOC nodetree -- for use on the same page only!"""
- return self.toctree.get_toc_for(docname, builder)
+ warnings.warn('env.get_toc_for() is deprecated. '
+ 'Use sphinx.environment.adapters.toctree.TocTree instead.',
+ RemovedInSphinx20Warning)
+ return TocTree(self).get_toc_for(docname, builder)
def get_toctree_for(self, docname, builder, collapse, **kwds):
+ # type: (unicode, Builder, bool, Any) -> addnodes.toctree
"""Return the global TOC nodetree."""
- return self.toctree.get_toctree_for(docname, builder, collapse, **kwds)
+ warnings.warn('env.get_toctree_for() is deprecated. '
+ 'Use sphinx.environment.adapters.toctree.TocTree instead.',
+ RemovedInSphinx20Warning)
+ return TocTree(self).get_toctree_for(docname, builder, collapse, **kwds)
def get_domain(self, domainname):
+ # type: (unicode) -> Domain
"""Return the domain instance with the specified name.
Raises an ExtensionError if the domain is not registered.
@@ -983,6 +866,7 @@ class BuildEnvironment(object):
# --------- RESOLVING REFERENCES AND TOCTREES ------------------------------
def get_doctree(self, docname):
+ # type: (unicode) -> nodes.Node
"""Read the doctree for a file from the pickle and return it."""
doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
with open(doctree_filename, 'rb') as f:
@@ -994,6 +878,7 @@ class BuildEnvironment(object):
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
+ # type: (unicode, Builder, nodes.Node, bool, bool) -> nodes.Node
"""Read the doctree from the pickle, resolve cross-references and
toctrees and return it.
"""
@@ -1001,13 +886,13 @@ class BuildEnvironment(object):
doctree = self.get_doctree(docname)
# resolve all pending cross-references
- self.resolve_references(doctree, docname, builder)
+ self.apply_post_transforms(doctree, docname)
# now, resolve all toctree nodes
for toctreenode in doctree.traverse(addnodes.toctree):
- result = self.resolve_toctree(docname, builder, toctreenode,
- prune=prune_toctrees,
- includehidden=includehidden)
+ result = TocTree(self).resolve(docname, builder, toctreenode,
+ prune=prune_toctrees,
+ includehidden=includehidden)
if result is None:
toctreenode.replace_self([])
else:
@@ -1017,6 +902,7 @@ class BuildEnvironment(object):
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
+ # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
"""Resolve a *toctree* node into individual bullet lists with titles
as items, returning None (if no containing titles are found) or
a new node.
@@ -1028,146 +914,50 @@ class BuildEnvironment(object):
If *collapse* is True, all branches not containing docname will
be collapsed.
"""
- return self.toctree.resolve_toctree(docname, builder, toctree, prune,
- maxdepth, titles_only, collapse,
- includehidden)
+ return TocTree(self).resolve(docname, builder, toctree, prune,
+ maxdepth, titles_only, collapse,
+ includehidden)
def resolve_references(self, doctree, fromdocname, builder):
- for node in doctree.traverse(addnodes.pending_xref):
- contnode = node[0].deepcopy()
- newnode = None
-
- typ = node['reftype']
- target = node['reftarget']
- refdoc = node.get('refdoc', fromdocname)
- domain = None
-
- try:
- if 'refdomain' in node and node['refdomain']:
- # let the domain try to resolve the reference
- try:
- domain = self.domains[node['refdomain']]
- except KeyError:
- raise NoUri
- newnode = domain.resolve_xref(self, refdoc, builder,
- typ, target, node, contnode)
- # really hardwired reference types
- elif typ == 'any':
- newnode = self._resolve_any_reference(builder, refdoc, node, contnode)
- elif typ == 'doc':
- newnode = self._resolve_doc_reference(builder, refdoc, node, contnode)
- # no new node found? try the missing-reference event
- if newnode is None:
- newnode = builder.app.emit_firstresult(
- 'missing-reference', self, node, contnode)
- # still not found? warn if node wishes to be warned about or
- # we are in nit-picky mode
- if newnode is None:
- self._warn_missing_reference(refdoc, typ, target, node, domain)
- except NoUri:
- newnode = contnode
- node.replace_self(newnode or contnode)
-
- # remove only-nodes that do not belong to our builder
- process_only_nodes(doctree, builder.tags, warn_node=self.warn_node)
+ # type: (nodes.Node, unicode, Builder) -> None
+ self.apply_post_transforms(doctree, fromdocname)
- # allow custom references to be resolved
- builder.app.emit('doctree-resolved', doctree, fromdocname)
-
- def _warn_missing_reference(self, refdoc, typ, target, node, domain):
- warn = node.get('refwarn')
- if self.config.nitpicky:
- warn = True
- if self._nitpick_ignore:
- dtype = domain and '%s:%s' % (domain.name, typ) or typ
- if (dtype, target) in self._nitpick_ignore:
- warn = False
- # for "std" types also try without domain name
- if (not domain or domain.name == 'std') and \
- (typ, target) in self._nitpick_ignore:
- warn = False
- if not warn:
- return
- if domain and typ in domain.dangling_warnings:
- msg = domain.dangling_warnings[typ]
- elif typ == 'doc':
- msg = 'unknown document: %(target)s'
- elif node.get('refdomain', 'std') not in ('', 'std'):
- msg = '%s:%s reference target not found: %%(target)s' % \
- (node['refdomain'], typ)
- else:
- msg = '%r reference target not found: %%(target)s' % typ
- self.warn_node(msg % {'target': target}, node, type='ref', subtype=typ)
+ def apply_post_transforms(self, doctree, docname):
+ # type: (nodes.Node, unicode) -> None
+ """Apply all post-transforms."""
+ try:
+ # set env.docname during applying post-transforms
+ self.temp_data['docname'] = docname
+ if hasattr(doctree, 'settings'):
+ doctree.settings.env = self
- def _resolve_doc_reference(self, builder, refdoc, node, contnode):
- # directly reference to document by source name;
- # can be absolute or relative
- docname = docname_join(refdoc, node['reftarget'])
- if docname in self.all_docs:
- if node['refexplicit']:
- # reference with explicit title
- caption = node.astext()
- else:
- caption = clean_astext(self.titles[docname])
- innernode = nodes.inline(caption, caption)
- innernode['classes'].append('doc')
- newnode = nodes.reference('', '', internal=True)
- newnode['refuri'] = builder.get_relative_uri(refdoc, docname)
- newnode.append(innernode)
- return newnode
-
- def _resolve_any_reference(self, builder, refdoc, node, contnode):
- """Resolve reference generated by the "any" role."""
- target = node['reftarget']
- results = []
- # first, try resolving as :doc:
- doc_ref = self._resolve_doc_reference(builder, refdoc, node, contnode)
- if doc_ref:
- results.append(('doc', doc_ref))
- # next, do the standard domain (makes this a priority)
- results.extend(self.domains['std'].resolve_any_xref(
- self, refdoc, builder, target, node, contnode))
- for domain in self.domains.values():
- if domain.name == 'std':
- continue # we did this one already
- try:
- results.extend(domain.resolve_any_xref(self, refdoc, builder,
- target, node, contnode))
- except NotImplementedError:
- # the domain doesn't yet support the new interface
- # we have to manually collect possible references (SLOW)
- for role in domain.roles:
- res = domain.resolve_xref(self, refdoc, builder, role, target,
- node, contnode)
- if res and isinstance(res[0], nodes.Element):
- results.append(('%s:%s' % (domain.name, role), res))
- # now, see how many matches we got...
- if not results:
- return None
- if len(results) > 1:
- nice_results = ' or '.join(':%s:' % r[0] for r in results)
- self.warn_node('more than one target found for \'any\' cross-'
- 'reference %r: could be %s' % (target, nice_results),
- node)
- res_role, newnode = results[0]
- # Override "any" class with the actual role type to get the styling
- # approximately correct.
- res_domain = res_role.split(':')[0]
- if newnode and newnode[0].get('classes'):
- newnode[0]['classes'].append(res_domain)
- newnode[0]['classes'].append(res_role.replace(':', '-'))
- return newnode
+ transformer = SphinxTransformer(doctree)
+ transformer.add_transforms(self.app.post_transforms)
+ transformer.apply_transforms()
+ finally:
+ self.temp_data.clear()
+
+ # allow custom references to be resolved
+ self.app.emit('doctree-resolved', doctree, docname)
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
- return self.indices.create_index(builder, group_entries=group_entries, _fixre=_fixre)
+ # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, List[unicode]]]]] # NOQA
+ warnings.warn('env.create_index() is deprecated. '
+ 'Use sphinx.environment.adapters.indexentries.IndexEntries instead.',
+ RemovedInSphinx20Warning)
+ return IndexEntries(self).create_index(builder,
+ group_entries=group_entries,
+ _fixre=_fixre)
def collect_relations(self):
+ # type: () -> Dict[unicode, List[unicode]]
traversed = set()
def traverse_toctree(parent, docname):
+ # type: (unicode, unicode) -> Iterator[Tuple[unicode, unicode]]
if parent == docname:
- self.warn(docname, 'self referenced toctree found. Ignored.')
+ logger.warning('self referenced toctree found. Ignored.', location=docname)
return
# traverse toctree by pre-order
@@ -1195,6 +985,7 @@ class BuildEnvironment(object):
return relations
def check_consistency(self):
+ # type: () -> None
"""Do consistency checks."""
for docname in sorted(self.all_docs):
if docname not in self.files_to_rebuild:
@@ -1206,4 +997,5 @@ class BuildEnvironment(object):
continue
if 'orphan' in self.metadata[docname]:
continue
- self.warn(docname, 'document isn\'t included in any toctree')
+ logger.warning('document isn\'t included in any toctree',
+ location=docname)
diff --git a/sphinx/environment/adapters/__init__.py b/sphinx/environment/adapters/__init__.py
new file mode 100644
index 000000000..12a6fa490
--- /dev/null
+++ b/sphinx/environment/adapters/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.adapters
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Sphinx environment adapters
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
diff --git a/sphinx/environment/managers/indexentries.py b/sphinx/environment/adapters/indexentries.py
index aeea0e5c1..946e635ef 100644
--- a/sphinx/environment/managers/indexentries.py
+++ b/sphinx/environment/adapters/indexentries.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
"""
- sphinx.environment.managers.indexentries
+ sphinx.environment.adapters.indexentries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Index entries manager for sphinx.environment.
+ Index entries adapters for sphinx.environment.
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
@@ -13,53 +13,35 @@ import bisect
import unicodedata
from itertools import groupby
-from six import text_type
+from six import text_type, iteritems
-from sphinx import addnodes
-from sphinx.util import iteritems, split_index_msg, split_into
from sphinx.locale import _
-from sphinx.environment.managers import EnvironmentManager
+from sphinx.util import split_into, logging
+if False:
+ # For type annotation
+ from typing import Any, Dict, Pattern, List, Tuple # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
-class IndexEntries(EnvironmentManager):
- name = 'indices'
+logger = logging.getLogger(__name__)
- def __init__(self, env):
- super(IndexEntries, self).__init__(env)
- self.data = env.indexentries
-
- def clear_doc(self, docname):
- self.data.pop(docname, None)
-
- def merge_other(self, docnames, other):
- for docname in docnames:
- self.data[docname] = other.indexentries[docname]
- def process_doc(self, docname, doctree):
- entries = self.data[docname] = []
- for node in doctree.traverse(addnodes.index):
- try:
- for entry in node['entries']:
- split_index_msg(entry[0], entry[1])
- except ValueError as exc:
- self.env.warn_node(exc, node)
- node.parent.remove(node)
- else:
- for entry in node['entries']:
- if len(entry) == 5:
- # Since 1.4: new index structure including index_key (5th column)
- entries.append(entry)
- else:
- entries.append(entry + (None,))
+class IndexEntries(object):
+ def __init__(self, env):
+ # type: (BuildEnvironment) -> None
+ self.env = env
def create_index(self, builder, group_entries=True,
_fixre=re.compile(r'(.*) ([(][^()]*[)])')):
+ # type: (Builder, bool, Pattern) -> List[Tuple[unicode, List[Tuple[unicode, Any]]]] # NOQA
"""Create the real index from the collected index entries."""
from sphinx.environment import NoUri
- new = {}
+ new = {} # type: Dict[unicode, List]
def add_entry(word, subword, main, link=True, dic=new, key=None):
+ # type: (unicode, unicode, unicode, bool, Dict, unicode) -> None
# Force the word to be unicode if it's a ASCII bytestring.
# This will solve problems with unicode normalization later.
# For instance the RFC role will add bytestrings at the moment
@@ -78,7 +60,7 @@ class IndexEntries(EnvironmentManager):
# maintain links in sorted/deterministic order
bisect.insort(entry[0], (main, uri))
- for fn, entries in iteritems(self.data):
+ for fn, entries in iteritems(self.env.indexentries):
# new entry types must be listed in directives/other.py!
for type, value, tid, main, index_key in entries:
try:
@@ -107,13 +89,14 @@ class IndexEntries(EnvironmentManager):
add_entry(first, _('see also %s') % second, None,
link=False, key=index_key)
else:
- self.env.warn(fn, 'unknown index entry type %r' % type)
+ logger.warning('unknown index entry type %r', type, location=fn)
except ValueError as err:
- self.env.warn(fn, str(err))
+ logger.warning(str(err), location=fn)
# sort the index entries; put all symbols at the front, even those
# following the letters in ASCII, this is where the chr(127) comes from
def keyfunc(entry):
+ # type: (Tuple[unicode, List]) -> Tuple[unicode, unicode]
key, (void, void, category_key) = entry
if category_key:
# using specified category key to sort
@@ -136,8 +119,8 @@ class IndexEntries(EnvironmentManager):
# func()
# (in module foo)
# (in module bar)
- oldkey = ''
- oldsubitems = None
+ oldkey = '' # type: unicode
+ oldsubitems = None # type: Dict[unicode, List]
i = 0
while i < len(newlist):
key, (targets, subitems, _key) = newlist[i]
@@ -160,6 +143,7 @@ class IndexEntries(EnvironmentManager):
# group the entries by letter
def keyfunc2(item):
+ # type: (Tuple[unicode, List]) -> unicode
# hack: mutating the subitems dicts to a list in the keyfunc
k, v = item
v[1] = sorted((si, se) for (si, (se, void, void)) in iteritems(v[1]))
diff --git a/sphinx/environment/adapters/toctree.py b/sphinx/environment/adapters/toctree.py
new file mode 100644
index 000000000..ab36318dd
--- /dev/null
+++ b/sphinx/environment/adapters/toctree.py
@@ -0,0 +1,336 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.adapters.toctree
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Toctree adapter for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from six import iteritems
+
+from docutils import nodes
+
+from sphinx import addnodes
+from sphinx.util import url_re, logging
+from sphinx.util.nodes import clean_astext
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+class TocTree(object):
+ def __init__(self, env):
+ # type: (BuildEnvironment) -> None
+ self.env = env
+
+ def note(self, docname, toctreenode):
+ # type: (unicode, addnodes.toctree) -> None
+ """Note a TOC tree directive in a document and gather information about
+ file relations from it.
+ """
+ if toctreenode['glob']:
+ self.env.glob_toctrees.add(docname)
+ if toctreenode.get('numbered'):
+ self.env.numbered_toctrees.add(docname)
+ includefiles = toctreenode['includefiles']
+ for includefile in includefiles:
+ # note that if the included file is rebuilt, this one must be
+ # too (since the TOC of the included file could have changed)
+ self.env.files_to_rebuild.setdefault(includefile, set()).add(docname)
+ self.env.toctree_includes.setdefault(docname, []).extend(includefiles)
+
+ def resolve(self, docname, builder, toctree, prune=True, maxdepth=0,
+ titles_only=False, collapse=False, includehidden=False):
+ # type: (unicode, Builder, addnodes.toctree, bool, int, bool, bool, bool) -> nodes.Node
+ """Resolve a *toctree* node into individual bullet lists with titles
+ as items, returning None (if no containing titles are found) or
+ a new node.
+
+ If *prune* is True, the tree is pruned to *maxdepth*, or if that is 0,
+ to the value of the *maxdepth* option on the *toctree* node.
+ If *titles_only* is True, only toplevel document titles will be in the
+ resulting tree.
+ If *collapse* is True, all branches not containing docname will
+ be collapsed.
+ """
+ if toctree.get('hidden', False) and not includehidden:
+ return None
+
+ # For reading the following two helper function, it is useful to keep
+ # in mind the node structure of a toctree (using HTML-like node names
+ # for brevity):
+ #
+ # <ul>
+ # <li>
+ # <p><a></p>
+ # <p><a></p>
+ # ...
+ # <ul>
+ # ...
+ # </ul>
+ # </li>
+ # </ul>
+ #
+ # The transformation is made in two passes in order to avoid
+ # interactions between marking and pruning the tree (see bug #1046).
+
+ toctree_ancestors = self.get_toctree_ancestors(docname)
+
+ def _toctree_add_classes(node, depth):
+ # type: (nodes.Node, int) -> None
+ """Add 'toctree-l%d' and 'current' classes to the toctree."""
+ for subnode in node.children:
+ if isinstance(subnode, (addnodes.compact_paragraph,
+ nodes.list_item)):
+ # for <p> and <li>, indicate the depth level and recurse
+ subnode['classes'].append('toctree-l%d' % (depth - 1))
+ _toctree_add_classes(subnode, depth)
+ elif isinstance(subnode, nodes.bullet_list):
+ # for <ul>, just recurse
+ _toctree_add_classes(subnode, depth + 1)
+ elif isinstance(subnode, nodes.reference):
+ # for <a>, identify which entries point to the current
+ # document and therefore may not be collapsed
+ if subnode['refuri'] == docname:
+ if not subnode['anchorname']:
+ # give the whole branch a 'current' class
+ # (useful for styling it differently)
+ branchnode = subnode
+ while branchnode:
+ branchnode['classes'].append('current')
+ branchnode = branchnode.parent
+ # mark the list_item as "on current page"
+ if subnode.parent.parent.get('iscurrent'):
+ # but only if it's not already done
+ return
+ while subnode:
+ subnode['iscurrent'] = True
+ subnode = subnode.parent
+
+ def _entries_from_toctree(toctreenode, parents, separate=False, subtree=False):
+ # type: (addnodes.toctree, List[nodes.Node], bool, bool) -> List[nodes.Node]
+ """Return TOC entries for a toctree node."""
+ refs = [(e[0], e[1]) for e in toctreenode['entries']]
+ entries = []
+ for (title, ref) in refs:
+ try:
+ refdoc = None
+ if url_re.match(ref):
+ if title is None:
+ title = ref
+ reference = nodes.reference('', '', internal=False,
+ refuri=ref, anchorname='',
+ *[nodes.Text(title)])
+ para = addnodes.compact_paragraph('', '', reference)
+ item = nodes.list_item('', para)
+ toc = nodes.bullet_list('', item)
+ elif ref == 'self':
+ # 'self' refers to the document from which this
+ # toctree originates
+ ref = toctreenode['parent']
+ if not title:
+ title = clean_astext(self.env.titles[ref])
+ reference = nodes.reference('', '', internal=True,
+ refuri=ref,
+ anchorname='',
+ *[nodes.Text(title)])
+ para = addnodes.compact_paragraph('', '', reference)
+ item = nodes.list_item('', para)
+ # don't show subitems
+ toc = nodes.bullet_list('', item)
+ else:
+ if ref in parents:
+ logger.warning('circular toctree references '
+ 'detected, ignoring: %s <- %s',
+ ref, ' <- '.join(parents),
+ location=ref)
+ continue
+ refdoc = ref
+ toc = self.env.tocs[ref].deepcopy()
+ maxdepth = self.env.metadata[ref].get('tocdepth', 0)
+ if ref not in toctree_ancestors or (prune and maxdepth > 0):
+ self._toctree_prune(toc, 2, maxdepth, collapse)
+ self.process_only_nodes(toc)
+ if title and toc.children and len(toc.children) == 1:
+ child = toc.children[0]
+ for refnode in child.traverse(nodes.reference):
+ if refnode['refuri'] == ref and \
+ not refnode['anchorname']:
+ refnode.children = [nodes.Text(title)]
+ if not toc.children:
+ # empty toc means: no titles will show up in the toctree
+ logger.warning('toctree contains reference to document %r that '
+ 'doesn\'t have a title: no link will be generated',
+ ref, location=toctreenode)
+ except KeyError:
+ # this is raised if the included file does not exist
+ logger.warning('toctree contains reference to nonexisting document %r',
+ ref, location=toctreenode)
+ else:
+ # if titles_only is given, only keep the main title and
+ # sub-toctrees
+ if titles_only:
+ # delete everything but the toplevel title(s)
+ # and toctrees
+ for toplevel in toc:
+ # nodes with length 1 don't have any children anyway
+ if len(toplevel) > 1:
+ subtrees = toplevel.traverse(addnodes.toctree)
+ if subtrees:
+ toplevel[1][:] = subtrees
+ else:
+ toplevel.pop(1)
+ # resolve all sub-toctrees
+ for subtocnode in toc.traverse(addnodes.toctree):
+ if not (subtocnode.get('hidden', False) and
+ not includehidden):
+ i = subtocnode.parent.index(subtocnode) + 1
+ for item in _entries_from_toctree(
+ subtocnode, [refdoc] + parents,
+ subtree=True):
+ subtocnode.parent.insert(i, item)
+ i += 1
+ subtocnode.parent.remove(subtocnode)
+ if separate:
+ entries.append(toc)
+ else:
+ entries.extend(toc.children)
+ if not subtree and not separate:
+ ret = nodes.bullet_list()
+ ret += entries
+ return [ret]
+ return entries
+
+ maxdepth = maxdepth or toctree.get('maxdepth', -1)
+ if not titles_only and toctree.get('titlesonly', False):
+ titles_only = True
+ if not includehidden and toctree.get('includehidden', False):
+ includehidden = True
+
+ # NOTE: previously, this was separate=True, but that leads to artificial
+ # separation when two or more toctree entries form a logical unit, so
+ # separating mode is no longer used -- it's kept here for history's sake
+ tocentries = _entries_from_toctree(toctree, [], separate=False)
+ if not tocentries:
+ return None
+
+ newnode = addnodes.compact_paragraph('', '')
+ caption = toctree.attributes.get('caption')
+ if caption:
+ caption_node = nodes.caption(caption, '', *[nodes.Text(caption)])
+ caption_node.line = toctree.line
+ caption_node.source = toctree.source
+ caption_node.rawsource = toctree['rawcaption']
+ if hasattr(toctree, 'uid'):
+ # move uid to caption_node to translate it
+ caption_node.uid = toctree.uid
+ del toctree.uid
+ newnode += caption_node
+ newnode.extend(tocentries)
+ newnode['toctree'] = True
+
+ # prune the tree to maxdepth, also set toc depth and current classes
+ _toctree_add_classes(newnode, 1)
+ self._toctree_prune(newnode, 1, prune and maxdepth or 0, collapse)
+
+ if len(newnode[-1]) == 0: # No titles found
+ return None
+
+ # set the target paths in the toctrees (they are not known at TOC
+ # generation time)
+ for refnode in newnode.traverse(nodes.reference):
+ if not url_re.match(refnode['refuri']):
+ refnode['refuri'] = builder.get_relative_uri(
+ docname, refnode['refuri']) + refnode['anchorname']
+ return newnode
+
+ def get_toctree_ancestors(self, docname):
+ # type: (unicode) -> List[unicode]
+ parent = {}
+ for p, children in iteritems(self.env.toctree_includes):
+ for child in children:
+ parent[child] = p
+ ancestors = [] # type: List[unicode]
+ d = docname
+ while d in parent and d not in ancestors:
+ ancestors.append(d)
+ d = parent[d]
+ return ancestors
+
+ def _toctree_prune(self, node, depth, maxdepth, collapse=False):
+ # type: (nodes.Node, int, int, bool) -> None
+ """Utility: Cut a TOC at a specified depth."""
+ for subnode in node.children[:]:
+ if isinstance(subnode, (addnodes.compact_paragraph,
+ nodes.list_item)):
+ # for <p> and <li>, just recurse
+ self._toctree_prune(subnode, depth, maxdepth, collapse)
+ elif isinstance(subnode, nodes.bullet_list):
+ # for <ul>, determine if the depth is too large or if the
+ # entry is to be collapsed
+ if maxdepth > 0 and depth > maxdepth:
+ subnode.parent.replace(subnode, [])
+ else:
+ # cull sub-entries whose parents aren't 'current'
+ if (collapse and depth > 1 and
+ 'iscurrent' not in subnode.parent):
+ subnode.parent.remove(subnode)
+ else:
+ # recurse on visible children
+ self._toctree_prune(subnode, depth + 1, maxdepth, collapse)
+
+ def get_toc_for(self, docname, builder):
+ # type: (unicode, Builder) -> Dict[unicode, nodes.Node]
+ """Return a TOC nodetree -- for use on the same page only!"""
+ tocdepth = self.env.metadata[docname].get('tocdepth', 0)
+ try:
+ toc = self.env.tocs[docname].deepcopy()
+ self._toctree_prune(toc, 2, tocdepth)
+ except KeyError:
+ # the document does not exist anymore: return a dummy node that
+ # renders to nothing
+ return nodes.paragraph()
+ self.process_only_nodes(toc)
+ for node in toc.traverse(nodes.reference):
+ node['refuri'] = node['anchorname'] or '#'
+ return toc
+
+ def get_toctree_for(self, docname, builder, collapse, **kwds):
+ # type: (unicode, Builder, bool, Any) -> nodes.Node
+ """Return the global TOC nodetree."""
+ doctree = self.env.get_doctree(self.env.config.master_doc)
+ toctrees = []
+ if 'includehidden' not in kwds:
+ kwds['includehidden'] = True
+ if 'maxdepth' not in kwds:
+ kwds['maxdepth'] = 0
+ kwds['collapse'] = collapse
+ for toctreenode in doctree.traverse(addnodes.toctree):
+ toctree = self.resolve(docname, builder, toctreenode, prune=True, **kwds)
+ if toctree:
+ toctrees.append(toctree)
+ if not toctrees:
+ return None
+ result = toctrees[0]
+ for toctree in toctrees[1:]:
+ result.extend(toctree.children)
+ return result
+
+ def process_only_nodes(self, doctree):
+ # type: (nodes.Node) -> None
+ # Lazy loading
+ from sphinx.transforms import SphinxTransformer
+ from sphinx.transforms.post_transforms import OnlyNodeTransform
+
+ transformer = SphinxTransformer(doctree)
+ transformer.set_environment(self.env)
+ transformer.add_transform(OnlyNodeTransform)
+ transformer.apply_transforms()
diff --git a/sphinx/environment/collectors/__init__.py b/sphinx/environment/collectors/__init__.py
new file mode 100644
index 000000000..45add6c4e
--- /dev/null
+++ b/sphinx/environment/collectors/__init__.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The data collector components for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from six import itervalues
+
+if False:
+ # For type annotation
+ from typing import Dict, List, Set # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
+class EnvironmentCollector(object):
+ """An EnvironmentCollector is a specific data collector from each document.
+
+ It gathers data and stores :py:class:`BuildEnvironment
+ <sphinx.environment.BuildEnvironment>` as a database. Examples of specific
+ data would be images, download files, section titles, metadatas, index
+ entries and toctrees, etc.
+ """
+
+ listener_ids = None # type: Dict[unicode, int]
+
+ def enable(self, app):
+ # type: (Sphinx) -> None
+ assert self.listener_ids is None
+ self.listener_ids = {
+ 'doctree-read': app.connect('doctree-read', self.process_doc),
+ 'env-merge-info': app.connect('env-merge-info', self.merge_other),
+ 'env-purge-doc': app.connect('env-purge-doc', self.clear_doc),
+ 'env-get-updated': app.connect('env-get-updated', self.get_updated_docs),
+ 'env-get-outdated': app.connect('env-get-outdated', self.get_outdated_docs),
+ }
+
+ def disable(self, app):
+ # type: (Sphinx) -> None
+ assert self.listener_ids is not None
+ for listener_id in itervalues(self.listener_ids):
+ app.disconnect(listener_id)
+ self.listener_ids = None
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ """Remove specified data of a document.
+
+ This method is called on the removal of the document."""
+ raise NotImplementedError
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ """Merge in specified data regarding docnames from a different `BuildEnvironment`
+ object which coming from a subprocess in parallel builds."""
+ raise NotImplementedError
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Process a document and gather specific data from it.
+
+ This method is called after the document is read."""
+ raise NotImplementedError
+
+ def get_updated_docs(self, app, env):
+ # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ """Return a list of docnames to re-read.
+
+ This methods is called after reading the whole of documents (experimental).
+ """
+ return []
+
+ def get_outdated_docs(self, app, env, added, changed, removed):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], Set[unicode], Set[unicode]) -> List[unicode] # NOQA
+ """Return a list of docnames to re-read.
+
+ This methods is called before reading the documents.
+ """
+ return []
diff --git a/sphinx/environment/collectors/asset.py b/sphinx/environment/collectors/asset.py
new file mode 100644
index 000000000..86a1a3414
--- /dev/null
+++ b/sphinx/environment/collectors/asset.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.asset
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The image collector for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+from os import path
+from glob import glob
+
+from six import iteritems, itervalues
+
+from docutils import nodes
+from docutils.utils import relative_path
+
+from sphinx import addnodes
+from sphinx.environment.collectors import EnvironmentCollector
+from sphinx.util import logging
+from sphinx.util.i18n import get_image_filename_for_language, search_image_for_language
+from sphinx.util.images import guess_mimetype
+
+if False:
+ # For type annotation
+ from typing import Dict, List, Set, Tuple # NOQA
+ from docutils import nodes # NOQA
+    from sphinx.application import Sphinx  # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+class ImageCollector(EnvironmentCollector):
+ """Image files collector for sphinx.environment."""
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.images.purge_doc(docname)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ env.images.merge_other(docnames, other.images)
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Process and rewrite image URIs."""
+ docname = app.env.docname
+
+ for node in doctree.traverse(nodes.image):
+ # Map the mimetype to the corresponding image. The writer may
+ # choose the best image from these candidates. The special key * is
+ # set if there is only single candidate to be used by a writer.
+ # The special key ? is set for nonlocal URIs.
+ candidates = {} # type: Dict[unicode, unicode]
+ node['candidates'] = candidates
+ imguri = node['uri']
+ if imguri.startswith('data:'):
+ logger.warning('image data URI found. some builders might not support',
+ location=node, type='image', subtype='data_uri')
+ candidates['?'] = imguri
+ continue
+ elif imguri.find('://') != -1:
+ logger.warning('nonlocal image URI found: %s' % imguri,
+ location=node,
+ type='image', subtype='nonlocal_uri')
+ candidates['?'] = imguri
+ continue
+ rel_imgpath, full_imgpath = app.env.relfn2path(imguri, docname)
+ if app.config.language:
+ # substitute figures (ex. foo.png -> foo.en.png)
+ i18n_full_imgpath = search_image_for_language(full_imgpath, app.env)
+ if i18n_full_imgpath != full_imgpath:
+ full_imgpath = i18n_full_imgpath
+ rel_imgpath = relative_path(path.join(app.srcdir, 'dummy'),
+ i18n_full_imgpath)
+ # set imgpath as default URI
+ node['uri'] = rel_imgpath
+ if rel_imgpath.endswith(os.extsep + '*'):
+ if app.config.language:
+ # Search language-specific figures at first
+ i18n_imguri = get_image_filename_for_language(imguri, app.env)
+ _, full_i18n_imgpath = app.env.relfn2path(i18n_imguri, docname)
+ self.collect_candidates(app.env, full_i18n_imgpath, candidates, node)
+
+ self.collect_candidates(app.env, full_imgpath, candidates, node)
+ else:
+ candidates['*'] = rel_imgpath
+
+ # map image paths to unique image names (so that they can be put
+ # into a single directory)
+ for imgpath in itervalues(candidates):
+ app.env.dependencies[docname].add(imgpath)
+ if not os.access(path.join(app.srcdir, imgpath), os.R_OK):
+ logger.warning('image file not readable: %s' % imgpath,
+ location=node, type='image', subtype='not_readable')
+ continue
+ app.env.images.add_file(docname, imgpath)
+
+ def collect_candidates(self, env, imgpath, candidates, node):
+ # type: (BuildEnvironment, unicode, Dict[unicode, unicode], nodes.Node) -> None
+ globbed = {} # type: Dict[unicode, List[unicode]]
+ for filename in glob(imgpath):
+ new_imgpath = relative_path(path.join(env.srcdir, 'dummy'),
+ filename)
+ try:
+ mimetype = guess_mimetype(filename)
+ if mimetype not in candidates:
+ globbed.setdefault(mimetype, []).append(new_imgpath)
+ except (OSError, IOError) as err:
+ logger.warning('image file %s not readable: %s' % (filename, err),
+ location=node, type='image', subtype='not_readable')
+ for key, files in iteritems(globbed):
+ candidates[key] = sorted(files, key=len)[0] # select by similarity
+
+
+class DownloadFileCollector(EnvironmentCollector):
+ """Download files collector for sphinx.environment."""
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.dlfiles.purge_doc(docname)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ env.dlfiles.merge_other(docnames, other.dlfiles)
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Process downloadable file paths. """
+ for node in doctree.traverse(addnodes.download_reference):
+ targetname = node['reftarget']
+ rel_filename, filename = app.env.relfn2path(targetname, app.env.docname)
+ app.env.dependencies[app.env.docname].add(rel_filename)
+ if not os.access(filename, os.R_OK):
+ logger.warning('download file not readable: %s' % filename,
+ location=node, type='download', subtype='not_readable')
+ continue
+ node['filename'] = app.env.dlfiles.add_file(app.env.docname, filename)
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(ImageCollector)
+ app.add_env_collector(DownloadFileCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/environment/collectors/dependencies.py b/sphinx/environment/collectors/dependencies.py
new file mode 100644
index 000000000..fe6ecccb1
--- /dev/null
+++ b/sphinx/environment/collectors/dependencies.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.dependencies
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The dependencies collector components for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from os import path
+
+from docutils.utils import relative_path
+
+from sphinx.util.osutil import getcwd, fs_encoding
+from sphinx.environment.collectors import EnvironmentCollector
+
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+ from docutils import nodes # NOQA
+    from sphinx.application import Sphinx  # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
+class DependenciesCollector(EnvironmentCollector):
+ """dependencies collector for sphinx.environment."""
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.dependencies.pop(docname, None)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ for docname in docnames:
+ if docname in other.dependencies:
+ env.dependencies[docname] = other.dependencies[docname]
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Process docutils-generated dependency info."""
+ cwd = getcwd()
+ frompath = path.join(path.normpath(app.srcdir), 'dummy')
+ deps = doctree.settings.record_dependencies
+ if not deps:
+ return
+ for dep in deps.list:
+ # the dependency path is relative to the working dir, so get
+ # one relative to the srcdir
+ if isinstance(dep, bytes):
+ dep = dep.decode(fs_encoding)
+ relpath = relative_path(frompath,
+ path.normpath(path.join(cwd, dep)))
+ app.env.dependencies[app.env.docname].add(relpath)
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(DependenciesCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/environment/collectors/indexentries.py b/sphinx/environment/collectors/indexentries.py
new file mode 100644
index 000000000..44aee204c
--- /dev/null
+++ b/sphinx/environment/collectors/indexentries.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.indexentries
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Index entries collector for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx import addnodes
+from sphinx.util import split_index_msg, logging
+from sphinx.environment.collectors import EnvironmentCollector
+
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+ from docutils import nodes # NOQA
+    from sphinx.application import Sphinx  # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+class IndexEntriesCollector(EnvironmentCollector):
+ name = 'indices'
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.indexentries.pop(docname, None)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ for docname in docnames:
+ env.indexentries[docname] = other.indexentries[docname]
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ docname = app.env.docname
+ entries = app.env.indexentries[docname] = []
+ for node in doctree.traverse(addnodes.index):
+ try:
+ for entry in node['entries']:
+ split_index_msg(entry[0], entry[1])
+ except ValueError as exc:
+ logger.warning(str(exc), location=node)
+ node.parent.remove(node)
+ else:
+ for entry in node['entries']:
+ if len(entry) == 5:
+ # Since 1.4: new index structure including index_key (5th column)
+ entries.append(entry)
+ else:
+ entries.append(entry + (None,))
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(IndexEntriesCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/environment/collectors/metadata.py b/sphinx/environment/collectors/metadata.py
new file mode 100644
index 000000000..305a086de
--- /dev/null
+++ b/sphinx/environment/collectors/metadata.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.metadata
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The metadata collector components for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from docutils import nodes
+
+from sphinx.environment.collectors import EnvironmentCollector
+
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+ from docutils import nodes # NOQA
+    from sphinx.application import Sphinx  # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
+class MetadataCollector(EnvironmentCollector):
+ """metadata collector for sphinx.environment."""
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.metadata.pop(docname, None)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ for docname in docnames:
+ env.metadata[docname] = other.metadata[docname]
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Process the docinfo part of the doctree as metadata.
+
+ Keep processing minimal -- just return what docutils says.
+ """
+ md = app.env.metadata[app.env.docname]
+ try:
+ docinfo = doctree[0]
+ except IndexError:
+ # probably an empty document
+ return
+ if docinfo.__class__ is not nodes.docinfo:
+ # nothing to see here
+ return
+ for node in docinfo:
+ # nodes are multiply inherited...
+ if isinstance(node, nodes.authors):
+ md['authors'] = [author.astext() for author in node]
+ elif isinstance(node, nodes.TextElement): # e.g. author
+ md[node.__class__.__name__] = node.astext()
+ else:
+ name, body = node
+ md[name.astext()] = body.astext()
+ for name, value in md.items():
+ if name in ('tocdepth',):
+ try:
+ value = int(value)
+ except ValueError:
+ value = 0
+ md[name] = value
+
+ del doctree[0]
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(MetadataCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/environment/collectors/title.py b/sphinx/environment/collectors/title.py
new file mode 100644
index 000000000..693a1cf32
--- /dev/null
+++ b/sphinx/environment/collectors/title.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.title
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ The title collector components for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from docutils import nodes
+
+from sphinx.environment.collectors import EnvironmentCollector
+from sphinx.transforms import SphinxContentsFilter
+
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+ from docutils import nodes # NOQA
+    from sphinx.application import Sphinx  # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
+class TitleCollector(EnvironmentCollector):
+ """title collector for sphinx.environment."""
+
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.titles.pop(docname, None)
+ env.longtitles.pop(docname, None)
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ for docname in docnames:
+ env.titles[docname] = other.titles[docname]
+ env.longtitles[docname] = other.longtitles[docname]
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Add a title node to the document (just copy the first section title),
+ and store that title in the environment.
+ """
+ titlenode = nodes.title()
+ longtitlenode = titlenode
+ # explicit title set with title directive; use this only for
+ # the <title> tag in HTML output
+ if 'title' in doctree:
+ longtitlenode = nodes.title()
+ longtitlenode += nodes.Text(doctree['title'])
+ # look for first section title and use that as the title
+ for node in doctree.traverse(nodes.section):
+ visitor = SphinxContentsFilter(doctree)
+ node[0].walkabout(visitor)
+ titlenode += visitor.get_entry_text()
+ break
+ else:
+ # document has no title
+ titlenode += nodes.Text('<no title>')
+ app.env.titles[app.env.docname] = titlenode
+ app.env.longtitles[app.env.docname] = longtitlenode
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(TitleCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/environment/collectors/toctree.py b/sphinx/environment/collectors/toctree.py
new file mode 100644
index 000000000..a95e30895
--- /dev/null
+++ b/sphinx/environment/collectors/toctree.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.environment.collectors.toctree
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Toctree collector for sphinx.environment.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from six import iteritems
+
+from docutils import nodes
+
+from sphinx import addnodes
+from sphinx.util import url_re, logging
+from sphinx.transforms import SphinxContentsFilter
+from sphinx.environment.adapters.toctree import TocTree
+from sphinx.environment.collectors import EnvironmentCollector
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Set, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+class TocTreeCollector(EnvironmentCollector):
+ def clear_doc(self, app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
+ env.tocs.pop(docname, None)
+ env.toc_secnumbers.pop(docname, None)
+ env.toc_fignumbers.pop(docname, None)
+ env.toc_num_entries.pop(docname, None)
+ env.toctree_includes.pop(docname, None)
+ env.glob_toctrees.discard(docname)
+ env.numbered_toctrees.discard(docname)
+
+ for subfn, fnset in list(env.files_to_rebuild.items()):
+ fnset.discard(docname)
+ if not fnset:
+ del env.files_to_rebuild[subfn]
+
+ def merge_other(self, app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Set[unicode], BuildEnvironment) -> None
+ for docname in docnames:
+ env.tocs[docname] = other.tocs[docname]
+ env.toc_num_entries[docname] = other.toc_num_entries[docname]
+ if docname in other.toctree_includes:
+ env.toctree_includes[docname] = other.toctree_includes[docname]
+ if docname in other.glob_toctrees:
+ env.glob_toctrees.add(docname)
+ if docname in other.numbered_toctrees:
+ env.numbered_toctrees.add(docname)
+
+ for subfn, fnset in other.files_to_rebuild.items():
+ env.files_to_rebuild.setdefault(subfn, set()).update(fnset & set(docnames))
+
+ def process_doc(self, app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
+ """Build a TOC from the doctree and store it in the inventory."""
+ docname = app.env.docname
+ numentries = [0] # nonlocal again...
+
+ def traverse_in_section(node, cls):
+ """Like traverse(), but stay within the same section."""
+ result = []
+ if isinstance(node, cls):
+ result.append(node)
+ for child in node.children:
+ if isinstance(child, nodes.section):
+ continue
+ result.extend(traverse_in_section(child, cls))
+ return result
+
+ def build_toc(node, depth=1):
+ entries = []
+ for sectionnode in node:
+ # find all toctree nodes in this section and add them
+ # to the toc (just copying the toctree node which is then
+ # resolved in self.get_and_resolve_doctree)
+ if isinstance(sectionnode, addnodes.only):
+ onlynode = addnodes.only(expr=sectionnode['expr'])
+ blist = build_toc(sectionnode, depth)
+ if blist:
+ onlynode += blist.children
+ entries.append(onlynode)
+ continue
+ if not isinstance(sectionnode, nodes.section):
+ for toctreenode in traverse_in_section(sectionnode,
+ addnodes.toctree):
+ item = toctreenode.copy()
+ entries.append(item)
+ # important: do the inventory stuff
+ TocTree(app.env).note(docname, toctreenode)
+ continue
+ title = sectionnode[0]
+ # copy the contents of the section title, but without references
+ # and unnecessary stuff
+ visitor = SphinxContentsFilter(doctree)
+ title.walkabout(visitor)
+ nodetext = visitor.get_entry_text()
+ if not numentries[0]:
+ # for the very first toc entry, don't add an anchor
+ # as it is the file's title anyway
+ anchorname = ''
+ else:
+ anchorname = '#' + sectionnode['ids'][0]
+ numentries[0] += 1
+ # make these nodes:
+ # list_item -> compact_paragraph -> reference
+ reference = nodes.reference(
+ '', '', internal=True, refuri=docname,
+ anchorname=anchorname, *nodetext)
+ para = addnodes.compact_paragraph('', '', reference)
+ item = nodes.list_item('', para)
+ sub_item = build_toc(sectionnode, depth + 1)
+ item += sub_item
+ entries.append(item)
+ if entries:
+ return nodes.bullet_list('', *entries)
+ return []
+ toc = build_toc(doctree)
+ if toc:
+ app.env.tocs[docname] = toc
+ else:
+ app.env.tocs[docname] = nodes.bullet_list('')
+ app.env.toc_num_entries[docname] = numentries[0]
+
+ def get_updated_docs(self, app, env):
+ # type: (Sphinx, BuildEnvironment) -> List[unicode]
+ return self.assign_section_numbers(env) + self.assign_figure_numbers(env)
+
+ def assign_section_numbers(self, env):
+ # type: (BuildEnvironment) -> List[unicode]
+ """Assign a section number to each heading under a numbered toctree."""
+ # a list of all docnames whose section numbers changed
+ rewrite_needed = []
+
+ assigned = set() # type: Set[unicode]
+ old_secnumbers = env.toc_secnumbers
+ env.toc_secnumbers = {}
+
+ def _walk_toc(node, secnums, depth, titlenode=None):
+ # titlenode is the title of the document, it will get assigned a
+ # secnumber too, so that it shows up in next/prev/parent rellinks
+ for subnode in node.children:
+ if isinstance(subnode, nodes.bullet_list):
+ numstack.append(0)
+ _walk_toc(subnode, secnums, depth - 1, titlenode)
+ numstack.pop()
+ titlenode = None
+ elif isinstance(subnode, nodes.list_item):
+ _walk_toc(subnode, secnums, depth, titlenode)
+ titlenode = None
+ elif isinstance(subnode, addnodes.only):
+ # at this stage we don't know yet which sections are going
+ # to be included; just include all of them, even if it leads
+ # to gaps in the numbering
+ _walk_toc(subnode, secnums, depth, titlenode)
+ titlenode = None
+ elif isinstance(subnode, addnodes.compact_paragraph):
+ numstack[-1] += 1
+ if depth > 0:
+ number = tuple(numstack)
+ else:
+ number = None
+ secnums[subnode[0]['anchorname']] = \
+ subnode[0]['secnumber'] = number
+ if titlenode:
+ titlenode['secnumber'] = number
+ titlenode = None
+ elif isinstance(subnode, addnodes.toctree):
+ _walk_toctree(subnode, depth)
+
+ def _walk_toctree(toctreenode, depth):
+ if depth == 0:
+ return
+ for (title, ref) in toctreenode['entries']:
+ if url_re.match(ref) or ref == 'self':
+ # don't mess with those
+ continue
+ elif ref in assigned:
+ logger.warning('%s is already assigned section numbers '
+ '(nested numbered toctree?)', ref,
+ location=toctreenode, type='toc', subtype='secnum')
+ elif ref in env.tocs:
+ secnums = env.toc_secnumbers[ref] = {}
+ assigned.add(ref)
+ _walk_toc(env.tocs[ref], secnums, depth,
+ env.titles.get(ref))
+ if secnums != old_secnumbers.get(ref):
+ rewrite_needed.append(ref)
+
+ for docname in env.numbered_toctrees:
+ assigned.add(docname)
+ doctree = env.get_doctree(docname)
+ for toctreenode in doctree.traverse(addnodes.toctree):
+ depth = toctreenode.get('numbered', 0)
+ if depth:
+ # every numbered toctree gets new numbering
+ numstack = [0]
+ _walk_toctree(toctreenode, depth)
+
+ return rewrite_needed
+
+ def assign_figure_numbers(self, env):
+ # type: (BuildEnvironment) -> List[unicode]
+ """Assign a figure number to each figure under a numbered toctree."""
+
+ rewrite_needed = []
+
+ assigned = set() # type: Set[unicode]
+ old_fignumbers = env.toc_fignumbers
+ env.toc_fignumbers = {}
+        fignum_counter = {}  # type: Dict[unicode, Dict[Tuple[int, ...], int]]
+
+ def get_section_number(docname, section):
+ anchorname = '#' + section['ids'][0]
+ secnumbers = env.toc_secnumbers.get(docname, {})
+ if anchorname in secnumbers:
+ secnum = secnumbers.get(anchorname)
+ else:
+ secnum = secnumbers.get('')
+
+ return secnum or tuple()
+
+ def get_next_fignumber(figtype, secnum):
+ counter = fignum_counter.setdefault(figtype, {})
+
+ secnum = secnum[:env.config.numfig_secnum_depth]
+ counter[secnum] = counter.get(secnum, 0) + 1
+ return secnum + (counter[secnum],)
+
+ def register_fignumber(docname, secnum, figtype, fignode):
+ env.toc_fignumbers.setdefault(docname, {})
+ fignumbers = env.toc_fignumbers[docname].setdefault(figtype, {})
+ figure_id = fignode['ids'][0]
+
+ fignumbers[figure_id] = get_next_fignumber(figtype, secnum)
+
+ def _walk_doctree(docname, doctree, secnum):
+ for subnode in doctree.children:
+ if isinstance(subnode, nodes.section):
+ next_secnum = get_section_number(docname, subnode)
+ if next_secnum:
+ _walk_doctree(docname, subnode, next_secnum)
+ else:
+ _walk_doctree(docname, subnode, secnum)
+ continue
+ elif isinstance(subnode, addnodes.toctree):
+ for title, subdocname in subnode['entries']:
+ if url_re.match(subdocname) or subdocname == 'self':
+ # don't mess with those
+ continue
+
+ _walk_doc(subdocname, secnum)
+
+ continue
+
+ figtype = env.get_domain('std').get_figtype(subnode) # type: ignore
+ if figtype and subnode['ids']:
+ register_fignumber(docname, secnum, figtype, subnode)
+
+ _walk_doctree(docname, subnode, secnum)
+
+ def _walk_doc(docname, secnum):
+ if docname not in assigned:
+ assigned.add(docname)
+ doctree = env.get_doctree(docname)
+ _walk_doctree(docname, doctree, secnum)
+
+ if env.config.numfig:
+ _walk_doc(env.config.master_doc, tuple())
+ for docname, fignums in iteritems(env.toc_fignumbers):
+ if fignums != old_fignumbers.get(docname):
+ rewrite_needed.append(docname)
+
+ return rewrite_needed
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict
+ app.add_env_collector(TocTreeCollector)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/errors.py b/sphinx/errors.py
index 9c4eb983f..7662c95a3 100644
--- a/sphinx/errors.py
+++ b/sphinx/errors.py
@@ -10,6 +10,10 @@
:license: BSD, see LICENSE for details.
"""
+if False:
+ # For type annotation
+ from typing import Any # NOQA
+
class SphinxError(Exception):
"""
@@ -29,16 +33,19 @@ class ExtensionError(SphinxError):
category = 'Extension error'
def __init__(self, message, orig_exc=None):
+ # type: (unicode, Exception) -> None
SphinxError.__init__(self, message)
self.orig_exc = orig_exc
def __repr__(self):
+ # type: () -> str
if self.orig_exc:
return '%s(%r, %r)' % (self.__class__.__name__,
self.message, self.orig_exc)
return '%s(%r)' % (self.__class__.__name__, self.message)
def __str__(self):
+ # type: () -> str
parent_str = SphinxError.__str__(self)
if self.orig_exc:
return '%s (exception: %s)' % (parent_str, self.orig_exc)
@@ -59,6 +66,7 @@ class VersionRequirementError(SphinxError):
class PycodeError(Exception):
def __str__(self):
+ # type: () -> str
res = self.args[0]
if len(self.args) > 1:
res += ' (exception was: %r)' % self.args[1]
@@ -70,8 +78,10 @@ class SphinxParallelError(SphinxError):
category = 'Sphinx parallel build error'
def __init__(self, message, traceback):
+ # type: (str, Any) -> None
self.message = message
self.traceback = traceback
def __str__(self):
+ # type: () -> str
return self.message
diff --git a/sphinx/events.py b/sphinx/events.py
new file mode 100644
index 000000000..ab31fe234
--- /dev/null
+++ b/sphinx/events.py
@@ -0,0 +1,86 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.events
+ ~~~~~~~~~~~~~
+
+ Sphinx core events.
+
+ Gracefully adapted from the TextPress system by Armin.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from __future__ import print_function
+
+from collections import defaultdict
+
+from six import itervalues
+
+from sphinx.errors import ExtensionError
+from sphinx.locale import _
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List # NOQA
+
+
+# List of all known core events. Maps name to arguments description.
+core_events = {
+ 'builder-inited': '',
+ 'env-get-outdated': 'env, added, changed, removed',
+ 'env-get-updated': 'env',
+ 'env-purge-doc': 'env, docname',
+ 'env-before-read-docs': 'env, docnames',
+ 'source-read': 'docname, source text',
+ 'doctree-read': 'the doctree before being pickled',
+ 'env-merge-info': 'env, read docnames, other env instance',
+ 'missing-reference': 'env, node, contnode',
+ 'doctree-resolved': 'doctree, docname',
+ 'env-updated': 'env',
+ 'html-collect-pages': 'builder',
+ 'html-page-context': 'pagename, context, doctree or None',
+ 'build-finished': 'exception',
+} # type: Dict[unicode, unicode]
+
+
+class EventManager(object):
+ def __init__(self):
+ # type: () -> None
+ self.events = core_events.copy()
+ self.listeners = defaultdict(dict) # type: Dict[unicode, Dict[int, Callable]]
+ self.next_listener_id = 0
+
+ def add(self, name):
+ # type: (unicode) -> None
+ if name in self.events:
+ raise ExtensionError(_('Event %r already present') % name)
+ self.events[name] = ''
+
+ def connect(self, name, callback):
+ # type: (unicode, Callable) -> int
+ if name not in self.events:
+ raise ExtensionError(_('Unknown event name: %s') % name)
+
+ listener_id = self.next_listener_id
+ self.next_listener_id += 1
+ self.listeners[name][listener_id] = callback
+ return listener_id
+
+ def disconnect(self, listener_id):
+ # type: (int) -> None
+ for event in itervalues(self.listeners):
+ event.pop(listener_id, None)
+
+ def emit(self, name, *args):
+ # type: (unicode, Any) -> List
+ results = []
+ for callback in itervalues(self.listeners[name]):
+ results.append(callback(*args))
+ return results
+
+ def emit_firstresult(self, name, *args):
+ # type: (unicode, Any) -> Any
+ for result in self.emit(name, *args):
+ if result is not None:
+ return result
+ return None
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc.py
index 9e513276f..f78f9cbce 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc.py
@@ -20,8 +20,10 @@ from types import FunctionType, BuiltinFunctionType, MethodType
from six import PY2, iterkeys, iteritems, itervalues, text_type, class_types, \
string_types, StringIO
+
from docutils import nodes
from docutils.utils import assemble_option_dict
+from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
import sphinx
@@ -29,21 +31,30 @@ from sphinx.util import rpartition, force_decode
from sphinx.locale import _
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.application import ExtensionError
+from sphinx.util import logging
from sphinx.util.nodes import nested_parse_with_titles
-from sphinx.util.compat import Directive
from sphinx.util.inspect import getargspec, isdescriptor, safe_getmembers, \
safe_getattr, object_description, is_builtin_class_method, \
isenumclass, isenumattribute
from sphinx.util.docstrings import prepare_docstring
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Sequence, Set, Tuple, Type, Union # NOQA
+ from types import ModuleType # NOQA
+ from docutils.utils import Reporter # NOQA
+ from sphinx.application import Sphinx # NOQA
+
try:
if sys.version_info >= (3,):
import typing
else:
- typing = None
+ typing = None # type: ignore
except ImportError:
typing = None
+logger = logging.getLogger(__name__)
+
# This type isn't exposed directly in any modules, but can be found
# here in most Python versions
MethodDescriptorType = type(type.__subclasses__)
@@ -63,28 +74,33 @@ py_ext_sig_re = re.compile(
class DefDict(dict):
"""A dict that returns a default on nonexisting keys."""
def __init__(self, default):
+ # type: (Any) -> None
dict.__init__(self)
self.default = default
def __getitem__(self, key):
+ # type: (Any) -> Any
try:
return dict.__getitem__(self, key)
except KeyError:
return self.default
def __bool__(self):
+ # type: () -> bool
# docutils check "if option_spec"
return True
__nonzero__ = __bool__ # for python2 compatibility
def identity(x):
+ # type: (Any) -> Any
return x
class Options(dict):
"""A dict/attribute hybrid that returns None on nonexisting keys."""
def __getattr__(self, name):
+ # type: (unicode) -> Any
try:
return self[name.replace('_', '-')]
except KeyError:
@@ -97,22 +113,26 @@ class _MockModule(object):
__path__ = '/dev/null'
def __init__(self, *args, **kwargs):
- self.__all__ = []
+ # type: (Any, Any) -> None
+ self.__all__ = [] # type: List[str]
def __call__(self, *args, **kwargs):
+ # type: (Any, Any) -> _MockModule
if args and type(args[0]) in [FunctionType, MethodType]:
# Appears to be a decorator, pass through unchanged
return args[0]
return _MockModule()
def _append_submodule(self, submod):
+ # type: (str) -> None
self.__all__.append(submod)
@classmethod
def __getattr__(cls, name):
+ # type: (unicode) -> Any
if name[0] == name[0].upper():
# Not very good, we assume Uppercase names are classes...
- mocktype = type(name, (), {})
+ mocktype = type(name, (), {}) # type: ignore
mocktype.__module__ = __name__
return mocktype
else:
@@ -120,15 +140,16 @@ class _MockModule(object):
def mock_import(modname):
+ # type: (str) -> None
if '.' in modname:
pkg, _n, mods = modname.rpartition('.')
mock_import(pkg)
if isinstance(sys.modules[pkg], _MockModule):
- sys.modules[pkg]._append_submodule(mods)
+ sys.modules[pkg]._append_submodule(mods) # type: ignore
if modname not in sys.modules:
mod = _MockModule()
- sys.modules[modname] = mod
+ sys.modules[modname] = mod # type: ignore
ALL = object()
@@ -136,6 +157,7 @@ INSTANCEATTR = object()
def members_option(arg):
+ # type: (Any) -> Union[object, List[unicode]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -143,6 +165,7 @@ def members_option(arg):
def members_set_option(arg):
+ # type: (Any) -> Union[object, Set[unicode]]
"""Used to convert the :members: option to auto directives."""
if arg is None:
return ALL
@@ -153,6 +176,7 @@ SUPPRESS = object()
def annotation_option(arg):
+ # type: (Any) -> Any
if arg is None:
# suppress showing the representation of the object
return SUPPRESS
@@ -161,6 +185,7 @@ def annotation_option(arg):
def bool_option(arg):
+ # type: (Any) -> bool
"""Used to convert flag options to auto directives. (Instead of
directives.flag(), which returns None).
"""
@@ -173,13 +198,16 @@ class AutodocReporter(object):
and line number to a system message, as recorded in a ViewList.
"""
def __init__(self, viewlist, reporter):
+ # type: (ViewList, Reporter) -> None
self.viewlist = viewlist
self.reporter = reporter
def __getattr__(self, name):
+ # type: (unicode) -> Any
return getattr(self.reporter, name)
def system_message(self, level, message, *children, **kwargs):
+ # type: (int, unicode, Any, Any) -> nodes.system_message
if 'line' in kwargs and 'source' not in kwargs:
try:
source, line = self.viewlist.items[kwargs['line']]
@@ -192,25 +220,31 @@ class AutodocReporter(object):
*children, **kwargs)
def debug(self, *args, **kwargs):
+ # type: (Any, Any) -> nodes.system_message
if self.reporter.debug_flag:
return self.system_message(0, *args, **kwargs)
def info(self, *args, **kwargs):
+ # type: (Any, Any) -> nodes.system_message
return self.system_message(1, *args, **kwargs)
def warning(self, *args, **kwargs):
+ # type: (Any, Any) -> nodes.system_message
return self.system_message(2, *args, **kwargs)
def error(self, *args, **kwargs):
+ # type: (Any, Any) -> nodes.system_message
return self.system_message(3, *args, **kwargs)
def severe(self, *args, **kwargs):
+ # type: (Any, Any) -> nodes.system_message
return self.system_message(4, *args, **kwargs)
# Some useful event listener factories for autodoc-process-docstring.
def cut_lines(pre, post=0, what=None):
+ # type: (int, int, unicode) -> Callable
"""Return a listener that removes the first *pre* and last *post*
lines of every docstring. If *what* is a sequence of strings,
only docstrings of a type in *what* will be processed.
@@ -223,6 +257,7 @@ def cut_lines(pre, post=0, what=None):
This can (and should) be used in place of :confval:`automodule_skip_lines`.
"""
def process(app, what_, name, obj, options, lines):
+ # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
if what and what_ not in what:
return
del lines[:pre]
@@ -238,6 +273,7 @@ def cut_lines(pre, post=0, what=None):
def between(marker, what=None, keepempty=False, exclude=False):
+ # type: (unicode, Sequence[unicode], bool, bool) -> Callable
"""Return a listener that either keeps, or if *exclude* is True excludes,
lines between lines that match the *marker* regular expression. If no line
matches, the resulting docstring would be empty, so no change will be made
@@ -249,6 +285,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
marker_re = re.compile(marker)
def process(app, what_, name, obj, options, lines):
+ # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
if what and what_ not in what:
return
deleted = 0
@@ -272,6 +309,7 @@ def between(marker, what=None, keepempty=False, exclude=False):
def format_annotation(annotation):
+ # type: (Any) -> str
"""Return formatted representation of a type annotation.
Show qualified names for types and additional details for types from
@@ -279,18 +317,18 @@ def format_annotation(annotation):
Displaying complex types from ``typing`` relies on its private API.
"""
- if typing and isinstance(annotation, typing.TypeVar):
+ if typing and isinstance(annotation, typing.TypeVar): # type: ignore
return annotation.__name__
if annotation == Ellipsis:
return '...'
if not isinstance(annotation, type):
return repr(annotation)
- qualified_name = (annotation.__module__ + '.' + annotation.__qualname__
+ qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
if annotation else repr(annotation))
if annotation.__module__ == 'builtins':
- return annotation.__qualname__
+ return annotation.__qualname__ # type: ignore
elif typing:
if hasattr(typing, 'GenericMeta') and \
isinstance(annotation, typing.GenericMeta):
@@ -351,6 +389,7 @@ def format_annotation(annotation):
def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
kwonlyargs=(), kwonlydefaults={}, annotations={}):
+ # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
"""Return a string representation of an ``inspect.FullArgSpec`` tuple.
An enhanced version of ``inspect.formatargspec()`` that handles typing
@@ -358,18 +397,20 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
"""
def format_arg_with_annotation(name):
+ # type: (str) -> str
if name in annotations:
return '%s: %s' % (name, format_annotation(get_annotation(name)))
return name
def get_annotation(name):
+ # type: (str) -> str
value = annotations[name]
if isinstance(value, string_types):
return introspected_hints.get(name, value)
else:
return value
- introspected_hints = (typing.get_type_hints(function)
+ introspected_hints = (typing.get_type_hints(function) # type: ignore
if typing and hasattr(function, '__code__') else {})
fd = StringIO()
@@ -383,7 +424,7 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
arg_fd.write(format_arg_with_annotation(arg))
if defaults and i >= defaults_start:
arg_fd.write(' = ' if arg in annotations else '=')
- arg_fd.write(object_description(defaults[i - defaults_start]))
+ arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore
formatted.append(arg_fd.getvalue())
if varargs:
@@ -398,7 +439,7 @@ def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
arg_fd.write(format_arg_with_annotation(kwarg))
if kwonlydefaults and kwarg in kwonlydefaults:
arg_fd.write(' = ' if kwarg in annotations else '=')
- arg_fd.write(object_description(kwonlydefaults[kwarg]))
+ arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore
formatted.append(arg_fd.getvalue())
if varkw:
@@ -441,10 +482,11 @@ class Documenter(object):
#: true if the generated content may contain titles
titles_allowed = False
- option_spec = {'noindex': bool_option}
+ option_spec = {'noindex': bool_option} # type: Dict[unicode, Callable]
@staticmethod
def get_attr(obj, name, *defargs):
+ # type: (Any, unicode, Any) -> Any
"""getattr() override for types such as Zope interfaces."""
for typ, func in iteritems(AutoDirective._special_attrgetters):
if isinstance(obj, typ):
@@ -453,10 +495,12 @@ class Documenter(object):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
"""Called to see if a member can be documented by this documenter."""
raise NotImplementedError('must be implemented in subclasses')
def __init__(self, directive, name, indent=u''):
+ # type: (Directive, unicode, unicode) -> None
self.directive = directive
self.env = directive.env
self.options = directive.genopt
@@ -464,27 +508,29 @@ class Documenter(object):
self.indent = indent
# the module and object path within the module, and the fully
# qualified name (all set after resolve_name succeeds)
- self.modname = None
- self.module = None
- self.objpath = None
- self.fullname = None
+ self.modname = None # type: str
+ self.module = None # type: ModuleType
+ self.objpath = None # type: List[unicode]
+ self.fullname = None # type: unicode
# extra signature items (arguments and return annotation,
# also set after resolve_name succeeds)
- self.args = None
- self.retann = None
+ self.args = None # type: unicode
+ self.retann = None # type: unicode
# the object to document (set after import_object succeeds)
- self.object = None
- self.object_name = None
+ self.object = None # type: Any
+ self.object_name = None # type: unicode
# the parent/owner of the object to document
- self.parent = None
+ self.parent = None # type: Any
# the module analyzer to get at attribute docs, or None
- self.analyzer = None
+ self.analyzer = None # type: Any
def add_line(self, line, source, *lineno):
+ # type: (unicode, unicode, int) -> None
"""Append one line of generated reST to the output."""
self.directive.result.append(self.indent + line, source, *lineno)
def resolve_name(self, modname, parents, path, base):
+ # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
"""Resolve the module and name of the object to document given by the
arguments and the current module/class.
@@ -495,6 +541,7 @@ class Documenter(object):
raise NotImplementedError('must be implemented in subclasses')
def parse_name(self):
+ # type: () -> bool
"""Determine what module to import and what attribute to document.
Returns True and sets *self.modname*, *self.objpath*, *self.fullname*,
@@ -505,7 +552,7 @@ class Documenter(object):
# an autogenerated one
try:
explicit_modname, path, base, args, retann = \
- py_ext_sig_re.match(self.name).groups()
+ py_ext_sig_re.match(self.name).groups() # type: ignore
except AttributeError:
self.directive.warn('invalid signature for auto%s (%r)' %
(self.objtype, self.name))
@@ -519,8 +566,7 @@ class Documenter(object):
modname = None
parents = []
- self.modname, self.objpath = \
- self.resolve_name(modname, parents, path, base)
+ self.modname, self.objpath = self.resolve_name(modname, parents, path, base)
if not self.modname:
return False
@@ -532,31 +578,31 @@ class Documenter(object):
return True
def import_object(self):
+ # type: () -> bool
"""Import the object given by *self.modname* and *self.objpath* and set
it as *self.object*.
Returns True if successful, False if an error occurred.
"""
- dbg = self.env.app.debug
if self.objpath:
- dbg('[autodoc] from %s import %s',
- self.modname, '.'.join(self.objpath))
+ logger.debug('[autodoc] from %s import %s',
+ self.modname, '.'.join(self.objpath))
try:
- dbg('[autodoc] import %s', self.modname)
+ logger.debug('[autodoc] import %s', self.modname)
for modname in self.env.config.autodoc_mock_imports:
- dbg('[autodoc] adding a mock module %s!', modname)
+ logger.debug('[autodoc] adding a mock module %s!', modname)
mock_import(modname)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=ImportWarning)
__import__(self.modname)
parent = None
obj = self.module = sys.modules[self.modname]
- dbg('[autodoc] => %r', obj)
+ logger.debug('[autodoc] => %r', obj)
for part in self.objpath:
parent = obj
- dbg('[autodoc] getattr(_, %r)', part)
+ logger.debug('[autodoc] getattr(_, %r)', part)
obj = self.get_attr(obj, part)
- dbg('[autodoc] => %r', obj)
+ logger.debug('[autodoc] => %r', obj)
self.object_name = part
self.parent = parent
self.object = obj
@@ -577,13 +623,14 @@ class Documenter(object):
errmsg += '; the following exception was raised:\n%s' % \
traceback.format_exc()
if PY2:
- errmsg = errmsg.decode('utf-8')
- dbg(errmsg)
+ errmsg = errmsg.decode('utf-8') # type: ignore
+ logger.debug(errmsg)
self.directive.warn(errmsg)
self.env.note_reread()
return False
def get_real_modname(self):
+ # type: () -> str
"""Get the real module name of an object to document.
It can differ from the name of the module through which the object was
@@ -592,6 +639,7 @@ class Documenter(object):
return self.get_attr(self.object, '__module__', None) or self.modname
def check_module(self):
+ # type: () -> bool
"""Check if *self.object* is really defined in the module given by
*self.modname*.
"""
@@ -604,6 +652,7 @@ class Documenter(object):
return True
def format_args(self):
+ # type: () -> unicode
"""Format the argument signature of *self.object*.
Should return None if the object does not have a signature.
@@ -611,6 +660,7 @@ class Documenter(object):
return None
def format_name(self):
+ # type: () -> unicode
"""Format the name of *self.object*.
This normally should be something that can be parsed by the generated
@@ -622,13 +672,14 @@ class Documenter(object):
return '.'.join(self.objpath) or self.modname
def format_signature(self):
+ # type: () -> unicode
"""Format the signature (arguments and return annotation) of the object.
Let the user process it via the ``autodoc-process-signature`` event.
"""
if self.args is not None:
# signature given explicitly
- args = "(%s)" % self.args
+ args = "(%s)" % self.args # type: unicode
else:
# try to introspect the signature
try:
@@ -652,6 +703,7 @@ class Documenter(object):
return ''
def add_directive_header(self, sig):
+ # type: (unicode) -> None
"""Add the directive header and options to the generated content."""
domain = getattr(self, 'domain', 'py')
directive = getattr(self, 'directivetype', self.objtype)
@@ -667,6 +719,7 @@ class Documenter(object):
self.add_line(u' :module: %s' % self.modname, sourcename)
def get_doc(self, encoding=None, ignore=1):
+ # type: (unicode, int) -> List[List[unicode]]
"""Decode and return lines of the docstring(s) for the object."""
docstring = self.get_attr(self.object, '__doc__', None)
# make sure we have Unicode docstrings, then sanitize and split
@@ -680,6 +733,7 @@ class Documenter(object):
return []
def process_doc(self, docstrings):
+ # type: (List[List[unicode]]) -> Iterator[unicode]
"""Let the user process the docstrings before adding them."""
for docstringlines in docstrings:
if self.env.app:
@@ -691,6 +745,7 @@ class Documenter(object):
yield line
def get_sourcename(self):
+ # type: () -> unicode
if self.analyzer:
# prevent encoding errors when the file name is non-ASCII
if not isinstance(self.analyzer.srcname, text_type):
@@ -702,6 +757,7 @@ class Documenter(object):
return u'docstring of %s' % self.fullname
def add_content(self, more_content, no_docstring=False):
+ # type: (Any, bool) -> None
"""Add content from docstrings, attribute documentation and user."""
# set sourcename and add content from attribute documentation
sourcename = self.get_sourcename()
@@ -733,6 +789,7 @@ class Documenter(object):
self.add_line(line, src[0], src[1])
def get_object_members(self, want_all):
+ # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
"""Return `(members_check_module, members)` where `members` is a
list of `(membername, member)` pairs of the members of *self.object*.
@@ -792,6 +849,7 @@ class Documenter(object):
return False, sorted(members)
def filter_members(self, members, want_all):
+ # type: (List[Tuple[unicode, Any]], bool) -> List[Tuple[unicode, Any, bool]]
"""Filter the given member list.
Members are skipped if
@@ -869,6 +927,7 @@ class Documenter(object):
return ret
def document_members(self, all_members=False):
+ # type: (bool) -> None
"""Generate reST for member documentation.
If *all_members* is True, do all members, else those given by
@@ -890,7 +949,7 @@ class Documenter(object):
if membername not in self.options.exclude_members]
# document non-skipped members
- memberdocumenters = []
+ memberdocumenters = [] # type: List[Tuple[Documenter, bool]]
for (mname, member, isattr) in self.filter_members(members, want_all):
classes = [cls for cls in itervalues(AutoDirective._registry)
if cls.can_document_member(member, mname, isattr, self)]
@@ -916,6 +975,7 @@ class Documenter(object):
tagorder = self.analyzer.tagorder
def keyfunc(entry):
+ # type: (Tuple[Documenter, bool]) -> int
fullname = entry[0].name.split('::')[1]
return tagorder.get(fullname, len(tagorder))
memberdocumenters.sort(key=keyfunc)
@@ -931,6 +991,7 @@ class Documenter(object):
def generate(self, more_content=None, real_modname=None,
check_module=False, all_members=False):
+ # type: (Any, str, bool, bool) -> None
"""Generate reST for the object given by *self.name*, and possibly for
its members.
@@ -966,7 +1027,7 @@ class Documenter(object):
# be cached anyway)
self.analyzer.find_attr_docs()
except PycodeError as err:
- self.env.app.debug('[autodoc] module analyzer failed: %s', err)
+ logger.debug('[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
self.analyzer = None
# at least add the module.__file__ as a dependency
@@ -1020,19 +1081,22 @@ class ModuleDocumenter(Documenter):
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
'imported-members': bool_option,
- }
+ } # type: Dict[unicode, Callable]
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
# don't document submodules automatically
return False
def resolve_name(self, modname, parents, path, base):
+ # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
if modname is not None:
self.directive.warn('"::" in automodule name doesn\'t make sense')
return (path or '') + base, []
def parse_name(self):
+ # type: () -> bool
ret = Documenter.parse_name(self)
if self.args or self.retann:
self.directive.warn('signature arguments or return annotation '
@@ -1040,6 +1104,7 @@ class ModuleDocumenter(Documenter):
return ret
def add_directive_header(self, sig):
+ # type: (unicode) -> None
Documenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
@@ -1055,6 +1120,7 @@ class ModuleDocumenter(Documenter):
self.add_line(u' :deprecated:', sourcename)
def get_object_members(self, want_all):
+ # type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
if want_all:
if not hasattr(self.object, '__all__'):
# for implicit module members, check __module__ to avoid
@@ -1091,6 +1157,7 @@ class ModuleLevelDocumenter(Documenter):
classes, data/constants).
"""
def resolve_name(self, modname, parents, path, base):
+ # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
if modname is None:
if path:
modname = path.rstrip('.')
@@ -1111,6 +1178,7 @@ class ClassLevelDocumenter(Documenter):
attributes).
"""
def resolve_name(self, modname, parents, path, base):
+ # type: (str, Any, str, Any) -> Tuple[str, List[unicode]]
if modname is None:
if path:
mod_cls = path.rstrip('.')
@@ -1126,7 +1194,7 @@ class ClassLevelDocumenter(Documenter):
# ... if still None, there's no way to know
if mod_cls is None:
return None, []
- modname, cls = rpartition(mod_cls, '.')
+ modname, cls = rpartition(mod_cls, '.') # type: ignore
parents = [cls]
# if the module name is still missing, get it like above
if not modname:
@@ -1144,6 +1212,7 @@ class DocstringSignatureMixin(object):
"""
def _find_signature(self, encoding=None):
+ # type: (unicode) -> Tuple[str, str]
docstrings = self.get_doc(encoding)
self._new_docstrings = docstrings[:]
result = None
@@ -1152,12 +1221,12 @@ class DocstringSignatureMixin(object):
if not doclines:
continue
# match first line of docstring against signature RE
- match = py_ext_sig_re.match(doclines[0])
+ match = py_ext_sig_re.match(doclines[0]) # type: ignore
if not match:
continue
exmod, path, base, args, retann = match.groups()
# the base name must match ours
- valid_names = [self.objpath[-1]]
+ valid_names = [self.objpath[-1]] # type: ignore
if isinstance(self, ClassDocumenter):
valid_names.append('__init__')
if hasattr(self.object, '__mro__'):
@@ -1172,19 +1241,21 @@ class DocstringSignatureMixin(object):
return result
def get_doc(self, encoding=None, ignore=1):
+ # type: (unicode, int) -> List[List[unicode]]
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
- return Documenter.get_doc(self, encoding, ignore)
+ return Documenter.get_doc(self, encoding, ignore) # type: ignore
def format_signature(self):
- if self.args is None and self.env.config.autodoc_docstring_signature:
+ # type: () -> unicode
+ if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
result = self._find_signature()
if result is not None:
self.args, self.retann = result
- return Documenter.format_signature(self)
+ return Documenter.format_signature(self) # type: ignore
class DocstringStripSignatureMixin(DocstringSignatureMixin):
@@ -1193,7 +1264,8 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
feature of stripping any function signature from the docstring.
"""
def format_signature(self):
- if self.args is None and self.env.config.autodoc_docstring_signature:
+ # type: () -> unicode
+ if self.args is None and self.env.config.autodoc_docstring_signature: # type: ignore
# only act if a signature is not explicitly given already, and if
# the feature is enabled
result = self._find_signature()
@@ -1202,10 +1274,10 @@ class DocstringStripSignatureMixin(DocstringSignatureMixin):
# DocstringSignatureMixin.format_signature.
# Documenter.format_signature use self.args value to format.
_args, self.retann = result
- return Documenter.format_signature(self)
+ return Documenter.format_signature(self) # type: ignore
-class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
+class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore
"""
Specialized Documenter subclass for functions.
"""
@@ -1214,9 +1286,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
return isinstance(member, (FunctionType, BuiltinFunctionType))
def format_args(self):
+ # type: () -> unicode
if inspect.isbuiltin(self.object) or \
inspect.ismethoddescriptor(self.object):
# cannot introspect arguments of a C function or method
@@ -1243,10 +1317,11 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
return args
def document_members(self, all_members=False):
+ # type: (bool) -> None
pass
-class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
+class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type: ignore
"""
Specialized Documenter subclass for classes.
"""
@@ -1258,13 +1333,15 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
'show-inheritance': bool_option, 'member-order': identity,
'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
- }
+ } # type: Dict[unicode, Callable]
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
return isinstance(member, class_types)
def import_object(self):
+ # type: () -> Any
ret = ModuleLevelDocumenter.import_object(self)
# if the class is documented under another name, document it
# as data/attribute
@@ -1276,6 +1353,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
return ret
def format_args(self):
+ # type: () -> unicode
# for classes, the relevant signature is the __init__ method's
initmeth = self.get_attr(self.object, '__init__', None)
# classes without __init__ method, default __init__ or
@@ -1295,12 +1373,14 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
return formatargspec(initmeth, *argspec)
def format_signature(self):
+ # type: () -> unicode
if self.doc_as_attr:
return ''
return DocstringSignatureMixin.format_signature(self)
def add_directive_header(self, sig):
+ # type: (unicode) -> None
if self.doc_as_attr:
self.directivetype = 'attribute'
Documenter.add_directive_header(self, sig)
@@ -1318,6 +1398,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
sourcename)
def get_doc(self, encoding=None, ignore=1):
+ # type: (unicode, int) -> List[List[unicode]]
lines = getattr(self, '_new_docstrings', None)
if lines is not None:
return lines
@@ -1363,6 +1444,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
return doc
def add_content(self, more_content, no_docstring=False):
+ # type: (Any, bool) -> None
if self.doc_as_attr:
classname = safe_getattr(self.object, '__name__', None)
if classname:
@@ -1374,6 +1456,7 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter):
ModuleLevelDocumenter.add_content(self, more_content)
def document_members(self, all_members=False):
+ # type: (bool) -> None
if self.doc_as_attr:
return
ModuleLevelDocumenter.document_members(self, all_members)
@@ -1391,8 +1474,9 @@ class ExceptionDocumenter(ClassDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
return isinstance(member, class_types) and \
- issubclass(member, BaseException)
+ issubclass(member, BaseException) # type: ignore
class DataDocumenter(ModuleLevelDocumenter):
@@ -1407,9 +1491,11 @@ class DataDocumenter(ModuleLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
return isinstance(parent, ModuleDocumenter) and isattr
def add_directive_header(self, sig):
+ # type: (unicode) -> None
ModuleLevelDocumenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@@ -1426,10 +1512,11 @@ class DataDocumenter(ModuleLevelDocumenter):
sourcename)
def document_members(self, all_members=False):
+ # type: (bool) -> None
pass
-class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter):
+class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type: ignore
"""
Specialized Documenter subclass for methods (normal, static and class).
"""
@@ -1439,10 +1526,12 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
return inspect.isroutine(member) and \
not isinstance(parent, ModuleDocumenter)
def import_object(self):
+ # type: () -> Any
ret = ClassLevelDocumenter.import_object(self)
if not ret:
return ret
@@ -1463,6 +1552,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter):
return ret
def format_args(self):
+ # type: () -> unicode
if inspect.isbuiltin(self.object) or \
inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
@@ -1476,10 +1566,11 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter):
return args
def document_members(self, all_members=False):
+ # type: (bool) -> None
pass
-class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
+class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter): # type: ignore
"""
Specialized Documenter subclass for attributes.
"""
@@ -1496,6 +1587,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
non_attr_types = cls.method_types + (type, MethodDescriptorType)
isdatadesc = isdescriptor(member) and not \
isinstance(member, non_attr_types) and not \
@@ -1508,9 +1600,11 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
not isinstance(member, class_types))
def document_members(self, all_members=False):
+ # type: (bool) -> None
pass
def import_object(self):
+ # type: () -> Any
ret = ClassLevelDocumenter.import_object(self)
if isenumattribute(self.object):
self.object = self.object.value
@@ -1523,10 +1617,12 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
return ret
def get_real_modname(self):
+ # type: () -> str
return self.get_attr(self.parent or self.object, '__module__', None) \
or self.modname
def add_directive_header(self, sig):
+ # type: (unicode) -> None
ClassLevelDocumenter.add_directive_header(self, sig)
sourcename = self.get_sourcename()
if not self.options.annotation:
@@ -1544,6 +1640,7 @@ class AttributeDocumenter(DocstringStripSignatureMixin, ClassLevelDocumenter):
sourcename)
def add_content(self, more_content, no_docstring=False):
+ # type: (Any, bool) -> None
if not self._datadescriptor:
# if it's not a data descriptor, its docstring is very probably the
# wrong thing to display
@@ -1565,10 +1662,12 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
+ # type: (Any, unicode, bool, Any) -> bool
"""This documents only INSTANCEATTR members."""
return isattr and (member is INSTANCEATTR)
def import_object(self):
+ # type: () -> bool
"""Never import anything."""
# disguise as an attribute
self.objtype = 'attribute'
@@ -1576,6 +1675,7 @@ class InstanceAttributeDocumenter(AttributeDocumenter):
return True
def add_content(self, more_content, no_docstring=False):
+ # type: (Any, bool) -> None
"""Never try to get a docstring from the object."""
AttributeDocumenter.add_content(self, more_content, no_docstring=True)
@@ -1596,10 +1696,10 @@ class AutoDirective(Directive):
attributes of the parents.
"""
# a registry of objtype -> documenter class
- _registry = {}
+ _registry = {} # type: Dict[unicode, Type[Documenter]]
# a registry of type -> getattr function
- _special_attrgetters = {}
+ _special_attrgetters = {} # type: Dict[Type, Callable]
# flags that can be given in autodoc_default_flags
_default_flags = set([
@@ -1617,21 +1717,24 @@ class AutoDirective(Directive):
option_spec = DefDict(identity)
def warn(self, msg):
+ # type: (unicode) -> None
self.warnings.append(self.reporter.warning(msg, line=self.lineno))
def run(self):
- self.filename_set = set() # a set of dependent filenames
+ # type: () -> List[nodes.Node]
+ self.filename_set = set() # type: Set[unicode]
+ # a set of dependent filenames
self.reporter = self.state.document.reporter
self.env = self.state.document.settings.env
- self.warnings = []
+ self.warnings = [] # type: List[unicode]
self.result = ViewList()
try:
source, lineno = self.reporter.get_source_and_line(self.lineno)
except AttributeError:
source = lineno = None
- self.env.app.debug('[autodoc] %s:%s: input:\n%s',
- source, lineno, self.block_text)
+ logger.debug('[autodoc] %s:%s: input:\n%s',
+ source, lineno, self.block_text)
# find out what documenter to call
objtype = self.name[4:]
@@ -1660,7 +1763,7 @@ class AutoDirective(Directive):
if not self.result:
return self.warnings
- self.env.app.debug2('[autodoc] output:\n%s', '\n'.join(self.result))
+ logger.debug('[autodoc] output:\n%s', '\n'.join(self.result))
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
@@ -1687,6 +1790,7 @@ class AutoDirective(Directive):
def add_documenter(cls):
+ # type: (Type[Documenter]) -> None
"""Register a new Documenter."""
if not issubclass(cls, Documenter):
raise ExtensionError('autodoc documenter %r must be a subclass '
@@ -1699,6 +1803,7 @@ def add_documenter(cls):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_autodocumenter(ModuleDocumenter)
app.add_autodocumenter(ClassDocumenter)
app.add_autodocumenter(ExceptionDocumenter)
@@ -1724,7 +1829,9 @@ class testcls:
"""test doc string"""
def __getattr__(self, x):
+ # type: (Any) -> Any
return x
def __setattr__(self, x, y):
+ # type: (Any, Any) -> None
"""Attr setter."""
diff --git a/sphinx/ext/autosectionlabel.py b/sphinx/ext/autosectionlabel.py
index 61ea94c0b..deb583808 100644
--- a/sphinx/ext/autosectionlabel.py
+++ b/sphinx/ext/autosectionlabel.py
@@ -10,8 +10,11 @@
"""
from docutils import nodes
+from sphinx.util import logging
from sphinx.util.nodes import clean_astext
+logger = logging.getLogger(__name__)
+
def register_sections_as_label(app, document):
labels = app.env.domaindata['std']['labels']
@@ -23,8 +26,9 @@ def register_sections_as_label(app, document):
sectname = clean_astext(node[0])
if name in labels:
- app.env.warn_node('duplicate label %s, ' % name + 'other instance '
- 'in ' + app.env.doc2path(labels[name][0]), node)
+ logger.warning('duplicate label %s, ' % name + 'other instance '
+ 'in ' + app.env.doc2path(labels[name][0]),
+ location=node)
anonlabels[name] = docname, labelid
labels[name] = docname, labelid, sectname
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index c9c4292b2..7a4a65741 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -62,17 +62,28 @@ from six import string_types
from types import ModuleType
from six import text_type
-from docutils.parsers.rst import directives
+
+from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
from docutils import nodes
import sphinx
from sphinx import addnodes
-from sphinx.util import import_object, rst
-from sphinx.util.compat import Directive
+from sphinx.environment.adapters.toctree import TocTree
+from sphinx.util import import_object, rst, logging
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.ext.autodoc import Options
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple, Type, Union # NOQA
+ from docutils.utils import Inliner # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.ext.autodoc import Documenter # NOQA
+
+logger = logging.getLogger(__name__)
+
# -- autosummary_toc node ------------------------------------------------------
@@ -81,6 +92,7 @@ class autosummary_toc(nodes.comment):
def process_autosummary_toc(app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
"""Insert items described in autosummary:: to the TOC tree, but do
not generate the toctree:: list.
"""
@@ -93,7 +105,7 @@ def process_autosummary_toc(app, doctree):
try:
if (isinstance(subnode, autosummary_toc) and
isinstance(subnode[0], addnodes.toctree)):
- env.note_toctree(env.docname, subnode[0])
+ TocTree(env).note(env.docname, subnode[0])
continue
except IndexError:
continue
@@ -105,11 +117,13 @@ def process_autosummary_toc(app, doctree):
def autosummary_toc_visit_html(self, node):
+ # type: (nodes.NodeVisitor, autosummary_toc) -> None
"""Hide autosummary toctree list in HTML output."""
raise nodes.SkipNode
def autosummary_noop(self, node):
+ # type: (nodes.NodeVisitor, nodes.Node) -> None
pass
@@ -120,6 +134,7 @@ class autosummary_table(nodes.comment):
def autosummary_table_visit_html(self, node):
+ # type: (nodes.NodeVisitor, autosummary_table) -> None
"""Make the first column of the table non-breaking."""
try:
tbody = node[0][0][-1]
@@ -138,11 +153,12 @@ def autosummary_table_visit_html(self, node):
# -- autodoc integration -------------------------------------------------------
class FakeDirective(object):
- env = {}
+ env = {} # type: Dict
genopt = Options()
def get_documenter(obj, parent):
+ # type: (Any, Any) -> Type[Documenter]
"""Get an autodoc.Documenter class suitable for documenting the given
object.
@@ -198,13 +214,15 @@ class Autosummary(Directive):
}
def warn(self, msg):
+ # type: (unicode) -> None
self.warnings.append(self.state.document.reporter.warning(
msg, line=self.lineno))
def run(self):
+ # type: () -> List[nodes.Node]
self.env = env = self.state.document.settings.env
self.genopt = Options()
- self.warnings = []
+ self.warnings = [] # type: List[nodes.Node]
self.result = ViewList()
names = [x.strip().split()[0] for x in self.content
@@ -237,6 +255,7 @@ class Autosummary(Directive):
return self.warnings + nodes
def get_items(self, names):
+ # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode, unicode]]
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
"""
@@ -244,7 +263,7 @@ class Autosummary(Directive):
prefixes = get_import_prefixes_from_env(env)
- items = []
+ items = [] # type: List[Tuple[unicode, unicode, unicode, unicode]]
max_item_chars = 50
@@ -289,8 +308,7 @@ class Autosummary(Directive):
# be cached anyway)
documenter.analyzer.find_attr_docs()
except PycodeError as err:
- documenter.env.app.debug(
- '[autodoc] module analyzer failed: %s', err)
+ logger.debug('[autodoc] module analyzer failed: %s', err)
# no source file -- e.g. for builtin and C modules
documenter.analyzer = None
@@ -333,12 +351,13 @@ class Autosummary(Directive):
return items
def get_table(self, items):
+ # type: (List[Tuple[unicode, unicode, unicode, unicode]]) -> List[Union[addnodes.tabular_col_spec, autosummary_table]] # NOQA
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
- table_spec['spec'] = 'p{0.5\linewidth}p{0.5\linewidth}'
+ table_spec['spec'] = r'p{0.5\linewidth}p{0.5\linewidth}'
table = autosummary_table('')
real_table = nodes.table('', classes=['longtable'])
@@ -351,6 +370,7 @@ class Autosummary(Directive):
group.append(body)
def append_row(*column_texts):
+ # type: (unicode) -> None
row = nodes.row('')
for text in column_texts:
node = nodes.paragraph('')
@@ -368,7 +388,7 @@ class Autosummary(Directive):
for name, sig, summary, real_name in items:
qualifier = 'obj'
if 'nosignatures' not in self.options:
- col1 = ':%s:`%s <%s>`\ %s' % (qualifier, name, real_name, rst.escape(sig))
+ col1 = ':%s:`%s <%s>`\\ %s' % (qualifier, name, real_name, rst.escape(sig)) # type: unicode # NOQA
else:
col1 = ':%s:`%s <%s>`' % (qualifier, name, real_name)
col2 = summary
@@ -378,6 +398,7 @@ class Autosummary(Directive):
def mangle_signature(sig, max_chars=30):
+ # type: (unicode, int) -> unicode
"""Reformat a function signature to a more compact form."""
s = re.sub(r"^\((.*)\)$", r"\1", sig).strip()
@@ -387,12 +408,12 @@ def mangle_signature(sig, max_chars=30):
s = re.sub(r"'[^']*'", "", s)
# Parse the signature to arguments + options
- args = []
- opts = []
+ args = [] # type: List[unicode]
+ opts = [] # type: List[unicode]
opt_re = re.compile(r"^(.*, |)([a-zA-Z0-9_*]+)=")
while s:
- m = opt_re.search(s)
+ m = opt_re.search(s) # type: ignore
if not m:
# The rest are arguments
args = s.split(', ')
@@ -414,6 +435,7 @@ def mangle_signature(sig, max_chars=30):
def limited_join(sep, items, max_chars=30, overflow_marker="..."):
+ # type: (unicode, List[unicode], int, unicode) -> unicode
"""Join a number of strings to one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
@@ -440,11 +462,12 @@ def limited_join(sep, items, max_chars=30, overflow_marker="..."):
# -- Importing items -----------------------------------------------------------
def get_import_prefixes_from_env(env):
+ # type: (BuildEnvironment) -> List
"""
Obtain current Python import prefixes (for `import_by_name`)
from ``document.env``
"""
- prefixes = [None]
+ prefixes = [None] # type: List
currmodule = env.ref_context.get('py:module')
if currmodule:
@@ -461,6 +484,7 @@ def get_import_prefixes_from_env(env):
def import_by_name(name, prefixes=[None]):
+ # type: (unicode, List) -> Tuple[unicode, Any, Any, unicode]
"""Import a Python object that has the given *name*, under one of the
*prefixes*. The first name that succeeds is used.
"""
@@ -479,6 +503,7 @@ def import_by_name(name, prefixes=[None]):
def _import_by_name(name):
+ # type: (str) -> Tuple[Any, Any, unicode]
"""Import a Python object given its full name."""
try:
name_parts = name.split('.')
@@ -521,14 +546,15 @@ def _import_by_name(name):
# -- :autolink: (smart default role) -------------------------------------------
-def autolink_role(typ, rawtext, etext, lineno, inliner,
- options={}, content=[]):
+def autolink_role(typ, rawtext, etext, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
"""Smart linking role.
Expands to ':obj:`text`' if `text` is an object that can be imported;
otherwise expands to '*text*'.
"""
env = inliner.document.settings.env
+ r = None # type: Tuple[List[nodes.Node], List[nodes.Node]]
r = env.get_domain('py').role('obj')(
'obj', rawtext, etext, lineno, inliner, options, content)
pnode = r[0][0]
@@ -537,21 +563,24 @@ def autolink_role(typ, rawtext, etext, lineno, inliner,
try:
name, obj, parent, modname = import_by_name(pnode['reftarget'], prefixes)
except ImportError:
- content = pnode[0]
- r[0][0] = nodes.emphasis(rawtext, content[0].astext(),
- classes=content['classes'])
+ content_node = pnode[0]
+ r[0][0] = nodes.emphasis(rawtext, content_node[0].astext(),
+ classes=content_node['classes'])
return r
def get_rst_suffix(app):
+ # type: (Sphinx) -> unicode
def get_supported_format(suffix):
+ # type: (unicode) -> Tuple[unicode]
parser_class = app.config.source_parsers.get(suffix)
if parser_class is None:
return ('restructuredtext',)
if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser')
+ parser_class = import_object(parser_class, 'source parser') # type: ignore
return parser_class.supported
+ suffix = None # type: unicode
for suffix in app.config.source_suffix:
if 'restructuredtext' in get_supported_format(suffix):
return suffix
@@ -560,6 +589,7 @@ def get_rst_suffix(app):
def process_generate_options(app):
+ # type: (Sphinx) -> None
genfiles = app.config.autosummary_generate
if genfiles and not hasattr(genfiles, '__len__'):
@@ -578,16 +608,17 @@ def process_generate_options(app):
suffix = get_rst_suffix(app)
if suffix is None:
- app.warn('autosummary generats .rst files internally. '
- 'But your source_suffix does not contain .rst. Skipped.')
+ logger.warning('autosummary generats .rst files internally. '
+ 'But your source_suffix does not contain .rst. Skipped.')
return
generate_autosummary_docs(genfiles, builder=app.builder,
- warn=app.warn, info=app.info, suffix=suffix,
- base_path=app.srcdir)
+ warn=logger.warning, info=logger.info,
+ suffix=suffix, base_path=app.srcdir)
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
# I need autodoc
app.setup_extension('sphinx.ext.autodoc')
app.add_node(autosummary_toc,
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index 173c9ab08..2557f5b22 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -49,8 +49,17 @@ add_documenter(MethodDocumenter)
add_documenter(AttributeDocumenter)
add_documenter(InstanceAttributeDocumenter)
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Tuple, List # NOQA
+ from jinja2 import BaseLoader # NOQA
+ from sphinx import addnodes # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
def main(argv=sys.argv):
+ # type: (List[str]) -> None
usage = """%prog [OPTIONS] SOURCEFILE ..."""
p = optparse.OptionParser(usage.strip())
p.add_option("-o", "--output-dir", action="store", type="string",
@@ -62,6 +71,9 @@ def main(argv=sys.argv):
p.add_option("-t", "--templates", action="store", type="string",
dest="templates", default=None,
help="Custom template directory (default: %default)")
+ p.add_option("-i", "--imported-members", action="store_true",
+ dest="imported_members", default=False,
+ help="Document imported members (default: %default)")
options, args = p.parse_args(argv[1:])
if len(args) < 1:
@@ -69,14 +81,17 @@ def main(argv=sys.argv):
generate_autosummary_docs(args, options.output_dir,
"." + options.suffix,
- template_dir=options.templates)
+ template_dir=options.templates,
+ imported_members=options.imported_members)
def _simple_info(msg):
+ # type: (unicode) -> None
print(msg)
def _simple_warn(msg):
+ # type: (unicode) -> None
print('WARNING: ' + msg, file=sys.stderr)
@@ -84,7 +99,9 @@ def _simple_warn(msg):
def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
warn=_simple_warn, info=_simple_info,
- base_path=None, builder=None, template_dir=None):
+ base_path=None, builder=None, template_dir=None,
+ imported_members=False):
+ # type: (List[unicode], unicode, unicode, Callable, Callable, unicode, Builder, unicode, bool) -> None # NOQA
showed_sources = list(sorted(sources))
if len(showed_sources) > 20:
@@ -99,8 +116,11 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
sources = [os.path.join(base_path, filename) for filename in sources]
# create our own templating environment
+ template_dirs = None # type: List[unicode]
template_dirs = [os.path.join(package_dir, 'ext',
'autosummary', 'templates')]
+
+ template_loader = None # type: BaseLoader
if builder is not None:
# allow the user to override the templates
template_loader = BuiltinTemplateLoader()
@@ -108,7 +128,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
else:
if template_dir:
template_dirs.insert(0, template_dir)
- template_loader = FileSystemLoader(template_dirs)
+ template_loader = FileSystemLoader(template_dirs) # type: ignore
template_env = SandboxedEnvironment(loader=template_loader)
# read
@@ -153,36 +173,38 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
except TemplateNotFound:
template = template_env.get_template('autosummary/base.rst')
- def get_members(obj, typ, include_public=[]):
- items = []
+ def get_members(obj, typ, include_public=[], imported=False):
+ # type: (Any, unicode, List[unicode], bool) -> Tuple[List[unicode], List[unicode]] # NOQA
+ items = [] # type: List[unicode]
for name in dir(obj):
try:
- documenter = get_documenter(safe_getattr(obj, name),
- obj)
+ value = safe_getattr(obj, name)
except AttributeError:
continue
+ documenter = get_documenter(value, obj)
if documenter.objtype == typ:
- items.append(name)
+ if imported or getattr(value, '__module__', None) == obj.__name__:
+ items.append(name)
public = [x for x in items
if x in include_public or not x.startswith('_')]
return public, items
- ns = {}
+ ns = {} # type: Dict[unicode, Any]
if doc.objtype == 'module':
ns['members'] = dir(obj)
ns['functions'], ns['all_functions'] = \
- get_members(obj, 'function')
+ get_members(obj, 'function', imported=imported_members)
ns['classes'], ns['all_classes'] = \
- get_members(obj, 'class')
+ get_members(obj, 'class', imported=imported_members)
ns['exceptions'], ns['all_exceptions'] = \
- get_members(obj, 'exception')
+ get_members(obj, 'exception', imported=imported_members)
elif doc.objtype == 'class':
ns['members'] = dir(obj)
ns['methods'], ns['all_methods'] = \
- get_members(obj, 'method', ['__init__'])
+ get_members(obj, 'method', ['__init__'], imported=imported_members)
ns['attributes'], ns['all_attributes'] = \
- get_members(obj, 'attribute')
+ get_members(obj, 'attribute', imported=imported_members)
parts = name.split('.')
if doc.objtype in ('method', 'attribute'):
@@ -202,7 +224,7 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
ns['underline'] = len(name) * '='
rendered = template.render(**ns)
- f.write(rendered)
+ f.write(rendered) # type: ignore
# descend recursively to new files
if new_files:
@@ -215,21 +237,23 @@ def generate_autosummary_docs(sources, output_dir=None, suffix='.rst',
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(filenames):
+ # type: (List[unicode]) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
- documented = []
+ documented = [] # type: List[Tuple[unicode, unicode, unicode]]
for filename in filenames:
- with codecs.open(filename, 'r', encoding='utf-8',
+ with codecs.open(filename, 'r', encoding='utf-8', # type: ignore
errors='ignore') as f:
lines = f.read().splitlines()
- documented.extend(find_autosummary_in_lines(lines,
+ documented.extend(find_autosummary_in_lines(lines, # type: ignore
filename=filename))
return documented
def find_autosummary_in_docstring(name, module=None, filename=None):
+ # type: (unicode, Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
@@ -249,6 +273,7 @@ def find_autosummary_in_docstring(name, module=None, filename=None):
def find_autosummary_in_lines(lines, module=None, filename=None):
+ # type: (List[unicode], Any, unicode) -> List[Tuple[unicode, unicode, unicode]]
"""Find out what items appear in autosummary:: directives in the
given lines.
@@ -268,9 +293,9 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
- documented = []
+ documented = [] # type: List[Tuple[unicode, unicode, unicode]]
- toctree = None
+ toctree = None # type: unicode
template = None
current_module = module
in_autosummary = False
@@ -278,7 +303,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
for line in lines:
if in_autosummary:
- m = toctree_arg_re.match(line)
+ m = toctree_arg_re.match(line) # type: ignore
if m:
toctree = m.group(1)
if filename:
@@ -286,7 +311,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
toctree)
continue
- m = template_arg_re.match(line)
+ m = template_arg_re.match(line) # type: ignore
if m:
template = m.group(1).strip()
continue
@@ -294,7 +319,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
if line.strip().startswith(':'):
continue # skip options
- m = autosummary_item_re.match(line)
+ m = autosummary_item_re.match(line) # type: ignore
if m:
name = m.group(1).strip()
if name.startswith('~'):
@@ -310,7 +335,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
in_autosummary = False
- m = autosummary_re.match(line)
+ m = autosummary_re.match(line) # type: ignore
if m:
in_autosummary = True
base_indent = m.group(1)
@@ -318,7 +343,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
template = None
continue
- m = automodule_re.search(line)
+ m = automodule_re.search(line) # type: ignore
if m:
current_module = m.group(1).strip()
# recurse into the automodule docstring
@@ -326,7 +351,7 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
current_module, filename=filename))
continue
- m = module_re.match(line)
+ m = module_re.match(line) # type: ignore
if m:
current_module = m.group(2)
continue
diff --git a/sphinx/ext/coverage.py b/sphinx/ext/coverage.py
index 3a2fd7508..02843ac83 100644
--- a/sphinx/ext/coverage.py
+++ b/sphinx/ext/coverage.py
@@ -20,22 +20,32 @@ from six.moves import cPickle as pickle
import sphinx
from sphinx.builders import Builder
+from sphinx.util import logging
from sphinx.util.inspect import safe_getattr
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, IO, List, Pattern, Set, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
+
# utility
def write_header(f, text, char='-'):
+ # type:(IO, unicode, unicode) -> None
f.write(text + '\n')
f.write(char * len(text) + '\n')
-def compile_regex_list(name, exps, warnfunc):
+def compile_regex_list(name, exps):
+ # type: (unicode, unicode) -> List[Pattern]
lst = []
for exp in exps:
try:
lst.append(re.compile(exp))
except Exception:
- warnfunc('invalid regex %r in %s' % (exp, name))
+ logger.warning('invalid regex %r in %s', exp, name)
return lst
@@ -44,45 +54,46 @@ class CoverageBuilder(Builder):
name = 'coverage'
def init(self):
- self.c_sourcefiles = []
+ # type: () -> None
+ self.c_sourcefiles = [] # type: List[unicode]
for pattern in self.config.coverage_c_path:
pattern = path.join(self.srcdir, pattern)
self.c_sourcefiles.extend(glob.glob(pattern))
- self.c_regexes = []
+ self.c_regexes = [] # type: List[Tuple[unicode, Pattern]]
for (name, exp) in self.config.coverage_c_regexes.items():
try:
self.c_regexes.append((name, re.compile(exp)))
except Exception:
- self.warn('invalid regex %r in coverage_c_regexes' % exp)
+ logger.warning('invalid regex %r in coverage_c_regexes', exp)
- self.c_ignorexps = {}
+ self.c_ignorexps = {} # type: Dict[unicode, List[Pattern]]
for (name, exps) in iteritems(self.config.coverage_ignore_c_items):
- self.c_ignorexps[name] = compile_regex_list(
- 'coverage_ignore_c_items', exps, self.warn)
- self.mod_ignorexps = compile_regex_list(
- 'coverage_ignore_modules', self.config.coverage_ignore_modules,
- self.warn)
- self.cls_ignorexps = compile_regex_list(
- 'coverage_ignore_classes', self.config.coverage_ignore_classes,
- self.warn)
- self.fun_ignorexps = compile_regex_list(
- 'coverage_ignore_functions', self.config.coverage_ignore_functions,
- self.warn)
+ self.c_ignorexps[name] = compile_regex_list('coverage_ignore_c_items',
+ exps)
+ self.mod_ignorexps = compile_regex_list('coverage_ignore_modules',
+ self.config.coverage_ignore_modules)
+ self.cls_ignorexps = compile_regex_list('coverage_ignore_classes',
+ self.config.coverage_ignore_classes)
+ self.fun_ignorexps = compile_regex_list('coverage_ignore_functions',
+ self.config.coverage_ignore_functions)
def get_outdated_docs(self):
+ # type: () -> unicode
return 'coverage overview'
def write(self, *ignored):
- self.py_undoc = {}
+ # type: (Any) -> None
+ self.py_undoc = {} # type: Dict[unicode, Dict[unicode, Any]]
self.build_py_coverage()
self.write_py_coverage()
- self.c_undoc = {}
+ self.c_undoc = {} # type: Dict[unicode, Set[Tuple[unicode, unicode]]]
self.build_c_coverage()
self.write_c_coverage()
def build_c_coverage(self):
+ # type: () -> None
# Fetch all the info from the header files
c_objects = self.env.domaindata['c']['objects']
for filename in self.c_sourcefiles:
@@ -94,7 +105,7 @@ class CoverageBuilder(Builder):
if match:
name = match.groups()[0]
if name not in c_objects:
- for exp in self.c_ignorexps.get(key, ()):
+ for exp in self.c_ignorexps.get(key, []):
if exp.match(name):
break
else:
@@ -104,6 +115,7 @@ class CoverageBuilder(Builder):
self.c_undoc[filename] = undoc
def write_c_coverage(self):
+ # type: () -> None
output_file = path.join(self.outdir, 'c.txt')
with open(output_file, 'w') as op:
if self.config.coverage_write_headline:
@@ -117,6 +129,7 @@ class CoverageBuilder(Builder):
op.write('\n')
def build_py_coverage(self):
+ # type: () -> None
objects = self.env.domaindata['py']['objects']
modules = self.env.domaindata['py']['modules']
@@ -134,13 +147,12 @@ class CoverageBuilder(Builder):
try:
mod = __import__(mod_name, fromlist=['foo'])
except ImportError as err:
- self.warn('module %s could not be imported: %s' %
- (mod_name, err))
+ logger.warning('module %s could not be imported: %s', mod_name, err)
self.py_undoc[mod_name] = {'error': err}
continue
funcs = []
- classes = {}
+ classes = {} # type: Dict[unicode, List[unicode]]
for name, obj in inspect.getmembers(mod):
# diverse module attributes are ignored:
@@ -177,7 +189,7 @@ class CoverageBuilder(Builder):
classes[name] = []
continue
- attrs = []
+ attrs = [] # type: List[unicode]
for attr_name in dir(obj):
if attr_name not in obj.__dict__:
@@ -207,6 +219,7 @@ class CoverageBuilder(Builder):
self.py_undoc[mod_name] = {'funcs': funcs, 'classes': classes}
def write_py_coverage(self):
+ # type: () -> None
output_file = path.join(self.outdir, 'python.txt')
failed = []
with open(output_file, 'w') as op:
@@ -242,6 +255,7 @@ class CoverageBuilder(Builder):
op.writelines(' * %s -- %s\n' % x for x in failed)
def finish(self):
+ # type: () -> None
# dump the coverage data to a pickle file too
picklepath = path.join(self.outdir, 'undoc.pickle')
with open(picklepath, 'wb') as dumpfile:
@@ -249,6 +263,7 @@ class CoverageBuilder(Builder):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_builder(CoverageBuilder)
app.add_config_value('coverage_ignore_modules', [], False)
app.add_config_value('coverage_ignore_functions', [], False)
diff --git a/sphinx/ext/doctest.py b/sphinx/ext/doctest.py
index 1904ebdbb..27672ff43 100644
--- a/sphinx/ext/doctest.py
+++ b/sphinx/ext/doctest.py
@@ -15,26 +15,37 @@ import re
import sys
import time
import codecs
+import platform
from os import path
import doctest
from six import itervalues, StringIO, binary_type, text_type, PY2
+from distutils.version import LooseVersion
+
from docutils import nodes
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
import sphinx
from sphinx.builders import Builder
-from sphinx.util import force_decode
+from sphinx.util import force_decode, logging
from sphinx.util.nodes import set_source_info
-from sphinx.util.compat import Directive
-from sphinx.util.console import bold
+from sphinx.util.console import bold # type: ignore
from sphinx.util.osutil import fs_encoding
+from sphinx.locale import _
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, IO, Iterable, List, Sequence, Set, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
blankline_re = re.compile(r'^\s*<BLANKLINE>', re.MULTILINE)
doctestopt_re = re.compile(r'#\s*doctest:.+$', re.MULTILINE)
if PY2:
def doctest_encode(text, encoding):
+ # type: (str, unicode) -> unicode
if isinstance(text, text_type):
text = text.encode(encoding)
if text.startswith(codecs.BOM_UTF8):
@@ -42,9 +53,34 @@ if PY2:
return text
else:
def doctest_encode(text, encoding):
+ # type: (unicode, unicode) -> unicode
return text
+def compare_version(ver1, ver2, operand):
+ # type: (unicode, unicode, unicode) -> bool
+ """Compare `ver1` to `ver2`, relying on `operand`.
+
+ Some examples:
+
+ >>> compare_version('3.3', '3.5', '<=')
+ True
+ >>> compare_version('3.3', '3.2', '<=')
+ False
+ >>> compare_version('3.3a0', '3.3', '<=')
+ True
+ """
+ if operand not in ('<=', '<', '==', '>=', '>'):
+ raise ValueError("'%s' is not a valid operand.")
+ v1 = LooseVersion(ver1)
+ v2 = LooseVersion(ver2)
+ return ((operand == '<=' and (v1 <= v2)) or
+ (operand == '<' and (v1 < v2)) or
+ (operand == '==' and (v1 == v2)) or
+ (operand == '>=' and (v1 >= v2)) or
+ (operand == '>' and (v1 > v2)))
+
+
# set up the necessary directives
class TestDirective(Directive):
@@ -58,6 +94,7 @@ class TestDirective(Directive):
final_argument_whitespace = True
def run(self):
+ # type: () -> List[nodes.Node]
# use ordinary docutils nodes for test code: they get special attributes
# so that our builder recognizes them, and the other builders are happy.
code = '\n'.join(self.content)
@@ -91,33 +128,55 @@ class TestDirective(Directive):
# parse doctest-like output comparison flags
option_strings = self.options['options'].replace(',', ' ').split()
for option in option_strings:
- if (option[0] not in '+-' or option[1:] not in
- doctest.OPTIONFLAGS_BY_NAME):
- # XXX warn?
+ prefix, option_name = option[0], option[1:]
+ if prefix not in '+-':
+ self.state.document.reporter.warning(
+ _("missing '+' or '-' in '%s' option.") % option,
+ line=self.lineno)
continue
- flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]]
+ if option_name not in doctest.OPTIONFLAGS_BY_NAME: # type: ignore
+ self.state.document.reporter.warning(
+ _("'%s' is not a valid option.") % option_name,
+ line=self.lineno)
+ continue
+ flag = doctest.OPTIONFLAGS_BY_NAME[option[1:]] # type: ignore
node['options'][flag] = (option[0] == '+')
+ if self.name == 'doctest' and 'pyversion' in self.options:
+ try:
+ option = self.options['pyversion']
+ # :pyversion: >= 3.6 --> operand='>=', option_version='3.6'
+ operand, option_version = [item.strip() for item in option.split()]
+ running_version = platform.python_version()
+ if not compare_version(running_version, option_version, operand):
+ flag = doctest.OPTIONFLAGS_BY_NAME['SKIP'] # type: ignore
+ node['options'][flag] = True # Skip the test
+ except ValueError:
+ self.state.document.reporter.warning(
+ _("'%s' is not a valid pyversion option") % option,
+ line=self.lineno)
return [node]
class TestsetupDirective(TestDirective):
- option_spec = {}
+ option_spec = {} # type: Dict
class TestcleanupDirective(TestDirective):
- option_spec = {}
+ option_spec = {} # type: Dict
class DoctestDirective(TestDirective):
option_spec = {
'hide': directives.flag,
'options': directives.unchanged,
+ 'pyversion': directives.unchanged_required,
}
class TestcodeDirective(TestDirective):
option_spec = {
'hide': directives.flag,
+ 'pyversion': directives.unchanged_required,
}
@@ -125,22 +184,25 @@ class TestoutputDirective(TestDirective):
option_spec = {
'hide': directives.flag,
'options': directives.unchanged,
+ 'pyversion': directives.unchanged_required,
}
-parser = doctest.DocTestParser()
+parser = doctest.DocTestParser() # type: ignore
# helper classes
class TestGroup(object):
def __init__(self, name):
+ # type: (unicode) -> None
self.name = name
- self.setup = []
- self.tests = []
- self.cleanup = []
+ self.setup = [] # type: List[TestCode]
+ self.tests = [] # type: List[List[TestCode]]
+ self.cleanup = [] # type: List[TestCode]
def add_code(self, code, prepend=False):
+ # type: (TestCode, bool) -> None
if code.type == 'testsetup':
if prepend:
self.setup.insert(0, code)
@@ -158,30 +220,34 @@ class TestGroup(object):
else:
raise RuntimeError('invalid TestCode type')
- def __repr__(self):
+ def __repr__(self): # type: ignore
+ # type: () -> unicode
return 'TestGroup(name=%r, setup=%r, cleanup=%r, tests=%r)' % (
self.name, self.setup, self.cleanup, self.tests)
class TestCode(object):
def __init__(self, code, type, lineno, options=None):
+ # type: (unicode, unicode, int, Dict) -> None
self.code = code
self.type = type
self.lineno = lineno
self.options = options or {}
- def __repr__(self):
+ def __repr__(self): # type: ignore
+ # type: () -> unicode
return 'TestCode(%r, %r, %r, options=%r)' % (
self.code, self.type, self.lineno, self.options)
-class SphinxDocTestRunner(doctest.DocTestRunner):
+class SphinxDocTestRunner(doctest.DocTestRunner): # type: ignore
def summarize(self, out, verbose=None):
+ # type: (Callable, bool) -> Tuple[int, int]
string_io = StringIO()
old_stdout = sys.stdout
sys.stdout = string_io
try:
- res = doctest.DocTestRunner.summarize(self, verbose)
+ res = doctest.DocTestRunner.summarize(self, verbose) # type: ignore
finally:
sys.stdout = old_stdout
out(string_io.getvalue())
@@ -189,6 +255,7 @@ class SphinxDocTestRunner(doctest.DocTestRunner):
def _DocTestRunner__patched_linecache_getlines(self, filename,
module_globals=None):
+ # type: (unicode, Any) -> Any
# this is overridden from DocTestRunner adding the try-except below
m = self._DocTestRunner__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
@@ -213,6 +280,7 @@ class DocTestBuilder(Builder):
name = 'doctest'
def init(self):
+ # type: () -> None
# default options
self.opt = self.config.doctest_default_flags
@@ -221,7 +289,7 @@ class DocTestBuilder(Builder):
# for doctest examples but unusable for multi-statement code such
# as setup code -- to be able to use doctest error reporting with
# that code nevertheless, we monkey-patch the "compile" it uses.
- doctest.compile = self.compile
+ doctest.compile = self.compile # type: ignore
sys.path[0:0] = self.config.doctest_path
@@ -236,35 +304,41 @@ class DocTestBuilder(Builder):
date = time.strftime('%Y-%m-%d %H:%M:%S')
- self.outfile = codecs.open(path.join(self.outdir, 'output.txt'),
+ self.outfile = None # type: IO
+ self.outfile = codecs.open(path.join(self.outdir, 'output.txt'), # type: ignore
'w', encoding='utf-8')
- self.outfile.write('''\
-Results of doctest builder run on %s
-==================================%s
-''' % (date, '=' * len(date)))
+ self.outfile.write(('Results of doctest builder run on %s\n' # type: ignore
+ '==================================%s\n') %
+ (date, '=' * len(date)))
def _out(self, text):
- self.info(text, nonl=True)
+ # type: (unicode) -> None
+ logger.info(text, nonl=True)
self.outfile.write(text)
def _warn_out(self, text):
+ # type: (unicode) -> None
if self.app.quiet or self.app.warningiserror:
- self.warn(text)
+ logger.warning(text)
else:
- self.info(text, nonl=True)
+ logger.info(text, nonl=True)
if isinstance(text, binary_type):
text = force_decode(text, None)
self.outfile.write(text)
def get_target_uri(self, docname, typ=None):
+ # type: (unicode, unicode) -> unicode
return ''
def get_outdated_docs(self):
+ # type: () -> Set[unicode]
return self.env.found_docs
def finish(self):
+ # type: () -> None
# write executive summary
def s(v):
+ # type: (int) -> unicode
return v != 1 and 's' or ''
repl = (self.total_tries, s(self.total_tries),
self.total_failures, s(self.total_failures),
@@ -284,17 +358,19 @@ Doctest summary
self.app.statuscode = 1
def write(self, build_docnames, updated_docnames, method='update'):
+ # type: (Iterable[unicode], Sequence[unicode], unicode) -> None
if build_docnames is None:
build_docnames = sorted(self.env.all_docs)
- self.info(bold('running tests...'))
+ logger.info(bold('running tests...'))
for docname in build_docnames:
# no need to resolve the doctree
doctree = self.env.get_doctree(docname)
self.test_doc(docname, doctree)
def test_doc(self, docname, doctree):
- groups = {}
+ # type: (unicode, nodes.Node) -> None
+ groups = {} # type: Dict[unicode, TestGroup]
add_to_all_groups = []
self.setup_runner = SphinxDocTestRunner(verbose=False,
optionflags=self.opt)
@@ -308,19 +384,21 @@ Doctest summary
if self.config.doctest_test_doctest_blocks:
def condition(node):
+ # type: (nodes.Node) -> bool
return (isinstance(node, (nodes.literal_block, nodes.comment)) and
'testnodetype' in node) or \
isinstance(node, nodes.doctest_block)
else:
def condition(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, (nodes.literal_block, nodes.comment)) \
and 'testnodetype' in node
for node in doctree.traverse(condition):
source = 'test' in node and node['test'] or node.astext()
if not source:
- self.warn('no code/output in %s block at %s:%s' %
- (node.get('testnodetype', 'doctest'),
- self.env.doc2path(docname), node.line))
+ logger.warning('no code/output in %s block at %s:%s',
+ node.get('testnodetype', 'doctest'),
+ self.env.doc2path(docname), node.line)
code = TestCode(source, type=node.get('testnodetype', 'doctest'),
lineno=node.line, options=node.get('options'))
node_groups = node.get('groups', ['default'])
@@ -366,26 +444,29 @@ Doctest summary
self.cleanup_tries += res_t
def compile(self, code, name, type, flags, dont_inherit):
+ # type: (unicode, unicode, unicode, Any, bool) -> Any
return compile(code, name, self.type, flags, dont_inherit)
def test_group(self, group, filename):
+ # type: (TestGroup, unicode) -> None
if PY2:
filename_str = filename.encode(fs_encoding)
else:
filename_str = filename
- ns = {}
+ ns = {} # type: Dict
def run_setup_cleanup(runner, testcodes, what):
+ # type: (Any, List[TestCode], Any) -> bool
examples = []
for testcode in testcodes:
- examples.append(doctest.Example(
- doctest_encode(testcode.code, self.env.config.source_encoding), '',
+ examples.append(doctest.Example( # type: ignore
+ doctest_encode(testcode.code, self.env.config.source_encoding), '', # type: ignore # NOQA
lineno=testcode.lineno))
if not examples:
return True
# simulate a doctest with the code
- sim_doctest = doctest.DocTest(examples, {},
+ sim_doctest = doctest.DocTest(examples, {}, # type: ignore
'%s (%s code)' % (group.name, what),
filename_str, 0, None)
sim_doctest.globs = ns
@@ -407,12 +488,11 @@ Doctest summary
# ordinary doctests (code/output interleaved)
try:
test = parser.get_doctest(
- doctest_encode(code[0].code, self.env.config.source_encoding), {},
+ doctest_encode(code[0].code, self.env.config.source_encoding), {}, # type: ignore # NOQA
group.name, filename_str, code[0].lineno)
except Exception:
- self.warn('ignoring invalid doctest code: %r' %
- code[0].code,
- '%s:%s' % (filename, code[0].lineno))
+ logger.warning('ignoring invalid doctest code: %r', code[0].code,
+ location=(filename, code[0].lineno))
continue
if not test.examples:
continue
@@ -427,19 +507,19 @@ Doctest summary
output = code[1] and code[1].code or ''
options = code[1] and code[1].options or {}
# disable <BLANKLINE> processing as it is not needed
- options[doctest.DONT_ACCEPT_BLANKLINE] = True
+ options[doctest.DONT_ACCEPT_BLANKLINE] = True # type: ignore
# find out if we're testing an exception
m = parser._EXCEPTION_RE.match(output)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
- example = doctest.Example(
- doctest_encode(code[0].code, self.env.config.source_encoding), output,
+ example = doctest.Example( # type: ignore
+ doctest_encode(code[0].code, self.env.config.source_encoding), output, # type: ignore # NOQA
exc_msg=exc_msg,
lineno=code[0].lineno,
options=options)
- test = doctest.DocTest([example], {}, group.name,
+ test = doctest.DocTest([example], {}, group.name, # type: ignore
filename_str, code[0].lineno, None)
self.type = 'exec' # multiple statements again
# DocTest.__init__ copies the globs namespace, which we don't want
@@ -452,6 +532,7 @@ Doctest summary
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_directive('testsetup', TestsetupDirective)
app.add_directive('testcleanup', TestcleanupDirective)
app.add_directive('doctest', DoctestDirective)
@@ -465,6 +546,6 @@ def setup(app):
app.add_config_value('doctest_global_cleanup', '', False)
app.add_config_value(
'doctest_default_flags',
- doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL,
+ doctest.DONT_ACCEPT_TRUE_FOR_1 | doctest.ELLIPSIS | doctest.IGNORE_EXCEPTION_DETAIL, # type: ignore # NOQA
False)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/graphviz.py b/sphinx/ext/graphviz.py
index f08636354..0b1b00d85 100644
--- a/sphinx/ext/graphviz.py
+++ b/sphinx/ext/graphviz.py
@@ -18,16 +18,24 @@ from subprocess import Popen, PIPE
from hashlib import sha1
from six import text_type
+
from docutils import nodes
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
from docutils.statemachine import ViewList
import sphinx
from sphinx.errors import SphinxError
from sphinx.locale import _
+from sphinx.util import logging
from sphinx.util.i18n import search_image_for_language
from sphinx.util.osutil import ensuredir, ENOENT, EPIPE, EINVAL
-from sphinx.util.compat import Directive
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+logger = logging.getLogger(__name__)
mapname_re = re.compile(r'<map id="(.*?)"')
@@ -42,6 +50,7 @@ class graphviz(nodes.General, nodes.Inline, nodes.Element):
def figure_wrapper(directive, node, caption):
+ # type: (Directive, nodes.Node, unicode) -> nodes.figure
figure_node = nodes.figure('', node)
if 'align' in node:
figure_node['align'] = node.attributes.pop('align')
@@ -58,6 +67,7 @@ def figure_wrapper(directive, node, caption):
def align_spec(argument):
+ # type: (Any) -> bool
return directives.choice(argument, ('left', 'center', 'right'))
@@ -72,12 +82,13 @@ class Graphviz(Directive):
option_spec = {
'alt': directives.unchanged,
'align': align_spec,
- 'inline': directives.flag,
'caption': directives.unchanged,
'graphviz_dot': directives.unchanged,
+ 'name': directives.unchanged,
}
def run(self):
+ # type: () -> List[nodes.Node]
if self.arguments:
document = self.state.document
if self.content:
@@ -110,13 +121,12 @@ class Graphviz(Directive):
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
- if 'inline' in self.options:
- node['inline'] = True
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
+ self.add_name(node)
return [node]
@@ -131,12 +141,13 @@ class GraphvizSimple(Directive):
option_spec = {
'alt': directives.unchanged,
'align': align_spec,
- 'inline': directives.flag,
'caption': directives.unchanged,
'graphviz_dot': directives.unchanged,
+ 'name': directives.unchanged,
}
def run(self):
+ # type: () -> List[nodes.Node]
node = graphviz()
node['code'] = '%s %s {\n%s\n}\n' % \
(self.name, self.arguments[0], '\n'.join(self.content))
@@ -147,17 +158,17 @@ class GraphvizSimple(Directive):
node['alt'] = self.options['alt']
if 'align' in self.options:
node['align'] = self.options['align']
- if 'inline' in self.options:
- node['inline'] = True
caption = self.options.get('caption')
if caption:
node = figure_wrapper(self, node, caption)
+ self.add_name(node)
return [node]
def render_dot(self, code, options, format, prefix='graphviz'):
+ # type: (nodes.NodeVisitor, unicode, Dict, unicode, unicode) -> Tuple[unicode, unicode]
"""Render graphviz code into a PNG or PDF output file."""
graphviz_dot = options.get('graphviz_dot', self.builder.config.graphviz_dot)
hashkey = (code + str(options) + str(graphviz_dot) +
@@ -190,8 +201,8 @@ def render_dot(self, code, options, format, prefix='graphviz'):
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
- self.builder.warn('dot command %r cannot be run (needed for graphviz '
- 'output), check the graphviz_dot setting' % graphviz_dot)
+ logger.warning('dot command %r cannot be run (needed for graphviz '
+ 'output), check the graphviz_dot setting', graphviz_dot)
if not hasattr(self.builder, '_graphviz_warned_dot'):
self.builder._graphviz_warned_dot = {}
self.builder._graphviz_warned_dot[graphviz_dot] = True
@@ -216,17 +227,9 @@ def render_dot(self, code, options, format, prefix='graphviz'):
return relfn, outfn
-def warn_for_deprecated_option(self, node):
- if hasattr(self.builder, '_graphviz_warned_inline'):
- return
-
- if 'inline' in node:
- self.builder.warn(':inline: option for graphviz is deprecated since version 1.4.0.')
- self.builder._graphviz_warned_inline = True
-
-
def render_dot_html(self, node, code, options, prefix='graphviz',
imgcls=None, alt=None):
+ # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
format = self.builder.config.graphviz_output_format
try:
if format not in ('png', 'svg'):
@@ -234,7 +237,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
"'svg', but is %r" % format)
fname, outfn = render_dot(self, code, options, format, prefix)
except GraphvizError as exc:
- self.builder.warn('dot code %r: ' % code + str(exc))
+ logger.warning('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is None:
@@ -259,7 +262,7 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
(fname, alt, imgcss))
else:
# has a map: get the name of the map and connect the parts
- mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1)
+ mapname = mapname_re.match(imgmap[0].decode('utf-8')).group(1) # type: ignore
self.body.append('<img src="%s" alt="%s" usemap="#%s" %s/>\n' %
(fname, alt, mapname, imgcss))
self.body.extend([item.decode('utf-8') for item in imgmap])
@@ -270,15 +273,16 @@ def render_dot_html(self, node, code, options, prefix='graphviz',
def html_visit_graphviz(self, node):
- warn_for_deprecated_option(self, node)
+ # type: (nodes.NodeVisitor, graphviz) -> None
render_dot_html(self, node, node['code'], node['options'])
def render_dot_latex(self, node, code, options, prefix='graphviz'):
+ # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
try:
fname, outfn = render_dot(self, code, options, 'pdf', prefix)
except GraphvizError as exc:
- self.builder.warn('dot code %r: ' % code + str(exc))
+ logger.warning('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
is_inline = self.is_inline(node)
@@ -288,7 +292,7 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'):
para_separator = '\n'
if fname is not None:
- post = None
+ post = None # type: unicode
if not is_inline and 'align' in node:
if node['align'] == 'left':
self.body.append('{')
@@ -305,15 +309,16 @@ def render_dot_latex(self, node, code, options, prefix='graphviz'):
def latex_visit_graphviz(self, node):
- warn_for_deprecated_option(self, node)
+ # type: (nodes.NodeVisitor, graphviz) -> None
render_dot_latex(self, node, node['code'], node['options'])
def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
+ # type: (nodes.NodeVisitor, graphviz, unicode, Dict, unicode) -> None
try:
fname, outfn = render_dot(self, code, options, 'png', prefix)
except GraphvizError as exc:
- self.builder.warn('dot code %r: ' % code + str(exc))
+ logger.warning('dot code %r: ' % code + str(exc))
raise nodes.SkipNode
if fname is not None:
self.body.append('@image{%s,,,[graphviz],png}\n' % fname[:-4])
@@ -321,12 +326,12 @@ def render_dot_texinfo(self, node, code, options, prefix='graphviz'):
def texinfo_visit_graphviz(self, node):
- warn_for_deprecated_option(self, node)
+ # type: (nodes.NodeVisitor, graphviz) -> None
render_dot_texinfo(self, node, node['code'], node['options'])
def text_visit_graphviz(self, node):
- warn_for_deprecated_option(self, node)
+ # type: (nodes.NodeVisitor, graphviz) -> None
if 'alt' in node.attributes:
self.add_text(_('[graph: %s]') % node['alt'])
else:
@@ -335,7 +340,7 @@ def text_visit_graphviz(self, node):
def man_visit_graphviz(self, node):
- warn_for_deprecated_option(self, node)
+ # type: (nodes.NodeVisitor, graphviz) -> None
if 'alt' in node.attributes:
self.body.append(_('[graph: %s]') % node['alt'])
else:
@@ -344,6 +349,7 @@ def man_visit_graphviz(self, node):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_node(graphviz,
html=(html_visit_graphviz, None),
latex=(latex_visit_graphviz, None),
diff --git a/sphinx/ext/ifconfig.py b/sphinx/ext/ifconfig.py
index 50173be83..036cbdf67 100644
--- a/sphinx/ext/ifconfig.py
+++ b/sphinx/ext/ifconfig.py
@@ -21,10 +21,15 @@
"""
from docutils import nodes
+from docutils.parsers.rst import Directive
import sphinx
from sphinx.util.nodes import set_source_info
-from sphinx.util.compat import Directive
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List # NOQA
+ from sphinx.application import Sphinx # NOQA
class ifconfig(nodes.Element):
@@ -37,9 +42,10 @@ class IfConfig(Directive):
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[nodes.Node]
node = ifconfig()
node.document = self.state.document
set_source_info(self, node)
@@ -50,7 +56,8 @@ class IfConfig(Directive):
def process_ifconfig_nodes(app, doctree, docname):
- ns = dict((k, app.config[k]) for k in app.config.values)
+ # type: (Sphinx, nodes.Node, unicode) -> None
+ ns = dict((confval.name, confval.value) for confval in app.config) # type: ignore
ns.update(app.config.__dict__.copy())
ns['builder'] = app.builder.name
for node in doctree.traverse(ifconfig):
@@ -59,7 +66,7 @@ def process_ifconfig_nodes(app, doctree, docname):
except Exception as err:
# handle exceptions in a clean fashion
from traceback import format_exception_only
- msg = ''.join(format_exception_only(err.__class__, err))
+ msg = ''.join(format_exception_only(err.__class__, err)) # type: ignore
newnode = doctree.reporter.error('Exception occured in '
'ifconfig expression: \n%s' %
msg, base_node=node)
@@ -72,6 +79,7 @@ def process_ifconfig_nodes(app, doctree, docname):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_node(ifconfig)
app.add_directive('ifconfig', IfConfig)
app.connect('doctree-resolved', process_ifconfig_nodes)
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index d57502fc3..4e6bdd035 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -19,21 +19,32 @@ from subprocess import Popen, PIPE
from hashlib import sha1
from six import text_type
+
from docutils import nodes
import sphinx
from sphinx.locale import _
from sphinx.errors import SphinxError, ExtensionError
+from sphinx.util import logging
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT, cd
from sphinx.util.pycompat import sys_encoding
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
+if False:
+ # For type annotation
+ from typing import Any, Dict, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.ext.mathbase import math as math_node, displaymath # NOQA
+
+logger = logging.getLogger(__name__)
+
class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
+ # type: (unicode, unicode, unicode) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
@@ -72,6 +83,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def render_math(self, math):
+ # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int]
"""Render the LaTeX math expression *math* using latex and dvipng or
dvisvgm.
@@ -116,9 +128,8 @@ def render_math(self, math):
else:
tempdir = self.builder._imgmath_tempdir
- tf = codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8')
- tf.write(latex)
- tf.close()
+ with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf:
+ tf.write(latex)
# build latex command; old versions of latex don't have the
# --output-directory option, so we have to manually chdir to the
@@ -134,9 +145,9 @@ def render_math(self, math):
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
- self.builder.warn('LaTeX command %r cannot be run (needed for math '
- 'display), check the imgmath_latex setting' %
- self.builder.config.imgmath_latex)
+ logger.warning('LaTeX command %r cannot be run (needed for math '
+ 'display), check the imgmath_latex setting',
+ self.builder.config.imgmath_latex)
self.builder._imgmath_warned_latex = True
return None, None
@@ -175,10 +186,10 @@ def render_math(self, math):
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
- self.builder.warn('%s command %r cannot be run (needed for math '
- 'display), check the imgmath_%s setting' %
- (image_translator, image_translator_executable,
- image_translator))
+ logger.warning('%s command %r cannot be run (needed for math '
+ 'display), check the imgmath_%s setting',
+ image_translator, image_translator_executable,
+ image_translator)
self.builder._imgmath_warned_image_translator = True
return None, None
@@ -199,23 +210,26 @@ def render_math(self, math):
def cleanup_tempdir(app, exc):
+ # type: (Sphinx, Exception) -> None
if exc:
return
if not hasattr(app.builder, '_imgmath_tempdir'):
return
try:
- shutil.rmtree(app.builder._mathpng_tempdir)
+ shutil.rmtree(app.builder._mathpng_tempdir) # type: ignore
except Exception:
pass
def get_tooltip(self, node):
+ # type: (nodes.NodeVisitor, math_node) -> unicode
if self.builder.config.imgmath_add_tooltips:
return ' alt="%s"' % self.encode(node['latex']).strip()
return ''
def html_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math_node) -> None
try:
fname, depth = render_math(self, '$' + node['latex'] + '$')
except MathExtError as exc:
@@ -223,7 +237,7 @@ def html_visit_math(self, node):
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node['latex'])
sm.walkabout(self)
- self.builder.warn('display latex %r: ' % node['latex'] + msg)
+ logger.warning('display latex %r: %s', node['latex'], msg)
raise nodes.SkipNode
if fname is None:
# something failed -- use text-only as a bad substitute
@@ -238,6 +252,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
if node['nowrap']:
latex = node['latex']
else:
@@ -250,7 +265,7 @@ def html_visit_displaymath(self, node):
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node['latex'])
sm.walkabout(self)
- self.builder.warn('inline latex %r: ' % node['latex'] + msg)
+ logger.warning('inline latex %r: %s', node['latex'], msg)
raise nodes.SkipNode
self.body.append(self.starttag(node, 'div', CLASS='math'))
self.body.append('<p>')
@@ -269,6 +284,7 @@ def html_visit_displaymath(self, node):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
try:
mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None))
except ExtensionError:
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index d0fc2f900..09b60655d 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -42,20 +42,25 @@ import inspect
try:
from hashlib import md5
except ImportError:
- from md5 import md5
+ from md5 import md5 # type: ignore
from six import text_type
from six.moves import builtins
from docutils import nodes
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
import sphinx
from sphinx.ext.graphviz import render_dot_html, render_dot_latex, \
render_dot_texinfo, figure_wrapper
from sphinx.pycode import ModuleAnalyzer
from sphinx.util import force_decode
-from sphinx.util.compat import Directive
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
@@ -64,6 +69,7 @@ module_sig_re = re.compile(r'''^(?:([\w.]*)\.)? # module names
def try_import(objname):
+ # type: (unicode) -> Any
"""Import a object or module using *name* and *currentmodule*.
*name* should be a relative name from *currentmodule* or
a fully-qualified name.
@@ -72,9 +78,9 @@ def try_import(objname):
"""
try:
__import__(objname)
- return sys.modules.get(objname)
+ return sys.modules.get(objname) # type: ignore
except ImportError:
- modname, attrname = module_sig_re.match(objname).groups()
+ modname, attrname = module_sig_re.match(objname).groups() # type: ignore
if modname is None:
return None
try:
@@ -85,6 +91,7 @@ def try_import(objname):
def import_classes(name, currmodule):
+ # type: (unicode, unicode) -> Any
"""Import a class using its fully-qualified *name*."""
target = None
@@ -127,6 +134,7 @@ class InheritanceGraph(object):
"""
def __init__(self, class_names, currmodule, show_builtins=False,
private_bases=False, parts=0):
+ # type: (unicode, str, bool, bool, int) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@@ -141,13 +149,15 @@ class InheritanceGraph(object):
'inheritance diagram')
def _import_classes(self, class_names, currmodule):
+ # type: (unicode, str) -> List[Any]
"""Import a list of classes."""
- classes = []
+ classes = [] # type: List[Any]
for name in class_names:
classes.extend(import_classes(name, currmodule))
return classes
def _class_info(self, classes, show_builtins, private_bases, parts):
+ # type: (List[Any], bool, bool, int) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@@ -158,6 +168,7 @@ class InheritanceGraph(object):
py_builtins = vars(builtins).values()
def recurse(cls):
+ # type: (Any) -> None
if not show_builtins and cls in py_builtins:
return
if not private_bases and cls.__name__.startswith('_'):
@@ -179,7 +190,7 @@ class InheritanceGraph(object):
except Exception: # might raise AttributeError for strange classes
pass
- baselist = []
+ baselist = [] # type: List[unicode]
all_classes[cls] = (nodename, fullname, baselist, tooltip)
for base in cls.__bases__:
if not show_builtins and base in py_builtins:
@@ -196,6 +207,7 @@ class InheritanceGraph(object):
return list(all_classes.values())
def class_name(self, cls, parts=0):
+ # type: (Any, int) -> unicode
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@@ -212,8 +224,9 @@ class InheritanceGraph(object):
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
+ # type: () -> List[unicode]
"""Get all of the class names involved in the graph."""
- return [fullname for (_, fullname, _, _) in self.class_info]
+ return [fullname for (_, fullname, _, _) in self.class_info] # type: ignore
# These are the default attrs for graphviz
default_graph_attrs = {
@@ -234,13 +247,16 @@ class InheritanceGraph(object):
}
def _format_node_attrs(self, attrs):
+ # type: (Dict) -> unicode
return ','.join(['%s=%s' % x for x in sorted(attrs.items())])
def _format_graph_attrs(self, attrs):
+ # type: (Dict) -> unicode
return ''.join(['%s=%s;\n' % x for x in sorted(attrs.items())])
def generate_dot(self, name, urls={}, env=None,
graph_attrs={}, node_attrs={}, edge_attrs={}):
+ # type: (unicode, Dict, BuildEnvironment, Dict, Dict, Dict) -> unicode
"""Generate a graphviz dot graph from the classes that were passed in
to __init__.
@@ -262,7 +278,7 @@ class InheritanceGraph(object):
n_attrs.update(env.config.inheritance_node_attrs)
e_attrs.update(env.config.inheritance_edge_attrs)
- res = []
+ res = [] # type: List[unicode]
res.append('digraph %s {\n' % name)
res.append(self._format_graph_attrs(g_attrs))
@@ -308,6 +324,7 @@ class InheritanceDiagram(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
node = inheritance_diagram()
node.document = self.state.document
env = self.state.document.settings.env
@@ -347,11 +364,13 @@ class InheritanceDiagram(Directive):
def get_graph_hash(node):
+ # type: (inheritance_diagram) -> unicode
encoded = (node['content'] + str(node['parts'])).encode('utf-8')
return md5(encoded).hexdigest()[-10:]
def html_visit_inheritance_diagram(self, node):
+ # type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
@@ -384,6 +403,7 @@ def html_visit_inheritance_diagram(self, node):
def latex_visit_inheritance_diagram(self, node):
+ # type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for LaTeX. This will insert a PDF.
"""
@@ -399,6 +419,7 @@ def latex_visit_inheritance_diagram(self, node):
def texinfo_visit_inheritance_diagram(self, node):
+ # type: (nodes.NodeVisitor, inheritance_diagram) -> None
"""
Output the graph for Texinfo. This will insert a PNG.
"""
@@ -414,10 +435,12 @@ def texinfo_visit_inheritance_diagram(self, node):
def skip(self, node):
+ # type: (nodes.NodeVisitor, inheritance_diagram) -> None
raise nodes.SkipNode
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.setup_extension('sphinx.ext.graphviz')
app.add_node(
inheritance_diagram,
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index e49f5cbfa..32ba26cf9 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -28,105 +28,70 @@ from __future__ import print_function
import sys
import time
-import zlib
-import codecs
import functools
import posixpath
from os import path
-import re
-from six import iteritems, string_types
+from six import PY3, iteritems, string_types
from six.moves.urllib.parse import urlsplit, urlunsplit
+
from docutils import nodes
from docutils.utils import relative_path
import sphinx
from sphinx.locale import _
from sphinx.builders.html import INVENTORY_FILENAME
-from sphinx.util import requests
-
-
-UTF8StreamReader = codecs.lookup('utf-8')[2]
-
-
-def read_inventory_v1(f, uri, join):
- f = UTF8StreamReader(f)
- invdata = {}
- line = next(f)
- projname = line.rstrip()[11:]
- line = next(f)
- version = line.rstrip()[11:]
- for line in f:
- name, type, location = line.rstrip().split(None, 2)
- location = join(uri, location)
- # version 1 did not add anchors to the location
- if type == 'mod':
- type = 'py:module'
- location += '#module-' + name
- else:
- type = 'py:' + type
- location += '#' + name
- invdata.setdefault(type, {})[name] = (projname, version, location, '-')
- return invdata
-
-
-def read_inventory_v2(f, uri, join, bufsize=16 * 1024):
- invdata = {}
- line = f.readline()
- projname = line.rstrip()[11:].decode('utf-8')
- line = f.readline()
- version = line.rstrip()[11:].decode('utf-8')
- line = f.readline().decode('utf-8')
- if 'zlib' not in line:
- raise ValueError
-
- def read_chunks():
- decompressor = zlib.decompressobj()
- for chunk in iter(lambda: f.read(bufsize), b''):
- yield decompressor.decompress(chunk)
- yield decompressor.flush()
-
- def split_lines(iter):
- buf = b''
- for chunk in iter:
- buf += chunk
- lineend = buf.find(b'\n')
- while lineend != -1:
- yield buf[:lineend].decode('utf-8')
- buf = buf[lineend + 1:]
- lineend = buf.find(b'\n')
- assert not buf
-
- for line in split_lines(read_chunks()):
- # be careful to handle names with embedded spaces correctly
- m = re.match(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)',
- line.rstrip())
- if not m:
- continue
- name, type, prio, location, dispname = m.groups()
- if type == 'py:module' and type in invdata and \
- name in invdata[type]: # due to a bug in 1.1 and below,
- # two inventory entries are created
- # for Python modules, and the first
- # one is correct
- continue
- if location.endswith(u'$'):
- location = location[:-1] + name
- location = join(uri, location)
- invdata.setdefault(type, {})[name] = (projname, version,
- location, dispname)
- return invdata
-
-
-def read_inventory(f, uri, join, bufsize=16 * 1024):
- line = f.readline().rstrip().decode('utf-8')
- if line == '# Sphinx inventory version 1':
- return read_inventory_v1(f, uri, join)
- elif line == '# Sphinx inventory version 2':
- return read_inventory_v2(f, uri, join, bufsize=bufsize)
+from sphinx.util import requests, logging
+from sphinx.util.inventory import InventoryFile
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, IO, List, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+ if PY3:
+ unicode = str
+
+ Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]]
+
+logger = logging.getLogger(__name__)
+
+
+class InventoryAdapter(object):
+ """Inventory adapter for environment"""
+
+ def __init__(self, env):
+ self.env = env
+
+ if not hasattr(env, 'intersphinx_cache'):
+ self.env.intersphinx_cache = {}
+ self.env.intersphinx_inventory = {}
+ self.env.intersphinx_named_inventory = {}
+
+ @property
+ def cache(self):
+ # type: () -> Dict[unicode, Tuple[unicode, int, Inventory]]
+ return self.env.intersphinx_cache
+
+ @property
+ def main_inventory(self):
+ # type: () -> Inventory
+ return self.env.intersphinx_inventory
+
+ @property
+ def named_inventory(self):
+ # type: () -> Dict[unicode, Inventory]
+ return self.env.intersphinx_named_inventory
+
+ def clear(self):
+ self.env.intersphinx_inventory.clear()
+ self.env.intersphinx_named_inventory.clear()
def _strip_basic_auth(url):
+ # type: (unicode) -> unicode
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
@@ -148,6 +113,7 @@ def _strip_basic_auth(url):
def _read_from_url(url, config=None):
+ # type: (unicode, Config) -> IO
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
@@ -173,6 +139,7 @@ def _read_from_url(url, config=None):
def _get_safe_url(url):
+ # type: (unicode) -> unicode
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
@@ -198,6 +165,7 @@ def _get_safe_url(url):
def fetch_inventory(app, uri, inv):
+ # type: (Sphinx, unicode, Any) -> Any
"""Fetch, parse and return an intersphinx inventory file."""
# both *uri* (base URI of the links to generate) and *inv* (actual
# location of the inventory file) can be local or remote URIs
@@ -211,47 +179,47 @@ def fetch_inventory(app, uri, inv):
else:
f = open(path.join(app.srcdir, inv), 'rb')
except Exception as err:
- app.warn('intersphinx inventory %r not fetchable due to '
- '%s: %s' % (inv, err.__class__, err))
+ logger.warning('intersphinx inventory %r not fetchable due to %s: %s',
+ inv, err.__class__, err)
return
try:
if hasattr(f, 'url'):
- newinv = f.url
+ newinv = f.url # type: ignore
if inv != newinv:
- app.info('intersphinx inventory has moved: %s -> %s' % (inv, newinv))
+ logger.info('intersphinx inventory has moved: %s -> %s', inv, newinv)
if uri in (inv, path.dirname(inv), path.dirname(inv) + '/'):
uri = path.dirname(newinv)
with f:
try:
join = localuri and path.join or posixpath.join
- invdata = read_inventory(f, uri, join)
+ invdata = InventoryFile.load(f, uri, join)
except ValueError as exc:
raise ValueError('unknown or unsupported inventory version: %r' % exc)
except Exception as err:
- app.warn('intersphinx inventory %r not readable due to '
- '%s: %s' % (inv, err.__class__.__name__, err))
+ logger.warning('intersphinx inventory %r not readable due to %s: %s',
+ inv, err.__class__.__name__, err)
else:
return invdata
def load_mappings(app):
+ # type: (Sphinx) -> None
"""Load all intersphinx mappings into the environment."""
now = int(time.time())
cache_time = now - app.config.intersphinx_cache_limit * 86400
- env = app.builder.env
- if not hasattr(env, 'intersphinx_cache'):
- env.intersphinx_cache = {}
- env.intersphinx_inventory = {}
- env.intersphinx_named_inventory = {}
- cache = env.intersphinx_cache
+ inventories = InventoryAdapter(app.builder.env)
update = False
for key, value in iteritems(app.config.intersphinx_mapping):
+ name = None # type: unicode
+ uri = None # type: unicode
+ inv = None # type: Union[unicode, Tuple[unicode, ...]]
+
if isinstance(value, (list, tuple)):
# new format
- name, (uri, inv) = key, value
+ name, (uri, inv) = key, value # type: ignore
if not isinstance(name, string_types):
- app.warn('intersphinx identifier %r is not string. Ignored' % name)
+ logger.warning('intersphinx identifier %r is not string. Ignored', name)
continue
else:
# old format, no name
@@ -262,27 +230,26 @@ def load_mappings(app):
if not isinstance(inv, tuple):
invs = (inv, )
else:
- invs = inv
+ invs = inv # type: ignore
for inv in invs:
if not inv:
inv = posixpath.join(uri, INVENTORY_FILENAME)
# decide whether the inventory must be read: always read local
# files; remote ones only if the cache time is expired
- if '://' not in inv or uri not in cache \
- or cache[uri][1] < cache_time:
- safe_inv_url = _get_safe_url(inv)
- app.info(
- 'loading intersphinx inventory from %s...' % safe_inv_url)
+ if '://' not in inv or uri not in inventories.cache \
+ or inventories.cache[uri][1] < cache_time:
+ safe_inv_url = _get_safe_url(inv) # type: ignore
+ logger.info('loading intersphinx inventory from %s...', safe_inv_url)
invdata = fetch_inventory(app, uri, inv)
if invdata:
- cache[uri] = (name, now, invdata)
+ inventories.cache[uri] = (name, now, invdata)
update = True
break
if update:
- env.intersphinx_inventory = {}
- env.intersphinx_named_inventory = {}
+ inventories.clear()
+
# Duplicate values in different inventories will shadow each
# other; which one will override which can vary between builds
# since they are specified using an unordered dict. To make
@@ -290,49 +257,48 @@ def load_mappings(app):
# add the unnamed inventories last. This means that the
# unnamed inventories will shadow the named ones but the named
# ones can still be accessed when the name is specified.
- cached_vals = list(cache.values())
+ cached_vals = list(inventories.cache.values())
named_vals = sorted(v for v in cached_vals if v[0])
unnamed_vals = [v for v in cached_vals if not v[0]]
for name, _x, invdata in named_vals + unnamed_vals:
if name:
- env.intersphinx_named_inventory[name] = invdata
+ inventories.named_inventory[name] = invdata
for type, objects in iteritems(invdata):
- env.intersphinx_inventory.setdefault(
- type, {}).update(objects)
+ inventories.main_inventory.setdefault(type, {}).update(objects)
def missing_reference(app, env, node, contnode):
+ # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> None
"""Attempt to resolve a missing reference via intersphinx references."""
target = node['reftarget']
+ inventories = InventoryAdapter(env)
+ objtypes = None # type: List[unicode]
if node['reftype'] == 'any':
# we search anything!
objtypes = ['%s:%s' % (domain.name, objtype)
for domain in env.domains.values()
for objtype in domain.object_types]
domain = None
- elif node['reftype'] == 'doc':
- domain = 'std' # special case
- objtypes = ['std:doc']
else:
domain = node.get('refdomain')
if not domain:
# only objects in domains are in the inventory
return
- objtypes = env.domains[domain].objtypes_for_role(node['reftype'])
+ objtypes = env.get_domain(domain).objtypes_for_role(node['reftype'])
if not objtypes:
return
objtypes = ['%s:%s' % (domain, objtype) for objtype in objtypes]
if 'std:cmdoption' in objtypes:
# until Sphinx-1.6, cmdoptions are stored as std:option
objtypes.append('std:option')
- to_try = [(env.intersphinx_inventory, target)]
+ to_try = [(inventories.main_inventory, target)]
in_set = None
if ':' in target:
# first part may be the foreign doc set name
setname, newtarget = target.split(':', 1)
- if setname in env.intersphinx_named_inventory:
+ if setname in inventories.named_inventory:
in_set = setname
- to_try.append((env.intersphinx_named_inventory[setname], newtarget))
+ to_try.append((inventories.named_inventory[setname], newtarget))
for inventory, target in to_try:
for objtype in objtypes:
if objtype not in inventory or target not in inventory[objtype]:
@@ -366,6 +332,7 @@ def missing_reference(app, env, node, contnode):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_config_value('intersphinx_mapping', {}, True)
app.add_config_value('intersphinx_cache_limit', 5, False)
app.add_config_value('intersphinx_timeout', None, False)
@@ -375,6 +342,7 @@ def setup(app):
def debug(argv):
+ # type: (List[unicode]) -> None
"""Debug functionality to print out an inventory"""
if len(argv) < 2:
print("Print out an inventory file.\n"
@@ -394,7 +362,7 @@ def debug(argv):
print(msg, file=sys.stderr)
filename = argv[1]
- invdata = fetch_inventory(MockApp(), '', filename)
+ invdata = fetch_inventory(MockApp(), '', filename) # type: ignore
for key in sorted(invdata or {}):
print(key)
for entry, einfo in sorted(invdata[key].items()):
@@ -404,4 +372,4 @@ def debug(argv):
if __name__ == '__main__':
- debug(argv=sys.argv)
+ debug(argv=sys.argv) # type: ignore
diff --git a/sphinx/ext/linkcode.py b/sphinx/ext/linkcode.py
index 4bab81bc6..e74ee8529 100644
--- a/sphinx/ext/linkcode.py
+++ b/sphinx/ext/linkcode.py
@@ -16,12 +16,18 @@ from sphinx import addnodes
from sphinx.locale import _
from sphinx.errors import SphinxError
+if False:
+ # For type annotation
+ from typing import Any, Dict, Set # NOQA
+ from sphinx.application import Sphinx # NOQA
+
class LinkcodeError(SphinxError):
category = "linkcode error"
def doctree_read(app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
env = app.builder.env
resolve_target = getattr(env.config, 'linkcode_resolve', None)
@@ -38,7 +44,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
domain = objnode.get('domain')
- uris = set()
+ uris = set() # type: Set[unicode]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -72,6 +78,7 @@ def doctree_read(app, doctree):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.connect('doctree-read', doctree_read)
app.add_config_value('linkcode_resolve', None, '')
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py
index 0aa108149..9ca81d738 100644
--- a/sphinx/ext/mathbase.py
+++ b/sphinx/ext/mathbase.py
@@ -10,13 +10,20 @@
"""
from docutils import nodes, utils
-from docutils.parsers.rst import directives
+from docutils.parsers.rst import Directive, directives
from sphinx.roles import XRefRole
from sphinx.locale import _
from sphinx.domains import Domain
from sphinx.util.nodes import make_refnode, set_source_info
-from sphinx.util.compat import Directive
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterable, List, Tuple # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
class math(nodes.Inline, nodes.TextElement):
@@ -33,6 +40,7 @@ class eqref(nodes.Inline, nodes.TextElement):
class EqXRefRole(XRefRole):
def result_nodes(self, document, env, node, is_ref):
+ # type: (nodes.Node, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
node['refdomain'] = 'math'
return [node], []
@@ -44,22 +52,25 @@ class MathDomain(Domain):
initial_data = {
'objects': {}, # labelid -> (docname, eqno)
- }
+ } # type: Dict[unicode, Dict[unicode, Tuple[unicode, int]]]
dangling_warnings = {
'eq': 'equation not found: %(target)s',
}
def clear_doc(self, docname):
+ # type: (unicode) -> None
for labelid, (doc, eqno) in list(self.data['objects'].items()):
if doc == docname:
del self.data['objects'][labelid]
def merge_domaindata(self, docnames, otherdata):
+ # type: (Iterable[unicode], Dict) -> None
for labelid, (doc, eqno) in otherdata['objects'].items():
if doc in docnames:
self.data['objects'][labelid] = doc
def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node) -> nodes.Node # NOQA
assert typ == 'eq'
docname, number = self.data['objects'].get(target, (None, None))
if docname:
@@ -76,6 +87,7 @@ class MathDomain(Domain):
return None
def resolve_any_xref(self, env, fromdocname, builder, target, node, contnode):
+ # type: (BuildEnvironment, unicode, Builder, unicode, nodes.Node, nodes.Node) -> List[nodes.Node] # NOQA
refnode = self.resolve_xref(env, fromdocname, builder, 'eq', target, node, contnode)
if refnode is None:
return []
@@ -83,9 +95,11 @@ class MathDomain(Domain):
return [refnode]
def get_objects(self):
+ # type: () -> List
return []
def add_equation(self, env, docname, labelid):
+ # type: (BuildEnvironment, unicode, unicode) -> int
equations = self.data['objects']
if labelid in equations:
path = env.doc2path(equations[labelid][0])
@@ -97,12 +111,15 @@ class MathDomain(Domain):
return eqno
def get_next_equation_number(self, docname):
+ # type: (unicode) -> int
targets = [eq for eq in self.data['objects'].values() if eq[0] == docname]
return len(targets) + 1
def wrap_displaymath(math, label, numbering):
+ # type: (unicode, unicode, bool) -> unicode
def is_equation(part):
+ # type: (unicode) -> unicode
return part.strip()
if label is None:
@@ -137,11 +154,13 @@ def wrap_displaymath(math, label, numbering):
def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
latex = utils.unescape(text, restore_backslashes=True)
return [math(latex=latex)], []
def is_in_section_title(node):
+ # type: (nodes.Node) -> bool
"""Determine whether the node is in a section title"""
from sphinx.util.nodes import traverse_parent
@@ -165,6 +184,7 @@ class MathDirective(Directive):
}
def run(self):
+ # type: () -> List[nodes.Node]
latex = '\n'.join(self.content)
if self.arguments and self.arguments[0]:
latex = self.arguments[0] + '\n\n' + latex
@@ -186,6 +206,7 @@ class MathDirective(Directive):
return ret
def add_target(self, ret):
+ # type: (List[nodes.Node]) -> None
node = ret[0]
env = self.state.document.settings.env
@@ -213,6 +234,7 @@ class MathDirective(Directive):
def latex_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math) -> None
if is_in_section_title(node):
protect = r'\protect'
else:
@@ -223,6 +245,7 @@ def latex_visit_math(self, node):
def latex_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
if not node['label']:
label = None
else:
@@ -239,17 +262,20 @@ def latex_visit_displaymath(self, node):
def latex_visit_eqref(self, node):
+ # type: (nodes.NodeVisitor, eqref) -> None
label = "equation:%s:%s" % (node['docname'], node['target'])
self.body.append('\\eqref{%s}' % label)
raise nodes.SkipNode
def text_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math) -> None
self.add_text(node['latex'])
raise nodes.SkipNode
def text_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
self.new_state()
self.add_text(node['latex'])
self.end_state()
@@ -257,24 +283,29 @@ def text_visit_displaymath(self, node):
def man_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math) -> None
self.body.append(node['latex'])
raise nodes.SkipNode
def man_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
self.visit_centered(node)
def man_depart_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
self.depart_centered(node)
def texinfo_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math) -> None
self.body.append('@math{' + self.escape_arg(node['latex']) + '}')
raise nodes.SkipNode
def texinfo_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
if node.get('label'):
self.add_anchor(node['label'], node)
self.body.append('\n\n@example\n%s\n@end example\n\n' %
@@ -282,10 +313,12 @@ def texinfo_visit_displaymath(self, node):
def texinfo_depart_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
pass
def setup_math(app, htmlinlinevisitors, htmldisplayvisitors):
+ # type: (Sphinx, Tuple[Callable, Any], Tuple[Callable, Any]) -> None
app.add_config_value('math_number_all', False, 'env')
app.add_domain(MathDomain)
app.add_node(math, override=True,
diff --git a/sphinx/ext/napoleon/__init__.py b/sphinx/ext/napoleon/__init__.py
index 19a1d385a..f319a18c4 100644
--- a/sphinx/ext/napoleon/__init__.py
+++ b/sphinx/ext/napoleon/__init__.py
@@ -14,8 +14,13 @@ import sys
from six import PY2, iteritems
import sphinx
+from sphinx.application import Sphinx
from sphinx.ext.napoleon.docstring import GoogleDocstring, NumpyDocstring
+if False:
+ # For type annotation
+ from typing import Any, Dict, List # NOQA
+
class Config(object):
"""Sphinx napoleon extension settings in `conf.py`.
@@ -254,6 +259,7 @@ class Config(object):
}
def __init__(self, **settings):
+ # type: (Any) -> None
for name, (default, rebuild) in iteritems(self._config_values):
setattr(self, name, default)
for name, value in iteritems(settings):
@@ -261,6 +267,7 @@ class Config(object):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
"""Sphinx extension setup function.
When the extension is loaded, Sphinx imports this module and executes
@@ -282,7 +289,6 @@ def setup(app):
`The Extension API <http://sphinx-doc.org/extdev/appapi.html>`_
"""
- from sphinx.application import Sphinx
if not isinstance(app, Sphinx):
return # probably called by tests
@@ -297,6 +303,7 @@ def setup(app):
def _patch_python_domain():
+ # type: () -> None
try:
from sphinx.domains.python import PyTypedField
except ImportError:
@@ -317,6 +324,7 @@ def _patch_python_domain():
def _process_docstring(app, what, name, obj, options, lines):
+ # type: (Sphinx, unicode, unicode, Any, Any, List[unicode]) -> None
"""Process the docstring for a given python object.
Called when autodoc has read and processed a docstring. `lines` is a list
@@ -353,6 +361,7 @@ def _process_docstring(app, what, name, obj, options, lines):
"""
result_lines = lines
+ docstring = None # type: GoogleDocstring
if app.config.napoleon_numpy_docstring:
docstring = NumpyDocstring(result_lines, app.config, app, what, name,
obj, options)
@@ -365,6 +374,7 @@ def _process_docstring(app, what, name, obj, options, lines):
def _skip_member(app, what, name, obj, skip, options):
+ # type: (Sphinx, unicode, unicode, Any, bool, Any) -> bool
"""Determine if private and special class members are included in docs.
The following settings in conf.py determine if private and special class
diff --git a/sphinx/ext/napoleon/docstring.py b/sphinx/ext/napoleon/docstring.py
index e887e0246..c77598ef1 100644
--- a/sphinx/ext/napoleon/docstring.py
+++ b/sphinx/ext/napoleon/docstring.py
@@ -21,6 +21,12 @@ from six.moves import range
from sphinx.ext.napoleon.iterators import modify_iter
from sphinx.util.pycompat import UnicodeMixin
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Tuple, Union # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config as SphinxConfig # NOQA
+
_directive_regex = re.compile(r'\.\. \S+::')
_google_section_regex = re.compile(r'^(\s|\w)+:\s*$')
@@ -99,19 +105,20 @@ class GoogleDocstring(UnicodeMixin):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
+ # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
self._config = config
self._app = app
if not self._config:
from sphinx.ext.napoleon import Config
- self._config = self._app and self._app.config or Config()
+ self._config = self._app and self._app.config or Config() # type: ignore
if not what:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
- elif isinstance(obj, collections.Callable):
+ elif isinstance(obj, collections.Callable): # type: ignore
what = 'function'
else:
what = 'object'
@@ -124,11 +131,11 @@ class GoogleDocstring(UnicodeMixin):
docstring = docstring.splitlines()
self._lines = docstring
self._line_iter = modify_iter(docstring, modifier=lambda s: s.rstrip())
- self._parsed_lines = []
+ self._parsed_lines = [] # type: List[unicode]
self._is_in_section = False
self._section_indent = 0
if not hasattr(self, '_directive_sections'):
- self._directive_sections = []
+ self._directive_sections = [] # type: List[unicode]
if not hasattr(self, '_sections'):
self._sections = {
'args': self._parse_parameters_section,
@@ -154,10 +161,11 @@ class GoogleDocstring(UnicodeMixin):
'warns': self._parse_warns_section,
'yield': self._parse_yields_section,
'yields': self._parse_yields_section,
- }
+ } # type: Dict[unicode, Callable]
self._parse()
def __unicode__(self):
+ # type: () -> unicode
"""Return the parsed docstring in reStructuredText format.
Returns
@@ -169,6 +177,7 @@ class GoogleDocstring(UnicodeMixin):
return u('\n').join(self.lines())
def lines(self):
+ # type: () -> List[unicode]
"""Return the parsed lines of the docstring in reStructuredText format.
Returns
@@ -180,38 +189,42 @@ class GoogleDocstring(UnicodeMixin):
return self._parsed_lines
def _consume_indented_block(self, indent=1):
+ # type: (int) -> List[unicode]
lines = []
line = self._line_iter.peek()
while(not self._is_section_break() and
(not line or self._is_indented(line, indent))):
- lines.append(next(self._line_iter))
+ lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_contiguous(self):
+ # type: () -> List[unicode]
lines = []
while (self._line_iter.has_next() and
self._line_iter.peek() and
not self._is_section_header()):
- lines.append(next(self._line_iter))
+ lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_empty(self):
+ # type: () -> List[unicode]
lines = []
line = self._line_iter.peek()
while self._line_iter.has_next() and not line:
- lines.append(next(self._line_iter))
+ lines.append(next(self._line_iter)) # type: ignore
line = self._line_iter.peek()
return lines
def _consume_field(self, parse_type=True, prefer_type=False):
- line = next(self._line_iter)
+ # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ line = next(self._line_iter) # type: ignore
before, colon, after = self._partition_field_on_colon(line)
- _name, _type, _desc = before, '', after
+ _name, _type, _desc = before, '', after # type: unicode, unicode, unicode
if parse_type:
- match = _google_typed_arg_regex.match(before)
+ match = _google_typed_arg_regex.match(before) # type: ignore
if match:
_name = match.group(1)
_type = match.group(2)
@@ -221,11 +234,12 @@ class GoogleDocstring(UnicodeMixin):
if prefer_type and not _type:
_type, _name = _name, _type
indent = self._get_indent(line) + 1
- _desc = [_desc] + self._dedent(self._consume_indented_block(indent))
- _desc = self.__class__(_desc, self._config).lines()
- return _name, _type, _desc
+ _descs = [_desc] + self._dedent(self._consume_indented_block(indent))
+ _descs = self.__class__(_descs, self._config).lines()
+ return _name, _type, _descs
def _consume_fields(self, parse_type=True, prefer_type=False):
+ # type: (bool, bool) -> List[Tuple[unicode, unicode, List[unicode]]]
self._consume_empty()
fields = []
while not self._is_section_break():
@@ -235,19 +249,21 @@ class GoogleDocstring(UnicodeMixin):
return fields
def _consume_inline_attribute(self):
- line = next(self._line_iter)
+ # type: () -> Tuple[unicode, List[unicode]]
+ line = next(self._line_iter) # type: ignore
_type, colon, _desc = self._partition_field_on_colon(line)
if not colon:
_type, _desc = _desc, _type
- _desc = [_desc] + self._dedent(self._consume_to_end())
- _desc = self.__class__(_desc, self._config).lines()
- return _type, _desc
+ _descs = [_desc] + self._dedent(self._consume_to_end())
+ _descs = self.__class__(_descs, self._config).lines()
+ return _type, _descs
def _consume_returns_section(self):
+ # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
lines = self._dedent(self._consume_to_next_section())
if lines:
before, colon, after = self._partition_field_on_colon(lines[0])
- _name, _type, _desc = '', '', lines
+ _name, _type, _desc = '', '', lines # type: unicode, unicode, List[unicode]
if colon:
if after:
@@ -263,30 +279,35 @@ class GoogleDocstring(UnicodeMixin):
return []
def _consume_usage_section(self):
+ # type: () -> List[unicode]
lines = self._dedent(self._consume_to_next_section())
return lines
def _consume_section_header(self):
- section = next(self._line_iter)
+ # type: () -> unicode
+ section = next(self._line_iter) # type: ignore
stripped_section = section.strip(':')
if stripped_section.lower() in self._sections:
section = stripped_section
return section
def _consume_to_end(self):
+ # type: () -> List[unicode]
lines = []
while self._line_iter.has_next():
- lines.append(next(self._line_iter))
+ lines.append(next(self._line_iter)) # type: ignore
return lines
def _consume_to_next_section(self):
+ # type: () -> List[unicode]
self._consume_empty()
lines = []
while not self._is_section_break():
- lines.append(next(self._line_iter))
+ lines.append(next(self._line_iter)) # type: ignore
return lines + self._consume_empty()
def _dedent(self, lines, full=False):
+ # type: (List[unicode], bool) -> List[unicode]
if full:
return [line.lstrip() for line in lines]
else:
@@ -294,6 +315,7 @@ class GoogleDocstring(UnicodeMixin):
return [line[min_indent:] for line in lines]
def _escape_args_and_kwargs(self, name):
+ # type: (unicode) -> unicode
if name[:2] == '**':
return r'\*\*' + name[2:]
elif name[:1] == '*':
@@ -302,29 +324,32 @@ class GoogleDocstring(UnicodeMixin):
return name
def _fix_field_desc(self, desc):
+ # type: (List[unicode]) -> List[unicode]
if self._is_list(desc):
- desc = [''] + desc
+ desc = [u''] + desc
elif desc[0].endswith('::'):
desc_block = desc[1:]
indent = self._get_indent(desc[0])
block_indent = self._get_initial_indent(desc_block)
if block_indent > indent:
- desc = [''] + desc
+ desc = [u''] + desc
else:
desc = ['', desc[0]] + self._indent(desc_block, 4)
return desc
def _format_admonition(self, admonition, lines):
+ # type: (unicode, List[unicode]) -> List[unicode]
lines = self._strip_empty(lines)
if len(lines) == 1:
return ['.. %s:: %s' % (admonition, lines[0].strip()), '']
elif lines:
lines = self._indent(self._dedent(lines), 3)
- return ['.. %s::' % admonition, ''] + lines + ['']
+ return [u'.. %s::' % admonition, u''] + lines + [u'']
else:
- return ['.. %s::' % admonition, '']
+ return [u'.. %s::' % admonition, u'']
def _format_block(self, prefix, lines, padding=None):
+ # type: (unicode, List[unicode], unicode) -> List[unicode]
if lines:
if padding is None:
padding = ' ' * len(prefix)
@@ -342,6 +367,7 @@ class GoogleDocstring(UnicodeMixin):
def _format_docutils_params(self, fields, field_role='param',
type_role='type'):
+ # type: (List[Tuple[unicode, unicode, List[unicode]]], unicode, unicode) -> List[unicode] # NOQA
lines = []
for _name, _type, _desc in fields:
_desc = self._strip_empty(_desc)
@@ -357,13 +383,14 @@ class GoogleDocstring(UnicodeMixin):
return lines + ['']
def _format_field(self, _name, _type, _desc):
+ # type: (unicode, unicode, List[unicode]) -> List[unicode]
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
separator = has_desc and ' -- ' or ''
if _name:
if _type:
if '`' in _type:
- field = '**%s** (%s)%s' % (_name, _type, separator)
+ field = '**%s** (%s)%s' % (_name, _type, separator) # type: unicode
else:
field = '**%s** (*%s*)%s' % (_name, _type, separator)
else:
@@ -386,10 +413,11 @@ class GoogleDocstring(UnicodeMixin):
return [field]
def _format_fields(self, field_type, fields):
+ # type: (unicode, List[Tuple[unicode, unicode, List[unicode]]]) -> List[unicode]
field_type = ':%s:' % field_type.strip()
padding = ' ' * len(field_type)
multi = len(fields) > 1
- lines = []
+ lines = [] # type: List[unicode]
for _name, _type, _desc in fields:
field = self._format_field(_name, _type, _desc)
if multi:
@@ -404,6 +432,7 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _get_current_indent(self, peek_ahead=0):
+ # type: (int) -> int
line = self._line_iter.peek(peek_ahead + 1)[peek_ahead]
while line != self._line_iter.sentinel:
if line:
@@ -413,18 +442,21 @@ class GoogleDocstring(UnicodeMixin):
return 0
def _get_indent(self, line):
+ # type: (unicode) -> int
for i, s in enumerate(line):
if not s.isspace():
return i
return len(line)
def _get_initial_indent(self, lines):
+ # type: (List[unicode]) -> int
for line in lines:
if line:
return self._get_indent(line)
return 0
def _get_min_indent(self, lines):
+ # type: (List[unicode]) -> int
min_indent = None
for line in lines:
if line:
@@ -436,9 +468,11 @@ class GoogleDocstring(UnicodeMixin):
return min_indent or 0
def _indent(self, lines, n=4):
+ # type: (List[unicode], int) -> List[unicode]
return [(' ' * n) + line for line in lines]
def _is_indented(self, line, indent=1):
+ # type: (unicode, int) -> bool
for i, s in enumerate(line):
if i >= indent:
return True
@@ -447,11 +481,12 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_list(self, lines):
+ # type: (List[unicode]) -> bool
if not lines:
return False
- if _bullet_list_regex.match(lines[0]):
+ if _bullet_list_regex.match(lines[0]): # type: ignore
return True
- if _enumerated_list_regex.match(lines[0]):
+ if _enumerated_list_regex.match(lines[0]): # type: ignore
return True
if len(lines) < 2 or lines[0].endswith('::'):
return False
@@ -464,6 +499,7 @@ class GoogleDocstring(UnicodeMixin):
return next_indent > indent
def _is_section_header(self):
+ # type: () -> bool
section = self._line_iter.peek().lower()
match = _google_section_regex.match(section)
if match and section.strip(':') in self._sections:
@@ -478,6 +514,7 @@ class GoogleDocstring(UnicodeMixin):
return False
def _is_section_break(self):
+ # type: () -> bool
line = self._line_iter.peek()
return (not self._line_iter.has_next() or
self._is_section_header() or
@@ -486,6 +523,7 @@ class GoogleDocstring(UnicodeMixin):
not self._is_indented(line, self._section_indent)))
def _parse(self):
+ # type: () -> None
self._parsed_lines = self._consume_empty()
if self._name and (self._what == 'attribute' or self._what == 'data'):
@@ -498,7 +536,7 @@ class GoogleDocstring(UnicodeMixin):
section = self._consume_section_header()
self._is_in_section = True
self._section_indent = self._get_current_indent()
- if _directive_regex.match(section):
+ if _directive_regex.match(section): # type: ignore
lines = [section] + self._consume_to_next_section()
else:
lines = self._sections[section.lower()](section)
@@ -513,42 +551,47 @@ class GoogleDocstring(UnicodeMixin):
self._parsed_lines.extend(lines)
def _parse_attribute_docstring(self):
+ # type: () -> List[unicode]
_type, _desc = self._consume_inline_attribute()
return self._format_field('', _type, _desc)
def _parse_attributes_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = []
for _name, _type, _desc in self._consume_fields():
if self._config.napoleon_use_ivar:
- field = ':ivar %s: ' % _name
+ field = ':ivar %s: ' % _name # type: unicode
lines.extend(self._format_block(field, _desc))
if _type:
lines.append(':vartype %s: %s' % (_name, _type))
else:
lines.extend(['.. attribute:: ' + _name, ''])
- field = self._format_field('', _type, _desc)
- lines.extend(self._indent(field, 3))
+ fields = self._format_field('', _type, _desc)
+ lines.extend(self._indent(fields, 3))
lines.append('')
if self._config.napoleon_use_ivar:
lines.append('')
return lines
def _parse_examples_section(self, section):
+ # type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_examples
return self._parse_generic_section(section, use_admonition)
def _parse_usage_section(self, section):
- header = ['.. rubric:: Usage:', '']
- block = ['.. code-block:: python', '']
+ # type: (unicode) -> List[unicode]
+ header = ['.. rubric:: Usage:', ''] # type: List[unicode]
+ block = ['.. code-block:: python', ''] # type: List[unicode]
lines = self._consume_usage_section()
lines = self._indent(lines, 3)
return header + block + lines + ['']
def _parse_generic_section(self, section, use_admonition):
+ # type: (unicode, bool) -> List[unicode]
lines = self._strip_empty(self._consume_to_next_section())
lines = self._dedent(lines)
if use_admonition:
- header = '.. admonition:: %s' % section
+ header = '.. admonition:: %s' % section # type: unicode
lines = self._indent(lines, 3)
else:
header = '.. rubric:: %s' % section
@@ -558,6 +601,7 @@ class GoogleDocstring(UnicodeMixin):
return [header, '']
def _parse_keyword_arguments_section(self, section):
+ # type: (unicode) -> List[unicode]
fields = self._consume_fields()
if self._config.napoleon_use_keyword:
return self._format_docutils_params(
@@ -568,26 +612,31 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields('Keyword Arguments', fields)
def _parse_methods_section(self, section):
- lines = []
+ # type: (unicode) -> List[unicode]
+ lines = [] # type: List[unicode]
for _name, _, _desc in self._consume_fields(parse_type=False):
lines.append('.. method:: %s' % _name)
if _desc:
- lines.extend([''] + self._indent(_desc, 3))
+ lines.extend([u''] + self._indent(_desc, 3))
lines.append('')
return lines
def _parse_note_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('note', lines)
def _parse_notes_section(self, section):
+ # type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_notes
return self._parse_generic_section('Notes', use_admonition)
def _parse_other_parameters_section(self, section):
+ # type: (unicode) -> List[unicode]
return self._format_fields('Other Parameters', self._consume_fields())
def _parse_parameters_section(self, section):
+ # type: (unicode) -> List[unicode]
fields = self._consume_fields()
if self._config.napoleon_use_param:
return self._format_docutils_params(fields)
@@ -595,11 +644,12 @@ class GoogleDocstring(UnicodeMixin):
return self._format_fields('Parameters', fields)
def _parse_raises_section(self, section):
+ # type: (unicode) -> List[unicode]
fields = self._consume_fields(parse_type=False, prefer_type=True)
field_type = ':raises:'
padding = ' ' * len(field_type)
multi = len(fields) > 1
- lines = []
+ lines = [] # type: List[unicode]
for _, _type, _desc in fields:
_desc = self._strip_empty(_desc)
has_desc = any(_desc)
@@ -633,10 +683,12 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_references_section(self, section):
+ # type: (unicode) -> List[unicode]
use_admonition = self._config.napoleon_use_admonition_for_references
return self._parse_generic_section('References', use_admonition)
def _parse_returns_section(self, section):
+ # type: (unicode) -> List[unicode]
fields = self._consume_returns_section()
multi = len(fields) > 1
if multi:
@@ -644,7 +696,7 @@ class GoogleDocstring(UnicodeMixin):
else:
use_rtype = self._config.napoleon_use_rtype
- lines = []
+ lines = [] # type: List[unicode]
for _name, _type, _desc in fields:
if use_rtype:
field = self._format_field(_name, '', _desc)
@@ -665,30 +717,36 @@ class GoogleDocstring(UnicodeMixin):
return lines
def _parse_see_also_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('seealso', lines)
def _parse_todo_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('todo', lines)
def _parse_warning_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
return self._format_admonition('warning', lines)
def _parse_warns_section(self, section):
+ # type: (unicode) -> List[unicode]
return self._format_fields('Warns', self._consume_fields())
def _parse_yields_section(self, section):
+ # type: (unicode) -> List[unicode]
fields = self._consume_returns_section()
return self._format_fields('Yields', fields)
def _partition_field_on_colon(self, line):
+ # type: (unicode) -> Tuple[unicode, unicode, unicode]
before_colon = []
after_colon = []
colon = ''
found_colon = False
- for i, source in enumerate(_xref_regex.split(line)):
+ for i, source in enumerate(_xref_regex.split(line)): # type: ignore
if found_colon:
after_colon.append(source)
else:
@@ -706,6 +764,7 @@ class GoogleDocstring(UnicodeMixin):
"".join(after_colon).strip())
def _strip_empty(self, lines):
+ # type: (List[unicode]) -> List[unicode]
if lines:
start = -1
for i, line in enumerate(lines):
@@ -820,12 +879,14 @@ class NumpyDocstring(GoogleDocstring):
"""
def __init__(self, docstring, config=None, app=None, what='', name='',
obj=None, options=None):
+ # type: (Union[unicode, List[unicode]], SphinxConfig, Sphinx, unicode, unicode, Any, Any) -> None # NOQA
self._directive_sections = ['.. index::']
super(NumpyDocstring, self).__init__(docstring, config, app, what,
name, obj, options)
def _consume_field(self, parse_type=True, prefer_type=False):
- line = next(self._line_iter)
+ # type: (bool, bool) -> Tuple[unicode, unicode, List[unicode]]
+ line = next(self._line_iter) # type: ignore
if parse_type:
_name, _, _type = self._partition_field_on_colon(line)
else:
@@ -841,16 +902,19 @@ class NumpyDocstring(GoogleDocstring):
return _name, _type, _desc
def _consume_returns_section(self):
+ # type: () -> List[Tuple[unicode, unicode, List[unicode]]]
return self._consume_fields(prefer_type=True)
def _consume_section_header(self):
- section = next(self._line_iter)
+ # type: () -> unicode
+ section = next(self._line_iter) # type: ignore
if not _directive_regex.match(section):
# Consume the header underline
- next(self._line_iter)
+ next(self._line_iter) # type: ignore
return section
def _is_section_break(self):
+ # type: () -> bool
line1, line2 = self._line_iter.peek(2)
return (not self._line_iter.has_next() or
self._is_section_header() or
@@ -860,10 +924,11 @@ class NumpyDocstring(GoogleDocstring):
not self._is_indented(line1, self._section_indent)))
def _is_section_header(self):
+ # type: () -> bool
section, underline = self._line_iter.peek(2)
section = section.lower()
if section in self._sections and isinstance(underline, string_types):
- return bool(_numpy_section_regex.match(underline))
+ return bool(_numpy_section_regex.match(underline)) # type: ignore
elif self._directive_sections:
if _directive_regex.match(section):
for directive_section in self._directive_sections:
@@ -875,6 +940,7 @@ class NumpyDocstring(GoogleDocstring):
r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
def _parse_see_also_section(self, section):
+ # type: (unicode) -> List[unicode]
lines = self._consume_to_next_section()
try:
return self._parse_numpydoc_see_also_section(lines)
@@ -882,6 +948,7 @@ class NumpyDocstring(GoogleDocstring):
return self._format_admonition('seealso', lines)
def _parse_numpydoc_see_also_section(self, content):
+ # type: (List[unicode]) -> List[unicode]
"""
Derived from the NumpyDoc implementation of _parse_see_also.
@@ -914,13 +981,13 @@ class NumpyDocstring(GoogleDocstring):
del rest[:]
current_func = None
- rest = []
+ rest = [] # type: List[unicode]
for line in content:
if not line.strip():
continue
- m = self._name_rgx.match(line)
+ m = self._name_rgx.match(line) # type: ignore
if m and line[m.end():].strip().startswith(':'):
push_item(current_func, rest)
current_func, line = line[:m.end()], line[m.end():]
@@ -960,12 +1027,12 @@ class NumpyDocstring(GoogleDocstring):
'const': 'const',
'attribute': 'attr',
'attr': 'attr'
- }
+ } # type: Dict[unicode, unicode]
if self._what is None:
- func_role = 'obj'
+ func_role = 'obj' # type: unicode
else:
func_role = roles.get(self._what, '')
- lines = []
+ lines = [] # type: List[unicode]
last_had_desc = True
for func, desc, role in items:
if role:
diff --git a/sphinx/ext/napoleon/iterators.py b/sphinx/ext/napoleon/iterators.py
index 1e34711bc..b03bcf047 100644
--- a/sphinx/ext/napoleon/iterators.py
+++ b/sphinx/ext/napoleon/iterators.py
@@ -13,6 +13,10 @@
import collections
+if False:
+ # For type annotation
+ from typing import Any, Iterable # NOQA
+
class peek_iter(object):
"""An iterator object that supports peeking ahead.
@@ -48,34 +52,39 @@ class peek_iter(object):
"""
def __init__(self, *args):
+ # type: (Any) -> None
"""__init__(o, sentinel=None)"""
- self._iterable = iter(*args)
- self._cache = collections.deque()
+ self._iterable = iter(*args) # type: Iterable
+ self._cache = collections.deque() # type: collections.deque
if len(args) == 2:
self.sentinel = args[1]
else:
self.sentinel = object()
def __iter__(self):
+ # type: () -> peek_iter
return self
def __next__(self, n=None):
+ # type: (int) -> Any
# note: prevent 2to3 to transform self.next() in next(self) which
# causes an infinite loop !
return getattr(self, 'next')(n)
def _fillcache(self, n):
+ # type: (int) -> None
"""Cache `n` items. If `n` is 0 or None, then 1 item is cached."""
if not n:
n = 1
try:
while len(self._cache) < n:
- self._cache.append(next(self._iterable))
+ self._cache.append(next(self._iterable)) # type: ignore
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
def has_next(self):
+ # type: () -> bool
"""Determine if iterator is exhausted.
Returns
@@ -91,6 +100,7 @@ class peek_iter(object):
return self.peek() != self.sentinel
def next(self, n=None):
+ # type: (int) -> Any
"""Get the next item or `n` items of the iterator.
Parameters
@@ -126,6 +136,7 @@ class peek_iter(object):
return result
def peek(self, n=None):
+ # type: (int) -> Any
"""Preview the next item or `n` items of the iterator.
The iterator is not advanced when peek is called.
@@ -209,6 +220,7 @@ class modify_iter(peek_iter):
"""
def __init__(self, *args, **kwargs):
+ # type: (Any, Any) -> None
"""__init__(o, sentinel=None, modifier=lambda x: x)"""
if 'modifier' in kwargs:
self.modifier = kwargs['modifier']
@@ -223,6 +235,7 @@ class modify_iter(peek_iter):
super(modify_iter, self).__init__(*args)
def _fillcache(self, n):
+ # type: (int) -> None
"""Cache `n` modified items. If `n` is 0 or None, 1 item is cached.
Each item returned by the iterator is passed through the
@@ -233,7 +246,7 @@ class modify_iter(peek_iter):
n = 1
try:
while len(self._cache) < n:
- self._cache.append(self.modifier(next(self._iterable)))
+ self._cache.append(self.modifier(next(self._iterable))) # type: ignore
except StopIteration:
while len(self._cache) < n:
self._cache.append(self.sentinel)
diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py
index e03c82acf..85010b799 100644
--- a/sphinx/ext/pngmath.py
+++ b/sphinx/ext/pngmath.py
@@ -20,20 +20,31 @@ from subprocess import Popen, PIPE
from hashlib import sha1
from six import text_type
+
from docutils import nodes
import sphinx
from sphinx.errors import SphinxError, ExtensionError
+from sphinx.util import logging
from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT, cd
from sphinx.util.pycompat import sys_encoding
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
+if False:
+ # For type annotation
+ from typing import Any, Dict, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.ext.mathbase import math as math_node, displaymath # NOQA
+
+logger = logging.getLogger(__name__)
+
class MathExtError(SphinxError):
category = 'Math extension error'
def __init__(self, msg, stderr=None, stdout=None):
+ # type: (unicode, unicode, unicode) -> None
if stderr:
msg += '\n[stderr]\n' + stderr.decode(sys_encoding, 'replace')
if stdout:
@@ -71,6 +82,7 @@ depth_re = re.compile(br'\[\d+ depth=(-?\d+)\]')
def render_math(self, math):
+ # type: (nodes.NodeVisitor, unicode) -> Tuple[unicode, int]
"""Render the LaTeX math expression *math* using latex and dvipng.
Return the filename relative to the built document and the "depth",
@@ -107,9 +119,8 @@ def render_math(self, math):
else:
tempdir = self.builder._mathpng_tempdir
- tf = codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8')
- tf.write(latex)
- tf.close()
+ with codecs.open(path.join(tempdir, 'math.tex'), 'w', 'utf-8') as tf:
+ tf.write(latex)
# build latex command; old versions of latex don't have the
# --output-directory option, so we have to manually chdir to the
@@ -125,9 +136,9 @@ def render_math(self, math):
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
- self.builder.warn('LaTeX command %r cannot be run (needed for math '
- 'display), check the pngmath_latex setting' %
- self.builder.config.pngmath_latex)
+ logger.warning('LaTeX command %r cannot be run (needed for math '
+ 'display), check the pngmath_latex setting',
+ self.builder.config.pngmath_latex)
self.builder._mathpng_warned_latex = True
return None, None
@@ -150,9 +161,9 @@ def render_math(self, math):
except OSError as err:
if err.errno != ENOENT: # No such file or directory
raise
- self.builder.warn('dvipng command %r cannot be run (needed for math '
- 'display), check the pngmath_dvipng setting' %
- self.builder.config.pngmath_dvipng)
+ logger.warning('dvipng command %r cannot be run (needed for math '
+ 'display), check the pngmath_dvipng setting',
+ self.builder.config.pngmath_dvipng)
self.builder._mathpng_warned_dvipng = True
return None, None
stdout, stderr = p.communicate()
@@ -171,23 +182,26 @@ def render_math(self, math):
def cleanup_tempdir(app, exc):
+ # type: (Sphinx, Exception) -> None
if exc:
return
if not hasattr(app.builder, '_mathpng_tempdir'):
return
try:
- shutil.rmtree(app.builder._mathpng_tempdir)
+ shutil.rmtree(app.builder._mathpng_tempdir) # type: ignore
except Exception:
pass
def get_tooltip(self, node):
+ # type: (nodes.NodeVisitor, math_node) -> unicode
if self.builder.config.pngmath_add_tooltips:
return ' alt="%s"' % self.encode(node['latex']).strip()
return ''
def html_visit_math(self, node):
+ # type: (nodes.NodeVisitor, math_node) -> None
try:
fname, depth = render_math(self, '$' + node['latex'] + '$')
except MathExtError as exc:
@@ -195,7 +209,7 @@ def html_visit_math(self, node):
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node['latex'])
sm.walkabout(self)
- self.builder.warn('display latex %r: ' % node['latex'] + msg)
+ logger.warning('display latex %r: %s', node['latex'], msg)
raise nodes.SkipNode
if fname is None:
# something failed -- use text-only as a bad substitute
@@ -210,6 +224,7 @@ def html_visit_math(self, node):
def html_visit_displaymath(self, node):
+ # type: (nodes.NodeVisitor, displaymath) -> None
if node['nowrap']:
latex = node['latex']
else:
@@ -222,7 +237,7 @@ def html_visit_displaymath(self, node):
sm = nodes.system_message(msg, type='WARNING', level=2,
backrefs=[], source=node['latex'])
sm.walkabout(self)
- self.builder.warn('inline latex %r: ' % node['latex'] + msg)
+ logger.warning('inline latex %r: %s', node['latex'], msg)
raise nodes.SkipNode
self.body.append(self.starttag(node, 'div', CLASS='math'))
self.body.append('<p>')
@@ -239,7 +254,9 @@ def html_visit_displaymath(self, node):
def setup(app):
- app.warn('sphinx.ext.pngmath has been deprecated. Please use sphinx.ext.imgmath instead.')
+ # type: (Sphinx) -> Dict[unicode, Any]
+ logger.warning('sphinx.ext.pngmath has been deprecated. '
+ 'Please use sphinx.ext.imgmath instead.')
try:
mathbase_setup(app, (html_visit_math, None), (html_visit_displaymath, None))
except ExtensionError:
diff --git a/sphinx/ext/todo.py b/sphinx/ext/todo.py
index cd3563107..4f1ced2bb 100644
--- a/sphinx/ext/todo.py
+++ b/sphinx/ext/todo.py
@@ -18,10 +18,19 @@ from docutils.parsers.rst import directives
import sphinx
from sphinx.locale import _
from sphinx.environment import NoUri
+from sphinx.util import logging
from sphinx.util.nodes import set_source_info
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, List # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
+
class todo_node(nodes.Admonition, nodes.Element):
pass
@@ -46,6 +55,7 @@ class Todo(BaseAdmonition):
}
def run(self):
+ # type: () -> List[nodes.Node]
if not self.options.get('class'):
self.options['class'] = ['admonition-todo']
@@ -63,12 +73,13 @@ class Todo(BaseAdmonition):
def process_todos(app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
# collect all todos in the environment
# this is not done in the directive itself because some transformations
# must have already been run, e.g. substitutions
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
- env.todo_all_todos = []
+ env.todo_all_todos = [] # type: ignore
for node in doctree.traverse(todo_node):
app.emit('todo-defined', node)
@@ -80,7 +91,7 @@ def process_todos(app, doctree):
targetnode = None
newnode = node.deepcopy()
del newnode['ids']
- env.todo_all_todos.append({
+ env.todo_all_todos.append({ # type: ignore
'docname': env.docname,
'source': node.source or env.doc2path(env.docname),
'lineno': node.line,
@@ -89,7 +100,8 @@ def process_todos(app, doctree):
})
if env.config.todo_emit_warnings:
- env.warn_node("TODO entry found: %s" % node[1].astext(), node)
+ logger.warning("TODO entry found: %s", node[1].astext(),
+ location=node)
class TodoList(Directive):
@@ -101,15 +113,17 @@ class TodoList(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
- option_spec = {}
+ option_spec = {} # type: Dict
def run(self):
+ # type: () -> List[todolist]
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
def process_todo_nodes(app, doctree, fromdocname):
+ # type: (Sphinx, nodes.Node, unicode) -> None
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
@@ -119,7 +133,7 @@ def process_todo_nodes(app, doctree, fromdocname):
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
- env.todo_all_todos = []
+ env.todo_all_todos = [] # type: ignore
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
@@ -128,7 +142,7 @@ def process_todo_nodes(app, doctree, fromdocname):
content = []
- for todo_info in env.todo_all_todos:
+ for todo_info in env.todo_all_todos: # type: ignore
para = nodes.paragraph(classes=['todo-source'])
if app.config['todo_link_only']:
description = _('<<original entry>>')
@@ -168,30 +182,35 @@ def process_todo_nodes(app, doctree, fromdocname):
def purge_todos(app, env, docname):
+ # type: (Sphinx, BuildEnvironment, unicode) -> None
if not hasattr(env, 'todo_all_todos'):
return
- env.todo_all_todos = [todo for todo in env.todo_all_todos
+ env.todo_all_todos = [todo for todo in env.todo_all_todos # type: ignore
if todo['docname'] != docname]
def merge_info(app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
- env.todo_all_todos = []
- env.todo_all_todos.extend(other.todo_all_todos)
+ env.todo_all_todos = [] # type: ignore
+ env.todo_all_todos.extend(other.todo_all_todos) # type: ignore
def visit_todo_node(self, node):
+ # type: (nodes.NodeVisitor, todo_node) -> None
self.visit_admonition(node)
# self.visit_admonition(node, 'todo')
def depart_todo_node(self, node):
+ # type: (nodes.NodeVisitor, todo_node) -> None
self.depart_admonition(node)
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index fa5c8721b..6ac8c76be 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -12,51 +12,60 @@
import traceback
from six import iteritems, text_type
+
from docutils import nodes
import sphinx
from sphinx import addnodes
from sphinx.locale import _
from sphinx.pycode import ModuleAnalyzer
-from sphinx.util import get_full_modname
+from sphinx.util import get_full_modname, logging, status_iterator
from sphinx.util.nodes import make_refnode
-from sphinx.util.console import blue
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, Iterable, Iterator, Set, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+logger = logging.getLogger(__name__)
def _get_full_modname(app, modname, attribute):
+ # type: (Sphinx, str, unicode) -> unicode
try:
return get_full_modname(modname, attribute)
except AttributeError:
# sphinx.ext.viewcode can't follow class instance attribute
# then AttributeError logging output only verbose mode.
- app.verbose('Didn\'t find %s in %s' % (attribute, modname))
+ logger.verbose('Didn\'t find %s in %s', attribute, modname)
return None
except Exception as e:
# sphinx.ext.viewcode follow python domain directives.
# because of that, if there are no real modules exists that specified
# by py:function or other directives, viewcode emits a lot of warnings.
# It should be displayed only verbose mode.
- app.verbose(traceback.format_exc().rstrip())
- app.verbose('viewcode can\'t import %s, failed with error "%s"' %
- (modname, e))
+ logger.verbose(traceback.format_exc().rstrip())
+ logger.verbose('viewcode can\'t import %s, failed with error "%s"', modname, e)
return None
def doctree_read(app, doctree):
+ # type: (Sphinx, nodes.Node) -> None
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
- env._viewcode_modules = {}
+ env._viewcode_modules = {} # type: ignore
if app.builder.name == "singlehtml":
return
if app.builder.name.startswith("epub") and not env.config.viewcode_enable_epub:
return
def has_tag(modname, fullname, docname, refname):
- entry = env._viewcode_modules.get(modname, None)
+ entry = env._viewcode_modules.get(modname, None) # type: ignore
try:
analyzer = ModuleAnalyzer.for_module(modname)
except Exception:
- env._viewcode_modules[modname] = False
+ env._viewcode_modules[modname] = False # type: ignore
return
if not isinstance(analyzer.code, text_type):
code = analyzer.code.decode(analyzer.encoding)
@@ -65,7 +74,7 @@ def doctree_read(app, doctree):
if entry is None or entry[0] != code:
analyzer.find_tags()
entry = code, analyzer.tags, {}, refname
- env._viewcode_modules[modname] = entry
+ env._viewcode_modules[modname] = entry # type: ignore
elif entry is False:
return
_, tags, used, _ = entry
@@ -76,7 +85,7 @@ def doctree_read(app, doctree):
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
- names = set()
+ names = set() # type: Set[unicode]
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
@@ -106,16 +115,18 @@ def doctree_read(app, doctree):
def env_merge_info(app, env, docnames, other):
+ # type: (Sphinx, BuildEnvironment, Iterable[unicode], BuildEnvironment) -> None
if not hasattr(other, '_viewcode_modules'):
return
# create a _viewcode_modules dict on the main environment
if not hasattr(env, '_viewcode_modules'):
- env._viewcode_modules = {}
+ env._viewcode_modules = {} # type: ignore
# now merge in the information from the subprocess
- env._viewcode_modules.update(other._viewcode_modules)
+ env._viewcode_modules.update(other._viewcode_modules) # type: ignore
def missing_reference(app, env, node, contnode):
+ # type: (Sphinx, BuildEnvironment, nodes.Node, nodes.Node) -> nodes.Node
# resolve our "viewcode" reference nodes -- they need special treatment
if node['reftype'] == 'viewcode':
return make_refnode(app.builder, node['refdoc'], node['reftarget'],
@@ -123,20 +134,22 @@ def missing_reference(app, env, node, contnode):
def collect_pages(app):
+ # type: (Sphinx) -> Iterator[Tuple[unicode, Dict[unicode, Any], unicode]]
env = app.builder.env
if not hasattr(env, '_viewcode_modules'):
return
- highlighter = app.builder.highlighter
+ highlighter = app.builder.highlighter # type: ignore
urito = app.builder.get_relative_uri
- modnames = set(env._viewcode_modules)
+ modnames = set(env._viewcode_modules) # type: ignore
# app.builder.info(' (%d module code pages)' %
# len(env._viewcode_modules), nonl=1)
- for modname, entry in app.status_iterator(
- iteritems(env._viewcode_modules), 'highlighting module code... ',
- blue, len(env._viewcode_modules), lambda x: x[0]):
+ for modname, entry in status_iterator(iteritems(env._viewcode_modules), # type: ignore
+ 'highlighting module code... ', "blue",
+ len(env._viewcode_modules), # type: ignore
+ app.verbosity, lambda x: x[0]):
if not entry:
continue
code, tags, used, refname = entry
@@ -185,7 +198,7 @@ def collect_pages(app):
'title': modname,
'body': (_('<h1>Source code for %s</h1>') % modname +
'\n'.join(lines)),
- }
+ } # type: Dict[unicode, Any]
yield (pagename, context, 'page.html')
if not modnames:
@@ -218,6 +231,7 @@ def collect_pages(app):
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
app.add_config_value('viewcode_import', True, False)
app.add_config_value('viewcode_enable_epub', False, False)
app.connect('doctree-read', doctree_read)
diff --git a/sphinx/extension.py b/sphinx/extension.py
new file mode 100644
index 000000000..12e80d1a2
--- /dev/null
+++ b/sphinx/extension.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.extension
+ ~~~~~~~~~~~~~~~~
+
+ Utilities for Sphinx extensions.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import traceback
+
+from six import iteritems
+
+from sphinx.errors import ExtensionError, VersionRequirementError
+from sphinx.locale import _
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Dict # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+logger = logging.getLogger(__name__)
+
+
+# list of deprecated extensions. Keys are extension names.
+# Values are the Sphinx version in which the extension was merged.
+EXTENSION_BLACKLIST = {
+ "sphinxjp.themecore": "1.2"
+} # type: Dict[unicode, unicode]
+
+
+class Extension(object):
+ def __init__(self, name, module, **kwargs):
+ self.name = name
+ self.module = module
+ self.metadata = kwargs
+ self.version = kwargs.pop('version', 'unknown version')
+
+ # The extension supports parallel read or not. The default value
+ # is ``None``. It means the extension does not tell the status.
+ # It will be warned on parallel reading.
+ self.parallel_read_safe = kwargs.pop('parallel_read_safe', None)
+
+ # The extension supports parallel write or not. The default value
+ # is ``True``. Sphinx writes documents in parallel even if
+ # the extension does not tell its status.
+ self.parallel_write_safe = kwargs.pop('parallel_write_safe', True)
+
+
+def load_extension(app, extname):
+ # type: (Sphinx, unicode) -> None
+ """Load a Sphinx extension."""
+ if extname in app.extensions: # already loaded
+ return
+ if extname in EXTENSION_BLACKLIST:
+ logger.warning(_('the extension %r was already merged with Sphinx since '
+ 'version %s; this extension is ignored.'),
+ extname, EXTENSION_BLACKLIST[extname])
+ return
+
+ # update loading context
+ app._setting_up_extension.append(extname)
+
+ try:
+ mod = __import__(extname, None, None, ['setup'])
+ except ImportError as err:
+ logger.verbose(_('Original exception:\n') + traceback.format_exc())
+ raise ExtensionError(_('Could not import extension %s') % extname, err)
+
+ if not hasattr(mod, 'setup'):
+ logger.warning(_('extension %r has no setup() function; is it really '
+ 'a Sphinx extension module?'), extname)
+ metadata = {} # type: Dict[unicode, Any]
+ else:
+ try:
+ metadata = mod.setup(app)
+ except VersionRequirementError as err:
+ # add the extension name to the version required
+ raise VersionRequirementError(
+ _('The %s extension used by this project needs at least '
+ 'Sphinx v%s; it therefore cannot be built with this '
+ 'version.') % (extname, err)
+ )
+
+ if metadata is None:
+ metadata = {}
+ if extname == 'rst2pdf.pdfbuilder':
+ metadata['parallel_read_safe'] = True
+ elif not isinstance(metadata, dict):
+ logger.warning(_('extension %r returned an unsupported object from '
+ 'its setup() function; it should return None or a '
+ 'metadata dictionary'), extname)
+
+ app.extensions[extname] = Extension(extname, mod, **metadata)
+ app._setting_up_extension.pop()
+
+
+def verify_required_extensions(app, requirements):
+ # type: (Sphinx, Dict[unicode, unicode]) -> None
+ """Verify the required Sphinx extensions are loaded."""
+ if requirements is None:
+ return
+
+ for extname, reqversion in iteritems(requirements):
+ extension = app.extensions.get(extname)
+ if extension is None:
+ logger.warning(_('needs_extensions config value specifies a '
+ 'version requirement for extension %s, but it is '
+ 'not loaded'), extname)
+ continue
+
+ if extension.version == 'unknown version' or reqversion > extension.version:
+ raise VersionRequirementError(_('This project needs the extension %s at least in '
+ 'version %s and therefore cannot be built with '
+ 'the loaded version (%s).') %
+ (extname, reqversion, extension.version))
diff --git a/sphinx/highlighting.py b/sphinx/highlighting.py
index f22f9d90e..eb309d82a 100644
--- a/sphinx/highlighting.py
+++ b/sphinx/highlighting.py
@@ -11,11 +11,13 @@
from six import text_type
+from sphinx.util import logging
from sphinx.util.pycompat import htmlescape
from sphinx.util.texescape import tex_hl_escape_map_new
from sphinx.ext import doctest
from pygments import highlight
+from pygments.lexer import Lexer # NOQA
from pygments.lexers import PythonLexer, Python3Lexer, PythonConsoleLexer, \
CLexer, TextLexer, RstLexer
from pygments.lexers import get_lexer_by_name, guess_lexer
@@ -25,6 +27,14 @@ from pygments.styles import get_style_by_name
from pygments.util import ClassNotFound
from sphinx.pygments_styles import SphinxStyle, NoneStyle
+if False:
+ # For type annotation
+ from typing import Any, Dict # NOQA
+ from pygments.formatter import Formatter # NOQA
+
+
+logger = logging.getLogger(__name__)
+
lexers = dict(
none = TextLexer(stripnl=False),
python = PythonLexer(stripnl=False),
@@ -33,7 +43,7 @@ lexers = dict(
pycon3 = PythonConsoleLexer(python3=True, stripnl=False),
rest = RstLexer(stripnl=False),
c = CLexer(stripnl=False),
-)
+) # type: Dict[unicode, Lexer]
for _lexer in lexers.values():
_lexer.add_filter('raiseonerror')
@@ -55,8 +65,8 @@ class PygmentsBridge(object):
html_formatter = HtmlFormatter
latex_formatter = LatexFormatter
- def __init__(self, dest='html', stylename='sphinx',
- trim_doctest_flags=False):
+ def __init__(self, dest='html', stylename='sphinx', trim_doctest_flags=False):
+ # type: (unicode, unicode, bool) -> None
self.dest = dest
if stylename is None or stylename == 'sphinx':
style = SphinxStyle
@@ -69,7 +79,7 @@ class PygmentsBridge(object):
else:
style = get_style_by_name(stylename)
self.trim_doctest_flags = trim_doctest_flags
- self.formatter_args = {'style': style}
+ self.formatter_args = {'style': style} # type: Dict[unicode, Any]
if dest == 'html':
self.formatter = self.html_formatter
else:
@@ -77,10 +87,12 @@ class PygmentsBridge(object):
self.formatter_args['commandprefix'] = 'PYG'
def get_formatter(self, **kwargs):
- kwargs.update(self.formatter_args)
+ # type: (Any) -> Formatter
+ kwargs.update(self.formatter_args) # type: ignore
return self.formatter(**kwargs)
def unhighlighted(self, source):
+ # type: (unicode) -> unicode
if self.dest == 'html':
return '<pre>' + htmlescape(source) + '</pre>\n'
else:
@@ -91,7 +103,8 @@ class PygmentsBridge(object):
return '\\begin{Verbatim}[commandchars=\\\\\\{\\}]\n' + \
source + '\\end{Verbatim}\n'
- def highlight_block(self, source, lang, opts=None, warn=None, force=False, **kwargs):
+ def highlight_block(self, source, lang, opts=None, location=None, force=False, **kwargs):
+ # type: (unicode, unicode, Any, Any, bool, Any) -> unicode
if not isinstance(source, text_type):
source = source.decode()
@@ -119,34 +132,31 @@ class PygmentsBridge(object):
try:
lexer = lexers[lang] = get_lexer_by_name(lang, **(opts or {}))
except ClassNotFound:
- if warn:
- warn('Pygments lexer name %r is not known' % lang)
- lexer = lexers['none']
- else:
- raise
+ logger.warning('Pygments lexer name %r is not known', lang,
+ location=location)
+ lexer = lexers['none']
else:
lexer.add_filter('raiseonerror')
# trim doctest options if wanted
if isinstance(lexer, PythonConsoleLexer) and self.trim_doctest_flags:
- source = doctest.blankline_re.sub('', source)
- source = doctest.doctestopt_re.sub('', source)
+ source = doctest.blankline_re.sub('', source) # type: ignore
+ source = doctest.doctestopt_re.sub('', source) # type: ignore
# highlight via Pygments
formatter = self.get_formatter(**kwargs)
try:
hlsource = highlight(source, lexer, formatter)
- except ErrorToken as exc:
+ except ErrorToken:
# this is most probably not the selected language,
# so let it pass unhighlighted
if lang == 'default':
pass # automatic highlighting failed.
- elif warn:
- warn('Could not lex literal_block as "%s". '
- 'Highlighting skipped.' % lang,
- type='misc', subtype='highlighting_failure')
else:
- raise exc
+ logger.warning('Could not lex literal_block as "%s". '
+ 'Highlighting skipped.', lang,
+ type='misc', subtype='highlighting_failure',
+ location=location)
hlsource = highlight(source, lexers['none'], formatter)
if self.dest == 'html':
return hlsource
@@ -156,6 +166,7 @@ class PygmentsBridge(object):
return hlsource.translate(tex_hl_escape_map_new)
def get_stylesheet(self):
+ # type: () -> unicode
formatter = self.get_formatter()
if self.dest == 'html':
return formatter.get_style_defs('.highlight')
diff --git a/sphinx/io.py b/sphinx/io.py
index 1d8f155de..389d4888b 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -12,6 +12,7 @@ from docutils.io import FileInput
from docutils.readers import standalone
from docutils.writers import UnfilteredWriter
from six import string_types, text_type
+from typing import Any, Union # NOQA
from sphinx.transforms import (
ApplySourceWorkaround, ExtraTranslatableNodes, CitationReferences,
@@ -23,6 +24,18 @@ from sphinx.transforms.i18n import (
PreserveTranslatableMessages, Locale, RemoveTranslatableInline,
)
from sphinx.util import import_object, split_docinfo
+from sphinx.util.docutils import LoggingReporter
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple, Union # NOQA
+ from docutils import nodes # NOQA
+ from docutils.io import Input # NOQA
+ from docutils.parsers import Parser # NOQA
+ from docutils.transforms import Transform # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
class SphinxBaseReader(standalone.Reader):
@@ -30,17 +43,19 @@ class SphinxBaseReader(standalone.Reader):
Add our source parsers
"""
def __init__(self, app, parsers={}, *args, **kwargs):
+ # type: (Sphinx, Dict[unicode, Parser], Any, Any) -> None
standalone.Reader.__init__(self, *args, **kwargs)
- self.parser_map = {}
+ self.parser_map = {} # type: Dict[unicode, Parser]
for suffix, parser_class in parsers.items():
if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser')
+ parser_class = import_object(parser_class, 'source parser') # type: ignore
parser = parser_class()
if hasattr(parser, 'set_application'):
parser.set_application(app)
self.parser_map[suffix] = parser
def read(self, source, parser, settings):
+ # type: (Input, Parser, Dict) -> nodes.document
self.source = source
for suffix in self.parser_map:
@@ -56,8 +71,18 @@ class SphinxBaseReader(standalone.Reader):
return self.document
def get_transforms(self):
+ # type: () -> List[Transform]
return standalone.Reader.get_transforms(self) + self.transforms
+ def new_document(self):
+ # type: () -> nodes.document
+ document = standalone.Reader.new_document(self)
+ reporter = document.reporter
+ document.reporter = LoggingReporter(reporter.source, reporter.report_level,
+ reporter.halt_level, reporter.debug_flag,
+ reporter.error_handler)
+ return document
+
class SphinxStandaloneReader(SphinxBaseReader):
"""
@@ -84,17 +109,21 @@ class SphinxI18nReader(SphinxBaseReader):
FilterSystemMessages, RefOnlyBulletListTransform]
def __init__(self, *args, **kwargs):
+ # type: (Any, Any) -> None
SphinxBaseReader.__init__(self, *args, **kwargs)
- self.lineno = None
+ self.lineno = None # type: int
def set_lineno_for_reporter(self, lineno):
+ # type: (int) -> None
self.lineno = lineno
def new_document(self):
+ # type: () -> nodes.document
document = SphinxBaseReader.new_document(self)
reporter = document.reporter
def get_source_and_line(lineno=None):
+ # type: (int) -> Tuple[unicode, int]
return reporter.source, self.lineno
reporter.get_source_and_line = get_source_and_line
@@ -105,28 +134,33 @@ class SphinxDummyWriter(UnfilteredWriter):
supported = ('html',) # needed to keep "meta" nodes
def translate(self):
+ # type: () -> None
pass
class SphinxFileInput(FileInput):
def __init__(self, app, env, *args, **kwds):
+ # type: (Sphinx, BuildEnvironment, Any, Any) -> None
self.app = app
self.env = env
kwds['error_handler'] = 'sphinx' # py3: handle error on open.
FileInput.__init__(self, *args, **kwds)
def decode(self, data):
+ # type: (Union[unicode, bytes]) -> unicode
if isinstance(data, text_type): # py3: `data` already decoded.
return data
return data.decode(self.encoding, 'sphinx') # py2: decoding
def read(self):
+ # type: () -> unicode
def get_parser_type(source_path):
+ # type: (unicode) -> Tuple[unicode]
for suffix in self.env.config.source_parsers:
if source_path.endswith(suffix):
parser_class = self.env.config.source_parsers[suffix]
if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser')
+ parser_class = import_object(parser_class, 'source parser') # type: ignore # NOQA
return parser_class.supported
else:
return ('restructuredtext',)
diff --git a/sphinx/jinja2glue.py b/sphinx/jinja2glue.py
index 3f7c7aa6d..91b3a6e3a 100644
--- a/sphinx/jinja2glue.py
+++ b/sphinx/jinja2glue.py
@@ -17,18 +17,28 @@ from jinja2 import FileSystemLoader, BaseLoader, TemplateNotFound, \
contextfunction
from jinja2.utils import open_if_exists
from jinja2.sandbox import SandboxedEnvironment
+from typing import Any, Callable, Iterator, Tuple # NOQA
from sphinx.application import TemplateBridge
from sphinx.util.osutil import mtimes_of_files
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Iterator, Tuple # NOQA
+ from jinja2.environment import Environment # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.themes import Theme # NOQA
+
def _tobool(val):
+ # type: (unicode) -> bool
if isinstance(val, string_types):
return val.lower() in ('true', '1', 'yes', 'on')
return bool(val)
def _toint(val):
+ # type: (unicode) -> int
try:
return int(val)
except ValueError:
@@ -36,6 +46,7 @@ def _toint(val):
def _slice_index(values, slices):
+ # type: (List, int) -> Iterator[List]
seq = list(values)
length = 0
for value in values:
@@ -57,6 +68,7 @@ def _slice_index(values, slices):
def accesskey(context, key):
+ # type: (Any, unicode) -> unicode
"""Helper to output each access key only once."""
if '_accesskeys' not in context:
context.vars['_accesskeys'] = {}
@@ -68,12 +80,15 @@ def accesskey(context, key):
class idgen(object):
def __init__(self):
+ # type: () -> None
self.id = 0
def current(self):
+ # type: () -> int
return self.id
def __next__(self):
+ # type: () -> int
self.id += 1
return self.id
next = __next__ # Python 2/Jinja compatibility
@@ -86,6 +101,7 @@ class SphinxFileSystemLoader(FileSystemLoader):
"""
def get_source(self, environment, template):
+ # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
for searchpath in self.searchpath:
filename = path.join(searchpath, template)
f = open_if_exists(filename)
@@ -97,6 +113,7 @@ class SphinxFileSystemLoader(FileSystemLoader):
mtime = path.getmtime(filename)
def uptodate():
+ # type: () -> bool
try:
return path.getmtime(filename) == mtime
except OSError:
@@ -113,6 +130,7 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
# TemplateBridge interface
def init(self, builder, theme=None, dirs=None):
+ # type: (Builder, Theme, List[unicode]) -> None
# create a chain of paths to search
if theme:
# the theme's own dir and its bases' dirs
@@ -151,21 +169,24 @@ class BuiltinTemplateLoader(TemplateBridge, BaseLoader):
self.environment.globals['accesskey'] = contextfunction(accesskey)
self.environment.globals['idgen'] = idgen
if use_i18n:
- self.environment.install_gettext_translations(
- builder.app.translator)
+ self.environment.install_gettext_translations(builder.app.translator) # type: ignore # NOQA
- def render(self, template, context):
+ def render(self, template, context): # type: ignore
+ # type: (unicode, Dict) -> unicode
return self.environment.get_template(template).render(context)
def render_string(self, source, context):
+ # type: (unicode, Dict) -> unicode
return self.environment.from_string(source).render(context)
def newest_template_mtime(self):
+ # type: () -> float
return max(mtimes_of_files(self.pathchain, '.html'))
# Loader interface
def get_source(self, environment, template):
+ # type: (Environment, unicode) -> Tuple[unicode, unicode, Callable]
loaders = self.loaders
# exclamation mark starts search from theme
if template.startswith('!'):
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index d6ce7329b..7c0f9b4f9 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -14,6 +14,10 @@ import gettext
from six import PY3, text_type
from six.moves import UserString
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Tuple # NOQA
+
class _TranslationProxy(UserString, object):
"""
@@ -31,24 +35,31 @@ class _TranslationProxy(UserString, object):
__slots__ = ('_func', '_args')
def __new__(cls, func, *args):
+ # type: (Callable, unicode) -> object
if not args:
# not called with "function" and "arguments", but a plain string
return text_type(func)
- return object.__new__(cls)
+ return object.__new__(cls) # type: ignore
def __getnewargs__(self):
- return (self._func,) + self._args
+ # type: () -> Tuple
+ return (self._func,) + self._args # type: ignore
def __init__(self, func, *args):
+ # type: (Callable, unicode) -> None
self._func = func
self._args = args
- data = property(lambda x: x._func(*x._args))
+ @property
+ def data(self):
+ # type: () -> unicode
+ return self._func(*self._args)
# replace function from UserString; it instantiates a self.__class__
# for the encoding result
def encode(self, encoding=None, errors=None):
+ # type: (unicode, unicode) -> str
if encoding:
if errors:
return self.data.encode(encoding, errors)
@@ -58,81 +69,106 @@ class _TranslationProxy(UserString, object):
return self.data.encode()
def __contains__(self, key):
+ # type: (Any) -> bool
return key in self.data
def __bool__(self):
+ # type: () -> bool
return bool(self.data)
__nonzero__ = __bool__ # for python2 compatibility
def __dir__(self):
+ # type: () -> List[str]
return dir(text_type)
def __iter__(self):
+ # type: () -> Iterator[unicode]
return iter(self.data)
def __len__(self):
+ # type: () -> int
return len(self.data)
def __str__(self):
+ # type: () -> str
return str(self.data)
def __unicode__(self):
+ # type: () -> unicode
return text_type(self.data)
def __add__(self, other):
+ # type: (unicode) -> unicode
return self.data + other
def __radd__(self, other):
+ # type: (unicode) -> unicode
return other + self.data
def __mod__(self, other):
+ # type: (unicode) -> unicode
return self.data % other
def __rmod__(self, other):
+ # type: (unicode) -> unicode
return other % self.data
def __mul__(self, other):
+ # type: (Any) -> unicode
return self.data * other
def __rmul__(self, other):
+ # type: (Any) -> unicode
return other * self.data
def __lt__(self, other):
+ # type: (unicode) -> bool
return self.data < other
def __le__(self, other):
+ # type: (unicode) -> bool
return self.data <= other
def __eq__(self, other):
+ # type: (Any) -> bool
return self.data == other
def __ne__(self, other):
+ # type: (Any) -> bool
return self.data != other
def __gt__(self, other):
+ # type: (unicode) -> bool
return self.data > other
def __ge__(self, other):
+ # type: (unicode) -> bool
return self.data >= other
def __getattr__(self, name):
+ # type: (unicode) -> Any
if name == '__members__':
return self.__dir__()
return getattr(self.data, name)
def __getstate__(self):
+ # type: () -> Tuple[Callable, Tuple[unicode, ...]]
return self._func, self._args
def __setstate__(self, tup):
+ # type: (Tuple[Callable, Tuple[unicode]]) -> None
self._func, self._args = tup
def __getitem__(self, key):
+ # type: (Any) -> unicode
return self.data[key]
def __copy__(self):
+ # type: () -> _TranslationProxy
return self
def __repr__(self):
+ # type: () -> str
try:
return 'i' + repr(text_type(self.data))
except:
@@ -140,6 +176,7 @@ class _TranslationProxy(UserString, object):
def mygettext(string):
+ # type: (unicode) -> unicode
"""Used instead of _ when creating TranslationProxies, because _ is
not bound yet at that time.
"""
@@ -147,10 +184,11 @@ def mygettext(string):
def lazy_gettext(string):
+ # type: (unicode) -> unicode
"""A lazy version of `gettext`."""
# if isinstance(string, _TranslationProxy):
# return string
- return _TranslationProxy(mygettext, string)
+ return _TranslationProxy(mygettext, string) # type: ignore
l_ = lazy_gettext
@@ -167,13 +205,13 @@ admonitionlabels = {
'seealso': l_('See also'),
'tip': l_('Tip'),
'warning': l_('Warning'),
-}
+} # type: Dict[unicode, unicode]
versionlabels = {
'versionadded': l_('New in version %s'),
'versionchanged': l_('Changed in version %s'),
'deprecated': l_('Deprecated since version %s'),
-}
+} # type: Dict[unicode, unicode]
# XXX Python specific
pairindextypes = {
@@ -184,19 +222,28 @@ pairindextypes = {
'exception': l_('exception'),
'statement': l_('statement'),
'builtin': l_('built-in function'),
-}
+} # Dict[unicode, _TranslationProxy]
-translators = {}
+translators = {} # type: Dict[unicode, Any]
if PY3:
def _(message):
- return translators['sphinx'].gettext(message)
+ # type: (unicode) -> unicode
+ try:
+ return translators['sphinx'].gettext(message)
+ except KeyError:
+ return message
else:
def _(message):
- return translators['sphinx'].ugettext(message)
+ # type: (unicode) -> unicode
+ try:
+ return translators['sphinx'].ugettext(message)
+ except KeyError:
+ return message
def init(locale_dirs, language, catalog='sphinx'):
+ # type: (List, unicode, unicode) -> Tuple[Any, bool]
"""Look for message catalogs in `locale_dirs` and *ensure* that there is at
least a NullTranslations catalog set in `translators`. If called multiple
times or if several ``.mo`` files are found, their contents are merged
@@ -213,12 +260,12 @@ def init(locale_dirs, language, catalog='sphinx'):
# loading
for dir_ in locale_dirs:
try:
- trans = gettext.translation(catalog, localedir=dir_,
- languages=[language])
+ trans = gettext.translation(catalog, localedir=dir_, # type: ignore
+ languages=[language]) # type: ignore
if translator is None:
translator = trans
else:
- translator._catalog.update(trans._catalog)
+ translator._catalog.update(trans._catalog) # type: ignore
except Exception:
# Language couldn't be found in the specified path
pass
@@ -230,3 +277,14 @@ def init(locale_dirs, language, catalog='sphinx'):
if hasattr(translator, 'ugettext'):
translator.gettext = translator.ugettext
return translator, has_translation
+
+
+def get_translator(catalog='sphinx'):
+ # type: (unicode) -> gettext.NullTranslations
+ global translators
+ translator = translators.get(catalog)
+ if translator is None:
+ translator = gettext.NullTranslations()
+ if hasattr(translator, 'ugettext'):
+ translator.gettext = translator.ugettext
+ return translator
diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py
index bb57934d0..512dc5be3 100644
--- a/sphinx/make_mode.py
+++ b/sphinx/make_mode.py
@@ -22,9 +22,13 @@ from os import path
import sphinx
from sphinx import cmdline
-from sphinx.util.console import bold, blue
+from sphinx.util.console import bold, blue # type: ignore
from sphinx.util.osutil import cd, rmtree
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
proj_name = os.getenv('SPHINXPROJ', '<project>')
@@ -59,71 +63,89 @@ BUILDERS = [
class Make(object):
def __init__(self, srcdir, builddir, opts):
+ # type: (unicode, unicode, List[unicode]) -> None
self.srcdir = srcdir
self.builddir = builddir
self.opts = opts
self.makecmd = os.environ.get('MAKE', 'make') # refer $MAKE to determine make command
def builddir_join(self, *comps):
+ # type: (unicode) -> unicode
return path.join(self.builddir, *comps)
def build_clean(self):
+ # type: () -> int
if not path.exists(self.builddir):
- return
+ return 0
elif not path.isdir(self.builddir):
print("Error: %r is not a directory!" % self.builddir)
return 1
print("Removing everything under %r..." % self.builddir)
for item in os.listdir(self.builddir):
rmtree(self.builddir_join(item))
+ return 0
def build_help(self):
+ # type: () -> None
print(bold("Sphinx v%s" % sphinx.__display_version__))
- print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2))
+ print("Please use `make %s' where %s is one of" % ((blue('target'),) * 2)) # type: ignore # NOQA
for osname, bname, description in BUILDERS:
if not osname or os.name == osname:
print(' %s %s' % (blue(bname.ljust(10)), description))
def build_html(self):
+ # type: () -> int
if self.run_generic_build('html') > 0:
return 1
print()
print('Build finished. The HTML pages are in %s.' % self.builddir_join('html'))
+ return 0
def build_dirhtml(self):
+ # type: () -> int
if self.run_generic_build('dirhtml') > 0:
return 1
print()
print('Build finished. The HTML pages are in %s.' %
self.builddir_join('dirhtml'))
+ return 0
def build_singlehtml(self):
+ # type: () -> int
if self.run_generic_build('singlehtml') > 0:
return 1
print()
print('Build finished. The HTML page is in %s.' %
self.builddir_join('singlehtml'))
+ return 0
def build_pickle(self):
+ # type: () -> int
if self.run_generic_build('pickle') > 0:
return 1
print()
print('Build finished; now you can process the pickle files.')
+ return 0
def build_json(self):
+ # type: () -> int
if self.run_generic_build('json') > 0:
return 1
print()
print('Build finished; now you can process the JSON files.')
+ return 0
def build_htmlhelp(self):
+ # type: () -> int
if self.run_generic_build('htmlhelp') > 0:
return 1
print()
print('Build finished; now you can run HTML Help Workshop with the '
'.hhp project file in %s.' % self.builddir_join('htmlhelp'))
+ return 0
def build_qthelp(self):
+ # type: () -> int
if self.run_generic_build('qthelp') > 0:
return 1
print()
@@ -133,8 +155,10 @@ class Make(object):
print('To view the help file:')
print('$ assistant -collectionFile %s.qhc' %
self.builddir_join('qthelp', proj_name))
+ return 0
def build_devhelp(self):
+ # type: () -> int
if self.run_generic_build('devhelp') > 0:
return 1
print()
@@ -144,40 +168,52 @@ class Make(object):
print("$ ln -s %s $HOME/.local/share/devhelp/%s" %
(self.builddir_join('devhelp'), proj_name))
print("$ devhelp")
+ return 0
def build_epub(self):
+ # type: () -> int
if self.run_generic_build('epub') > 0:
return 1
print()
print('Build finished. The ePub file is in %s.' % self.builddir_join('epub'))
+ return 0
def build_latex(self):
+ # type: () -> int
if self.run_generic_build('latex') > 0:
return 1
print("Build finished; the LaTeX files are in %s." % self.builddir_join('latex'))
if os.name == 'posix':
print("Run `make' in that directory to run these through (pdf)latex")
print("(use `make latexpdf' here to do that automatically).")
+ return 0
def build_latexpdf(self):
+ # type: () -> int
if self.run_generic_build('latex') > 0:
return 1
with cd(self.builddir_join('latex')):
os.system('%s all-pdf' % self.makecmd)
+ return 0
def build_latexpdfja(self):
+ # type: () -> int
if self.run_generic_build('latex') > 0:
return 1
with cd(self.builddir_join('latex')):
os.system('%s all-pdf-ja' % self.makecmd)
+ return 0
def build_text(self):
+ # type: () -> int
if self.run_generic_build('text') > 0:
return 1
print()
print('Build finished. The text files are in %s.' % self.builddir_join('text'))
+ return 0
def build_texinfo(self):
+ # type: () -> int
if self.run_generic_build('texinfo') > 0:
return 1
print("Build finished; the Texinfo files are in %s." %
@@ -185,29 +221,37 @@ class Make(object):
if os.name == 'posix':
print("Run `make' in that directory to run these through makeinfo")
print("(use `make info' here to do that automatically).")
+ return 0
def build_info(self):
+ # type: () -> int
if self.run_generic_build('texinfo') > 0:
return 1
with cd(self.builddir_join('texinfo')):
os.system('%s info' % self.makecmd)
+ return 0
def build_gettext(self):
+ # type: () -> int
dtdir = self.builddir_join('gettext', '.doctrees')
if self.run_generic_build('gettext', doctreedir=dtdir) > 0:
return 1
print()
print('Build finished. The message catalogs are in %s.' %
self.builddir_join('gettext'))
+ return 0
def build_changes(self):
+ # type: () -> int
if self.run_generic_build('changes') > 0:
return 1
print()
print('Build finished. The overview file is in %s.' %
self.builddir_join('changes'))
+ return 0
def build_linkcheck(self):
+ # type: () -> int
res = self.run_generic_build('linkcheck')
print()
print('Link check complete; look for any errors in the above output '
@@ -215,38 +259,46 @@ class Make(object):
return res
def build_doctest(self):
+ # type: () -> int
res = self.run_generic_build('doctest')
print("Testing of doctests in the sources finished, look at the "
"results in %s." % self.builddir_join('doctest', 'output.txt'))
return res
def build_coverage(self):
+ # type: () -> int
if self.run_generic_build('coverage') > 0:
print("Has the coverage extension been enabled?")
return 1
print()
print("Testing of coverage in the sources finished, look at the "
"results in %s." % self.builddir_join('coverage'))
+ return 0
def build_xml(self):
+ # type: () -> int
if self.run_generic_build('xml') > 0:
return 1
print()
print('Build finished. The XML files are in %s.' % self.builddir_join('xml'))
+ return 0
def build_pseudoxml(self):
+ # type: () -> int
if self.run_generic_build('pseudoxml') > 0:
return 1
print()
print('Build finished. The pseudo-XML files are in %s.' %
self.builddir_join('pseudoxml'))
+ return 0
def run_generic_build(self, builder, doctreedir=None):
+ # type: (unicode, unicode) -> int
# compatibility with old Makefile
papersize = os.getenv('PAPER', '')
opts = self.opts
if papersize in ('a4', 'letter'):
- opts.extend(['-D', 'latex_paper_size=' + papersize])
+ opts.extend(['-D', 'latex_elements.papersize=' + papersize])
if doctreedir is None:
doctreedir = self.builddir_join('doctrees')
@@ -259,6 +311,7 @@ class Make(object):
def run_make_mode(args):
+ # type: (List[unicode]) -> int
if len(args) < 3:
print('Error: at least 3 arguments (builder, source '
'dir, build dir) are required.', file=sys.stderr)
diff --git a/sphinx/parsers.py b/sphinx/parsers.py
index b74ec6a65..9aba947d3 100644
--- a/sphinx/parsers.py
+++ b/sphinx/parsers.py
@@ -11,6 +11,10 @@
import docutils.parsers
+if False:
+ # For type annotation
+ from sphinx.application import Sphinx # NOQA
+
class Parser(docutils.parsers.Parser):
"""
@@ -33,6 +37,7 @@ class Parser(docutils.parsers.Parser):
"""
def set_application(self, app):
+ # type: (Sphinx) -> None
"""set_application will be called from Sphinx to set app and other instance variables
:param sphinx.application.Sphinx app: Sphinx application object
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 8dbc95da8..eabcc8188 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -24,6 +24,10 @@ from sphinx.util import get_module_source, detect_encoding
from sphinx.util.pycompat import TextIOWrapper
from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+
# load the Python grammar
_grammarfile = path.join(package_dir, 'pycode',
@@ -48,7 +52,7 @@ number2name.update(token.tok_name)
_eq = nodes.Leaf(token.EQUAL, '=')
-emptyline_re = re.compile('^\s*(#.*)?$')
+emptyline_re = re.compile(r'^\s*(#.*)?$')
class AttrDocVisitor(nodes.NodeVisitor):
@@ -63,10 +67,10 @@ class AttrDocVisitor(nodes.NodeVisitor):
self.scope = scope
self.in_init = 0
self.encoding = encoding
- self.namespace = []
- self.collected = {}
+ self.namespace = [] # type: List[unicode]
+ self.collected = {} # type: Dict[Tuple[unicode, unicode], unicode]
self.tagnumber = 0
- self.tagorder = {}
+ self.tagorder = {} # type: Dict[unicode, int]
def add_tag(self, name):
name = '.'.join(self.namespace + [name])
@@ -102,10 +106,10 @@ class AttrDocVisitor(nodes.NodeVisitor):
parent = node.parent
idx = parent.children.index(node) + 1
while idx < len(parent):
- if parent[idx].type == sym.SEMI:
+ if parent[idx].type == sym.SEMI: # type: ignore
idx += 1
continue # skip over semicolon
- if parent[idx].type == sym.NEWLINE:
+ if parent[idx].type == sym.NEWLINE: # type: ignore
prefix = parent[idx].get_prefix()
if not isinstance(prefix, text_type):
prefix = prefix.decode(self.encoding)
@@ -138,8 +142,8 @@ class AttrDocVisitor(nodes.NodeVisitor):
prev = node.get_prev_sibling()
if not prev:
return
- if prev.type == sym.simple_stmt and \
- prev[0].type == sym.expr_stmt and _eq in prev[0].children:
+ if (prev.type == sym.simple_stmt and # type: ignore
+ prev[0].type == sym.expr_stmt and _eq in prev[0].children): # type: ignore
# need to "eval" the string because it's returned in its
# original form
docstring = literals.evalString(node[0].value, self.encoding)
@@ -178,7 +182,7 @@ class AttrDocVisitor(nodes.NodeVisitor):
class ModuleAnalyzer(object):
# cache for analyzer objects -- caches both by module and file name
- cache = {}
+ cache = {} # type: Dict[Tuple[unicode, unicode], Any]
@classmethod
def for_string(cls, string, modname, srcname='<string>'):
@@ -240,14 +244,14 @@ class ModuleAnalyzer(object):
self.source.seek(pos)
# will be filled by tokenize()
- self.tokens = None
+ self.tokens = None # type: List[unicode]
# will be filled by parse()
- self.parsetree = None
+ self.parsetree = None # type: Any
# will be filled by find_attr_docs()
- self.attr_docs = None
- self.tagorder = None
+ self.attr_docs = None # type: List[unicode]
+ self.tagorder = None # type: Dict[unicode, int]
# will be filled by find_tags()
- self.tags = None
+ self.tags = None # type: List[unicode]
def tokenize(self):
"""Generate tokens from the source."""
@@ -289,9 +293,10 @@ class ModuleAnalyzer(object):
return self.tags
self.tokenize()
result = {}
- namespace = []
- stack = []
+ namespace = [] # type: List[unicode]
+ stack = [] # type: List[Tuple[unicode, unicode, unicode, int]]
indent = 0
+ decopos = None
defline = False
expect_indent = False
emptylines = 0
@@ -301,7 +306,7 @@ class ModuleAnalyzer(object):
if tokentup[0] not in ignore:
yield tokentup
tokeniter = tokeniter()
- for type, tok, spos, epos, line in tokeniter:
+ for type, tok, spos, epos, line in tokeniter: # type: ignore
if expect_indent and type != token.NL:
if type != token.INDENT:
# no suite -- one-line definition
@@ -312,11 +317,15 @@ class ModuleAnalyzer(object):
result[fullname] = (dtype, startline, endline - emptylines)
expect_indent = False
if tok in ('def', 'class'):
- name = next(tokeniter)[1]
+ name = next(tokeniter)[1] # type: ignore
namespace.append(name)
fullname = '.'.join(namespace)
- stack.append((tok, fullname, spos[0], indent))
+ stack.append((tok, fullname, decopos or spos[0], indent))
defline = True
+ decopos = None
+ elif type == token.OP and tok == '@':
+ if decopos is None:
+ decopos = spos[0]
elif type == token.INDENT:
expect_indent = False
indent += 1
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
index 2dae4f6ee..cecde9bd0 100644
--- a/sphinx/pycode/nodes.py
+++ b/sphinx/pycode/nodes.py
@@ -9,12 +9,16 @@
:license: BSD, see LICENSE for details.
"""
+if False:
+ # For type annotation
+ from typing import Callable # NOQA
+
class BaseNode(object):
"""
Node superclass for both terminal and nonterminal nodes.
"""
- parent = None
+ parent = None # type: BaseNode
def _eq(self, other):
raise NotImplementedError
@@ -29,7 +33,7 @@ class BaseNode(object):
return NotImplemented
return not self._eq(other)
- __hash__ = None
+ __hash__ = None # type: Callable[[object], int]
def get_prev_sibling(self):
"""Return previous child in parent's children, or None."""
@@ -204,5 +208,5 @@ class NodeVisitor(object):
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
if isinstance(node, Node):
- for child in node:
+ for child in node: # type: ignore
self.visit(child)
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
index 42e6d72ee..ac276776e 100644
--- a/sphinx/pycode/pgen2/grammar.py
+++ b/sphinx/pycode/pgen2/grammar.py
@@ -19,6 +19,10 @@ import pickle
# Local imports
from sphinx.pycode.pgen2 import token
+if False:
+ # For type annotation
+ from typing import Dict, List, Tuple # NOQA
+
class Grammar(object):
"""Pgen parsing tables tables conversion class.
@@ -75,14 +79,14 @@ class Grammar(object):
"""
def __init__(self):
- self.symbol2number = {}
- self.number2symbol = {}
- self.states = []
- self.dfas = {}
+ self.symbol2number = {} # type: Dict[unicode, int]
+ self.number2symbol = {} # type: Dict[int, unicode]
+ self.states = [] # type: List[List[List[Tuple[int, int]]]]
+ self.dfas = {} # type: Dict[int, Tuple[List[List[Tuple[int, int]]], unicode]]
self.labels = [(0, "EMPTY")]
- self.keywords = {}
- self.tokens = {}
- self.symbol2label = {}
+ self.keywords = {} # type: Dict[unicode, unicode]
+ self.tokens = {} # type: Dict[unicode, unicode]
+ self.symbol2label = {} # type: Dict[unicode, unicode]
self.start = 256
def dump(self, filename):
diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py
index 60eec05ea..660a47e68 100644
--- a/sphinx/pycode/pgen2/parse.py
+++ b/sphinx/pycode/pgen2/parse.py
@@ -13,6 +13,10 @@ how this parsing engine works.
# Local imports
from sphinx.pycode.pgen2 import token
+if False:
+ # For type annotation
+ from typing import Any, List, Set, Tuple # NOQA
+
class ParseError(Exception):
"""Exception to signal the parser is stuck."""
@@ -104,11 +108,12 @@ class Parser(object):
# Each stack entry is a tuple: (dfa, state, node).
# A node is a tuple: (type, value, context, children),
# where children is a list of nodes or None, and context may be None.
- newnode = (start, None, None, [])
+ newnode = (start, None, None, []) # type: Tuple[unicode, unicode, unicode, List]
stackentry = (self.grammar.dfas[start], 0, newnode)
self.stack = [stackentry]
- self.rootnode = None
- self.used_names = set() # Aliased to self.rootnode.used_names in pop()
+ self.rootnode = None # type: Any
+ self.used_names = set() # type: Set[unicode]
+ # Aliased to self.rootnode.used_names in pop()
def addtoken(self, type, value, context):
"""Add a token; return True iff this is the end of the program."""
@@ -175,7 +180,7 @@ class Parser(object):
def shift(self, type, value, newstate, context):
"""Shift a token. (Internal)"""
dfa, state, node = self.stack[-1]
- newnode = (type, value, context, None)
+ newnode = (type, value, context, None) # type: Tuple[unicode, unicode, unicode, List]
newnode = self.convert(self.grammar, newnode)
if newnode is not None:
node[-1].append(newnode)
@@ -184,7 +189,7 @@ class Parser(object):
def push(self, type, newdfa, newstate, context):
"""Push a nonterminal. (Internal)"""
dfa, state, node = self.stack[-1]
- newnode = (type, None, context, [])
+ newnode = (type, None, context, []) # type: Tuple[unicode, unicode, unicode, List]
self.stack[-1] = (dfa, newstate, node)
self.stack.append((newdfa, 0, newnode))
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
index 7598e6abc..8d9cc786a 100644
--- a/sphinx/pycode/pgen2/pgen.py
+++ b/sphinx/pycode/pgen2/pgen.py
@@ -7,9 +7,13 @@ from six import iteritems
from collections import OrderedDict
# Pgen imports
-
from sphinx.pycode.pgen2 import grammar, token, tokenize
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+
+
class PgenGrammar(grammar.Grammar):
pass
@@ -27,7 +31,8 @@ class ParserGenerator(object):
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
- self.first = {} # map from symbol name to set of tokens
+ self.first = {} # type: Dict[unicode, List[unicode]]
+ # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
@@ -42,7 +47,7 @@ class ParserGenerator(object):
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
- states = []
+ states = [] # type: List[List[Tuple[int, int]]]
for state in dfa:
arcs = []
for label, next in iteritems(state.arcs):
@@ -122,7 +127,7 @@ class ParserGenerator(object):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
- totalset = {}
+ totalset = {} # type: Dict[unicode, int]
overlapcheck = {}
for label, next in iteritems(state.arcs):
if label in self.dfas:
@@ -138,7 +143,7 @@ class ParserGenerator(object):
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
- inverse = {}
+ inverse = {} # type: Dict[unicode, unicode]
for label, itsfirst in sorted(overlapcheck.items()):
for symbol in sorted(itsfirst):
if symbol in inverse:
@@ -180,7 +185,7 @@ class ParserGenerator(object):
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
- base = {}
+ base = {} # type: Dict
addclosure(state, base)
return base
def addclosure(state, base):
@@ -193,7 +198,7 @@ class ParserGenerator(object):
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
- arcs = {}
+ arcs = {} # type: Dict[unicode, Dict]
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
@@ -343,7 +348,8 @@ class ParserGenerator(object):
class NFAState(object):
def __init__(self):
- self.arcs = [] # list of (label, NFAState) pairs
+ self.arcs = [] # type: List[Tuple[unicode, Any]]
+ # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
@@ -361,7 +367,8 @@ class DFAState(object):
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
- self.arcs = OrderedDict() # map from label to DFAState
+ self.arcs = OrderedDict() # type: OrderedDict
+ # map from label to DFAState
def __hash__(self):
return hash(tuple(self.arcs))
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
index c7013bf91..8b533d422 100644
--- a/sphinx/pycode/pgen2/tokenize.py
+++ b/sphinx/pycode/pgen2/tokenize.py
@@ -37,6 +37,10 @@ from six import PY3
from sphinx.pycode.pgen2.token import *
from sphinx.pycode.pgen2 import token
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
"generate_tokens", "untokenize"]
del token
@@ -183,7 +187,7 @@ def tokenize_loop(readline, tokeneater):
class Untokenizer:
def __init__(self):
- self.tokens = []
+ self.tokens = [] # type: List[unicode]
self.prev_row = 1
self.prev_col = 0
@@ -294,17 +298,17 @@ def generate_tokens(readline):
if contstr: # continued string
if not line:
- raise TokenError("EOF in multi-line string", strstart)
- endmatch = endprog.match(line)
+ raise TokenError("EOF in multi-line string", strstart) # type: ignore
+ endmatch = endprog.match(line) # type: ignore
if endmatch:
pos = end = endmatch.end(0)
yield (STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line)
+ strstart, (lnum, end), contline + line) # type: ignore
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield (ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline)
+ strstart, (lnum, len(line)), contline) # type: ignore
contstr = ''
contline = None
continue
@@ -333,7 +337,7 @@ def generate_tokens(readline):
yield (NL, line[nl_pos:],
(lnum, nl_pos), (lnum, len(line)), line)
else:
- yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
+ yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], # type: ignore
(lnum, pos), (lnum, len(line)), line)
continue
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index aeac1fb72..50d200e60 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -36,11 +36,16 @@ from docutils.utils import column_width
from sphinx import __display_version__, package_dir
from sphinx.util.osutil import make_filename
-from sphinx.util.console import purple, bold, red, turquoise, \
- nocolor, color_terminal
+from sphinx.util.console import ( # type: ignore
+ purple, bold, red, turquoise, nocolor, color_terminal
+)
from sphinx.util.template import SphinxRenderer
from sphinx.util import texescape
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Pattern # NOQA
+
TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
DEFAULT_VALUE = {
@@ -65,6 +70,7 @@ PROMPT_PREFIX = '> '
def mkdir_p(dir):
+ # type: (unicode) -> None
if path.isdir(dir):
return
os.makedirs(dir)
@@ -72,6 +78,7 @@ def mkdir_p(dir):
# function to get input from terminal -- overridden by the test suite
def term_input(prompt):
+ # type: (unicode) -> unicode
print(prompt, end='')
return input('')
@@ -81,6 +88,7 @@ class ValidationError(Exception):
def is_path(x):
+ # type: (unicode) -> unicode
x = path.expanduser(x)
if path.exists(x) and not path.isdir(x):
raise ValidationError("Please enter a valid path name.")
@@ -88,17 +96,21 @@ def is_path(x):
def allow_empty(x):
+ # type: (unicode) -> unicode
return x
def nonempty(x):
+ # type: (unicode) -> unicode
if not x:
raise ValidationError("Please enter some text.")
return x
def choice(*l):
+ # type: (unicode) -> Callable[[unicode], unicode]
def val(x):
+ # type: (unicode) -> unicode
if x not in l:
raise ValidationError('Please enter one of %s.' % ', '.join(l))
return x
@@ -106,12 +118,14 @@ def choice(*l):
def boolean(x):
+ # type: (unicode) -> bool
if x.upper() not in ('Y', 'YES', 'N', 'NO'):
raise ValidationError("Please enter either 'y' or 'n'.")
return x.upper() in ('Y', 'YES')
def suffix(x):
+ # type: (unicode) -> unicode
if not (x[0:1] == '.' and len(x) > 1):
raise ValidationError("Please enter a file suffix, "
"e.g. '.rst' or '.txt'.")
@@ -119,10 +133,12 @@ def suffix(x):
def ok(x):
+ # type: (unicode) -> unicode
return x
def term_decode(text):
+ # type: (unicode) -> unicode
if isinstance(text, text_type):
return text
@@ -144,9 +160,10 @@ def term_decode(text):
def do_prompt(d, key, text, default=None, validator=nonempty):
+ # type: (Dict, unicode, unicode, unicode, Callable[[unicode], Any]) -> None
while True:
if default is not None:
- prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default)
+ prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
else:
prompt = PROMPT_PREFIX + text + ': '
if PY2:
@@ -178,6 +195,7 @@ def do_prompt(d, key, text, default=None, validator=nonempty):
def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
+ # type: (unicode, Pattern) -> unicode
# remove Unicode literal prefixes
if PY3:
return rex.sub('\\1', source)
@@ -187,10 +205,12 @@ def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
class QuickstartRenderer(SphinxRenderer):
def __init__(self, templatedir):
+ # type: (unicode) -> None
self.templatedir = templatedir or ''
super(QuickstartRenderer, self).__init__()
def render(self, template_name, context):
+ # type: (unicode, Dict) -> unicode
user_template = path.join(self.templatedir, path.basename(template_name))
if self.templatedir and path.exists(user_template):
return self.render_from_file(user_template, context)
@@ -199,6 +219,7 @@ class QuickstartRenderer(SphinxRenderer):
def ask_user(d):
+ # type: (Dict) -> None
"""Ask the user for quickstart values missing from *d*.
Values are:
@@ -374,6 +395,7 @@ directly.''')
def generate(d, overwrite=True, silent=False, templatedir=None):
+ # type: (Dict, bool, bool, unicode) -> None
"""Generate project based on values in *d*."""
template = QuickstartRenderer(templatedir=templatedir)
@@ -431,6 +453,7 @@ def generate(d, overwrite=True, silent=False, templatedir=None):
mkdir_p(path.join(srcdir, d['dot'] + 'static'))
def write_file(fpath, content, newline=None):
+ # type: (unicode, unicode, unicode) -> None
if overwrite or not path.isfile(fpath):
print('Creating file %s.' % fpath)
with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
@@ -487,6 +510,7 @@ where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
def usage(argv, msg=None):
+ # type: (List[unicode], unicode) -> None
if msg:
print(msg, file=sys.stderr)
print(file=sys.stderr)
@@ -503,6 +527,7 @@ For more information, visit <http://sphinx-doc.org/>.
def valid_dir(d):
+ # type: (Dict) -> bool
dir = d['path']
if not path.exists(dir):
return True
@@ -533,6 +558,7 @@ def valid_dir(d):
class MyFormatter(optparse.IndentedHelpFormatter):
def format_usage(self, usage):
+ # type: (str) -> str
return usage
def format_help(self, formatter):
@@ -545,6 +571,7 @@ class MyFormatter(optparse.IndentedHelpFormatter):
def main(argv=sys.argv):
+ # type: (List[str]) -> int
if not color_terminal():
nocolor()
@@ -681,6 +708,7 @@ def main(argv=sys.argv):
print('Invalid template variable: %s' % variable)
generate(d, templatedir=opts.templatedir)
+ return 0
if __name__ == '__main__':
diff --git a/sphinx/roles.py b/sphinx/roles.py
index 333cbbed7..7b5880873 100644
--- a/sphinx/roles.py
+++ b/sphinx/roles.py
@@ -21,6 +21,13 @@ from sphinx.util import ws_re
from sphinx.util.nodes import split_explicit_title, process_index_entry, \
set_role_source_info
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple, Type # NOQA
+ from docutils.parsers.rst.states import Inliner # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
generic_docroles = {
'command': addnodes.literal_strong,
@@ -67,6 +74,7 @@ class XRefRole(object):
def __init__(self, fix_parens=False, lowercase=False,
nodeclass=None, innernodeclass=None, warn_dangling=False):
+ # type: (bool, bool, Type[nodes.Node], Type[nodes.Node], bool) -> None
self.fix_parens = fix_parens
self.lowercase = lowercase
self.warn_dangling = warn_dangling
@@ -76,6 +84,7 @@ class XRefRole(object):
self.innernodeclass = innernodeclass
def _fix_parens(self, env, has_explicit_title, title, target):
+ # type: (BuildEnvironment, bool, unicode, unicode) -> Tuple[unicode, unicode]
if not has_explicit_title:
if title.endswith('()'):
# remove parentheses
@@ -90,6 +99,7 @@ class XRefRole(object):
def __call__(self, typ, rawtext, text, lineno, inliner,
options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
env = inliner.document.settings.env
if not typ:
typ = env.temp_data.get('default_role')
@@ -100,7 +110,7 @@ class XRefRole(object):
else:
typ = typ.lower()
if ':' not in typ:
- domain, role = '', typ
+ domain, role = '', typ # type: unicode, unicode
classes = ['xref', role]
else:
domain, role = typ.split(':', 1)
@@ -127,7 +137,7 @@ class XRefRole(object):
refnode = self.nodeclass(rawtext, reftype=role, refdomain=domain,
refexplicit=has_explicit_title)
# we may need the line number for warnings
- set_role_source_info(inliner, lineno, refnode)
+ set_role_source_info(inliner, lineno, refnode) # type: ignore
title, target = self.process_link(
env, refnode, has_explicit_title, title, target)
# now that the target and title are finally determined, set them
@@ -142,6 +152,7 @@ class XRefRole(object):
# methods that can be overwritten
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
"""Called after parsing title and target text, and creating the
reference node (given in *refnode*). This method can alter the
reference node and must return a new (or the same) ``(title, target)``
@@ -150,6 +161,7 @@ class XRefRole(object):
return title, ws_re.sub(' ', target)
def result_nodes(self, document, env, node, is_ref):
+ # type: (nodes.document, BuildEnvironment, nodes.Node, bool) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
"""Called before returning the finished nodes. *node* is the reference
node if one was created (*is_ref* is then true), else the content node.
This method can add other nodes and must return a ``(nodes, messages)``
@@ -160,6 +172,7 @@ class XRefRole(object):
class AnyXRefRole(XRefRole):
def process_link(self, env, refnode, has_explicit_title, title, target):
+ # type: (BuildEnvironment, nodes.reference, bool, unicode, unicode) -> Tuple[unicode, unicode] # NOQA
result = XRefRole.process_link(self, env, refnode, has_explicit_title,
title, target)
# add all possible context info (i.e. std:program, py:module etc.)
@@ -167,8 +180,8 @@ class AnyXRefRole(XRefRole):
return result
-def indexmarkup_role(typ, rawtext, text, lineno, inliner,
- options={}, content=[]):
+def indexmarkup_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
"""Role for PEP/RFC references that generate an index entry."""
env = inliner.document.settings.env
if not typ:
@@ -186,7 +199,7 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner,
indexnode['entries'] = [
('single', _('Python Enhancement Proposals; PEP %s') % target,
targetid, '', None)]
- anchor = ''
+ anchor = '' # type: unicode
anchorindex = target.find('#')
if anchorindex > 0:
target, anchor = target[:anchorindex], target[anchorindex:]
@@ -227,16 +240,19 @@ def indexmarkup_role(typ, rawtext, text, lineno, inliner,
classes=[typ])
rn += sn
return [indexnode, targetnode, rn], []
+ else:
+ raise ValueError('unknown role type: %s' % typ)
_amp_re = re.compile(r'(?<!&)&(?![&\s])')
def menusel_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
text = utils.unescape(text)
if typ == 'menuselection':
text = text.replace('-->', u'\N{TRIANGULAR BULLET}')
- spans = _amp_re.split(text)
+ spans = _amp_re.split(text) # type: ignore
node = nodes.inline(rawtext=rawtext)
for i, span in enumerate(spans):
@@ -263,10 +279,11 @@ _litvar_re = re.compile('{([^}]+)}')
def emph_literal_role(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
text = utils.unescape(text)
pos = 0
retnode = nodes.literal(role=typ.lower(), classes=[typ])
- for m in _litvar_re.finditer(text):
+ for m in _litvar_re.finditer(text): # type: ignore
if m.start() > pos:
txt = text[pos:m.start()]
retnode += nodes.Text(txt, txt)
@@ -277,12 +294,13 @@ def emph_literal_role(typ, rawtext, text, lineno, inliner,
return [retnode], []
-_abbr_re = re.compile('\((.*)\)$', re.S)
+_abbr_re = re.compile(r'\((.*)\)$', re.S)
def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
text = utils.unescape(text)
- m = _abbr_re.search(text)
+ m = _abbr_re.search(text) # type: ignore
if m is None:
return [addnodes.abbreviation(text, text, **options)], []
abbr = text[:m.start()].strip()
@@ -293,6 +311,7 @@ def abbr_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ # type: (unicode, unicode, unicode, int, Inliner, Dict, List[unicode]) -> Tuple[List[nodes.Node], List[nodes.Node]] # NOQA
# create new reference target
env = inliner.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
@@ -315,7 +334,7 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
entries = [('single', target, targetid, main, None)]
indexnode = addnodes.index()
indexnode['entries'] = entries
- set_role_source_info(inliner, lineno, indexnode)
+ set_role_source_info(inliner, lineno, indexnode) # type: ignore
textnode = nodes.Text(title, title)
return [indexnode, targetnode, textnode], []
@@ -323,8 +342,6 @@ def index_role(typ, rawtext, text, lineno, inliner, options={}, content=[]):
specific_docroles = {
# links to download references
'download': XRefRole(nodeclass=addnodes.download_reference),
- # links to documents
- 'doc': XRefRole(warn_dangling=True, innernodeclass=nodes.inline),
# links to anything
'any': AnyXRefRole(warn_dangling=True),
@@ -340,6 +357,7 @@ specific_docroles = {
def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
from docutils.parsers.rst import roles
for rolename, nodeclass in iteritems(generic_docroles):
diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py
index 9289bc909..ef74d3b71 100644
--- a/sphinx/search/__init__.py
+++ b/sphinx/search/__init__.py
@@ -9,17 +9,25 @@
:license: BSD, see LICENSE for details.
"""
import re
+from os import path
from six import iteritems, itervalues, text_type, string_types
from six.moves import cPickle as pickle
+
from docutils.nodes import raw, comment, title, Text, NodeVisitor, SkipNode
-from os import path
import sphinx
from sphinx.util import jsdump, rpartition
from sphinx.util.pycompat import htmlescape
from sphinx.search.jssplitter import splitter_code
+if False:
+ # For type annotation
+ from typing import Any, Dict, IO, Iterable, List, Tuple, Type, Set # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
class SearchLanguage(object):
"""
This class is the base class for search natural language preprocessors. If
@@ -42,10 +50,10 @@ class SearchLanguage(object):
This class is used to preprocess search word which Sphinx HTML readers
type, before searching index. Default implementation does nothing.
"""
- lang = None
- language_name = None
- stopwords = set()
- js_stemmer_rawcode = None
+ lang = None # type: unicode
+ language_name = None # type: unicode
+ stopwords = set() # type: Set[unicode]
+ js_stemmer_rawcode = None # type: unicode
js_stemmer_code = """
/**
* Dummy stemmer for languages without stemming rules.
@@ -55,28 +63,32 @@ var Stemmer = function() {
return w;
}
}
-"""
+""" # type: unicode
_word_re = re.compile(r'\w+(?u)')
def __init__(self, options):
+ # type: (Dict) -> None
self.options = options
self.init(options)
def init(self, options):
+ # type: (Dict) -> None
"""
Initialize the class with the options the user has given.
"""
def split(self, input):
+ # type: (unicode) -> List[unicode]
"""
This method splits a sentence into words. Default splitter splits input
at white spaces, which should be enough for most languages except CJK
languages.
"""
- return self._word_re.findall(input)
+ return self._word_re.findall(input) # type: ignore
def stem(self, word):
+ # type: (unicode) -> unicode
"""
This method implements stemming algorithm of the Python version.
@@ -90,6 +102,7 @@ var Stemmer = function() {
return word
def word_filter(self, word):
+ # type: (unicode) -> bool
"""
Return true if the target word should be registered in the search index.
This method is called after stemming.
@@ -107,6 +120,7 @@ from sphinx.search.en import SearchEnglish
def parse_stop_word(source):
+ # type: (unicode) -> Set[unicode]
"""
parse snowball style word list like this:
@@ -138,7 +152,7 @@ languages = {
'sv': 'sphinx.search.sv.SearchSwedish',
'tr': 'sphinx.search.tr.SearchTurkish',
'zh': 'sphinx.search.zh.SearchChinese',
-}
+} # type: Dict[unicode, Any]
class _JavaScriptIndex(object):
@@ -151,9 +165,11 @@ class _JavaScriptIndex(object):
SUFFIX = ')'
def dumps(self, data):
+ # type: (Any) -> unicode
return self.PREFIX + jsdump.dumps(data) + self.SUFFIX
def loads(self, s):
+ # type: (str) -> Any
data = s[len(self.PREFIX):-len(self.SUFFIX)]
if not data or not s.startswith(self.PREFIX) or not \
s.endswith(self.SUFFIX):
@@ -161,9 +177,11 @@ class _JavaScriptIndex(object):
return jsdump.loads(data)
def dump(self, data, f):
+ # type: (Any, IO) -> None
f.write(self.dumps(data))
def load(self, f):
+ # type: (IO) -> Any
return self.loads(f.read())
@@ -176,12 +194,14 @@ class WordCollector(NodeVisitor):
"""
def __init__(self, document, lang):
+ # type: (nodes.Node, SearchLanguage) -> None
NodeVisitor.__init__(self, document)
- self.found_words = []
- self.found_title_words = []
+ self.found_words = [] # type: List[unicode]
+ self.found_title_words = [] # type: List[unicode]
self.lang = lang
def is_meta_keywords(self, node, nodetype):
+ # type: (nodes.Node, Type) -> bool
if isinstance(node, sphinx.addnodes.meta) and node.get('name') == 'keywords':
meta_lang = node.get('lang')
if meta_lang is None: # lang not specified
@@ -192,6 +212,7 @@ class WordCollector(NodeVisitor):
return False
def dispatch_visit(self, node):
+ # type: (nodes.Node) -> None
nodetype = type(node)
if issubclass(nodetype, comment):
raise SkipNode
@@ -223,28 +244,29 @@ class IndexBuilder(object):
formats = {
'jsdump': jsdump,
'pickle': pickle
- }
+ } # type: Dict[unicode, Any]
def __init__(self, env, lang, options, scoring):
+ # type: (BuildEnvironment, unicode, Dict, unicode) -> None
self.env = env
- # docname -> title
- self._titles = {}
- # docname -> filename
- self._filenames = {}
- # stemmed word -> set(docname)
- self._mapping = {}
- # stemmed words in titles -> set(docname)
- self._title_mapping = {}
- # word -> stemmed word
- self._stem_cache = {}
- # objtype -> index
- self._objtypes = {}
- # objtype index -> (domain, type, objname (localized))
- self._objnames = {}
- # add language-specific SearchLanguage instance
- lang_class = languages.get(lang)
+ self._titles = {} # type: Dict[unicode, unicode]
+ # docname -> title
+ self._filenames = {} # type: Dict[unicode, unicode]
+ # docname -> filename
+ self._mapping = {} # type: Dict[unicode, Set[unicode]]
+ # stemmed word -> set(docname)
+ self._title_mapping = {} # type: Dict[unicode, Set[unicode]]
+ # stemmed words in titles -> set(docname)
+ self._stem_cache = {} # type: Dict[unicode, unicode]
+ # word -> stemmed word
+ self._objtypes = {} # type: Dict[Tuple[unicode, unicode], int]
+ # objtype -> index
+ self._objnames = {} # type: Dict[int, Tuple[unicode, unicode, unicode]]
+ # objtype index -> (domain, type, objname (localized))
+ lang_class = languages.get(lang) # type: Type[SearchLanguage]
+ # add language-specific SearchLanguage instance
if lang_class is None:
- self.lang = SearchEnglish(options)
+ self.lang = SearchEnglish(options) # type: SearchLanguage
elif isinstance(lang_class, str):
module, classname = lang_class.rsplit('.', 1)
lang_class = getattr(__import__(module, None, None, [classname]),
@@ -262,6 +284,7 @@ class IndexBuilder(object):
self.js_splitter_code = splitter_code
def load(self, stream, format):
+ # type: (IO, Any) -> None
"""Reconstruct from frozen data."""
if isinstance(format, string_types):
format = self.formats[format]
@@ -275,6 +298,7 @@ class IndexBuilder(object):
self._titles = dict(zip(index2fn, frozen['titles']))
def load_terms(mapping):
+ # type: (Dict[unicode, Any]) -> Dict[unicode, Set[unicode]]
rv = {}
for k, v in iteritems(mapping):
if isinstance(v, int):
@@ -288,13 +312,15 @@ class IndexBuilder(object):
# no need to load keywords/objtypes
def dump(self, stream, format):
+ # type: (IO, Any) -> None
"""Dump the frozen index to a stream."""
if isinstance(format, string_types):
format = self.formats[format]
format.dump(self.freeze(), stream)
def get_objects(self, fn2index):
- rv = {}
+ # type: (Dict[unicode, int]) -> Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]] # NOQA
+ rv = {} # type: Dict[unicode, Dict[unicode, Tuple[int, int, int, unicode]]]
otypes = self._objtypes
onames = self._objnames
for domainname, domain in sorted(iteritems(self.env.domains)):
@@ -321,7 +347,7 @@ class IndexBuilder(object):
else:
onames[typeindex] = (domainname, type, type)
if anchor == fullname:
- shortanchor = ''
+ shortanchor = '' # type: unicode
elif anchor == type + '-' + fullname:
shortanchor = '-'
else:
@@ -330,7 +356,8 @@ class IndexBuilder(object):
return rv
def get_terms(self, fn2index):
- rvs = {}, {}
+ # type: (Dict) -> Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
+ rvs = {}, {} # type: Tuple[Dict[unicode, List[unicode]], Dict[unicode, List[unicode]]]
for rv, mapping in zip(rvs, (self._mapping, self._title_mapping)):
for k, v in iteritems(mapping):
if len(v) == 1:
@@ -342,6 +369,7 @@ class IndexBuilder(object):
return rvs
def freeze(self):
+ # type: () -> Dict[unicode, Any]
"""Create a usable data structure for serializing."""
docnames, titles = zip(*sorted(self._titles.items()))
filenames = [self._filenames.get(docname) for docname in docnames]
@@ -357,9 +385,11 @@ class IndexBuilder(object):
titleterms=title_terms, envversion=self.env.version)
def label(self):
+ # type: () -> unicode
return "%s (code: %s)" % (self.lang.language_name, self.lang.lang)
def prune(self, docnames):
+ # type: (Iterable[unicode]) -> None
"""Remove data for all docnames not in the list."""
new_titles = {}
new_filenames = {}
@@ -375,6 +405,7 @@ class IndexBuilder(object):
wordnames.intersection_update(docnames)
def feed(self, docname, filename, title, doctree):
+ # type: (unicode, unicode, unicode, nodes.Node) -> None
"""Feed a doctree to the index."""
self._titles[docname] = title
self._filenames[docname] = filename
@@ -384,6 +415,7 @@ class IndexBuilder(object):
# memoize self.lang.stem
def stem(word):
+ # type: (unicode) -> unicode
try:
return self._stem_cache[word]
except KeyError:
@@ -403,11 +435,12 @@ class IndexBuilder(object):
# again, stemmer must not remove words from search index
if not _filter(stemmed_word) and _filter(word):
stemmed_word = word
- already_indexed = docname in self._title_mapping.get(stemmed_word, [])
+ already_indexed = docname in self._title_mapping.get(stemmed_word, set())
if _filter(stemmed_word) and not already_indexed:
self._mapping.setdefault(stemmed_word, set()).add(docname)
def context_for_searchtool(self):
+ # type: () -> Dict[unicode, Any]
return dict(
search_language_stemming_code = self.lang.js_stemmer_code,
search_language_stop_words = jsdump.dumps(sorted(self.lang.stopwords)),
@@ -416,9 +449,12 @@ class IndexBuilder(object):
)
def get_js_stemmer_rawcode(self):
+ # type: () -> unicode
if self.lang.js_stemmer_rawcode:
return path.join(
sphinx.package_dir, 'search',
'non-minified-js',
self.lang.js_stemmer_rawcode
)
+ else:
+ return None
diff --git a/sphinx/search/en.py b/sphinx/search/en.py
index 70ad290a7..f7ce43350 100644
--- a/sphinx/search/en.py
+++ b/sphinx/search/en.py
@@ -10,15 +10,13 @@
"""
from sphinx.search import SearchLanguage
+from sphinx.util.stemmer import get_stemmer
-try:
- from Stemmer import Stemmer as PyStemmer
- PYSTEMMER = True
-except ImportError:
- from sphinx.util.stemmer import PorterStemmer
- PYSTEMMER = False
+if False:
+ # For type annotation
+ from typing import Dict # NOQA
-english_stopwords = set("""
+english_stopwords = set(u"""
a and are as at
be but by
for
@@ -224,22 +222,9 @@ class SearchEnglish(SearchLanguage):
stopwords = english_stopwords
def init(self, options):
- if PYSTEMMER:
- class Stemmer(object):
- def __init__(self):
- self.stemmer = PyStemmer('porter')
-
- def stem(self, word):
- return self.stemmer.stemWord(word)
- else:
- class Stemmer(PorterStemmer):
- """All those porter stemmer implementations look hideous;
- make at least the stem method nicer.
- """
- def stem(self, word):
- return PorterStemmer.stem(self, word, 0, len(word) - 1)
-
- self.stemmer = Stemmer()
+ # type: (Dict) -> None
+ self.stemmer = get_stemmer()
def stem(self, word):
+ # type: (unicode) -> unicode
return self.stemmer.stem(word.lower())
diff --git a/sphinx/search/ja.py b/sphinx/search/ja.py
index b613157fb..a2703441b 100644
--- a/sphinx/search/ja.py
+++ b/sphinx/search/ja.py
@@ -39,13 +39,19 @@ from sphinx.errors import SphinxError, ExtensionError
from sphinx.search import SearchLanguage
from sphinx.util import import_object
+if False:
+ # For type annotation
+ from typing import Dict, List # NOQA
+
class BaseSplitter(object):
def __init__(self, options):
+ # type: (Dict) -> None
self.options = options
def split(self, input):
+ # type: (unicode) -> List[unicode]
"""
:param str input:
@@ -57,9 +63,10 @@ class BaseSplitter(object):
class MecabSplitter(BaseSplitter):
def __init__(self, options):
+ # type: (Dict) -> None
super(MecabSplitter, self).__init__(options)
- self.ctypes_libmecab = None
- self.ctypes_mecab = None
+ self.ctypes_libmecab = None # type: ignore
+ self.ctypes_mecab = None # type: ignore
if not native_module:
self.init_ctypes(options)
else:
@@ -67,6 +74,7 @@ class MecabSplitter(BaseSplitter):
self.dict_encode = options.get('dic_enc', 'utf-8')
def split(self, input):
+ # type: (unicode) -> List[unicode]
input2 = input if PY3 else input.encode(self.dict_encode)
if native_module:
result = self.native.parse(input2)
@@ -79,6 +87,7 @@ class MecabSplitter(BaseSplitter):
return result.decode(self.dict_encode).split(' ')
def init_native(self, options):
+ # type: (Dict) -> None
param = '-Owakati'
dict = options.get('dict')
if dict:
@@ -86,6 +95,7 @@ class MecabSplitter(BaseSplitter):
self.native = MeCab.Tagger(param)
def init_ctypes(self, options):
+ # type: (Dict) -> None
import ctypes.util
lib = options.get('lib')
@@ -122,6 +132,7 @@ class MecabSplitter(BaseSplitter):
raise SphinxError('mecab initialization failed')
def __del__(self):
+ # type: () -> None
if self.ctypes_libmecab:
self.ctypes_libmecab.mecab_destroy(self.ctypes_mecab)
@@ -130,17 +141,20 @@ MeCabBinder = MecabSplitter # keep backward compatibility until Sphinx-1.6
class JanomeSplitter(BaseSplitter):
def __init__(self, options):
+ # type: (Dict) -> None
super(JanomeSplitter, self).__init__(options)
self.user_dict = options.get('user_dic')
self.user_dict_enc = options.get('user_dic_enc', 'utf8')
self.init_tokenizer()
def init_tokenizer(self):
+ # type: () -> None
if not janome_module:
raise RuntimeError('Janome is not available')
self.tokenizer = janome.tokenizer.Tokenizer(udic=self.user_dict, udic_enc=self.user_dict_enc)
def split(self, input):
+ # type: (unicode) -> List[unicode]
result = u' '.join(token.surface for token in self.tokenizer.tokenize(input))
return result.split(u' ')
@@ -417,6 +431,7 @@ class DefaultSplitter(BaseSplitter):
# ctype_
def ctype_(self, char):
+ # type: (unicode) -> unicode
for pattern, value in iteritems(self.patterns_):
if pattern.match(char):
return value
@@ -424,12 +439,14 @@ class DefaultSplitter(BaseSplitter):
# ts_
def ts_(self, dict, key):
+ # type: (Dict[unicode, int], unicode) -> int
if key in dict:
return dict[key]
return 0
# segment
def split(self, input):
+ # type: (unicode) -> List[unicode]
if not input:
return []
@@ -538,6 +555,7 @@ class SearchJapanese(SearchLanguage):
}
def init(self, options):
+ # type: (Dict) -> None
type = options.get('type', 'default')
if type in self.splitters:
dotted_path = self.splitters[type]
@@ -550,10 +568,13 @@ class SearchJapanese(SearchLanguage):
dotted_path)
def split(self, input):
+ # type: (unicode) -> List[unicode]
return self.splitter.split(input)
def word_filter(self, stemmed_word):
+ # type: (unicode) -> bool
return len(stemmed_word) > 1
def stem(self, word):
+ # type: (unicode) -> unicode
return word
diff --git a/sphinx/search/ro.py b/sphinx/search/ro.py
index 78ae01851..b4beced2d 100644
--- a/sphinx/search/ro.py
+++ b/sphinx/search/ro.py
@@ -13,6 +13,10 @@ from sphinx.search import SearchLanguage
import snowballstemmer
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+
js_stemmer = u"""
var JSX={};(function(j){function l(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function L(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function h(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function M(a,b,c){return a[b]=a[b]/c|0}var E=parseInt;var C=parseFloat;function N(a){return a!==a}var A=isFinite;var z=encodeURIComponent;var y=decodeURIComponent;var x=encodeURI;var w=decodeURI;var u=Object.prototype.toString;var D=Object.prototype.hasOwnProperty;function k(){}j.require=function(b){var a=r[b];return a!==undefined?a:null};j.profilerIsRunning=function(){return k.getResults!=null};j.getProfileResults=function(){return(k.getResults||function(){return{}})()};j.postProfileResults=function(a,b){if(k.postResults==null)throw new Error('profiler has not been turned on');return k.postResults(a,b)};j.resetProfileResults=function(){if(k.resetResults==null)throw new Error('profiler has not been turned on');return k.resetResults()};j.DEBUG=false;function t(){};l([t],Error);function a(a,b,c){this.F=a.length;this.K=a;this.L=b;this.I=c;this.H=null;this.P=null};l([a],Object);function n(){};l([n],Object);function g(){var a;var b;var c;this.G={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};l([g],n);function v(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function d(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function e(a,d,c,e){var b;if(a._>=a.A){return false}b=a.E.charCodeAt(a._);if(b>e||b<c){a._++;return true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._++;return true}return false};function p(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function m(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function i(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function q(f,m,p){var b;var d;var e;var n;var g;var k;var l;var i;var h;var c;var a;var j;var o;b=0;d=p;e=f._;n=f.A;g=0;k=0;l=false;while(true){i=b+(d-b>>>1);h=0;c=g<k?g:k;a=m[i];for(j=c;j<a.F;j++){if(e+c===n){h=-1;break}h=f.E.charCodeAt(e+c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){d=i;k=c}else{b=i;g=c}if(d-b<=1){if(b>0){break}if(d===b){break}if(l){break}l=true}}while(true){a=m[b];if(g>=a.F){f._=e+a.F|0;if(a.H==null){return a.I}o=a.H(a.P);f._=e+a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function f(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.F-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.K.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.F){d._=e-a.F|0;if(a.H==null){return a.I}o=a.H(d);d._=e-a.F|0;if(o){return a.I}}b=a.L;if(b<0){return 0}}return-1};function s(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function c(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){s(a,a.B,a.C,f);b=true}return b};g.prototype.J=function(){return false};g.prototype.b=function(b){var a;var c;var d;var e;a=this.G['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.J();a=this.E;this.G['.'+b]=a}return a};g.prototype.stemWord=g.prototype.b;g.prototype.c=function(e){var d;var b;var c;var a;var f;var g;var 
h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.G['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.J();a=this.E;this.G['.'+c]=a}d.push(a)}return d};g.prototype.stemWords=g.prototype.c;function b(){g.call(this);this.B_standard_suffix_removed=false;this.I_p2=0;this.I_p1=0;this.I_pV=0};l([b],g);b.prototype.M=function(a){this.B_standard_suffix_removed=a.B_standard_suffix_removed;this.I_p2=a.I_p2;this.I_p1=a.I_p1;this.I_pV=a.I_pV;v(this,a)};b.prototype.copy_from=b.prototype.M;b.prototype.W=function(){var i;var a;var j;var e;var f;var g;var h;var k;b:while(true){i=this._;e=true;d:while(e===true){e=false;e:while(true){a=this._;f=true;a:while(f===true){f=false;if(!d(this,b.g_v,97,259)){break a}this.B=this._;g=true;f:while(g===true){g=false;j=this._;h=true;c:while(h===true){h=false;if(!m(this,1,'u')){break c}this.C=this._;if(!d(this,b.g_v,97,259)){break c}if(!c(this,'U')){return false}break f}this._=j;if(!m(this,1,'i')){break a}this.C=this._;if(!d(this,b.g_v,97,259)){break a}if(!c(this,'I')){return false}}this._=a;break e}k=this._=a;if(k>=this.A){break d}this._++}continue b}this._=i;break b}return true};b.prototype.r_prelude=b.prototype.W;function G(a){var j;var e;var k;var f;var g;var h;var i;var l;b:while(true){j=a._;f=true;d:while(f===true){f=false;e:while(true){e=a._;g=true;a:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break a}a.B=a._;h=true;f:while(h===true){h=false;k=a._;i=true;c:while(i===true){i=false;if(!m(a,1,'u')){break c}a.C=a._;if(!d(a,b.g_v,97,259)){break c}if(!c(a,'U')){return false}break f}a._=k;if(!m(a,1,'i')){break a}a.C=a._;if(!d(a,b.g_v,97,259)){break a}if(!c(a,'I')){return false}}a._=e;break e}l=a._=e;if(l>=a.A){break d}a._++}continue b}a._=j;break b}return true};b.prototype.U=function(){var u;var w;var x;var y;var t;var l;var f;var g;var h;var i;var c;var j;var k;var a;var m;var n;var o;var p;var q;var r;var s;var 
v;this.I_pV=s=this.A;this.I_p1=s;this.I_p2=s;u=this._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;w=this._;g=true;b:while(g===true){g=false;if(!d(this,b.g_v,97,259)){break b}h=true;f:while(h===true){h=false;x=this._;i=true;c:while(i===true){i=false;if(!e(this,b.g_v,97,259)){break c}d:while(true){c=true;e:while(c===true){c=false;if(!d(this,b.g_v,97,259)){break e}break d}if(this._>=this.A){break c}this._++}break f}this._=x;if(!d(this,b.g_v,97,259)){break b}c:while(true){j=true;d:while(j===true){j=false;if(!e(this,b.g_v,97,259)){break d}break c}if(this._>=this.A){break b}this._++}}break g}this._=w;if(!e(this,b.g_v,97,259)){break a}k=true;c:while(k===true){k=false;y=this._;a=true;b:while(a===true){a=false;if(!e(this,b.g_v,97,259)){break b}e:while(true){m=true;d:while(m===true){m=false;if(!d(this,b.g_v,97,259)){break d}break e}if(this._>=this.A){break b}this._++}break c}this._=y;if(!d(this,b.g_v,97,259)){break a}if(this._>=this.A){break a}this._++}}this.I_pV=this._}v=this._=u;t=v;n=true;a:while(n===true){n=false;b:while(true){o=true;c:while(o===true){o=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}b:while(true){p=true;c:while(p===true){p=false;if(!e(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}this.I_p1=this._;b:while(true){q=true;c:while(q===true){q=false;if(!d(this,b.g_v,97,259)){break c}break b}if(this._>=this.A){break a}this._++}c:while(true){r=true;b:while(r===true){r=false;if(!e(this,b.g_v,97,259)){break b}break c}if(this._>=this.A){break a}this._++}this.I_p2=this._}this._=t;return true};b.prototype.r_mark_regions=b.prototype.U;function H(a){var x;var y;var z;var u;var v;var l;var f;var g;var h;var i;var j;var k;var c;var m;var n;var o;var p;var q;var r;var s;var t;var w;a.I_pV=t=a.A;a.I_p1=t;a.I_p2=t;x=a._;l=true;a:while(l===true){l=false;f=true;g:while(f===true){f=false;y=a._;g=true;b:while(g===true){g=false;if(!d(a,b.g_v,97,259)){break 
b}h=true;f:while(h===true){h=false;z=a._;i=true;c:while(i===true){i=false;if(!e(a,b.g_v,97,259)){break c}d:while(true){j=true;e:while(j===true){j=false;if(!d(a,b.g_v,97,259)){break e}break d}if(a._>=a.A){break c}a._++}break f}a._=z;if(!d(a,b.g_v,97,259)){break b}c:while(true){k=true;d:while(k===true){k=false;if(!e(a,b.g_v,97,259)){break d}break c}if(a._>=a.A){break b}a._++}}break g}a._=y;if(!e(a,b.g_v,97,259)){break a}c=true;c:while(c===true){c=false;u=a._;m=true;b:while(m===true){m=false;if(!e(a,b.g_v,97,259)){break b}e:while(true){n=true;d:while(n===true){n=false;if(!d(a,b.g_v,97,259)){break d}break e}if(a._>=a.A){break b}a._++}break c}a._=u;if(!d(a,b.g_v,97,259)){break a}if(a._>=a.A){break a}a._++}}a.I_pV=a._}w=a._=x;v=w;o=true;a:while(o===true){o=false;b:while(true){p=true;c:while(p===true){p=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}b:while(true){q=true;c:while(q===true){q=false;if(!e(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}a.I_p1=a._;b:while(true){r=true;c:while(r===true){r=false;if(!d(a,b.g_v,97,259)){break c}break b}if(a._>=a.A){break a}a._++}c:while(true){s=true;b:while(s===true){s=false;if(!e(a,b.g_v,97,259)){break b}break c}if(a._>=a.A){break a}a._++}a.I_p2=a._}a._=v;return true};b.prototype.V=function(){var a;var e;var d;b:while(true){e=this._;d=true;a:while(d===true){d=false;this.B=this._;a=q(this,b.a_0,3);if(a===0){break a}this.C=this._;switch(a){case 0:break a;case 1:if(!c(this,'i')){return false}break;case 2:if(!c(this,'u')){return false}break;case 3:if(this._>=this.A){break a}this._++;break}continue b}this._=e;break b}return true};b.prototype.r_postlude=b.prototype.V;function I(a){var d;var f;var e;b:while(true){f=a._;e=true;a:while(e===true){e=false;a.B=a._;d=q(a,b.a_0,3);if(d===0){break a}a.C=a._;switch(d){case 0:break a;case 1:if(!c(a,'i')){return false}break;case 2:if(!c(a,'u')){return false}break;case 3:if(a._>=a.A){break a}a._++;break}continue b}a._=f;break b}return 
true};b.prototype.S=function(){return!(this.I_pV<=this._)?false:true};b.prototype.r_RV=b.prototype.S;b.prototype.Q=function(){return!(this.I_p1<=this._)?false:true};b.prototype.r_R1=b.prototype.Q;b.prototype.R=function(){return!(this.I_p2<=this._)?false:true};b.prototype.r_R2=b.prototype.R;b.prototype.Y=function(){var a;var e;var d;var g;this.C=this._;a=f(this,b.a_1,16);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!c(this,'a')){return false}break;case 3:if(!c(this,'e')){return false}break;case 4:if(!c(this,'i')){return false}break;case 5:e=this.A-this._;d=true;a:while(d===true){d=false;if(!i(this,2,'ab')){break a}return false}this._=this.A-e;if(!c(this,'i')){return false}break;case 6:if(!c(this,'at')){return false}break;case 7:if(!c(this,'aţi')){return false}break}return true};b.prototype.r_step_0=b.prototype.Y;function J(a){var d;var g;var e;var h;a.C=a._;d=f(a,b.a_1,16);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!c(a,'a')){return false}break;case 3:if(!c(a,'e')){return false}break;case 4:if(!c(a,'i')){return false}break;case 5:g=a.A-a._;e=true;a:while(e===true){e=false;if(!i(a,2,'ab')){break a}return false}a._=a.A-g;if(!c(a,'i')){return false}break;case 6:if(!c(a,'at')){return false}break;case 7:if(!c(a,'aţi')){return false}break}return true};b.prototype.T=function(){var a;var d;var e;var g;d=this.A-(e=this._);this.C=e;a=f(this,b.a_2,46);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p1<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'abil')){return false}break;case 2:if(!c(this,'ibil')){return false}break;case 3:if(!c(this,'iv')){return false}break;case 4:if(!c(this,'ic')){return false}break;case 5:if(!c(this,'at')){return false}break;case 6:if(!c(this,'it')){return 
false}break}this.B_standard_suffix_removed=true;this._=this.A-d;return true};b.prototype.r_combo_suffix=b.prototype.T;function o(a){var d;var e;var g;var h;e=a.A-(g=a._);a.C=g;d=f(a,b.a_2,46);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p1<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'abil')){return false}break;case 2:if(!c(a,'ibil')){return false}break;case 3:if(!c(a,'iv')){return false}break;case 4:if(!c(a,'ic')){return false}break;case 5:if(!c(a,'at')){return false}break;case 6:if(!c(a,'it')){return false}break}a.B_standard_suffix_removed=true;a._=a.A-e;return true};b.prototype.X=function(){var a;var e;var d;var g;this.B_standard_suffix_removed=false;a:while(true){e=this.A-this._;d=true;b:while(d===true){d=false;if(!o(this)){break b}continue a}this._=this.A-e;break a}this.C=this._;a=f(this,b.a_3,62);if(a===0){return false}this.B=g=this._;if(!(!(this.I_p2<=g)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break;case 2:if(!i(this,1,'ţ')){return false}this.B=this._;if(!c(this,'t')){return false}break;case 3:if(!c(this,'ist')){return false}break}this.B_standard_suffix_removed=true;return true};b.prototype.r_standard_suffix=b.prototype.X;function K(a){var d;var g;var e;var h;a.B_standard_suffix_removed=false;a:while(true){g=a.A-a._;e=true;b:while(e===true){e=false;if(!o(a)){break b}continue a}a._=a.A-g;break a}a.C=a._;d=f(a,b.a_3,62);if(d===0){return false}a.B=h=a._;if(!(!(a.I_p2<=h)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break;case 2:if(!i(a,1,'ţ')){return false}a.B=a._;if(!c(a,'t')){return false}break;case 3:if(!c(a,'ist')){return false}break}a.B_standard_suffix_removed=true;return true};b.prototype.Z=function(){var d;var h;var a;var j;var e;var g;var k;var l;var m;h=this.A-(k=this._);if(k<this.I_pV){return false}l=this._=this.I_pV;a=this.D;this.D=l;m=this._=this.A-h;this.C=m;d=f(this,b.a_4,94);if(d===0){this.D=a;return 
false}this.B=this._;switch(d){case 0:this.D=a;return false;case 1:e=true;a:while(e===true){e=false;j=this.A-this._;g=true;b:while(g===true){g=false;if(!p(this,b.g_v,97,259)){break b}break a}this._=this.A-j;if(!i(this,1,'u')){this.D=a;return false}}if(!c(this,'')){return false}break;case 2:if(!c(this,'')){return false}break}this.D=a;return true};b.prototype.r_verb_suffix=b.prototype.Z;function F(a){var e;var l;var d;var j;var g;var h;var m;var n;var k;l=a.A-(m=a._);if(m<a.I_pV){return false}n=a._=a.I_pV;d=a.D;a.D=n;k=a._=a.A-l;a.C=k;e=f(a,b.a_4,94);if(e===0){a.D=d;return false}a.B=a._;switch(e){case 0:a.D=d;return false;case 1:g=true;a:while(g===true){g=false;j=a.A-a._;h=true;b:while(h===true){h=false;if(!p(a,b.g_v,97,259)){break b}break a}a._=a.A-j;if(!i(a,1,'u')){a.D=d;return false}}if(!c(a,'')){return false}break;case 2:if(!c(a,'')){return false}break}a.D=d;return true};b.prototype.a=function(){var a;var d;this.C=this._;a=f(this,b.a_5,5);if(a===0){return false}this.B=d=this._;if(!(!(this.I_pV<=d)?false:true)){return false}switch(a){case 0:return false;case 1:if(!c(this,'')){return false}break}return true};b.prototype.r_vowel_suffix=b.prototype.a;function B(a){var d;var e;a.C=a._;d=f(a,b.a_5,5);if(d===0){return false}a.B=e=a._;if(!(!(a.I_pV<=e)?false:true)){return false}switch(d){case 0:return false;case 1:if(!c(a,'')){return false}break}return true};b.prototype.J=function(){var n;var j;var k;var l;var m;var o;var p;var b;var c;var d;var e;var f;var a;var g;var h;var i;var r;var s;var t;var u;var v;var w;var x;var y;var q;n=this._;b=true;a:while(b===true){b=false;if(!G(this)){break a}}r=this._=n;j=r;c=true;a:while(c===true){c=false;if(!H(this)){break a}}s=this._=j;this.D=s;u=this._=t=this.A;k=t-u;d=true;a:while(d===true){d=false;if(!J(this)){break a}}w=this._=(v=this.A)-k;l=v-w;e=true;a:while(e===true){e=false;if(!K(this)){break 
a}}y=this._=(x=this.A)-l;m=x-y;f=true;a:while(f===true){f=false;a=true;b:while(a===true){a=false;o=this.A-this._;g=true;c:while(g===true){g=false;if(!this.B_standard_suffix_removed){break c}break b}this._=this.A-o;if(!F(this)){break a}}}this._=this.A-m;h=true;a:while(h===true){h=false;if(!B(this)){break a}}q=this._=this.D;p=q;i=true;a:while(i===true){i=false;if(!I(this)){break a}}this._=p;return true};b.prototype.stem=b.prototype.J;b.prototype.N=function(a){return a instanceof b};b.prototype.equals=b.prototype.N;b.prototype.O=function(){var c;var a;var b;var d;c='RomanianStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};b.prototype.hashCode=b.prototype.O;b.serialVersionUID=1;h(b,'methodObject',function(){return new b});h(b,'a_0',function(){return[new a('',-1,3),new a('I',0,1),new a('U',0,2)]});h(b,'a_1',function(){return[new a('ea',-1,3),new a('aţia',-1,7),new a('aua',-1,2),new a('iua',-1,4),new a('aţie',-1,7),new a('ele',-1,3),new a('ile',-1,5),new a('iile',6,4),new a('iei',-1,4),new a('atei',-1,6),new a('ii',-1,4),new a('ului',-1,1),new a('ul',-1,1),new a('elor',-1,3),new a('ilor',-1,4),new a('iilor',14,4)]});h(b,'a_2',function(){return[new a('icala',-1,4),new a('iciva',-1,4),new a('ativa',-1,5),new a('itiva',-1,6),new a('icale',-1,4),new a('aţiune',-1,5),new a('iţiune',-1,6),new a('atoare',-1,5),new a('itoare',-1,6),new a('ătoare',-1,5),new a('icitate',-1,4),new a('abilitate',-1,1),new a('ibilitate',-1,2),new a('ivitate',-1,3),new a('icive',-1,4),new a('ative',-1,5),new a('itive',-1,6),new a('icali',-1,4),new a('atori',-1,5),new a('icatori',18,4),new a('itori',-1,6),new a('ători',-1,5),new a('icitati',-1,4),new a('abilitati',-1,1),new a('ivitati',-1,3),new a('icivi',-1,4),new a('ativi',-1,5),new a('itivi',-1,6),new a('icităi',-1,4),new a('abilităi',-1,1),new a('ivităi',-1,3),new a('icităţi',-1,4),new a('abilităţi',-1,1),new a('ivităţi',-1,3),new a('ical',-1,4),new a('ator',-1,5),new a('icator',35,4),new a('itor',-1,6),new 
a('ător',-1,5),new a('iciv',-1,4),new a('ativ',-1,5),new a('itiv',-1,6),new a('icală',-1,4),new a('icivă',-1,4),new a('ativă',-1,5),new a('itivă',-1,6)]});h(b,'a_3',function(){return[new a('ica',-1,1),new a('abila',-1,1),new a('ibila',-1,1),new a('oasa',-1,1),new a('ata',-1,1),new a('ita',-1,1),new a('anta',-1,1),new a('ista',-1,3),new a('uta',-1,1),new a('iva',-1,1),new a('ic',-1,1),new a('ice',-1,1),new a('abile',-1,1),new a('ibile',-1,1),new a('isme',-1,3),new a('iune',-1,2),new a('oase',-1,1),new a('ate',-1,1),new a('itate',17,1),new a('ite',-1,1),new a('ante',-1,1),new a('iste',-1,3),new a('ute',-1,1),new a('ive',-1,1),new a('ici',-1,1),new a('abili',-1,1),new a('ibili',-1,1),new a('iuni',-1,2),new a('atori',-1,1),new a('osi',-1,1),new a('ati',-1,1),new a('itati',30,1),new a('iti',-1,1),new a('anti',-1,1),new a('isti',-1,3),new a('uti',-1,1),new a('işti',-1,3),new a('ivi',-1,1),new a('ităi',-1,1),new a('oşi',-1,1),new a('ităţi',-1,1),new a('abil',-1,1),new a('ibil',-1,1),new a('ism',-1,3),new a('ator',-1,1),new a('os',-1,1),new a('at',-1,1),new a('it',-1,1),new a('ant',-1,1),new a('ist',-1,3),new a('ut',-1,1),new a('iv',-1,1),new a('ică',-1,1),new a('abilă',-1,1),new a('ibilă',-1,1),new a('oasă',-1,1),new a('ată',-1,1),new a('ită',-1,1),new a('antă',-1,1),new a('istă',-1,3),new a('ută',-1,1),new a('ivă',-1,1)]});h(b,'a_4',function(){return[new a('ea',-1,1),new a('ia',-1,1),new a('esc',-1,1),new a('ăsc',-1,1),new a('ind',-1,1),new a('ând',-1,1),new a('are',-1,1),new a('ere',-1,1),new a('ire',-1,1),new a('âre',-1,1),new a('se',-1,2),new a('ase',10,1),new a('sese',10,2),new a('ise',10,1),new a('use',10,1),new a('âse',10,1),new a('eşte',-1,1),new a('ăşte',-1,1),new a('eze',-1,1),new a('ai',-1,1),new a('eai',19,1),new a('iai',19,1),new a('sei',-1,2),new a('eşti',-1,1),new a('ăşti',-1,1),new a('ui',-1,1),new a('ezi',-1,1),new a('âi',-1,1),new a('aşi',-1,1),new a('seşi',-1,2),new a('aseşi',29,1),new a('seseşi',29,2),new a('iseşi',29,1),new a('useşi',29,1),new 
a('âseşi',29,1),new a('işi',-1,1),new a('uşi',-1,1),new a('âşi',-1,1),new a('aţi',-1,2),new a('eaţi',38,1),new a('iaţi',38,1),new a('eţi',-1,2),new a('iţi',-1,2),new a('âţi',-1,2),new a('arăţi',-1,1),new a('serăţi',-1,2),new a('aserăţi',45,1),new a('seserăţi',45,2),new a('iserăţi',45,1),new a('userăţi',45,1),new a('âserăţi',45,1),new a('irăţi',-1,1),new a('urăţi',-1,1),new a('ârăţi',-1,1),new a('am',-1,1),new a('eam',54,1),new a('iam',54,1),new a('em',-1,2),new a('asem',57,1),new a('sesem',57,2),new a('isem',57,1),new a('usem',57,1),new a('âsem',57,1),new a('im',-1,2),new a('âm',-1,2),new a('ăm',-1,2),new a('arăm',65,1),new a('serăm',65,2),new a('aserăm',67,1),new a('seserăm',67,2),new a('iserăm',67,1),new a('userăm',67,1),new a('âserăm',67,1),new a('irăm',65,1),new a('urăm',65,1),new a('ârăm',65,1),new a('au',-1,1),new a('eau',76,1),new a('iau',76,1),new a('indu',-1,1),new a('ându',-1,1),new a('ez',-1,1),new a('ească',-1,1),new a('ară',-1,1),new a('seră',-1,2),new a('aseră',84,1),new a('seseră',84,2),new a('iseră',84,1),new a('useră',84,1),new a('âseră',84,1),new a('iră',-1,1),new a('ură',-1,1),new a('âră',-1,1),new a('ează',-1,1)]});h(b,'a_5',function(){return[new a('a',-1,1),new a('e',-1,1),new a('ie',1,1),new a('i',-1,1),new a('ă',-1,1)]});h(b,'g_v',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,2,32,0,0,4]});var r={'src/stemmer.jsx':{Stemmer:n},'src/romanian-stemmer.jsx':{RomanianStemmer:b}}}(JSX))
var Stemmer = JSX.require("src/romanian-stemmer.jsx").RomanianStemmer;
@@ -24,10 +28,12 @@ class SearchRomanian(SearchLanguage):
language_name = 'Romanian'
js_stemmer_rawcode = 'romanian-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = []
+ stopwords = set() # type: Set[unicode]
def init(self, options):
+ # type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('romanian')
def stem(self, word):
+ # type: (unicode) -> unicode
return self.stemmer.stemWord(word)
diff --git a/sphinx/search/tr.py b/sphinx/search/tr.py
index 33c5c5192..4ce42dd76 100644
--- a/sphinx/search/tr.py
+++ b/sphinx/search/tr.py
@@ -13,6 +13,10 @@ from sphinx.search import SearchLanguage
import snowballstemmer
+if False:
+ # For type annotation
+ from typing import Dict, Set # NOQA
+
js_stemmer = u"""
var JSX={};(function(q){function r(b,e){var a=function(){};a.prototype=e.prototype;var c=new a;for(var d in b){b[d].prototype=c}}function Q(c,b){for(var a in b.prototype)if(b.prototype.hasOwnProperty(a))c.prototype[a]=b.prototype[a]}function j(a,b,d){function c(a,b,c){delete a[b];a[b]=c;return c}Object.defineProperty(a,b,{get:function(){return c(a,b,d())},set:function(d){c(a,b,d)},enumerable:true,configurable:true})}function R(a,b,c){return a[b]=a[b]/c|0}var M=parseInt;var K=parseFloat;function P(a){return a!==a}var A=isFinite;var G=encodeURIComponent;var F=decodeURIComponent;var E=encodeURI;var D=decodeURI;var C=Object.prototype.toString;var H=Object.prototype.hasOwnProperty;function p(){}q.require=function(b){var a=y[b];return a!==undefined?a:null};q.profilerIsRunning=function(){return p.getResults!=null};q.getProfileResults=function(){return(p.getResults||function(){return{}})()};q.postProfileResults=function(a,b){if(p.postResults==null)throw new Error('profiler has not been turned on');return p.postResults(a,b)};q.resetProfileResults=function(){if(p.resetResults==null)throw new Error('profiler has not been turned on');return p.resetResults()};q.DEBUG=false;function I(){};r([I],Error);function d(a,b,c){this.G=a.length;this.A_=a;this.D_=b;this.J=c;this.I=null;this.E_=null};r([d],Object);function u(){};r([u],Object);function m(){var a;var b;var c;this.F={};a=this.E='';b=this._=0;c=this.A=a.length;this.D=0;this.B=b;this.C=c};r([m],u);function B(a,b){a.E=b.E;a._=b._;a.A=b.A;a.D=b.D;a.B=b.B;a.C=b.C};function v(b,d,c,e){var a;if(b._>=b.A){return false}a=b.E.charCodeAt(b._);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._++;return true};function f(b,d,c,e){var a;if(b._<=b.D){return false}a=b.E.charCodeAt(b._-1);if(a>e||a<c){return false}a-=c;if((d[a>>>3]&1<<(a&7))===0){return false}b._--;return true};function t(a,d,c,e){var b;if(a._<=a.D){return false}b=a.E.charCodeAt(a._-1);if(b>e||b<c){a._--;return 
true}b-=c;if((d[b>>>3]&1<<(b&7))===0){a._--;return true}return false};function s(a,b,d){var c;if(a.A-a._<b){return false}if(a.E.slice(c=a._,c+b)!==d){return false}a._+=b;return true};function g(a,b,d){var c;if(a._-a.D<b){return false}if(a.E.slice((c=a._)-b,c)!==d){return false}a._-=b;return true};function b(d,m,p){var b;var g;var e;var n;var f;var k;var l;var i;var h;var c;var a;var j;var o;b=0;g=p;e=d._;n=d.D;f=0;k=0;l=false;while(true){i=b+(g-b>>1);h=0;c=f<k?f:k;a=m[i];for(j=a.G-1-c;j>=0;j--){if(e-c===n){h=-1;break}h=d.E.charCodeAt(e-1-c)-a.A_.charCodeAt(j);if(h!==0){break}c++}if(h<0){g=i;k=c}else{b=i;f=c}if(g-b<=1){if(b>0){break}if(g===b){break}if(l){break}l=true}}while(true){a=m[b];if(f>=a.G){d._=e-a.G|0;if(a.I==null){return a.J}o=a.I(d);d._=e-a.G|0;if(o){return a.J}}b=a.D_;if(b<0){return 0}}return-1};function n(a,b,d,e){var c;c=e.length-(d-b);a.E=a.E.slice(0,b)+e+a.E.slice(d);a.A+=c|0;if(a._>=d){a._+=c|0}else if(a._>b){a._=b}return c|0};function e(a,f){var b;var c;var d;var e;b=false;if((c=a.B)<0||c>(d=a.C)||d>(e=a.A)||e>a.E.length?false:true){n(a,a.B,a.C,f);b=true}return b};m.prototype.H=function(){return false};m.prototype.B_=function(b){var a;var c;var d;var e;a=this.F['.'+b];if(a==null){c=this.E=b;d=this._=0;e=this.A=c.length;this.D=0;this.B=d;this.C=e;this.H();a=this.E;this.F['.'+b]=a}return a};m.prototype.stemWord=m.prototype.B_;m.prototype.C_=function(e){var d;var b;var c;var a;var f;var g;var h;d=[];for(b=0;b<e.length;b++){c=e[b];a=this.F['.'+c];if(a==null){f=this.E=c;g=this._=0;h=this.A=f.length;this.D=0;this.B=g;this.C=h;this.H();a=this.E;this.F['.'+c]=a}d.push(a)}return d};m.prototype.stemWords=m.prototype.C_;function a(){m.call(this);this.B_continue_stemming_noun_suffixes=false;this.I_strlen=0};r([a],m);a.prototype.K=function(a){this.B_continue_stemming_noun_suffixes=a.B_continue_stemming_noun_suffixes;this.I_strlen=a.I_strlen;B(this,a)};a.prototype.copy_from=a.prototype.K;a.prototype.O=function(){var E;var q;var b;var e;var h;var i;var j;var k;var 
l;var m;var n;var o;var p;var c;var r;var s;var t;var u;var d;var v;var w;var x;var y;var z;var A;var B;var C;var D;var G;var H;var I;var J;var K;var L;var M;var N;var F;E=this.A-this._;b:while(true){q=this.A-this._;o=true;a:while(o===true){o=false;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-q;break b}G=this._=this.A-q;if(G<=this.D){return false}this._--}p=true;a:while(p===true){p=false;b=this.A-this._;c=true;b:while(c===true){c=false;if(!g(this,1,'a')){break b}c:while(true){e=this.A-this._;r=true;d:while(r===true){r=false;if(!f(this,a.g_vowel1,97,305)){break d}this._=this.A-e;break c}H=this._=this.A-e;if(H<=this.D){break b}this._--}break a}this._=this.A-b;s=true;b:while(s===true){s=false;if(!g(this,1,'e')){break b}c:while(true){h=this.A-this._;t=true;d:while(t===true){t=false;if(!f(this,a.g_vowel2,101,252)){break d}this._=this.A-h;break c}I=this._=this.A-h;if(I<=this.D){break b}this._--}break a}this._=this.A-b;u=true;b:while(u===true){u=false;if(!g(this,1,'ı')){break b}c:while(true){i=this.A-this._;d=true;d:while(d===true){d=false;if(!f(this,a.g_vowel3,97,305)){break d}this._=this.A-i;break c}J=this._=this.A-i;if(J<=this.D){break b}this._--}break a}this._=this.A-b;v=true;b:while(v===true){v=false;if(!g(this,1,'i')){break b}c:while(true){j=this.A-this._;w=true;d:while(w===true){w=false;if(!f(this,a.g_vowel4,101,105)){break d}this._=this.A-j;break c}K=this._=this.A-j;if(K<=this.D){break b}this._--}break a}this._=this.A-b;x=true;b:while(x===true){x=false;if(!g(this,1,'o')){break b}c:while(true){k=this.A-this._;y=true;d:while(y===true){y=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-k;break c}L=this._=this.A-k;if(L<=this.D){break b}this._--}break a}this._=this.A-b;z=true;b:while(z===true){z=false;if(!g(this,1,'ö')){break b}c:while(true){l=this.A-this._;A=true;d:while(A===true){A=false;if(!f(this,a.g_vowel6,246,252)){break d}this._=this.A-l;break c}M=this._=this.A-l;if(M<=this.D){break b}this._--}break 
a}this._=this.A-b;B=true;b:while(B===true){B=false;if(!g(this,1,'u')){break b}c:while(true){m=this.A-this._;C=true;d:while(C===true){C=false;if(!f(this,a.g_vowel5,111,117)){break d}this._=this.A-m;break c}N=this._=this.A-m;if(N<=this.D){break b}this._--}break a}this._=this.A-b;if(!g(this,1,'ü')){return false}b:while(true){n=this.A-this._;D=true;c:while(D===true){D=false;if(!f(this,a.g_vowel6,246,252)){break c}this._=this.A-n;break b}F=this._=this.A-n;if(F<=this.D){return false}this._--}}this._=this.A-E;return true};a.prototype.r_check_vowel_harmony=a.prototype.O;function c(b){var F;var r;var c;var e;var h;var i;var j;var k;var l;var m;var n;var o;var p;var q;var d;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var D;var E;var H;var I;var J;var K;var L;var M;var N;var O;var G;F=b.A-b._;b:while(true){r=b.A-b._;o=true;a:while(o===true){o=false;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-r;break b}H=b._=b.A-r;if(H<=b.D){return false}b._--}p=true;a:while(p===true){p=false;c=b.A-b._;q=true;b:while(q===true){q=false;if(!g(b,1,'a')){break b}c:while(true){e=b.A-b._;d=true;d:while(d===true){d=false;if(!f(b,a.g_vowel1,97,305)){break d}b._=b.A-e;break c}I=b._=b.A-e;if(I<=b.D){break b}b._--}break a}b._=b.A-c;s=true;b:while(s===true){s=false;if(!g(b,1,'e')){break b}c:while(true){h=b.A-b._;t=true;d:while(t===true){t=false;if(!f(b,a.g_vowel2,101,252)){break d}b._=b.A-h;break c}J=b._=b.A-h;if(J<=b.D){break b}b._--}break a}b._=b.A-c;u=true;b:while(u===true){u=false;if(!g(b,1,'ı')){break b}c:while(true){i=b.A-b._;v=true;d:while(v===true){v=false;if(!f(b,a.g_vowel3,97,305)){break d}b._=b.A-i;break c}K=b._=b.A-i;if(K<=b.D){break b}b._--}break a}b._=b.A-c;w=true;b:while(w===true){w=false;if(!g(b,1,'i')){break b}c:while(true){j=b.A-b._;x=true;d:while(x===true){x=false;if(!f(b,a.g_vowel4,101,105)){break d}b._=b.A-j;break c}L=b._=b.A-j;if(L<=b.D){break b}b._--}break a}b._=b.A-c;y=true;b:while(y===true){y=false;if(!g(b,1,'o')){break 
b}c:while(true){k=b.A-b._;z=true;d:while(z===true){z=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-k;break c}M=b._=b.A-k;if(M<=b.D){break b}b._--}break a}b._=b.A-c;A=true;b:while(A===true){A=false;if(!g(b,1,'ö')){break b}c:while(true){l=b.A-b._;B=true;d:while(B===true){B=false;if(!f(b,a.g_vowel6,246,252)){break d}b._=b.A-l;break c}N=b._=b.A-l;if(N<=b.D){break b}b._--}break a}b._=b.A-c;C=true;b:while(C===true){C=false;if(!g(b,1,'u')){break b}c:while(true){m=b.A-b._;D=true;d:while(D===true){D=false;if(!f(b,a.g_vowel5,111,117)){break d}b._=b.A-m;break c}O=b._=b.A-m;if(O<=b.D){break b}b._--}break a}b._=b.A-c;if(!g(b,1,'ü')){return false}b:while(true){n=b.A-b._;E=true;c:while(E===true){E=false;if(!f(b,a.g_vowel6,246,252)){break c}b._=b.A-n;break b}G=b._=b.A-n;if(G<=b.D){return false}b._--}}b._=b.A-F;return true};a.prototype.j=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'n')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'n')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_n_consonant=a.prototype.j;function o(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'n')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'n')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return 
false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.k=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'s')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'s')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return true};a.prototype.r_mark_suffix_with_optional_s_consonant=a.prototype.k;function l(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'s')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'s')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.l=function(){var k;var h;var l;var i;var m;var j;var b;var e;var d;var n;var o;var p;var q;var c;b=true;b:while(b===true){b=false;k=this.A-this._;e=true;a:while(e===true){e=false;h=this.A-this._;if(!g(this,1,'y')){break a}n=this._=this.A-h;if(n<=this.D){break a}this._--;l=this.A-this._;if(!f(this,a.g_vowel,97,305)){break a}this._=this.A-l;break b}p=this._=(o=this.A)-k;i=o-p;d=true;a:while(d===true){d=false;m=this.A-this._;if(!g(this,1,'y')){break a}this._=this.A-m;return false}c=this._=(q=this.A)-i;j=q-c;if(c<=this.D){return false}this._--;if(!f(this,a.g_vowel,97,305)){return false}this._=this.A-j}return 
true};a.prototype.r_mark_suffix_with_optional_y_consonant=a.prototype.l;function h(b){var i;var m;var l;var j;var n;var k;var c;var e;var d;var o;var p;var q;var r;var h;c=true;b:while(c===true){c=false;i=b.A-b._;e=true;a:while(e===true){e=false;m=b.A-b._;if(!g(b,1,'y')){break a}o=b._=b.A-m;if(o<=b.D){break a}b._--;l=b.A-b._;if(!f(b,a.g_vowel,97,305)){break a}b._=b.A-l;break b}q=b._=(p=b.A)-i;j=p-q;d=true;a:while(d===true){d=false;n=b.A-b._;if(!g(b,1,'y')){break a}b._=b.A-n;return false}h=b._=(r=b.A)-j;k=r-h;if(h<=b.D){return false}b._--;if(!f(b,a.g_vowel,97,305)){return false}b._=b.A-k}return true};a.prototype.i=function(){var j;var g;var k;var h;var l;var i;var b;var e;var d;var m;var n;var o;var p;var c;b=true;b:while(b===true){b=false;j=this.A-this._;e=true;a:while(e===true){e=false;g=this.A-this._;if(!f(this,a.g_U,105,305)){break a}m=this._=this.A-g;if(m<=this.D){break a}this._--;k=this.A-this._;if(!t(this,a.g_vowel,97,305)){break a}this._=this.A-k;break b}o=this._=(n=this.A)-j;h=n-o;d=true;a:while(d===true){d=false;l=this.A-this._;if(!f(this,a.g_U,105,305)){break a}this._=this.A-l;return false}c=this._=(p=this.A)-h;i=p-c;if(c<=this.D){return false}this._--;if(!t(this,a.g_vowel,97,305)){return false}this._=this.A-i}return true};a.prototype.r_mark_suffix_with_optional_U_vowel=a.prototype.i;function k(b){var h;var l;var k;var i;var m;var j;var c;var e;var d;var n;var o;var p;var q;var g;c=true;b:while(c===true){c=false;h=b.A-b._;e=true;a:while(e===true){e=false;l=b.A-b._;if(!f(b,a.g_U,105,305)){break a}n=b._=b.A-l;if(n<=b.D){break a}b._--;k=b.A-b._;if(!t(b,a.g_vowel,97,305)){break a}b._=b.A-k;break b}p=b._=(o=b.A)-h;i=o-p;d=true;a:while(d===true){d=false;m=b.A-b._;if(!f(b,a.g_U,105,305)){break a}b._=b.A-m;return false}g=b._=(q=b.A)-i;j=q-g;if(g<=b.D){return false}b._--;if(!t(b,a.g_vowel,97,305)){return false}b._=b.A-j}return true};a.prototype.e=function(){return 
b(this,a.a_0,10)===0?false:!k(this)?false:true};a.prototype.r_mark_possessives=a.prototype.e;a.prototype.f=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true};a.prototype.r_mark_sU=a.prototype.f;a.prototype.W=function(){return b(this,a.a_1,2)===0?false:true};a.prototype.r_mark_lArI=a.prototype.W;a.prototype.o=function(){return!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true};a.prototype.r_mark_yU=a.prototype.o;a.prototype.Y=function(){return!c(this)?false:b(this,a.a_2,4)===0?false:true};a.prototype.r_mark_nU=a.prototype.Y;a.prototype.Z=function(){return!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true};a.prototype.r_mark_nUn=a.prototype.Z;a.prototype.m=function(){return!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true};a.prototype.r_mark_yA=a.prototype.m;a.prototype.X=function(){return!c(this)?false:b(this,a.a_5,2)===0?false:true};a.prototype.r_mark_nA=a.prototype.X;a.prototype.Q=function(){return!c(this)?false:b(this,a.a_6,4)===0?false:true};a.prototype.r_mark_DA=a.prototype.Q;a.prototype.c=function(){return!c(this)?false:b(this,a.a_7,2)===0?false:true};a.prototype.r_mark_ndA=a.prototype.c;a.prototype.R=function(){return!c(this)?false:b(this,a.a_8,4)===0?false:true};a.prototype.r_mark_DAn=a.prototype.R;a.prototype.d=function(){return!c(this)?false:b(this,a.a_9,2)===0?false:true};a.prototype.r_mark_ndAn=a.prototype.d;a.prototype.s=function(){return!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true};a.prototype.r_mark_ylA=a.prototype.s;a.prototype.U=function(){return!g(this,2,'ki')?false:true};a.prototype.r_mark_ki=a.prototype.U;a.prototype.b=function(){return!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true};a.prototype.r_mark_ncA=a.prototype.b;a.prototype.p=function(){return!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUm=a.prototype.p;a.prototype.g=function(){return!c(this)?false:b(this,a.a_13,4)===0?false:true};a.prototype.r_mark_sUn=a.pr
ototype.g;a.prototype.q=function(){return!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true};a.prototype.r_mark_yUz=a.prototype.q;a.prototype.h=function(){return b(this,a.a_15,4)===0?false:true};a.prototype.r_mark_sUnUz=a.prototype.h;a.prototype.V=function(){return!c(this)?false:b(this,a.a_16,2)===0?false:true};a.prototype.r_mark_lAr=a.prototype.V;a.prototype.a=function(){return!c(this)?false:b(this,a.a_17,4)===0?false:true};a.prototype.r_mark_nUz=a.prototype.a;a.prototype.S=function(){return!c(this)?false:b(this,a.a_18,8)===0?false:true};a.prototype.r_mark_DUr=a.prototype.S;a.prototype.T=function(){return b(this,a.a_19,2)===0?false:true};a.prototype.r_mark_cAsInA=a.prototype.T;a.prototype.n=function(){return!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true};a.prototype.r_mark_yDU=a.prototype.n;a.prototype.u=function(){return b(this,a.a_21,8)===0?false:!h(this)?false:true};a.prototype.r_mark_ysA=a.prototype.u;a.prototype.t=function(){return!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true};a.prototype.r_mark_ymUs_=a.prototype.t;a.prototype.r=function(){return!g(this,3,'ken')?false:!h(this)?false:true};a.prototype.r_mark_yken=a.prototype.r;a.prototype.y=function(){var i;var j;var d;var Y;var k;var X;var l;var W;var V;var f;var r;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var m;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var p;var o;var D;var n;var q;this.C=this._;this.B_continue_stemming_noun_suffixes=true;r=true;a:while(r===true){r=false;i=this.A-this._;s=true;d:while(s===true){s=false;t=true;b:while(t===true){t=false;j=this.A-this._;u=true;c:while(u===true){u=false;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;v=true;c:while(v===true){v=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break c}break 
b}this._=this.A-j;w=true;c:while(w===true){w=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-j;if(!(!g(this,3,'ken')?false:!h(this)?false:true)){break d}}break a}this._=this.A-i;x=true;c:while(x===true){x=false;if(!(b(this,a.a_19,2)===0?false:true)){break c}y=true;b:while(y===true){y=false;d=this.A-this._;z=true;d:while(z===true){z=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-d;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}break b}this._=this.A-d;B=true;d:while(B===true){B=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d;C=true;d:while(C===true){C=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-d;m=true;d:while(m===true){m=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-d}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){break c}break a}this._=this.A-i;E=true;c:while(E===true){E=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}Y=this.A-this._;F=true;d:while(F===true){F=false;this.C=this._;G=true;b:while(G===true){G=false;k=this.A-this._;H=true;e:while(H===true){H=false;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){break e}break b}this._=this.A-k;I=true;e:while(I===true){I=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;J=true;e:while(J===true){J=false;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break e}break b}this._=this.A-k;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-Y;break d}}}this.B_continue_stemming_noun_suffixes=false;break a}this._=this.A-i;K=true;b:while(K===true){K=false;if(!(!c(this)?false:b(this,a.a_17,4)===0?false:true)){break 
b}L=true;c:while(L===true){L=false;X=this.A-this._;M=true;d:while(M===true){M=false;if(!(!c(this)?false:b(this,a.a_20,32)===0?false:!h(this)?false:true)){break d}break c}this._=this.A-X;if(!(b(this,a.a_21,8)===0?false:!h(this)?false:true)){break b}}break a}this._=this.A-i;N=true;c:while(N===true){N=false;O=true;b:while(O===true){O=false;l=this.A-this._;P=true;d:while(P===true){P=false;if(!(b(this,a.a_15,4)===0?false:true)){break d}break b}this._=this.A-l;Q=true;d:while(Q===true){Q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break d}break b}this._=this.A-l;R=true;d:while(R===true){R=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break d}break b}this._=this.A-l;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}W=this.A-this._;S=true;b:while(S===true){S=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-W;break b}}break a}this._=this.A-i;if(!(!c(this)?false:b(this,a.a_18,8)===0?false:true)){return false}this.B=this._;if(!e(this,'')){return false}V=this.A-this._;T=true;d:while(T===true){T=false;this.C=this._;U=true;b:while(U===true){U=false;f=this.A-this._;p=true;c:while(p===true){p=false;if(!(b(this,a.a_15,4)===0?false:true)){break c}break b}this._=this.A-f;o=true;c:while(o===true){o=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break c}break b}this._=this.A-f;D=true;c:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_12,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f;n=true;c:while(n===true){n=false;if(!(!c(this)?false:b(this,a.a_13,4)===0?false:true)){break c}break b}this._=this.A-f;q=true;c:while(q===true){q=false;if(!(!c(this)?false:b(this,a.a_14,4)===0?false:!h(this)?false:true)){break c}break b}this._=this.A-f}if(!(!c(this)?false:b(this,a.a_22,4)===0?false:!h(this)?false:true)){this._=this.A-V;break 
d}}}this.B=this._;return!e(this,'')?false:true};a.prototype.r_stem_nominal_verb_suffixes=a.prototype.y;function J(d){var f;var k;var i;var Z;var l;var Y;var m;var X;var W;var j;var s;var t;var u;var v;var w;var x;var y;var z;var A;var B;var C;var n;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var S;var T;var U;var V;var q;var p;var D;var o;var r;d.C=d._;d.B_continue_stemming_noun_suffixes=true;s=true;a:while(s===true){s=false;f=d.A-d._;t=true;d:while(t===true){t=false;u=true;b:while(u===true){u=false;k=d.A-d._;v=true;c:while(v===true){v=false;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;w=true;c:while(w===true){w=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;x=true;c:while(x===true){x=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-k;if(!(!g(d,3,'ken')?false:!h(d)?false:true)){break d}}break a}d._=d.A-f;y=true;c:while(y===true){y=false;if(!(b(d,a.a_19,2)===0?false:true)){break c}z=true;b:while(z===true){z=false;i=d.A-d._;A=true;d:while(A===true){A=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-i;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}break b}d._=d.A-i;C=true;d:while(C===true){C=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i;n=true;d:while(n===true){n=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-i;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-i}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){break c}break a}d._=d.A-f;F=true;c:while(F===true){F=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}Z=d.A-d._;G=true;d:while(G===true){G=false;d.C=d._;H=true;b:while(H===true){H=false;l=d.A-d._;I=true;e:while(I===true){I=false;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){break e}break b}d._=d.A-l;J=true;e:while(J===true){J=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;K=true;e:while(K===true){K=false;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break e}break b}d._=d.A-l;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-Z;break d}}}d.B_continue_stemming_noun_suffixes=false;break a}d._=d.A-f;L=true;b:while(L===true){L=false;if(!(!c(d)?false:b(d,a.a_17,4)===0?false:true)){break b}M=true;c:while(M===true){M=false;Y=d.A-d._;N=true;d:while(N===true){N=false;if(!(!c(d)?false:b(d,a.a_20,32)===0?false:!h(d)?false:true)){break d}break c}d._=d.A-Y;if(!(b(d,a.a_21,8)===0?false:!h(d)?false:true)){break b}}break a}d._=d.A-f;O=true;c:while(O===true){O=false;P=true;b:while(P===true){P=false;m=d.A-d._;Q=true;d:while(Q===true){Q=false;if(!(b(d,a.a_15,4)===0?false:true)){break d}break b}d._=d.A-m;R=true;d:while(R===true){R=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break d}break b}d._=d.A-m;S=true;d:while(S===true){S=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break d}break b}d._=d.A-m;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}X=d.A-d._;T=true;b:while(T===true){T=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-X;break b}}break a}d._=d.A-f;if(!(!c(d)?false:b(d,a.a_18,8)===0?false:true)){return false}d.B=d._;if(!e(d,'')){return false}W=d.A-d._;U=true;d:while(U===true){U=false;d.C=d._;V=true;b:while(V===true){V=false;j=d.A-d._;q=true;c:while(q===true){q=false;if(!(b(d,a.a_15,4)===0?false:true)){break c}break b}d._=d.A-j;p=true;c:while(p===true){p=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break c}break 
b}d._=d.A-j;D=true;c:while(D===true){D=false;if(!(!c(d)?false:b(d,a.a_12,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j;o=true;c:while(o===true){o=false;if(!(!c(d)?false:b(d,a.a_13,4)===0?false:true)){break c}break b}d._=d.A-j;r=true;c:while(r===true){r=false;if(!(!c(d)?false:b(d,a.a_14,4)===0?false:!h(d)?false:true)){break c}break b}d._=d.A-j}if(!(!c(d)?false:b(d,a.a_22,4)===0?false:!h(d)?false:true)){d._=d.A-W;break d}}}d.B=d._;return!e(d,'')?false:true};a.prototype.__=function(){var z;var N;var M;var L;var p;var K;var r;var J;var t;var u;var v;var w;var x;var y;var d;var A;var B;var C;var D;var E;var F;var G;var H;var I;var s;var q;var n;var m;var j;var h;this.C=this._;if(!(!g(this,2,'ki')?false:true)){return false}w=true;b:while(w===true){w=false;z=this.A-this._;x=true;c:while(x===true){x=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}N=this.A-this._;y=true;f:while(y===true){y=false;this.C=this._;d=true;e:while(d===true){d=false;M=this.A-this._;A=true;d:while(A===true){A=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}L=this.A-this._;B=true;a:while(B===true){B=false;if(!i(this)){this._=this.A-L;break a}}break e}this._=this.A-M;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){this._=this.A-N;break f}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;C=true;a:while(C===true){C=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break a}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break a}}}}break b}this._=this.A-z;D=true;d:while(D===true){D=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}K=this.A-this._;E=true;e:while(E===true){E=false;this.C=this._;F=true;a:while(F===true){F=false;r=this.A-this._;G=true;c:while(G===true){G=false;if(!(b(this,a.a_1,2)===0?false:true)){break 
c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-r;H=true;f:while(H===true){H=false;this.C=this._;I=true;g:while(I===true){I=false;J=this.A-this._;s=true;c:while(s===true){s=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break g}this._=this.A-J;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}t=this.A-this._;q=true;c:while(q===true){q=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-t;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-t;break c}}break a}this._=this.A-r;if(!i(this)){this._=this.A-K;break e}}}break b}this._=this.A-z;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){return false}n=true;a:while(n===true){n=false;u=this.A-this._;m=true;c:while(m===true){m=false;if(!(b(this,a.a_1,2)===0?false:true)){break c}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-u;j=true;d:while(j===true){j=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break d}this.B=this._;if(!e(this,'')){return false}v=this.A-this._;h=true;c:while(h===true){h=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-v;break c}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-v;break c}}break a}this._=this.A-u;if(!i(this)){return false}}}return true};a.prototype.r_stem_suffix_chain_before_ki=a.prototype.__;function i(d){var j;var O;var N;var M;var q;var L;var s;var K;var u;var v;var w;var x;var y;var z;var h;var B;var C;var D;var E;var F;var G;var H;var I;var J;var t;var r;var p;var n;var m;var A;d.C=d._;if(!(!g(d,2,'ki')?false:true)){return false}x=true;b:while(x===true){x=false;j=d.A-d._;y=true;c:while(y===true){y=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return 
false}O=d.A-d._;z=true;f:while(z===true){z=false;d.C=d._;h=true;e:while(h===true){h=false;N=d.A-d._;B=true;d:while(B===true){B=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}M=d.A-d._;C=true;a:while(C===true){C=false;if(!i(d)){d._=d.A-M;break a}}break e}d._=d.A-N;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){d._=d.A-O;break f}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;D=true;a:while(D===true){D=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break a}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break a}}}}break b}d._=d.A-j;E=true;d:while(E===true){E=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}L=d.A-d._;F=true;e:while(F===true){F=false;d.C=d._;G=true;a:while(G===true){G=false;s=d.A-d._;H=true;c:while(H===true){H=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-s;I=true;f:while(I===true){I=false;d.C=d._;J=true;g:while(J===true){J=false;K=d.A-d._;t=true;c:while(t===true){t=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break g}d._=d.A-K;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}u=d.A-d._;r=true;c:while(r===true){r=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-u;break c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-u;break c}}break a}d._=d.A-s;if(!i(d)){d._=d.A-L;break e}}}break b}d._=d.A-j;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){return false}p=true;a:while(p===true){p=false;v=d.A-d._;n=true;c:while(n===true){n=false;if(!(b(d,a.a_1,2)===0?false:true)){break c}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-v;m=true;d:while(m===true){m=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break d}d.B=d._;if(!e(d,'')){return false}w=d.A-d._;A=true;c:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-w;break 
c}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-w;break c}}break a}d._=d.A-v;if(!i(d)){return false}}}return true};a.prototype.z=function(){var d;var ar;var S;var j;var av;var m;var aq;var n;var p;var ax;var ay;var q;var ap;var r;var s;var as;var at;var au;var t;var aw;var u;var v;var w;var aA;var aB;var ao;var x;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var g;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var aC;var az;y=true;a:while(y===true){y=false;d=this.A-this._;z=true;b:while(z===true){z=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}ar=this.A-this._;A=true;c:while(A===true){A=false;if(!i(this)){this._=this.A-ar;break c}}break a}this._=this.A-d;B=true;g:while(B===true){B=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_11,2)===0?false:!o(this)?false:true)){break g}this.B=this._;if(!e(this,'')){return false}S=this.A-this._;C=true;b:while(C===true){C=false;D=true;c:while(D===true){D=false;j=this.A-this._;E=true;d:while(E===true){E=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-j;F=true;f:while(F===true){F=false;this.C=this._;G=true;d:while(G===true){G=false;av=this.A-this._;H=true;e:while(H===true){H=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}break d}this._=this.A-av;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return false}m=this.A-this._;I=true;d:while(I===true){I=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-m;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-m;break d}}break 
c}aC=this._=this.A-j;this.C=aC;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-S;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-S;break b}}}break a}this._=this.A-d;J=true;b:while(J===true){J=false;this.C=this._;K=true;d:while(K===true){K=false;aq=this.A-this._;L=true;c:while(L===true){L=false;if(!(!c(this)?false:b(this,a.a_7,2)===0?false:true)){break c}break d}this._=this.A-aq;if(!(!c(this)?false:b(this,a.a_5,2)===0?false:true)){break b}}M=true;c:while(M===true){M=false;n=this.A-this._;N=true;d:while(N===true){N=false;if(!(b(this,a.a_1,2)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}break c}this._=this.A-n;O=true;e:while(O===true){O=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}p=this.A-this._;P=true;d:while(P===true){P=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-p;break d}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-p;break d}}break c}this._=this.A-n;if(!i(this)){break b}}break a}this._=this.A-d;Q=true;c:while(Q===true){Q=false;this.C=this._;R=true;b:while(R===true){R=false;ax=this.A-this._;g=true;d:while(g===true){g=false;if(!(!c(this)?false:b(this,a.a_9,2)===0?false:true)){break d}break b}this._=this.A-ax;if(!(!c(this)?false:b(this,a.a_2,4)===0?false:true)){break c}}T=true;d:while(T===true){T=false;ay=this.A-this._;U=true;e:while(U===true){U=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}q=this.A-this._;V=true;b:while(V===true){V=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-q;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-q;break b}}break d}this._=this.A-ay;if(!(b(this,a.a_1,2)===0?false:true)){break c}}break 
a}this._=this.A-d;W=true;d:while(W===true){W=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_8,4)===0?false:true)){break d}this.B=this._;if(!e(this,'')){return false}ap=this.A-this._;X=true;e:while(X===true){X=false;this.C=this._;Y=true;c:while(Y===true){Y=false;r=this.A-this._;Z=true;f:while(Z===true){Z=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break f}this.B=this._;if(!e(this,'')){return false}s=this.A-this._;_=true;b:while(_===true){_=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-s;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-s;break b}}break c}this._=this.A-r;$=true;b:while($===true){$=false;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}as=this.A-this._;a0=true;f:while(a0===true){a0=false;if(!i(this)){this._=this.A-as;break f}}break c}this._=this.A-r;if(!i(this)){this._=this.A-ap;break e}}}break a}this._=this.A-d;a1=true;d:while(a1===true){a1=false;this.C=this._;a2=true;b:while(a2===true){a2=false;at=this.A-this._;a3=true;c:while(a3===true){a3=false;if(!(!c(this)?false:b(this,a.a_3,4)===0?false:!o(this)?false:true)){break c}break b}this._=this.A-at;if(!(!c(this)?false:b(this,a.a_10,2)===0?false:!h(this)?false:true)){break d}}this.B=this._;if(!e(this,'')){return false}au=this.A-this._;a4=true;e:while(a4===true){a4=false;a5=true;c:while(a5===true){a5=false;t=this.A-this._;a6=true;b:while(a6===true){a6=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){break b}break c}this._=this.A-t;a7=true;f:while(a7===true){a7=false;this.C=this._;a8=true;b:while(a8===true){a8=false;aw=this.A-this._;a9=true;g:while(a9===true){a9=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break g}break b}this._=this.A-aw;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){break f}}this.B=this._;if(!e(this,'')){return 
false}u=this.A-this._;aa=true;b:while(aa===true){aa=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-u;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-u;break b}}break c}this._=this.A-t;if(!i(this)){this._=this.A-au;break e}}}break a}this._=this.A-d;ab=true;b:while(ab===true){ab=false;this.C=this._;if(!(b(this,a.a_1,2)===0?false:true)){break b}this.B=this._;if(!e(this,'')){return false}break a}this._=this.A-d;ac=true;b:while(ac===true){ac=false;if(!i(this)){break b}break a}this._=this.A-d;ad=true;c:while(ad===true){ad=false;this.C=this._;ae=true;b:while(ae===true){ae=false;v=this.A-this._;af=true;d:while(af===true){af=false;if(!(!c(this)?false:b(this,a.a_6,4)===0?false:true)){break d}break b}this._=this.A-v;ag=true;d:while(ag===true){ag=false;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!h(this)?false:true)){break d}break b}this._=this.A-v;if(!(!c(this)?false:b(this,a.a_4,2)===0?false:!h(this)?false:true)){break c}}this.B=this._;if(!e(this,'')){return false}w=this.A-this._;ah=true;b:while(ah===true){ah=false;this.C=this._;ai=true;d:while(ai===true){ai=false;aA=this.A-this._;aj=true;e:while(aj===true){aj=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break e}this.B=this._;if(!e(this,'')){return false}aB=this.A-this._;ak=true;f:while(ak===true){ak=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-aB;break f}}break d}this._=this.A-aA;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-w;break b}}this.B=this._;if(!e(this,'')){return false}this.C=this._;if(!i(this)){this._=this.A-w;break b}}break a}az=this._=this.A-d;this.C=az;al=true;b:while(al===true){al=false;ao=this.A-this._;am=true;c:while(am===true){am=false;if(!(b(this,a.a_0,10)===0?false:!k(this)?false:true)){break c}break b}this._=this.A-ao;if(!(!c(this)?false:!f(this,a.g_U,105,305)?false:!l(this)?false:true)){return false}}this.B=this._;if(!e(this,'')){return 
false}x=this.A-this._;an=true;b:while(an===true){an=false;this.C=this._;if(!(!c(this)?false:b(this,a.a_16,2)===0?false:true)){this._=this.A-x;break b}this.B=this._;if(!e(this,'')){return false}if(!i(this)){this._=this.A-x;break b}}}return true};a.prototype.r_stem_noun_suffixes=a.prototype.z;function L(d){var g;var as;var S;var m;var aw;var n;var ar;var p;var q;var ay;var az;var r;var aq;var s;var t;var at;var au;var av;var u;var ax;var v;var w;var x;var aB;var aC;var ap;var y;var z;var A;var B;var C;var D;var E;var F;var G;var H;var I;var J;var K;var L;var M;var N;var O;var P;var Q;var R;var j;var T;var U;var V;var W;var X;var Y;var Z;var _;var $;var a0;var a1;var a2;var a3;var a4;var a5;var a6;var a7;var a8;var a9;var aa;var ab;var ac;var ad;var ae;var af;var ag;var ah;var ai;var aj;var ak;var al;var am;var an;var ao;var aD;var aA;z=true;a:while(z===true){z=false;g=d.A-d._;A=true;b:while(A===true){A=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}as=d.A-d._;B=true;c:while(B===true){B=false;if(!i(d)){d._=d.A-as;break c}}break a}d._=d.A-g;C=true;g:while(C===true){C=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_11,2)===0?false:!o(d)?false:true)){break g}d.B=d._;if(!e(d,'')){return false}S=d.A-d._;D=true;b:while(D===true){D=false;E=true;c:while(E===true){E=false;m=d.A-d._;F=true;d:while(F===true){F=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-m;G=true;f:while(G===true){G=false;d.C=d._;H=true;d:while(H===true){H=false;aw=d.A-d._;I=true;e:while(I===true){I=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}break d}d._=d.A-aw;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}n=d.A-d._;J=true;d:while(J===true){J=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-n;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-n;break d}}break 
c}aD=d._=d.A-m;d.C=aD;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-S;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-S;break b}}}break a}d._=d.A-g;K=true;b:while(K===true){K=false;d.C=d._;L=true;d:while(L===true){L=false;ar=d.A-d._;M=true;c:while(M===true){M=false;if(!(!c(d)?false:b(d,a.a_7,2)===0?false:true)){break c}break d}d._=d.A-ar;if(!(!c(d)?false:b(d,a.a_5,2)===0?false:true)){break b}}N=true;c:while(N===true){N=false;p=d.A-d._;O=true;d:while(O===true){O=false;if(!(b(d,a.a_1,2)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}break c}d._=d.A-p;P=true;e:while(P===true){P=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}q=d.A-d._;Q=true;d:while(Q===true){Q=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-q;break d}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-q;break d}}break c}d._=d.A-p;if(!i(d)){break b}}break a}d._=d.A-g;R=true;c:while(R===true){R=false;d.C=d._;j=true;b:while(j===true){j=false;ay=d.A-d._;T=true;d:while(T===true){T=false;if(!(!c(d)?false:b(d,a.a_9,2)===0?false:true)){break d}break b}d._=d.A-ay;if(!(!c(d)?false:b(d,a.a_2,4)===0?false:true)){break c}}U=true;d:while(U===true){U=false;az=d.A-d._;V=true;e:while(V===true){V=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}r=d.A-d._;W=true;b:while(W===true){W=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-r;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-r;break b}}break d}d._=d.A-az;if(!(b(d,a.a_1,2)===0?false:true)){break c}}break a}d._=d.A-g;X=true;d:while(X===true){X=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_8,4)===0?false:true)){break d}d.B=d._;if(!e(d,'')){return false}aq=d.A-d._;Y=true;e:while(Y===true){Y=false;d.C=d._;Z=true;c:while(Z===true){Z=false;s=d.A-d._;_=true;f:while(_===true){_=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break f}d.B=d._;if(!e(d,'')){return 
false}t=d.A-d._;$=true;b:while($===true){$=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-t;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-t;break b}}break c}d._=d.A-s;a0=true;b:while(a0===true){a0=false;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}at=d.A-d._;a1=true;f:while(a1===true){a1=false;if(!i(d)){d._=d.A-at;break f}}break c}d._=d.A-s;if(!i(d)){d._=d.A-aq;break e}}}break a}d._=d.A-g;a2=true;d:while(a2===true){a2=false;d.C=d._;a3=true;b:while(a3===true){a3=false;au=d.A-d._;a4=true;c:while(a4===true){a4=false;if(!(!c(d)?false:b(d,a.a_3,4)===0?false:!o(d)?false:true)){break c}break b}d._=d.A-au;if(!(!c(d)?false:b(d,a.a_10,2)===0?false:!h(d)?false:true)){break d}}d.B=d._;if(!e(d,'')){return false}av=d.A-d._;a5=true;e:while(a5===true){a5=false;a6=true;c:while(a6===true){a6=false;u=d.A-d._;a7=true;b:while(a7===true){a7=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){break b}break c}d._=d.A-u;a8=true;f:while(a8===true){a8=false;d.C=d._;a9=true;b:while(a9===true){a9=false;ax=d.A-d._;aa=true;g:while(aa===true){aa=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break g}break b}d._=d.A-ax;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){break f}}d.B=d._;if(!e(d,'')){return false}v=d.A-d._;ab=true;b:while(ab===true){ab=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-v;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-v;break b}}break c}d._=d.A-u;if(!i(d)){d._=d.A-av;break e}}}break a}d._=d.A-g;ac=true;b:while(ac===true){ac=false;d.C=d._;if(!(b(d,a.a_1,2)===0?false:true)){break b}d.B=d._;if(!e(d,'')){return false}break a}d._=d.A-g;ad=true;b:while(ad===true){ad=false;if(!i(d)){break b}break a}d._=d.A-g;ae=true;c:while(ae===true){ae=false;d.C=d._;af=true;b:while(af===true){af=false;w=d.A-d._;ag=true;d:while(ag===true){ag=false;if(!(!c(d)?false:b(d,a.a_6,4)===0?false:true)){break 
d}break b}d._=d.A-w;ah=true;d:while(ah===true){ah=false;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!h(d)?false:true)){break d}break b}d._=d.A-w;if(!(!c(d)?false:b(d,a.a_4,2)===0?false:!h(d)?false:true)){break c}}d.B=d._;if(!e(d,'')){return false}x=d.A-d._;ai=true;b:while(ai===true){ai=false;d.C=d._;aj=true;d:while(aj===true){aj=false;aB=d.A-d._;ak=true;e:while(ak===true){ak=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break e}d.B=d._;if(!e(d,'')){return false}aC=d.A-d._;al=true;f:while(al===true){al=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-aC;break f}}break d}d._=d.A-aB;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-x;break b}}d.B=d._;if(!e(d,'')){return false}d.C=d._;if(!i(d)){d._=d.A-x;break b}}break a}aA=d._=d.A-g;d.C=aA;am=true;b:while(am===true){am=false;ap=d.A-d._;an=true;c:while(an===true){an=false;if(!(b(d,a.a_0,10)===0?false:!k(d)?false:true)){break c}break b}d._=d.A-ap;if(!(!c(d)?false:!f(d,a.g_U,105,305)?false:!l(d)?false:true)){return false}}d.B=d._;if(!e(d,'')){return false}y=d.A-d._;ao=true;b:while(ao===true){ao=false;d.C=d._;if(!(!c(d)?false:b(d,a.a_16,2)===0?false:true)){d._=d.A-y;break b}d.B=d._;if(!e(d,'')){return false}if(!i(d)){d._=d.A-y;break b}}}return true};a.prototype.w=function(){var c;this.C=this._;c=b(this,a.a_23,4);if(c===0){return false}this.B=this._;switch(c){case 0:return false;case 1:if(!e(this,'p')){return false}break;case 2:if(!e(this,'ç')){return false}break;case 3:if(!e(this,'t')){return false}break;case 4:if(!e(this,'k')){return false}break}return true};a.prototype.r_post_process_last_consonants=a.prototype.w;function w(c){var d;c.C=c._;d=b(c,a.a_23,4);if(d===0){return false}c.B=c._;switch(d){case 0:return false;case 1:if(!e(c,'p')){return false}break;case 2:if(!e(c,'ç')){return false}break;case 3:if(!e(c,'t')){return false}break;case 4:if(!e(c,'k')){return false}break}return true};a.prototype.N=function(){var L;var _;var i;var Y;var B;var W;var K;var l;var S;var Q;var p;var O;var 
M;var s;var U;var u;var v;var w;var x;var y;var z;var A;var b;var C;var D;var j;var F;var G;var H;var I;var J;var E;var t;var r;var N;var q;var P;var o;var R;var m;var T;var k;var V;var h;var X;var e;var Z;var d;var $;var a0;var a1;var c;L=this.A-this._;u=true;a:while(u===true){u=false;_=this.A-this._;v=true;b:while(v===true){v=false;if(!g(this,1,'d')){break b}break a}this._=this.A-_;if(!g(this,1,'g')){return false}}this._=this.A-L;w=true;a:while(w===true){w=false;i=this.A-this._;x=true;b:while(x===true){x=false;Y=this.A-this._;d:while(true){B=this.A-this._;y=true;c:while(y===true){y=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-B;break d}V=this._=this.A-B;if(V<=this.D){break b}this._--}z=true;c:while(z===true){z=false;W=this.A-this._;A=true;d:while(A===true){A=false;if(!g(this,1,'a')){break d}break c}this._=this.A-W;if(!g(this,1,'ı')){break b}}h=this._=this.A-Y;b=h;N=h;q=n(this,h,h,'ı');if(h<=this.B){this.B+=q|0}if(N<=this.C){this.C+=q|0}this._=b;break a}this._=this.A-i;C=true;b:while(C===true){C=false;K=this.A-this._;c:while(true){l=this.A-this._;D=true;d:while(D===true){D=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-l;break c}X=this._=this.A-l;if(X<=this.D){break b}this._--}j=true;c:while(j===true){j=false;S=this.A-this._;F=true;d:while(F===true){F=false;if(!g(this,1,'e')){break d}break c}this._=this.A-S;if(!g(this,1,'i')){break b}}e=this._=this.A-K;b=e;P=e;o=n(this,e,e,'i');if(e<=this.B){this.B+=o|0}if(P<=this.C){this.C+=o|0}this._=b;break a}this._=this.A-i;G=true;b:while(G===true){G=false;Q=this.A-this._;c:while(true){p=this.A-this._;H=true;d:while(H===true){H=false;if(!f(this,a.g_vowel,97,305)){break d}this._=this.A-p;break c}Z=this._=this.A-p;if(Z<=this.D){break b}this._--}I=true;c:while(I===true){I=false;O=this.A-this._;J=true;d:while(J===true){J=false;if(!g(this,1,'o')){break d}break c}this._=this.A-O;if(!g(this,1,'u')){break 
b}}d=this._=this.A-Q;b=d;R=d;m=n(this,d,d,'u');if(d<=this.B){this.B+=m|0}if(R<=this.C){this.C+=m|0}this._=b;break a}a1=this._=(a0=this.A)-i;M=a0-a1;b:while(true){s=this.A-this._;E=true;c:while(E===true){E=false;if(!f(this,a.g_vowel,97,305)){break c}this._=this.A-s;break b}$=this._=this.A-s;if($<=this.D){return false}this._--}t=true;b:while(t===true){t=false;U=this.A-this._;r=true;c:while(r===true){r=false;if(!g(this,1,'ö')){break c}break b}this._=this.A-U;if(!g(this,1,'ü')){return false}}c=this._=this.A-M;b=c;T=c;k=n(this,c,c,'ü');if(c<=this.B){this.B+=k|0}if(T<=this.C){this.C+=k|0}this._=b}return true};a.prototype.r_append_U_to_stems_ending_with_d_or_g=a.prototype.N;function z(b){var $;var Z;var j;var X;var F;var L;var T;var m;var R;var P;var q;var N;var V;var t;var M;var v;var w;var x;var y;var z;var A;var B;var c;var D;var E;var C;var G;var H;var I;var J;var K;var u;var s;var r;var O;var p;var Q;var o;var S;var l;var U;var k;var W;var i;var Y;var h;var _;var e;var a0;var a1;var a2;var d;$=b.A-b._;v=true;a:while(v===true){v=false;Z=b.A-b._;w=true;b:while(w===true){w=false;if(!g(b,1,'d')){break b}break a}b._=b.A-Z;if(!g(b,1,'g')){return false}}b._=b.A-$;x=true;a:while(x===true){x=false;j=b.A-b._;y=true;b:while(y===true){y=false;X=b.A-b._;d:while(true){F=b.A-b._;z=true;c:while(z===true){z=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-F;break d}W=b._=b.A-F;if(W<=b.D){break b}b._--}A=true;c:while(A===true){A=false;L=b.A-b._;B=true;d:while(B===true){B=false;if(!g(b,1,'a')){break d}break c}b._=b.A-L;if(!g(b,1,'ı')){break b}}i=b._=b.A-X;c=i;O=i;p=n(b,i,i,'ı');if(i<=b.B){b.B+=p|0}if(O<=b.C){b.C+=p|0}b._=c;break a}b._=b.A-j;D=true;b:while(D===true){D=false;T=b.A-b._;c:while(true){m=b.A-b._;E=true;d:while(E===true){E=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-m;break c}Y=b._=b.A-m;if(Y<=b.D){break b}b._--}C=true;c:while(C===true){C=false;R=b.A-b._;G=true;d:while(G===true){G=false;if(!g(b,1,'e')){break d}break c}b._=b.A-R;if(!g(b,1,'i')){break 
b}}h=b._=b.A-T;c=h;Q=h;o=n(b,h,h,'i');if(h<=b.B){b.B+=o|0}if(Q<=b.C){b.C+=o|0}b._=c;break a}b._=b.A-j;H=true;b:while(H===true){H=false;P=b.A-b._;c:while(true){q=b.A-b._;I=true;d:while(I===true){I=false;if(!f(b,a.g_vowel,97,305)){break d}b._=b.A-q;break c}_=b._=b.A-q;if(_<=b.D){break b}b._--}J=true;c:while(J===true){J=false;N=b.A-b._;K=true;d:while(K===true){K=false;if(!g(b,1,'o')){break d}break c}b._=b.A-N;if(!g(b,1,'u')){break b}}e=b._=b.A-P;c=e;S=e;l=n(b,e,e,'u');if(e<=b.B){b.B+=l|0}if(S<=b.C){b.C+=l|0}b._=c;break a}a2=b._=(a1=b.A)-j;V=a1-a2;b:while(true){t=b.A-b._;u=true;c:while(u===true){u=false;if(!f(b,a.g_vowel,97,305)){break c}b._=b.A-t;break b}a0=b._=b.A-t;if(a0<=b.D){return false}b._--}s=true;b:while(s===true){s=false;M=b.A-b._;r=true;c:while(r===true){r=false;if(!g(b,1,'ö')){break c}break b}b._=b.A-M;if(!g(b,1,'ü')){return false}}d=b._=b.A-V;c=d;U=d;k=n(b,d,d,'ü');if(d<=b.B){b.B+=k|0}if(U<=b.C){b.C+=k|0}b._=c}return true};a.prototype.v=function(){var e;var f;var b;var c;var d;e=this._;b=2;a:while(true){f=this._;c=true;b:while(c===true){c=false;c:while(true){d=true;d:while(d===true){d=false;if(!v(this,a.g_vowel,97,305)){break d}break c}if(this._>=this.A){break b}this._++}b--;continue a}this._=f;break a}if(b>0){return false}this._=e;return true};a.prototype.r_more_than_one_syllable_word=a.prototype.v;function N(b){var f;var g;var c;var d;var e;f=b._;c=2;a:while(true){g=b._;d=true;b:while(d===true){d=false;c:while(true){e=true;d:while(e===true){e=false;if(!v(b,a.g_vowel,97,305)){break d}break c}if(b._>=b.A){break b}b._++}c--;continue a}b._=g;break a}if(c>0){return false}b._=f;return true};a.prototype.P=function(){var f;var g;var h;var b;var a;var c;var d;var i;var j;var e;b=true;b:while(b===true){b=false;f=this._;a=true;a:while(a===true){a=false;g=this._;c:while(true){c=true;d:while(c===true){c=false;if(!s(this,2,'ad')){break d}break c}if(this._>=this.A){break a}this._++}i=this.I_strlen=2;if(!(i===this.A)){break a}this._=g;break 
b}j=this._=f;h=j;a:while(true){d=true;c:while(d===true){d=false;if(!s(this,5,'soyad')){break c}break a}if(this._>=this.A){return false}this._++}e=this.I_strlen=5;if(!(e===this.A)){return false}this._=h}return true};a.prototype.r_is_reserved_word=a.prototype.P;function x(a){var g;var h;var i;var c;var b;var d;var e;var j;var k;var f;c=true;b:while(c===true){c=false;g=a._;b=true;a:while(b===true){b=false;h=a._;c:while(true){d=true;d:while(d===true){d=false;if(!s(a,2,'ad')){break d}break c}if(a._>=a.A){break a}a._++}j=a.I_strlen=2;if(!(j===a.A)){break a}a._=h;break b}k=a._=g;i=k;a:while(true){e=true;c:while(e===true){e=false;if(!s(a,5,'soyad')){break c}break a}if(a._>=a.A){return false}a._++}f=a.I_strlen=5;if(!(f===a.A)){return false}a._=i}return true};a.prototype.x=function(){var d;var e;var a;var b;var c;var f;var g;var h;d=this._;a=true;a:while(a===true){a=false;if(!x(this)){break a}return false}f=this._=d;this.D=f;h=this._=g=this.A;e=g-h;b=true;a:while(b===true){b=false;if(!z(this)){break a}}this._=this.A-e;c=true;a:while(c===true){c=false;if(!w(this)){break a}}this._=this.D;return true};a.prototype.r_postlude=a.prototype.x;function O(a){var e;var f;var b;var c;var d;var g;var h;var i;e=a._;b=true;a:while(b===true){b=false;if(!x(a)){break a}return false}g=a._=e;a.D=g;i=a._=h=a.A;f=h-i;c=true;a:while(c===true){c=false;if(!z(a)){break a}}a._=a.A-f;d=true;a:while(d===true){d=false;if(!w(a)){break a}}a._=a.D;return true};a.prototype.H=function(){var c;var a;var b;var d;var e;if(!N(this)){return false}this.D=this._;e=this._=d=this.A;c=d-e;a=true;a:while(a===true){a=false;if(!J(this)){break a}}this._=this.A-c;if(!this.B_continue_stemming_noun_suffixes){return false}b=true;a:while(b===true){b=false;if(!L(this)){break a}}this._=this.D;return!O(this)?false:true};a.prototype.stem=a.prototype.H;a.prototype.L=function(b){return b instanceof a};a.prototype.equals=a.prototype.L;a.prototype.M=function(){var c;var a;var b;var 
d;c='TurkishStemmer';a=0;for(b=0;b<c.length;b++){d=c.charCodeAt(b);a=(a<<5)-a+d;a=a&a}return a|0};a.prototype.hashCode=a.prototype.M;a.serialVersionUID=1;j(a,'methodObject',function(){return new a});j(a,'a_0',function(){return[new d('m',-1,-1),new d('n',-1,-1),new d('miz',-1,-1),new d('niz',-1,-1),new d('muz',-1,-1),new d('nuz',-1,-1),new d('müz',-1,-1),new d('nüz',-1,-1),new d('mız',-1,-1),new d('nız',-1,-1)]});j(a,'a_1',function(){return[new d('leri',-1,-1),new d('ları',-1,-1)]});j(a,'a_2',function(){return[new d('ni',-1,-1),new d('nu',-1,-1),new d('nü',-1,-1),new d('nı',-1,-1)]});j(a,'a_3',function(){return[new d('in',-1,-1),new d('un',-1,-1),new d('ün',-1,-1),new d('ın',-1,-1)]});j(a,'a_4',function(){return[new d('a',-1,-1),new d('e',-1,-1)]});j(a,'a_5',function(){return[new d('na',-1,-1),new d('ne',-1,-1)]});j(a,'a_6',function(){return[new d('da',-1,-1),new d('ta',-1,-1),new d('de',-1,-1),new d('te',-1,-1)]});j(a,'a_7',function(){return[new d('nda',-1,-1),new d('nde',-1,-1)]});j(a,'a_8',function(){return[new d('dan',-1,-1),new d('tan',-1,-1),new d('den',-1,-1),new d('ten',-1,-1)]});j(a,'a_9',function(){return[new d('ndan',-1,-1),new d('nden',-1,-1)]});j(a,'a_10',function(){return[new d('la',-1,-1),new d('le',-1,-1)]});j(a,'a_11',function(){return[new d('ca',-1,-1),new d('ce',-1,-1)]});j(a,'a_12',function(){return[new d('im',-1,-1),new d('um',-1,-1),new d('üm',-1,-1),new d('ım',-1,-1)]});j(a,'a_13',function(){return[new d('sin',-1,-1),new d('sun',-1,-1),new d('sün',-1,-1),new d('sın',-1,-1)]});j(a,'a_14',function(){return[new d('iz',-1,-1),new d('uz',-1,-1),new d('üz',-1,-1),new d('ız',-1,-1)]});j(a,'a_15',function(){return[new d('siniz',-1,-1),new d('sunuz',-1,-1),new d('sünüz',-1,-1),new d('sınız',-1,-1)]});j(a,'a_16',function(){return[new d('lar',-1,-1),new d('ler',-1,-1)]});j(a,'a_17',function(){return[new d('niz',-1,-1),new d('nuz',-1,-1),new d('nüz',-1,-1),new d('nız',-1,-1)]});j(a,'a_18',function(){return[new d('dir',-1,-1),new d('tir',-1,-1),new 
d('dur',-1,-1),new d('tur',-1,-1),new d('dür',-1,-1),new d('tür',-1,-1),new d('dır',-1,-1),new d('tır',-1,-1)]});j(a,'a_19',function(){return[new d('casına',-1,-1),new d('cesine',-1,-1)]});j(a,'a_20',function(){return[new d('di',-1,-1),new d('ti',-1,-1),new d('dik',-1,-1),new d('tik',-1,-1),new d('duk',-1,-1),new d('tuk',-1,-1),new d('dük',-1,-1),new d('tük',-1,-1),new d('dık',-1,-1),new d('tık',-1,-1),new d('dim',-1,-1),new d('tim',-1,-1),new d('dum',-1,-1),new d('tum',-1,-1),new d('düm',-1,-1),new d('tüm',-1,-1),new d('dım',-1,-1),new d('tım',-1,-1),new d('din',-1,-1),new d('tin',-1,-1),new d('dun',-1,-1),new d('tun',-1,-1),new d('dün',-1,-1),new d('tün',-1,-1),new d('dın',-1,-1),new d('tın',-1,-1),new d('du',-1,-1),new d('tu',-1,-1),new d('dü',-1,-1),new d('tü',-1,-1),new d('dı',-1,-1),new d('tı',-1,-1)]});j(a,'a_21',function(){return[new d('sa',-1,-1),new d('se',-1,-1),new d('sak',-1,-1),new d('sek',-1,-1),new d('sam',-1,-1),new d('sem',-1,-1),new d('san',-1,-1),new d('sen',-1,-1)]});j(a,'a_22',function(){return[new d('miş',-1,-1),new d('muş',-1,-1),new d('müş',-1,-1),new d('mış',-1,-1)]});j(a,'a_23',function(){return[new d('b',-1,1),new d('c',-1,2),new d('d',-1,3),new d('ğ',-1,4)]});j(a,'g_vowel',function(){return[17,65,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,32,8,0,0,0,0,0,0,1]});j(a,'g_U',function(){return[1,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0,0,0,0,0,1]});j(a,'g_vowel1',function(){return[1,64,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel2',function(){return[17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,130]});j(a,'g_vowel3',function(){return[1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]});j(a,'g_vowel4',function(){return[17]});j(a,'g_vowel5',function(){return[65]});j(a,'g_vowel6',function(){return[65]});var y={'src/stemmer.jsx':{Stemmer:u},'src/turkish-stemmer.jsx':{TurkishStemmer:a}}}(JSX))
var Stemmer = JSX.require("src/turkish-stemmer.jsx").TurkishStemmer;
@@ -24,10 +28,12 @@ class SearchTurkish(SearchLanguage):
language_name = 'Turkish'
js_stemmer_rawcode = 'turkish-stemmer.js'
js_stemmer_code = js_stemmer
- stopwords = []
+ stopwords = set() # type: Set[unicode]
def init(self, options):
+ # type: (Dict) -> None
self.stemmer = snowballstemmer.stemmer('turkish')
def stem(self, word):
+ # type: (unicode) -> unicode
return self.stemmer.stemWord(word)
diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py
index b585ee664..bf01812e0 100644
--- a/sphinx/search/zh.py
+++ b/sphinx/search/zh.py
@@ -13,13 +13,7 @@ import os
import re
from sphinx.search import SearchLanguage
-
-try:
- from Stemmer import Stemmer as PyStemmer
- PYSTEMMER = True
-except ImportError:
- from sphinx.util.stemmer import PorterStemmer
- PYSTEMMER = False
+from sphinx.util.stemmer import get_stemmer
try:
import jieba
@@ -27,7 +21,11 @@ try:
except ImportError:
JIEBA = False
-english_stopwords = set("""
+if False:
+ # For type annotation
+ from typing import Dict, List # NOQA
+
+english_stopwords = set(u"""
a and are as at
be but by
for
@@ -238,38 +236,27 @@ class SearchChinese(SearchLanguage):
latin1_letters = re.compile(r'\w+(?u)[\u0000-\u00ff]')
def init(self, options):
+ # type: (Dict) -> None
if JIEBA:
dict_path = options.get('dict')
if dict_path and os.path.isfile(dict_path):
jieba.set_dictionary(dict_path)
- if PYSTEMMER:
- class Stemmer(object):
- def __init__(self):
- self.stemmer = PyStemmer('porter')
-
- def stem(self, word):
- return self.stemmer.stemWord(word)
- else:
- class Stemmer(PorterStemmer):
- """All those porter stemmer implementations look hideous;
- make at least the stem method nicer.
- """
- def stem(self, word):
- return PorterStemmer.stem(self, word, 0, len(word) - 1)
-
- self.stemmer = Stemmer()
+ self.stemmer = get_stemmer()
def split(self, input):
- chinese = []
+ # type: (unicode) -> List[unicode]
+ chinese = [] # type: List[unicode]
if JIEBA:
chinese = list(jieba.cut_for_search(input))
- latin1 = self.latin1_letters.findall(input)
+ latin1 = self.latin1_letters.findall(input) # type: ignore
return chinese + latin1
def word_filter(self, stemmed_word):
+ # type: (unicode) -> bool
return len(stemmed_word) > 1
def stem(self, word):
+ # type: (unicode) -> unicode
return self.stemmer.stem(word)
diff --git a/sphinx/setup_command.py b/sphinx/setup_command.py
index c6c256b72..8e773923f 100644
--- a/sphinx/setup_command.py
+++ b/sphinx/setup_command.py
@@ -18,7 +18,7 @@ import os
from six import StringIO, string_types
from distutils.cmd import Command
-from distutils.errors import DistutilsOptionError, DistutilsExecError
+from distutils.errors import DistutilsOptionError, DistutilsExecError # type: ignore
from sphinx.application import Sphinx
from sphinx.cmdline import handle_exception
@@ -26,6 +26,10 @@ from sphinx.util.console import nocolor, color_terminal
from sphinx.util.docutils import docutils_namespace
from sphinx.util.osutil import abspath
+if False:
+ # For type annotation
+ from typing import Any, List, Tuple # NOQA
+
class BuildDoc(Command):
"""
@@ -72,7 +76,8 @@ class BuildDoc(Command):
('source-dir=', 's', 'Source directory'),
('build-dir=', None, 'Build directory'),
('config-dir=', 'c', 'Location of the configuration directory'),
- ('builder=', 'b', 'The builder to use. Defaults to "html"'),
+ ('builder=', 'b', 'The builder (or builders) to use. Can be a comma- '
+ 'or space-separated list. Defaults to "html"'),
('warning-is-error', 'W', 'Turn warning into errors'),
('project=', None, 'The documented project\'s name'),
('version=', None, 'The short X.Y version'),
@@ -87,22 +92,24 @@ class BuildDoc(Command):
'link-index']
def initialize_options(self):
+ # type: () -> None
self.fresh_env = self.all_files = False
self.pdb = False
- self.source_dir = self.build_dir = None
+ self.source_dir = self.build_dir = None # type: unicode
self.builder = 'html'
self.warning_is_error = False
self.project = ''
self.version = ''
self.release = ''
self.today = ''
- self.config_dir = None
+ self.config_dir = None # type: unicode
self.link_index = False
self.copyright = ''
self.verbosity = 0
self.traceback = False
def _guess_source_dir(self):
+ # type: () -> unicode
for guess in ('doc', 'docs'):
if not os.path.isdir(guess):
continue
@@ -115,6 +122,7 @@ class BuildDoc(Command):
# unicode, causing finalize_options to fail if invoked again. Workaround
# for http://bugs.python.org/issue19570
def _ensure_stringlike(self, option, what, default=None):
+ # type: (unicode, unicode, Any) -> Any
val = getattr(self, option)
if val is None:
setattr(self, option, default)
@@ -125,10 +133,11 @@ class BuildDoc(Command):
return val
def finalize_options(self):
+ # type: () -> None
if self.source_dir is None:
self.source_dir = self._guess_source_dir()
- self.announce('Using source directory %s' % self.source_dir)
- self.ensure_dirname('source_dir')
+ self.announce('Using source directory %s' % self.source_dir) # type: ignore
+ self.ensure_dirname('source_dir') # type: ignore
if self.source_dir is None:
self.source_dir = os.curdir
self.source_dir = abspath(self.source_dir)
@@ -136,23 +145,28 @@ class BuildDoc(Command):
self.config_dir = self.source_dir
self.config_dir = abspath(self.config_dir)
+ self.ensure_string_list('builder') # type: ignore
if self.build_dir is None:
- build = self.get_finalized_command('build')
+ build = self.get_finalized_command('build') # type: ignore
self.build_dir = os.path.join(abspath(build.build_base), 'sphinx')
- self.mkpath(self.build_dir)
+ self.mkpath(self.build_dir) # type: ignore
self.build_dir = abspath(self.build_dir)
self.doctree_dir = os.path.join(self.build_dir, 'doctrees')
- self.mkpath(self.doctree_dir)
- self.builder_target_dir = os.path.join(self.build_dir, self.builder)
- self.mkpath(self.builder_target_dir)
+ self.mkpath(self.doctree_dir) # type: ignore
+ self.builder_target_dirs = [
+ (builder, os.path.join(self.build_dir, builder))
+ for builder in self.builder] # type: List[Tuple[str, unicode]]
+ for _, builder_target_dir in self.builder_target_dirs:
+ self.mkpath(builder_target_dir) # type: ignore
def run(self):
+ # type: () -> None
if not color_terminal():
nocolor()
- if not self.verbose:
+ if not self.verbose: # type: ignore
status_stream = StringIO()
else:
- status_stream = sys.stdout
+ status_stream = sys.stdout # type: ignore
confoverrides = {}
if self.project:
confoverrides['project'] = self.project
@@ -165,24 +179,28 @@ class BuildDoc(Command):
if self.copyright:
confoverrides['copyright'] = self.copyright
- app = None
- try:
- with docutils_namespace():
- app = Sphinx(self.source_dir, self.config_dir,
- self.builder_target_dir, self.doctree_dir,
- self.builder, confoverrides, status_stream,
- freshenv=self.fresh_env,
- warningiserror=self.warning_is_error)
- app.build(force_all=self.all_files)
- if app.statuscode:
- raise DistutilsExecError(
- 'caused by %s builder.' % app.builder.name)
- except Exception as exc:
- handle_exception(app, self, exc, sys.stderr)
- if not self.pdb:
- raise SystemExit(1)
-
- if self.link_index:
- src = app.config.master_doc + app.builder.out_suffix
- dst = app.builder.get_outfilename('index')
+ for builder, builder_target_dir in self.builder_target_dirs:
+ app = None
+
+ try:
+ with docutils_namespace():
+ app = Sphinx(self.source_dir, self.config_dir,
+ builder_target_dir, self.doctree_dir,
+ builder, confoverrides, status_stream,
+ freshenv=self.fresh_env,
+ warningiserror=self.warning_is_error)
+ app.build(force_all=self.all_files)
+ if app.statuscode:
+ raise DistutilsExecError(
+ 'caused by %s builder.' % app.builder.name)
+ except Exception as exc:
+ handle_exception(app, self, exc, sys.stderr)
+ if not self.pdb:
+ raise SystemExit(1)
+
+ if not self.link_index:
+ continue
+
+ src = app.config.master_doc + app.builder.out_suffix # type: ignore
+ dst = app.builder.get_outfilename('index') # type: ignore
os.symlink(src, dst)
diff --git a/sphinx/templates/epub2/container.xml b/sphinx/templates/epub2/container.xml
new file mode 100644
index 000000000..326cf15fa
--- /dev/null
+++ b/sphinx/templates/epub2/container.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
+ <rootfiles>
+ <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>
+ </rootfiles>
+</container>
diff --git a/sphinx/templates/epub2/content.opf_t b/sphinx/templates/epub2/content.opf_t
new file mode 100644
index 000000000..5169d0551
--- /dev/null
+++ b/sphinx/templates/epub2/content.opf_t
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
+      unique-identifier="{{ uid }}">
+ <metadata xmlns:opf="http://www.idpf.org/2007/opf"
+ xmlns:dc="http://purl.org/dc/elements/1.1/">
+ <dc:language>{{ lang }}</dc:language>
+ <dc:title>{{ title }}</dc:title>
+ <dc:creator opf:role="aut">{{ author }}</dc:creator>
+ <dc:publisher>{{ publisher }}</dc:publisher>
+ <dc:rights>{{ copyright }}</dc:rights>
+ <dc:identifier id="{{ uid }}" opf:scheme="{{ scheme }}">{{ id }}</dc:identifier>
+ <dc:date>{{ date }}</dc:date>
+ {%- if cover %}
+ <meta name="cover" content="{{ cover }}"/>
+ {%- endif %}
+ </metadata>
+ <manifest>
+ <item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
+ {%- for item in manifest_items %}
+ <item id="{{ item.id }}" href="{{ item.href }}" media-type="{{ item.media_type }}" />
+ {%- endfor %}
+ </manifest>
+ <spine toc="ncx">
+ {%- for spine in spines %}
+ {%- if spine.linear %}
+ <itemref idref="{{ spine.idref }}" />
+ {%- else %}
+ <itemref idref="{{ spine.idref }}" linear="no" />
+ {%- endif %}
+ {%- endfor %}
+ </spine>
+ <guide>
+ {%- for guide in guides %}
+ <reference type="{{ guide.type }}" title="{{ guide.title }}" href="{{ guide.uri }}" />
+ {%- endfor %}
+ </guide>
+</package>
diff --git a/sphinx/templates/epub2/mimetype b/sphinx/templates/epub2/mimetype
new file mode 100644
index 000000000..57ef03f24
--- /dev/null
+++ b/sphinx/templates/epub2/mimetype
@@ -0,0 +1 @@
+application/epub+zip \ No newline at end of file
diff --git a/sphinx/templates/epub2/toc.ncx_t b/sphinx/templates/epub2/toc.ncx_t
new file mode 100644
index 000000000..9bb701908
--- /dev/null
+++ b/sphinx/templates/epub2/toc.ncx_t
@@ -0,0 +1,15 @@
+<?xml version="1.0"?>
+<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
+ <head>
+ <meta name="dtb:uid" content="{{ uid }}"/>
+ <meta name="dtb:depth" content="{{ level }}"/>
+ <meta name="dtb:totalPageCount" content="0"/>
+ <meta name="dtb:maxPageNumber" content="0"/>
+ </head>
+ <docTitle>
+ <text>{{ title }}</text>
+ </docTitle>
+ <navMap>
+{{ navpoints }}
+ </navMap>
+</ncx>
diff --git a/sphinx/templates/epub3/container.xml b/sphinx/templates/epub3/container.xml
new file mode 100644
index 000000000..326cf15fa
--- /dev/null
+++ b/sphinx/templates/epub3/container.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
+ <rootfiles>
+ <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>
+ </rootfiles>
+</container>
diff --git a/sphinx/templates/epub3/content.opf_t b/sphinx/templates/epub3/content.opf_t
new file mode 100644
index 000000000..faabc86df
--- /dev/null
+++ b/sphinx/templates/epub3/content.opf_t
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<package xmlns="http://www.idpf.org/2007/opf" version="3.0" xml:lang="{{ lang }}"
+ unique-identifier="{{ uid }}"
+ prefix="ibooks: http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/">
+ <metadata xmlns:opf="http://www.idpf.org/2007/opf"
+ xmlns:dc="http://purl.org/dc/elements/1.1/">
+ <dc:language>{{ lang }}</dc:language>
+ <dc:title>{{ title }}</dc:title>
+ <dc:description>{{ description }}</dc:description>
+ <dc:creator>{{ author }}</dc:creator>
+ <dc:contributor>{{ contributor }}</dc:contributor>
+ <dc:publisher>{{ publisher }}</dc:publisher>
+ <dc:rights>{{ copyright }}</dc:rights>
+ <dc:identifier id="{{ uid }}">{{ id }}</dc:identifier>
+ <dc:date>{{ date }}</dc:date>
+ <meta property="dcterms:modified">{{ date }}</meta>
+ <meta property="ibooks:version">{{ version }}</meta>
+ <meta property="ibooks:specified-fonts">true</meta>
+ <meta property="ibooks:binding">true</meta>
+ <meta property="ibooks:scroll-axis">{{ ibook_scroll_axis }}</meta>
+ {%- if cover %}
+ <meta name="cover" content="{{ cover }}"/>
+ {%- endif %}
+ </metadata>
+ <manifest>
+ <item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
+ <item id="nav" href="nav.xhtml" media-type="application/xhtml+xml" properties="nav"/>
+ {%- for item in manifest_items %}
+ <item id="{{ item.id }}" href="{{ item.href }}" media-type="{{ item.media_type }}" />
+ {%- endfor %}
+ </manifest>
+ <spine toc="ncx" page-progression-direction="{{ page_progression_direction }}">
+ {%- for spine in spines %}
+ {%- if spine.linear %}
+ <itemref idref="{{ spine.idref }}" />
+ {%- else %}
+ <itemref idref="{{ spine.idref }}" linear="no" />
+ {%- endif %}
+ {%- endfor %}
+ </spine>
+ <guide>
+ {%- for guide in guides %}
+ <reference type="{{ guide.type }}" title="{{ guide.title }}" href="{{ guide.uri }}" />
+ {%- endfor %}
+ </guide>
+</package>
diff --git a/sphinx/templates/epub3/mimetype b/sphinx/templates/epub3/mimetype
new file mode 100644
index 000000000..57ef03f24
--- /dev/null
+++ b/sphinx/templates/epub3/mimetype
@@ -0,0 +1 @@
+application/epub+zip \ No newline at end of file
diff --git a/sphinx/templates/epub3/nav.xhtml_t b/sphinx/templates/epub3/nav.xhtml_t
new file mode 100644
index 000000000..2a32c2039
--- /dev/null
+++ b/sphinx/templates/epub3/nav.xhtml_t
@@ -0,0 +1,26 @@
+{%- macro toctree(navlist) -%}
+<ol>
+{%- for nav in navlist %}
+ <li>
+ <a href="{{ nav.refuri }}">{{ nav.text }}</a>
+ {%- if nav.children %}
+{{ toctree(nav.children)|indent(4, true) }}
+ {%- endif %}
+ </li>
+{%- endfor %}
+</ol>
+{%- endmacro -%}
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE html>
+<html xmlns="http://www.w3.org/1999/xhtml"
+ xmlns:epub="http://www.idpf.org/2007/ops" lang="{{ lang }}" xml:lang="{{ lang }}">
+ <head>
+ <title>{{ toc_locale }}</title>
+ </head>
+ <body>
+ <nav epub:type="toc">
+ <h1>{{ toc_locale }}</h1>
+{{ toctree(navlist)|indent(6, true) }}
+ </nav>
+ </body>
+</html>
diff --git a/sphinx/templates/epub3/toc.ncx_t b/sphinx/templates/epub3/toc.ncx_t
new file mode 100644
index 000000000..0ea7ca366
--- /dev/null
+++ b/sphinx/templates/epub3/toc.ncx_t
@@ -0,0 +1,24 @@
+{%- macro navPoints(navlist) %}
+{%- for nav in navlist %}
+<navPoint id="{{ nav.navpoint }}" playOrder="{{ nav.playorder }}">
+ <navLabel>
+ <text>{{ nav.text }}</text>
+ </navLabel>
+ <content src="{{ nav.refuri }}" />{{ navPoints(nav.children)|indent(2, true) }}
+</navPoint>
+{%- endfor %}
+{%- endmacro -%}
+<?xml version="1.0"?>
+<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
+ <head>
+ <meta name="dtb:uid" content="{{ uid }}"/>
+ <meta name="dtb:depth" content="{{ level }}"/>
+ <meta name="dtb:totalPageCount" content="0"/>
+ <meta name="dtb:maxPageNumber" content="0"/>
+ </head>
+ <docTitle>
+ <text>{{ title }}</text>
+ </docTitle>
+ <navMap>{{ navPoints(navpoints)|indent(4, true) }}
+ </navMap>
+</ncx>
diff --git a/sphinx/templates/latex/content.tex_t b/sphinx/templates/latex/latex.tex_t
index b3a77ca42..55f60437e 100644
--- a/sphinx/templates/latex/content.tex_t
+++ b/sphinx/templates/latex/latex.tex_t
@@ -22,12 +22,19 @@
<%= multilingual %>
<%= fontpkg %>
<%= fncychap %>
-<%= longtable %>
\usepackage<%= sphinxpkgoptions %>{sphinx}
<%= sphinxsetup %>
<%= geometry %>
\usepackage{multirow}
+\let\originalmultirow\multirow\protected\def\multirow{%
+ \sphinxdeprecationwarning{\multirow}{1.6}{1.7}
+ {Sphinx does not use package multirow. Its loading will be removed at 1.7.}%
+ \originalmultirow}%
\usepackage{eqparbox}
+\let\originaleqparbox\eqparbox\protected\def\eqparbox{%
+ \sphinxdeprecationwarning{\eqparbox}{1.6}{1.7}
+ {Sphinx does not use package eqparbox. Its loading will be removed at 1.7.}%
+ \originaleqparbox}%
<%= usepackages %>
<%= hyperref %>
<%= contentsname %>
diff --git a/sphinx/templates/latex/longtable.tex_t b/sphinx/templates/latex/longtable.tex_t
new file mode 100644
index 000000000..b7310a780
--- /dev/null
+++ b/sphinx/templates/latex/longtable.tex_t
@@ -0,0 +1,32 @@
+\begin{savenotes}\sphinxatlongtablestart\begin{longtable}
+<%- if table.align == 'center' -%>
+ [c]
+<%- elif table.align == 'left' -%>
+ [l]
+<%- elif table.align == 'right' -%>
+ [r]
+<%- endif -%>
+<%= table.get_colspec() %>
+<%- if table.caption -%>
+\caption{<%= ''.join(table.caption) %>\strut}<%= labels %>\\*[\sphinxlongtablecapskipadjust]
+<% endif -%>
+\hline
+<%= ''.join(table.header) %>
+\endfirsthead
+
+\multicolumn{<%= table.colcount %>}{c}%
+{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- <%= _('continued from previous page') %>}}}\\
+\hline
+<%= ''.join(table.header) %>
+\endhead
+
+\hline
+\multicolumn{<%= table.colcount %>}{r}{\makebox[0pt][r]{\sphinxtablecontinued{<%= _('Continued on next page') %>}}}\\
+\endfoot
+
+\endlastfoot
+<% if table.caption_footnotetexts -%>
+<%= ''.join(table.caption_footnotetexts) %>
+<% endif -%>
+<%= ''.join(table.body) %>
+\end{longtable}\sphinxatlongtableend\end{savenotes}
diff --git a/sphinx/templates/latex/tabular.tex_t b/sphinx/templates/latex/tabular.tex_t
new file mode 100644
index 000000000..62af2ffa8
--- /dev/null
+++ b/sphinx/templates/latex/tabular.tex_t
@@ -0,0 +1,29 @@
+\begin{savenotes}\sphinxattablestart
+<% if table.align -%>
+ <%- if table.align == 'center' -%>
+ \centering
+ <%- elif table.align == 'left' -%>
+ \raggedright
+ <%- else -%>
+ \raggedleft
+ <%- endif %>
+<%- else -%>
+ \centering
+<%- endif %>
+<% if table.caption -%>
+\begin{threeparttable}
+\capstart\caption{<%= ''.join(table.caption) %>}<%= labels %>
+<% endif -%>
+\begin{tabular}[t]<%= table.get_colspec() -%>
+\hline
+<%= ''.join(table.header) %>
+<%- if table.caption_footnotetexts -%>
+<%= ''.join(table.caption_footnotetexts) -%>
+<%- endif -%>
+<%=- ''.join(table.body) %>
+\end{tabular}
+<%- if table.caption %>
+\end{threeparttable}
+<%- endif %>
+\par
+\sphinxattableend\end{savenotes}
diff --git a/sphinx/templates/latex/tabulary.tex_t b/sphinx/templates/latex/tabulary.tex_t
new file mode 100644
index 000000000..c51d53396
--- /dev/null
+++ b/sphinx/templates/latex/tabulary.tex_t
@@ -0,0 +1,29 @@
+\begin{savenotes}\sphinxattablestart
+<% if table.align -%>
+ <%- if table.align == 'center' -%>
+ \centering
+ <%- elif table.align == 'left' -%>
+ \raggedright
+ <%- else -%>
+ \raggedleft
+ <%- endif %>
+<%- else -%>
+ \centering
+<%- endif %>
+<% if table.caption -%>
+\begin{threeparttable}
+\capstart\caption{<%= ''.join(table.caption) %>}<%= labels %>
+<% endif -%>
+\begin{tabulary}{\linewidth}[t]<%= table.get_colspec() -%>
+\hline
+<%= ''.join(table.header) %>
+<%- if table.caption_footnotetexts -%>
+<%= ''.join(table.caption_footnotetexts) -%>
+<%- endif -%>
+<%=- ''.join(table.body) %>
+\end{tabulary}
+<%- if table.caption %>
+\end{threeparttable}
+<%- endif %>
+\par
+\sphinxattableend\end{savenotes}
diff --git a/sphinx/templates/quickstart/Makefile.new_t b/sphinx/templates/quickstart/Makefile.new_t
index c7cd62dda..bba767a4c 100644
--- a/sphinx/templates/quickstart/Makefile.new_t
+++ b/sphinx/templates/quickstart/Makefile.new_t
@@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
-SPHINXBUILD = sphinx-build
+SPHINXBUILD = python -msphinx
SPHINXPROJ = {{ project_fn }}
SOURCEDIR = {{ rsrcdir }}
BUILDDIR = {{ rbuilddir }}
diff --git a/sphinx/templates/quickstart/Makefile_t b/sphinx/templates/quickstart/Makefile_t
index 5505f23f5..fdcf05691 100644
--- a/sphinx/templates/quickstart/Makefile_t
+++ b/sphinx/templates/quickstart/Makefile_t
@@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
-SPHINXBUILD = sphinx-build
+SPHINXBUILD = python -msphinx
PAPER =
BUILDDIR = {{ rbuilddir }}
diff --git a/sphinx/templates/quickstart/make.bat.new_t b/sphinx/templates/quickstart/make.bat.new_t
index e49ffbe78..a52951ebb 100644
--- a/sphinx/templates/quickstart/make.bat.new_t
+++ b/sphinx/templates/quickstart/make.bat.new_t
@@ -5,7 +5,7 @@ pushd %~dp0
REM Command file for Sphinx documentation
if "%SPHINXBUILD%" == "" (
- set SPHINXBUILD=sphinx-build
+ set SPHINXBUILD=python -msphinx
)
set SOURCEDIR={{ rsrcdir }}
set BUILDDIR={{ rbuilddir }}
@@ -16,10 +16,10 @@ if "%1" == "" goto help
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
echo.
- echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
- echo.installed, then set the SPHINXBUILD environment variable to point
- echo.to the full path of the 'sphinx-build' executable. Alternatively you
- echo.may add the Sphinx directory to PATH.
+ echo.The Sphinx module was not found. Make sure you have Sphinx installed,
+ echo.then set the SPHINXBUILD environment variable to point to the full
+ echo.path of the 'sphinx-build' executable. Alternatively you may add the
+ echo.Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
diff --git a/sphinx/templates/quickstart/make.bat_t b/sphinx/templates/quickstart/make.bat_t
index 8f993a7b1..03ae9d423 100644
--- a/sphinx/templates/quickstart/make.bat_t
+++ b/sphinx/templates/quickstart/make.bat_t
@@ -5,14 +5,14 @@ REM Command file for Sphinx documentation
pushd %~dp0
if "%SPHINXBUILD%" == "" (
- set SPHINXBUILD=sphinx-build
+ set SPHINXBUILD=python -msphinx
)
set BUILDDIR={{ rbuilddir }}
set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% {{ rsrcdir }}
set I18NSPHINXOPTS=%SPHINXOPTS% {{ rsrcdir }}
if NOT "%PAPER%" == "" (
- set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
- set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+ set ALLSPHINXOPTS=-D latex_elements.papersize=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_elements.papersize=%PAPER% %I18NSPHINXOPTS%
)
if "%1" == "" goto help
@@ -52,29 +52,20 @@ if "%1" == "clean" (
)
-REM Check if sphinx-build is available and fallback to Python version if any
+REM Check if sphinx-build is available
%SPHINXBUILD% 1>NUL 2>NUL
-if errorlevel 9009 goto sphinx_python
-goto sphinx_ok
-
-:sphinx_python
-
-set SPHINXBUILD=python -m sphinx.__init__
-%SPHINXBUILD% 2> nul
-if errorlevel 9009 (
+if errorlevel 1 (
echo.
- echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
- echo.installed, then set the SPHINXBUILD environment variable to point
- echo.to the full path of the 'sphinx-build' executable. Alternatively you
- echo.may add the Sphinx directory to PATH.
+ echo.The Sphinx module was not found. Make sure you have Sphinx installed,
+ echo.then set the SPHINXBUILD environment variable to point to the full
+ echo.path of the 'sphinx-build' executable. Alternatively you may add the
+ echo.Sphinx directory to PATH.
echo.
echo.If you don't have Sphinx installed, grab it from
echo.http://sphinx-doc.org/
exit /b 1
)
-:sphinx_ok
-
if "%1" == "html" (
%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
diff --git a/sphinx/texinputs/Makefile_t b/sphinx/texinputs/Makefile_t
index ffec3662c..de6bcc085 100644
--- a/sphinx/texinputs/Makefile_t
+++ b/sphinx/texinputs/Makefile_t
@@ -2,8 +2,15 @@
ALLDOCS = $(basename $(wildcard *.tex))
ALLPDF = $(addsuffix .pdf,$(ALLDOCS))
+{% if latex_engine == 'xelatex' -%}
+ALLDVI =
+ALLXDV = $(addsuffix .xdv,$(ALLDOCS))
+{% else -%}
ALLDVI = $(addsuffix .dvi,$(ALLDOCS))
+ALLXDV =
+{% endif -%}
ALLPS = $(addsuffix .ps,$(ALLDOCS))
+ALLIMGS = $(wildcard *.png *.gif *.jpg *.jpeg)
# Prefix for archive names
ARCHIVEPRREFIX =
@@ -12,29 +19,52 @@ LATEXOPTS =
# format: pdf or dvi
FMT = pdf
-LATEX = latex
-PDFLATEX = {{ latex_engine }}
-MAKEINDEX = makeindex
+{% if latex_engine == 'platex' -%}
+# latexmkrc is read then overridden by latexmkjarc
+LATEX = latexmk -r latexmkjarc -dvi
+PDFLATEX = latexmk -r latexmkjarc -pdfdvi -dvi- -ps-
+{% elif latex_engine == 'pdflatex' -%}
+LATEX = latexmk -dvi
+PDFLATEX = latexmk -pdf -dvi- -ps-
+{% elif latex_engine == 'lualatex' -%}
+LATEX = latexmk -lualatex
+PDFLATEX = latexmk -pdflua -dvi- -ps-
+{% elif latex_engine == 'xelatex' -%}
+LATEX = latexmk -pdfxe -dvi- -ps-
+PDFLATEX = $(LATEX)
+{% endif %}
+
+%.png %.gif %.jpg %.jpeg: FORCE_MAKE
+ extractbb '$@'
+
+{% if latex_engine == 'platex' -%}
+%.dvi: %.tex $(ALLIMGS) FORCE_MAKE
+ for f in *.pdf; do extractbb "$$f"; done
+ $(LATEX) $(LATEXOPTS) '$<'
+
+{% elif latex_engine != 'xelatex' -%}
+%.dvi: %.tex FORCE_MAKE
+ $(LATEX) $(LATEXOPTS) '$<'
-{% if latex_engine == 'platex' %}
-all: all-pdf-ja
-all-pdf: all-pdf-ja
-{% else %}
-all: $(ALLPDF)
-all-pdf: $(ALLPDF)
{% endif -%}
+%.ps: %.dvi
+ dvips '$<'
+
+{% if latex_engine == 'platex' -%}
+%.pdf: %.tex $(ALLIMGS) FORCE_MAKE
+ for f in *.pdf; do extractbb "$$f"; done
+{%- else -%}
+%.pdf: %.tex FORCE_MAKE
+{%- endif %}
+ $(PDFLATEX) $(LATEXOPTS) '$<'
+
all-dvi: $(ALLDVI)
+
all-ps: $(ALLPS)
-all-pdf-ja:
- for f in *.pdf *.png *.gif *.jpg *.jpeg; do extractbb $$f; done
- for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done
- for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done
- for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done
- -for f in *.idx; do mendex -U -f -d "`basename $$f .idx`.dic" -s python.ist $$f; done
- for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done
- for f in *.tex; do platex -kanji=utf8 $(LATEXOPTS) $$f; done
- for f in *.dvi; do dvipdfmx $$f; done
+all-pdf: $(ALLPDF)
+
+all: $(ALLPDF)
zip: all-$(FMT)
mkdir $(ARCHIVEPREFIX)docs-$(FMT)
@@ -57,30 +87,8 @@ bz2: tar
xz: tar
xz -9 -k $(ARCHIVEPREFIX)docs-$(FMT).tar
-# The number of LaTeX runs is quite conservative, but I don't expect it
-# to get run often, so the little extra time won't hurt.
-%.dvi: %.tex
- $(LATEX) $(LATEXOPTS) '$<'
- $(LATEX) $(LATEXOPTS) '$<'
- $(LATEX) $(LATEXOPTS) '$<'
- -$(MAKEINDEX) -s python.ist '$(basename $<).idx'
- $(LATEX) $(LATEXOPTS) '$<'
- $(LATEX) $(LATEXOPTS) '$<'
-
-%.pdf: %.tex
- $(PDFLATEX) $(LATEXOPTS) '$<'
- $(PDFLATEX) $(LATEXOPTS) '$<'
- $(PDFLATEX) $(LATEXOPTS) '$<'
- -$(MAKEINDEX) -s python.ist '$(basename $<).idx'
- $(PDFLATEX) $(LATEXOPTS) '$<'
- $(PDFLATEX) $(LATEXOPTS) '$<'
-
-%.ps: %.dvi
- dvips '$<'
-
clean:
- rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz $(ALLPDF) $(ALLDVI)
+ rm -f *.log *.ind *.aux *.toc *.syn *.idx *.out *.ilg *.pla *.ps *.tar *.tar.gz *.tar.bz2 *.tar.xz $(ALLPDF) $(ALLDVI) $(ALLXDV) *.fls *.fdb_latexmk
.PHONY: all all-pdf all-dvi all-ps clean zip tar gz bz2 xz
-.PHONY: all-pdf-ja
-
+.PHONY: FORCE_MAKE
diff --git a/sphinx/texinputs/footnotehyper-sphinx.sty b/sphinx/texinputs/footnotehyper-sphinx.sty
index 763ce7323..ff23f6ebe 100644
--- a/sphinx/texinputs/footnotehyper-sphinx.sty
+++ b/sphinx/texinputs/footnotehyper-sphinx.sty
@@ -1,158 +1,268 @@
\NeedsTeXFormat{LaTeX2e}
\ProvidesPackage{footnotehyper-sphinx}%
- [2017/01/16 v1.5.2 hyperref aware footnote.sty for sphinx (JFB)]
+ [2017/03/07 v1.6 hyperref aware footnote.sty for sphinx (JFB)]
%%
%% Package: footnotehyper-sphinx
-%% Version: based on footnotehyper.sty v0.9f (2016/10/03)
+%% Version: based on footnotehyper.sty 2017/03/07 v1.0
%% as available at http://www.ctan.org/pkg/footnotehyper
%% License: the one applying to Sphinx
%%
-%% Differences from footnotehyper v0.9f (2016/10/03):
-%% 1. hyperref is assumed in use (with default hyperfootnotes=true),
-%% 2. no need to check if footnote.sty was loaded,
-%% 3. a special tabulary compatibility layer added, (partial but enough for
-%% Sphinx),
-%% 4. \sphinxfootnotemark, and use of \spx@opt@BeforeFootnote from sphinx.sty.
-%% Note: with \footnotemark[N]/\footnotetext[N] syntax, hyperref
-%% does not insert an hyperlink. This is _not_ improved here.
-%% 5. use of \sphinxunactivateextrasandspace for parsed literals
+%% Refer to the PDF documentation at http://www.ctan.org/pkg/footnotehyper for
+%% the code comments.
%%
-\DeclareOption*{\PackageWarning{footnotehyper}{Option `\CurrentOption' is unknown}}%
+%% Differences:
+%% 1. a partial tabulary compatibility layer added (enough for Sphinx mark-up),
+%% 2. use of \spx@opt@BeforeFootnote from sphinx.sty,
+%% 3. use of \sphinxunactivateextrasandspace from sphinx.sty,
+%% 4. macro definition \sphinxfootnotemark,
+%% 5. macro definition \sphinxlongtablepatch
+\DeclareOption*{\PackageWarning{footnotehyper-sphinx}{Option `\CurrentOption' is unknown}}%
\ProcessOptions\relax
-\let\FNH@@makefntext\@makefntext\let\@makefntext\@firstofone
-\RequirePackage{footnote}
-\let\fnparbox\parbox\let\parbox\fn@parbox\let\@makefntext\FNH@@makefntext
-\let\FNH@fn@footnote \footnote % buggy footnote.sty's \footnote
-\let\FNH@fn@footnotetext\footnotetext % will be redefined later
-\let\footnote \fn@latex@@footnote % meaning of \footnote before footnote.sty
-\let\footnotetext\fn@latex@@footnotetext
-\def\fn@endnote {\color@endgroup}%
+\newbox\FNH@notes
+\newdimen\FNH@width
+\let\FNH@colwidth\columnwidth
+\newif\ifFNH@savingnotes
\AtBeginDocument {%
- \let\fn@latex@@footnote \footnote % meaning of \footnote at end of preamble
- \let\fn@latex@@footnotetext\footnotetext
- \let\fn@fntext \FNH@hyper@fntext
- \let\spewnotes \FNH@hyper@spewnotes
- \let\endsavenotes\spewnotes
- \let\fn@endfntext\FNH@fixed@endfntext
- \let\footnote \FNH@fixed@footnote
- \let\footnotetext\FNH@fixed@footnotetext
- \let\endfootnote\fn@endfntext
- \let\endfootnotetext\endfootnote
-}%
-\def\FNH@hyper@fntext {%
-%% amsmath compatibility
- \ifx\ifmeasuring@\undefined\expandafter\@secondoftwo
- \else\expandafter\@firstofone\fi
+ \let\FNH@latex@footnote \footnote
+ \let\FNH@latex@footnotetext\footnotetext
+ \let\FNH@H@@footnotetext \@footnotetext
+ \newenvironment{savenotes}
+ {\FNH@savenotes\ignorespaces}{\FNH@spewnotes\ignorespacesafterend}%
+ \let\spewnotes \FNH@spewnotes
+ \let\footnote \FNH@footnote
+ \let\footnotetext \FNH@footnotetext
+ \let\endfootnote \FNH@endfntext
+ \let\endfootnotetext\FNH@endfntext
+ \@ifpackageloaded{hyperref}
+ {\ifHy@hyperfootnotes
+ \let\FNH@H@@footnotetext\H@@footnotetext
+ \else
+ \let\FNH@hyper@fntext\FNH@nohyp@fntext
+ \fi}%
+ {\let\FNH@hyper@fntext\FNH@nohyp@fntext}%
+}%
+\def\FNH@hyper@fntext{\FNH@fntext\FNH@hyper@fntext@i}%
+\def\FNH@nohyp@fntext{\FNH@fntext\FNH@nohyp@fntext@i}%
+\def\FNH@fntext #1{%
+ \ifx\ifmeasuring@\@undefined
+ \expandafter\@secondoftwo\else\expandafter\@firstofone\fi
+% these two lines modified for Sphinx (tabulary compatibility):
{\ifmeasuring@\expandafter\@gobbletwo\else\expandafter\@firstofone\fi}%
-%% partial tabulary compatibility, [N] must be used, but Sphinx does it
- {\ifx\equation$\expandafter\@gobbletwo\fi\FNH@hyper@fntext@i }%$
-}%
-\long\def\FNH@hyper@fntext@i #1{\global\setbox\fn@notes\vbox
- {\unvbox\fn@notes
- \fn@startnote
- \@makefntext
- {\rule\z@\footnotesep\ignorespaces
- \ifHy@nesting\expandafter\ltx@firstoftwo
- \else\expandafter\ltx@secondoftwo
- \fi
- {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}{#1}}%
- {\Hy@raisedlink
- {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}%
- {\relax}}%
- \let\@currentHref\Hy@footnote@currentHref
- \let\@currentlabelname\@empty
- #1}%
- \@finalstrut\strutbox }%
- \fn@endnote }%
-}%
-\def\FNH@hyper@spewnotes {\endgroup
- \if@savingnotes\else\ifvoid\fn@notes\else
- \begingroup\let\@makefntext\@empty
- \let\@finalstrut\@gobble
- \let\rule\@gobbletwo
- \H@@footnotetext{\unvbox\fn@notes}%
- \endgroup\fi\fi
-}%
-\def\FNH@fixed@endfntext {%
- \@finalstrut\strutbox
- \fn@postfntext
- \fn@endnote
- \egroup\FNH@endfntext@next % will decide if link or no link
+ {\ifx\equation$\expandafter\@gobbletwo\fi #1}%$
}%
-\def\FNH@endfntext@link {\begingroup
- \let\@makefntext\@empty\let\@finalstrut\@gobble\let\rule\@gobbletwo
- \@footnotetext {\unvbox\z@}%
+\long\def\FNH@hyper@fntext@i#1{%
+ \global\setbox\FNH@notes\vbox
+ {\unvbox\FNH@notes
+ \FNH@startnote
+ \@makefntext
+ {\rule\z@\footnotesep\ignorespaces
+ \ifHy@nesting\expandafter\ltx@firstoftwo
+ \else\expandafter\ltx@secondoftwo
+ \fi
+ {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}{#1}}%
+ {\Hy@raisedlink
+ {\expandafter\hyper@@anchor\expandafter{\Hy@footnote@currentHref}%
+ {\relax}}%
+ \let\@currentHref\Hy@footnote@currentHref
+ \let\@currentlabelname\@empty
+ #1}%
+ \@finalstrut\strutbox
+ }%
+ \FNH@endnote
+ }%
+}%
+\long\def\FNH@nohyp@fntext@i#1{%
+ \global\setbox\FNH@notes\vbox
+ {\unvbox\FNH@notes
+ \FNH@startnote
+ \@makefntext{\rule\z@\footnotesep\ignorespaces#1\@finalstrut\strutbox}%
+ \FNH@endnote
+ }%
+}%
+\def\FNH@startnote{%
+ \hsize\FNH@colwidth
+ \interlinepenalty\interfootnotelinepenalty
+ \reset@font\footnotesize
+ \floatingpenalty\@MM
+ \@parboxrestore
+ \protected@edef\@currentlabel{\csname p@\@mpfn\endcsname\@thefnmark}%
+ \color@begingroup
+}%
+\def\FNH@endnote{\color@endgroup}%
+\def\FNH@savenotes{%
+ \begingroup
+ \ifFNH@savingnotes\else
+ \FNH@savingnotestrue
+ \let\@footnotetext \FNH@hyper@fntext
+ \let\@mpfootnotetext \FNH@hyper@fntext
+ \let\H@@mpfootnotetext\FNH@nohyp@fntext
+ \FNH@width\columnwidth
+ \let\FNH@colwidth\FNH@width
+ \global\setbox\FNH@notes\box\voidb@x
+ \let\FNH@thempfn\thempfn
+ \let\FNH@mpfn\@mpfn
+ \ifx\@minipagerestore\relax\let\@minipagerestore\@empty\fi
+ \expandafter\def\expandafter\@minipagerestore\expandafter{%
+ \@minipagerestore
+ \let\thempfn\FNH@thempfn
+ \let\@mpfn\FNH@mpfn
+ }%
+ \fi
+}%
+\def\FNH@spewnotes {%
+ \endgroup
+ \ifFNH@savingnotes\else
+ \ifvoid\FNH@notes\else
+ \begingroup
+ \let\@makefntext\@empty
+ \let\@finalstrut\@gobble
+ \let\rule\@gobbletwo
+ \FNH@H@@footnotetext{\unvbox\FNH@notes}%
\endgroup
+ \fi
+ \fi
+}%
+\def\FNH@footnote@envname {footnote}%
+\def\FNH@footnotetext@envname{footnotetext}%
+\def\FNH@footnote{%
+% this line added for Sphinx:
+ \spx@opt@BeforeFootnote
+ \ifx\@currenvir\FNH@footnote@envname
+ \expandafter\FNH@footnoteenv
+ \else
+ \expandafter\FNH@latex@footnote
+ \fi
}%
-\def\FNH@endfntext@nolink {\begingroup
- \let\@makefntext\@empty\let\@finalstrut\@gobble
- \let\rule\@gobbletwo
- \if@savingnotes\expandafter\fn@fntext\else\expandafter\H@@footnotetext\fi
- {\unvbox\z@}\endgroup
-}%
-%% \spx@opt@BeforeFootnote is defined in sphinx.sty
-\def\FNH@fixed@footnote {\spx@opt@BeforeFootnote\ifx\@currenvir\fn@footnote
- \expandafter\FNH@footnoteenv\else\expandafter\fn@latex@@footnote\fi }%
-\def\FNH@footnoteenv {\catcode13=5\sphinxunactivateextrasandspace
- \@ifnextchar[\FNH@xfootnoteenv%]
- {\stepcounter\@mpfn
- \protected@xdef\@thefnmark{\thempfn}\@footnotemark
- \def\FNH@endfntext@next{\FNH@endfntext@link}\fn@startfntext}}%
-\def\FNH@xfootnoteenv [#1]{%
+\def\FNH@footnoteenv{%
+% this line added for Sphinx (footnotes in parsed literal blocks):
+ \catcode13=5 \sphinxunactivateextrasandspace
+ \@ifnextchar[%
+ \FNH@footnoteenv@i %]
+ {\stepcounter\@mpfn
+ \protected@xdef\@thefnmark{\thempfn}%
+ \@footnotemark
+ \def\FNH@endfntext@fntext{\@footnotetext}%
+ \FNH@startfntext}%
+}%
+\def\FNH@footnoteenv@i[#1]{%
\begingroup
\csname c@\@mpfn\endcsname #1\relax
\unrestored@protected@xdef\@thefnmark{\thempfn}%
- \endgroup\@footnotemark\def\FNH@endfntext@next{\FNH@endfntext@link}%
- \fn@startfntext}%
-\def\FNH@fixed@footnotetext {\ifx\@currenvir\fn@footnotetext
- \expandafter\FNH@footnotetextenv\else\expandafter\fn@latex@@footnotetext\fi}%
-\def\FNH@footnotetextenv {\@ifnextchar[\FNH@xfootnotetextenv%]
- {\protected@xdef\@thefnmark{\thempfn}%
- \def\FNH@endfntext@next{\FNH@endfntext@link}\fn@startfntext}}%
-\def\FNH@xfootnotetextenv [#1]{%
+ \endgroup
+ \@footnotemark
+ \def\FNH@endfntext@fntext{\@footnotetext}%
+ \FNH@startfntext
+}%
+\def\FNH@footnotetext{%
+ \ifx\@currenvir\FNH@footnotetext@envname
+ \expandafter\FNH@footnotetextenv
+ \else
+ \expandafter\FNH@latex@footnotetext
+ \fi
+}%
+\def\FNH@footnotetextenv{%
+ \@ifnextchar[%
+ \FNH@footnotetextenv@i %]
+ {\protected@xdef\@thefnmark{\thempfn}%
+ \def\FNH@endfntext@fntext{\@footnotetext}%
+ \FNH@startfntext}%
+}%
+\def\FNH@footnotetextenv@i[#1]{%
\begingroup
\csname c@\@mpfn\endcsname #1\relax
\unrestored@protected@xdef\@thefnmark{\thempfn}%
- \endgroup\def\FNH@endfntext@next{\FNH@endfntext@nolink}%
- \fn@startfntext }%
-% Now some checks in case some package has modified \@makefntext.
-\AtBeginDocument
-{% compatibility with French module of LaTeX babel
- \ifx\@makefntextFB\undefined
- \expandafter\@gobble\else\expandafter\@firstofone\fi
- {\ifFBFrenchFootnotes \let\FNH@@makefntext\@makefntextFB \else
- \let\FNH@@makefntext\@makefntextORI\fi}%
- \expandafter\FNH@check@a\FNH@@makefntext{1.2!3?4,}\FNH@@@1.2!3?4,\FNH@@@\relax
-}%
-\long\def\FNH@check@a #11.2!3?4,#2\FNH@@@#3%
-{%
- \ifx\relax#3\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
- \FNH@bad@footnote@env
- {\def\fn@prefntext{#1}\def\fn@postfntext{#2}\FNH@check@b}%
-}%
-\def\FNH@check@b #1\relax
-{%
+ \endgroup
+ \ifFNH@savingnotes
+ \def\FNH@endfntext@fntext{\FNH@nohyp@fntext}%
+ \else
+ \def\FNH@endfntext@fntext{\FNH@H@@footnotetext}%
+ \fi
+ \FNH@startfntext
+}%
+\def\FNH@startfntext{%
+ \setbox\z@\vbox\bgroup
+ \FNH@startnote
+ \FNH@prefntext
+ \rule\z@\footnotesep\ignorespaces
+}%
+\def\FNH@endfntext {%
+ \@finalstrut\strutbox
+ \FNH@postfntext
+ \FNH@endnote
+ \egroup
+ \begingroup
+ \let\@makefntext\@empty\let\@finalstrut\@gobble\let\rule\@gobbletwo
+ \FNH@endfntext@fntext {\unvbox\z@}%
+ \endgroup
+}%
+\AtBeginDocument{%
+ \let\FNH@@makefntext\@makefntext
+ \ifx\@makefntextFB\undefined
+ \expandafter\@gobble\else\expandafter\@firstofone\fi
+ {\ifFBFrenchFootnotes \let\FNH@@makefntext\@makefntextFB \else
+ \let\FNH@@makefntext\@makefntextORI\fi}%
+ \expandafter\FNH@check@a\FNH@@makefntext{1.2!3?4,}%
+ \FNH@@@1.2!3?4,\FNH@@@\relax
+}%
+\long\def\FNH@check@a #11.2!3?4,#2\FNH@@@#3{%
+ \ifx\relax#3\expandafter\@firstoftwo\else\expandafter\@secondoftwo\fi
+ \FNH@bad@makefntext@alert
+ {\def\FNH@prefntext{#1}\def\FNH@postfntext{#2}\FNH@check@b}%
+}%
+\def\FNH@check@b #1\relax{%
\expandafter\expandafter\expandafter\FNH@check@c
- \expandafter\meaning\expandafter\fn@prefntext
- \meaning\fn@postfntext1.2!3?4,\FNH@check@c\relax
-}%
-\def\FNH@check@c #11.2!3?4,#2#3\relax
- {\ifx\FNH@check@c#2\expandafter\@gobble\fi\FNH@bad@footnote@env}%
-\def\FNH@bad@footnote@env
-{\PackageWarningNoLine{footnotehyper}%
- {The footnote environment from package footnote^^J
- will be dysfunctional, sorry (not my fault...). You may try to make a bug^^J
- report at https://github.com/sphinx-doc/sphinx including the next lines:}%
+ \expandafter\meaning\expandafter\FNH@prefntext
+ \meaning\FNH@postfntext1.2!3?4,\FNH@check@c\relax
+}%
+\def\FNH@check@c #11.2!3?4,#2#3\relax{%
+ \ifx\FNH@check@c#2\expandafter\@gobble\fi\FNH@bad@makefntext@alert
+}%
+% slight reformulation for Sphinx
+\def\FNH@bad@makefntext@alert{%
+ \PackageWarningNoLine{footnotehyper-sphinx}%
+ {Footnotes will be sub-optimal, sorry. This is due to the document class or^^J
+ some package modifying macro \string\@makefntext.^^J
+ You can try to report this incompatibility at^^J
+ https://github.com/sphinx-doc/sphinx with this info:}%
\typeout{\meaning\@makefntext}%
- \let\fn@prefntext\@empty\let\fn@postfntext\@empty
+ \let\FNH@prefntext\@empty\let\FNH@postfntext\@empty
}%
-%% \sphinxfootnotemark: usable in section titles and silently removed from
-%% TOCs.
+% this macro from original footnote.sty is not used anymore by Sphinx
+% but for simplicity sake let's just keep it as is
+\def\makesavenoteenv{\@ifnextchar[\FNH@msne@ii\FNH@msne@i}%]
+\def\FNH@msne@i #1{%
+ \expandafter\let\csname FNH$#1\expandafter\endcsname %$
+ \csname #1\endcsname
+ \expandafter\let\csname endFNH$#1\expandafter\endcsname %$
+ \csname end#1\endcsname
+ \FNH@msne@ii[#1]{FNH$#1}%$
+}%
+\def\FNH@msne@ii[#1]#2{%
+ \expandafter\edef\csname#1\endcsname{%
+ \noexpand\savenotes
+ \expandafter\noexpand\csname#2\endcsname
+ }%
+ \expandafter\edef\csname end#1\endcsname{%
+ \expandafter\noexpand\csname end#2\endcsname
+ \noexpand\expandafter
+ \noexpand\spewnotes
+ \noexpand\if@endpe\noexpand\@endpetrue\noexpand\fi
+ }%
+}%
+% end of footnotehyper 2017/03/07 v1.0
+% some extras for Sphinx :
+% \sphinxfootnotemark: usable in section titles and silently removed from TOCs.
\def\sphinxfootnotemark [#1]%
- {\ifx\thepage\relax\else \protect\spx@opt@BeforeFootnote
- \protect\footnotemark[#1]\fi}%
-\AtBeginDocument % let hyperref less complain
- {\pdfstringdefDisableCommands{\def\sphinxfootnotemark [#1]{}}}%
+ {\ifx\thepage\relax\else\protect\spx@opt@BeforeFootnote
+ \protect\footnotemark[#1]\fi}%
+\AtBeginDocument{%
+ % let hyperref less complain
+ \pdfstringdefDisableCommands{\def\sphinxfootnotemark [#1]{}}%
+ % to obtain hyperlinked footnotes in longtable environment we must replace
+ % hyperref's patch of longtable's patch of \@footnotetext by our own
+ \let\LT@p@ftntext\FNH@hyper@fntext
+ % this *requires* longtable to be used always wrapped in savenotes environment
+}%
\endinput
%%
%% End of file `footnotehyper-sphinx.sty'.
diff --git a/sphinx/texinputs/latexmkjarc b/sphinx/texinputs/latexmkjarc
new file mode 100644
index 000000000..4a6864e44
--- /dev/null
+++ b/sphinx/texinputs/latexmkjarc
@@ -0,0 +1,7 @@
+$latex = 'platex --halt-on-error --interaction=nonstopmode -kanji=utf8 %O %S';
+$dvipdf = 'dvipdfmx %O -o %D %S';
+$makeindex = 'rm -f %D; mendex -U -f -d %B.dic -s python.ist %S || echo "mendex exited with error code $? (ignoring)" && : >> %D';
+add_cus_dep( "glo", "gls", 0, "makeglo" );
+sub makeglo {
+ return system( "mendex -J -f -s gglo.ist -o '$_[0].gls' '$_[0].glo'" );
+}
diff --git a/sphinx/texinputs/latexmkrc b/sphinx/texinputs/latexmkrc
new file mode 100644
index 000000000..ddb165783
--- /dev/null
+++ b/sphinx/texinputs/latexmkrc
@@ -0,0 +1,9 @@
+$latex = 'latex --halt-on-error --interaction=nonstopmode %O %S';
+$pdflatex = 'pdflatex --halt-on-error --interaction=nonstopmode %O %S';
+$lualatex = 'lualatex --halt-on-error --interaction=nonstopmode %O %S';
+$xelatex = 'xelatex --no-pdf --halt-on-error --interaction=nonstopmode %O %S';
+$makeindex = 'makeindex -s python.ist %O -o %D %S';
+add_cus_dep( "glo", "gls", 0, "makeglo" );
+sub makeglo {
+ return system( "makeindex -s gglo.ist -o '$_[0].gls' '$_[0].glo'" );
+}
diff --git a/sphinx/texinputs/needspace.sty b/sphinx/texinputs/needspace.sty
deleted file mode 100644
index 113d87216..000000000
--- a/sphinx/texinputs/needspace.sty
+++ /dev/null
@@ -1,35 +0,0 @@
-
-\NeedsTeXFormat{LaTeX2e}
-\ProvidesPackage{needspace}[2010/09/12 v1.3d reserve vertical space]
-
-\newcommand{\needspace}[1]{%
- \begingroup
- \setlength{\dimen@}{#1}%
- \vskip\z@\@plus\dimen@
- \penalty -100\vskip\z@\@plus -\dimen@
- \vskip\dimen@
- \penalty 9999%
- \vskip -\dimen@
- \vskip\z@skip % hide the previous |\vskip| from |\addvspace|
- \endgroup
-}
-
-\newcommand{\Needspace}{\@ifstar{\@sneedsp@}{\@needsp@}}
-
-\newcommand{\@sneedsp@}[1]{\par \penalty-100\begingroup
- \setlength{\dimen@}{#1}%
- \dimen@ii\pagegoal \advance\dimen@ii-\pagetotal
- \ifdim \dimen@>\dimen@ii
- \break
- \fi\endgroup}
-
-\newcommand{\@needsp@}[1]{\par \penalty-100\begingroup
- \setlength{\dimen@}{#1}%
- \dimen@ii\pagegoal \advance\dimen@ii-\pagetotal
- \ifdim \dimen@>\dimen@ii
- \ifdim \dimen@ii>\z@
- \vfil
- \fi
- \break
- \fi\endgroup}
-
diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty
index aaf2f548f..cbc465c1e 100644
--- a/sphinx/texinputs/sphinx.sty
+++ b/sphinx/texinputs/sphinx.sty
@@ -6,28 +6,19 @@
%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesPackage{sphinx}[2017/03/12 v1.5.4 LaTeX package (Sphinx markup)]
-
-% this is the \ltx@ifundefined of ltxcmds.sty, which is loaded by
-% kvoptions (and later by hyperref), but the first release of
-% ltxcmds.sty as in TL2009/Debian has wrong definition.
-\newcommand{\spx@ifundefined}[1]{%
- \ifcsname #1\endcsname
- \expandafter\ifx\csname #1\endcsname\relax
- \expandafter\expandafter\expandafter\@firstoftwo
- \else
- \expandafter\expandafter\expandafter\@secondoftwo
- \fi
- \else
- \expandafter\@firstoftwo
- \fi
-}
+\ProvidesPackage{sphinx}[2017/03/12 v1.6 LaTeX package (Sphinx markup)]
+
+% provides \ltx@ifundefined
+% (many packages load ltxcmds: graphicx does for pdftex and lualatex but
+% not xelatex, and anyhow kvoptions does, but it may be needed in future to
+% use \sphinxdeprecationwarning earlier, and it needs \ltx@ifundefined)
+\RequirePackage{ltxcmds}
%% for deprecation warnings
\newcommand\sphinxdeprecationwarning[4]{% #1 the deprecated macro or name,
% #2 = version when deprecated, #3 = version when removed, #4 = message
\edef\spx@tempa{\detokenize{#1}}%
- \spx@ifundefined{sphinx_depr_\spx@tempa}{%
+ \ltx@ifundefined{sphinx_depr_\spx@tempa}{%
\global\expandafter\let\csname sphinx_depr_\spx@tempa\endcsname\spx@tempa
\expandafter\AtEndDocument\expandafter{\expandafter\let\expandafter
\sphinxdeprecatedmacro\csname sphinx_depr_\spx@tempa\endcsname
@@ -55,6 +46,64 @@
******** ERROR !! PLEASE UPDATE titlesec.sty !!********^^J%
******** THIS VERSION SWALLOWS SECTION NUMBERS.********}}}}{}
\RequirePackage{tabulary}
+% tabulary has a bug with its re-definition of \multicolumn in its first pass
+% which is not \long. But now Sphinx does not use LaTeX's \multicolumn but its
+% own macro. Hence we don't even need to patch tabulary. See sphinxmulticell.sty
+% X or S (Sphinx) may have meanings if some table package is loaded hence
+% \X was chosen to avoid possibility of conflict
+\newcolumntype{\X}[2]{p{\dimexpr
+ (\linewidth-\arrayrulewidth)*#1/#2-\tw@\tabcolsep-\arrayrulewidth\relax}}
+\newcolumntype{\Y}[1]{p{\dimexpr
+ #1\dimexpr\linewidth-\arrayrulewidth\relax-\tw@\tabcolsep-\arrayrulewidth\relax}}
+% using here T (for Tabulary) feels less of a problem than the X could be
+\newcolumntype{T}{J}%
+\RequirePackage{longtable}
+% For table captions.
+\RequirePackage{threeparttable}
+% fixing the LaTeX mess of vertical spaces with threeparttable and longtable
+% The user interface:
+\newcommand*\sphinxtablepre {0pt}%
+\newcommand*\sphinxtablepost{\medskipamount}%
+% as one can not use \baselineskip from inside longtable (it is zero there)
+% we need \sphinxbaselineskip, which defaults to \baselineskip
+\newcommand*\sphinxbelowcaptionspace{.5\sphinxbaselineskip}%
+\def\sphinxbaselineskip{\baselineskip}%
+% Helper macros, not a priori for user customization
+\def\sphinxatlongtablestart
+ {\par
+ \vskip\parskip
+ \vskip\dimexpr\sphinxtablepre\relax % adjust vertical position
+ \vbox{}% get correct baseline from above
+ \LTpre\z@skip\LTpost\z@skip % set to zero longtable's own skips
+ \edef\sphinxbaselineskip{\dimexpr\the\dimexpr\baselineskip\relax\relax}}%
+\def\sphinxatlongtableend{\prevdepth\z@\vskip\sphinxtablepost\relax}%
+% the longtable template inserts a \strut at caption's end
+\def\sphinxlongtablecapskipadjust
+ {\dimexpr-\dp\strutbox-\sphinxbaselineskip
+ +\sphinxbelowcaptionspace\relax}%
+% tabular(y) with or without threeparttable
+\def\sphinxattablestart
+ {\par
+ \vskip\dimexpr\sphinxtablepre\relax
+ \belowcaptionskip\sphinx@TPTbelowcaptionskip}%
+\let\sphinxattableend\sphinxatlongtableend
+% the tabular(y) templates use [t] vertical placement parameter
+\def\sphinx@TPTbelowcaptionskip
+ {\dimexpr-1.2\baselineskip % .2\baselineskip hardcoded in threeparttable
+ +\sphinxbelowcaptionspace\relax }%
+% varwidth is crucial for our handling of general contents in merged cells
+\RequirePackage{varwidth}
+% but addition of a compatibility patch with hyperref is needed
+% (tested with varwidth v 0.92 Mar 2009)
+\AtBeginDocument {%
+ \let\@@vwid@Hy@raisedlink\Hy@raisedlink
+ \long\def\@vwid@Hy@raisedlink#1{\@vwid@wrap{\@@vwid@Hy@raisedlink{#1}}}%
+ \edef\@vwid@setup{%
+ \let\noexpand\Hy@raisedlink\noexpand\@vwid@Hy@raisedlink % HYPERREF !
+ \unexpanded\expandafter{\@vwid@setup}}%
+}%
+% Homemade package to handle merged cells
+\RequirePackage{sphinxmulticell}
\RequirePackage{makeidx}
% For framing code-blocks and warning type notices, and shadowing topics
\RequirePackage{framed}
@@ -67,15 +116,9 @@
% For highlighted code.
\RequirePackage{fancyvrb}
\fvset{fontsize=\small}
-% For table captions.
-\RequirePackage{threeparttable}
% For hyperlinked footnotes in tables; also for gathering footnotes from
% topic and warning blocks. Also to allow code-blocks in footnotes.
\RequirePackage{footnotehyper-sphinx}
-\makesavenoteenv{tabulary}
-\makesavenoteenv{tabular}
-\makesavenoteenv{threeparttable}
-% (longtable is hyperref compatible and needs no special treatment here.)
% For the H specifier. Do not \restylefloat{figure}, it breaks Sphinx code
% for allowing figures in tables.
\RequirePackage{float}
@@ -267,7 +310,7 @@
% define all missing \@list... macros
\count@\@ne
\loop
- \spx@ifundefined{@list\romannumeral\the\count@}
+ \ltx@ifundefined{@list\romannumeral\the\count@}
{\iffalse}{\iftrue\advance\count@\@ne}%
\repeat
\loop
@@ -276,7 +319,7 @@
\csname @list\romannumeral\the\count@\expandafter\endcsname
\csname @list\romannumeral\the\numexpr\count@-\@ne\endcsname
% work around 2.6--3.2d babel-french issue (fixed in 3.2e; no change needed)
- \spx@ifundefined{leftmargin\romannumeral\the\count@}
+ \ltx@ifundefined{leftmargin\romannumeral\the\count@}
{\expandafter\let
\csname leftmargin\romannumeral\the\count@\expandafter\endcsname
\csname leftmargin\romannumeral\the\numexpr\count@-\@ne\endcsname}{}%
@@ -285,7 +328,7 @@
% define all missing enum... counters and \labelenum... macros and \p@enum..
\count@\@ne
\loop
- \spx@ifundefined{c@enum\romannumeral\the\count@}
+ \ltx@ifundefined{c@enum\romannumeral\the\count@}
{\iffalse}{\iftrue\advance\count@\@ne}%
\repeat
\loop
@@ -305,7 +348,7 @@
% define all missing labelitem... macros
\count@\@ne
\loop
- \spx@ifundefined{labelitem\romannumeral\the\count@}
+ \ltx@ifundefined{labelitem\romannumeral\the\count@}
{\iffalse}{\iftrue\advance\count@\@ne}%
\repeat
\loop
@@ -331,7 +374,7 @@
{\end{theindex}}
\renewenvironment{sphinxthebibliography}[1]
- {\cleardoublepage\phantomsection
+ {\cleardoublepage% \phantomsection % not needed here since TeXLive 2010's hyperref
\begin{thebibliography}{1}}
{\end{thebibliography}}
\fi
@@ -347,45 +390,27 @@
% make commands known to non-Sphinx document classes
\providecommand*{\sphinxtableofcontents}{\tableofcontents}
-\spx@ifundefined{sphinxthebibliography}
+\ltx@ifundefined{sphinxthebibliography}
{\newenvironment
{sphinxthebibliography}{\begin{thebibliography}}{\end{thebibliography}}%
}
{}% else clause of ifundefined
-\spx@ifundefined{sphinxtheindex}
+\ltx@ifundefined{sphinxtheindex}
{\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}%
{}% else clause of ifundefined
%% COLOR (general)
%
-% FIXME: the reasons might be obsolete (better color drivers now?)
-% use pdfoutput for pTeX and dvipdfmx
-% when pTeX (\kanjiskip is defined), set pdfoutput to evade \include{pdfcolor}
-\ifx\kanjiskip\undefined\else
- \newcount\pdfoutput\pdfoutput=0
-\fi
-
-% for PDF output, use colors and maximal compression
-\newif\ifsphinxpdfoutput % used in \maketitle
-\ifx\pdfoutput\undefined\else
- \ifnum\pdfoutput=\z@
- \let\py@NormalColor\relax
- \let\py@TitleColor\relax
- \else
- \sphinxpdfoutputtrue
- \input{pdfcolor}
- \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}}
- \def\py@TitleColor{\color{TitleColor}}
- \pdfcompresslevel=9
- \fi
-\fi
-
-% XeLaTeX can do colors, too
-\ifx\XeTeXrevision\undefined\else
- \def\py@NormalColor{\color[rgb]{0.0,0.0,0.0}}
- \def\py@TitleColor{\color{TitleColor}}
-\fi
+% FIXME: \normalcolor should probably be used in place of \py@NormalColor
+% elsewhere, and \py@NormalColor should never be defined. \normalcolor
+% switches to the colour from last \color call in preamble.
+\def\py@NormalColor{\color{black}}
+% FIXME: it is probably better to use \color{TitleColor}, as TitleColor
+% can be customized from 'sphinxsetup', and drop usage of \py@TitleColor
+\def\py@TitleColor{\color{TitleColor}}
+% FIXME: this line should be dropped, as "9" is default anyhow.
+\ifdefined\pdfcompresslevel\pdfcompresslevel = 9 \fi
%% PAGE STYLING
@@ -399,7 +424,7 @@
% Redefine the 'normal' header/footer style when using "fancyhdr" package:
% Note: this presupposes "twoside". If "oneside" class option, there will be warnings.
-\spx@ifundefined{fancyhf}{}{
+\ltx@ifundefined{fancyhf}{}{
% Use \pagestyle{normal} as the primary pagestyle for text.
\fancypagestyle{normal}{
\fancyhf{}
@@ -411,7 +436,7 @@
\renewcommand{\headrulewidth}{0.4pt}
\renewcommand{\footrulewidth}{0.4pt}
% define chaptermark with \@chappos when \@chappos is available for Japanese
- \spx@ifundefined{@chappos}{}
+ \ltx@ifundefined{@chappos}{}
{\def\chaptermark##1{\markboth{\@chapapp\space\thechapter\space\@chappos\space ##1}{}}}
}
% Update the plain style so we get the page number & footer line,
@@ -528,33 +553,25 @@
%% GRAPHICS
%
-% Redefine \includegraphics to resize images larger than the line width,
+% \sphinxincludegraphics defined to resize images larger than the line width,
% except if height or width option present.
%
% If scale is present, rescale before fitting to line width. (since 1.5)
-%
-% Warning: future version of Sphinx will not modify original \includegraphics,
-% below code will be definition only of \sphinxincludegraphics.
-\let\py@Oldincludegraphics\includegraphics
\newbox\spx@image@box
-\renewcommand*{\includegraphics}[2][]{%
+\newcommand*{\sphinxincludegraphics}[2][]{%
\in@{height}{#1}\ifin@\else\in@{width}{#1}\fi
\ifin@ % height or width present
- \py@Oldincludegraphics[#1]{#2}%
+ \includegraphics[#1]{#2}%
\else % no height nor width (but #1 may be "scale=...")
- \setbox\spx@image@box\hbox{\py@Oldincludegraphics[#1,draft]{#2}}%
+ \setbox\spx@image@box\hbox{\includegraphics[#1,draft]{#2}}%
\ifdim \wd\spx@image@box>\linewidth
\setbox\spx@image@box\box\voidb@x % clear memory
- \py@Oldincludegraphics[#1,width=\linewidth]{#2}%
+ \includegraphics[#1,width=\linewidth]{#2}%
\else
- \py@Oldincludegraphics[#1]{#2}%
+ \includegraphics[#1]{#2}%
\fi
\fi
}
-% Writer will put \sphinxincludegraphics in LaTeX source, and with this,
-% documents which used their own modified \includegraphics will compile
-% as before. But see warning above.
-\newcommand*{\sphinxincludegraphics}{\includegraphics}
%% FIGURE IN TABLE
@@ -600,7 +617,7 @@
% for captions of literal blocks
% also define `\theH...` macros for hyperref
\newcounter{literalblock}
-\spx@ifundefined{c@chapter}
+\ltx@ifundefined{c@chapter}
{\@addtoreset{literalblock}{section}
\def\theliteralblock {\ifnum\c@section>\z@ \thesection.\fi\arabic{literalblock}}
\def\theHliteralblock {\theHsection.\arabic{literalblock}}}
@@ -971,7 +988,7 @@
\newenvironment{sphinxShadowBox}
{\def\FrameCommand {\spx@ShadowFBox }%
% configure framed.sty not to add extra vertical spacing
- \spx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}%
+ \ltx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}%
% the \trivlist will add the vertical spacing on top and bottom which is
% typical of center environment as used in Sphinx <= 1.4.1
% the \noindent has the effet of an extra blank line on top, to
@@ -1056,7 +1073,7 @@
% configure framed.sty's parameters to obtain same vertical spacing
% as for "light" boxes. We need for this to manually insert parskip glue and
% revert a skip done by framed before the frame.
- \spx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}%
+ \ltx@ifundefined{OuterFrameSep}{}{\OuterFrameSep\z@skip}%
\vspace{\FrameHeightAdjust}
% copied/adapted from framed.sty's snugshade
\def\FrameCommand##1{\hskip\@totalleftmargin
@@ -1112,10 +1129,7 @@
\csname\@backslashchar color@#2\endcsname }
% the main dispatch for all types of notices
-\newenvironment{sphinxadmonition}{\begin{notice}}{\end{notice}}
-% use of ``notice'' is for backwards compatibility and will be removed in
-% future release; sphinxadmonition environment will be defined directly.
-\newenvironment{notice}[2]{% #1=type, #2=heading
+\newenvironment{sphinxadmonition}[2]{% #1=type, #2=heading
% can't use #1 directly in definition of end part
\def\spx@noticetype {#1}%
% set parameters of heavybox/lightbox
@@ -1126,6 +1140,14 @@
\begin{sphinx#1}{#2}}
% in end part, need to go around a LaTeX's "feature"
{\edef\spx@temp{\noexpand\end{sphinx\spx@noticetype}}\spx@temp}
+% use of ``notice'' is for backwards compatibility and will be removed in
+% Sphinx 1.7.
+\newenvironment{notice}
+ {\sphinxdeprecationwarning {notice}{1.6}{1.7}{%
+ This document was probably built with a Sphinx extension using ``notice''^^J
+ environment. At Sphinx 1.7, ``notice'' environment will be removed. Please^^J
+ report to extension author to use ``sphinxadmonition'' instead.^^J%
+ ****}\begin{sphinxadmonition}}{\end{sphinxadmonition}}
%% PYTHON DOCS MACROS AND ENVIRONMENTS
@@ -1328,12 +1350,12 @@
\ifspx@opt@dontkeepoldnames\else
\let\spx@alreadydefinedlist\@empty
\typeout{** (sphinx) defining (legacy) text style macros without \string\sphinx\space prefix}
- \typeout{** if clashes with packages, set latex_keep_old_macro_names=False
+ \typeout{** if clashes with packages, do not set latex_keep_old_macro_names=True
in conf.py}
\@for\@tempa:=code,strong,bfcode,email,tablecontinued,titleref,%
menuselection,accelerator,crossref,termref,optional\do
{% first, check if command with no prefix already exists
- \spx@ifundefined{\@tempa}{%
+ \ltx@ifundefined{\@tempa}{%
% give it the meaning defined so far with \sphinx prefix
\expandafter\let\csname\@tempa\expandafter\endcsname
\csname sphinx\@tempa\endcsname
@@ -1353,6 +1375,7 @@
Sphinx mark-up uses only \string\sphinx\expandafter\@gobble\sphinxdeprecatedmacro.}%
}%
\fi
+ \sphinxdeprecationwarning{latex_keep_old_macro_names=True}{1.6}{1.7}{}%
\fi
% additional customizable styling
@@ -1378,4 +1401,4 @@
% Tell TeX about pathological hyphenation cases:
\hyphenation{Base-HTTP-Re-quest-Hand-ler}
-
+\endinput
diff --git a/sphinx/texinputs/sphinxhowto.cls b/sphinx/texinputs/sphinxhowto.cls
index 7336c697e..f4526c9d3 100644
--- a/sphinx/texinputs/sphinxhowto.cls
+++ b/sphinx/texinputs/sphinxhowto.cls
@@ -30,8 +30,9 @@
% ``Bjarne'' style a bit better.
%
\renewcommand{\maketitle}{%
- \noindent\rule{\textwidth}{1pt}\ifsphinxpdfoutput\newline\null\fi\par
- \ifsphinxpdfoutput
+ \noindent\rule{\textwidth}{1pt}\par
+ % FIXME: use \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}
+ \ifdefined\pdfinfo
\begingroup
%\pdfstringdefDisableCommands{\def\\{, }\def\endgraf{ }\def\and{, }}%
%\hypersetup{pdfauthor={\@author}, pdftitle={\@title}}%
@@ -78,10 +79,8 @@
% For an article document class this environment is a section,
% so no page break before it.
%
-% Note: \phantomsection is required for TeXLive 2009
-% http://tex.stackexchange.com/questions/44088/when-do-i-need-to-invoke-phantomsection#comment166081_44091
\newenvironment{sphinxthebibliography}[1]{%
- \phantomsection
+ % \phantomsection % not needed here since TeXLive 2010's hyperref
\begin{thebibliography}{1}%
\addcontentsline{toc}{section}{\ifdefined\refname\refname\else\ifdefined\bibname\bibname\fi\fi}}{\end{thebibliography}}
@@ -92,6 +91,6 @@
\@ifclassloaded{memoir}
{\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}
{\newenvironment{sphinxtheindex}{%
- \phantomsection
+ \phantomsection % needed because no chapter, section, ... is created by theindex
\begin{theindex}%
\addcontentsline{toc}{section}{\indexname}}{\end{theindex}}}
diff --git a/sphinx/texinputs/sphinxmanual.cls b/sphinx/texinputs/sphinxmanual.cls
index aa2586d0e..99b6f8309 100644
--- a/sphinx/texinputs/sphinxmanual.cls
+++ b/sphinx/texinputs/sphinxmanual.cls
@@ -40,8 +40,9 @@
\begin{titlepage}%
\let\footnotesize\small
\let\footnoterule\relax
- \noindent\rule{\textwidth}{1pt}\ifsphinxpdfoutput\newline\null\fi\par
- \ifsphinxpdfoutput
+ \noindent\rule{\textwidth}{1pt}\par
+ % FIXME: use \hypersetup{pdfauthor={\@author}, pdftitle={\@title}}
+ \ifdefined\pdfinfo
\begingroup
%\pdfstringdefDisableCommands{\def\\{, }\def\endgraf{ }\def\and{, }}%
%\hypersetup{pdfauthor={\@author}, pdftitle={\@title}}%
@@ -97,11 +98,9 @@
% Contents.
% For a report document class this environment is a chapter.
%
-% Note: \phantomsection is required for TeXLive 2009
-% http://tex.stackexchange.com/questions/44088/when-do-i-need-to-invoke-phantomsection#comment166081_44091
\newenvironment{sphinxthebibliography}[1]{%
\if@openright\cleardoublepage\else\clearpage\fi
- \phantomsection
+ % \phantomsection % not needed here since TeXLive 2010's hyperref
\begin{thebibliography}{1}%
\addcontentsline{toc}{chapter}{\bibname}}{\end{thebibliography}}
@@ -112,6 +111,6 @@
{\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}
{\newenvironment{sphinxtheindex}{%
\if@openright\cleardoublepage\else\clearpage\fi
- \phantomsection
+ \phantomsection % needed as no chapter, section, ... created
\begin{theindex}%
\addcontentsline{toc}{chapter}{\indexname}}{\end{theindex}}}
diff --git a/sphinx/texinputs/sphinxmulticell.sty b/sphinx/texinputs/sphinxmulticell.sty
new file mode 100644
index 000000000..f0d11b1f9
--- /dev/null
+++ b/sphinx/texinputs/sphinxmulticell.sty
@@ -0,0 +1,317 @@
+\NeedsTeXFormat{LaTeX2e}
+\ProvidesPackage{sphinxmulticell}%
+ [2017/02/23 v1.6 better span rows and columns of a table (Sphinx team)]%
+\DeclareOption*{\PackageWarning{sphinxmulticell}{Option `\CurrentOption' is unknown}}%
+\ProcessOptions\relax
+%
+% --- MULTICOLUMN ---
+% standard LaTeX's \multicolumn
+% 1. does not allow verbatim contents,
+% 2. interacts very poorly with tabulary.
+%
+% It is needed to write own macros for Sphinx: to allow code-blocks in merged
+% cells rendered by tabular/longtable, and to allow multi-column cells with
+% paragraphs to be taken into account sanely by tabulary algorithm for column
+% widths.
+%
+% This requires quite a bit of hacking. First, in Sphinx, the multi-column
+% contents will *always* be wrapped in a varwidth environment. The issue
+% becomes to pass it the correct target width. We must trick tabulary into
+% believing the multicolumn is simply separate columns, else tabulary does not
+% incorporate the contents in its algorithm. But then we must clear the
+% vertical rules...
+%
+% configuration of tabulary
+\setlength{\tymin}{3\fontcharwd\font`0 }% minimal width of "squeezed" columns
+\setlength{\tymax}{10000pt}% allow enough room for paragraphs to "compete"
+% we need access to tabulary's final computed width. \@tempdima is too volatile
+% to hope it has kept tabulary's value when \sphinxcolwidth needs it.
+\newdimen\sphinx@TY@tablewidth
+\def\tabulary{%
+ \def\TY@final{\sphinx@TY@tablewidth\@tempdima\tabular}%
+ \let\endTY@final\endtabular
+ \TY@tabular}%
+% next hack is needed only if user has set latex_use_latex_multicolumn to True:
+% it fixes tabulary's bug with \multicolumn defined "short" in first pass. (if
+% upstream tabulary adds a \long, our extra one causes no harm)
+\def\sphinx@tempa #1\def\multicolumn#2#3#4#5#6#7#8#9\sphinx@tempa
+ {\def\TY@tab{#1\long\def\multicolumn####1####2####3{\multispan####1\relax}#9}}%
+\expandafter\sphinx@tempa\TY@tab\sphinx@tempa
+%
+% TN. 1: as \omit is never executed, Sphinx multicolumn does not need to worry
+% like standard multicolumn about |l| vs l|. On the other hand it assumes
+% columns are separated by a | ... (if not it will add extraneous
+% \arrayrulewidth space for each column separation in its estimate of available
+% width).
+%
+% TN. 1b: as Sphinx multicolumn uses neither \omit nor \span, it can not
+% (easily) get rid of extra macros from >{...} or <{...} between columns. At
+% least, it has been made compatible with colortbl's \columncolor.
+%
+% TN. 2: tabulary's second pass is handled like tabular/longtable's single
+% pass, with the difference that we hacked \TY@final to set in
+% \sphinx@TY@tablewidth the final target width as computed by tabulary. This is
+% needed only to handle columns with a "horizontal" specifier: "p" type columns
+% (inclusive of tabulary's LJRC) holds the target column width in the
+% \linewidth dimension.
+%
+% TN. 3: use of \begin{sphinxmulticolumn}...\end{sphinxmulticolumn} mark-up
+% would need some hacking around the fact that groups can not span across table
+% cells (the code inserts & tokens, see TN1b). It was decided to keep it
+% simple with \sphinxstartmulticolumn...\sphinxstopmulticolumn.
+%
+% MEMO about nesting: if sphinxmulticolumn is encountered in a nested tabular
+% inside a tabulary it will think to be at top level in the tabulary. But
+% Sphinx generates no nested tables, and if some LaTeX macro uses internally a
+% tabular this will not have a \sphinxstartmulticolumn within it!
+%
+\def\sphinxstartmulticolumn{%
+ \ifx\equation$% $ tabulary's first pass
+ \expandafter\sphinx@TYI@start@multicolumn
+ \else % either not tabulary or tabulary's second pass
+ \expandafter\sphinx@start@multicolumn
+ \fi
+}%
+\def\sphinxstopmulticolumn{%
+ \ifx\equation$% $ tabulary's first pass
+ \expandafter\sphinx@TYI@stop@multicolumn
+ \else % either not tabulary or tabulary's second pass
+ \ignorespaces
+ \fi
+}%
+\def\sphinx@TYI@start@multicolumn#1{%
+ % use \gdef always to avoid stack space build up
+ \gdef\sphinx@tempa{#1}\begingroup\setbox\z@\hbox\bgroup
+}%
+\def\sphinx@TYI@stop@multicolumn{\egroup % varwidth was used with \tymax
+ \xdef\sphinx@tempb{\the\dimexpr\wd\z@/\sphinx@tempa}% per column width
+ \endgroup
+ \expandafter\sphinx@TYI@multispan\expandafter{\sphinx@tempa}%
+}%
+\def\sphinx@TYI@multispan #1{%
+ \kern\sphinx@tempb\ignorespaces % the per column occupied width
+ \ifnum#1>\@ne % repeat, taking into account subtleties of TeX's & ...
+ \expandafter\sphinx@TYI@multispan@next\expandafter{\the\numexpr#1-\@ne\expandafter}%
+ \fi
+}%
+\def\sphinx@TYI@multispan@next{&\relax\sphinx@TYI@multispan}%
+%
+% Now the branch handling either the second pass of tabulary or the single pass
+% of tabular/longtable. This is the delicate part where we gather the
+% dimensions from the p columns either set-up by tabulary or by user p column
+% or Sphinx \X, \Y columns. The difficulty is that to get the said width, the
+% template must be inserted (other hacks would be horribly complicated except
+% if we rewrote crucial parts of LaTeX's \@array !) and we can not do
+% \omit\span like standard \multicolumn's easy approach. Thus we must cancel
+% the \vrule separators. Also, perhaps the column specifier is of the l, c, r
+% type, then we attempt an ad hoc rescue to give varwidth a reasonable target
+% width.
+\def\sphinx@start@multicolumn#1{%
+ \gdef\sphinx@multiwidth{0pt}\gdef\sphinx@tempa{#1}\sphinx@multispan{#1}%
+}%
+\def\sphinx@multispan #1{%
+ \ifnum#1=\@ne\expandafter\sphinx@multispan@end
+ \else\expandafter\sphinx@multispan@next
+ \fi {#1}%
+}%
+\def\sphinx@multispan@next #1{%
+ % trick to recognize L, C, R, J or p, m, b type columns
+ \ifdim\baselineskip>\z@
+ \gdef\sphinx@tempb{\linewidth}%
+ \else
+ % if in an l, r, c type column, try and hope for the best
+ \xdef\sphinx@tempb{\the\dimexpr(\ifx\TY@final\@undefined\linewidth\else
+ \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa
+ -\tw@\tabcolsep-\arrayrulewidth\relax}%
+ \fi
+ \noindent\kern\sphinx@tempb\relax
+ \xdef\sphinx@multiwidth
+ {\the\dimexpr\sphinx@multiwidth+\sphinx@tempb+\tw@\tabcolsep+\arrayrulewidth}%
+ % hack the \vline and the colortbl macros
+ \sphinx@hack@vline\sphinx@hack@CT&\relax
+ % repeat
+ \expandafter\sphinx@multispan\expandafter{\the\numexpr#1-\@ne}%
+}%
+% packages like colortbl add group levels, we need to "climb back up" to be
+% able to hack the \vline and also the colortbl inserted tokens. This creates
+% empty space whether or not the columns were | separated:
+\def\sphinx@hack@vline{\ifnum\currentgrouptype=6\relax
+ \kern\arrayrulewidth\arrayrulewidth\z@\else\aftergroup\sphinx@hack@vline\fi}%
+\def\sphinx@hack@CT{\ifnum\currentgrouptype=6\relax
+ \let\CT@setup\sphinx@CT@setup\else\aftergroup\sphinx@hack@CT\fi}%
+% It turns out \CT@row@color is not expanded contrary to \CT@column@color
+% during LaTeX+colortbl preamble preparation, hence it would be possible for
+% \sphinx@CT@setup to discard only the column color and choose to obey or not
+% row color and cell color. It would even be possible to propagate cell color
+% to row color for the duration of the Sphinx multicolumn... the (provisional?)
+% choice has been made to cancel the colortbl colours for the multicolumn
+% duration.
+\def\sphinx@CT@setup #1\endgroup{\endgroup}% hack to remove colour commands
+\def\sphinx@multispan@end#1{%
+ % first, trace back our steps horizontally
+ \noindent\kern-\dimexpr\sphinx@multiwidth\relax
+ % and now we set the final computed width for the varwidth environment
+ \ifdim\baselineskip>\z@
+ \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+\linewidth}%
+ \else
+ \xdef\sphinx@multiwidth{\the\dimexpr\sphinx@multiwidth+
+ (\ifx\TY@final\@undefined\linewidth\else
+ \sphinx@TY@tablewidth\fi-\arrayrulewidth)/\sphinx@tempa
+ -\tw@\tabcolsep-\arrayrulewidth\relax}%
+ \fi
+ % we need to remove colour set-up also for last cell of the multi-column
+ \aftergroup\sphinx@hack@CT
+}%
+\newcommand*\sphinxcolwidth[2]{%
+ % this dimension will always be used for varwidth, and serves as maximum
+ % width when cells are merged either via multirow or multicolumn or both,
+  % as always their contents are wrapped in varwidth environment.
+ \ifnum#1>\@ne % multi-column (and possibly also multi-row)
+ % we wrote our own multicolumn code especially to handle that (and allow
+ % verbatim contents)
+ \ifx\equation$%$
+ \tymax % first pass of tabulary (cf MEMO above regarding nesting)
+ \else % the \@gobble thing is for compatibility with standard \multicolumn
+ \sphinx@multiwidth\@gobble{#1/#2}%
+ \fi
+ \else % single column multirow
+ \ifx\TY@final\@undefined % not a tabulary.
+ \ifdim\baselineskip>\z@
+ % in a p{..} type column, \linewidth is the target box width
+ \linewidth
+ \else
+ % l, c, r columns. Do our best.
+ \dimexpr(\linewidth-\arrayrulewidth)/#2-
+ \tw@\tabcolsep-\arrayrulewidth\relax
+ \fi
+ \else % in tabulary
+ \ifx\equation$%$% first pass
+ \tymax % it is set to a big value so that paragraphs can express themselves
+ \else
+ % second pass.
+ \ifdim\baselineskip>\z@
+ \linewidth % in a L, R, C, J column or a p, \X, \Y ...
+ \else
+ % we have hacked \TY@final to put in \sphinx@TY@tablewidth the table width
+ \dimexpr(\sphinx@TY@tablewidth-\arrayrulewidth)/#2-
+ \tw@\tabcolsep-\arrayrulewidth\relax
+ \fi
+ \fi
+ \fi
+ \fi
+}%
+% fallback default in case user has set latex_use_latex_multicolumn to True:
+% \sphinxcolwidth will use this only inside LaTeX's standard \multicolumn
+\def\sphinx@multiwidth #1#2{\dimexpr % #1 to gobble the \@gobble (!)
+ (\ifx\TY@final\@undefined\linewidth\else\sphinx@TY@tablewidth\fi
+ -\arrayrulewidth)*#2-\tw@\tabcolsep-\arrayrulewidth\relax}%
+%
+% --- MULTIROW ---
+% standard \multirow
+% 1. does not allow verbatim contents,
+% 2. does not allow blank lines in its argument,
+% 3. its * specifier means to typeset "horizontally" which is very
+% bad for paragraph content. 2016 version has = specifier but it
+% must be used with p type columns only, else results are bad,
+% 4. it requires manual intervention if the contents is too long to fit
+% in the asked-for number of rows.
+% 5. colour panels (either from \rowcolor or \columncolor) will hide
+% the bottom part of multirow text, hence manual tuning is needed
+% to put the multirow insertion at the _bottom_.
+%
+% The Sphinx solution consists in always having contents wrapped
+% in a varwidth environment so that it makes sense to estimate how many
+% lines it will occupy, and then ensure by insertion of suitable struts
+% that the table rows have the needed height. The needed mark-up is done
+% by LaTeX writer, which has its own id for the merged cells.
+%
+% The colour issue is solved by clearing colour panels in all cells,
+% whether or not the multirow is single-column or multi-column.
+%
+% In passing we obtain baseline alignments across rows (only if
+% \arraylinestretch is 1, as LaTeX does not obey \arraylinestretch in "p"
+% multi-line contents, only first and last line...)
+%
+% TODO: examine the situation with \arraylinestretch > 1. The \extrarowheight
+% is hopeless for multirow anyhow, it makes baseline alignment strictly
+% impossible.
+\newcommand\sphinxmultirow[2]{\begingroup
+ % #1 = nb of spanned rows, #2 = Sphinx id of "cell", #3 = contents
+ % but let's fetch #3 in a way allowing verbatim contents !
+ \def\sphinx@nbofrows{#1}\def\sphinx@cellid{#2}%
+ \afterassignment\sphinx@multirow\let\next=
+}%
+\def\sphinx@multirow {%
+ \setbox\z@\hbox\bgroup\aftergroup\sphinx@@multirow\strut
+}%
+\def\sphinx@@multirow {%
+ % The contents, which is a varwidth environment, has been captured in
+ % \box0 (a \hbox).
+ % We have with \sphinx@cellid an assigned unique id. The goal is to give
+ % about the same height to all the involved rows.
+ % For this Sphinx will insert a \sphinxtablestrut{cell_id} mark-up
+ % in LaTeX file and the expansion of the latter will do the suitable thing.
+ \dimen@\dp\z@
+ \dimen\tw@\ht\@arstrutbox
+ \advance\dimen@\dimen\tw@
+ \advance\dimen\tw@\dp\@arstrutbox
+ \count@=\dimen@ % type conversion dim -> int
+ \count\tw@=\dimen\tw@
+ \divide\count@\count\tw@ % TeX division truncates
+ \advance\dimen@-\count@\dimen\tw@
+ % 1300sp is about 0.02pt. For comparison a rule default width is 0.4pt.
+ % (note that if \count@ holds 0, surely \dimen@>1300sp)
+ \ifdim\dimen@>1300sp \advance\count@\@ne \fi
+ % now \count@ holds the count L of needed "lines"
+ % and \sphinx@nbofrows holds the number N of rows
+ % we have L >= 1 and N >= 1
+ % if L is a multiple of N, ... clear what to do !
+ % else write L = qN + r, 1 <= r < N and we will
+ % arrange for each row to have enough space for:
+ % q+1 "lines" in each of the first r rows
+ % q "lines" in each of the (N-r) bottom rows
+ % for a total of (q+1) * r + q * (N-r) = q * N + r = L
+ % It is possible that q == 0.
+ \count\tw@\count@
+ % the TeX division truncates
+ \divide\count\tw@\sphinx@nbofrows\relax
+ \count4\count\tw@ % q
+ \multiply\count\tw@\sphinx@nbofrows\relax
+ \advance\count@-\count\tw@ % r
+ \expandafter\xdef\csname sphinx@tablestrut_\sphinx@cellid\endcsname
+ {\noexpand\sphinx@tablestrut{\the\count4}{\the\count@}{\sphinx@cellid}}%
+ \dp\z@\z@
+ % this will use the real height if it is >\ht\@arstrutbox
+ \sphinxtablestrut{\sphinx@cellid}\box\z@
+ \endgroup % group was opened in \sphinxmultirow
+}%
+\newcommand*\sphinxtablestrut[1]{%
+ % #1 is a "cell_id", i.e. the id of a merged group of table cells
+ \csname sphinx@tablestrut_#1\endcsname
+}%
+% LaTeX typesets the table row by row, hence each execution can do
+% an update for the next row.
+\newcommand*\sphinx@tablestrut[3]{\begingroup
+ % #1 = q, #2 = (initially) r, #3 = cell_id, q+1 lines in first r rows
+ % if #2 = 0, create space for max(q,1) table lines
+ % if #2 > 0, create space for q+1 lines and decrement #2
+ \leavevmode
+ \count@#1\relax
+ \ifnum#2=\z@
+ \ifnum\count@=\z@\count@\@ne\fi
+ \else
+ % next row will be with a #2 decremented by one
+ \expandafter\xdef\csname sphinx@tablestrut_#3\endcsname
+ {\noexpand\sphinx@tablestrut{#1}{\the\numexpr#2-\@ne}{#3}}%
+ \advance\count@\@ne
+ \fi
+ \vrule\@height\ht\@arstrutbox
+ \@depth\dimexpr\count@\ht\@arstrutbox+\count@\dp\@arstrutbox-\ht\@arstrutbox\relax
+ \@width\z@
+ \endgroup
+ % we need this to avoid colour panels hiding bottom parts of multirow text
+ \sphinx@hack@CT
+}%
+\endinput
+%%
+%% End of file `sphinxmulticell.sty'.
diff --git a/sphinx/themes/basic/layout.html b/sphinx/themes/basic/layout.html
index e3e59a7ac..587f8814b 100644
--- a/sphinx/themes/basic/layout.html
+++ b/sphinx/themes/basic/layout.html
@@ -7,10 +7,12 @@
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
#}
-{%- block doctype -%}
+{%- block doctype -%}{%- if html5_doctype %}
+<!DOCTYPE html>
+{%- else %}
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-{%- endblock %}
+{%- endif %}{%- endblock %}
{%- set reldelim1 = reldelim1 is not defined and ' &#187;' or reldelim1 %}
{%- set reldelim2 = reldelim2 is not defined and ' |' or reldelim2 %}
{%- set render_sidebar = (not embedded) and (not theme_nosidebar|tobool) and
@@ -149,7 +151,7 @@
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
- <body role="document">
+ <body>
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
diff --git a/sphinx/themes/basic/static/basic.css_t b/sphinx/themes/basic/static/basic.css_t
index 996d3246d..1cbc649f9 100644
--- a/sphinx/themes/basic/static/basic.css_t
+++ b/sphinx/themes/basic/static/basic.css_t
@@ -398,6 +398,13 @@ table.field-list td, table.field-list th {
margin: 0;
}
+.field-name {
+ -moz-hyphens: manual;
+ -ms-hyphens: manual;
+ -webkit-hyphens: manual;
+ hyphens: manual;
+}
+
/* -- other body styles ----------------------------------------------------- */
ol.arabic {
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 2eb78799b..162b3206c 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -25,6 +25,13 @@ except ImportError:
from sphinx import package_dir
from sphinx.errors import ThemeError
+from sphinx.util import logging
+
+logger = logging.getLogger(__name__)
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Tuple # NOQA
NODEFAULT = object()
THEMECONF = 'theme.conf'
@@ -34,10 +41,12 @@ class Theme(object):
"""
Represents the theme chosen in the configuration.
"""
- themes = {}
+ themes = {} # type: Dict[unicode, Tuple[unicode, zipfile.ZipFile]]
+ themepath = [] # type: List[unicode]
@classmethod
- def init_themes(cls, confdir, theme_path, warn=None):
+ def init_themes(cls, confdir, theme_path):
+ # type: (unicode, unicode) -> None
"""Search all theme paths for available themes."""
cls.themepath = list(theme_path)
cls.themepath.append(path.join(package_dir, 'themes'))
@@ -49,15 +58,14 @@ class Theme(object):
for theme in os.listdir(themedir):
if theme.lower().endswith('.zip'):
try:
- zfile = zipfile.ZipFile(path.join(themedir, theme))
+ zfile = zipfile.ZipFile(path.join(themedir, theme)) # type: ignore
if THEMECONF not in zfile.namelist():
continue
tname = theme[:-4]
tinfo = zfile
except Exception:
- if warn:
- warn('file %r on theme path is not a valid '
- 'zipfile or contains no theme' % theme)
+ logger.warning('file %r on theme path is not a valid '
+ 'zipfile or contains no theme', theme)
continue
else:
if not path.isfile(path.join(themedir, theme, THEMECONF)):
@@ -68,6 +76,7 @@ class Theme(object):
@classmethod
def load_extra_theme(cls, name):
+ # type: (unicode) -> None
themes = ['alabaster']
try:
import sphinx_rtd_theme
@@ -97,7 +106,8 @@ class Theme(object):
cls.themes[name] = (path.join(themedir, name), None)
return
- def __init__(self, name, warn=None):
+ def __init__(self, name):
+ # type: (unicode) -> None
if name not in self.themes:
self.load_extra_theme(name)
if name not in self.themes:
@@ -137,7 +147,7 @@ class Theme(object):
fp.write(tinfo.read(name))
self.themeconf = configparser.RawConfigParser()
- self.themeconf.read(path.join(self.themedir, THEMECONF))
+ self.themeconf.read(path.join(self.themedir, THEMECONF)) # type: ignore
try:
inherit = self.themeconf.get('theme', 'inherit')
@@ -153,14 +163,15 @@ class Theme(object):
raise ThemeError('no theme named %r found, inherited by %r' %
(inherit, name))
else:
- self.base = Theme(inherit, warn=warn)
+ self.base = Theme(inherit)
def get_confstr(self, section, name, default=NODEFAULT):
+ # type: (unicode, unicode, Any) -> Any
"""Return the value for a theme configuration setting, searching the
base theme chain.
"""
try:
- return self.themeconf.get(section, name)
+ return self.themeconf.get(section, name) # type: ignore
except (configparser.NoOptionError, configparser.NoSectionError):
if self.base is not None:
return self.base.get_confstr(section, name, default)
@@ -171,13 +182,14 @@ class Theme(object):
return default
def get_options(self, overrides):
+ # type: (Dict) -> Any
"""Return a dictionary of theme options and their values."""
chain = [self.themeconf]
base = self.base
while base is not None:
chain.append(base.themeconf)
base = base.base
- options = {}
+ options = {} # type: Dict[unicode, Any]
for conf in reversed(chain):
try:
options.update(conf.items('options'))
@@ -190,6 +202,7 @@ class Theme(object):
return options
def get_dirchain(self):
+ # type: () -> List[unicode]
"""Return a list of theme directories, beginning with this theme's,
then the base theme's, then that one's base theme's, etc.
"""
@@ -201,6 +214,7 @@ class Theme(object):
return chain
def cleanup(self):
+ # type: () -> None
"""Remove temporary directories."""
if self.themedir_created:
try:
@@ -212,6 +226,7 @@ class Theme(object):
def load_theme_plugins():
+ # type: () -> List[unicode]
"""load plugins by using``sphinx_themes`` section in setuptools entry_points.
This API will return list of directory that contain some theme directory.
"""
@@ -219,7 +234,7 @@ def load_theme_plugins():
if not pkg_resources:
return []
- theme_paths = []
+ theme_paths = [] # type: List[unicode]
for plugin in pkg_resources.iter_entry_points('sphinx_themes'):
func_or_path = plugin.load()
diff --git a/sphinx/transforms/__init__.py b/sphinx/transforms/__init__.py
index 919482821..588ec0a81 100644
--- a/sphinx/transforms/__init__.py
+++ b/sphinx/transforms/__init__.py
@@ -10,14 +10,26 @@
"""
from docutils import nodes
-from docutils.transforms import Transform
+from docutils.transforms import Transform, Transformer
from docutils.transforms.parts import ContentsFilter
+from docutils.utils import new_document
from sphinx import addnodes
from sphinx.locale import _
+from sphinx.util import logging
from sphinx.util.i18n import format_date
from sphinx.util.nodes import apply_source_workaround
+if False:
+ # For type annotation
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+ from sphinx.domain.std import StandardDomain # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+
+logger = logging.getLogger(__name__)
+
default_substitutions = set([
'version',
'release',
@@ -25,7 +37,69 @@ default_substitutions = set([
])
-class DefaultSubstitutions(Transform):
+class SphinxTransform(Transform):
+ """
+ A base class of Transforms.
+
+ Compared with ``docutils.transforms.Transform``, this class improves accessibility to
+ Sphinx APIs.
+
+ The subclasses can access following objects and functions:
+
+ self.app
+ The application object (:class:`sphinx.application.Sphinx`)
+ self.config
+ The config object (:class:`sphinx.config.Config`)
+ self.env
+ The environment object (:class:`sphinx.environment.BuildEnvironment`)
+ """
+
+ @property
+ def app(self):
+ # type: () -> Sphinx
+ return self.document.settings.env.app
+
+ @property
+ def env(self):
+ # type: () -> BuildEnvironment
+ return self.document.settings.env
+
+ @property
+ def config(self):
+ # type: () -> Config
+ return self.document.settings.env.config
+
+
+class SphinxTransformer(Transformer):
+ """
+ A transformer for Sphinx.
+ """
+
+ document = None # type: nodes.Node
+ env = None # type: BuildEnvironment
+
+ def set_environment(self, env):
+ # type: (BuildEnvironment) -> None
+ self.env = env
+
+ def apply_transforms(self):
+ # type: () -> None
+ if isinstance(self.document, nodes.document):
+ Transformer.apply_transforms(self)
+ else:
+ # wrap the target node by document node during transforming
+ try:
+ document = new_document('')
+ if self.env:
+ document.settings.env = self.env
+ document += self.document
+ self.document = document
+ Transformer.apply_transforms(self)
+ finally:
+ self.document = self.document[0]
+
+
+class DefaultSubstitutions(SphinxTransform):
"""
Replace some substitutions if they aren't defined in the document.
"""
@@ -33,22 +107,21 @@ class DefaultSubstitutions(Transform):
default_priority = 210
def apply(self):
- env = self.document.settings.env
- config = self.document.settings.env.config
+ # type: () -> None
# only handle those not otherwise defined in the document
to_handle = default_substitutions - set(self.document.substitution_defs)
for ref in self.document.traverse(nodes.substitution_reference):
refname = ref['refname']
if refname in to_handle:
- text = config[refname]
+ text = self.config[refname]
if refname == 'today' and not text:
# special handling: can also specify a strftime format
- text = format_date(config.today_fmt or _('%b %d, %Y'),
- language=config.language, warn=env.warn)
+ text = format_date(self.config.today_fmt or _('%b %d, %Y'),
+ language=self.config.language)
ref.replace_self(nodes.Text(text, text))
-class MoveModuleTargets(Transform):
+class MoveModuleTargets(SphinxTransform):
"""
Move module targets that are the first thing in a section to the section
title.
@@ -58,6 +131,7 @@ class MoveModuleTargets(Transform):
default_priority = 210
def apply(self):
+ # type: () -> None
for node in self.document.traverse(nodes.target):
if not node['ids']:
continue
@@ -69,13 +143,14 @@ class MoveModuleTargets(Transform):
node.parent.remove(node)
-class HandleCodeBlocks(Transform):
+class HandleCodeBlocks(SphinxTransform):
"""
Several code block related transformations.
"""
default_priority = 210
def apply(self):
+ # type: () -> None
# move doctest blocks out of blockquotes
for node in self.document.traverse(nodes.block_quote):
if all(isinstance(child, nodes.doctest_block) for child
@@ -93,33 +168,35 @@ class HandleCodeBlocks(Transform):
# del node.parent[parindex+1]
-class AutoNumbering(Transform):
+class AutoNumbering(SphinxTransform):
"""
Register IDs of tables, figures and literal_blocks to assign numbers.
"""
default_priority = 210
def apply(self):
- domain = self.document.settings.env.domains['std']
+ # type: () -> None
+ domain = self.env.get_domain('std') # type: StandardDomain
for node in self.document.traverse(nodes.Element):
if domain.is_enumerable_node(node) and domain.get_numfig_title(node) is not None:
self.document.note_implicit_target(node)
-class SortIds(Transform):
+class SortIds(SphinxTransform):
"""
 Sort section IDs so that the "id[0-9]+" one comes last.
"""
default_priority = 261
def apply(self):
+ # type: () -> None
for node in self.document.traverse(nodes.section):
if len(node['ids']) > 1 and node['ids'][0].startswith('id'):
node['ids'] = node['ids'][1:] + [node['ids'][0]]
-class CitationReferences(Transform):
+class CitationReferences(SphinxTransform):
"""
Replace citation references by pending_xref nodes before the default
docutils transform tries to resolve them.
@@ -127,6 +204,7 @@ class CitationReferences(Transform):
default_priority = 619
def apply(self):
+ # type: () -> None
for citnode in self.document.traverse(nodes.citation_reference):
cittext = citnode.astext()
refnode = addnodes.pending_xref(cittext, refdomain='std', reftype='citation',
@@ -147,65 +225,68 @@ TRANSLATABLE_NODES = {
}
-class ApplySourceWorkaround(Transform):
+class ApplySourceWorkaround(SphinxTransform):
"""
update source and rawsource attributes
"""
default_priority = 10
def apply(self):
+ # type: () -> None
for n in self.document.traverse():
if isinstance(n, (nodes.TextElement, nodes.image)):
apply_source_workaround(n)
-class AutoIndexUpgrader(Transform):
+class AutoIndexUpgrader(SphinxTransform):
"""
 Detect old-style (4-column based) indices and automatically upgrade them to the new style.
"""
default_priority = 210
def apply(self):
- env = self.document.settings.env
+ # type: () -> None
for node in self.document.traverse(addnodes.index):
if 'entries' in node and any(len(entry) == 4 for entry in node['entries']):
msg = ('4 column based index found. '
'It might be a bug of extensions you use: %r' % node['entries'])
- env.warn_node(msg, node)
+ logger.warning(msg, location=node)
for i, entry in enumerate(node['entries']):
if len(entry) == 4:
node['entries'][i] = entry + (None,)
-class ExtraTranslatableNodes(Transform):
+class ExtraTranslatableNodes(SphinxTransform):
"""
make nodes translatable
"""
default_priority = 10
def apply(self):
- targets = self.document.settings.env.config.gettext_additional_targets
+ # type: () -> None
+ targets = self.config.gettext_additional_targets
target_nodes = [v for k, v in TRANSLATABLE_NODES.items() if k in targets]
if not target_nodes:
return
def is_translatable_node(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, tuple(target_nodes))
for node in self.document.traverse(is_translatable_node):
node['translatable'] = True
-class FilterSystemMessages(Transform):
+class FilterSystemMessages(SphinxTransform):
"""Filter system messages from a doctree."""
default_priority = 999
def apply(self):
- env = self.document.settings.env
- filterlevel = env.config.keep_warnings and 2 or 5
+ # type: () -> None
+ filterlevel = self.config.keep_warnings and 2 or 5
for node in self.document.traverse(nodes.system_message):
if node['level'] < filterlevel:
- env.app.debug('%s [filtered system message]', node.astext())
+ logger.debug('%s [filtered system message]', node.astext())
node.parent.remove(node)
@@ -215,9 +296,11 @@ class SphinxContentsFilter(ContentsFilter):
within table-of-contents link nodes.
"""
def visit_pending_xref(self, node):
+ # type: (nodes.Node) -> None
text = node.astext()
self.parent.append(nodes.literal(text, text))
raise nodes.SkipNode
def visit_image(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
diff --git a/sphinx/transforms/compact_bullet_list.py b/sphinx/transforms/compact_bullet_list.py
index 4ac2d3a87..006ae7161 100644
--- a/sphinx/transforms/compact_bullet_list.py
+++ b/sphinx/transforms/compact_bullet_list.py
@@ -10,9 +10,9 @@
"""
from docutils import nodes
-from docutils.transforms import Transform
from sphinx import addnodes
+from sphinx.transforms import SphinxTransform
class RefOnlyListChecker(nodes.GenericNodeVisitor):
@@ -23,12 +23,15 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
"""
def default_visit(self, node):
+ # type: (nodes.Node) -> None
raise nodes.NodeFound
def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_list_item(self, node):
+ # type: (nodes.Node) -> None
children = []
for child in node.children:
if not isinstance(child, nodes.Invisible):
@@ -45,11 +48,12 @@ class RefOnlyListChecker(nodes.GenericNodeVisitor):
raise nodes.SkipChildren
def invisible_visit(self, node):
+ # type: (nodes.Node) -> None
"""Invisible nodes should be ignored."""
pass
-class RefOnlyBulletListTransform(Transform):
+class RefOnlyBulletListTransform(SphinxTransform):
"""Change refonly bullet lists to use compact_paragraphs.
Specifically implemented for 'Indices and Tables' section, which looks
@@ -58,11 +62,12 @@ class RefOnlyBulletListTransform(Transform):
default_priority = 100
def apply(self):
- env = self.document.settings.env
- if env.config.html_compact_lists:
+ # type: () -> None
+ if self.config.html_compact_lists:
return
def check_refonly_list(node):
+ # type: (nodes.Node) -> bool
"""Check for list with only references in it."""
visitor = RefOnlyListChecker(self.document)
try:
diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py
index 1e9188d17..d5ee6927d 100644
--- a/sphinx/transforms/i18n.py
+++ b/sphinx/transforms/i18n.py
@@ -14,10 +14,9 @@ from os import path
from docutils import nodes
from docutils.io import StringInput
from docutils.utils import relative_path
-from docutils.transforms import Transform
from sphinx import addnodes
-from sphinx.util import split_index_msg
+from sphinx.util import split_index_msg, logging
from sphinx.util.i18n import find_catalog
from sphinx.util.nodes import (
LITERAL_TYPE_NODES, IMAGE_TYPE_NODES,
@@ -25,10 +24,20 @@ from sphinx.util.nodes import (
)
from sphinx.util.pycompat import indent
from sphinx.locale import init as init_locale
+from sphinx.transforms import SphinxTransform
from sphinx.domains.std import make_glossary_term, split_term_classifiers
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.config import Config # NOQA
+
+logger = logging.getLogger(__name__)
+
def publish_msgstr(app, source, source_path, source_line, config, settings):
+ # type: (Sphinx, unicode, unicode, int, Config, Dict) -> nodes.document
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
@@ -59,37 +68,38 @@ def publish_msgstr(app, source, source_path, source_line, config, settings):
return doc
-class PreserveTranslatableMessages(Transform):
+class PreserveTranslatableMessages(SphinxTransform):
"""
 Preserve original translatable messages before translation
"""
default_priority = 10 # this MUST be invoked before Locale transform
def apply(self):
+ # type: () -> None
for node in self.document.traverse(addnodes.translatable):
node.preserve_original_messages()
-class Locale(Transform):
+class Locale(SphinxTransform):
"""
Replace translatable nodes with their translated doctree.
"""
default_priority = 20
def apply(self):
- env = self.document.settings.env
+ # type: () -> None
settings, source = self.document.settings, self.document['source']
# XXX check if this is reliable
- assert source.startswith(env.srcdir)
- docname = path.splitext(relative_path(path.join(env.srcdir, 'dummy'),
+ assert source.startswith(self.env.srcdir)
+ docname = path.splitext(relative_path(path.join(self.env.srcdir, 'dummy'),
source))[0]
textdomain = find_catalog(docname,
self.document.settings.gettext_compact)
# fetch translations
- dirs = [path.join(env.srcdir, directory)
- for directory in env.config.locale_dirs]
- catalog, has_catalog = init_locale(dirs, env.config.language, textdomain)
+ dirs = [path.join(self.env.srcdir, directory)
+ for directory in self.config.locale_dirs]
+ catalog, has_catalog = init_locale(dirs, self.config.language, textdomain)
if not has_catalog:
return
@@ -114,8 +124,8 @@ class Locale(Transform):
if isinstance(node, LITERAL_TYPE_NODES):
msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
- patch = publish_msgstr(
- env.app, msgstr, source, node.line, env.config, settings)
+ patch = publish_msgstr(self.app, msgstr, source,
+ node.line, self.config, settings)
# XXX doctest and other block markup
if not isinstance(patch, nodes.paragraph):
continue # skip for now
@@ -176,6 +186,7 @@ class Locale(Transform):
# replace target's refname to new target name
def is_named_target(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, nodes.target) and \
node.get('refname') == old_name
for old_target in self.document.traverse(is_named_target):
@@ -185,16 +196,16 @@ class Locale(Transform):
# glossary terms update refid
if isinstance(node, nodes.term):
- gloss_entries = env.temp_data.setdefault('gloss_entries', set())
+ gloss_entries = self.env.temp_data.setdefault('gloss_entries', set())
for _id in node['names']:
if _id in gloss_entries:
gloss_entries.remove(_id)
parts = split_term_classifiers(msgstr)
- patch = publish_msgstr(
- env.app, parts[0], source, node.line, env.config, settings)
- patch = make_glossary_term(
- env, patch, parts[1], source, node.line, _id)
+ patch = publish_msgstr(self.app, parts[0], source,
+ node.line, self.config, settings)
+ patch = make_glossary_term(self.env, patch, parts[1],
+ source, node.line, _id)
node['ids'] = patch['ids']
node['names'] = patch['names']
processed = True
@@ -239,8 +250,8 @@ class Locale(Transform):
if isinstance(node, LITERAL_TYPE_NODES):
msgstr = '::\n\n' + indent(msgstr, ' ' * 3)
- patch = publish_msgstr(
- env.app, msgstr, source, node.line, env.config, settings)
+ patch = publish_msgstr(self.app, msgstr, source,
+ node.line, self.config, settings)
# XXX doctest and other block markup
if not isinstance(
patch,
@@ -249,10 +260,12 @@ class Locale(Transform):
# auto-numbered foot note reference should use original 'ids'.
def is_autonumber_footnote_ref(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, nodes.footnote_reference) and \
node.get('auto') == 1
def list_replace_or_append(lst, old, new):
+ # type: (List, Any, Any) -> None
if old in lst:
lst[lst.index(old)] = new
else:
@@ -260,9 +273,9 @@ class Locale(Transform):
old_foot_refs = node.traverse(is_autonumber_footnote_ref)
new_foot_refs = patch.traverse(is_autonumber_footnote_ref)
if len(old_foot_refs) != len(new_foot_refs):
- env.warn_node('inconsistent footnote references in '
- 'translated message', node)
- old_foot_namerefs = {}
+ logger.warning('inconsistent footnote references in translated message',
+ location=node)
+ old_foot_namerefs = {} # type: Dict[unicode, List[nodes.footnote_reference]]
for r in old_foot_refs:
old_foot_namerefs.setdefault(r.get('refname'), []).append(r)
for new in new_foot_refs:
@@ -290,13 +303,14 @@ class Locale(Transform):
# * use translated refname for section refname.
# * inline reference "`Python <...>`_" has no 'refname'.
def is_refnamed_ref(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, nodes.reference) and \
'refname' in node
old_refs = node.traverse(is_refnamed_ref)
new_refs = patch.traverse(is_refnamed_ref)
if len(old_refs) != len(new_refs):
- env.warn_node('inconsistent references in '
- 'translated message', node)
+ logger.warning('inconsistent references in translated message',
+ location=node)
old_ref_names = [r['refname'] for r in old_refs]
new_ref_names = [r['refname'] for r in new_refs]
orphans = list(set(old_ref_names) - set(new_ref_names))
@@ -315,6 +329,7 @@ class Locale(Transform):
# refnamed footnote and citation should use original 'ids'.
def is_refnamed_footnote_ref(node):
+ # type: (nodes.Node) -> bool
footnote_ref_classes = (nodes.footnote_reference,
nodes.citation_reference)
return isinstance(node, footnote_ref_classes) and \
@@ -323,8 +338,8 @@ class Locale(Transform):
new_refs = patch.traverse(is_refnamed_footnote_ref)
refname_ids_map = {}
if len(old_refs) != len(new_refs):
- env.warn_node('inconsistent references in '
- 'translated message', node)
+ logger.warning('inconsistent references in translated message',
+ location=node)
for old in old_refs:
refname_ids_map[old["refname"]] = old["ids"]
for new in new_refs:
@@ -339,10 +354,11 @@ class Locale(Transform):
new_refs = patch.traverse(addnodes.pending_xref)
xref_reftarget_map = {}
if len(old_refs) != len(new_refs):
- env.warn_node('inconsistent term references in '
- 'translated message', node)
+ logger.warning('inconsistent term references in translated message',
+ location=node)
def get_ref_key(node):
+ # type: (nodes.Node) -> Tuple[unicode, unicode, unicode]
case = node["refdomain"], node["reftype"]
if case == ('std', 'term'):
return None
@@ -381,10 +397,10 @@ class Locale(Transform):
node['translated'] = True
- if 'index' in env.config.gettext_additional_targets:
+ if 'index' in self.config.gettext_additional_targets:
# Extract and translate messages for index entries.
for node, entries in traverse_translatable_index(self.document):
- new_entries = []
+ new_entries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]] # NOQA
for type, msg, tid, main, key_ in entries:
msg_parts = split_index_msg(type, msg)
msgstr_parts = []
@@ -400,17 +416,16 @@ class Locale(Transform):
node['entries'] = new_entries
-class RemoveTranslatableInline(Transform):
+class RemoveTranslatableInline(SphinxTransform):
"""
Remove inline nodes used for translation as placeholders.
"""
default_priority = 999
def apply(self):
+ # type: () -> None
from sphinx.builders.gettext import MessageCatalogBuilder
- env = self.document.settings.env
- builder = env.app.builder
- if isinstance(builder, MessageCatalogBuilder):
+ if isinstance(self.app.builder, MessageCatalogBuilder):
return
for inline in self.document.traverse(nodes.inline):
if 'translatable' in inline:
diff --git a/sphinx/transforms/post_transforms.py b/sphinx/transforms/post_transforms.py
new file mode 100644
index 000000000..3ec12038d
--- /dev/null
+++ b/sphinx/transforms/post_transforms.py
@@ -0,0 +1,175 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.transforms.post_transforms
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Docutils transforms used by Sphinx.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from docutils import nodes
+
+from sphinx import addnodes
+from sphinx.environment import NoUri
+from sphinx.locale import _
+from sphinx.transforms import SphinxTransform
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.application import Sphinx # NOQA
+ from sphinx.domains import Domain # NOQA
+
+
+logger = logging.getLogger(__name__)
+
+
+class ReferencesResolver(SphinxTransform):
+ """
+ Resolves cross-references on doctrees.
+ """
+
+ default_priority = 10
+
+ def apply(self):
+ # type: () -> None
+ for node in self.document.traverse(addnodes.pending_xref):
+ contnode = node[0].deepcopy()
+ newnode = None
+
+ typ = node['reftype']
+ target = node['reftarget']
+ refdoc = node.get('refdoc', self.env.docname)
+ domain = None
+
+ try:
+ if 'refdomain' in node and node['refdomain']:
+ # let the domain try to resolve the reference
+ try:
+ domain = self.env.domains[node['refdomain']]
+ except KeyError:
+ raise NoUri
+ newnode = domain.resolve_xref(self.env, refdoc, self.app.builder,
+ typ, target, node, contnode)
+ # really hardwired reference types
+ elif typ == 'any':
+ newnode = self.resolve_anyref(refdoc, node, contnode)
+ # no new node found? try the missing-reference event
+ if newnode is None:
+ newnode = self.app.emit_firstresult('missing-reference', self.env,
+ node, contnode)
+ # still not found? warn if node wishes to be warned about or
+ # we are in nit-picky mode
+ if newnode is None:
+ self.warn_missing_reference(refdoc, typ, target, node, domain)
+ except NoUri:
+ newnode = contnode
+ node.replace_self(newnode or contnode)
+
+ def resolve_anyref(self, refdoc, node, contnode):
+ # type: (unicode, nodes.Node, nodes.Node) -> nodes.Node
+ """Resolve reference generated by the "any" role."""
+ stddomain = self.env.get_domain('std')
+ target = node['reftarget']
+ results = [] # type: List[Tuple[unicode, nodes.Node]]
+ # first, try resolving as :doc:
+ doc_ref = stddomain.resolve_xref(self.env, refdoc, self.app.builder,
+ 'doc', target, node, contnode)
+ if doc_ref:
+ results.append(('doc', doc_ref))
+ # next, do the standard domain (makes this a priority)
+ results.extend(stddomain.resolve_any_xref(self.env, refdoc, self.app.builder,
+ target, node, contnode))
+ for domain in self.env.domains.values():
+ if domain.name == 'std':
+ continue # we did this one already
+ try:
+ results.extend(domain.resolve_any_xref(self.env, refdoc, self.app.builder,
+ target, node, contnode))
+ except NotImplementedError:
+ # the domain doesn't yet support the new interface
+ # we have to manually collect possible references (SLOW)
+ for role in domain.roles:
+ res = domain.resolve_xref(self.env, refdoc, self.app.builder,
+ role, target, node, contnode)
+ if res and isinstance(res[0], nodes.Element):
+ results.append(('%s:%s' % (domain.name, role), res))
+ # now, see how many matches we got...
+ if not results:
+ return None
+ if len(results) > 1:
+ nice_results = ' or '.join(':%s:' % r[0] for r in results)
+ logger.warning(_('more than one target found for \'any\' cross-'
+ 'reference %r: could be %s'), target, nice_results,
+ location=node)
+ res_role, newnode = results[0]
+ # Override "any" class with the actual role type to get the styling
+ # approximately correct.
+ res_domain = res_role.split(':')[0]
+ if newnode and newnode[0].get('classes'):
+ newnode[0]['classes'].append(res_domain)
+ newnode[0]['classes'].append(res_role.replace(':', '-'))
+ return newnode
+
+ def warn_missing_reference(self, refdoc, typ, target, node, domain):
+ # type: (unicode, unicode, unicode, nodes.Node, Domain) -> None
+ warn = node.get('refwarn')
+ if self.config.nitpicky:
+ warn = True
+ if self.env._nitpick_ignore:
+ dtype = domain and '%s:%s' % (domain.name, typ) or typ
+ if (dtype, target) in self.env._nitpick_ignore:
+ warn = False
+ # for "std" types also try without domain name
+ if (not domain or domain.name == 'std') and \
+ (typ, target) in self.env._nitpick_ignore:
+ warn = False
+ if not warn:
+ return
+ if domain and typ in domain.dangling_warnings:
+ msg = domain.dangling_warnings[typ]
+ elif node.get('refdomain', 'std') not in ('', 'std'):
+ msg = (_('%s:%s reference target not found: %%(target)s') %
+ (node['refdomain'], typ))
+ else:
+ msg = _('%r reference target not found: %%(target)s') % typ
+ logger.warning(msg % {'target': target},
+ location=node, type='ref', subtype=typ)
+
+
+class OnlyNodeTransform(SphinxTransform):
+ default_priority = 50
+
+ def apply(self):
+ # type: () -> None
+ # A comment on the comment() nodes being inserted: replacing by [] would
+ # result in a "Losing ids" exception if there is a target node before
+ # the only node, so we make sure docutils can transfer the id to
+ # something, even if it's just a comment and will lose the id anyway...
+ for node in self.document.traverse(addnodes.only):
+ try:
+ ret = self.app.builder.tags.eval_condition(node['expr'])
+ except Exception as err:
+ logger.warning('exception while evaluating only directive expression: %s', err,
+ location=node)
+ node.replace_self(node.children or nodes.comment())
+ else:
+ if ret:
+ node.replace_self(node.children or nodes.comment())
+ else:
+ node.replace_self(nodes.comment())
+
+
+def setup(app):
+ # type: (Sphinx) -> Dict[unicode, Any]
+ app.add_post_transform(ReferencesResolver)
+ app.add_post_transform(OnlyNodeTransform)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 09f30fd94..cc4e0ef80 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -22,13 +22,14 @@ from os import path
from codecs import BOM_UTF8
from collections import deque
-from six import iteritems, text_type, binary_type
+from six import text_type, binary_type
from six.moves import range
from six.moves.urllib.parse import urlsplit, urlunsplit, quote_plus, parse_qsl, urlencode
from docutils.utils import relative_path
from sphinx.errors import PycodeError, SphinxParallelError, ExtensionError
-from sphinx.util.console import strip_colors
+from sphinx.util import logging
+from sphinx.util.console import strip_colors, colorize, bold, term_width_line # type: ignore
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.osutil import fs_encoding
@@ -42,19 +43,28 @@ from sphinx.util.nodes import ( # noqa
caption_ref_re)
from sphinx.util.matching import patfilter # noqa
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, IO, Iterable, Iterator, List, Pattern, Sequence, Set, Tuple, Union # NOQA
+
+
+logger = logging.getLogger(__name__)
+
# Generally useful regular expressions.
-ws_re = re.compile(r'\s+')
-url_re = re.compile(r'(?P<schema>.+)://.*')
+ws_re = re.compile(r'\s+') # type: Pattern
+url_re = re.compile(r'(?P<schema>.+)://.*') # type: Pattern
# High-level utility functions.
def docname_join(basedocname, docname):
+ # type: (unicode, unicode) -> unicode
return posixpath.normpath(
posixpath.join('/' + basedocname, '..', docname))[1:]
def path_stabilize(filepath):
+ # type: (unicode) -> unicode
"normalize path separater and unicode string"
newpath = filepath.replace(os.path.sep, SEP)
if isinstance(newpath, text_type):
@@ -63,6 +73,7 @@ def path_stabilize(filepath):
def get_matching_files(dirname, exclude_matchers=()):
+ # type: (unicode, Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode]
"""Get all file names in a directory, recursively.
Exclude files and dirs matching some matcher in *exclude_matchers*.
@@ -75,9 +86,9 @@ def get_matching_files(dirname, exclude_matchers=()):
relativeroot = root[dirlen:]
qdirs = enumerate(path_stabilize(path.join(relativeroot, dn))
- for dn in dirs)
+ for dn in dirs) # type: Iterable[Tuple[int, unicode]]
qfiles = enumerate(path_stabilize(path.join(relativeroot, fn))
- for fn in files)
+ for fn in files) # type: Iterable[Tuple[int, unicode]]
for matcher in exclude_matchers:
qdirs = [entry for entry in qdirs if not matcher(entry[1])]
qfiles = [entry for entry in qfiles if not matcher(entry[1])]
@@ -89,6 +100,7 @@ def get_matching_files(dirname, exclude_matchers=()):
def get_matching_docs(dirname, suffixes, exclude_matchers=()):
+ # type: (unicode, List[unicode], Tuple[Callable[[unicode], bool], ...]) -> Iterable[unicode] # NOQA
"""Get all file names (without suffixes) matching a suffix in a directory,
recursively.
@@ -109,9 +121,11 @@ class FilenameUniqDict(dict):
appear in. Used for images and downloadable files in the environment.
"""
def __init__(self):
- self._existing = set()
+ # type: () -> None
+ self._existing = set() # type: Set[unicode]
def add_file(self, docname, newfile):
+ # type: (unicode, unicode) -> unicode
if newfile in self:
self[newfile][0].add(docname)
return self[newfile][1]
@@ -126,6 +140,7 @@ class FilenameUniqDict(dict):
return uniquename
def purge_doc(self, docname):
+ # type: (unicode) -> None
for filename, (docs, unique) in list(self.items()):
docs.discard(docname)
if not docs:
@@ -133,19 +148,23 @@ class FilenameUniqDict(dict):
self._existing.discard(unique)
def merge_other(self, docnames, other):
+ # type: (Set[unicode], Dict[unicode, Tuple[Set[unicode], Any]]) -> None
for filename, (docs, unique) in other.items():
- for doc in docs & docnames:
+ for doc in docs & set(docnames):
self.add_file(doc, filename)
def __getstate__(self):
+ # type: () -> Set[unicode]
return self._existing
def __setstate__(self, state):
+ # type: (Set[unicode]) -> None
self._existing = state
def copy_static_entry(source, targetdir, builder, context={},
exclude_matchers=(), level=0):
+ # type: (unicode, unicode, Any, Dict, Tuple[Callable, ...], int) -> None
"""[DEPRECATED] Copy a HTML builder static_path entry from source to targetdir.
Handles all possible cases of files, directories and subdirectories.
@@ -183,6 +202,7 @@ _DEBUG_HEADER = '''\
def save_traceback(app):
+ # type: (Any) -> unicode
"""Save the current exception's traceback in a temporary file."""
import sphinx
import jinja2
@@ -197,30 +217,30 @@ def save_traceback(app):
last_msgs = ''
if app is not None:
last_msgs = '\n'.join(
- '# %s' % strip_colors(force_decode(s, 'utf-8')).strip()
+ '# %s' % strip_colors(force_decode(s, 'utf-8')).strip() # type: ignore
for s in app.messagelog)
os.write(fd, (_DEBUG_HEADER %
(sphinx.__display_version__,
platform.python_version(),
platform.python_implementation(),
docutils.__version__, docutils.__version_details__,
- jinja2.__version__,
+ jinja2.__version__, # type: ignore
last_msgs)).encode('utf-8'))
if app is not None:
- for extname, extmod in iteritems(app._extensions):
- modfile = getattr(extmod, '__file__', 'unknown')
+ for ext in app.extensions:
+ modfile = getattr(ext.module, '__file__', 'unknown')
if isinstance(modfile, bytes):
modfile = modfile.decode(fs_encoding, 'replace')
- version = app._extension_metadata[extname]['version']
- if version != 'builtin':
+ if ext.version != 'builtin':
os.write(fd, ('# %s (%s) from %s\n' %
- (extname, version, modfile)).encode('utf-8'))
+ (ext.name, ext.version, modfile)).encode('utf-8'))
os.write(fd, exc_format.encode('utf-8'))
os.close(fd)
return path
def get_module_source(modname):
+ # type: (str) -> Tuple[unicode, unicode]
"""Try to find the source code for a module.
Can return ('file', 'filename') in which case the source is in the given
@@ -260,6 +280,11 @@ def get_module_source(modname):
def get_full_modname(modname, attribute):
+ # type: (str, unicode) -> unicode
+ if modname is None:
+        # Prevents a TypeError: if the last getattr() call would return None,
+        # it's better to return None directly
+ return None
__import__(modname)
module = sys.modules[modname]
@@ -278,15 +303,18 @@ _coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
def detect_encoding(readline):
+ # type: (Callable) -> unicode
"""Like tokenize.detect_encoding() from Py3k, but a bit simplified."""
def read_or_stop():
+ # type: () -> unicode
try:
return readline()
except StopIteration:
return None
def get_normal_name(orig_enc):
+ # type: (str) -> str
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace('_', '-')
@@ -298,12 +326,13 @@ def detect_encoding(readline):
return orig_enc
def find_cookie(line):
+ # type: (unicode) -> unicode
try:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
- matches = _coding_re.findall(line_string)
+ matches = _coding_re.findall(line_string) # type: ignore
if not matches:
return None
return get_normal_name(matches[0])
@@ -334,14 +363,17 @@ class Tee(object):
File-like object writing to two streams.
"""
def __init__(self, stream1, stream2):
+ # type: (IO, IO) -> None
self.stream1 = stream1
self.stream2 = stream2
def write(self, text):
+ # type: (unicode) -> None
self.stream1.write(text)
self.stream2.write(text)
def flush(self):
+ # type: () -> None
if hasattr(self.stream1, 'flush'):
self.stream1.flush()
if hasattr(self.stream2, 'flush'):
@@ -349,6 +381,7 @@ class Tee(object):
def parselinenos(spec, total):
+ # type: (unicode, int) -> List[int]
"""Parse a line number spec (such as "1,2,4-6") and return a list of
wanted line numbers.
"""
@@ -362,18 +395,23 @@ def parselinenos(spec, total):
elif len(begend) == 1:
items.append(int(begend[0]) - 1)
elif len(begend) == 2:
- start, end = begend
- start = start or 1 # left half open (cf. -10)
- end = end or total # right half open (cf. 10-)
- items.extend(range(int(start) - 1, int(end)))
+ start = int(begend[0] or 1) # type: ignore
+ # left half open (cf. -10)
+ end = int(begend[1] or max(start, total)) # type: ignore
+ # right half open (cf. 10-)
+ if start > end: # invalid range (cf. 10-1)
+ raise ValueError
+ items.extend(range(start - 1, end))
else:
raise ValueError
except Exception:
raise ValueError('invalid line number spec: %r' % spec)
+
return items
def force_decode(string, encoding):
+ # type: (unicode, unicode) -> unicode
"""Forcibly get a unicode string out of a bytestring."""
if isinstance(string, binary_type):
try:
@@ -390,16 +428,20 @@ def force_decode(string, encoding):
class attrdict(dict):
def __getattr__(self, key):
+ # type: (unicode) -> unicode
return self[key]
def __setattr__(self, key, val):
+ # type: (unicode, unicode) -> None
self[key] = val
def __delattr__(self, key):
+ # type: (unicode) -> None
del self[key]
def rpartition(s, t):
+ # type: (unicode, unicode) -> Tuple[unicode, unicode]
"""Similar to str.rpartition from 2.5, but doesn't return the separator."""
i = s.rfind(t)
if i != -1:
@@ -408,6 +450,7 @@ def rpartition(s, t):
def split_into(n, type, value):
+ # type: (int, unicode, unicode) -> List[unicode]
"""Split an index entry into a given number of parts at semicolons."""
parts = [x.strip() for x in value.split(';', n - 1)]
if sum(1 for part in parts if part) < n:
@@ -416,6 +459,7 @@ def split_into(n, type, value):
def split_index_msg(type, value):
+ # type: (unicode, unicode) -> List[unicode]
# new entry types must be listed in directives/other.py!
if type == 'single':
try:
@@ -437,10 +481,11 @@ def split_index_msg(type, value):
def format_exception_cut_frames(x=1):
+ # type: (int) -> unicode
"""Format an exception with traceback, but only the last x frames."""
typ, val, tb = sys.exc_info()
# res = ['Traceback (most recent call last):\n']
- res = []
+ res = [] # type: List[unicode]
tbres = traceback.format_tb(tb)
res += tbres[-x:]
res += traceback.format_exception_only(typ, val)
@@ -453,13 +498,16 @@ class PeekableIterator(object):
what's the next item.
"""
def __init__(self, iterable):
- self.remaining = deque()
+ # type: (Iterable) -> None
+ self.remaining = deque() # type: deque
self._iterator = iter(iterable)
def __iter__(self):
+ # type: () -> PeekableIterator
return self
def __next__(self):
+ # type: () -> Any
"""Return the next item from the iterator."""
if self.remaining:
return self.remaining.popleft()
@@ -468,19 +516,22 @@ class PeekableIterator(object):
next = __next__ # Python 2 compatibility
def push(self, item):
+ # type: (Any) -> None
"""Push the `item` on the internal stack, it will be returned on the
next :meth:`next` call.
"""
self.remaining.append(item)
def peek(self):
+ # type: () -> Any
"""Return the next item without changing the state of the iterator."""
- item = next(self)
+ item = next(self) # type: ignore
self.push(item)
return item
def import_object(objname, source=None):
+ # type: (str, unicode) -> Any
try:
module, name = objname.rsplit('.', 1)
except ValueError as err:
@@ -500,7 +551,8 @@ def import_object(objname, source=None):
def encode_uri(uri):
- split = list(urlsplit(uri))
+ # type: (unicode) -> unicode
+ split = list(urlsplit(uri)) # type: Any
split[1] = split[1].encode('idna').decode('ascii')
split[2] = quote_plus(split[2].encode('utf-8'), '/').decode('ascii')
query = list((q, quote_plus(v.encode('utf-8')))
@@ -510,9 +562,56 @@ def encode_uri(uri):
def split_docinfo(text):
- docinfo_re = re.compile('\A((?:\s*:\w+:.*?\n(?:[ \t]+.*?\n)*)+)', re.M)
- result = docinfo_re.split(text, 1)
+ # type: (unicode) -> Sequence[unicode]
+ docinfo_re = re.compile('\\A((?:\\s*:\\w+:.*?\n(?:[ \\t]+.*?\n)*)+)', re.M)
+ result = docinfo_re.split(text, 1) # type: ignore
if len(result) == 1:
return '', result[0]
else:
return result[1:]
+
+
+def display_chunk(chunk):
+ # type: (Any) -> unicode
+ if isinstance(chunk, (list, tuple)):
+ if len(chunk) == 1:
+ return text_type(chunk[0])
+ return '%s .. %s' % (chunk[0], chunk[-1])
+ return text_type(chunk)
+
+
+def old_status_iterator(iterable, summary, color="darkgreen", stringify_func=display_chunk):
+ # type: (Iterable, unicode, str, Callable[[Any], unicode]) -> Iterator
+ l = 0
+ for item in iterable:
+ if l == 0:
+ logger.info(bold(summary), nonl=True)
+ l = 1
+ logger.info(stringify_func(item), color=color, nonl=True)
+ logger.info(" ", nonl=True)
+ yield item
+ if l == 1:
+ logger.info('')
+
+
+# new version with progress info
+def status_iterator(iterable, summary, color="darkgreen", length=0, verbosity=0,
+ stringify_func=display_chunk):
+ # type: (Iterable, unicode, str, int, int, Callable[[Any], unicode]) -> Iterable # NOQA
+ if length == 0:
+ for item in old_status_iterator(iterable, summary, color, stringify_func):
+ yield item
+ return
+ l = 0
+ summary = bold(summary)
+ for item in iterable:
+ l += 1
+ s = '%s[%3d%%] %s' % (summary, 100 * l / length, colorize(color, stringify_func(item)))
+ if verbosity:
+ s += '\n'
+ else:
+ s = term_width_line(s)
+ logger.info(s, nonl=True)
+ yield item
+ if l > 0:
+ logger.info('')
diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py
index f3d857793..30a89bbfb 100644
--- a/sphinx/util/compat.py
+++ b/sphinx/util/compat.py
@@ -10,31 +10,39 @@
"""
from __future__ import absolute_import
+import sys
import warnings
-from docutils import nodes
from docutils.parsers.rst import Directive # noqa
-
from docutils import __version__ as _du_version
+
+from sphinx.deprecation import RemovedInSphinx17Warning
+
docutils_version = tuple(int(x) for x in _du_version.split('.')[:2])
+if False:
+ # For type annotation
+ from typing import Any, Dict # NOQA
+
+
+class _DeprecationWrapper(object):
+ def __init__(self, mod, deprecated):
+ # type: (Any, Dict) -> None
+ self._mod = mod
+ self._deprecated = deprecated
+
+ def __getattr__(self, attr):
+ # type: (str) -> Any
+ if attr in self._deprecated:
+ warnings.warn("sphinx.util.compat.%s is deprecated and will be "
+ "removed in Sphinx 1.7, please use the standard "
+ "library version instead." % attr,
+ RemovedInSphinx17Warning, stacklevel=2)
+ return self._deprecated[attr]
+ return getattr(self._mod, attr)
+
-def make_admonition(node_class, name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- warnings.warn('make_admonition is deprecated, use '
- 'docutils.parsers.rst.directives.admonitions.BaseAdmonition '
- 'instead', DeprecationWarning, stacklevel=2)
- text = '\n'.join(content)
- admonition_node = node_class(text)
- if arguments:
- title_text = arguments[0]
- textnodes, messages = state.inline_text(title_text, lineno)
- admonition_node += nodes.title(title_text, '', *textnodes)
- admonition_node += messages
- if 'class' in options:
- classes = options['class']
- else:
- classes = ['admonition-' + nodes.make_id(title_text)]
- admonition_node['classes'] += classes
- state.nested_parse(content, content_offset, admonition_node)
- return [admonition_node]
+sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict( # type: ignore
+ docutils_version = docutils_version,
+ Directive = Directive,
+))
diff --git a/sphinx/util/console.py b/sphinx/util/console.py
index d4acb856c..63a619f55 100644
--- a/sphinx/util/console.py
+++ b/sphinx/util/console.py
@@ -19,11 +19,17 @@ try:
except ImportError:
colorama = None
+if False:
+ # For type annotation
+ from typing import Dict # NOQA
+
+
_ansi_re = re.compile('\x1b\\[(\\d\\d;){0,2}\\d\\dm')
-codes = {}
+codes = {} # type: Dict[str, str]
def get_terminal_width():
+ # type: () -> int
"""Borrowed from the py lib."""
try:
import termios
@@ -35,7 +41,7 @@ def get_terminal_width():
terminal_width = width
except Exception:
# FALLBACK
- terminal_width = int(os.environ.get('COLUMNS', 80)) - 1
+ terminal_width = int(os.environ.get('COLUMNS', "80")) - 1
return terminal_width
@@ -43,6 +49,7 @@ _tw = get_terminal_width()
def term_width_line(text):
+ # type: (str) -> str
if not codes:
# if no coloring, don't output fancy backspaces
return text + '\n'
@@ -52,6 +59,7 @@ def term_width_line(text):
def color_terminal():
+ # type: () -> bool
if sys.platform == 'win32' and colorama is not None:
colorama.init()
return True
@@ -68,25 +76,31 @@ def color_terminal():
def nocolor():
+ # type: () -> None
if sys.platform == 'win32' and colorama is not None:
colorama.deinit()
codes.clear()
def coloron():
+ # type: () -> None
codes.update(_orig_codes)
def colorize(name, text):
+ # type: (str, unicode) -> unicode
return codes.get(name, '') + text + codes.get('reset', '')
def strip_colors(s):
+ # type: (str) -> str
return re.compile('\x1b.*?m').sub('', s)
def create_color_func(name):
+ # type: (str) -> None
def inner(text):
+ # type: (unicode) -> unicode
return colorize(name, text)
globals()[name] = inner
diff --git a/sphinx/util/docfields.py b/sphinx/util/docfields.py
index 79b19d00e..44ddbccf6 100644
--- a/sphinx/util/docfields.py
+++ b/sphinx/util/docfields.py
@@ -15,8 +15,15 @@ from docutils import nodes
from sphinx import addnodes
+if False:
+ # For type annotation
+ from typing import Any, Dict, List, Tuple # NOQA
+ from sphinx.domains import Domain # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
def _is_single_paragraph(node):
+ # type: (nodes.Node) -> bool
"""True if the node only contains one paragraph (and system messages)."""
if len(node) == 0:
return False
@@ -47,6 +54,7 @@ class Field(object):
def __init__(self, name, names=(), label=None, has_arg=True, rolename=None,
bodyrolename=None):
+ # type: (unicode, Tuple[unicode, ...], unicode, bool, unicode, unicode) -> None
self.name = name
self.names = names
self.label = label
@@ -54,8 +62,15 @@ class Field(object):
self.rolename = rolename
self.bodyrolename = bodyrolename
- def make_xref(self, rolename, domain, target,
- innernode=addnodes.literal_emphasis, contnode=None, env=None):
+ def make_xref(self,
+ rolename, # type: unicode
+ domain, # type: unicode
+ target, # type: unicode
+ innernode=addnodes.literal_emphasis, # type: nodes.Node
+ contnode=None, # type: nodes.Node
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> nodes.Node
if not rolename:
return contnode or innernode(target, target)
refnode = addnodes.pending_xref('', refdomain=domain, refexplicit=False,
@@ -65,14 +80,28 @@ class Field(object):
env.domains[domain].process_field_xref(refnode)
return refnode
- def make_xrefs(self, rolename, domain, target,
- innernode=addnodes.literal_emphasis, contnode=None, env=None):
+ def make_xrefs(self,
+ rolename, # type: unicode
+ domain, # type: unicode
+ target, # type: unicode
+ innernode=addnodes.literal_emphasis, # type: nodes.Node
+ contnode=None, # type: nodes.Node
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> List[nodes.Node]
return [self.make_xref(rolename, domain, target, innernode, contnode, env)]
def make_entry(self, fieldarg, content):
+ # type: (List, unicode) -> Tuple[List, unicode]
return (fieldarg, content)
- def make_field(self, types, domain, item, env=None):
+ def make_field(self,
+ types, # type: Dict[unicode, List[nodes.Node]]
+ domain, # type: unicode
+ item, # type: Tuple
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> nodes.field
fieldarg, content = item
fieldname = nodes.field_name('', self.label)
if fieldarg:
@@ -108,10 +137,17 @@ class GroupedField(Field):
def __init__(self, name, names=(), label=None, rolename=None,
can_collapse=False):
+ # type: (unicode, Tuple[unicode, ...], unicode, unicode, bool) -> None
Field.__init__(self, name, names, label, True, rolename)
self.can_collapse = can_collapse
- def make_field(self, types, domain, items, env=None):
+ def make_field(self,
+ types, # type: Dict[unicode, List[nodes.Node]]
+ domain, # type: unicode
+ items, # type: Tuple
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> nodes.field
fieldname = nodes.field_name('', self.label)
listnode = self.list_type()
for fieldarg, content in items:
@@ -153,12 +189,20 @@ class TypedField(GroupedField):
def __init__(self, name, names=(), typenames=(), label=None,
rolename=None, typerolename=None, can_collapse=False):
+ # type: (unicode, Tuple[unicode, ...], Tuple[unicode, ...], unicode, unicode, unicode, bool) -> None # NOQA
GroupedField.__init__(self, name, names, label, rolename, can_collapse)
self.typenames = typenames
self.typerolename = typerolename
- def make_field(self, types, domain, items, env=None):
+ def make_field(self,
+ types, # type: Dict[unicode, List[nodes.Node]]
+ domain, # type: unicode
+ items, # type: Tuple
+ env=None, # type: BuildEnvironment
+ ):
+ # type: (...) -> nodes.field
def handle_item(fieldarg, content):
+ # type: (unicode, unicode) -> nodes.paragraph
par = nodes.paragraph()
par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
addnodes.literal_strong, env=env))
@@ -198,6 +242,7 @@ class DocFieldTransformer(object):
"""
def __init__(self, directive):
+ # type: (Any) -> None
self.directive = directive
if '_doc_field_type_map' not in directive.__class__.__dict__:
directive.__class__._doc_field_type_map = \
@@ -205,6 +250,7 @@ class DocFieldTransformer(object):
self.typemap = directive._doc_field_type_map
def preprocess_fieldtypes(self, types):
+ # type: (List) -> Dict[unicode, Tuple[Any, bool]]
typemap = {}
for fieldtype in types:
for name in fieldtype.names:
@@ -215,6 +261,7 @@ class DocFieldTransformer(object):
return typemap
def transform_all(self, node):
+ # type: (nodes.Node) -> None
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
for child in node:
@@ -222,12 +269,13 @@ class DocFieldTransformer(object):
self.transform(child)
def transform(self, node):
+ # type: (nodes.Node) -> None
"""Transform a single field list *node*."""
typemap = self.typemap
entries = []
- groupindices = {}
- types = {}
+ groupindices = {} # type: Dict[unicode, int]
+ types = {} # type: Dict[unicode, Dict]
# step 1: traverse all fields and collect field types and content
for field in node:
@@ -282,6 +330,7 @@ class DocFieldTransformer(object):
translatable_content = nodes.inline(fieldbody.rawsource,
translatable=True)
+ translatable_content.document = fieldbody.parent.document
translatable_content.source = fieldbody.parent.source
translatable_content.line = fieldbody.parent.line
translatable_content += content
diff --git a/sphinx/util/docstrings.py b/sphinx/util/docstrings.py
index 7094f6d52..c2ef91a66 100644
--- a/sphinx/util/docstrings.py
+++ b/sphinx/util/docstrings.py
@@ -11,8 +11,13 @@
import sys
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
def prepare_docstring(s, ignore=1):
+ # type: (unicode, int) -> List[unicode]
"""Convert a docstring into lines of parseable reST. Remove common leading
indentation, where the indentation of a given number of lines (usually just
one) is ignored.
@@ -46,6 +51,7 @@ def prepare_docstring(s, ignore=1):
def prepare_commentdoc(s):
+ # type: (unicode) -> List[unicode]
"""Extract documentation comment lines (starting with #:) and return them
as a list of lines. Returns an empty list if there is no documentation.
"""
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index bed786f4b..4438f9123 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -10,17 +10,33 @@
"""
from __future__ import absolute_import
+import re
from copy import copy
from contextlib import contextmanager
+
import docutils
+from docutils.utils import Reporter
from docutils.parsers.rst import directives, roles
+from sphinx.util import logging
+
+logger = logging.getLogger(__name__)
+report_re = re.compile('^(.+?:\\d+): \\((DEBUG|INFO|WARNING|ERROR|SEVERE)/(\\d+)?\\) '
+ '(.+?)\n?$')
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Iterator, List, Tuple # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
__version_info__ = tuple(map(int, docutils.__version__.split('.')))
@contextmanager
def docutils_namespace():
+ # type: () -> Iterator[None]
"""Create namespace for reST parsers."""
try:
_directives = copy(directives._directives)
@@ -41,17 +57,21 @@ class sphinx_domains(object):
markup takes precedence.
"""
def __init__(self, env):
+ # type: (BuildEnvironment) -> None
self.env = env
- self.directive_func = None
- self.roles_func = None
+ self.directive_func = None # type: Callable
+ self.roles_func = None # type: Callable
def __enter__(self):
+ # type: () -> None
self.enable()
def __exit__(self, type, value, traceback):
+ # type: (unicode, unicode, unicode) -> None
self.disable()
def enable(self):
+ # type: () -> None
self.directive_func = directives.directive
self.role_func = roles.role
@@ -59,10 +79,12 @@ class sphinx_domains(object):
roles.role = self.lookup_role
def disable(self):
+ # type: () -> None
directives.directive = self.directive_func
roles.role = self.role_func
def lookup_domain_element(self, type, name):
+ # type: (unicode, unicode) -> Tuple[Any, List]
"""Lookup a markup element (directive or role), given its name which can
be a full name (with domain).
"""
@@ -71,7 +93,7 @@ class sphinx_domains(object):
if ':' in name:
domain_name, name = name.split(':', 1)
if domain_name in self.env.domains:
- domain = self.env.domains[domain_name]
+ domain = self.env.get_domain(domain_name)
element = getattr(domain, type)(name)
if element is not None:
return element, []
@@ -84,20 +106,51 @@ class sphinx_domains(object):
return element, []
# always look in the std domain
- element = getattr(self.env.domains['std'], type)(name)
+ element = getattr(self.env.get_domain('std'), type)(name)
if element is not None:
return element, []
raise ElementLookupError
def lookup_directive(self, name, lang_module, document):
+ # type: (unicode, unicode, nodes.document) -> Tuple[Any, List]
try:
return self.lookup_domain_element('directive', name)
except ElementLookupError:
return self.directive_func(name, lang_module, document)
def lookup_role(self, name, lang_module, lineno, reporter):
+ # type: (unicode, unicode, int, Any) -> Tuple[Any, List]
try:
return self.lookup_domain_element('role', name)
except ElementLookupError:
return self.role_func(name, lang_module, lineno, reporter)
+
+
+class WarningStream(object):
+ def write(self, text):
+ # type: (unicode) -> None
+ matched = report_re.search(text) # type: ignore
+ if not matched:
+ logger.warning(text.rstrip("\r\n"))
+ else:
+ location, type, level, message = matched.groups()
+ logger.log(type, message, location=location)
+
+
+class LoggingReporter(Reporter):
+ def __init__(self, source, report_level, halt_level,
+ debug=False, error_handler='backslashreplace'):
+ # type: (unicode, int, int, bool, unicode) -> None
+ stream = WarningStream()
+ Reporter.__init__(self, source, report_level, halt_level,
+ stream, debug, error_handler=error_handler)
+
+ def set_conditions(self, category, report_level, halt_level, debug=False):
+ # type: (unicode, int, int, bool) -> None
+ Reporter.set_conditions(self, category, report_level, halt_level, debug=debug)
+
+
+def is_html5_writer_available():
+ # type: () -> bool
+ return __version_info__ > (0, 13, 0)
diff --git a/sphinx/util/fileutil.py b/sphinx/util/fileutil.py
index aab919ef2..772e41331 100644
--- a/sphinx/util/fileutil.py
+++ b/sphinx/util/fileutil.py
@@ -16,8 +16,15 @@ import posixpath
from docutils.utils import relative_path
from sphinx.util.osutil import copyfile, ensuredir, walk
+if False:
+ # For type annotation
+ from typing import Callable, Dict, Union # NOQA
+ from sphinx.util.matching import Matcher # NOQA
+ from sphinx.util.template import BaseRenderer # NOQA
+
def copy_asset_file(source, destination, context=None, renderer=None):
+ # type: (unicode, unicode, Dict, BaseRenderer) -> None
"""Copy an asset file to destination.
On copying, it expands the template variables if context argument is given and
@@ -40,16 +47,17 @@ def copy_asset_file(source, destination, context=None, renderer=None):
from sphinx.util.template import SphinxRenderer
renderer = SphinxRenderer()
- with codecs.open(source, 'r', encoding='utf-8') as fsrc:
+ with codecs.open(source, 'r', encoding='utf-8') as fsrc: # type: ignore
if destination.lower().endswith('_t'):
destination = destination[:-2]
- with codecs.open(destination, 'w', encoding='utf-8') as fdst:
- fdst.write(renderer.render_string(fsrc.read(), context))
+ with codecs.open(destination, 'w', encoding='utf-8') as fdst: # type: ignore
+ fdst.write(renderer.render_string(fsrc.read(), context)) # type: ignore
else:
copyfile(source, destination)
def copy_asset(source, destination, excluded=lambda path: False, context=None, renderer=None):
+ # type: (unicode, unicode, Union[Callable[[unicode], bool], Matcher], Dict, BaseRenderer) -> None # NOQA
"""Copy asset files to destination recursively.
On copying, it expands the template variables if context argument is given and
diff --git a/sphinx/util/i18n.py b/sphinx/util/i18n.py
index 0e89c5a96..218fac163 100644
--- a/sphinx/util/i18n.py
+++ b/sphinx/util/i18n.py
@@ -12,7 +12,6 @@ import gettext
import io
import os
import re
-import warnings
from os import path
from datetime import datetime
from collections import namedtuple
@@ -22,10 +21,15 @@ from babel.messages.pofile import read_po
from babel.messages.mofile import write_mo
from sphinx.errors import SphinxError
-from sphinx.deprecation import RemovedInSphinx16Warning
-from sphinx.util.osutil import walk
-from sphinx.util import SEP
+from sphinx.util import logging
+from sphinx.util.osutil import SEP, walk
+logger = logging.getLogger(__name__)
+
+if False:
+ # For type annotation
+ from typing import Callable, List, Set # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
LocaleFileInfoBase = namedtuple('CatalogInfo', 'base_dir,domain,charset')
@@ -34,41 +38,48 @@ class CatalogInfo(LocaleFileInfoBase):
@property
def po_file(self):
+ # type: () -> unicode
return self.domain + '.po'
@property
def mo_file(self):
+ # type: () -> unicode
return self.domain + '.mo'
@property
def po_path(self):
+ # type: () -> unicode
return path.join(self.base_dir, self.po_file)
@property
def mo_path(self):
+ # type: () -> unicode
return path.join(self.base_dir, self.mo_file)
def is_outdated(self):
+ # type: () -> bool
return (
not path.exists(self.mo_path) or
path.getmtime(self.mo_path) < path.getmtime(self.po_path))
- def write_mo(self, locale, warnfunc):
+ def write_mo(self, locale):
+ # type: (unicode) -> None
with io.open(self.po_path, 'rt', encoding=self.charset) as file_po:
try:
po = read_po(file_po, locale)
except Exception:
- warnfunc('reading error: %s' % self.po_path)
+ logger.warning('reading error: %s', self.po_path)
return
with io.open(self.mo_path, 'wb') as file_mo:
try:
write_mo(file_mo, po)
except Exception:
- warnfunc('writing error: %s' % self.mo_path)
+ logger.warning('writing error: %s', self.mo_path)
def find_catalog(docname, compaction):
+ # type: (unicode, bool) -> unicode
if compaction:
ret = docname.split(SEP, 1)[0]
else:
@@ -78,18 +89,20 @@ def find_catalog(docname, compaction):
def find_catalog_files(docname, srcdir, locale_dirs, lang, compaction):
+ # type: (unicode, unicode, List[unicode], unicode, bool) -> List[unicode]
if not(lang and locale_dirs):
return []
domain = find_catalog(docname, compaction)
- files = [gettext.find(domain, path.join(srcdir, dir_), [lang])
+ files = [gettext.find(domain, path.join(srcdir, dir_), [lang]) # type: ignore
for dir_ in locale_dirs]
- files = [path.relpath(f, srcdir) for f in files if f]
- return files
+ files = [path.relpath(f, srcdir) for f in files if f] # type: ignore
+ return files # type: ignore
def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact=False,
charset='utf-8', force_all=False):
+ # type: (List[unicode], unicode, List[unicode], bool, unicode, bool) -> Set[CatalogInfo]
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
@@ -106,10 +119,11 @@ def find_catalog_source_files(locale_dirs, locale, domains=None, gettext_compact
default is False.
:return: [CatalogInfo(), ...]
"""
+ catalogs = set() # type: Set[CatalogInfo]
+
if not locale:
- return [] # locale is not specified
+ return catalogs # locale is not specified
- catalogs = set()
for locale_dir in locale_dirs:
if not locale_dir:
continue # skip system locale directory
@@ -167,7 +181,8 @@ date_format_mappings = {
}
-def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.format_date):
+def babel_format_date(date, format, locale, formatter=babel.dates.format_date):
+ # type: (datetime, unicode, unicode, Callable) -> unicode
if locale is None:
locale = 'en'
@@ -182,17 +197,13 @@ def babel_format_date(date, format, locale, warn=None, formatter=babel.dates.for
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
- if warn:
- warn('Invalid date format. Quote the string by single quote '
- 'if you want to output it directly: %s' % format)
-
+ logger.warning('Invalid date format. Quote the string by single quote '
+ 'if you want to output it directly: %s', format)
return format
-def format_date(format, date=None, language=None, warn=None):
- if format is None:
- format = 'medium'
-
+def format_date(format, date=None, language=None):
+ # type: (str, datetime, unicode) -> unicode
if date is None:
# If time is not specified, try to use $SOURCE_DATE_EPOCH variable
# See https://wiki.debian.org/ReproducibleBuilds/TimestampsProposal
@@ -202,40 +213,32 @@ def format_date(format, date=None, language=None, warn=None):
else:
date = datetime.now()
- if re.match('EEE|MMM|dd|DDD|MM|WW|medium|YY', format):
- # consider the format as babel's
- warnings.warn('LDML format support will be dropped at Sphinx-1.6',
- RemovedInSphinx16Warning)
-
- return babel_format_date(date, format, locale=language, warn=warn,
- formatter=babel.dates.format_datetime)
- else:
- # consider the format as ustrftime's and try to convert it to babel's
- result = []
- tokens = re.split('(%.)', format)
- for token in tokens:
- if token in date_format_mappings:
- babel_format = date_format_mappings.get(token, '')
-
- # Check if we have to use a different babel formatter then
- # format_datetime, because we only want to format a date
- # or a time.
- if token == '%x':
- function = babel.dates.format_date
- elif token == '%X':
- function = babel.dates.format_time
- else:
- function = babel.dates.format_datetime
-
- result.append(babel_format_date(date, babel_format, locale=language,
- formatter=function))
+ result = []
+ tokens = re.split('(%.)', format)
+ for token in tokens:
+ if token in date_format_mappings:
+ babel_format = date_format_mappings.get(token, '')
+
+ # Check if we have to use a different babel formatter then
+ # format_datetime, because we only want to format a date
+ # or a time.
+ if token == '%x':
+ function = babel.dates.format_date
+ elif token == '%X':
+ function = babel.dates.format_time
else:
- result.append(token)
+ function = babel.dates.format_datetime
+
+ result.append(babel_format_date(date, babel_format, locale=language,
+ formatter=function))
+ else:
+ result.append(token)
- return "".join(result)
+ return "".join(result)
def get_image_filename_for_language(filename, env):
+ # type: (unicode, BuildEnvironment) -> unicode
if not env.config.language:
return filename
@@ -255,6 +258,7 @@ def get_image_filename_for_language(filename, env):
def search_image_for_language(filename, env):
+ # type: (unicode, BuildEnvironment) -> unicode
if not env.config.language:
return filename
diff --git a/sphinx/util/images.py b/sphinx/util/images.py
index c74951c6f..8de8254db 100644
--- a/sphinx/util/images.py
+++ b/sphinx/util/images.py
@@ -21,14 +21,19 @@ except ImportError:
except ImportError:
Image = None
+if False:
+ # For type annotation
+ from typing import Dict, List, Tuple # NOQA
+
mime_suffixes = {
'.pdf': 'application/pdf',
'.svg': 'image/svg+xml',
'.svgz': 'image/svg+xml',
-}
+} # type: Dict[unicode, unicode]
def get_image_size(filename):
+ # type: (unicode) -> Tuple[int, int]
try:
size = imagesize.get(filename)
if size[0] == -1:
@@ -48,6 +53,7 @@ def get_image_size(filename):
def guess_mimetype(filename):
+ # type: (unicode) -> unicode
_, ext = path.splitext(filename)
if ext in mime_suffixes:
return mime_suffixes[ext]
diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py
index 861c6f1a2..43824865e 100644
--- a/sphinx/util/inspect.py
+++ b/sphinx/util/inspect.py
@@ -16,6 +16,10 @@ from six.moves import builtins
from sphinx.util import force_decode
+if False:
+ # For type annotation
+ from typing import Any, Callable, List, Tuple, Type # NOQA
+
# this imports the standard library inspect module without resorting to
# relatively import this module
inspect = __import__('inspect')
@@ -64,10 +68,11 @@ else: # 2.7
from functools import partial
def getargspec(func):
+ # type: (Any) -> Any
"""Like inspect.getargspec but supports functools.partial as well."""
if inspect.ismethod(func):
func = func.__func__
- parts = 0, ()
+ parts = 0, () # type: Tuple[int, Tuple[unicode, ...]]
if type(func) is partial:
keywords = func.keywords
if keywords is None:
@@ -101,6 +106,7 @@ except ImportError:
def isenumclass(x):
+ # type: (Type) -> bool
"""Check if the object is subclass of enum."""
if enum is None:
return False
@@ -108,6 +114,7 @@ def isenumclass(x):
def isenumattribute(x):
+ # type: (Any) -> bool
"""Check if the object is attribute of enum."""
if enum is None:
return False
@@ -115,6 +122,7 @@ def isenumattribute(x):
def isdescriptor(x):
+ # type: (Any) -> bool
"""Check if the object is some kind of descriptor."""
for item in '__get__', '__set__', '__delete__':
if hasattr(safe_getattr(x, item, None), '__call__'):
@@ -123,6 +131,7 @@ def isdescriptor(x):
def safe_getattr(obj, name, *defargs):
+ # type: (Any, unicode, unicode) -> object
"""A getattr() that turns all exceptions into AttributeErrors."""
try:
return getattr(obj, name, *defargs)
@@ -145,8 +154,9 @@ def safe_getattr(obj, name, *defargs):
def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
+ # type: (Any, Callable[[unicode], bool], Callable) -> List[Tuple[unicode, Any]]
"""A version of inspect.getmembers() that uses safe_getattr()."""
- results = []
+ results = [] # type: List[Tuple[unicode, Any]]
for key in dir(object):
try:
value = attr_getter(object, key, None)
@@ -159,13 +169,14 @@ def safe_getmembers(object, predicate=None, attr_getter=safe_getattr):
def object_description(object):
+ # type: (Any) -> unicode
"""A repr() implementation that returns text safe to use in reST context."""
try:
s = repr(object)
except Exception:
raise ValueError
if isinstance(s, binary_type):
- s = force_decode(s, None)
+ s = force_decode(s, None) # type: ignore
# Strip non-deterministic memory addresses such as
# ``<__main__.A at 0x7f68cb685710>``
s = memory_address_re.sub('', s)
@@ -173,6 +184,7 @@ def object_description(object):
def is_builtin_class_method(obj, attr_name):
+ # type: (Any, unicode) -> bool
"""If attr_name is implemented at builtin class, return True.
>>> is_builtin_class_method(int, '__init__')
@@ -184,6 +196,6 @@ def is_builtin_class_method(obj, attr_name):
classes = [c for c in inspect.getmro(obj) if attr_name in c.__dict__]
cls = classes[0] if classes else object
- if not hasattr(builtins, safe_getattr(cls, '__name__', '')):
+ if not hasattr(builtins, safe_getattr(cls, '__name__', '')): # type: ignore
return False
- return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls
+ return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls # type: ignore
diff --git a/sphinx/util/inventory.py b/sphinx/util/inventory.py
new file mode 100644
index 000000000..762d43c14
--- /dev/null
+++ b/sphinx/util/inventory.py
@@ -0,0 +1,164 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.inventory
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Inventory utility functions for Sphinx.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+import os
+import zlib
+import codecs
+
+from six import PY3
+
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Callable, Dict, IO, Iterator, Tuple # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.environment import BuildEnvironment # NOQA
+
+ if PY3:
+ unicode = str
+
+ Inventory = Dict[unicode, Dict[unicode, Tuple[unicode, unicode, unicode, unicode]]]
+
+
+BUFSIZE = 16 * 1024
+UTF8StreamReader = codecs.lookup('utf-8')[2]
+
+logger = logging.getLogger(__name__)
+
+
+class ZlibReader(object):
+ """Compressed file reader."""
+
+ def __init__(self, stream):
+ # type: (IO) -> None
+ self.stream = stream
+
+ def read_chunks(self):
+ # type: () -> Iterator[bytes]
+ decompressor = zlib.decompressobj()
+ for chunk in iter(lambda: self.stream.read(BUFSIZE), b''):
+ yield decompressor.decompress(chunk)
+ yield decompressor.flush()
+
+ def __iter__(self):
+ # type: () -> Iterator[unicode]
+ buf = b''
+ for chunk in self.read_chunks():
+ buf += chunk
+ pos = buf.find(b'\n')
+ while pos != -1:
+ yield buf[:pos].decode('utf-8')
+ buf = buf[pos + 1:]
+ pos = buf.find(b'\n')
+
+ assert not buf
+
+ def readlines(self):
+ # type: () -> Iterator[unicode]
+ return iter(self) # type: ignore
+
+
+class InventoryFile(object):
+ @classmethod
+ def load(cls, stream, uri, joinfunc):
+ # type: (IO, unicode, Callable) -> Inventory
+ line = stream.readline().rstrip().decode('utf-8')
+ if line == '# Sphinx inventory version 1':
+ return cls.load_v1(stream, uri, joinfunc)
+ elif line == '# Sphinx inventory version 2':
+ return cls.load_v2(stream, uri, joinfunc)
+ else:
+ raise ValueError('invalid inventory header: %s' % line)
+
+ @classmethod
+ def load_v1(cls, stream, uri, join):
+ # type: (IO, unicode, Callable) -> Inventory
+ stream = UTF8StreamReader(stream)
+ invdata = {} # type: Inventory
+ projname = stream.readline().rstrip()[11:]
+ version = stream.readline().rstrip()[11:]
+ for line in stream:
+ name, type, location = line.rstrip().split(None, 2)
+ location = join(uri, location)
+ # version 1 did not add anchors to the location
+ if type == 'mod':
+ type = 'py:module'
+ location += '#module-' + name
+ else:
+ type = 'py:' + type
+ location += '#' + name
+ invdata.setdefault(type, {})[name] = (projname, version, location, '-')
+ return invdata
+
+ @classmethod
+ def load_v2(cls, stream, uri, join):
+ # type: (IO, unicode, Callable) -> Inventory
+ invdata = {} # type: Inventory
+ projname = stream.readline().decode('utf-8').rstrip()[11:]
+ version = stream.readline().decode('utf-8').rstrip()[11:]
+ line = stream.readline().decode('utf-8')
+ if 'zlib' not in line:
+ raise ValueError('invalid inventory header (not compressed): %s' % line)
+
+ for line in ZlibReader(stream).readlines():
+ # be careful to handle names with embedded spaces correctly
+ m = re.match(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)',
+ line.rstrip())
+ if not m:
+ continue
+ name, type, prio, location, dispname = m.groups()
+ if type == 'py:module' and type in invdata and \
+ name in invdata[type]: # due to a bug in 1.1 and below,
+ # two inventory entries are created
+ # for Python modules, and the first
+ # one is correct
+ continue
+ if location.endswith(u'$'):
+ location = location[:-1] + name
+ location = join(uri, location)
+ invdata.setdefault(type, {})[name] = (projname, version,
+ location, dispname)
+ return invdata
+
+ @classmethod
+ def dump(cls, filename, env, builder):
+ # type: (unicode, BuildEnvironment, Builder) -> None
+ def escape(string):
+ # type: (unicode) -> unicode
+ return re.sub("\\s+", " ", string)
+
+ with open(os.path.join(filename), 'wb') as f:
+ # header
+ f.write((u'# Sphinx inventory version 2\n'
+ u'# Project: %s\n'
+ u'# Version: %s\n'
+ u'# The remainder of this file is compressed using zlib.\n' %
+ (escape(env.config.project),
+ escape(env.config.version))).encode('utf-8'))
+
+ # body
+ compressor = zlib.compressobj(9)
+ for domainname, domain in sorted(env.domains.items()):
+ for name, dispname, typ, docname, anchor, prio in \
+ sorted(domain.get_objects()):
+ if anchor.endswith(name):
+ # this can shorten the inventory by as much as 25%
+ anchor = anchor[:-len(name)] + '$'
+ uri = builder.get_target_uri(docname)
+ if anchor:
+ uri += '#' + anchor
+ if dispname == name:
+ dispname = u'-'
+ entry = (u'%s %s:%s %s %s %s\n' %
+ (name, domainname, typ, prio, uri, dispname))
+ f.write(compressor.compress(entry.encode('utf-8')))
+ f.write(compressor.flush())
diff --git a/sphinx/util/jsdump.py b/sphinx/util/jsdump.py
index 953e5e2f1..73aa2ce03 100644
--- a/sphinx/util/jsdump.py
+++ b/sphinx/util/jsdump.py
@@ -16,6 +16,10 @@ from six import iteritems, integer_types, string_types
from sphinx.util.pycompat import u
+if False:
+ # For type annotation
+ from typing import Any, Dict, IO, List, Match, Union # NOQA
+
_str_re = re.compile(r'"(\\\\|\\"|[^"])*"')
_int_re = re.compile(r'\d+')
_name_re = re.compile(r'[a-zA-Z_]\w*')
@@ -37,7 +41,9 @@ ESCAPED = re.compile(r'\\u.{4}|\\.')
def encode_string(s):
+ # type: (str) -> str
def replace(match):
+ # type: (Match) -> unicode
s = match.group(0)
try:
return ESCAPE_DICT[s]
@@ -51,10 +57,11 @@ def encode_string(s):
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
- return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+ return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' # type: ignore
def decode_string(s):
+ # type: (str) -> str
return ESCAPED.sub(lambda m: eval(u + '"' + m.group() + '"'), s)
@@ -77,6 +84,7 @@ double in super""".split())
def dumps(obj, key=False):
+ # type: (Any, bool) -> str
if key:
if not isinstance(obj, string_types):
obj = str(obj)
@@ -88,7 +96,7 @@ def dumps(obj, key=False):
return 'null'
elif obj is True or obj is False:
return obj and 'true' or 'false'
- elif isinstance(obj, integer_types + (float,)):
+ elif isinstance(obj, integer_types + (float,)): # type: ignore
return str(obj)
elif isinstance(obj, dict):
return '{%s}' % ','.join(sorted('%s:%s' % (
@@ -100,21 +108,23 @@ def dumps(obj, key=False):
elif isinstance(obj, (tuple, list)):
return '[%s]' % ','.join(dumps(x) for x in obj)
elif isinstance(obj, string_types):
- return encode_string(obj)
+ return encode_string(obj) # type: ignore
raise TypeError(type(obj))
def dump(obj, f):
+ # type: (Any, IO) -> None
f.write(dumps(obj))
def loads(x):
+ # type: (str) -> Any
"""Loader that can read the JS subset the indexer produces."""
nothing = object()
i = 0
n = len(x)
- stack = []
- obj = nothing
+ stack = [] # type: List[Union[List, Dict]]
+ obj = nothing # type: Any
key = False
keys = []
while i < n:
@@ -164,6 +174,7 @@ def loads(x):
raise ValueError("multiple values")
key = False
else:
+ y = None # type: Any
m = _str_re.match(x, i)
if m:
y = decode_string(m.group()[1:-1])
@@ -200,4 +211,5 @@ def loads(x):
def load(f):
+ # type: (IO) -> Any
return loads(f.read())
diff --git a/sphinx/util/jsonimpl.py b/sphinx/util/jsonimpl.py
index 04a839390..09c04dc6a 100644
--- a/sphinx/util/jsonimpl.py
+++ b/sphinx/util/jsonimpl.py
@@ -14,28 +14,37 @@ import json
from six import text_type
from six.moves import UserString
+if False:
+ # For type annotation
+ from typing import Any, IO # NOQA
+
class SphinxJSONEncoder(json.JSONEncoder):
"""JSONEncoder subclass that forces translation proxies."""
def default(self, obj):
+ # type: (Any) -> unicode
if isinstance(obj, UserString):
return text_type(obj)
return json.JSONEncoder.default(self, obj)
def dump(obj, fp, *args, **kwds):
+ # type: (Any, IO, Any, Any) -> None
kwds['cls'] = SphinxJSONEncoder
- return json.dump(obj, fp, *args, **kwds)
+ json.dump(obj, fp, *args, **kwds)
def dumps(obj, *args, **kwds):
+ # type: (Any, Any, Any) -> unicode
kwds['cls'] = SphinxJSONEncoder
return json.dumps(obj, *args, **kwds)
def load(*args, **kwds):
+ # type: (Any, Any) -> Any
return json.load(*args, **kwds)
def loads(*args, **kwds):
+ # type: (Any, Any) -> Any
return json.loads(*args, **kwds)
diff --git a/sphinx/util/logging.py b/sphinx/util/logging.py
index 8b99525f4..0699503ea 100644
--- a/sphinx/util/logging.py
+++ b/sphinx/util/logging.py
@@ -8,9 +8,273 @@
:copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import absolute_import
+
+import logging
+import logging.handlers
+from contextlib import contextmanager
+from collections import defaultdict
+
+from six import PY2, StringIO
+from docutils import nodes
+from docutils.utils import get_source_line
+
+from sphinx.errors import SphinxWarning
+from sphinx.util.console import colorize
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, Generator, IO, List, Tuple, Union # NOQA
+ from docutils import nodes # NOQA
+ from sphinx.application import Sphinx # NOQA
+
+
+VERBOSE = 15
+
+LEVEL_NAMES = defaultdict(lambda: logging.WARNING) # type: Dict[str, int]
+LEVEL_NAMES.update({
+ 'CRITICAL': logging.CRITICAL,
+ 'SEVERE': logging.CRITICAL,
+ 'ERROR': logging.ERROR,
+ 'WARNING': logging.WARNING,
+ 'INFO': logging.INFO,
+ 'VERBOSE': VERBOSE,
+ 'DEBUG': logging.DEBUG,
+})
+
+VERBOSITY_MAP = defaultdict(lambda: 0) # type: Dict[int, int]
+VERBOSITY_MAP.update({
+ 0: logging.INFO,
+ 1: VERBOSE,
+ 2: logging.DEBUG,
+})
+
+COLOR_MAP = defaultdict(lambda: 'blue') # type: Dict[int, unicode]
+COLOR_MAP.update({
+ logging.WARNING: 'darkred',
+ logging.DEBUG: 'darkgray',
+})
+
+
+def getLogger(name):
+ # type: (str) -> SphinxLoggerAdapter
+ """Get logger wrapped by SphinxLoggerAdapter."""
+ return SphinxLoggerAdapter(logging.getLogger(name), {})
+
+
+def convert_serializable(records):
+ # type: (List[logging.LogRecord]) -> None
+ """Convert LogRecord serializable."""
+ for r in records:
+ # extract arguments to a message and clear them
+ r.msg = r.getMessage()
+ r.args = ()
+
+
+class SphinxWarningLogRecord(logging.LogRecord):
+ """Log record class supporting location"""
+ location = None # type: Any
+
+ def getMessage(self):
+ # type: () -> str
+ message = super(SphinxWarningLogRecord, self).getMessage()
+ location = getattr(self, 'location', None)
+ if location:
+ message = '%s: WARNING: %s' % (location, message)
+ elif 'WARNING:' not in message:
+ message = 'WARNING: %s' % message
+
+ return message
+
+
+class SphinxLoggerAdapter(logging.LoggerAdapter):
+ """LoggerAdapter allowing ``type`` and ``subtype`` keywords."""
+
+ def log(self, level, msg, *args, **kwargs): # type: ignore
+ # type: (Union[int, str], unicode, Any, Any) -> None
+ if isinstance(level, int):
+ super(SphinxLoggerAdapter, self).log(level, msg, *args, **kwargs)
+ else:
+ levelno = LEVEL_NAMES[level]
+ super(SphinxLoggerAdapter, self).log(levelno, msg, *args, **kwargs)
+
+ def verbose(self, msg, *args, **kwargs):
+ # type: (unicode, Any, Any) -> None
+ self.log(VERBOSE, msg, *args, **kwargs)
+
+ def process(self, msg, kwargs): # type: ignore
+ # type: (unicode, Dict) -> Tuple[unicode, Dict]
+ extra = kwargs.setdefault('extra', {})
+ if 'type' in kwargs:
+ extra['type'] = kwargs.pop('type')
+ if 'subtype' in kwargs:
+ extra['subtype'] = kwargs.pop('subtype')
+ if 'location' in kwargs:
+ extra['location'] = kwargs.pop('location')
+ if 'nonl' in kwargs:
+ extra['nonl'] = kwargs.pop('nonl')
+ if 'color' in kwargs:
+ extra['color'] = kwargs.pop('color')
+
+ return msg, kwargs
+
+ def handle(self, record):
+ # type: (logging.LogRecord) -> None
+ self.logger.handle(record) # type: ignore
+
+
+class WarningStreamHandler(logging.StreamHandler):
+ """StreamHandler for warnings."""
+ pass
+
+
+class NewLineStreamHandlerPY2(logging.StreamHandler):
+ """StreamHandler which switches line terminator by record.nonl flag."""
+
+ def emit(self, record):
+ # type: (logging.LogRecord) -> None
+ try:
+ self.acquire()
+ stream = self.stream # type: ignore
+ if getattr(record, 'nonl', False):
+ # remove return code forcely when nonl=True
+ self.stream = StringIO()
+ super(NewLineStreamHandlerPY2, self).emit(record)
+ stream.write(self.stream.getvalue()[:-1])
+ stream.flush()
+ else:
+ super(NewLineStreamHandlerPY2, self).emit(record)
+ finally:
+ self.stream = stream
+ self.release()
+
+
+class NewLineStreamHandlerPY3(logging.StreamHandler):
+ """StreamHandler which switches line terminator by record.nonl flag."""
+
+ def emit(self, record):
+ # type: (logging.LogRecord) -> None
+ try:
+ self.acquire()
+ if getattr(record, 'nonl', False):
+ # skip appending terminator when nonl=True
+ self.terminator = ''
+ super(NewLineStreamHandlerPY3, self).emit(record)
+ finally:
+ self.terminator = '\n'
+ self.release()
+
+
+if PY2:
+ NewLineStreamHandler = NewLineStreamHandlerPY2
+else:
+ NewLineStreamHandler = NewLineStreamHandlerPY3
+
+
+class MemoryHandler(logging.handlers.BufferingHandler):
+ """Handler buffering all logs."""
+
+ def __init__(self):
+ # type: () -> None
+ super(MemoryHandler, self).__init__(-1)
+
+ def shouldFlush(self, record):
+ # type: (logging.LogRecord) -> bool
+ return False # never flush
+
+ def flushTo(self, logger):
+ # type: (logging.Logger) -> None
+ self.acquire()
+ try:
+ for record in self.buffer:
+ logger.handle(record)
+ self.buffer = [] # type: List[logging.LogRecord]
+ finally:
+ self.release()
+
+ def clear(self):
+ # type: () -> List[logging.LogRecord]
+ buffer, self.buffer = self.buffer, []
+ return buffer
+
+
+@contextmanager
+def pending_warnings():
+ # type: () -> Generator
+ """contextmanager to pend logging warnings temporary."""
+ logger = logging.getLogger()
+ memhandler = MemoryHandler()
+ memhandler.setLevel(logging.WARNING)
+
+ try:
+ handlers = []
+ for handler in logger.handlers[:]:
+ if isinstance(handler, WarningStreamHandler):
+ logger.removeHandler(handler)
+ handlers.append(handler)
+
+ logger.addHandler(memhandler)
+ yield memhandler
+ finally:
+ logger.removeHandler(memhandler)
+
+ for handler in handlers:
+ logger.addHandler(handler)
+
+ memhandler.flushTo(logger)
+
+
+@contextmanager
+def pending_logging():
+ # type: () -> Generator
+ """contextmanager to pend logging all logs temporary."""
+ logger = logging.getLogger()
+ memhandler = MemoryHandler()
+
+ try:
+ handlers = []
+ for handler in logger.handlers[:]:
+ logger.removeHandler(handler)
+ handlers.append(handler)
+
+ logger.addHandler(memhandler)
+ yield memhandler
+ finally:
+ logger.removeHandler(memhandler)
+
+ for handler in handlers:
+ logger.addHandler(handler)
+
+ memhandler.flushTo(logger)
+
+
+class LogCollector(object):
+ def __init__(self):
+ # type: () -> None
+ self.logs = [] # type: List[logging.LogRecord]
+
+ @contextmanager
+ def collect(self):
+ # type: () -> Generator
+ with pending_logging() as memhandler:
+ yield
+
+ self.logs = memhandler.clear()
+
+
+class InfoFilter(logging.Filter):
+ """Filter error and warning messages."""
+
+ def filter(self, record):
+ # type: (logging.LogRecord) -> bool
+ if record.levelno < logging.WARNING:
+ return True
+ else:
+ return False
def is_suppressed_warning(type, subtype, suppress_warnings):
+ # type: (unicode, unicode, List[unicode]) -> bool
"""Check the warning is suppressed or not."""
if type is None:
return False
@@ -27,3 +291,141 @@ def is_suppressed_warning(type, subtype, suppress_warnings):
return True
return False
+
+
+class WarningSuppressor(logging.Filter):
+ """Filter logs by `suppress_warnings`."""
+
+ def __init__(self, app):
+ # type: (Sphinx) -> None
+ self.app = app
+ super(WarningSuppressor, self).__init__()
+
+ def filter(self, record):
+ # type: (logging.LogRecord) -> bool
+ type = getattr(record, 'type', None)
+ subtype = getattr(record, 'subtype', None)
+
+ if is_suppressed_warning(type, subtype, self.app.config.suppress_warnings):
+ return False
+ else:
+ self.app._warncount += 1
+ return True
+
+
+class WarningIsErrorFilter(logging.Filter):
+ """Raise exception if warning emitted."""
+
+ def __init__(self, app):
+ # type: (Sphinx) -> None
+ self.app = app
+ super(WarningIsErrorFilter, self).__init__()
+
+ def filter(self, record):
+ # type: (logging.LogRecord) -> bool
+ if self.app.warningiserror:
+ raise SphinxWarning(record.msg % record.args)
+ else:
+ return True
+
+
+class WarningLogRecordTranslator(logging.Filter):
+ """Converts a log record to one Sphinx expects
+
+ * Make a instance of SphinxWarningLogRecord
+ * docname to path if location given
+ """
+ def __init__(self, app):
+ # type: (Sphinx) -> None
+ self.app = app
+ super(WarningLogRecordTranslator, self).__init__()
+
+ def filter(self, record): # type: ignore
+ # type: (SphinxWarningLogRecord) -> bool
+ if isinstance(record, logging.LogRecord):
+ record.__class__ = SphinxWarningLogRecord # force subclassing to handle location
+
+ location = getattr(record, 'location', None)
+ if isinstance(location, tuple):
+ docname, lineno = location
+ if docname and lineno:
+ record.location = '%s:%s' % (self.app.env.doc2path(docname), lineno)
+ elif docname:
+ record.location = '%s' % self.app.env.doc2path(docname)
+ else:
+ record.location = None
+ elif isinstance(location, nodes.Node):
+ (source, line) = get_source_line(location)
+ if source and line:
+ record.location = "%s:%s" % (source, line)
+ elif source:
+ record.location = "%s:" % source
+ elif line:
+ record.location = "<unknown>:%s" % line
+ else:
+ record.location = None
+ elif location and ':' not in location:
+ record.location = '%s' % self.app.env.doc2path(location)
+
+ return True
+
+
+class ColorizeFormatter(logging.Formatter):
+ def format(self, record):
+ # type: (logging.LogRecord) -> str
+ message = super(ColorizeFormatter, self).format(record)
+ color = getattr(record, 'color', None)
+ if color is None:
+ color = COLOR_MAP.get(record.levelno)
+
+ if color:
+ return colorize(color, message) # type: ignore
+ else:
+ return message
+
+
+class SafeEncodingWriter(object):
+ """Stream writer which ignores UnicodeEncodeError silently"""
+ def __init__(self, stream):
+ # type: (IO) -> None
+ self.stream = stream
+ self.encoding = getattr(stream, 'encoding', 'ascii') or 'ascii'
+
+ def write(self, data):
+ # type: (unicode) -> None
+ try:
+ self.stream.write(data)
+ except UnicodeEncodeError:
+ # stream accept only str, not bytes. So, we encode and replace
+ # non-encodable characters, then decode them.
+ self.stream.write(data.encode(self.encoding, 'replace').decode(self.encoding))
+
+ def flush(self):
+ # type: () -> None
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+
+def setup(app, status, warning):
+ # type: (Sphinx, IO, IO) -> None
+ """Setup root logger for Sphinx"""
+ logger = logging.getLogger()
+ logger.setLevel(logging.NOTSET)
+
+ # clear all handlers
+ for handler in logger.handlers[:]:
+ logger.removeHandler(handler)
+
+ info_handler = NewLineStreamHandler(SafeEncodingWriter(status)) # type: ignore
+ info_handler.addFilter(InfoFilter())
+ info_handler.setLevel(VERBOSITY_MAP[app.verbosity])
+ info_handler.setFormatter(ColorizeFormatter())
+
+ warning_handler = WarningStreamHandler(SafeEncodingWriter(warning)) # type: ignore
+ warning_handler.addFilter(WarningSuppressor(app))
+ warning_handler.addFilter(WarningIsErrorFilter(app))
+ warning_handler.addFilter(WarningLogRecordTranslator(app))
+ warning_handler.setLevel(logging.WARNING)
+ warning_handler.setFormatter(ColorizeFormatter())
+ logger.addHandler(info_handler)
+ logger.addHandler(warning_handler)
diff --git a/sphinx/util/matching.py b/sphinx/util/matching.py
index 643632c4e..3990e0e4b 100644
--- a/sphinx/util/matching.py
+++ b/sphinx/util/matching.py
@@ -11,15 +11,20 @@
import re
+if False:
+ # For type annotation
+ from typing import Callable, Dict, List, Match, Pattern # NOQA
+
def _translate_pattern(pat):
+ # type: (unicode) -> unicode
"""Translate a shell-style glob pattern to a regular expression.
Adapted from the fnmatch module, but enhanced so that single stars don't
match slashes.
"""
i, n = 0, len(pat)
- res = ''
+ res = '' # type: unicode
while i < n:
c = pat[i]
i += 1
@@ -59,6 +64,7 @@ def _translate_pattern(pat):
def compile_matchers(patterns):
+ # type: (List[unicode]) -> List[Callable[[unicode], Match[unicode]]]
return [re.compile(_translate_pattern(pat)).match for pat in patterns]
@@ -70,23 +76,27 @@ class Matcher(object):
"""
def __init__(self, patterns):
+ # type: (List[unicode]) -> None
expanded = [pat[3:] for pat in patterns if pat.startswith('**/')]
self.patterns = compile_matchers(patterns + expanded)
def __call__(self, string):
+ # type: (unicode) -> bool
return self.match(string)
def match(self, string):
+ # type: (unicode) -> bool
return any(pat(string) for pat in self.patterns)
DOTFILES = Matcher(['**/.*'])
-_pat_cache = {}
+_pat_cache = {} # type: Dict[unicode, Pattern]
def patmatch(name, pat):
+ # type: (unicode, unicode) -> re.Match
"""Return if name matches pat. Adapted from fnmatch module."""
if pat not in _pat_cache:
_pat_cache[pat] = re.compile(_translate_pattern(pat))
@@ -94,6 +104,7 @@ def patmatch(name, pat):
def patfilter(names, pat):
+ # type: (List[unicode], unicode) -> List[unicode]
"""Return the subset of the list NAMES that match PAT.
Adapted from fnmatch module.
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index f87a7afd0..5c444fb56 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -11,21 +11,35 @@
from __future__ import absolute_import
import re
+import warnings
from six import text_type
+
from docutils import nodes
from sphinx import addnodes
+from sphinx.deprecation import RemovedInSphinx17Warning
from sphinx.locale import pairindextypes
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Iterable, List, Set, Tuple, Union # NOQA
+ from sphinx.builders import Builder # NOQA
+ from sphinx.utils.tags import Tags # NOQA
+
+logger = logging.getLogger(__name__)
class WarningStream(object):
def __init__(self, warnfunc):
+ # type: (Callable) -> None
self.warnfunc = warnfunc
self._re = re.compile(r'\((DEBUG|INFO|WARNING|ERROR|SEVERE)/[0-4]\)')
def write(self, text):
+ # type: (str) -> None
text = text.strip()
if text:
self.warnfunc(self._re.sub(r'\1:', text), None, '')
@@ -37,6 +51,7 @@ caption_ref_re = explicit_title_re # b/w compat alias
def apply_source_workaround(node):
+ # type: (nodes.Node) -> None
# workaround: nodes.term have wrong rawsource if classifier is specified.
# The behavior of docutils-0.11, 0.12 is:
# * when ``term text : classifier1 : classifier2`` is specified,
@@ -52,8 +67,8 @@ def apply_source_workaround(node):
if isinstance(node, nodes.term):
# strip classifier from rawsource of term
for classifier in reversed(node.parent.traverse(nodes.classifier)):
- node.rawsource = re.sub(
- '\s*:\s*%s' % re.escape(classifier.astext()), '', node.rawsource)
+ node.rawsource = re.sub(r'\s*:\s*%s' % re.escape(classifier.astext()),
+ '', node.rawsource)
# workaround: recommonmark-0.2.0 doesn't set rawsource attribute
if not node.rawsource:
@@ -85,6 +100,7 @@ IGNORED_NODES = (
def is_pending_meta(node):
+ # type: (nodes.Node) -> bool
if (isinstance(node, nodes.pending) and
isinstance(node.details.get('nodes', [None])[0], addnodes.meta)):
return True
@@ -93,6 +109,7 @@ def is_pending_meta(node):
def is_translatable(node):
+ # type: (nodes.Node) -> bool
if isinstance(node, addnodes.translatable):
return True
@@ -135,6 +152,7 @@ META_TYPE_NODES = (
def extract_messages(doctree):
+ # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, unicode]]
"""Extract translatable messages from a document tree."""
for node in doctree.traverse(is_translatable):
if isinstance(node, addnodes.translatable):
@@ -162,12 +180,15 @@ def extract_messages(doctree):
def find_source_node(node):
+ # type: (nodes.Node) -> unicode
for pnode in traverse_parent(node):
if pnode.source:
return pnode.source
+ return None
def traverse_parent(node, cls=None):
+ # type: (nodes.Node, Any) -> Iterable[nodes.Node]
while node:
if cls is None or isinstance(node, cls):
yield node
@@ -175,8 +196,10 @@ def traverse_parent(node, cls=None):
def traverse_translatable_index(doctree):
+ # type: (nodes.Node) -> Iterable[Tuple[nodes.Node, List[unicode]]]
"""Traverse translatable index node from a document tree."""
def is_block_index(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, addnodes.index) and \
node.get('inline') is False
for node in doctree.traverse(is_block_index):
@@ -188,6 +211,7 @@ def traverse_translatable_index(doctree):
def nested_parse_with_titles(state, content, node):
+ # type: (Any, List[unicode], nodes.Node) -> unicode
"""Version of state.nested_parse() that allows titles and does not require
titles to have the same decoration as the calling document.
@@ -207,6 +231,7 @@ def nested_parse_with_titles(state, content, node):
def clean_astext(node):
+ # type: (nodes.Node) -> unicode
"""Like node.astext(), but ignore images."""
node = node.deepcopy()
for img in node.traverse(nodes.image):
@@ -217,8 +242,9 @@ def clean_astext(node):
def split_explicit_title(text):
+ # type: (unicode) -> Tuple[bool, unicode, unicode]
"""Split role content into title and target, if given."""
- match = explicit_title_re.match(text)
+ match = explicit_title_re.match(text) # type: ignore
if match:
return True, match.group(1), match.group(2)
return False, text, text
@@ -230,7 +256,8 @@ indextypes = [
def process_index_entry(entry, targetid):
- indexentries = []
+ # type: (unicode, unicode) -> List[Tuple[unicode, unicode, unicode, unicode, unicode]]
+ indexentries = [] # type: List[Tuple[unicode, unicode, unicode, unicode, unicode]]
entry = entry.strip()
oentry = entry
main = ''
@@ -266,6 +293,7 @@ def process_index_entry(entry, targetid):
def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed):
+ # type: (Builder, Set[unicode], unicode, nodes.Node, Callable, nodes.Node) -> nodes.Node
"""Inline all toctrees in the *tree*.
Record all docnames in *docnameset*, and output docnames with *colorfunc*.
@@ -278,15 +306,14 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed
if includefile not in traversed:
try:
traversed.append(includefile)
- builder.info(colorfunc(includefile) + " ", nonl=1)
+ logger.info(colorfunc(includefile) + " ", nonl=1)
subtree = inline_all_toctrees(builder, docnameset, includefile,
builder.env.get_doctree(includefile),
colorfunc, traversed)
docnameset.add(includefile)
except Exception:
- builder.warn('toctree contains ref to nonexisting '
- 'file %r' % includefile,
- builder.env.doc2path(docname))
+ logger.warning('toctree contains ref to nonexisting file %r',
+ includefile, location=docname)
else:
sof = addnodes.start_of_file(docname=includefile)
sof.children = subtree.children
@@ -299,13 +326,17 @@ def inline_all_toctrees(builder, docnameset, docname, tree, colorfunc, traversed
def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
+ # type: (Builder, unicode, unicode, unicode, nodes.Node, unicode) -> nodes.reference
"""Shortcut to create a reference node."""
node = nodes.reference('', '', internal=True)
- if fromdocname == todocname:
+ if fromdocname == todocname and targetid:
node['refid'] = targetid
else:
- node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
- '#' + targetid)
+ if targetid:
+ node['refuri'] = (builder.get_relative_uri(fromdocname, todocname) +
+ '#' + targetid)
+ else:
+ node['refuri'] = builder.get_relative_uri(fromdocname, todocname)
if title:
node['reftitle'] = title
node.append(child)
@@ -313,27 +344,32 @@ def make_refnode(builder, fromdocname, todocname, targetid, child, title=None):
def set_source_info(directive, node):
+ # type: (Any, nodes.Node) -> None
node.source, node.line = \
directive.state_machine.get_source_and_line(directive.lineno)
def set_role_source_info(inliner, lineno, node):
+ # type: (Any, unicode, nodes.Node) -> None
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
-def process_only_nodes(doctree, tags, warn_node=None):
+def process_only_nodes(doctree, tags):
+ # type: (nodes.Node, Tags) -> None
# A comment on the comment() nodes being inserted: replacing by [] would
# result in a "Losing ids" exception if there is a target node before
# the only node, so we make sure docutils can transfer the id to
# something, even if it's just a comment and will lose the id anyway...
+ warnings.warn('process_only_nodes() is deprecated. '
+ 'Use sphinx.environment.apply_post_transforms() instead.',
+ RemovedInSphinx17Warning)
+
for node in doctree.traverse(addnodes.only):
try:
ret = tags.eval_condition(node['expr'])
except Exception as err:
- if warn_node is None:
- raise err
- warn_node('exception while evaluating only '
- 'directive expression: %s' % err, node)
+ logger.warning('exception while evaluating only directive expression: %s', err,
+ location=node)
node.replace_self(node.children or nodes.comment())
else:
if ret:
@@ -345,6 +381,7 @@ def process_only_nodes(doctree, tags, warn_node=None):
# monkey-patch Element.copy to copy the rawsource and line
def _new_copy(self):
+ # type: (nodes.Node) -> nodes.Node
newnode = self.__class__(self.rawsource, **self.attributes)
if isinstance(self, nodes.Element):
newnode.source = self.source
diff --git a/sphinx/util/osutil.py b/sphinx/util/osutil.py
index 6deb5c5e9..a8bff11c4 100644
--- a/sphinx/util/osutil.py
+++ b/sphinx/util/osutil.py
@@ -21,8 +21,11 @@ import filecmp
from os import path
import contextlib
from io import BytesIO, StringIO
+from six import PY2, PY3, text_type
-from six import PY2, text_type
+if False:
+ # For type annotation
+ from typing import Any, Iterator, List, Tuple, Union # NOQA
# Errnos that we need.
EEXIST = getattr(errno, 'EEXIST', 0)
@@ -30,6 +33,9 @@ ENOENT = getattr(errno, 'ENOENT', 0)
EPIPE = getattr(errno, 'EPIPE', 0)
EINVAL = getattr(errno, 'EINVAL', 0)
+if PY3:
+ unicode = str # special alias for static typing...
+
# SEP separates path elements in the canonical file names
#
# Define SEP as a manifest constant, not so much because we expect it to change
@@ -39,15 +45,18 @@ SEP = "/"
def os_path(canonicalpath):
+ # type: (unicode) -> unicode
return canonicalpath.replace(SEP, path.sep)
def canon_path(nativepath):
+ # type: (unicode) -> unicode
"""Return path in OS-independent form"""
return nativepath.replace(path.sep, SEP)
def relative_uri(base, to):
+ # type: (unicode, unicode) -> unicode
"""Return a relative URL from ``base`` to ``to``."""
if to.startswith(SEP):
return to
@@ -71,6 +80,7 @@ def relative_uri(base, to):
def ensuredir(path):
+ # type: (unicode) -> None
"""Ensure that a path exists."""
try:
os.makedirs(path)
@@ -84,6 +94,7 @@ def ensuredir(path):
# that check UnicodeError.
# The customization obstacle to replace the function with the os.walk.
def walk(top, topdown=True, followlinks=False):
+ # type: (unicode, bool, bool) -> Iterator[Tuple[unicode, List[unicode], List[unicode]]]
"""Backport of os.walk from 2.6, where the *followlinks* argument was
added.
"""
@@ -115,6 +126,7 @@ def walk(top, topdown=True, followlinks=False):
def mtimes_of_files(dirnames, suffix):
+ # type: (List[unicode], unicode) -> Iterator[float]
for dirname in dirnames:
for root, dirs, files in os.walk(dirname):
for sfile in files:
@@ -126,6 +138,7 @@ def mtimes_of_files(dirnames, suffix):
def movefile(source, dest):
+ # type: (unicode, unicode) -> None
"""Move a file, removing the destination if it exists."""
if os.path.exists(dest):
try:
@@ -136,6 +149,7 @@ def movefile(source, dest):
def copytimes(source, dest):
+ # type: (unicode, unicode) -> None
"""Copy a file's modification times."""
st = os.stat(source)
if hasattr(os, 'utime'):
@@ -143,6 +157,7 @@ def copytimes(source, dest):
def copyfile(source, dest):
+ # type: (unicode, unicode) -> None
"""Copy a file and its modification times, if possible.
Note: ``copyfile`` skips copying if the file has not been changed"""
@@ -159,10 +174,12 @@ no_fn_re = re.compile(r'[^a-zA-Z0-9_-]')
def make_filename(string):
+ # type: (str) -> unicode
return no_fn_re.sub('', string) or 'sphinx'
def ustrftime(format, *args):
+ # type: (unicode, Any) -> unicode
# [DEPRECATED] strftime for unicode strings
# It will be removed at Sphinx-1.5
if not args:
@@ -171,7 +188,7 @@ def ustrftime(format, *args):
source_date_epoch = os.getenv('SOURCE_DATE_EPOCH')
if source_date_epoch is not None:
time_struct = time.gmtime(float(source_date_epoch))
- args = [time_struct]
+ args = [time_struct] # type: ignore
if PY2:
# if a locale is set, the time strings are encoded in the encoding
# given by LC_TIME; if that is available, use it
@@ -188,16 +205,18 @@ def ustrftime(format, *args):
def safe_relpath(path, start=None):
+ # type: (unicode, unicode) -> unicode
try:
return os.path.relpath(path, start)
except ValueError:
return path
-fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding() # type: unicode
def abspath(pathdir):
+ # type: (unicode) -> unicode
pathdir = path.abspath(pathdir)
if isinstance(pathdir, bytes):
pathdir = pathdir.decode(fs_encoding)
@@ -205,6 +224,7 @@ def abspath(pathdir):
def getcwd():
+ # type: () -> unicode
if hasattr(os, 'getcwdu'):
return os.getcwdu()
return os.getcwd()
@@ -212,6 +232,7 @@ def getcwd():
@contextlib.contextmanager
def cd(target_dir):
+ # type: (unicode) -> Iterator[None]
cwd = getcwd()
try:
os.chdir(target_dir)
@@ -233,19 +254,22 @@ class FileAvoidWrite(object):
Objects can be used as context managers.
"""
def __init__(self, path):
+ # type: (unicode) -> None
self._path = path
- self._io = None
+ self._io = None # type: Union[StringIO, BytesIO]
def write(self, data):
+ # type: (Union[str, unicode]) -> None
if not self._io:
if isinstance(data, text_type):
self._io = StringIO()
else:
self._io = BytesIO()
- self._io.write(data)
+ self._io.write(data) # type: ignore
def close(self):
+ # type: () -> None
"""Stop accepting writes and write file, if needed."""
if not self._io:
raise Exception('FileAvoidWrite does not support empty files.')
@@ -273,12 +297,15 @@ class FileAvoidWrite(object):
f.write(buf)
def __enter__(self):
+ # type: () -> FileAvoidWrite
return self
def __exit__(self, type, value, traceback):
+ # type: (unicode, unicode, unicode) -> None
self.close()
def __getattr__(self, name):
+ # type: (str) -> Any
# Proxy to _io instance.
if not self._io:
raise Exception('Must write to FileAvoidWrite before other '
@@ -288,6 +315,7 @@ class FileAvoidWrite(object):
def rmtree(path):
+ # type: (unicode) -> None
if os.path.isdir(path):
shutil.rmtree(path)
else:
diff --git a/sphinx/util/parallel.py b/sphinx/util/parallel.py
index 109448e05..bcc6117f6 100644
--- a/sphinx/util/parallel.py
+++ b/sphinx/util/parallel.py
@@ -13,15 +13,22 @@ import os
import time
import traceback
from math import sqrt
+from six import iteritems
try:
import multiprocessing
except ImportError:
multiprocessing = None
-from six import iteritems
-
from sphinx.errors import SphinxParallelError
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Sequence # NOQA
+
+logger = logging.getLogger(__name__)
+
# our parallel functionality only works for the forking Process
parallel_available = multiprocessing and (os.name == 'posix')
@@ -31,9 +38,11 @@ class SerialTasks(object):
"""Has the same interface as ParallelTasks, but executes tasks directly."""
def __init__(self, nproc=1):
+ # type: (int) -> None
pass
def add_task(self, task_func, arg=None, result_func=None):
+ # type: (Callable, Any, Callable) -> None
if arg is not None:
res = task_func(arg)
else:
@@ -42,6 +51,7 @@ class SerialTasks(object):
result_func(res)
def join(self):
+ # type: () -> None
pass
@@ -49,37 +59,45 @@ class ParallelTasks(object):
"""Executes *nproc* tasks in parallel after forking."""
def __init__(self, nproc):
+ # type: (int) -> None
self.nproc = nproc
# (optional) function performed by each task on the result of main task
- self._result_funcs = {}
+ self._result_funcs = {} # type: Dict[int, Callable]
# task arguments
- self._args = {}
+ self._args = {} # type: Dict[int, List[Any]]
# list of subprocesses (both started and waiting)
- self._procs = {}
+ self._procs = {} # type: Dict[int, multiprocessing.Process]
# list of receiving pipe connections of running subprocesses
- self._precvs = {}
+ self._precvs = {} # type: Dict[int, Any]
# list of receiving pipe connections of waiting subprocesses
- self._precvsWaiting = {}
+ self._precvsWaiting = {} # type: Dict[int, Any]
# number of working subprocesses
self._pworking = 0
# task number of each subprocess
self._taskid = 0
def _process(self, pipe, func, arg):
+ # type: (Any, Callable, Any) -> None
try:
- if arg is None:
- ret = func()
- else:
- ret = func(arg)
- pipe.send((False, ret))
+ collector = logging.LogCollector()
+ with collector.collect():
+ if arg is None:
+ ret = func()
+ else:
+ ret = func(arg)
+ failed = False
except BaseException as err:
- errmsg = traceback.format_exception_only(err.__class__, err)[0].strip()
- pipe.send((True, (errmsg, traceback.format_exc())))
+ failed = True
+ errmsg = traceback.format_exception_only(err.__class__, err)[0].strip() # type: ignore # NOQA
+ ret = (errmsg, traceback.format_exc())
+ logging.convert_serializable(collector.logs)
+ pipe.send((failed, collector.logs, ret))
def add_task(self, task_func, arg=None, result_func=None):
+ # type: (Callable, Any, Callable) -> None
tid = self._taskid
self._taskid += 1
- self._result_funcs[tid] = result_func or (lambda arg: None)
+ self._result_funcs[tid] = result_func or (lambda arg, result: None)
self._args[tid] = arg
precv, psend = multiprocessing.Pipe(False)
proc = multiprocessing.Process(target=self._process,
@@ -89,15 +107,19 @@ class ParallelTasks(object):
self._join_one()
def join(self):
+ # type: () -> None
while self._pworking:
self._join_one()
def _join_one(self):
+ # type: () -> None
for tid, pipe in iteritems(self._precvs):
if pipe.poll():
- exc, result = pipe.recv()
+ exc, logs, result = pipe.recv()
if exc:
raise SphinxParallelError(*result)
+ for log in logs:
+ logger.handle(log)
self._result_funcs.pop(tid)(self._args.pop(tid), result)
self._procs[tid].join()
self._pworking -= 1
@@ -112,6 +134,7 @@ class ParallelTasks(object):
def make_chunks(arguments, nproc, maxbatch=10):
+ # type: (Sequence[unicode], int, int) -> List[Any]
# determine how many documents to read in one go
nargs = len(arguments)
chunksize = nargs // nproc
diff --git a/sphinx/util/png.py b/sphinx/util/png.py
index 5a15dc703..cc4447e4e 100644
--- a/sphinx/util/png.py
+++ b/sphinx/util/png.py
@@ -22,6 +22,7 @@ IEND_CHUNK = b'\x00\x00\x00\x00IEND\xAE\x42\x60\x82'
def read_png_depth(filename):
+ # type: (unicode) -> int
"""Read the special tEXt chunk indicating the depth from a PNG file."""
with open(filename, 'rb') as f:
f.seek(- (LEN_IEND + LEN_DEPTH), 2)
@@ -34,6 +35,7 @@ def read_png_depth(filename):
def write_png_depth(filename, depth):
+ # type: (unicode, int) -> None
"""Write the special tEXt chunk indicating the depth to a PNG file.
The chunk is placed immediately before the special IEND chunk.
diff --git a/sphinx/util/pycompat.py b/sphinx/util/pycompat.py
index 85bf50a43..5a1fda909 100644
--- a/sphinx/util/pycompat.py
+++ b/sphinx/util/pycompat.py
@@ -9,38 +9,61 @@
:license: BSD, see LICENSE for details.
"""
-import io
import sys
import codecs
-import warnings
-from six import class_types
from six import PY3, text_type, exec_
-from six.moves import zip_longest
-from itertools import product
-from sphinx.deprecation import RemovedInSphinx16Warning
+if False:
+ # For type annotation
+ from typing import Any, Callable, Generator # NOQA
+
NoneType = type(None)
# ------------------------------------------------------------------------------
# Python 2/3 compatibility
+# prefix for Unicode strings
if PY3:
- # Python 3
- # prefix for Unicode strings
u = ''
+else:
+ u = 'u'
+
+
+# TextIOWrapper
+if PY3:
from io import TextIOWrapper
+else:
+ def TextIOWrapper(stream, encoding):
+ # type: (file, str) -> unicode
+ return codecs.lookup(encoding or 'ascii')[2](stream)
+
+
+# sys_encoding: some kind of default system encoding; should be used with
+# a lenient error handler
+if PY3:
+ sys_encoding = sys.getdefaultencoding()
+else:
+ sys_encoding = __import__('locale').getpreferredencoding()
- # safely encode a string for printing to the terminal
+
+# terminal_safe(): safely encode a string for printing to the terminal
+if PY3:
def terminal_safe(s):
+ # type: (unicode) -> unicode
return s.encode('ascii', 'backslashreplace').decode('ascii')
- # some kind of default system encoding; should be used with a lenient
- # error handler
- sys_encoding = sys.getdefaultencoding()
+else:
+ def terminal_safe(s):
+ # type: (unicode) -> unicode
+ return s.encode('ascii', 'backslashreplace')
+
+# convert_with_2to3():
+if PY3:
# support for running 2to3 over config files
def convert_with_2to3(filepath):
+ # type: (unicode) -> unicode
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
from lib2to3.pgen2.parse import ParseError
fixers = get_fixers_from_package('lib2to3.fixes')
@@ -54,55 +77,57 @@ if PY3:
# try to match ParseError details with SyntaxError details
raise SyntaxError(err.msg, (filepath, lineno, offset, err.value))
return text_type(tree)
- from html import escape as htmlescape # noqa: >= Python 3.2
+else:
+ # no need to refactor on 2.x versions
+ convert_with_2to3 = None # type: ignore
+
+# htmlescape()
+if PY3:
+ from html import escape as htmlescape
+else:
+ from cgi import escape as htmlescape # NOQA
+
+
+# UnicodeMixin
+if PY3:
class UnicodeMixin(object):
"""Mixin class to handle defining the proper __str__/__unicode__
methods in Python 2 or 3."""
def __str__(self):
return self.__unicode__()
-
- from textwrap import indent
-
else:
- # Python 2
- u = 'u'
- # no need to refactor on 2.x versions
- convert_with_2to3 = None
-
- def TextIOWrapper(stream, encoding):
- return codecs.lookup(encoding or 'ascii')[2](stream)
-
- # safely encode a string for printing to the terminal
- def terminal_safe(s):
- return s.encode('ascii', 'backslashreplace')
- # some kind of default system encoding; should be used with a lenient
- # error handler
- sys_encoding = __import__('locale').getpreferredencoding()
- # use Python 3 name
- from cgi import escape as htmlescape # noqa: F401
-
class UnicodeMixin(object):
"""Mixin class to handle defining the proper __str__/__unicode__
methods in Python 2 or 3."""
def __str__(self):
- return self.__unicode__().encode('utf8')
+ # type: () -> str
+ return self.__unicode__().encode('utf8') # type: ignore
+
+# indent()
+if PY3:
+ from textwrap import indent
+else:
# backport from python3
def indent(text, prefix, predicate=None):
+ # type: (unicode, unicode, Callable) -> unicode
if predicate is None:
def predicate(line):
+ # type: (unicode) -> unicode
return line.strip()
def prefixed_lines():
+ # type: () -> Generator
for line in text.splitlines(True):
yield (prefix + line if predicate(line) else line)
return ''.join(prefixed_lines())
def execfile_(filepath, _globals, open=open):
+ # type: (unicode, Any, Callable) -> None
from sphinx.util.osutil import fs_encoding
# get config source -- 'b' is a no-op under 2.x, while 'U' is
# ignored under 3.x (but 3.x compile() accepts \r\n newlines)
@@ -110,10 +135,6 @@ def execfile_(filepath, _globals, open=open):
with open(filepath, mode) as f:
source = f.read()
- # py26 accept only LF eol instead of CRLF
- if sys.version_info[:2] == (2, 6):
- source = source.replace(b'\r\n', b'\n')
-
# compile to a code object, handle syntax errors
filepath_enc = filepath.encode(fs_encoding)
try:
@@ -127,36 +148,3 @@ def execfile_(filepath, _globals, open=open):
else:
raise
exec_(code, _globals)
-
-# ------------------------------------------------------------------------------
-# Internal module backwards-compatibility
-
-
-class _DeprecationWrapper(object):
- def __init__(self, mod, deprecated):
- self._mod = mod
- self._deprecated = deprecated
-
- def __getattr__(self, attr):
- if attr in self._deprecated:
- warnings.warn("sphinx.util.pycompat.%s is deprecated and will be "
- "removed in Sphinx 1.6, please use the standard "
- "library version instead." % attr,
- RemovedInSphinx16Warning, stacklevel=2)
- return self._deprecated[attr]
- return getattr(self._mod, attr)
-
-
-sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict(
- zip_longest = zip_longest,
- product = product,
- all = all,
- any = any,
- next = next,
- open = open,
- class_types = class_types,
- base_exception = BaseException,
- relpath = __import__('os').path.relpath,
- StringIO = io.StringIO,
- BytesIO = io.BytesIO,
-))
diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py
index 48d9ae93a..3b4b7f8b6 100644
--- a/sphinx/util/requests.py
+++ b/sphinx/util/requests.py
@@ -24,14 +24,14 @@ try:
except ImportError:
# python-requests package in Debian jessie does not provide ``requests.packages.urllib3``.
# So try to import the exceptions from urllib3 package.
- from urllib3.exceptions import SSLError
+ from urllib3.exceptions import SSLError # type: ignore
try:
from requests.packages.urllib3.exceptions import InsecureRequestWarning
except ImportError:
try:
# for Debian-jessie
- from urllib3.exceptions import InsecureRequestWarning
+ from urllib3.exceptions import InsecureRequestWarning # type: ignore
except ImportError:
# for requests < 2.4.0
InsecureRequestWarning = None
@@ -65,11 +65,17 @@ else:
'install requests-2.4.1+.'
)
+if False:
+ # For type annotation
+ from typing import Any, Generator, Union # NOQA
+ from sphinx.config import Config # NOQA
+
useragent_header = [('User-Agent',
'Mozilla/5.0 (X11; Linux x86_64; rv:25.0) Gecko/20100101 Firefox/25.0')]
def is_ssl_error(exc):
+ # type: (Exception) -> bool
"""Check an exception is SSLError."""
if isinstance(exc, SSLError):
return True
@@ -83,6 +89,7 @@ def is_ssl_error(exc):
@contextmanager
def ignore_insecure_warning(**kwargs):
+ # type: (Any) -> Generator
with warnings.catch_warnings():
if not kwargs.get('verify') and InsecureRequestWarning:
# ignore InsecureRequestWarning if verify=False
@@ -91,6 +98,7 @@ def ignore_insecure_warning(**kwargs):
def _get_tls_cacert(url, config):
+ # type: (unicode, Config) -> Union[str, bool]
"""Get addiotinal CA cert for a specific URL.
This also returns ``False`` if verification is disabled.
@@ -102,8 +110,8 @@ def _get_tls_cacert(url, config):
certs = getattr(config, 'tls_cacerts', None)
if not certs:
return True
- elif isinstance(certs, (string_types, tuple)):
- return certs
+ elif isinstance(certs, (string_types, tuple)): # type: ignore
+ return certs # type: ignore
else:
hostname = urlsplit(url)[1]
if '@' in hostname:
@@ -113,6 +121,7 @@ def _get_tls_cacert(url, config):
def get(url, **kwargs):
+ # type: (unicode, Any) -> requests.Response
"""Sends a GET request like requests.get().
This sets up User-Agent header and TLS verification automatically."""
@@ -126,6 +135,7 @@ def get(url, **kwargs):
def head(url, **kwargs):
+ # type: (unicode, Any) -> requests.Response
"""Sends a HEAD request like requests.head().
This sets up User-Agent header and TLS verification automatically."""
diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py
index c53076a80..8186130cf 100644
--- a/sphinx/util/rst.py
+++ b/sphinx/util/rst.py
@@ -11,8 +11,9 @@
import re
-symbols_re = re.compile('([!-/:-@\[-`{-~])')
+symbols_re = re.compile(r'([!-/:-@\[-`{-~])')
def escape(text):
- return symbols_re.sub(r'\\\1', text)
+ # type: (unicode) -> unicode
+ return symbols_re.sub(r'\\\1', text) # type: ignore
diff --git a/sphinx/util/smartypants.py b/sphinx/util/smartypants.py
index dee2f50ba..0146ba6e9 100644
--- a/sphinx/util/smartypants.py
+++ b/sphinx/util/smartypants.py
@@ -73,11 +73,16 @@ smartypants.py license::
import re
+if False:
+ # For type annotation
+ from typing import Tuple # NOQA
+
def sphinx_smarty_pants(t):
+ # type: (unicode) -> unicode
t = t.replace('&quot;', '"')
t = educate_dashes_oldschool(t)
- t = educate_quotes(t)
+ t = educate_quotes(t) # type: ignore
t = t.replace('"', '&quot;')
return t
@@ -155,6 +160,7 @@ closing_single_quotes_regex_2 = re.compile(r"""
def educate_quotes(s):
+ # type: (str) -> str
"""
Parameter: String.
@@ -194,6 +200,7 @@ def educate_quotes(s):
def educate_quotes_latex(s, dquotes=("``", "''")):
+ # type: (str, Tuple[str, str]) -> unicode
"""
Parameter: String.
@@ -237,6 +244,7 @@ def educate_quotes_latex(s, dquotes=("``", "''")):
def educate_backticks(s):
+ # type: (unicode) -> unicode
"""
Parameter: String.
Returns: The string, with ``backticks'' -style double quotes
@@ -248,6 +256,7 @@ def educate_backticks(s):
def educate_single_backticks(s):
+ # type: (unicode) -> unicode
"""
Parameter: String.
Returns: The string, with `backticks' -style single quotes
@@ -260,6 +269,7 @@ def educate_single_backticks(s):
def educate_dashes_oldschool(s):
+ # type: (unicode) -> unicode
"""
Parameter: String.
@@ -271,6 +281,7 @@ def educate_dashes_oldschool(s):
def educate_dashes_oldschool_inverted(s):
+ # type: (unicode) -> unicode
"""
Parameter: String.
@@ -289,6 +300,7 @@ def educate_dashes_oldschool_inverted(s):
def educate_ellipses(s):
+ # type: (unicode) -> unicode
"""
Parameter: String.
Returns: The string, with each instance of "..." translated to
diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py
new file mode 100644
index 000000000..f36924223
--- /dev/null
+++ b/sphinx/util/stemmer/__init__.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.stemmer
+ ~~~~~~~~~~~~~~~~~~~
+
+ Word stemming utilities for Sphinx.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.util.stemmer.porter import PorterStemmer
+
+try:
+ from Stemmer import Stemmer as _PyStemmer
+ PYSTEMMER = True
+except ImportError:
+ PYSTEMMER = False
+
+
+class BaseStemmer(object):
+ def stem(self, word):
+ # type: (unicode) -> unicode
+ raise NotImplemented
+
+
+class PyStemmer(BaseStemmer):
+ def __init__(self):
+ # type: () -> None
+ self.stemmer = _PyStemmer('porter')
+
+ def stem(self, word):
+ # type: (unicode) -> unicode
+ return self.stemmer.stemWord(word)
+
+
+class StandardStemmer(BaseStemmer, PorterStemmer): # type: ignore
+ """All those porter stemmer implementations look hideous;
+ make at least the stem method nicer.
+ """
+ def stem(self, word): # type: ignore
+ # type: (unicode) -> unicode
+ return PorterStemmer.stem(self, word, 0, len(word) - 1)
+
+
+def get_stemmer():
+ # type: () -> BaseStemmer
+ if PYSTEMMER:
+ return PyStemmer()
+ else:
+ return StandardStemmer()
diff --git a/sphinx/util/stemmer.py b/sphinx/util/stemmer/porter.py
index 951c6ab67..beb860c9e 100644
--- a/sphinx/util/stemmer.py
+++ b/sphinx/util/stemmer/porter.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
- sphinx.util.stemmer
- ~~~~~~~~~~~~~~~~~~~
+ sphinx.util.stemmer.porter
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
Porter Stemming Algorithm
@@ -32,6 +32,7 @@
class PorterStemmer(object):
def __init__(self):
+ # type: () -> None
"""The main part of the stemming algorithm starts here.
b is a buffer holding a word to be stemmed. The letters are in b[k0],
b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
@@ -42,12 +43,14 @@ class PorterStemmer(object):
should be done before stem(...) is called.
"""
- self.b = "" # buffer for word to be stemmed
+ self.b = "" # type: unicode
+ # buffer for word to be stemmed
self.k = 0
self.k0 = 0
- self.j = 0 # j is a general offset into the string
+ self.j = 0 # j is a general offset into the string
def cons(self, i):
+ # type: (int) -> int
"""cons(i) is TRUE <=> b[i] is a consonant."""
if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' \
or self.b[i] == 'o' or self.b[i] == 'u':
@@ -60,6 +63,7 @@ class PorterStemmer(object):
return 1
def m(self):
+ # type: () -> int
"""m() measures the number of consonant sequences between k0 and j.
if c is a consonant sequence and v a vowel sequence, and <..>
indicates arbitrary presence,
@@ -97,6 +101,7 @@ class PorterStemmer(object):
i = i + 1
def vowelinstem(self):
+ # type: () -> int
"""vowelinstem() is TRUE <=> k0,...j contains a vowel"""
for i in range(self.k0, self.j + 1):
if not self.cons(i):
@@ -104,6 +109,7 @@ class PorterStemmer(object):
return 0
def doublec(self, j):
+ # type: (int) -> int
"""doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
if j < (self.k0 + 1):
return 0
@@ -112,6 +118,7 @@ class PorterStemmer(object):
return self.cons(j)
def cvc(self, i):
+ # type: (int) -> int
"""cvc(i) is TRUE <=> i-2,i-1,i has the form
consonant - vowel - consonant
and also if the second c is not w,x or y. this is used when trying to
@@ -129,6 +136,7 @@ class PorterStemmer(object):
return 1
def ends(self, s):
+ # type: (unicode) -> int
"""ends(s) is TRUE <=> k0,...k ends with the string s."""
length = len(s)
if s[length - 1] != self.b[self.k]: # tiny speed-up
@@ -141,6 +149,7 @@ class PorterStemmer(object):
return 1
def setto(self, s):
+ # type: (unicode) -> None
"""setto(s) sets (j+1),...k to the characters in the string s,
readjusting k."""
length = len(s)
@@ -148,11 +157,13 @@ class PorterStemmer(object):
self.k = self.j + length
def r(self, s):
+ # type: (unicode) -> None
"""r(s) is used further down."""
if self.m() > 0:
self.setto(s)
def step1ab(self):
+ # type: () -> None
"""step1ab() gets rid of plurals and -ed or -ing. e.g.
caresses -> caress
@@ -200,12 +211,14 @@ class PorterStemmer(object):
self.setto("e")
def step1c(self):
+ # type: () -> None
"""step1c() turns terminal y to i when there is another vowel in
the stem."""
if (self.ends("y") and self.vowelinstem()):
self.b = self.b[:self.k] + 'i' + self.b[self.k + 1:]
def step2(self):
+ # type: () -> None
"""step2() maps double suffices to single ones.
so -ization ( = -ize plus -ation) maps to -ize etc. note that the
string before the suffix must give m() > 0.
@@ -265,6 +278,7 @@ class PorterStemmer(object):
# To match the published algorithm, delete this phrase
def step3(self):
+ # type: () -> None
"""step3() dels with -ic-, -full, -ness etc. similar strategy
to step2."""
if self.b[self.k] == 'e':
@@ -287,6 +301,7 @@ class PorterStemmer(object):
self.r("")
def step4(self):
+ # type: () -> None
"""step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
if self.b[self.k - 1] == 'a':
if self.ends("al"):
@@ -370,6 +385,7 @@ class PorterStemmer(object):
self.k = self.j
def step5(self):
+ # type: () -> None
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
@@ -382,6 +398,7 @@ class PorterStemmer(object):
self.k = self.k - 1
def stem(self, p, i, j):
+ # type: (unicode, int, int) -> unicode
"""In stem(p,i,j), p is a char pointer, and the string to be stemmed
is from p[i] to p[j] inclusive. Typically i is zero and j is the
offset to the last character of a string, (p[j+1] == '\0'). The
diff --git a/sphinx/util/tags.py b/sphinx/util/tags.py
index 51aa8d6e1..24f64bece 100644
--- a/sphinx/util/tags.py
+++ b/sphinx/util/tags.py
@@ -14,6 +14,10 @@ from jinja2.environment import Environment
env = Environment()
+if False:
+ # For type annotation
+ from typing import Iterator, List # NOQA
+
class BooleanParser(Parser):
"""
@@ -21,6 +25,8 @@ class BooleanParser(Parser):
"""
def parse_compare(self):
+ # type: () -> nodes.Node
+ node = None # type: nodes.Node
token = self.stream.current
if token.type == 'name':
if token.value in ('true', 'false', 'True', 'False'):
@@ -42,23 +48,29 @@ class BooleanParser(Parser):
class Tags(object):
def __init__(self, tags=None):
+ # type: (List[unicode]) -> None
self.tags = dict.fromkeys(tags or [], True)
def has(self, tag):
+ # type: (unicode) -> bool
return tag in self.tags
__contains__ = has
def __iter__(self):
+ # type: () -> Iterator[unicode]
return iter(self.tags)
def add(self, tag):
+ # type: (unicode) -> None
self.tags[tag] = True
def remove(self, tag):
+ # type: (unicode) -> None
self.tags.pop(tag, None)
def eval_condition(self, condition):
+ # type: (unicode) -> bool
# exceptions are handled by the caller
parser = BooleanParser(env, condition, state='variable')
expr = parser.parse_expression()
@@ -66,19 +78,20 @@ class Tags(object):
raise ValueError('chunk after expression')
def eval_node(node):
+ # type: (nodes.Node) -> bool
if isinstance(node, nodes.CondExpr):
- if eval_node(node.test):
- return eval_node(node.expr1)
+ if eval_node(node.test): # type: ignore
+ return eval_node(node.expr1) # type: ignore
else:
- return eval_node(node.expr2)
+ return eval_node(node.expr2) # type: ignore
elif isinstance(node, nodes.And):
- return eval_node(node.left) and eval_node(node.right)
+ return eval_node(node.left) and eval_node(node.right) # type: ignore
elif isinstance(node, nodes.Or):
- return eval_node(node.left) or eval_node(node.right)
+ return eval_node(node.left) or eval_node(node.right) # type: ignore
elif isinstance(node, nodes.Not):
- return not eval_node(node.node)
+ return not eval_node(node.node) # type: ignore
elif isinstance(node, nodes.Name):
- return self.tags.get(node.name, False)
+ return self.tags.get(node.name, False) # type: ignore
else:
raise ValueError('invalid node, check parsing')
diff --git a/sphinx/util/template.py b/sphinx/util/template.py
index ced31ee24..87e81d823 100644
--- a/sphinx/util/template.py
+++ b/sphinx/util/template.py
@@ -14,44 +14,62 @@ from jinja2.sandbox import SandboxedEnvironment
from sphinx import package_dir
from sphinx.jinja2glue import SphinxFileSystemLoader
+from sphinx.locale import get_translator
+
+if False:
+ # For type annotation
+ from typing import Dict # NOQA
+ from jinja2.loaders import BaseLoader # NOQA
class BaseRenderer(object):
def __init__(self, loader=None):
- self.env = SandboxedEnvironment(loader=loader)
+ # type: (BaseLoader) -> None
+ self.env = SandboxedEnvironment(loader=loader, extensions=['jinja2.ext.i18n'])
self.env.filters['repr'] = repr
+ self.env.install_gettext_translations(get_translator()) # type: ignore
def render(self, template_name, context):
+ # type: (unicode, Dict) -> unicode
return self.env.get_template(template_name).render(context)
def render_string(self, source, context):
+ # type: (unicode, Dict) -> unicode
return self.env.from_string(source).render(context)
class FileRenderer(BaseRenderer):
def __init__(self, search_path):
+ # type: (unicode) -> None
loader = SphinxFileSystemLoader(search_path)
super(FileRenderer, self).__init__(loader)
@classmethod
def render_from_file(cls, filename, context):
+ # type: (unicode, Dict) -> unicode
dirname = os.path.dirname(filename)
basename = os.path.basename(filename)
return cls(dirname).render(basename, context)
class SphinxRenderer(FileRenderer):
- def __init__(self):
- super(SphinxRenderer, self).__init__(os.path.join(package_dir, 'templates'))
+ def __init__(self, template_path=None):
+ # type: (unicode) -> None
+ if template_path is None:
+ template_path = os.path.join(package_dir, 'templates')
+ super(SphinxRenderer, self).__init__(template_path)
@classmethod
def render_from_file(cls, filename, context):
+ # type: (unicode, Dict) -> unicode
return FileRenderer.render_from_file(filename, context)
class LaTeXRenderer(SphinxRenderer):
def __init__(self):
- super(LaTeXRenderer, self).__init__()
+ # type: () -> None
+ template_path = os.path.join(package_dir, 'templates', 'latex')
+ super(LaTeXRenderer, self).__init__(template_path)
# use JSP/eRuby like tagging instead because curly bracket; the default
# tagging of jinja2 is not good for LaTeX sources.
diff --git a/sphinx/util/texescape.py b/sphinx/util/texescape.py
index 6d375aa56..d9102c417 100644
--- a/sphinx/util/texescape.py
+++ b/sphinx/util/texescape.py
@@ -126,6 +126,7 @@ tex_hl_escape_map_new = {}
def init():
+ # type: () -> None
for a, b in tex_replacements:
tex_escape_map[ord(a)] = b
tex_replace_map[ord(a)] = '_'
diff --git a/sphinx/util/typing.py b/sphinx/util/typing.py
new file mode 100644
index 000000000..20c8883e4
--- /dev/null
+++ b/sphinx/util/typing.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.util.typing
+ ~~~~~~~~~~~~~~~~~~
+
+ The composite types for Sphinx.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from six import PY3
+from typing import Callable, Dict, List, Tuple
+
+from docutils import nodes
+from docutils.parsers.rst.states import Inliner
+
+
+if PY3:
+ unicode = str
+
+# common role functions
+RoleFunction = Callable[[unicode, unicode, unicode, int, Inliner, Dict, List[unicode]],
+ Tuple[List[nodes.Node], List[nodes.Node]]]
diff --git a/sphinx/util/websupport.py b/sphinx/util/websupport.py
index b07c7ddd8..b9a76e5c5 100644
--- a/sphinx/util/websupport.py
+++ b/sphinx/util/websupport.py
@@ -7,7 +7,12 @@
:license: BSD, see LICENSE for details.
"""
+if False:
+ # For type annotation
+ from docutils import nodes # NOQA
+
def is_commentable(node):
+ # type: (nodes.Node) -> bool
# return node.__class__.__name__ in ('paragraph', 'literal_block')
return node.__class__.__name__ == 'paragraph'
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
index 419dbea25..97a013135 100644
--- a/sphinx/versioning.py
+++ b/sphinx/versioning.py
@@ -16,6 +16,11 @@ from itertools import product
from six import iteritems
from six.moves import range, zip_longest
+if False:
+ # For type annotation
+ from typing import Any, Iterator # NOQA
+ from docutils import nodes # NOQA
+
try:
import Levenshtein
IS_SPEEDUP = True
@@ -27,6 +32,7 @@ VERSIONING_RATIO = 65
def add_uids(doctree, condition):
+ # type: (nodes.Node, Any) -> Iterator[nodes.Node]
"""Add a unique id to every node in the `doctree` which matches the
condition and yield the nodes.
@@ -42,6 +48,7 @@ def add_uids(doctree, condition):
def merge_doctrees(old, new, condition):
+ # type: (nodes.Node, nodes.Node, Any) -> Iterator[nodes.Node]
"""Merge the `old` doctree with the `new` one while looking at nodes
matching the `condition`.
@@ -90,7 +97,7 @@ def merge_doctrees(old, new, condition):
# choose the old node with the best ratio for each new node and set the uid
# as long as the ratio is under a certain value, in which case we consider
# them not changed but different
- ratios = sorted(iteritems(ratios), key=itemgetter(1))
+ ratios = sorted(iteritems(ratios), key=itemgetter(1)) # type: ignore
for (old_node, new_node), ratio in ratios:
if new_node in seen:
continue
@@ -109,6 +116,7 @@ def merge_doctrees(old, new, condition):
def get_ratio(old, new):
+ # type: (unicode, unicode) -> float
"""Return a "similiarity ratio" (in percent) representing the similarity
between the two strings where 0 is equal and anything above less than equal.
"""
@@ -122,6 +130,7 @@ def get_ratio(old, new):
def levenshtein_distance(a, b):
+ # type: (unicode, unicode) -> int
"""Return the Levenshtein edit distance between two strings *a* and *b*."""
if a == b:
return 0
@@ -137,5 +146,5 @@ def levenshtein_distance(a, b):
deletions = current_row[j] + 1
substitutions = previous_row[j] + (column1 != column2)
current_row.append(min(insertions, deletions, substitutions))
- previous_row = current_row
+ previous_row = current_row # type: ignore
return previous_row[-1]
diff --git a/sphinx/websupport/__init__.py b/sphinx/websupport/__init__.py
index 0323a32e8..6d6b289dc 100644
--- a/sphinx/websupport/__init__.py
+++ b/sphinx/websupport/__init__.py
@@ -26,6 +26,10 @@ from sphinx.websupport import errors
from sphinx.websupport.search import BaseSearch, SEARCH_ADAPTERS
from sphinx.websupport.storage import StorageBackend
+if False:
+ # For type annotation
+ from typing import Dict # NOQA
+
class WebSupport(object):
"""The main API class for the web support package. All interactions
@@ -66,7 +70,7 @@ class WebSupport(object):
self._init_search(search)
self._init_storage(storage)
- self._globalcontext = None
+ self._globalcontext = None # type: ignore
self._make_base_comment_options()
@@ -119,7 +123,7 @@ class WebSupport(object):
raise RuntimeError('No srcdir associated with WebSupport object')
app = Sphinx(self.srcdir, self.srcdir, self.outdir, self.doctreedir,
'websupport', status=self.status, warning=self.warning)
- app.builder.set_webinfo(self.staticdir, self.staticroot,
+ app.builder.set_webinfo(self.staticdir, self.staticroot, # type: ignore
self.search, self.storage)
self.storage.pre_build()
@@ -384,7 +388,7 @@ class WebSupport(object):
that remains the same throughout the lifetime of the
:class:`~sphinx.websupport.WebSupport` object.
"""
- self.base_comment_opts = {}
+ self.base_comment_opts = {} # type: Dict[unicode, unicode]
if self.docroot != '':
comment_urls = [
diff --git a/sphinx/websupport/storage/sqlalchemy_db.py b/sphinx/websupport/storage/sqlalchemy_db.py
index 75dcf2538..bbafc4860 100644
--- a/sphinx/websupport/storage/sqlalchemy_db.py
+++ b/sphinx/websupport/storage/sqlalchemy_db.py
@@ -17,13 +17,17 @@ from sqlalchemy import Column, Integer, Text, String, Boolean, \
from sqlalchemy.orm import relation, sessionmaker, aliased
from sqlalchemy.ext.declarative import declarative_base
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
Base = declarative_base()
Session = sessionmaker()
db_prefix = 'sphinx_'
-class Node(Base):
+class Node(Base): # type: ignore
"""Data about a Node in a doctree."""
__tablename__ = db_prefix + 'nodes'
@@ -74,7 +78,7 @@ class Node(Base):
:param results: the flat list of comments
:param username: the name of the user requesting the comments.
"""
- comments = []
+ comments = [] # type: List
list_stack = [comments]
for r in results:
if username:
@@ -101,7 +105,7 @@ class Node(Base):
self.source = source
-class CommentVote(Base):
+class CommentVote(Base): # type: ignore
"""A vote a user has made on a Comment."""
__tablename__ = db_prefix + 'commentvote'
@@ -117,7 +121,7 @@ class CommentVote(Base):
self.value = value
-class Comment(Base):
+class Comment(Base): # type: ignore
"""An individual Comment being stored."""
__tablename__ = db_prefix + 'comments'
diff --git a/sphinx/websupport/storage/sqlalchemystorage.py b/sphinx/websupport/storage/sqlalchemystorage.py
index 08cbc4935..48997b032 100644
--- a/sphinx/websupport/storage/sqlalchemystorage.py
+++ b/sphinx/websupport/storage/sqlalchemystorage.py
@@ -22,7 +22,7 @@ from sphinx.websupport.storage.sqlalchemy_db import Base, Node, \
Comment, CommentVote, Session
from sphinx.websupport.storage.differ import CombinedHtmlDiff
-if sqlalchemy.__version__[:3] < '0.5':
+if sqlalchemy.__version__[:3] < '0.5': # type: ignore
raise ImportError('SQLAlchemy version 0.5 or greater is required for this '
'storage backend; you have version %s' % sqlalchemy.__version__)
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index e5f5446ad..45ec65662 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -13,18 +13,25 @@ import sys
import posixpath
import os
import copy
-import warnings
from six import string_types
from docutils import nodes
from docutils.writers.html4css1 import Writer, HTMLTranslator as BaseTranslator
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
+from sphinx.util import logging
from sphinx.util.images import get_image_size
from sphinx.util.smartypants import sphinx_smarty_pants
+if False:
+ # For type annotation
+ from typing import Any # NOQA
+ from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
+
+
+logger = logging.getLogger(__name__)
+
# A good overview of the purpose behind these classes can be found here:
# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
@@ -38,10 +45,12 @@ class HTMLWriter(Writer):
_setting[2]['default'] = 0
def __init__(self, builder):
+ # type: (StandaloneHTMLBuilder) -> None
Writer.__init__(self)
self.builder = builder
def translate(self):
+ # type: () -> None
# sadly, this is mostly copied from parent class
self.visitor = visitor = self.builder.translator_class(self.builder,
self.document)
@@ -62,6 +71,7 @@ class HTMLTranslator(BaseTranslator):
"""
def __init__(self, builder, *args, **kwds):
+ # type: (StandaloneHTMLBuilder, Any, Any) -> None
BaseTranslator.__init__(self, *args, **kwds)
self.highlighter = builder.highlighter
self.no_smarty = 0
@@ -81,22 +91,28 @@ class HTMLTranslator(BaseTranslator):
self.param_separator = ''
self.optional_param_level = 0
self._table_row_index = 0
+ self.required_params_left = 0
def visit_start_of_file(self, node):
+ # type: (nodes.Node) -> None
# only occurs in the single-file builder
self.docnames.append(node['docname'])
self.body.append('<span id="document-%s"></span>' % node['docname'])
def depart_start_of_file(self, node):
+ # type: (nodes.Node) -> None
self.docnames.pop()
def visit_desc(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
def depart_desc(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</dl>\n\n')
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
# the id is set automatically
self.body.append(self.starttag(node, 'dt'))
# anchor for per-desc interactive data
@@ -105,44 +121,56 @@ class HTMLTranslator(BaseTranslator):
self.body.append('<!--[%s]-->' % node['ids'][0])
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
if not node.get('is_multiline'):
self.add_permalink_ref(node, _('Permalink to this definition'))
self.body.append('</dt>\n')
def visit_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
if node.get('add_permalink'):
# the permalink info is on the parent desc_signature node
self.add_permalink_ref(node.parent, _('Permalink to this definition'))
self.body.append('<br />')
def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</code>')
def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' &#x2192; ')
def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</code>')
def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('<span class="sig-paren">(</span>')
self.first_param = 1
self.optional_param_level = 0
@@ -152,6 +180,7 @@ class HTMLTranslator(BaseTranslator):
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('<span class="sig-paren">)</span>')
# If required parameters are still to come, then put the comma after
@@ -161,6 +190,7 @@ class HTMLTranslator(BaseTranslator):
# foo([a, ]b, c[, d])
#
def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if self.first_param:
self.first_param = 0
elif not self.required_params_left:
@@ -171,39 +201,49 @@ class HTMLTranslator(BaseTranslator):
self.body.append('<em>')
def depart_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not node.hasattr('noemph'):
self.body.append('</em>')
if self.required_params_left:
self.body.append(self.param_separator)
def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.optional_param_level += 1
self.body.append('<span class="optional">[</span>')
def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.optional_param_level -= 1
self.body.append('<span class="optional">]</span>')
def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'em', '', CLASS='property'))
def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</em>')
def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'dd', ''))
def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</dd>')
def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'div', CLASS=node['type']))
def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</div>\n')
# overwritten
def visit_reference(self, node):
+ # type: (nodes.Node) -> None
atts = {'class': 'reference'}
if node.get('internal') or 'refuri' not in node:
atts['class'] += ' internal'
@@ -233,17 +273,21 @@ class HTMLTranslator(BaseTranslator):
'.'.join(map(str, node['secnumber'])))
def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
self.visit_reference(node)
def depart_number_reference(self, node):
+ # type: (nodes.Node) -> None
self.depart_reference(node)
# overwritten -- we don't want source comments to show up in the HTML
def visit_comment(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
# overwritten
def visit_admonition(self, node, name=''):
+ # type: (nodes.Node, unicode) -> None
self.body.append(self.starttag(
node, 'div', CLASS=('admonition ' + name)))
if name:
@@ -251,23 +295,24 @@ class HTMLTranslator(BaseTranslator):
self.set_first_last(node)
def visit_seealso(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
+ # type: (nodes.Node) -> None
self.depart_admonition(node)
def add_secnumber(self, node):
+ # type: (nodes.Node) -> None
if node.get('secnumber'):
self.body.append('.'.join(map(str, node['secnumber'])) +
self.secnumber_suffix)
elif isinstance(node.parent, nodes.section):
if self.builder.name == 'singlehtml':
docname = self.docnames[-1]
- anchorname = '#' + node.parent['ids'][0]
- if (docname, anchorname) not in self.builder.secnumbers:
- anchorname = (docname, '') # try first heading which has no anchor
- else:
- anchorname = (docname, anchorname)
+ anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
+ if anchorname not in self.builder.secnumbers:
+ anchorname = "%s/" % docname # try first heading which has no anchor
else:
anchorname = '#' + node.parent['ids'][0]
if anchorname not in self.builder.secnumbers:
@@ -278,9 +323,11 @@ class HTMLTranslator(BaseTranslator):
self.secnumber_suffix)
def add_fignumber(self, node):
+ # type: (nodes.Node) -> None
def append_fignumber(figtype, figure_id):
+ # type: (unicode, unicode) -> None
if self.builder.name == 'singlehtml':
- key = (self.docnames[-1], figtype)
+ key = u"%s/%s" % (self.docnames[-1], figtype)
else:
key = figtype
@@ -289,26 +336,28 @@ class HTMLTranslator(BaseTranslator):
prefix = self.builder.config.numfig_format.get(figtype)
if prefix is None:
msg = 'numfig_format is not defined for %s' % figtype
- self.builder.warn(msg)
+ logger.warning(msg)
else:
numbers = self.builder.fignumbers[key][figure_id]
self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
self.body.append('</span>')
- figtype = self.builder.env.domains['std'].get_figtype(node)
+ figtype = self.builder.env.domains['std'].get_figtype(node) # type: ignore
if figtype:
if len(node['ids']) == 0:
msg = 'Any IDs not assigned for %s node' % node.tagname
- self.builder.env.warn_node(msg, node)
+ logger.warning(msg, location=node)
else:
append_fignumber(figtype, node['ids'][0])
def add_permalink_ref(self, node, title):
+ # type: (nodes.Node, unicode) -> None
if node['ids'] and self.permalink_text and self.builder.add_permalinks:
format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
self.body.append(format % (node['ids'][0], title, self.permalink_text))
def generate_targets_for_listing(self, node):
+ # type: (nodes.Node) -> None
"""Generate hyperlink targets for listings.
Original visit_bullet_list(), visit_definition_list() and visit_enumerated_list()
@@ -324,6 +373,7 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
if len(node) == 1 and node[0].tagname == 'toctree':
# avoid emitting empty <ul></ul>
raise nodes.SkipNode
@@ -332,11 +382,13 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def visit_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.generate_targets_for_listing(node)
BaseTranslator.visit_enumerated_list(self, node)
# overwritten
def visit_title(self, node):
+ # type: (nodes.Node) -> None
BaseTranslator.visit_title(self, node)
self.add_secnumber(node)
self.add_fignumber(node.parent)
@@ -344,6 +396,7 @@ class HTMLTranslator(BaseTranslator):
self.body.append('<span class="caption-text">')
def depart_title(self, node):
+ # type: (nodes.Node) -> None
close_tag = self.context[-1]
if (self.permalink_text and self.builder.add_permalinks and
node.parent.hasattr('ids') and node.parent['ids']):
@@ -366,6 +419,7 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
return BaseTranslator.visit_literal_block(self, node)
@@ -385,17 +439,17 @@ class HTMLTranslator(BaseTranslator):
else:
opts = {}
- def warner(msg, **kwargs):
- self.builder.warn(msg, (self.builder.current_docname, node.line), **kwargs)
highlighted = self.highlighter.highlight_block(
- node.rawsource, lang, opts=opts, warn=warner, linenos=linenos,
- **highlight_args)
+ node.rawsource, lang, opts=opts, linenos=linenos,
+ location=(self.builder.current_docname, node.line), **highlight_args
+ )
starttag = self.starttag(node, 'div', suffix='',
CLASS='highlight-%s' % lang)
self.body.append(starttag + highlighted + '</div>\n')
raise nodes.SkipNode
def visit_caption(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
self.body.append('<div class="code-block-caption">')
else:
@@ -404,6 +458,7 @@ class HTMLTranslator(BaseTranslator):
self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
def depart_caption(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</span>')
# append permalink if available
@@ -422,26 +477,32 @@ class HTMLTranslator(BaseTranslator):
BaseTranslator.depart_caption(self, node)
def visit_doctest_block(self, node):
+ # type: (nodes.Node) -> None
self.visit_literal_block(node)
# overwritten to add the <div> (for XHTML compliance)
def visit_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'blockquote') + '<div>')
def depart_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</div></blockquote>\n')
# overwritten
def visit_literal(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'code', '',
CLASS='docutils literal'))
self.protect_literal_text += 1
def depart_literal(self, node):
+ # type: (nodes.Node) -> None
self.protect_literal_text -= 1
self.body.append('</code>')
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'pre'))
names = []
for production in node:
@@ -461,23 +522,29 @@ class HTMLTranslator(BaseTranslator):
raise nodes.SkipNode
def depart_productionlist(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_production(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_production(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_centered(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.starttag(node, 'p', CLASS="centered") +
'<strong>')
def depart_centered(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</strong></p>')
# overwritten
def should_be_compact_paragraph(self, node):
+ # type: (nodes.Node) -> bool
"""Determine if the <p> tags around paragraph can be omitted."""
if isinstance(node.parent, addnodes.desc_content):
# Never compact desc_content items.
@@ -488,19 +555,24 @@ class HTMLTranslator(BaseTranslator):
return BaseTranslator.should_be_compact_paragraph(self, node)
def visit_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
self.highlightlang = node['lang']
self.highlightlinenothreshold = node['linenothreshold']
def depart_highlightlang(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
if self.builder.download_support and node.hasattr('filename'):
self.body.append(
'<a class="reference download internal" href="%s" download="">' %
@@ -510,10 +582,12 @@ class HTMLTranslator(BaseTranslator):
self.context.append('')
def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
# overwritten
def visit_image(self, node):
+ # type: (nodes.Node) -> None
olduri = node['uri']
# rewrite the URI if the environment knows about it
if olduri in self.builder.images:
@@ -544,8 +618,8 @@ class HTMLTranslator(BaseTranslator):
if not ('width' in node and 'height' in node):
size = get_image_size(os.path.join(self.builder.srcdir, olduri))
if size is None:
- self.builder.env.warn_node('Could not obtain image size. '
- ':scale: option is ignored.', node)
+ logger.warning('Could not obtain image size. :scale: option is ignored.',
+ location=node)
else:
if 'width' not in node:
node['width'] = str(size[0])
@@ -555,44 +629,56 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def depart_image(self, node):
+ # type: (nodes.Node) -> None
if node['uri'].lower().endswith(('svg', 'svgz')):
self.body.append(self.context.pop())
else:
BaseTranslator.depart_image(self, node)
def visit_toctree(self, node):
+ # type: (nodes.Node) -> None
# this only happens when formatting a toc from env.tocs -- in this
# case we don't want to include the subtree
raise nodes.SkipNode
def visit_index(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_acks(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_acks(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('<table class="hlist"><tr>')
def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</tr></table>\n')
def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
self.body.append('<td>')
def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</td>')
def visit_option_group(self, node):
@@ -600,10 +686,12 @@ class HTMLTranslator(BaseTranslator):
self.context[-2] = self.context[-2].replace('&nbsp;', '&#160;')
def bulk_text_processor(self, text):
+ # type: (unicode) -> unicode
return text
# overwritten
def visit_Text(self, node):
+ # type: (nodes.Node) -> None
text = node.astext()
encoded = self.encode(text)
if self.protect_literal_text:
@@ -627,102 +715,122 @@ class HTMLTranslator(BaseTranslator):
self.body.append(encoded)
def visit_note(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'note')
def depart_note(self, node):
+ # type: (nodes.Node) -> None
self.depart_admonition(node)
def visit_warning(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'warning')
def depart_warning(self, node):
+ # type: (nodes.Node) -> None
self.depart_admonition(node)
def visit_attention(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'attention')
def depart_attention(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_caution(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'caution')
def depart_caution(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_danger(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'danger')
def depart_danger(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_error(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'error')
def depart_error(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_hint(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'hint')
def depart_hint(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_important(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'important')
def depart_important(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
def visit_tip(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'tip')
def depart_tip(self, node):
- self.depart_admonition()
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
# these are only handled specially in the SmartyPantsHTMLTranslator
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
return self.visit_strong(node)
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
return self.depart_strong(node)
def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
attrs = {}
if node.hasattr('explanation'):
attrs['title'] = node['explanation']
self.body.append(self.starttag(node, 'abbr', '', **attrs))
def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
self.body.append('</abbr>')
- def visit_termsep(self, node):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
- 'This warning is displayed because some Sphinx extension '
- 'uses sphinx.addnodes.termsep. Please report it to '
- 'author of the extension.', RemovedInSphinx16Warning)
- self.body.append('<br />')
- raise nodes.SkipNode
-
def visit_manpage(self, node):
- return self.visit_literal_emphasis(node)
+ # type: (nodes.Node) -> None
+ self.visit_literal_emphasis(node)
def depart_manpage(self, node):
- return self.depart_literal_emphasis(node)
+ # type: (nodes.Node) -> None
+ self.depart_literal_emphasis(node)
# overwritten to add even/odd classes
def visit_table(self, node):
+ # type: (nodes.Node) -> None
self._table_row_index = 0
return BaseTranslator.visit_table(self, node)
def visit_row(self, node):
+ # type: (nodes.Node) -> None
self._table_row_index += 1
if self._table_row_index % 2 == 0:
node['classes'].append('row-even')
@@ -737,10 +845,12 @@ class HTMLTranslator(BaseTranslator):
self.body[-1] = '&#160;'
def visit_field_list(self, node):
+ # type: (nodes.Node) -> None
self._fieldlist_row_index = 0
return BaseTranslator.visit_field_list(self, node)
def visit_field(self, node):
+ # type: (nodes.Node) -> None
self._fieldlist_row_index += 1
if self._fieldlist_row_index % 2 == 0:
node['classes'].append('field-even')
@@ -755,13 +865,15 @@ class HTMLTranslator(BaseTranslator):
self.context[-1] = self.context[-1].replace('&nbsp;', '&#160;')
def visit_math(self, node, math_env=''):
- self.builder.warn('using "math" markup without a Sphinx math extension '
- 'active, please use one of the math extensions '
- 'described at http://sphinx-doc.org/ext/math.html',
- (self.builder.current_docname, node.line))
+ # type: (nodes.Node, unicode) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html',
+ location=(self.builder.current_docname, node.line))
raise nodes.SkipNode
def unknown_visit(self, node):
+ # type: (nodes.Node) -> None
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
@@ -772,10 +884,12 @@ class SmartyPantsHTMLTranslator(HTMLTranslator):
"""
def __init__(self, *args, **kwds):
+ # type: (Any, Any) -> None
self.no_smarty = 0
HTMLTranslator.__init__(self, *args, **kwds)
def visit_literal(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
try:
# this raises SkipNode
@@ -784,6 +898,7 @@ class SmartyPantsHTMLTranslator(HTMLTranslator):
self.no_smarty -= 1
def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
try:
HTMLTranslator.visit_literal_block(self, node)
@@ -794,34 +909,42 @@ class SmartyPantsHTMLTranslator(HTMLTranslator):
raise
def depart_literal_block(self, node):
+ # type: (nodes.Node) -> None
HTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.depart_emphasis(node)
self.no_smarty -= 1
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
self.visit_strong(node)
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.depart_strong(node)
self.no_smarty -= 1
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
HTMLTranslator.visit_desc_signature(self, node)
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty -= 1
HTMLTranslator.depart_desc_signature(self, node)
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
try:
HTMLTranslator.visit_productionlist(self, node)
@@ -829,14 +952,17 @@ class SmartyPantsHTMLTranslator(HTMLTranslator):
self.no_smarty -= 1
def visit_option(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty += 1
HTMLTranslator.visit_option(self, node)
def depart_option(self, node):
+ # type: (nodes.Node) -> None
self.no_smarty -= 1
HTMLTranslator.depart_option(self, node)
def bulk_text_processor(self, text):
+ # type: (unicode) -> unicode
if self.no_smarty <= 0:
return sphinx_smarty_pants(text)
return text
diff --git a/sphinx/writers/html5.py b/sphinx/writers/html5.py
new file mode 100644
index 000000000..9deb35a88
--- /dev/null
+++ b/sphinx/writers/html5.py
@@ -0,0 +1,923 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.writers.html5
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Experimental docutils writers for HTML5 handling Sphinx' custom nodes.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import posixpath
+import os
+
+from six import string_types
+from docutils import nodes
+from docutils.writers.html5_polyglot import HTMLTranslator as BaseTranslator
+
+from sphinx import addnodes
+from sphinx.locale import admonitionlabels, _
+from sphinx.util import logging
+from sphinx.util.images import get_image_size
+from sphinx.util.smartypants import sphinx_smarty_pants
+
+if False:
+ # For type annotation
+ from typing import Any # NOQA
+ from sphinx.builders.html import StandaloneHTMLBuilder # NOQA
+
+
+logger = logging.getLogger(__name__)
+
+# A good overview of the purpose behind these classes can be found here:
+# http://www.arnebrodowski.de/blog/write-your-own-restructuredtext-writer.html
+
+
+class HTML5Translator(BaseTranslator):
+ """
+ Our custom HTML translator.
+ """
+
+ def __init__(self, builder, *args, **kwds):
+ # type: (StandaloneHTMLBuilder, Any, Any) -> None
+ BaseTranslator.__init__(self, *args, **kwds)
+ self.highlighter = builder.highlighter
+ self.no_smarty = 0
+ self.builder = builder
+ self.highlightlang = self.highlightlang_base = \
+ builder.config.highlight_language
+ self.highlightopts = builder.config.highlight_options
+ self.highlightlinenothreshold = sys.maxsize
+ self.docnames = [builder.current_docname] # for singlehtml builder
+ self.protect_literal_text = 0
+ self.permalink_text = builder.config.html_add_permalinks
+ # support backwards-compatible setting to a bool
+ if not isinstance(self.permalink_text, string_types):
+ self.permalink_text = self.permalink_text and u'\u00B6' or ''
+ self.permalink_text = self.encode(self.permalink_text)
+ self.secnumber_suffix = builder.config.html_secnumber_suffix
+ self.param_separator = ''
+ self.optional_param_level = 0
+ self._table_row_index = 0
+ self.required_params_left = 0
+
+ def visit_start_of_file(self, node):
+ # type: (nodes.Node) -> None
+ # only occurs in the single-file builder
+ self.docnames.append(node['docname'])
+ self.body.append('<span id="document-%s"></span>' % node['docname'])
+
+ def depart_start_of_file(self, node):
+ # type: (nodes.Node) -> None
+ self.docnames.pop()
+
+ def visit_desc(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'dl', CLASS=node['objtype']))
+
+ def depart_desc(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</dl>\n\n')
+
+ def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
+ # the id is set automatically
+ self.body.append(self.starttag(node, 'dt'))
+ # anchor for per-desc interactive data
+ if node.parent['objtype'] != 'describe' \
+ and node['ids'] and node['first']:
+ self.body.append('<!--[%s]-->' % node['ids'][0])
+
+ def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
+ if not node.get('is_multiline'):
+ self.add_permalink_ref(node, _('Permalink to this definition'))
+ self.body.append('</dt>\n')
+
+ def visit_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
+ if node.get('add_permalink'):
+ # the permalink info is on the parent desc_signature node
+ self.add_permalink_ref(node.parent, _('Permalink to this definition'))
+ self.body.append('<br />')
+
+ def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'code', '', CLASS='descclassname'))
+
+ def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</code>')
+
+ def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(' &#x2192; ')
+
+ def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'code', '', CLASS='descname'))
+
+ def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</code>')
+
+ def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('<span class="sig-paren">(</span>')
+ self.first_param = 1
+ self.optional_param_level = 0
+ # How many required parameters are left.
+ self.required_params_left = sum([isinstance(c, addnodes.desc_parameter)
+ for c in node.children])
+ self.param_separator = node.child_text_separator
+
+ def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('<span class="sig-paren">)</span>')
+
+ # If required parameters are still to come, then put the comma after
+ # the parameter. Otherwise, put the comma before. This ensures that
+ # signatures like the following render correctly (see issue #1001):
+ #
+ # foo([a, ]b, c[, d])
+ #
+ def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
+ if self.first_param:
+ self.first_param = 0
+ elif not self.required_params_left:
+ self.body.append(self.param_separator)
+ if self.optional_param_level == 0:
+ self.required_params_left -= 1
+ if not node.hasattr('noemph'):
+ self.body.append('<em>')
+
+ def depart_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
+ if not node.hasattr('noemph'):
+ self.body.append('</em>')
+ if self.required_params_left:
+ self.body.append(self.param_separator)
+
+ def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
+ self.optional_param_level += 1
+ self.body.append('<span class="optional">[</span>')
+
+ def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
+ self.optional_param_level -= 1
+ self.body.append('<span class="optional">]</span>')
+
+ def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'em', '', CLASS='property'))
+
+ def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</em>')
+
+ def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'dd', ''))
+
+ def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</dd>')
+
+ def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'div', CLASS=node['type']))
+
+ def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</div>\n')
+
+ # overwritten
+ def visit_reference(self, node):
+ # type: (nodes.Node) -> None
+ atts = {'class': 'reference'}
+ if node.get('internal') or 'refuri' not in node:
+ atts['class'] += ' internal'
+ else:
+ atts['class'] += ' external'
+ if 'refuri' in node:
+ atts['href'] = node['refuri'] or '#'
+ if self.settings.cloak_email_addresses and \
+ atts['href'].startswith('mailto:'):
+ atts['href'] = self.cloak_mailto(atts['href'])
+ self.in_mailto = 1
+ else:
+ assert 'refid' in node, \
+ 'References must have "refuri" or "refid" attribute.'
+ atts['href'] = '#' + node['refid']
+ if not isinstance(node.parent, nodes.TextElement):
+ assert len(node) == 1 and isinstance(node[0], nodes.image)
+ atts['class'] += ' image-reference'
+ if 'reftitle' in node:
+ atts['title'] = node['reftitle']
+ if 'target' in node:
+ atts['target'] = node['target']
+ self.body.append(self.starttag(node, 'a', '', **atts))
+
+ if node.get('secnumber'):
+ self.body.append(('%s' + self.secnumber_suffix) %
+ '.'.join(map(str, node['secnumber'])))
+
+ def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_reference(node)
+
+ def depart_number_reference(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_reference(node)
+
+ # overwritten -- we don't want source comments to show up in the HTML
+ def visit_comment(self, node):
+ # type: (nodes.Node) -> None
+ raise nodes.SkipNode
+
+ # overwritten
+ def visit_admonition(self, node, name=''):
+ # type: (nodes.Node, unicode) -> None
+ self.body.append(self.starttag(
+ node, 'div', CLASS=('admonition ' + name)))
+ if name:
+ node.insert(0, nodes.title(name, admonitionlabels[name]))
+
+ def visit_seealso(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'seealso')
+
+ def depart_seealso(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def add_secnumber(self, node):
+ # type: (nodes.Node) -> None
+ if node.get('secnumber'):
+ self.body.append('.'.join(map(str, node['secnumber'])) +
+ self.secnumber_suffix)
+ elif isinstance(node.parent, nodes.section):
+ if self.builder.name == 'singlehtml':
+ docname = self.docnames[-1]
+ anchorname = "%s/#%s" % (docname, node.parent['ids'][0])
+ if anchorname not in self.builder.secnumbers:
+ anchorname = "%s/" % docname # try first heading which has no anchor
+ else:
+ anchorname = '#' + node.parent['ids'][0]
+ if anchorname not in self.builder.secnumbers:
+ anchorname = '' # try first heading which has no anchor
+ if self.builder.secnumbers.get(anchorname):
+ numbers = self.builder.secnumbers[anchorname]
+ self.body.append('.'.join(map(str, numbers)) +
+ self.secnumber_suffix)
+
+ def add_fignumber(self, node):
+ # type: (nodes.Node) -> None
+ def append_fignumber(figtype, figure_id):
+ # type: (unicode, unicode) -> None
+ if self.builder.name == 'singlehtml':
+ key = u"%s/%s" % (self.docnames[-1], figtype)
+ else:
+ key = figtype
+
+ if figure_id in self.builder.fignumbers.get(key, {}):
+ self.body.append('<span class="caption-number">')
+ prefix = self.builder.config.numfig_format.get(figtype)
+ if prefix is None:
+ msg = 'numfig_format is not defined for %s' % figtype
+ logger.warning(msg)
+ else:
+ numbers = self.builder.fignumbers[key][figure_id]
+ self.body.append(prefix % '.'.join(map(str, numbers)) + ' ')
+ self.body.append('</span>')
+
+ figtype = self.builder.env.domains['std'].get_figtype(node) # type: ignore
+ if figtype:
+ if len(node['ids']) == 0:
+ msg = 'Any IDs not assigned for %s node' % node.tagname
+ logger.warning(msg, location=node)
+ else:
+ append_fignumber(figtype, node['ids'][0])
+
+ def add_permalink_ref(self, node, title):
+ # type: (nodes.Node, unicode) -> None
+ if node['ids'] and self.permalink_text and self.builder.add_permalinks:
+ format = u'<a class="headerlink" href="#%s" title="%s">%s</a>'
+ self.body.append(format % (node['ids'][0], title, self.permalink_text))
+
+ # overwritten
+ def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
+ if len(node) == 1 and node[0].tagname == 'toctree':
+ # avoid emitting empty <ul></ul>
+ raise nodes.SkipNode
+ BaseTranslator.visit_bullet_list(self, node)
+
+ # overwritten
+ def visit_title(self, node):
+ # type: (nodes.Node) -> None
+ BaseTranslator.visit_title(self, node)
+ self.add_secnumber(node)
+ self.add_fignumber(node.parent)
+ if isinstance(node.parent, nodes.table):
+ self.body.append('<span class="caption-text">')
+
+ def depart_title(self, node):
+ # type: (nodes.Node) -> None
+ close_tag = self.context[-1]
+ if (self.permalink_text and self.builder.add_permalinks and
+ node.parent.hasattr('ids') and node.parent['ids']):
+ # add permalink anchor
+ if close_tag.startswith('</h'):
+ self.add_permalink_ref(node.parent, _('Permalink to this headline'))
+ elif close_tag.startswith('</a></h'):
+ self.body.append(u'</a><a class="headerlink" href="#%s" ' %
+ node.parent['ids'][0] +
+ u'title="%s">%s' % (
+ _('Permalink to this headline'),
+ self.permalink_text))
+ elif isinstance(node.parent, nodes.table):
+ self.body.append('</span>')
+ self.add_permalink_ref(node.parent, _('Permalink to this table'))
+ elif isinstance(node.parent, nodes.table):
+ self.body.append('</span>')
+
+ BaseTranslator.depart_title(self, node)
+
+ # overwritten
+ def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
+ if node.rawsource != node.astext():
+ # most probably a parsed-literal block -- don't highlight
+ return BaseTranslator.visit_literal_block(self, node)
+ lang = self.highlightlang
+ linenos = node.rawsource.count('\n') >= \
+ self.highlightlinenothreshold - 1
+ highlight_args = node.get('highlight_args', {})
+ if 'language' in node:
+ # code-block directives
+ lang = node['language']
+ highlight_args['force'] = True
+ if 'linenos' in node:
+ linenos = node['linenos']
+ if lang is self.highlightlang_base:
+ # only pass highlighter options for original language
+ opts = self.highlightopts
+ else:
+ opts = {}
+
+ highlighted = self.highlighter.highlight_block(
+ node.rawsource, lang, opts=opts, linenos=linenos,
+ location=(self.builder.current_docname, node.line), **highlight_args
+ )
+ starttag = self.starttag(node, 'div', suffix='',
+ CLASS='highlight-%s' % lang)
+ self.body.append(starttag + highlighted + '</div>\n')
+ raise nodes.SkipNode
+
+ def visit_caption(self, node):
+ # type: (nodes.Node) -> None
+ if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
+ self.body.append('<div class="code-block-caption">')
+ else:
+ BaseTranslator.visit_caption(self, node)
+ self.add_fignumber(node.parent)
+ self.body.append(self.starttag(node, 'span', '', CLASS='caption-text'))
+
+ def depart_caption(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</span>')
+
+ # append permalink if available
+ if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
+ self.add_permalink_ref(node.parent, _('Permalink to this code'))
+ elif isinstance(node.parent, nodes.figure):
+ image_nodes = node.parent.traverse(nodes.image)
+ target_node = image_nodes and image_nodes[0] or node.parent
+ self.add_permalink_ref(target_node, _('Permalink to this image'))
+ elif node.parent.get('toctree'):
+ self.add_permalink_ref(node.parent.parent, _('Permalink to this toctree'))
+
+ if isinstance(node.parent, nodes.container) and node.parent.get('literal_block'):
+ self.body.append('</div>\n')
+ else:
+ BaseTranslator.depart_caption(self, node)
+
+ def visit_doctest_block(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_literal_block(node)
+
+ # overwritten to add the <div> (for XHTML compliance)
+ def visit_block_quote(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'blockquote') + '<div>')
+
+ def depart_block_quote(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</div></blockquote>\n')
+
+ # overwritten
+ def visit_literal(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'code', '',
+ CLASS='docutils literal'))
+ self.protect_literal_text += 1
+
+ def depart_literal(self, node):
+ # type: (nodes.Node) -> None
+ self.protect_literal_text -= 1
+ self.body.append('</code>')
+
+ def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'pre'))
+ names = []
+ for production in node:
+ names.append(production['tokenname'])
+ maxlen = max(len(name) for name in names)
+ lastname = None
+ for production in node:
+ if production['tokenname']:
+ lastname = production['tokenname'].ljust(maxlen)
+ self.body.append(self.starttag(production, 'strong', ''))
+ self.body.append(lastname + '</strong> ::= ')
+ elif lastname is not None:
+ self.body.append('%s ' % (' ' * len(lastname)))
+ production.walkabout(self)
+ self.body.append('\n')
+ self.body.append('</pre>\n')
+ raise nodes.SkipNode
+
+ def depart_productionlist(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_production(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_production(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_centered(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.starttag(node, 'p', CLASS="centered") +
+ '<strong>')
+
+ def depart_centered(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</strong></p>')
+
+ # overwritten
+ def should_be_compact_paragraph(self, node):
+ # type: (nodes.Node) -> bool
+ """Determine if the <p> tags around paragraph can be omitted."""
+ if isinstance(node.parent, addnodes.desc_content):
+ # Never compact desc_content items.
+ return False
+ if isinstance(node.parent, addnodes.versionmodified):
+ # Never compact versionmodified nodes.
+ return False
+ return BaseTranslator.should_be_compact_paragraph(self, node)
+
+ def visit_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
+ self.highlightlang = node['lang']
+ self.highlightlinenothreshold = node['linenothreshold']
+
+ def depart_highlightlang(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
+ if self.builder.download_support and node.hasattr('filename'):
+ self.body.append(
+ '<a class="reference download internal" href="%s" download="">' %
+ posixpath.join(self.builder.dlpath, node['filename']))
+ self.context.append('</a>')
+ else:
+ self.context.append('')
+
+ def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append(self.context.pop())
+
+ # overwritten
+ def visit_image(self, node):
+ # type: (nodes.Node) -> None
+ olduri = node['uri']
+ # rewrite the URI if the environment knows about it
+ if olduri in self.builder.images:
+ node['uri'] = posixpath.join(self.builder.imgpath,
+ self.builder.images[olduri])
+
+ uri = node['uri']
+ if uri.lower().endswith(('svg', 'svgz')):
+ atts = {'src': uri}
+ if 'width' in node:
+ atts['width'] = node['width']
+ if 'height' in node:
+ atts['height'] = node['height']
+ atts['alt'] = node.get('alt', uri)
+ if 'align' in node:
+ self.body.append('<div align="%s" class="align-%s">' %
+ (node['align'], node['align']))
+ self.context.append('</div>\n')
+ else:
+ self.context.append('')
+ self.body.append(self.emptytag(node, 'img', '', **atts))
+ return
+
+ if 'scale' in node:
+ # Try to figure out image height and width. Docutils does that too,
+ # but it tries the final file name, which does not necessarily exist
+ # yet at the time the HTML file is written.
+ if not ('width' in node and 'height' in node):
+ size = get_image_size(os.path.join(self.builder.srcdir, olduri))
+ if size is None:
+ logger.warning('Could not obtain image size. :scale: option is ignored.',
+ location=node)
+ else:
+ if 'width' not in node:
+ node['width'] = str(size[0])
+ if 'height' not in node:
+ node['height'] = str(size[1])
+ BaseTranslator.visit_image(self, node)
+
+ # overwritten
+ def depart_image(self, node):
+ # type: (nodes.Node) -> None
+ if node['uri'].lower().endswith(('svg', 'svgz')):
+ self.body.append(self.context.pop())
+ else:
+ BaseTranslator.depart_image(self, node)
+
+ def visit_toctree(self, node):
+ # type: (nodes.Node) -> None
+ # this only happens when formatting a toc from env.tocs -- in this
+ # case we don't want to include the subtree
+ raise nodes.SkipNode
+
+ def visit_index(self, node):
+ # type: (nodes.Node) -> None
+ raise nodes.SkipNode
+
+ def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
+ raise nodes.SkipNode
+
+ def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_acks(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def depart_acks(self, node):
+ # type: (nodes.Node) -> None
+ pass
+
+ def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('<table class="hlist"><tr>')
+
+ def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</tr></table>\n')
+
+ def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('<td>')
+
+ def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</td>')
+
+ def bulk_text_processor(self, text):
+ # type: (unicode) -> unicode
+ return text
+
+ # overwritten
+ def visit_Text(self, node):
+ # type: (nodes.Node) -> None
+ text = node.astext()
+ encoded = self.encode(text)
+ if self.protect_literal_text:
+ # moved here from base class's visit_literal to support
+ # more formatting in literal nodes
+ for token in self.words_and_spaces.findall(encoded):
+ if token.strip():
+ # protect literal text from line wrapping
+ self.body.append('<span class="pre">%s</span>' % token)
+ elif token in ' \n':
+ # allow breaks at whitespace
+ self.body.append(token)
+ else:
+ # protect runs of multiple spaces; the last one can wrap
+ self.body.append('&#160;' * (len(token) - 1) + ' ')
+ else:
+ if self.in_mailto and self.settings.cloak_email_addresses:
+ encoded = self.cloak_email(encoded)
+ else:
+ encoded = self.bulk_text_processor(encoded)
+ self.body.append(encoded)
+
+ def visit_note(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'note')
+
+ def depart_note(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_warning(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'warning')
+
+ def depart_warning(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_attention(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'attention')
+
+ def depart_attention(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_caution(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'caution')
+
+ def depart_caution(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_danger(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'danger')
+
+ def depart_danger(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_error(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'error')
+
+ def depart_error(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_hint(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'hint')
+
+ def depart_hint(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_important(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'important')
+
+ def depart_important(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ def visit_tip(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_admonition(node, 'tip')
+
+ def depart_tip(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_admonition(node)
+
+ # these are only handled specially in the SmartyPantsHTML5Translator
+ def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
+ return self.visit_emphasis(node)
+
+ def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
+ return self.depart_emphasis(node)
+
+ def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
+ return self.visit_strong(node)
+
+ def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
+ return self.depart_strong(node)
+
+ def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
+ attrs = {}
+ if node.hasattr('explanation'):
+ attrs['title'] = node['explanation']
+ self.body.append(self.starttag(node, 'abbr', '', **attrs))
+
+ def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
+ self.body.append('</abbr>')
+
+ def visit_manpage(self, node):
+ # type: (nodes.Node) -> None
+ self.visit_literal_emphasis(node)
+
+ def depart_manpage(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_literal_emphasis(node)
+
+ # overwritten to add even/odd classes
+
+ def generate_targets_for_table(self, node):
+ # type: (nodes.Node) -> None
+ """Generate hyperlink targets for tables.
+
+ Original visit_table() generates hyperlink targets inside table tags
+ (<table>) if multiple IDs are assigned to listings.
+ That is invalid DOM structure. (This is a bug of docutils <= 0.13.1)
+
+ This exports hyperlink targets before tables to make valid DOM structure.
+ """
+ for id in node['ids'][1:]:
+ self.body.append('<span id="%s"></span>' % id)
+ node['ids'].remove(id)
+
+ def visit_table(self, node):
+ # type: (nodes.Node) -> None
+ self.generate_targets_for_table(node)
+
+ self._table_row_index = 0
+
+ classes = [cls.strip(u' \t\n')
+ for cls in self.settings.table_style.split(',')]
+ classes.insert(0, "docutils") # compat
+ if 'align' in node:
+ classes.append('align-%s' % node['align'])
+ tag = self.starttag(node, 'table', CLASS=' '.join(classes))
+ self.body.append(tag)
+
+ def visit_row(self, node):
+ # type: (nodes.Node) -> None
+ self._table_row_index += 1
+ if self._table_row_index % 2 == 0:
+ node['classes'].append('row-even')
+ else:
+ node['classes'].append('row-odd')
+ self.body.append(self.starttag(node, 'tr', ''))
+ node.column = 0
+
+ def visit_field_list(self, node):
+ # type: (nodes.Node) -> None
+ self._fieldlist_row_index = 0
+ return BaseTranslator.visit_field_list(self, node)
+
+ def visit_field(self, node):
+ # type: (nodes.Node) -> None
+ self._fieldlist_row_index += 1
+ if self._fieldlist_row_index % 2 == 0:
+ node['classes'].append('field-even')
+ else:
+ node['classes'].append('field-odd')
+ return node
+
+ def visit_math(self, node, math_env=''):
+ # type: (nodes.Node, unicode) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html',
+ location=(self.builder.current_docname, node.line))
+ raise nodes.SkipNode
+
+ def unknown_visit(self, node):
+ # type: (nodes.Node) -> None
+ raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
+
+
+class SmartyPantsHTML5Translator(HTML5Translator):
+ """
+ Handle ordinary text via smartypants, converting quotes and dashes
+ to the correct entities.
+ """
+
+ def __init__(self, *args, **kwds):
+ # type: (Any, Any) -> None
+ self.no_smarty = 0
+ HTML5Translator.__init__(self, *args, **kwds)
+
+ def visit_literal(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ try:
+ # this raises SkipNode
+ HTML5Translator.visit_literal(self, node)
+ finally:
+ self.no_smarty -= 1
+
+ def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ try:
+ HTML5Translator.visit_literal_block(self, node)
+ except nodes.SkipNode:
+ # HTML5Translator raises SkipNode for simple literal blocks,
+ # but not for parsed literal blocks
+ self.no_smarty -= 1
+ raise
+
+ def depart_literal_block(self, node):
+ # type: (nodes.Node) -> None
+ HTML5Translator.depart_literal_block(self, node)
+ self.no_smarty -= 1
+
+ def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ self.visit_emphasis(node)
+
+ def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_emphasis(node)
+ self.no_smarty -= 1
+
+ def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ self.visit_strong(node)
+
+ def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
+ self.depart_strong(node)
+ self.no_smarty -= 1
+
+ def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ HTML5Translator.visit_desc_signature(self, node)
+
+ def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty -= 1
+ HTML5Translator.depart_desc_signature(self, node)
+
+ def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ try:
+ HTML5Translator.visit_productionlist(self, node)
+ finally:
+ self.no_smarty -= 1
+
+ def visit_option(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty += 1
+ HTML5Translator.visit_option(self, node)
+
+ def depart_option(self, node):
+ # type: (nodes.Node) -> None
+ self.no_smarty -= 1
+ HTML5Translator.depart_option(self, node)
+
+ def bulk_text_processor(self, text):
+ # type: (unicode) -> unicode
+ if self.no_smarty <= 0:
+ return sphinx_smarty_pants(text)
+ return text
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index be0771c09..6466194b0 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -15,7 +15,7 @@
import re
import sys
from os import path
-import warnings
+from collections import defaultdict
from six import itervalues, text_type
from docutils import nodes, writers
@@ -24,15 +24,20 @@ from docutils.writers.latex2e import Babel
from sphinx import addnodes
from sphinx import highlighting
from sphinx.errors import SphinxError
-from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
-from sphinx.util import split_into
+from sphinx.util import split_into, logging
from sphinx.util.i18n import format_date
from sphinx.util.nodes import clean_astext, traverse_parent
from sphinx.util.template import LaTeXRenderer
from sphinx.util.texescape import tex_escape_map, tex_replace_map
from sphinx.util.smartypants import educate_quotes_latex
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Pattern, Tuple, Set, Union # NOQA
+ from sphinx.builder import Builder # NOQA
+
+logger = logging.getLogger(__name__)
BEGIN_DOC = r'''
\begin{document}
@@ -42,7 +47,6 @@ BEGIN_DOC = r'''
'''
-DEFAULT_TEMPLATE = 'latex/content.tex_t'
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
SECNUMDEPTH = 3
@@ -54,7 +58,7 @@ DEFAULT_SETTINGS = {
'classoptions': '',
'extraclassoptions': '',
'maxlistdepth': '',
- 'sphinxpkgoptions': '',
+ 'sphinxpkgoptions': 'dontkeepoldnames',
'sphinxsetup': '',
'passoptionstopackages': '',
'geometry': '\\usepackage{geometry}',
@@ -68,7 +72,6 @@ DEFAULT_SETTINGS = {
'polyglossia': '',
'fontpkg': '\\usepackage{times}',
'fncychap': '\\usepackage[Bjarne]{fncychap}',
- 'longtable': '\\usepackage{longtable}',
'hyperref': ('% Include hyperref last.\n'
'\\usepackage{hyperref}\n'
'% Fix anchor placement for figures with captions.\n'
@@ -83,7 +86,7 @@ DEFAULT_SETTINGS = {
'date': '',
'release': '',
'author': '',
- 'logo': '',
+ 'logo': '\\vbox{}',
'releasename': '',
'makeindex': '\\makeindex',
'shorthandoff': '',
@@ -96,7 +99,7 @@ DEFAULT_SETTINGS = {
'tocdepth': '',
'secnumdepth': '',
'pageautorefname': '',
-}
+} # type: Dict[unicode, unicode]
ADDITIONAL_SETTINGS = {
'pdflatex': {
@@ -117,6 +120,10 @@ ADDITIONAL_SETTINGS = {
},
'lualatex': {
'latex_engine': 'lualatex',
+ 'polyglossia': '\\usepackage{polyglossia}',
+ 'babel': '',
+ 'fontenc': '\\usepackage{fontspec}',
+ 'fontpkg': '',
'utf8extra': ('\\catcode`^^^^00a0\\active\\protected\\def^^^^00a0'
'{\\leavevmode\\nobreak\\ }'),
},
@@ -124,7 +131,7 @@ ADDITIONAL_SETTINGS = {
'latex_engine': 'platex',
'geometry': '\\usepackage[dvipdfm]{geometry}',
},
-}
+} # type: Dict[unicode, Dict[unicode, unicode]]
class collected_footnote(nodes.footnote):
@@ -144,17 +151,19 @@ class LaTeXWriter(writers.Writer):
('Document class', ['--docclass'], {'default': 'manual'}),
('Author', ['--author'], {'default': ''}),
))
- settings_defaults = {}
+ settings_defaults = {} # type: Dict
output = None
def __init__(self, builder):
+ # type: (Builder) -> None
writers.Writer.__init__(self)
self.builder = builder
self.translator_class = (
self.builder.translator_class or LaTeXTranslator)
def translate(self):
+ # type: () -> None
transform = ShowUrlsTransform(self.document)
transform.apply()
visitor = self.translator_class(self.document, self.builder)
@@ -166,10 +175,12 @@ class LaTeXWriter(writers.Writer):
class ExtBabel(Babel):
def __init__(self, language_code):
+ # type: (unicode) -> None
super(ExtBabel, self).__init__(language_code or '')
self.language_code = language_code
def get_shorthandoff(self):
+ # type: () -> unicode
shortlang = self.language.split('_')[0]
if shortlang in ('de', 'ngerman', 'sl', 'slovene', 'pt', 'portuges',
'es', 'spanish', 'nl', 'dutch', 'pl', 'polish', 'it',
@@ -181,15 +192,18 @@ class ExtBabel(Babel):
return ''
def uses_cyrillic(self):
+ # type: () -> bool
shortlang = self.language.split('_')[0]
return shortlang in ('bg', 'bulgarian', 'kk', 'kazakh',
'mn', 'mongolian', 'ru', 'russian',
'uk', 'ukrainian')
def is_supported_language(self):
+ # type: () -> bool
return bool(super(ExtBabel, self).get_language())
def get_language(self):
+ # type: () -> unicode
language = super(ExtBabel, self).get_language()
if not language:
return 'english' # fallback to english
@@ -201,9 +215,11 @@ class ShowUrlsTransform(object):
expanded = False
def __init__(self, document):
+ # type: (nodes.Node) -> None
self.document = document
def apply(self):
+ # type: () -> None
# replace id_prefix temporarily
id_prefix = self.document.settings.id_prefix
self.document.settings.id_prefix = 'show_urls'
@@ -216,6 +232,7 @@ class ShowUrlsTransform(object):
self.document.settings.id_prefix = id_prefix
def expand_show_urls(self):
+ # type: () -> None
show_urls = self.document.settings.env.config.latex_show_urls
if show_urls is False or show_urls == 'no':
return
@@ -238,6 +255,7 @@ class ShowUrlsTransform(object):
node.parent.insert(index + 1, textnode)
def create_footnote(self, uri):
+ # type: (unicode) -> List[Union[nodes.footnote, nodes.footnote_ref]]
label = nodes.label('', '#')
para = nodes.paragraph()
para.append(nodes.reference('', nodes.Text(uri), refuri=uri, nolinkurl=True))
@@ -254,7 +272,9 @@ class ShowUrlsTransform(object):
return [footnote, footnote_ref]
def renumber_footnotes(self):
+ # type: () -> None
def is_used_number(number):
+ # type: (unicode) -> bool
for node in self.document.traverse(nodes.footnote):
if not node.get('auto') and number in node['names']:
return True
@@ -262,13 +282,16 @@ class ShowUrlsTransform(object):
return False
def is_auto_footnote(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, nodes.footnote) and node.get('auto')
def footnote_ref_by(node):
+ # type: (nodes.Node) -> Callable[[nodes.Node], bool]
ids = node['ids']
parent = list(traverse_parent(node, (nodes.document, addnodes.start_of_file)))[0]
def is_footnote_ref(node):
+ # type: (nodes.Node) -> bool
return (isinstance(node, nodes.footnote_reference) and
ids[0] == node['refid'] and
parent in list(traverse_parent(node)))
@@ -296,26 +319,161 @@ class ShowUrlsTransform(object):
class Table(object):
- def __init__(self):
- self.col = 0
+    """Holds the data of a single table while it is being rendered."""
+
+ def __init__(self, node):
+ # type: (nodes.table) -> None
+ self.header = [] # type: List[unicode]
+ self.body = [] # type: List[unicode]
+ self.align = node.get('align')
self.colcount = 0
- self.colspec = None
- self.rowcount = 0
- self.had_head = False
+ self.colspec = None # type: unicode
+ self.colwidths = [] # type: List[int]
self.has_problematic = False
+ self.has_oldproblematic = False
self.has_verbatim = False
- self.caption = None
- self.longtable = False
+ self.caption = None # type: List[unicode]
+ self.caption_footnotetexts = [] # type: List[unicode]
+ self.header_footnotetexts = [] # type: List[unicode]
+
+ # current position
+ self.col = 0
+ self.row = 0
+
+ # for internal use
+ self.classes = node.get('classes', []) # type: List[unicode]
+ self.cells = defaultdict(int) # type: Dict[Tuple[int, int], int]
+ # it maps table location to cell_id
+ # (cell = rectangular area)
+ self.cell_id = 0 # last assigned cell_id
+
+ def is_longtable(self):
+ # type: () -> bool
+ """True if and only if table uses longtable environment."""
+ return self.row > 30 or 'longtable' in self.classes
+
+ def get_table_type(self):
+ # type: () -> unicode
+ """Returns the LaTeX environment name for the table.
+
+ The class currently supports:
+
+ * longtable
+ * tabular
+ * tabulary
+ """
+ if self.is_longtable():
+ return 'longtable'
+ elif self.has_verbatim:
+ return 'tabular'
+ elif self.colspec:
+ return 'tabulary'
+ elif self.has_problematic or (self.colwidths and 'colwidths-given' in self.classes):
+ return 'tabular'
+ else:
+ return 'tabulary'
+
+ def get_colspec(self):
+ # type: () -> unicode
+ """Returns a column spec of table.
+
+ This is what LaTeX calls the 'preamble argument' of the used table environment.
+
+ .. note:: the ``\\X`` and ``T`` column type specifiers are defined in ``sphinx.sty``.
+ """
+ if self.colspec:
+ return self.colspec
+ elif self.colwidths and 'colwidths-given' in self.classes:
+ total = sum(self.colwidths)
+ colspecs = ['\\X{%d}{%d}' % (width, total) for width in self.colwidths]
+ return '{|%s|}\n' % '|'.join(colspecs)
+ elif self.has_problematic:
+ return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
+ elif self.get_table_type() == 'tabulary':
+ # sphinx.sty sets T to be J by default.
+ return '{|' + ('T|' * self.colcount) + '}\n'
+ elif self.has_oldproblematic:
+ return '{|*{%d}{\\X{1}{%d}|}}\n' % (self.colcount, self.colcount)
+ else:
+ return '{|' + ('l|' * self.colcount) + '}\n'
+
+ def add_cell(self, height, width):
+ # type: (int, int) -> None
+ """Adds a new cell to a table.
+
+ It will be located at current position: (``self.row``, ``self.col``).
+ """
+ self.cell_id += 1
+ for col in range(width):
+ for row in range(height):
+ assert self.cells[(self.row + row, self.col + col)] == 0
+ self.cells[(self.row + row, self.col + col)] = self.cell_id
+
+ def cell(self, row=None, col=None):
+ # type: (int, int) -> TableCell
+        """Returns a cell object (i.e. rectangular area) containing the given position.
+
+        If the optional arguments ``row`` or ``col`` are omitted, the current
+        position (``self.row`` and ``self.col``) is used to locate the cell.
+ """
+ try:
+ if row is None:
+ row = self.row
+ if col is None:
+ col = self.col
+ return TableCell(self, row, col)
+ except IndexError:
+ return None
+
+
+class TableCell(object):
+    """Holds the data of a single table cell."""
+
+ def __init__(self, table, row, col):
+ # type: (Table, int, int) -> None
+ if table.cells[(row, col)] == 0:
+ raise IndexError
+
+ self.table = table
+ self.cell_id = table.cells[(row, col)]
+ self.row = row
+ self.col = col
+
+ # adjust position for multirow/multicol cell
+ while table.cells[(self.row - 1, self.col)] == self.cell_id:
+ self.row -= 1
+ while table.cells[(self.row, self.col - 1)] == self.cell_id:
+ self.col -= 1
+
+ @property
+ def width(self):
+ # type: () -> int
+ """Returns the cell width."""
+ width = 0
+ while self.table.cells[(self.row, self.col + width)] == self.cell_id:
+ width += 1
+ return width
+
+ @property
+ def height(self):
+ # type: () -> int
+ """Returns the cell height."""
+ height = 0
+ while self.table.cells[(self.row + height, self.col)] == self.cell_id:
+ height += 1
+ return height
def escape_abbr(text):
+ # type: (unicode) -> unicode
"""Adjust spacing after abbreviations."""
- return re.sub('\.(?=\s|$)', '.\\@', text)
+ return re.sub(r'\.(?=\s|$)', r'.\@', text)
def rstdim_to_latexdim(width_str):
+ # type: (unicode) -> unicode
"""Convert `width_str` with rst length to LaTeX length."""
- match = re.match('^(\d*\.?\d*)\s*(\S*)$', width_str)
+ match = re.match(r'^(\d*\.?\d*)\s*(\S*)$', width_str)
if not match:
raise ValueError
res = width_str
@@ -340,9 +498,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
docclasses = ('howto', 'manual')
def __init__(self, document, builder):
+ # type: (nodes.Node, Builder) -> None
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
- self.body = []
+ self.body = [] # type: List[unicode]
# flags
self.in_title = 0
@@ -351,7 +510,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_caption = 0
self.in_container_literal_block = 0
self.in_term = 0
- self.in_merged_cell = 0
+ self.needs_linetrimming = 0
self.in_minipage = 0
self.first_document = 1
self.this_is_the_title = 1
@@ -360,8 +519,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_parsed_literal = 0
self.compact_list = 0
self.first_param = 0
- self.remember_multirow = {}
- self.remember_multirowcol = {}
# determine top section level
if builder.config.latex_toplevel_sectioning:
@@ -393,8 +550,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.elements.update({
'releasename': _('Release'),
})
- if not builder.config.latex_keep_old_macro_names:
- self.elements['sphinxpkgoptions'] = 'dontkeepoldnames'
+ if builder.config.latex_keep_old_macro_names:
+ self.elements['sphinxpkgoptions'] = ''
if document.settings.docclass == 'howto':
docclass = builder.config.latex_docclass.get('howto', 'article')
else:
@@ -419,8 +576,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
if builder.config.language and not self.babel.is_supported_language():
# emit warning if specified language is invalid
# (only emitting, nothing changed to processing)
- self.builder.warn('no Babel option known for language %r' %
- builder.config.language)
+ logger.warning('no Babel option known for language %r',
+ builder.config.language)
# simply use babel.get_language() always, as get_language() returns
# 'english' even if language is invalid or empty
@@ -429,7 +586,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
# set up multilingual module...
# 'babel' key is public and user setting must be obeyed
if self.elements['babel']:
- # this branch is not taken for xelatex with writer default settings
+ # this branch is not taken for xelatex/lualatex if default settings
self.elements['multilingual'] = self.elements['babel']
if builder.config.language:
self.elements['shorthandoff'] = self.babel.get_shorthandoff()
@@ -454,6 +611,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
if getattr(builder, 'usepackages', None):
def declare_package(packagename, options=None):
+ # type:(unicode, unicode) -> unicode
if options:
return '\\usepackage[%s]{%s}' % (options, packagename)
else:
@@ -470,7 +628,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
tocdepth = document['tocdepth'] + self.top_sectionlevel - 2
maxdepth = len(self.sectionnames) - self.top_sectionlevel
if tocdepth > maxdepth:
- self.builder.warn('too large :maxdepth:, ignored.')
+ logger.warning('too large :maxdepth:, ignored.')
tocdepth = maxdepth
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
@@ -501,54 +659,61 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.highlighter = highlighting.PygmentsBridge(
'latex',
builder.config.pygments_style, builder.config.trim_doctest_flags)
- self.context = []
- self.descstack = []
- self.bibitems = []
- self.table = None
- self.next_table_colspec = None
+ self.context = [] # type: List[Any]
+ self.descstack = [] # type: List[unicode]
+ self.bibitems = [] # type: List[List[unicode]]
+ self.table = None # type: Table
+ self.next_table_colspec = None # type: unicode
# stack of [language, linenothreshold] settings per file
# the first item here is the default and must not be changed
# the second item is the default for the master file and can be changed
# by .. highlight:: directive in the master file
self.hlsettingstack = 2 * [[builder.config.highlight_language,
sys.maxsize]]
- self.bodystack = []
- self.footnotestack = []
+ self.bodystack = [] # type: List[List[unicode]]
+ self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA
self.footnote_restricted = False
- self.pending_footnotes = []
- self.curfilestack = []
- self.handled_abbrs = set()
- self.next_hyperlink_ids = {}
- self.next_section_ids = set()
+ self.pending_footnotes = [] # type: List[nodes.footnote_reference]
+ self.curfilestack = [] # type: List[unicode]
+ self.handled_abbrs = set() # type: Set[unicode]
+ self.next_hyperlink_ids = {} # type: Dict[unicode, Set[unicode]]
+ self.next_section_ids = set() # type: Set[unicode]
def pushbody(self, newbody):
+ # type: (List[unicode]) -> None
self.bodystack.append(self.body)
self.body = newbody
def popbody(self):
+ # type: () -> List[unicode]
body = self.body
self.body = self.bodystack.pop()
return body
def push_hyperlink_ids(self, figtype, ids):
+ # type: (unicode, Set[unicode]) -> None
hyperlink_ids = self.next_hyperlink_ids.setdefault(figtype, set())
hyperlink_ids.update(ids)
def pop_hyperlink_ids(self, figtype):
+ # type: (unicode) -> Set[unicode]
return self.next_hyperlink_ids.pop(figtype, set())
def check_latex_elements(self):
+ # type: () -> None
for key in self.builder.config.latex_elements:
if key not in self.elements:
msg = _("Unknown configure key: latex_elements[%r] is ignored.")
- self.builder.warn(msg % key)
+ logger.warning(msg % key)
def restrict_footnote(self, node):
+ # type: (nodes.Node) -> None
if self.footnote_restricted is False:
self.footnote_restricted = node
self.pending_footnotes = []
def unrestrict_footnote(self, node):
+ # type: (nodes.Node) -> None
if self.footnote_restricted == node:
self.footnote_restricted = False
for footnode in self.pending_footnotes:
@@ -557,6 +722,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.pending_footnotes = []
def format_docclass(self, docclass):
+ # type: (unicode) -> unicode
""" prepends prefix to sphinx document classes
"""
if docclass in self.docclasses:
@@ -564,35 +730,36 @@ class LaTeXTranslator(nodes.NodeVisitor):
return docclass
def astext(self):
+ # type: () -> unicode
self.elements.update({
'body': u''.join(self.body),
'indices': self.generate_indices()
})
-
- template_path = path.join(self.builder.srcdir, '_templates', 'latex.tex_t')
- if path.exists(template_path):
- return LaTeXRenderer().render(template_path, self.elements)
- else:
- return LaTeXRenderer().render(DEFAULT_TEMPLATE, self.elements)
+ return self.render('latex.tex_t', self.elements)
def hypertarget(self, id, withdoc=True, anchor=True):
+ # type: (unicode, bool, bool) -> unicode
if withdoc:
id = self.curfilestack[-1] + ':' + id
return (anchor and '\\phantomsection' or '') + \
'\\label{%s}' % self.idescape(id)
def hyperlink(self, id):
+ # type: (unicode) -> unicode
return '{\\hyperref[%s]{' % self.idescape(id)
def hyperpageref(self, id):
+ # type: (unicode) -> unicode
return '\\autopageref*{%s}' % self.idescape(id)
def idescape(self, id):
+ # type: (unicode) -> unicode
return '\\detokenize{%s}' % text_type(id).translate(tex_replace_map).\
encode('ascii', 'backslashreplace').decode('ascii').\
replace('\\', '_')
def babel_renewcommand(self, command, definition):
+ # type: (unicode, unicode) -> unicode
if self.elements['multilingual']:
prefix = '\\addto\\captions%s{' % self.babel.get_language()
suffix = '}'
@@ -603,6 +770,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ('%s\\renewcommand{%s}{%s}%s\n' % (prefix, command, definition, suffix))
def babel_defmacro(self, name, definition):
+ # type: (unicode, unicode) -> unicode
if self.elements['babel']:
prefix = '\\addto\\extras%s{' % self.babel.get_language()
suffix = '}'
@@ -613,7 +781,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ('%s\\def%s{%s}%s\n' % (prefix, name, definition, suffix))
def generate_numfig_format(self, builder):
- ret = []
+ # type: (Builder) -> unicode
+ ret = [] # type: List[unicode]
figure = self.builder.config.numfig_format['figure'].split('%s', 1)
if len(figure) == 1:
ret.append('\\def\\fnum@figure{%s}\n' %
@@ -652,7 +821,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ''.join(ret)
def generate_indices(self):
+ # type: (Builder) -> unicode
def generate(content, collapsed):
+ # type: (List[Tuple[unicode, List[Tuple[unicode, unicode, unicode, unicode, unicode]]]], bool) -> None # NOQA
ret.append('\\begin{sphinxtheindex}\n')
ret.append('\\def\\bigletter#1{{\\Large\\sffamily#1}'
'\\nopagebreak\\vspace{1mm}}\n')
@@ -682,10 +853,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
if isinstance(indices_config, list):
if indexname not in indices_config:
continue
- # deprecated config value
- if indexname == 'py-modindex' and \
- not self.builder.config.latex_use_modindex:
- continue
content, collapsed = indexcls(domain).generate(
self.builder.docnames)
if not content:
@@ -696,7 +863,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
return ''.join(ret)
+ def render(self, template_name, variables):
+ # type: (unicode, Dict) -> unicode
+ template_path = path.join(self.builder.srcdir, '_templates', template_name)
+ if path.exists(template_path):
+ return LaTeXRenderer().render(template_path, variables)
+ else:
+ return LaTeXRenderer().render(template_name, variables)
+
def visit_document(self, node):
+ # type: (nodes.Node) -> None
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if self.first_document == 1:
@@ -713,8 +889,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.sectionlevel = self.top_sectionlevel - 1
def depart_document(self, node):
+ # type: (nodes.Node) -> None
if self.bibitems:
- widest_label = ""
+ widest_label = "" # type: unicode
for bi in self.bibitems:
if len(widest_label) < len(bi[0]):
widest_label = bi[0]
@@ -729,6 +906,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.bibitems = []
def visit_start_of_file(self, node):
+ # type: (nodes.Node) -> None
# collect new footnotes
self.footnotestack.append(self.collect_footnotes(node))
# also add a document target
@@ -738,7 +916,9 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.hlsettingstack.append(self.hlsettingstack[0])
def collect_footnotes(self, node):
+ # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]]
def footnotes_under(n):
+ # type: (nodes.Node) -> Iterator[nodes.Node]
if isinstance(n, nodes.footnote):
yield n
else:
@@ -747,7 +927,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
continue
for k in footnotes_under(c):
yield k
- fnotes = {}
+
+ fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
num = fn.children[0].astext().strip()
newnode = collected_footnote(*fn.children, number=num)
@@ -755,15 +936,18 @@ class LaTeXTranslator(nodes.NodeVisitor):
return fnotes
def depart_start_of_file(self, node):
+ # type: (nodes.Node) -> None
self.footnotestack.pop()
self.curfilestack.pop()
self.hlsettingstack.pop()
def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
self.hlsettingstack[-1] = [node['lang'], node['linenothreshold']]
raise nodes.SkipNode
def visit_section(self, node):
+ # type: (nodes.Node) -> None
if not self.this_is_the_title:
self.sectionlevel += 1
self.body.append('\n\n')
@@ -771,40 +955,50 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.next_section_ids.update(node['ids'])
def depart_section(self, node):
+ # type: (nodes.Node) -> None
self.sectionlevel = max(self.sectionlevel - 1,
self.top_sectionlevel - 1)
def visit_problematic(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'{\color{red}\bfseries{}')
def depart_problematic(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_topic(self, node):
+ # type: (nodes.Node) -> None
self.in_minipage = 1
self.body.append('\n\\begin{sphinxShadowBox}\n')
def depart_topic(self, node):
+ # type: (nodes.Node) -> None
self.in_minipage = 0
self.body.append('\\end{sphinxShadowBox}\n')
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n\\begin{productionlist}\n')
self.in_production_list = 1
def depart_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{productionlist}\n\n')
self.in_production_list = 0
def visit_production(self, node):
+ # type: (nodes.Node) -> None
if node['tokenname']:
tn = node['tokenname']
self.body.append(self.hypertarget('grammar-token-' + tn))
@@ -813,15 +1007,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\productioncont{')
def depart_production(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}\n')
def visit_transition(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.elements['transition'])
def depart_transition(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_title(self, node):
+ # type: (nodes.Node) -> None
parent = node.parent
if isinstance(parent, addnodes.seealso):
# the environment already handles this
@@ -830,8 +1028,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
if self.this_is_the_title:
if len(node.children) != 1 and not isinstance(node.children[0],
nodes.Text):
- self.builder.warn('document title is not a single Text node',
- (self.curfilestack[-1], node.line))
+ logger.warning('document title is not a single Text node',
+ location=(self.curfilestack[-1], node.line))
if not self.elements['title']:
# text needs to be escaped since it is inserted into
# the output literally
@@ -868,24 +1066,31 @@ class LaTeXTranslator(nodes.NodeVisitor):
elif isinstance(parent, nodes.table):
# Redirect body output until title is finished.
self.pushbody([])
+ self.restrict_footnote(node)
else:
- self.builder.warn(
- 'encountered title node not in section, topic, table, '
- 'admonition or sidebar',
- (self.curfilestack[-1], node.line or ''))
+ logger.warning('encountered title node not in section, topic, table, '
+ 'admonition or sidebar',
+ location=(self.curfilestack[-1], node.line or ''))
self.body.append('\\sphinxstyleothertitle{')
self.context.append('}\n')
self.in_title = 1
def depart_title(self, node):
+ # type: (nodes.Node) -> None
self.in_title = 0
if isinstance(node.parent, nodes.table):
self.table.caption = self.popbody()
+ # temporary buffer for footnotes from caption
+ self.pushbody([])
+ self.unrestrict_footnote(node)
+ # the footnote texts from caption
+ self.table.caption_footnotetexts = self.popbody()
else:
self.body.append(self.context.pop())
- self.unrestrict_footnote(node)
+ self.unrestrict_footnote(node)
def visit_subtitle(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.sidebar):
self.body.append('\\sphinxstylesidebarsubtitle{')
self.context.append('}\n')
@@ -893,17 +1098,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('')
def depart_subtitle(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
def visit_desc(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n\\begin{fulllineitems}\n')
if self.table:
self.table.has_problematic = True
def depart_desc(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\end{fulllineitems}\n\n')
def _visit_signature_line(self, node):
+ # type: (nodes.Node) -> None
for child in node:
if isinstance(child, addnodes.desc_parameterlist):
self.body.append(r'\pysiglinewithargsret{')
@@ -912,9 +1121,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\pysigline{')
def _depart_signature_line(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
if node.parent['objtype'] != 'describe' and node['ids']:
hyper = self.hypertarget(node['ids'][0])
else:
@@ -926,57 +1137,71 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('%\n\\pysigstartmultiline\n')
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
if not node.get('is_multiline'):
self._depart_signature_line(node)
else:
self.body.append('%\n\\pysigstopmultiline')
def visit_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
self._visit_signature_line(node)
def depart_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
self._depart_signature_line(node)
def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxcode{')
self.literal_whitespace += 1
def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.literal_whitespace -= 1
def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'{ $\rightarrow$ ')
def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'}')
def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxbfcode{')
self.no_contractions += 1
self.literal_whitespace += 1
def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.literal_whitespace -= 1
self.no_contractions -= 1
def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
# close name, open parameterlist
self.body.append('}{')
self.first_param = 1
def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
# close parameterlist, open return annotation
self.body.append('}{')
def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not self.first_param:
self.body.append(', ')
else:
@@ -985,36 +1210,46 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\emph{')
def depart_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not node.hasattr('noemph'):
self.body.append('}')
def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxoptional{')
def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxstrong{')
def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
if node.children and not isinstance(node.children[0], nodes.paragraph):
# avoid empty desc environment which causes a formatting bug
self.body.append('~')
def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_seealso(self, node):
+ # type: (nodes.Node) -> None
self.body.append(u'\n\n\\sphinxstrong{%s:}\n\n' % admonitionlabels['seealso'])
def depart_seealso(self, node):
+ # type: (nodes.Node) -> None
self.body.append("\n\n")
def visit_rubric(self, node):
+ # type: (nodes.Node) -> None
if len(node.children) == 1 and node.children[0].astext() in \
('Footnotes', _('Footnotes')):
raise nodes.SkipNode
@@ -1023,13 +1258,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_title = 1
def depart_rubric(self, node):
+ # type: (nodes.Node) -> None
self.in_title = 0
self.body.append(self.context.pop())
def visit_footnote(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_collected_footnote(self, node):
+ # type: (nodes.Node) -> None
self.in_footnote += 1
if 'footnotetext' in node:
self.body.append('%%\n\\begin{footnotetext}[%s]'
@@ -1042,8 +1280,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\sphinxAtStartFootnote\n')
def depart_collected_footnote(self, node):
+ # type: (nodes.Node) -> None
if 'footnotetext' in node:
- self.body.append('%\n\\end{footnotetext}')
+ # the \ignorespaces in particular for after table header use
+ self.body.append('%\n\\end{footnotetext}\\ignorespaces ')
else:
if self.in_parsed_literal:
self.body.append('\\end{footnote}')
@@ -1052,6 +1292,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.in_footnote -= 1
def visit_label(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.citation):
self.bibitems[-1][0] = node.astext()
self.bibitems[-1][2] = self.curfilestack[-1]
@@ -1059,233 +1300,199 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
self.next_table_colspec = node['spec']
raise nodes.SkipNode
def visit_table(self, node):
+ # type: (nodes.Node) -> None
if self.table:
raise UnsupportedError(
'%s:%s: nested tables are not yet implemented.' %
(self.curfilestack[-1], node.line or ''))
- self.table = Table()
- self.table.longtable = 'longtable' in node['classes']
- self.tablebody = []
- self.tableheaders = []
- # Redirect body output until table is finished.
- self.pushbody(self.tablebody)
- self.restrict_footnote(node)
+ self.table = Table(node)
+ if self.next_table_colspec:
+ self.table.colspec = '{%s}\n' % self.next_table_colspec
+ self.next_table_colspec = None
def depart_table(self, node):
- if self.table.rowcount > 30:
- self.table.longtable = True
- self.popbody()
- if not self.table.longtable and self.table.caption is not None:
- self.body.append('\n\n\\begin{threeparttable}\n'
- '\\capstart\\caption{')
- for caption in self.table.caption:
- self.body.append(caption)
- self.body.append('}')
- for id in self.pop_hyperlink_ids('table'):
- self.body.append(self.hypertarget(id, anchor=False))
- if node['ids']:
- self.body.append(self.hypertarget(node['ids'][0], anchor=False))
- if self.table.longtable:
- self.body.append('\n\\begin{longtable}')
- endmacro = '\\end{longtable}\n\n'
- elif self.table.has_verbatim:
- self.body.append('\n\\noindent\\begin{tabular}')
- endmacro = '\\end{tabular}\n\n'
- elif self.table.has_problematic and not self.table.colspec:
- # if the user has given us tabularcolumns, accept them and use
- # tabulary nevertheless
- self.body.append('\n\\noindent\\begin{tabular}')
- endmacro = '\\end{tabular}\n\n'
- else:
- self.body.append('\n\\noindent\\begin{tabulary}{\\linewidth}')
- endmacro = '\\end{tabulary}\n\n'
- if self.table.colspec:
- self.body.append(self.table.colspec)
- else:
- if self.table.has_problematic:
- colspec = ('*{%d}{p{\\dimexpr(\\linewidth-\\arrayrulewidth)/%d'
- '-2\\tabcolsep-\\arrayrulewidth\\relax}|}' %
- (self.table.colcount, self.table.colcount))
- self.body.append('{|' + colspec + '}\n')
- elif self.table.longtable:
- self.body.append('{|' + ('l|' * self.table.colcount) + '}\n')
- else:
- self.body.append('{|' + ('L|' * self.table.colcount) + '}\n')
- if self.table.longtable and self.table.caption is not None:
- self.body.append(u'\\caption{')
- for caption in self.table.caption:
- self.body.append(caption)
- self.body.append('}')
- for id in self.pop_hyperlink_ids('table'):
- self.body.append(self.hypertarget(id, anchor=False))
- if node['ids']:
- self.body.append(self.hypertarget(node['ids'][0], anchor=False))
- self.body.append(u'\\\\\n')
- if self.table.longtable:
- self.body.append('\\hline\n')
- self.body.extend(self.tableheaders)
- self.body.append('\\endfirsthead\n\n')
- self.body.append('\\multicolumn{%s}{c}%%\n' % self.table.colcount)
- self.body.append(r'{\makebox[0pt]{\tablecontinued{\tablename\ '
- r'\thetable{} -- %s}}}\\'
- % _('continued from previous page'))
- self.body.append('\n\\hline\n')
- self.body.extend(self.tableheaders)
- self.body.append('\\endhead\n\n')
- self.body.append(r'\hline \multicolumn{%s}{|r|}{\makebox[0pt][r]'
- r'{\tablecontinued{%s}}}\\\hline'
- % (self.table.colcount,
- _('Continued on next page')))
- self.body.append('\n\\endfoot\n\n')
- self.body.append('\\endlastfoot\n\n')
- else:
- self.body.append('\\hline\n')
- self.body.extend(self.tableheaders)
- self.body.extend(self.tablebody)
- self.body.append(endmacro)
- if not self.table.longtable and self.table.caption is not None:
- self.body.append('\\end{threeparttable}\n\n')
- self.unrestrict_footnote(node)
+ # type: (nodes.Node) -> None
+ labels = '' # type: unicode
+ for labelid in self.pop_hyperlink_ids('table'):
+ labels += self.hypertarget(labelid, anchor=False)
+ if node['ids']:
+ labels += self.hypertarget(node['ids'][0], anchor=False)
+
+ table_type = self.table.get_table_type()
+ table = self.render(table_type + '.tex_t',
+ dict(table=self.table, labels=labels))
+ self.body.append("\n\n")
+ self.body.append(table)
+ self.body.append("\n")
+
self.table = None
- self.tablebody = None
def visit_colspec(self, node):
+ # type: (nodes.Node) -> None
self.table.colcount += 1
+ if 'colwidth' in node:
+ self.table.colwidths.append(node['colwidth'])
def depart_colspec(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_tgroup(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_tgroup(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_thead(self, node):
- self.table.had_head = True
- if self.next_table_colspec:
- self.table.colspec = '{%s}\n' % self.next_table_colspec
- self.next_table_colspec = None
- # Redirect head output until header is finished. see visit_tbody.
- self.body = self.tableheaders
+ # type: (nodes.Node) -> None
+ # Redirect head output until header is finished.
+ self.pushbody(self.table.header)
+ # footnotes in longtable header must be restricted
+ self.restrict_footnote(node)
def depart_thead(self, node):
- pass
+ # type: (nodes.Node) -> None
+ self.popbody()
+ # temporary buffer for footnotes from table header
+ self.pushbody([])
+ self.unrestrict_footnote(node)
+ # the footnote texts from header
+ self.table.header_footnotetexts = self.popbody()
def visit_tbody(self, node):
- if not self.table.had_head:
- self.visit_thead(node)
- self.body = self.tablebody
+ # type: (nodes.Node) -> None
+ # Redirect body output until table is finished.
+ self.pushbody(self.table.body)
+ # insert footnotetexts from header at start of body (due to longtable)
+ # those from caption are handled by templates (to allow caption at foot)
+ self.body.extend(self.table.header_footnotetexts)
def depart_tbody(self, node):
- self.remember_multirow = {}
- self.remember_multirowcol = {}
+ # type: (nodes.Node) -> None
+ self.popbody()
def visit_row(self, node):
+ # type: (nodes.Node) -> None
self.table.col = 0
- for key, value in self.remember_multirow.items():
- if not value and key in self.remember_multirowcol:
- del self.remember_multirowcol[key]
+
+ # fill columns if the row starts with the bottom of multirow cell
+ while True:
+ cell = self.table.cell(self.table.row, self.table.col)
+ if cell is None: # not a bottom of multirow cell
+ break
+ else: # a bottom of multirow cell
+ self.table.col += cell.width
+ if cell.col:
+ self.body.append('&')
+ if cell.width == 1:
+ # insert suitable strut for equalizing row heights in given multirow
+ self.body.append('\\sphinxtablestrut{%d}' % cell.cell_id)
+ else: # use \multicolumn for wide multirow cell
+ self.body.append('\\multicolumn{%d}{|l|}'
+ '{\\sphinxtablestrut{%d}}' %
+ (cell.width, cell.cell_id))
def depart_row(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\\\\n')
- if any(self.remember_multirow.values()):
- linestart = 1
- col = self.table.colcount
- for col in range(1, self.table.col + 1):
- if self.remember_multirow.get(col):
- if linestart != col:
- linerange = str(linestart) + '-' + str(col - 1)
- self.body.append('\\cline{' + linerange + '}')
- linestart = col + 1
- if self.remember_multirowcol.get(col, 0):
- linestart += self.remember_multirowcol[col]
- if linestart <= col:
- linerange = str(linestart) + '-' + str(col)
- self.body.append('\\cline{' + linerange + '}')
- else:
+ cells = [self.table.cell(self.table.row, i) for i in range(self.table.colcount)]
+ underlined = [cell.row + cell.height == self.table.row + 1 for cell in cells]
+ if all(underlined):
self.body.append('\\hline')
- self.table.rowcount += 1
+ else:
+ i = 0
+ underlined.extend([False]) # sentinel
+ while i < len(underlined):
+ if underlined[i] is True:
+ j = underlined[i:].index(False)
+ self.body.append('\\cline{%d-%d}' % (i + 1, i + j))
+ i += j
+ i += 1
+ self.table.row += 1
def visit_entry(self, node):
- if self.table.col == 0:
- while self.remember_multirow.get(self.table.col + 1, 0):
- self.table.col += 1
- self.remember_multirow[self.table.col] -= 1
- if self.remember_multirowcol.get(self.table.col, 0):
- extracols = self.remember_multirowcol[self.table.col]
- self.body.append('\\multicolumn{')
- self.body.append(str(extracols + 1))
- self.body.append('}{|l|}{}\\relax ')
- self.table.col += extracols
- self.body.append('&')
- else:
+ # type: (nodes.Node) -> None
+ if self.table.col > 0:
self.body.append('&')
- self.table.col += 1
+ self.table.add_cell(node.get('morerows', 0) + 1, node.get('morecols', 0) + 1)
+ cell = self.table.cell()
context = ''
- if 'morecols' in node:
- self.body.append('\\multicolumn{')
- self.body.append(str(node.get('morecols') + 1))
- if self.table.col == 1:
- self.body.append('}{|l|}{\\relax ')
+ if cell.width > 1:
+ if self.builder.config.latex_use_latex_multicolumn:
+ if self.table.col == 0:
+ self.body.append('\\multicolumn{%d}{|l|}{%%\n' % cell.width)
+ else:
+ self.body.append('\\multicolumn{%d}{l|}{%%\n' % cell.width)
+ context = '}%\n'
else:
- self.body.append('}{l|}{\\relax ')
- context += '\\unskip}\\relax '
- if 'morerows' in node:
- self.body.append('\\multirow{')
- self.body.append(str(node.get('morerows') + 1))
- self.body.append('}{*}{\\relax ')
- context += '\\unskip}\\relax '
- self.remember_multirow[self.table.col] = node.get('morerows')
- if 'morecols' in node:
- if 'morerows' in node:
- self.remember_multirowcol[self.table.col] = node.get('morecols')
- self.table.col += node.get('morecols')
- if (('morecols' in node or 'morerows' in node) and
- (len(node) > 2 or len(node.astext().split('\n')) > 2)):
- self.in_merged_cell = 1
- self.literal_whitespace += 1
- self.body.append('\\eqparbox{%d}{\\vspace{.5\\baselineskip}\n' % id(node))
- self.pushbody([])
- context += '}'
+ self.body.append('\\sphinxstartmulticolumn{%d}%%\n' % cell.width)
+ context = '\\sphinxstopmulticolumn\n'
+ if cell.height > 1:
+ # \sphinxmultirow 2nd arg "cell_id" will serve as id for LaTeX macros as well
+ self.body.append('\\sphinxmultirow{%d}{%d}{%%\n' % (cell.height, cell.cell_id))
+ context = '}%\n' + context
+ if cell.width > 1 or cell.height > 1:
+ self.body.append('\\begin{varwidth}[t]{\\sphinxcolwidth{%d}{%d}}\n'
+ % (cell.width, self.table.colcount))
+ context = ('\\par\n\\vskip-\\baselineskip\\strut\\end{varwidth}%\n') + context
+ self.needs_linetrimming = 1
+ if len(node) > 2 and len(node.astext().split('\n')) > 2:
+ self.needs_linetrimming = 1
+ if len(node.traverse(nodes.paragraph)) >= 2:
+ self.table.has_oldproblematic = True
if isinstance(node.parent.parent, nodes.thead):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
self.body.append('\\sphinxstylethead{\\relax ')
- context += '\\unskip}\\relax '
- while self.remember_multirow.get(self.table.col + 1, 0):
- self.table.col += 1
- self.remember_multirow[self.table.col] -= 1
- context += '&'
- if self.remember_multirowcol.get(self.table.col, 0):
- extracols = self.remember_multirowcol[self.table.col]
- context += '\\multicolumn{'
- context += str(extracols + 1)
- context += '}{l|}{}\\relax '
- self.table.col += extracols
- if len(node.traverse(nodes.paragraph)) >= 2:
- self.table.has_problematic = True
+ context = '\\unskip}\\relax ' + context
+ if self.needs_linetrimming:
+ self.pushbody([])
self.context.append(context)
def depart_entry(self, node):
- if self.in_merged_cell:
- self.in_merged_cell = 0
- self.literal_whitespace -= 1
+ # type: (nodes.Node) -> None
+ if self.needs_linetrimming:
+ self.needs_linetrimming = 0
body = self.popbody()
+
# Remove empty lines from top of merged cell
while body and body[0] == "\n":
body.pop(0)
- for line in body:
- line = re.sub(u'(?<!~\\\\\\\\)\n', u'~\\\\\\\\\n', line) # escape return code
- self.body.append(line)
- self.body.append(self.context.pop()) # header
+ self.body.extend(body)
+
+ self.body.append(self.context.pop())
+
+ cell = self.table.cell()
+ self.table.col += cell.width
+
+ # fill columns if next ones are a bottom of wide-multirow cell
+ while True:
+ nextcell = self.table.cell()
+ if nextcell is None: # not a bottom of multirow cell
+ break
+ else: # a bottom part of multirow cell
+ self.table.col += nextcell.width
+ self.body.append('&')
+ if nextcell.width == 1:
+ # insert suitable strut for equalizing row heights in multirow
+ # they also serve to clear colour panels which would hide the text
+ self.body.append('\\sphinxtablestrut{%d}' % nextcell.cell_id)
+ else:
+ # use \multicolumn for wide multirow cell
+ self.body.append('\\multicolumn{%d}{l|}'
+ '{\\sphinxtablestrut{%d}}' %
+ (nextcell.width, nextcell.cell_id))
def visit_acks(self, node):
+ # type: (nodes.Node) -> None
# this is a list in the source, but should be rendered as a
# comma-separated list here
self.body.append('\n\n')
@@ -1295,16 +1502,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
if not self.compact_list:
self.body.append('\\begin{itemize}\n')
if self.table:
self.table.has_problematic = True
def depart_bullet_list(self, node):
+ # type: (nodes.Node) -> None
if not self.compact_list:
self.body.append('\\end{itemize}\n')
def visit_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\begin{enumerate}\n')
if 'start' in node:
self.body.append('\\setcounter{enumi}{%d}\n' % (node['start'] - 1))
@@ -1312,33 +1522,41 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{enumerate}\n')
def visit_list_item(self, node):
+ # type: (nodes.Node) -> None
# Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append(r'\item {} ')
def depart_list_item(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{description}\n')
def visit_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_term(self, node):
+ # type: (nodes.Node) -> None
self.in_term += 1
- ctx = '}] \\leavevmode'
+ ctx = '}] \\leavevmode' # type: unicode
if node.get('ids'):
ctx += self.hypertarget(node['ids'][0])
self.body.append('\\item[{')
@@ -1346,42 +1564,43 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append(ctx)
def depart_term(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
self.unrestrict_footnote(node)
self.in_term -= 1
- def visit_termsep(self, node):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
- 'This warning is displayed because some Sphinx extension '
- 'uses sphinx.addnodes.termsep. Please report it to '
- 'author of the extension.', RemovedInSphinx16Warning)
- self.body.append(', ')
- raise nodes.SkipNode
-
def visit_classifier(self, node):
+ # type: (nodes.Node) -> None
self.body.append('{[}')
def depart_classifier(self, node):
+ # type: (nodes.Node) -> None
self.body.append('{]}')
def visit_definition(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_definition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_field_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\begin{quote}\\begin{description}\n')
if self.table:
self.table.has_problematic = True
def depart_field_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{description}\\end{quote}\n')
def visit_field(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_field(self, node):
+ # type: (nodes.Node) -> None
pass
visit_field_name = visit_term
@@ -1391,6 +1610,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
depart_field_body = depart_definition
def visit_paragraph(self, node):
+ # type: (nodes.Node) -> None
index = node.parent.index(node)
if (index > 0 and isinstance(node.parent, nodes.compound) and
not isinstance(node.parent[index - 1], nodes.paragraph) and
@@ -1404,17 +1624,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n')
def depart_paragraph(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_centered(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\begin{center}')
if self.table:
self.table.has_problematic = True
def depart_centered(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\end{center}')
def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
# for now, we don't support a more compact list format
# don't add individual itemize environments, but one for all columns
self.compact_list += 1
@@ -1424,29 +1648,37 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
self.compact_list -= 1
self.body.append('\\end{itemize}\n')
def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def latex_image_length(self, width_str):
+ # type: (nodes.Node) -> unicode
try:
return rstdim_to_latexdim(width_str)
except ValueError:
- self.builder.warn('dimension unit %s is invalid. Ignored.' % width_str)
+ logger.warning('dimension unit %s is invalid. Ignored.', width_str)
+ return None
def is_inline(self, node):
+ # type: (nodes.Node) -> bool
"""Check whether a node represents an inline element."""
return isinstance(node.parent, nodes.TextElement)
def visit_image(self, node):
+ # type: (nodes.Node) -> None
attrs = node.attributes
- pre = [] # in reverse order
- post = []
+ pre = [] # type: List[unicode]
+ # in reverse order
+ post = [] # type: List[unicode]
if self.in_parsed_literal:
pre = ['\\begingroup\\sphinxunactivateextrasandspace\\relax ']
post = ['\\endgroup ']
@@ -1519,10 +1751,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.extend(post)
def depart_image(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_figure(self, node):
- ids = ''
+ # type: (nodes.Node) -> None
+ ids = '' # type: unicode
for id in self.pop_hyperlink_ids('figure'):
ids += self.hypertarget(id, anchor=False)
if node['ids']:
@@ -1578,10 +1812,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append(ids + align_end + '\\end{figure}\n')
def depart_figure(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
self.unrestrict_footnote(node)
def visit_caption(self, node):
+ # type: (nodes.Node) -> None
self.in_caption += 1
self.restrict_footnote(node)
if self.in_container_literal_block:
@@ -1594,29 +1830,37 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\caption{')
def depart_caption(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.in_caption -= 1
self.unrestrict_footnote(node)
def visit_legend(self, node):
+ # type: (nodes.Node) -> None
self.body.append('{\\small ')
def depart_legend(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_admonition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\begin{sphinxadmonition}{note}')
def depart_admonition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{sphinxadmonition}\n')
def _make_visit_admonition(name):
+ # type: (unicode) -> Callable[[LaTeXTranslator, nodes.Node], None]
def visit_admonition(self, node):
+ # type: (nodes.Node) -> None
self.body.append(u'\n\\begin{sphinxadmonition}{%s}{%s:}' %
(name, admonitionlabels[name]))
return visit_admonition
def _depart_named_admonition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{sphinxadmonition}\n')
visit_attention = _make_visit_admonition('attention')
@@ -1639,13 +1883,17 @@ class LaTeXTranslator(nodes.NodeVisitor):
depart_warning = _depart_named_admonition
def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_target(self, node):
+ # type: (nodes.Node) -> None
def add_target(id):
+ # type: (unicode) -> None
# indexing uses standard LaTeX index markup, so the targets
# will be generated differently
if id.startswith('index-'):
@@ -1673,7 +1921,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.next_section_ids.update(node['ids'])
return
else:
- domain = self.builder.env.domains['std']
+ domain = self.builder.env.get_domain('std')
figtype = domain.get_figtype(next)
if figtype and domain.get_numfig_title(next):
ids = set()
@@ -1693,16 +1941,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
add_target(id)
def depart_target(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_attribution(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\begin{flushright}\n')
self.body.append('---')
def depart_attribution(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\end{flushright}\n')
def visit_index(self, node, scre=re.compile(r';\s*')):
+ # type: (nodes.Node, Pattern) -> None
if not node.get('inline', True):
self.body.append('\n')
entries = node['entries']
@@ -1732,18 +1984,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
p1, p2 = [self.encode(x) for x in split_into(2, 'seealso', string)]
self.body.append(r'\index{%s|see{%s}}' % (p1, p2))
else:
- self.builder.warn(
- 'unknown index entry type %s found' % type)
+ logger.warning('unknown index entry type %s found', type)
except ValueError as err:
- self.builder.warn(str(err))
+ logger.warning(str(err))
raise nodes.SkipNode
def visit_raw(self, node):
+ # type: (nodes.Node) -> None
if 'latex' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_reference(self, node):
+ # type: (nodes.Node) -> None
if not self.in_title:
for id in node.get('ids'):
anchor = not self.in_caption
@@ -1797,14 +2050,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
self.context.append('}}}')
else:
- self.builder.warn('unusable reference target found: %s' % uri,
- (self.curfilestack[-1], node.line))
+ logger.warning('unusable reference target found: %s', uri,
+ location=(self.curfilestack[-1], node.line))
self.context.append('')
def depart_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
if node.get('refid'):
id = self.curfilestack[-1] + ':' + node['refid']
else:
@@ -1826,46 +2081,59 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxstyleemphasis{')
def depart_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxstyleliteralemphasis{')
self.no_contractions += 1
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.no_contractions -= 1
def visit_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxstylestrong{')
def depart_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxstyleliteralstrong{')
self.no_contractions += 1
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.no_contractions -= 1
def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
abbr = node.astext()
self.body.append(r'\sphinxstyleabbreviation{')
# spell out the explanation once
@@ -1876,39 +2144,48 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('}')
def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
def visit_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append(r'\sphinxtitleref{')
def depart_title_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_citation(self, node):
+ # type: (nodes.Node) -> None
# TODO maybe use cite bibitems
# bibitem: [citelabel, citetext, docname, citeid]
self.bibitems.append(['', '', '', ''])
self.context.append(len(self.body))
def depart_citation(self, node):
+ # type: (nodes.Node) -> None
size = self.context.pop()
text = ''.join(self.body[size:])
del self.body[size:]
self.bibitems[-1][1] = text
def visit_citation_reference(self, node):
+ # type: (nodes.Node) -> None
# This is currently never encountered, since citation_reference nodes
# are already replaced by pending_xref nodes in the environment.
self.body.append('\\cite{%s}' % self.idescape(node.astext()))
raise nodes.SkipNode
def visit_literal(self, node):
+ # type: (nodes.Node) -> None
self.no_contractions += 1
if self.in_title:
self.body.append(r'\sphinxstyleliteralintitle{')
@@ -1916,10 +2193,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(r'\sphinxcode{')
def depart_literal(self, node):
+ # type: (nodes.Node) -> None
self.no_contractions -= 1
self.body.append('}')
def visit_footnote_reference(self, node):
+ # type: (nodes.Node) -> None
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
@@ -1935,19 +2214,21 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.pending_footnotes.append(footnode)
else:
self.footnotestack[-1][num][1] = True
- footnode.walkabout(self)
+ footnode.walkabout(self) # type: ignore
raise nodes.SkipChildren
def depart_footnote_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
if node.rawsource != node.astext():
# most probably a parsed-literal block -- don't highlight
self.in_parsed_literal += 1
self.body.append('\\begin{sphinxalltt}\n')
else:
- ids = ''
+ ids = '' # type: unicode
for id in self.pop_hyperlink_ids('code-block'):
ids += self.hypertarget(id, anchor=False)
if node['ids']:
@@ -1972,11 +2253,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
opts = {}
- def warner(msg, **kwargs):
- self.builder.warn(msg, (self.curfilestack[-1], node.line), **kwargs)
- hlcode = self.highlighter.highlight_block(code, lang, opts=opts,
- warn=warner, linenos=linenos,
- **highlight_args)
+ hlcode = self.highlighter.highlight_block(
+ code, lang, opts=opts, linenos=linenos,
+ location=(self.curfilestack[-1], node.line), **highlight_args
+ )
# workaround for Unicode issue
hlcode = hlcode.replace(u'€', u'@texteuro[]')
if self.in_footnote:
@@ -2005,18 +2285,22 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\\end{sphinxalltt}\n')
self.in_parsed_literal -= 1
visit_doctest_block = visit_literal_block
depart_doctest_block = depart_literal_block
def visit_line(self, node):
- self.body.append('\item[] ')
+ # type: (nodes.Node) -> None
+ self.body.append('\\item[] ')
def depart_line(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_line_block(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.line_block):
self.body.append('\\item[]\n'
'\\begin{DUlineblock}{\\DUlineblockindent}\n')
@@ -2026,9 +2310,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_line_block(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{DUlineblock}\n')
def visit_block_quote(self, node):
+ # type: (nodes.Node) -> None
# If the block quote contains a single object and that object
# is a list, then generate a list not a block quote.
# This lets us indent lists.
@@ -2044,6 +2330,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.table.has_problematic = True
def depart_block_quote(self, node):
+ # type: (nodes.Node) -> None
done = 0
if len(node.children) == 1:
child = node.children[0]
@@ -2056,45 +2343,56 @@ class LaTeXTranslator(nodes.NodeVisitor):
# option node handling copied from docutils' latex writer
def visit_option(self, node):
+ # type: (nodes.Node) -> None
if self.context[-1]:
# this is not the first option
self.body.append(', ')
def depart_option(self, node):
+ # type: (nodes.Node) -> None
# flag that the first option is done.
self.context[-1] += 1
def visit_option_argument(self, node):
+ # type: (nodes.Node) -> None
"""The delimiter betweeen an option and its argument."""
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_group(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\item [')
# flag for first option
self.context.append(0)
def depart_option_group(self, node):
+ # type: (nodes.Node) -> None
self.context.pop() # the flag
self.body.append('] ')
def visit_option_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\begin{optionlist}{3cm}\n')
if self.table:
self.table.has_problematic = True
def depart_option_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\\end{optionlist}\n')
def visit_option_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_option_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_string(self, node):
+ # type: (nodes.Node) -> None
ostring = node.astext()
self.no_contractions += 1
self.body.append(self.encode(ostring))
@@ -2102,30 +2400,39 @@ class LaTeXTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_description(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' ')
def depart_description(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_superscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('$^{\\text{')
def depart_superscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}}$')
def visit_subscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('$_{\\text{')
def depart_subscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}}$')
def visit_substitution_definition(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_substitution_reference(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_inline(self, node):
+ # type: (nodes.Node) -> None
classes = node.get('classes', [])
if classes in [['menuselection'], ['guilabel']]:
self.body.append(r'\sphinxmenuselection{')
@@ -2140,24 +2447,30 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context.append('')
def depart_inline(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
def visit_generated(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_generated(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_container(self, node):
+ # type: (nodes.Node) -> None
if node.get('literal_block'):
self.in_container_literal_block += 1
- ids = ''
+ ids = '' # type: unicode
for id in self.pop_hyperlink_ids('code-block'):
ids += self.hypertarget(id, anchor=False)
if node['ids']:
@@ -2168,31 +2481,38 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n\\def\\sphinxLiteralBlockLabel{' + ids + '}\n')
def depart_container(self, node):
+ # type: (nodes.Node) -> None
if node.get('literal_block'):
self.in_container_literal_block -= 1
self.body.append('\\let\\sphinxVerbatimTitle\\empty\n')
self.body.append('\\let\\sphinxLiteralBlockLabel\\empty\n')
def visit_decoration(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_decoration(self, node):
+ # type: (nodes.Node) -> None
pass
# docutils-generated elements that we don't support
def visit_header(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_footer(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_docinfo(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
# text handling
def encode(self, text):
+ # type: (unicode) -> unicode
text = text_type(text).translate(tex_escape_map)
if self.literal_whitespace:
# Insert a blank before the newline, to avoid
@@ -2204,41 +2524,50 @@ class LaTeXTranslator(nodes.NodeVisitor):
return text
def encode_uri(self, text):
+ # type: (unicode) -> unicode
# in \href, the tilde is allowed and must be represented literally
return self.encode(text).replace('\\textasciitilde{}', '~')
def visit_Text(self, node):
+ # type: (nodes.Node) -> None
text = self.encode(node.astext())
if not self.no_contractions and not self.in_parsed_literal:
- text = educate_quotes_latex(text,
+ text = educate_quotes_latex(text, # type: ignore
dquotes=("\\sphinxquotedblleft{}",
"\\sphinxquotedblright{}"))
self.body.append(text)
def depart_Text(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_comment(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_meta(self, node):
+ # type: (nodes.Node) -> None
# only valid for HTML
raise nodes.SkipNode
def visit_system_message(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_system_message(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_math(self, node):
- self.builder.warn('using "math" markup without a Sphinx math extension '
- 'active, please use one of the math extensions '
- 'described at http://sphinx-doc.org/ext/math.html',
- (self.curfilestack[-1], node.line))
+ # type: (nodes.Node) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html',
+ location=(self.curfilestack[-1], node.line))
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
+ # type: (nodes.Node) -> None
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
diff --git a/sphinx/writers/manpage.py b/sphinx/writers/manpage.py
index 73a305bb8..7372230ea 100644
--- a/sphinx/writers/manpage.py
+++ b/sphinx/writers/manpage.py
@@ -9,8 +9,6 @@
:license: BSD, see LICENSE for details.
"""
-import warnings
-
from docutils import nodes
from docutils.writers.manpage import (
MACRO_DEF,
@@ -19,20 +17,29 @@ from docutils.writers.manpage import (
)
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
-from sphinx.util.compat import docutils_version
+from sphinx.util import logging
+import sphinx.util.docutils
from sphinx.util.i18n import format_date
+if False:
+ # For type annotation
+ from typing import Any # NOQA
+ from sphinx.builders import Builder # NOQA
+
+logger = logging.getLogger(__name__)
+
class ManualPageWriter(Writer):
def __init__(self, builder):
+ # type: (Builder) -> None
Writer.__init__(self)
self.builder = builder
self.translator_class = (
self.builder.translator_class or ManualPageTranslator)
def translate(self):
+ # type: () -> None
transform = NestedInlineTransform(self.document)
transform.apply()
visitor = self.translator_class(self.builder, self.document)
@@ -53,10 +60,13 @@ class NestedInlineTransform(object):
<strong>&bar=</strong><emphasis>2</emphasis>
"""
def __init__(self, document):
+ # type: (nodes.document) -> None
self.document = document
def apply(self):
+ # type: () -> None
def is_inline(node):
+ # type: (nodes.Node) -> bool
return isinstance(node, (nodes.literal, nodes.emphasis, nodes.strong))
for node in self.document.traverse(is_inline):
@@ -77,6 +87,7 @@ class ManualPageTranslator(BaseTranslator):
"""
def __init__(self, builder, *args, **kwds):
+ # type: (Builder, Any, Any) -> None
BaseTranslator.__init__(self, *args, **kwds)
self.builder = builder
@@ -105,7 +116,7 @@ class ManualPageTranslator(BaseTranslator):
self._docinfo['manual_group'] = builder.config.project
# In docutils < 0.11 self.append_header() was never called
- if docutils_version < (0, 11):
+ if sphinx.util.docutils.__version_info__ < (0, 11):
self.body.append(MACRO_DEF)
# Overwrite admonition label translations with our own
@@ -114,127 +125,152 @@ class ManualPageTranslator(BaseTranslator):
# overwritten -- added quotes around all .TH arguments
def header(self):
+ # type: () -> unicode
tmpl = (".TH \"%(title_upper)s\" \"%(manual_section)s\""
" \"%(date)s\" \"%(version)s\" \"%(manual_group)s\"\n"
".SH NAME\n"
- "%(title)s \- %(subtitle)s\n")
+ "%(title)s \\- %(subtitle)s\n")
return tmpl % self._docinfo
def visit_start_of_file(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_start_of_file(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc(self, node):
+ # type: (nodes.Node) -> None
self.visit_definition_list(node)
def depart_desc(self, node):
+ # type: (nodes.Node) -> None
self.depart_definition_list(node)
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.visit_definition_list_item(node)
self.visit_term(node)
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.depart_term(node)
def visit_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' ')
def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' -> ')
def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append(')')
def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not self.first_param:
self.body.append(', ')
else:
self.first_param = 0
def depart_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append('[')
def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append(']')
def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.visit_definition(node)
def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.depart_definition(node)
def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.visit_paragraph(node)
def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.depart_paragraph(node)
# overwritten -- don't make whole of term bold if it includes strong node
def visit_term(self, node):
+ # type: (nodes.Node) -> None
if node.traverse(nodes.strong):
self.body.append('\n')
else:
BaseTranslator.visit_term(self, node)
- def visit_termsep(self, node):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
- 'This warning is displayed because some Sphinx extension '
- 'uses sphinx.addnodes.termsep. Please report it to '
- 'author of the extension.', RemovedInSphinx16Warning)
- self.body.append(', ')
- raise nodes.SkipNode
-
# overwritten -- we don't want source comments to show up
def visit_comment(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
# overwritten -- added ensure_eol()
def visit_footnote(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
BaseTranslator.visit_footnote(self, node)
# overwritten -- handle footnotes rubric
def visit_rubric(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
if len(node.children) == 1:
rubtitle = node.children[0].astext()
@@ -246,15 +282,19 @@ class ManualPageTranslator(BaseTranslator):
self.body.append('.sp\n')
def depart_rubric(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_seealso(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, 'seealso')
def depart_seealso(self, node):
+ # type: (nodes.Node) -> None
self.depart_admonition(node)
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
names = []
self.in_productionlist += 1
@@ -279,13 +319,16 @@ class ManualPageTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_production(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_production(self, node):
+ # type: (nodes.Node) -> None
pass
# overwritten -- don't emit a warning for images
def visit_image(self, node):
+ # type: (nodes.Node) -> None
if 'alt' in node.attributes:
self.body.append(_('[image: %s]') % node['alt'] + '\n')
self.body.append(_('[image]') + '\n')
@@ -293,6 +336,7 @@ class ManualPageTranslator(BaseTranslator):
# overwritten -- don't visit inner marked up nodes
def visit_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.defs['reference'][0])
# avoid repeating escaping code... fine since
# visit_Text calls astext() and only works on that afterwards
@@ -314,51 +358,66 @@ class ManualPageTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_centered(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('.sp\n.ce\n')
def depart_centered(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n.ce 0\n')
def visit_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_highlightlang(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_toctree(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_index(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_acks(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
@@ -366,43 +425,56 @@ class ManualPageTranslator(BaseTranslator):
raise nodes.SkipNode
def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
self.visit_bullet_list(node)
def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
return self.visit_emphasis(node)
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
return self.depart_emphasis(node)
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
return self.visit_strong(node)
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
return self.depart_strong(node)
def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_manpage(self, node):
+ # type: (nodes.Node) -> None
return self.visit_strong(node)
def depart_manpage(self, node):
+ # type: (nodes.Node) -> None
return self.depart_strong(node)
# overwritten: handle section titles better than in 0.6 release
def visit_title(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, addnodes.seealso):
self.body.append('.IP "')
return
@@ -417,32 +489,39 @@ class ManualPageTranslator(BaseTranslator):
return BaseTranslator.visit_title(self, node)
def depart_title(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, addnodes.seealso):
self.body.append('"\n')
return
return BaseTranslator.depart_title(self, node)
def visit_raw(self, node):
+ # type: (nodes.Node) -> None
if 'manpage' in node.get('format', '').split():
self.body.append(node.astext())
raise nodes.SkipNode
def visit_meta(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_inline(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_inline(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_math(self, node):
- self.builder.warn('using "math" markup without a Sphinx math extension '
- 'active, please use one of the math extensions '
- 'described at http://sphinx-doc.org/ext/math.html')
+ # type: (nodes.Node) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html')
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
+ # type: (nodes.Node) -> None
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
diff --git a/sphinx/writers/texinfo.py b/sphinx/writers/texinfo.py
index be3d5ebdf..a79f34e67 100644
--- a/sphinx/writers/texinfo.py
+++ b/sphinx/writers/texinfo.py
@@ -12,18 +12,26 @@
import re
import textwrap
from os import path
-import warnings
from six import itervalues
from six.moves import range
+
from docutils import nodes, writers
from sphinx import addnodes, __display_version__
-from sphinx.deprecation import RemovedInSphinx16Warning
+from sphinx.errors import ExtensionError
from sphinx.locale import admonitionlabels, _
+from sphinx.util import logging
from sphinx.util.i18n import format_date
from sphinx.writers.latex import collected_footnote
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Iterator, List, Pattern, Set, Tuple, Union # NOQA
+ from sphinx.builders.texinfo import TexinfoBuilder # NOQA
+
+logger = logging.getLogger(__name__)
+
COPYING = """\
@quotation
@@ -81,6 +89,7 @@ TEMPLATE = """\
def find_subsections(section):
+ # type: (nodes.Node) -> List[nodes.Node]
"""Return a list of subsections for the given ``section``."""
result = []
for child in section.children:
@@ -92,6 +101,7 @@ def find_subsections(section):
def smart_capwords(s, sep=None):
+ # type: (unicode, unicode) -> unicode
"""Like string.capwords() but does not capitalize words that already
contain a capital letter."""
words = s.split(sep)
@@ -111,21 +121,23 @@ class TexinfoWriter(writers.Writer):
('Dir entry', ['--texinfo-dir-entry'], {'default': ''}),
('Description', ['--texinfo-dir-description'], {'default': ''}),
('Category', ['--texinfo-dir-category'], {'default':
- 'Miscellaneous'})))
+ 'Miscellaneous'}))) # type: Tuple[unicode, Any, Tuple[Tuple[unicode, List[unicode], Dict[unicode, unicode]], ...]] # NOQA
- settings_defaults = {}
+ settings_defaults = {} # type: Dict
- output = None
+ output = None # type: unicode
visitor_attributes = ('output', 'fragment')
def __init__(self, builder):
+ # type: (TexinfoBuilder) -> None
writers.Writer.__init__(self)
self.builder = builder
self.translator_class = (
self.builder.translator_class or TexinfoTranslator)
def translate(self):
+ # type: () -> None
self.visitor = visitor = self.translator_class(
self.document, self.builder)
self.document.walkabout(visitor)
@@ -154,44 +166,53 @@ class TexinfoTranslator(nodes.NodeVisitor):
}
def __init__(self, document, builder):
+ # type: (nodes.Node, TexinfoBuilder) -> None
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
self.init_settings()
- self.written_ids = set() # node names and anchors in output
+ self.written_ids = set() # type: Set[unicode]
+ # node names and anchors in output
# node names and anchors that should be in output
- self.referenced_ids = set()
- self.indices = [] # (node name, content)
- self.short_ids = {} # anchors --> short ids
- self.node_names = {} # node name --> node's name to display
- self.node_menus = {} # node name --> node's menu entries
- self.rellinks = {} # node name --> (next, previous, up)
+ self.referenced_ids = set() # type: Set[unicode]
+ self.indices = [] # type: List[Tuple[unicode, unicode]]
+ # (node name, content)
+ self.short_ids = {} # type: Dict[unicode, unicode]
+ # anchors --> short ids
+ self.node_names = {} # type: Dict[unicode, unicode]
+ # node name --> node's name to display
+ self.node_menus = {} # type: Dict[unicode, List[unicode]]
+ # node name --> node's menu entries
+ self.rellinks = {} # type: Dict[unicode, List[unicode]]
+ # node name --> (next, previous, up)
self.collect_indices()
self.collect_node_names()
self.collect_node_menus()
self.collect_rellinks()
- self.body = []
- self.context = []
- self.previous_section = None
+ self.body = [] # type: List[unicode]
+ self.context = [] # type: List[unicode]
+ self.previous_section = None # type: nodes.section
self.section_level = 0
self.seen_title = False
- self.next_section_ids = set()
+ self.next_section_ids = set() # type: Set[unicode]
self.escape_newlines = 0
self.escape_hyphens = 0
- self.curfilestack = []
- self.footnotestack = []
+ self.curfilestack = [] # type: List[unicode]
+ self.footnotestack = [] # type: List[Dict[unicode, List[Union[collected_footnote, bool]]]] # NOQA
self.in_footnote = 0
- self.handled_abbrs = set()
+ self.handled_abbrs = set() # type: Set[unicode]
+ self.colwidths = None # type: List[int]
def finish(self):
+ # type: () -> None
if self.previous_section is None:
self.add_menu('Top')
for index in self.indices:
name, content = index
pointers = tuple([name] + self.rellinks[name])
- self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
+ self.body.append('\n@node %s,%s,%s,%s\n' % pointers) # type: ignore
self.body.append('@unnumbered %s\n\n%s\n' % (name, content))
while self.referenced_ids:
@@ -207,6 +228,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Helper routines
def init_settings(self):
+ # type: () -> None
settings = self.settings = self.document.settings
elements = self.elements = self.default_elements.copy()
elements.update({
@@ -223,17 +245,18 @@ class TexinfoTranslator(nodes.NodeVisitor):
language=self.builder.config.language))
})
# title
- title = elements['title']
+ title = None # type: unicode
+ title = elements['title'] # type: ignore
if not title:
title = self.document.next_node(nodes.title)
- title = (title and title.astext()) or '<untitled>'
+ title = (title and title.astext()) or '<untitled>' # type: ignore
elements['title'] = self.escape_id(title) or '<untitled>'
# filename
if not elements['filename']:
elements['filename'] = self.document.get('source') or 'untitled'
- if elements['filename'][-4:] in ('.txt', '.rst'):
- elements['filename'] = elements['filename'][:-4]
- elements['filename'] += '.info'
+ if elements['filename'][-4:] in ('.txt', '.rst'): # type: ignore
+ elements['filename'] = elements['filename'][:-4] # type: ignore
+ elements['filename'] += '.info' # type: ignore
# direntry
if settings.texinfo_dir_entry:
entry = self.format_menu_entry(
@@ -250,11 +273,13 @@ class TexinfoTranslator(nodes.NodeVisitor):
elements.update(settings.texinfo_elements)
def collect_node_names(self):
+ # type: () -> None
"""Generates a unique id for each section.
Assigns the attribute ``node_name`` to each section."""
def add_node_name(name):
+ # type: (unicode) -> unicode
node_id = self.escape_id(name)
nth, suffix = 1, ''
while node_id + suffix in self.written_ids or \
@@ -280,6 +305,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
section['node_name'] = add_node_name(name)
def collect_node_menus(self):
+ # type: () -> None
"""Collect the menu entries for each "node" section."""
node_menus = self.node_menus
for node in ([self.document] +
@@ -300,10 +326,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
top['node_name'] = 'Top'
# handle the indices
for name, content in self.indices:
- node_menus[name] = ()
+ node_menus[name] = []
node_menus['Top'].append(name)
def collect_rellinks(self):
+ # type: () -> None
"""Collect the relative links (next, previous, up) for each "node"."""
rellinks = self.rellinks
node_menus = self.node_menus
@@ -337,6 +364,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
# characters.
def escape(self, s):
+ # type: (unicode) -> unicode
"""Return a string with Texinfo command characters escaped."""
s = s.replace('@', '@@')
s = s.replace('{', '@{')
@@ -347,6 +375,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s
def escape_arg(self, s):
+ # type: (unicode) -> unicode
"""Return an escaped string suitable for use as an argument
to a Texinfo command."""
s = self.escape(s)
@@ -357,6 +386,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s
def escape_id(self, s):
+ # type: (unicode) -> unicode
"""Return an escaped string suitable for node names and anchors."""
bad_chars = ',:.()'
for bc in bad_chars:
@@ -365,6 +395,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return self.escape(s)
def escape_menu(self, s):
+ # type: (unicode) -> unicode
"""Return an escaped string suitable for menu entries."""
s = self.escape_arg(s)
s = s.replace(':', ';')
@@ -372,11 +403,13 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s
def ensure_eol(self):
+ # type: () -> None
"""Ensure the last line in body is terminated by new line."""
if self.body and self.body[-1][-1:] != '\n':
self.body.append('\n')
def format_menu_entry(self, name, node_name, desc):
+ # type: (unicode, unicode, unicode) -> unicode
if name == node_name:
s = '* %s:: ' % (name,)
else:
@@ -387,6 +420,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return s + wdesc.strip() + '\n'
def add_menu_entries(self, entries, reg=re.compile(r'\s+---?\s+')):
+ # type: (List[unicode], Pattern) -> None
for entry in entries:
name = self.node_names[entry]
# special formatting for entries that are divided by an em-dash
@@ -404,6 +438,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append(self.format_menu_entry(name, entry, desc))
def add_menu(self, node_name):
+ # type: (unicode) -> None
entries = self.node_menus[node_name]
if not entries:
return
@@ -416,6 +451,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return
def _add_detailed_menu(name):
+ # type: (unicode) -> None
entries = self.node_menus[name]
if not entries:
return
@@ -432,7 +468,8 @@ class TexinfoTranslator(nodes.NodeVisitor):
'@end menu\n')
def tex_image_length(self, width_str):
- match = re.match('(\d*\.?\d*)\s*(\S*)', width_str)
+ # type: (unicode) -> unicode
+ match = re.match(r'(\d*\.?\d*)\s*(\S*)', width_str)
if not match:
# fallback
return width_str
@@ -447,15 +484,17 @@ class TexinfoTranslator(nodes.NodeVisitor):
return res
def collect_indices(self):
+ # type: () -> None
def generate(content, collapsed):
- ret = ['\n@menu\n']
+ # type: (List[Tuple[unicode, List[List[Union[unicode, int]]]]], bool) -> unicode
+ ret = ['\n@menu\n'] # type: List[unicode]
for letter, entries in content:
for entry in entries:
if not entry[3]:
continue
- name = self.escape_menu(entry[0])
+ name = self.escape_menu(entry[0]) # type: ignore
sid = self.get_short_id('%s:%s' % (entry[2], entry[3]))
- desc = self.escape_arg(entry[6])
+ desc = self.escape_arg(entry[6]) # type: ignore
me = self.format_menu_entry(name, sid, desc)
ret.append(me)
ret.append('@end menu\n')
@@ -485,7 +524,9 @@ class TexinfoTranslator(nodes.NodeVisitor):
# TODO: move this to sphinx.util
def collect_footnotes(self, node):
+ # type: (nodes.Node) -> Dict[unicode, List[Union[collected_footnote, bool]]]
def footnotes_under(n):
+ # type: (nodes.Node) -> Iterator[nodes.footnote]
if isinstance(n, nodes.footnote):
yield n
else:
@@ -494,7 +535,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
continue
for k in footnotes_under(c):
yield k
- fnotes = {}
+ fnotes = {} # type: Dict[unicode, List[Union[collected_footnote, bool]]]
for fn in footnotes_under(node):
num = fn.children[0].astext().strip()
fnotes[num] = [collected_footnote(*fn.children), False]
@@ -503,6 +544,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- xref handling
def get_short_id(self, id):
+ # type: (unicode) -> unicode
"""Return a shorter 'id' associated with ``id``."""
# Shorter ids improve paragraph filling in places
# that the id is hidden by Emacs.
@@ -514,6 +556,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
return sid
def add_anchor(self, id, node):
+ # type: (unicode, nodes.Node) -> None
if id.startswith('index-'):
return
id = self.curfilestack[-1] + ':' + id
@@ -525,6 +568,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.written_ids.add(id)
def add_xref(self, id, name, node):
+ # type: (unicode, unicode, nodes.Node) -> None
name = self.escape_menu(name)
sid = self.get_short_id(id)
self.body.append('@ref{%s,,%s}' % (sid, name))
@@ -534,16 +578,19 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Visiting
def visit_document(self, node):
+ # type: (nodes.Node) -> None
self.footnotestack.append(self.collect_footnotes(node))
self.curfilestack.append(node.get('docname', ''))
if 'docname' in node:
self.add_anchor(':doc', node)
def depart_document(self, node):
+ # type: (nodes.Node) -> None
self.footnotestack.pop()
self.curfilestack.pop()
def visit_Text(self, node):
+ # type: (nodes.Node) -> None
s = self.escape(node.astext())
if self.escape_newlines:
s = s.replace('\n', ' ')
@@ -553,9 +600,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append(s)
def depart_Text(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_section(self, node):
+ # type: (nodes.section) -> None
self.next_section_ids.update(node.get('ids', []))
if not self.seen_title:
return
@@ -566,7 +615,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
node_name = node['node_name']
pointers = tuple([node_name] + self.rellinks[node_name])
- self.body.append('\n@node %s,%s,%s,%s\n' % pointers)
+ self.body.append('\n@node %s,%s,%s,%s\n' % pointers) # type: ignore
for id in self.next_section_ids:
self.add_anchor(id, node)
@@ -575,6 +624,7 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.section_level += 1
def depart_section(self, node):
+ # type: (nodes.Node) -> None
self.section_level -= 1
headings = (
@@ -583,17 +633,18 @@ class TexinfoTranslator(nodes.NodeVisitor):
'@section',
'@subsection',
'@subsubsection',
- )
+ ) # type: Tuple[unicode, ...]
rubrics = (
'@heading',
'@subheading',
'@subsubheading',
- )
+ ) # type: Tuple[unicode, ...]
def visit_title(self, node):
+ # type: (nodes.Node) -> None
if not self.seen_title:
- self.seen_title = 1
+ self.seen_title = True
raise nodes.SkipNode
parent = node.parent
if isinstance(parent, nodes.table):
@@ -601,9 +652,9 @@ class TexinfoTranslator(nodes.NodeVisitor):
if isinstance(parent, (nodes.Admonition, nodes.sidebar, nodes.topic)):
raise nodes.SkipNode
elif not isinstance(parent, nodes.section):
- self.builder.warn(
- 'encountered title node not in section, topic, table, '
- 'admonition or sidebar', (self.curfilestack[-1], node.line))
+ logger.warning('encountered title node not in section, topic, table, '
+ 'admonition or sidebar',
+ location=(self.curfilestack[-1], node.line))
self.visit_rubric(node)
else:
try:
@@ -613,9 +664,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n%s ' % heading)
def depart_title(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n')
def visit_rubric(self, node):
+ # type: (nodes.Node) -> None
if len(node.children) == 1 and node.children[0].astext() in \
('Footnotes', _('Footnotes')):
raise nodes.SkipNode
@@ -626,17 +679,21 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n%s ' % rubric)
def depart_rubric(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n')
def visit_subtitle(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n@noindent\n')
def depart_subtitle(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n')
# -- References
def visit_target(self, node):
+ # type: (nodes.Node) -> None
# postpone the labels until after the sectioning command
parindex = node.parent.index(node)
try:
@@ -661,9 +718,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.add_anchor(id, node)
def depart_target(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_reference(self, node):
+ # type: (nodes.Node) -> None
# an xref's target is displayed in Info so we ignore a few
# cases for the sake of appearance
if isinstance(node.parent, (nodes.title, addnodes.desc_type)):
@@ -727,14 +786,17 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_title_reference(self, node):
+ # type: (nodes.Node) -> None
text = node.astext()
self.body.append('@cite{%s}' % self.escape_arg(text))
raise nodes.SkipNode
@@ -742,22 +804,28 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Blocks
def visit_paragraph(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def depart_paragraph(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@quotation\n')
def depart_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end quotation\n')
def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@example\n')
def depart_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end example\n')
@@ -765,101 +833,126 @@ class TexinfoTranslator(nodes.NodeVisitor):
depart_doctest_block = depart_literal_block
def visit_line_block(self, node):
+ # type: (nodes.Node) -> None
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
self.body.append('@display\n')
def depart_line_block(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@end display\n')
if not isinstance(node.parent, nodes.line_block):
self.body.append('\n\n')
def visit_line(self, node):
+ # type: (nodes.Node) -> None
self.escape_newlines += 1
def depart_line(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@w{ }\n')
self.escape_newlines -= 1
# -- Inline
def visit_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@strong{')
def depart_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@emph{')
def depart_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_literal(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@code{')
def depart_literal(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_superscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@w{^')
def depart_superscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_subscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@w{[')
def depart_subscript(self, node):
+ # type: (nodes.Node) -> None
self.body.append(']}')
# -- Footnotes
def visit_footnote(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_collected_footnote(self, node):
+ # type: (nodes.Node) -> None
self.in_footnote += 1
self.body.append('@footnote{')
def depart_collected_footnote(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
self.in_footnote -= 1
def visit_footnote_reference(self, node):
+ # type: (nodes.Node) -> None
num = node.astext().strip()
try:
footnode, used = self.footnotestack[-1][num]
except (KeyError, IndexError):
raise nodes.SkipNode
# footnotes are repeated for each reference
- footnode.walkabout(self)
+ footnode.walkabout(self) # type: ignore
raise nodes.SkipChildren
def visit_citation(self, node):
+ # type: (nodes.Node) -> None
for id in node.get('ids'):
self.add_anchor(id, node)
def depart_citation(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_citation_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@w{[')
def depart_citation_reference(self, node):
+ # type: (nodes.Node) -> None
self.body.append(']}')
# -- Lists
def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
bullet = node.get('bullet', '*')
self.body.append('\n\n@itemize %s\n' % bullet)
def depart_bullet_list(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end itemize\n')
def visit_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
# doesn't support Roman numerals
enum = node.get('enumtype', 'arabic')
starters = {'arabic': '',
@@ -869,78 +962,100 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('\n\n@enumerate %s\n' % start)
def depart_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end enumerate\n')
def visit_list_item(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@item ')
def depart_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Option List
def visit_option_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n@table @option\n')
def depart_option_list(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end table\n')
def visit_option_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_option_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_group(self, node):
+ # type: (nodes.Node) -> None
self.at_item_x = '@item'
def depart_option_group(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option(self, node):
+ # type: (nodes.Node) -> None
self.escape_hyphens += 1
self.body.append('\n%s ' % self.at_item_x)
self.at_item_x = '@itemx'
def depart_option(self, node):
+ # type: (nodes.Node) -> None
self.escape_hyphens -= 1
def visit_option_string(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_option_string(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_argument(self, node):
+ # type: (nodes.Node) -> None
self.body.append(node.get('delimiter', ' '))
def depart_option_argument(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_description(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def depart_description(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Definitions
def visit_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n@table @asis\n')
def depart_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end table\n')
def visit_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
self.at_item_x = '@item'
def depart_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_term(self, node):
+ # type: (nodes.Node) -> None
for id in node.get('ids'):
self.add_anchor(id, node)
# anchors and indexes need to go in front
@@ -952,45 +1067,45 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.at_item_x = '@itemx'
def depart_term(self, node):
- pass
-
- def visit_termsep(self, node):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
- 'This warning is displayed because some Sphinx extension '
- 'uses sphinx.addnodes.termsep. Please report it to '
- 'author of the extension.', RemovedInSphinx16Warning)
- self.body.append('\n%s ' % self.at_item_x)
-
- def depart_termsep(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_classifier(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' : ')
def depart_classifier(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_definition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def depart_definition(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Tables
def visit_table(self, node):
+ # type: (nodes.Node) -> None
self.entry_sep = '@item'
def depart_table(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@end multitable\n\n')
def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_colspec(self, node):
+ # type: (nodes.Node) -> None
self.colwidths.append(node['colwidth'])
if len(self.colwidths) != self.n_cols:
return
@@ -999,82 +1114,105 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('{%s} ' % ('x' * (n + 2)))
def depart_colspec(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_tgroup(self, node):
+ # type: (nodes.Node) -> None
self.colwidths = []
self.n_cols = node['cols']
def depart_tgroup(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_thead(self, node):
+ # type: (nodes.Node) -> None
self.entry_sep = '@headitem'
def depart_thead(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_tbody(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_tbody(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_row(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_row(self, node):
+ # type: (nodes.Node) -> None
self.entry_sep = '@item'
def visit_entry(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n%s\n' % self.entry_sep)
self.entry_sep = '@tab'
def depart_entry(self, node):
+ # type: (nodes.Node) -> None
for i in range(node.get('morecols', 0)):
self.body.append('\n@tab\n')
# -- Field Lists
def visit_field_list(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_field_list(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_field(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def depart_field(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_field_name(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@*')
def depart_field_name(self, node):
+ # type: (nodes.Node) -> None
self.body.append(': ')
def visit_field_body(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_field_body(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Admonitions
def visit_admonition(self, node, name=''):
+ # type: (nodes.Node, unicode) -> None
if not name:
name = self.escape(node[0].astext())
self.body.append(u'\n@cartouche\n@quotation %s ' % name)
def depart_admonition(self, node):
+ # type: (nodes.Node) -> None
self.ensure_eol()
self.body.append('@end quotation\n'
'@end cartouche\n')
def _make_visit_admonition(name):
+ # type: (unicode) -> Callable[[TexinfoTranslator, nodes.Node], None]
def visit(self, node):
+ # type: (nodes.Node) -> None
self.visit_admonition(node, admonitionlabels[name])
return visit
@@ -1100,32 +1238,41 @@ class TexinfoTranslator(nodes.NodeVisitor):
# -- Misc
def visit_docinfo(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_generated(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_header(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_footer(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_container(self, node):
+ # type: (nodes.Node) -> None
if node.get('literal_block'):
self.body.append('\n\n@float LiteralBlock\n')
def depart_container(self, node):
+ # type: (nodes.Node) -> None
if node.get('literal_block'):
self.body.append('\n@end float\n\n')
def visit_decoration(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_decoration(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_topic(self, node):
+ # type: (nodes.Node) -> None
# ignore TOC's since we have to have a "menu" anyway
if 'contents' in node.get('classes', []):
raise nodes.SkipNode
@@ -1134,48 +1281,59 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('%s\n' % self.escape(title.astext()))
def depart_topic(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_transition(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n%s\n\n' % ('_' * 66))
def depart_transition(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_attribution(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n@center --- ')
def depart_attribution(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n')
def visit_raw(self, node):
+ # type: (nodes.Node) -> None
format = node.get('format', '').split()
if 'texinfo' in format or 'texi' in format:
self.body.append(node.astext())
raise nodes.SkipNode
def visit_figure(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n@float Figure\n')
def depart_figure(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@end float\n\n')
def visit_caption(self, node):
+ # type: (nodes.Node) -> None
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('\n@caption{')
else:
- self.builder.warn('caption not inside a figure.',
- (self.curfilestack[-1], node.line))
+ logger.warning('caption not inside a figure.',
+ location=(self.curfilestack[-1], node.line))
def depart_caption(self, node):
+ # type: (nodes.Node) -> None
if (isinstance(node.parent, nodes.figure) or
(isinstance(node.parent, nodes.container) and
node.parent.get('literal_block'))):
self.body.append('}\n')
def visit_image(self, node):
+ # type: (nodes.Node) -> None
if node['uri'] in self.builder.images:
uri = self.builder.images[node['uri']]
else:
@@ -1196,73 +1354,93 @@ class TexinfoTranslator(nodes.NodeVisitor):
(name, width, height, alt, ext[1:]))
def depart_image(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_sidebar(self, node):
+ # type: (nodes.Node) -> None
self.visit_topic(node)
def depart_sidebar(self, node):
+ # type: (nodes.Node) -> None
self.depart_topic(node)
def visit_label(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@w{(')
def depart_label(self, node):
+ # type: (nodes.Node) -> None
self.body.append(')} ')
def visit_legend(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_legend(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_substitution_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_substitution_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_substitution_definition(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_system_message(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n@verbatim\n'
'<SYSTEM MESSAGE: %s>\n'
'@end verbatim\n' % node.astext())
raise nodes.SkipNode
def visit_comment(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
for line in node.astext().splitlines():
self.body.append('@c %s\n' % line)
raise nodes.SkipNode
def visit_problematic(self, node):
+ # type: (nodes.Node) -> None
self.body.append('>>')
def depart_problematic(self, node):
+ # type: (nodes.Node) -> None
self.body.append('<<')
def unimplemented_visit(self, node):
- self.builder.warn("unimplemented node type: %r" % node,
- (self.curfilestack[-1], node.line))
+ # type: (nodes.Node) -> None
+ logger.warning("unimplemented node type: %r", node,
+ location=(self.curfilestack[-1], node.line))
def unknown_visit(self, node):
- self.builder.warn("unknown node type: %r" % node,
- (self.curfilestack[-1], node.line))
+ # type: (nodes.Node) -> None
+ logger.warning("unknown node type: %r", node,
+ location=(self.curfilestack[-1], node.line))
def unknown_departure(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Sphinx specific
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.visit_literal_block(None)
names = []
for production in node:
@@ -1281,24 +1459,31 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_production(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_production(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@code{')
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('@code{')
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.body.append('}')
def visit_index(self, node):
+ # type: (nodes.Node) -> None
# terminate the line but don't prevent paragraph breaks
if isinstance(node.parent, nodes.paragraph):
self.ensure_eol()
@@ -1310,43 +1495,54 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.body.append('@geindex %s\n' % text)
def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_start_of_file(self, node):
+ # type: (nodes.Node) -> None
# add a document target
self.next_section_ids.add(':doc')
self.curfilestack.append(node['docname'])
self.footnotestack.append(self.collect_footnotes(node))
def depart_start_of_file(self, node):
+ # type: (nodes.Node) -> None
self.curfilestack.pop()
self.footnotestack.pop()
def visit_centered(self, node):
+ # type: (nodes.Node) -> None
txt = self.escape_arg(node.astext())
self.body.append('\n\n@center %s\n\n' % txt)
raise nodes.SkipNode
def visit_seealso(self, node):
+ # type: (nodes.Node) -> None
self.body.append(u'\n\n@subsubheading %s\n\n' %
admonitionlabels['seealso'])
def depart_seealso(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n')
def visit_meta(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_acks(self, node):
+ # type: (nodes.Node) -> None
self.body.append('\n\n')
self.body.append(', '.join(n.astext()
for n in node.children[0].children) + '.')
@@ -1354,23 +1550,28 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_highlightlang(self, node):
+ # type: (nodes.Node) -> None
pass
# -- Desc
def visit_desc(self, node):
+ # type: (nodes.Node) -> None
self.desc = node
self.at_deffnx = '@deffn'
def depart_desc(self, node):
+ # type: (nodes.Node) -> None
self.desc = None
self.ensure_eol()
self.body.append('@end deffn\n')
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.escape_hyphens += 1
objtype = node.parent['objtype']
if objtype != 'describe':
@@ -1378,11 +1579,11 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.add_anchor(id, node)
# use the full name of the objtype for the category
try:
- domain = self.builder.env.domains[node.parent['domain']]
+ domain = self.builder.env.get_domain(node.parent['domain'])
primary = self.builder.config.primary_domain
name = domain.get_type_name(domain.object_types[objtype],
primary == domain.name)
- except KeyError:
+ except (KeyError, ExtensionError):
name = objtype
# by convention, the deffn category should be capitalized like a title
category = self.escape_arg(smart_capwords(name))
@@ -1391,42 +1592,54 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.desc_type_name = name
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.body.append("\n")
self.escape_hyphens -= 1
self.desc_type_name = None
def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' -> ')
def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append(' (')
self.first_param = 1
def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.body.append(')')
def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not self.first_param:
self.body.append(', ')
else:
@@ -1438,12 +1651,15 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append('[')
def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.body.append(']')
def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
# Try to avoid duplicating info already displayed by the deffn category.
# e.g.
# @deffn {Class} Foo
@@ -1456,21 +1672,27 @@ class TexinfoTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_inline(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_inline(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
abbr = node.astext()
self.body.append('@abbr{')
if node.hasattr('explanation') and abbr not in self.handled_abbrs:
@@ -1480,42 +1702,54 @@ class TexinfoTranslator(nodes.NodeVisitor):
self.context.append('}')
def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
self.body.append(self.context.pop())
def visit_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.depart_literal_emphasis(node)
def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
self.visit_bullet_list(node)
def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
self.depart_bullet_list(node)
def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_math(self, node):
- self.builder.warn('using "math" markup without a Sphinx math extension '
- 'active, please use one of the math extensions '
- 'described at http://sphinx-doc.org/ext/math.html')
+ # type: (nodes.Node) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html')
raise nodes.SkipNode
visit_math_block = visit_math
diff --git a/sphinx/writers/text.py b/sphinx/writers/text.py
index f7366b93b..5ab5ec0e6 100644
--- a/sphinx/writers/text.py
+++ b/sphinx/writers/text.py
@@ -12,7 +12,6 @@ import os
import re
import textwrap
from itertools import groupby
-import warnings
from six.moves import zip_longest
@@ -20,8 +19,15 @@ from docutils import nodes, writers
from docutils.utils import column_width
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx16Warning
from sphinx.locale import admonitionlabels, _
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Tuple, Union # NOQA
+ from sphinx.builders.text import TextBuilder # NOQA
+
+logger = logging.getLogger(__name__)
class TextWrapper(textwrap.TextWrapper):
@@ -34,13 +40,14 @@ class TextWrapper(textwrap.TextWrapper):
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
def _wrap_chunks(self, chunks):
+ # type: (List[unicode]) -> List[unicode]
"""_wrap_chunks(chunks : [string]) -> [string]
The original _wrap_chunks uses len() to calculate width.
This method respects wide/fullwidth characters for width adjustment.
"""
drop_whitespace = getattr(self, 'drop_whitespace', True) # py25 compat
- lines = []
+ lines = [] # type: List[unicode]
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
@@ -82,6 +89,7 @@ class TextWrapper(textwrap.TextWrapper):
return lines
def _break_word(self, word, space_left):
+ # type: (unicode, int) -> Tuple[unicode, unicode]
"""_break_word(word : string, space_left : int) -> (string, string)
Break line by unicode width instead of len(word).
@@ -94,14 +102,16 @@ class TextWrapper(textwrap.TextWrapper):
return word, ''
def _split(self, text):
+ # type: (unicode) -> List[unicode]
"""_split(text : string) -> [string]
Override original method that only split by 'wordsep_re'.
This '_split' split wide-characters into chunk by one character.
"""
def split(t):
- return textwrap.TextWrapper._split(self, t)
- chunks = []
+ # type: (unicode) -> List[unicode]
+ return textwrap.TextWrapper._split(self, t) # type: ignore
+ chunks = [] # type: List[unicode]
for chunk in split(text):
for w, g in groupby(chunk, column_width):
if w == 1:
@@ -111,6 +121,7 @@ class TextWrapper(textwrap.TextWrapper):
return chunks
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ # type: (List[unicode], List[unicode], int, int) -> None
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
@@ -132,6 +143,7 @@ STDINDENT = 3
def my_wrap(text, width=MAXWIDTH, **kwargs):
+ # type: (unicode, int, Any) -> List[unicode]
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
@@ -139,16 +151,18 @@ def my_wrap(text, width=MAXWIDTH, **kwargs):
class TextWriter(writers.Writer):
supported = ('text',)
settings_spec = ('No options here.', '', ())
- settings_defaults = {}
+ settings_defaults = {} # type: Dict
output = None
def __init__(self, builder):
+ # type: (TextBuilder) -> None
writers.Writer.__init__(self)
self.builder = builder
self.translator_class = self.builder.translator_class or TextTranslator
def translate(self):
+ # type: () -> None
visitor = self.translator_class(self.document, self.builder)
self.document.walkabout(visitor)
self.output = visitor.body
@@ -158,6 +172,7 @@ class TextTranslator(nodes.NodeVisitor):
sectionchars = '*=-~"+`'
def __init__(self, document, builder):
+ # type: (nodes.Node, TextBuilder) -> None
nodes.NodeVisitor.__init__(self, document)
self.builder = builder
@@ -169,28 +184,32 @@ class TextTranslator(nodes.NodeVisitor):
else:
self.nl = '\n'
self.sectionchars = builder.config.text_sectionchars
- self.states = [[]]
+ self.states = [[]] # type: List[List[Tuple[int, Union[unicode, List[unicode]]]]]
self.stateindent = [0]
- self.list_counter = []
+ self.list_counter = [] # type: List[int]
self.sectionlevel = 0
self.lineblocklevel = 0
- self.table = None
+ self.table = None # type: List[Union[unicode, List[int]]]
def add_text(self, text):
+ # type: (unicode) -> None
self.states[-1].append((-1, text))
def new_state(self, indent=STDINDENT):
+ # type: (int) -> None
self.states.append([])
self.stateindent.append(indent)
def end_state(self, wrap=True, end=[''], first=None):
+ # type: (bool, List[unicode], unicode) -> None
content = self.states.pop()
maxindent = sum(self.stateindent)
indent = self.stateindent.pop()
- result = []
- toformat = []
+ result = [] # type: List[Tuple[int, List[unicode]]]
+ toformat = [] # type: List[unicode]
def do_format():
+ # type: () -> None
if not toformat:
return
if wrap:
@@ -202,10 +221,10 @@ class TextTranslator(nodes.NodeVisitor):
result.append((indent, res))
for itemindent, item in content:
if itemindent == -1:
- toformat.append(item)
+ toformat.append(item) # type: ignore
else:
do_format()
- result.append((indent + itemindent, item))
+ result.append((indent + itemindent, item)) # type: ignore
toformat = []
do_format()
if first is not None and result:
@@ -221,9 +240,11 @@ class TextTranslator(nodes.NodeVisitor):
self.states[-1].extend(result)
def visit_document(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_document(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
self.body = self.nl.join(line and (' ' * indent + line)
for indent, lines in self.states[0]
@@ -231,126 +252,161 @@ class TextTranslator(nodes.NodeVisitor):
# XXX header/footer?
def visit_highlightlang(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_section(self, node):
+ # type: (nodes.Node) -> None
self._title_char = self.sectionchars[self.sectionlevel]
self.sectionlevel += 1
def depart_section(self, node):
+ # type: (nodes.Node) -> None
self.sectionlevel -= 1
def visit_topic(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_topic(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
visit_sidebar = visit_topic
depart_sidebar = depart_topic
def visit_rubric(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
self.add_text('-[ ')
def depart_rubric(self, node):
+ # type: (nodes.Node) -> None
self.add_text(' ]-')
self.end_state()
def visit_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compound(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_glossary(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_title(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.Admonition):
self.add_text(node.astext() + ': ')
raise nodes.SkipNode
self.new_state(0)
def depart_title(self, node):
+ # type: (nodes.Node) -> None
if isinstance(node.parent, nodes.section):
char = self._title_char
else:
char = '^'
- text = ''.join(x[1] for x in self.states.pop() if x[0] == -1)
+ text = None # type: unicode
+ text = ''.join(x[1] for x in self.states.pop() if x[0] == -1) # type: ignore
self.stateindent.pop()
- title = ['', text, '%s' % (char * column_width(text)), '']
+ title = ['', text, '%s' % (char * column_width(text)), ''] # type: List[unicode]
if len(self.states) == 2 and len(self.states[-1]) == 0:
# remove an empty line before title if it is first section title in the document
title.pop(0)
self.states[-1].append((0, title))
def visit_subtitle(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_subtitle(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_attribution(self, node):
+ # type: (nodes.Node) -> None
self.add_text('-- ')
def depart_attribution(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_signature(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_desc_signature(self, node):
+ # type: (nodes.Node) -> None
# XXX: wrap signatures in a way that makes sense
self.end_state(wrap=False, end=None)
def visit_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_signature_line(self, node):
+ # type: (nodes.Node) -> None
self.add_text('\n')
def visit_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_name(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_addname(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_type(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_returns(self, node):
+ # type: (nodes.Node) -> None
self.add_text(' -> ')
def depart_desc_returns(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.add_text('(')
self.first_param = 1
def depart_desc_parameterlist(self, node):
+ # type: (nodes.Node) -> None
self.add_text(')')
def visit_desc_parameter(self, node):
+ # type: (nodes.Node) -> None
if not self.first_param:
self.add_text(', ')
else:
@@ -359,37 +415,48 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.add_text('[')
def depart_desc_optional(self, node):
+ # type: (nodes.Node) -> None
self.add_text(']')
def visit_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_desc_annotation(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
self.add_text(self.nl)
def depart_desc_content(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_figure(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
def depart_figure(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_caption(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_caption(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_productionlist(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
names = []
for production in node:
@@ -407,13 +474,16 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_footnote(self, node):
+ # type: (nodes.Node) -> None
self._footnote = node.children[0].astext().strip()
self.new_state(len(self._footnote) + 3)
def depart_footnote(self, node):
+ # type: (nodes.Node) -> None
self.end_state(first='[%s] ' % self._footnote)
def visit_citation(self, node):
+ # type: (nodes.Node) -> None
if len(node) and isinstance(node[0], nodes.label):
self._citlabel = node[0].astext()
else:
@@ -421,116 +491,150 @@ class TextTranslator(nodes.NodeVisitor):
self.new_state(len(self._citlabel) + 3)
def depart_citation(self, node):
+ # type: (nodes.Node) -> None
self.end_state(first='[%s] ' % self._citlabel)
def visit_label(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_legend(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_legend(self, node):
+ # type: (nodes.Node) -> None
pass
# XXX: option list could use some better styling
def visit_option_list(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_option_list(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_list_item(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_option_list_item(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_option_group(self, node):
+ # type: (nodes.Node) -> None
self._firstoption = True
def depart_option_group(self, node):
+ # type: (nodes.Node) -> None
self.add_text(' ')
def visit_option(self, node):
+ # type: (nodes.Node) -> None
if self._firstoption:
self._firstoption = False
else:
self.add_text(', ')
def depart_option(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_string(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_option_string(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_option_argument(self, node):
+ # type: (nodes.Node) -> None
self.add_text(node['delimiter'])
def depart_option_argument(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_description(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_description(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_tabular_col_spec(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_colspec(self, node):
- self.table[0].append(node['colwidth'])
+ # type: (nodes.Node) -> None
+ self.table[0].append(node['colwidth']) # type: ignore
raise nodes.SkipNode
def visit_tgroup(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_tgroup(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_thead(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_thead(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_tbody(self, node):
+ # type: (nodes.Node) -> None
self.table.append('sep')
def depart_tbody(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_row(self, node):
+ # type: (nodes.Node) -> None
self.table.append([])
def depart_row(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_entry(self, node):
+ # type: (nodes.Node) -> None
if 'morerows' in node or 'morecols' in node:
raise NotImplementedError('Column or row spanning cells are '
'not implemented.')
self.new_state(0)
def depart_entry(self, node):
+ # type: (nodes.Node) -> None
text = self.nl.join(self.nl.join(x[1]) for x in self.states.pop())
self.stateindent.pop()
- self.table[-1].append(text)
+ self.table[-1].append(text) # type: ignore
def visit_table(self, node):
+ # type: (nodes.Node) -> None
if self.table:
raise NotImplementedError('Nested tables are not supported.')
self.new_state(0)
self.table = [[]]
def depart_table(self, node):
- lines = self.table[1:]
- fmted_rows = []
- colwidths = self.table[0]
+ # type: (nodes.Node) -> None
+ lines = None # type: List[unicode]
+ lines = self.table[1:] # type: ignore
+ fmted_rows = [] # type: List[List[List[unicode]]]
+ colwidths = None # type: List[int]
+ colwidths = self.table[0] # type: ignore
realwidths = colwidths[:]
separator = 0
# don't allow paragraphs in table cells for now
@@ -538,7 +642,7 @@ class TextTranslator(nodes.NodeVisitor):
if line == 'sep':
separator = len(fmted_rows)
else:
- cells = []
+ cells = [] # type: List[List[unicode]]
for i, cell in enumerate(line):
par = my_wrap(cell, width=colwidths[i])
if par:
@@ -550,13 +654,15 @@ class TextTranslator(nodes.NodeVisitor):
fmted_rows.append(cells)
def writesep(char='-'):
- out = ['+']
+ # type: (unicode) -> None
+ out = ['+'] # type: List[unicode]
for width in realwidths:
out.append(char * (width + 2))
out.append('+')
self.add_text(''.join(out) + self.nl)
def writerow(row):
+ # type: (List[List[unicode]]) -> None
lines = zip_longest(*row)
for line in lines:
out = ['|']
@@ -581,6 +687,7 @@ class TextTranslator(nodes.NodeVisitor):
self.end_state(wrap=False)
def visit_acks(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
self.add_text(', '.join(n.astext() for n in node.children[0].children) +
'.')
@@ -588,12 +695,14 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_image(self, node):
+ # type: (nodes.Node) -> None
if 'alt' in node.attributes:
self.add_text(_('[image: %s]') % node['alt'])
self.add_text(_('[image]'))
raise nodes.SkipNode
def visit_transition(self, node):
+ # type: (nodes.Node) -> None
indent = sum(self.stateindent)
self.new_state(0)
self.add_text('=' * (MAXWIDTH - indent))
@@ -601,24 +710,31 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_bullet_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.append(-1)
def depart_bullet_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.pop()
def visit_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.append(node.get('start', 1) - 1)
def depart_enumerated_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.pop()
def visit_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.append(-2)
def depart_definition_list(self, node):
+ # type: (nodes.Node) -> None
self.list_counter.pop()
def visit_list_item(self, node):
+ # type: (nodes.Node) -> None
if self.list_counter[-1] == -1:
# bullet list
self.new_state(2)
@@ -631,6 +747,7 @@ class TextTranslator(nodes.NodeVisitor):
self.new_state(len(str(self.list_counter[-1])) + 2)
def depart_list_item(self, node):
+ # type: (nodes.Node) -> None
if self.list_counter[-1] == -1:
self.end_state(first='* ')
elif self.list_counter[-1] == -2:
@@ -639,97 +756,116 @@ class TextTranslator(nodes.NodeVisitor):
self.end_state(first='%s. ' % self.list_counter[-1])
def visit_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
self._classifier_count_in_li = len(node.traverse(nodes.classifier))
def depart_definition_list_item(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_term(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_term(self, node):
+ # type: (nodes.Node) -> None
if not self._classifier_count_in_li:
self.end_state(end=None)
- def visit_termsep(self, node):
- warnings.warn('sphinx.addnodes.termsep will be removed at Sphinx-1.6. '
- 'This warning is displayed because some Sphinx extension '
- 'uses sphinx.addnodes.termsep. Please report it to '
- 'author of the extension.', RemovedInSphinx16Warning)
- self.add_text(', ')
- raise nodes.SkipNode
-
def visit_classifier(self, node):
+ # type: (nodes.Node) -> None
self.add_text(' : ')
def depart_classifier(self, node):
+ # type: (nodes.Node) -> None
self._classifier_count_in_li -= 1
if not self._classifier_count_in_li:
self.end_state(end=None)
def visit_definition(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
def depart_definition(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_field_list(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_field_list(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_field(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_field(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_field_name(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_field_name(self, node):
+ # type: (nodes.Node) -> None
self.add_text(':')
self.end_state(end=None)
def visit_field_body(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
def depart_field_body(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_centered(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_centered(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_hlist(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_hlist(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_hlistcol(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_admonition(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_admonition(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def _visit_admonition(self, node):
+ # type: (nodes.Node) -> None
self.new_state(2)
if isinstance(node.children[0], nodes.Sequential):
self.add_text(self.nl)
def _make_depart_admonition(name):
+ # type: (unicode) -> Callable[[TextTranslator, nodes.Node], None]
def depart_admonition(self, node):
+ # type: (nodes.NodeVisitor, nodes.Node) -> None
self.end_state(first=admonitionlabels[name] + ': ')
return depart_admonition
@@ -755,211 +891,274 @@ class TextTranslator(nodes.NodeVisitor):
depart_seealso = _make_depart_admonition('seealso')
def visit_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_versionmodified(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
def depart_literal_block(self, node):
+ # type: (nodes.Node) -> None
self.end_state(wrap=False)
def visit_doctest_block(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
def depart_doctest_block(self, node):
+ # type: (nodes.Node) -> None
self.end_state(wrap=False)
def visit_line_block(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
self.lineblocklevel += 1
def depart_line_block(self, node):
+ # type: (nodes.Node) -> None
self.lineblocklevel -= 1
self.end_state(wrap=False, end=None)
if not self.lineblocklevel:
self.add_text('\n')
def visit_line(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_line(self, node):
+ # type: (nodes.Node) -> None
self.add_text('\n')
def visit_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.new_state()
def depart_block_quote(self, node):
+ # type: (nodes.Node) -> None
self.end_state()
def visit_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_compact_paragraph(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_paragraph(self, node):
+ # type: (nodes.Node) -> None
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.new_state(0)
def depart_paragraph(self, node):
+ # type: (nodes.Node) -> None
if not isinstance(node.parent, nodes.Admonition) or \
isinstance(node.parent, addnodes.seealso):
self.end_state()
def visit_target(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_index(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_toctree(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_substitution_definition(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_pending_xref(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_number_reference(self, node):
+ # type: (nodes.Node) -> None
text = nodes.Text(node.get('title', '#'))
self.visit_Text(text)
raise nodes.SkipNode
def visit_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_download_reference(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def depart_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def visit_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def depart_literal_emphasis(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def visit_strong(self, node):
+ # type: (nodes.Node) -> None
self.add_text('**')
def depart_strong(self, node):
+ # type: (nodes.Node) -> None
self.add_text('**')
def visit_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.add_text('**')
def depart_literal_strong(self, node):
+ # type: (nodes.Node) -> None
self.add_text('**')
def visit_abbreviation(self, node):
+ # type: (nodes.Node) -> None
self.add_text('')
def depart_abbreviation(self, node):
+ # type: (nodes.Node) -> None
if node.hasattr('explanation'):
self.add_text(' (%s)' % node['explanation'])
def visit_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.visit_literal_emphasis(node)
def depart_manpage(self, node):
+ # type: (nodes.Node) -> Any
return self.depart_literal_emphasis(node)
def visit_title_reference(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def depart_title_reference(self, node):
+ # type: (nodes.Node) -> None
self.add_text('*')
def visit_literal(self, node):
+ # type: (nodes.Node) -> None
self.add_text('"')
def depart_literal(self, node):
+ # type: (nodes.Node) -> None
self.add_text('"')
def visit_subscript(self, node):
+ # type: (nodes.Node) -> None
self.add_text('_')
def depart_subscript(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_superscript(self, node):
+ # type: (nodes.Node) -> None
self.add_text('^')
def depart_superscript(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_footnote_reference(self, node):
+ # type: (nodes.Node) -> None
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_citation_reference(self, node):
+ # type: (nodes.Node) -> None
self.add_text('[%s]' % node.astext())
raise nodes.SkipNode
def visit_Text(self, node):
+ # type: (nodes.Node) -> None
self.add_text(node.astext())
def depart_Text(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_generated(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_generated(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_inline(self, node):
+ # type: (nodes.Node) -> None
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def depart_inline(self, node):
+ # type: (nodes.Node) -> None
if 'xref' in node['classes'] or 'term' in node['classes']:
self.add_text('*')
def visit_container(self, node):
+ # type: (nodes.Node) -> None
pass
def depart_container(self, node):
+ # type: (nodes.Node) -> None
pass
def visit_problematic(self, node):
+ # type: (nodes.Node) -> None
self.add_text('>>')
def depart_problematic(self, node):
+ # type: (nodes.Node) -> None
self.add_text('<<')
def visit_system_message(self, node):
+ # type: (nodes.Node) -> None
self.new_state(0)
self.add_text('<SYSTEM MESSAGE: %s>' % node.astext())
self.end_state()
raise nodes.SkipNode
def visit_comment(self, node):
+ # type: (nodes.Node) -> None
raise nodes.SkipNode
def visit_meta(self, node):
+ # type: (nodes.Node) -> None
# only valid for HTML
raise nodes.SkipNode
def visit_raw(self, node):
+ # type: (nodes.Node) -> None
if 'text' in node.get('format', '').split():
self.new_state(0)
self.add_text(node.astext())
@@ -967,13 +1166,15 @@ class TextTranslator(nodes.NodeVisitor):
raise nodes.SkipNode
def visit_math(self, node):
- self.builder.warn('using "math" markup without a Sphinx math extension '
- 'active, please use one of the math extensions '
- 'described at http://sphinx-doc.org/ext/math.html',
- (self.builder.current_docname, node.line))
+ # type: (nodes.Node) -> None
+ logger.warning('using "math" markup without a Sphinx math extension '
+ 'active, please use one of the math extensions '
+ 'described at http://sphinx-doc.org/ext/math.html',
+ location=(self.builder.current_docname, node.line))
raise nodes.SkipNode
visit_math_block = visit_math
def unknown_visit(self, node):
+ # type: (nodes.Node) -> None
raise NotImplementedError('Unknown node: ' + node.__class__.__name__)
diff --git a/sphinx/writers/xml.py b/sphinx/writers/xml.py
index bc0241a12..7d87e0bd6 100644
--- a/sphinx/writers/xml.py
+++ b/sphinx/writers/xml.py
@@ -12,16 +12,23 @@
from docutils import writers
from docutils.writers.docutils_xml import Writer as BaseXMLWriter
+if False:
+ # For type annotation
+ from typing import Any, Tuple # NOQA
+ from sphinx.builders import Builder # NOQA
+
class XMLWriter(BaseXMLWriter):
def __init__(self, builder):
+ # type: (Builder) -> None
BaseXMLWriter.__init__(self)
self.builder = builder
if self.builder.translator_class:
self.translator_class = self.builder.translator_class
def translate(self, *args, **kwargs):
+ # type: (Any, Any) -> None
self.document.settings.newlines = \
self.document.settings.indents = \
self.builder.env.config.xml_pretty
@@ -36,18 +43,21 @@ class PseudoXMLWriter(writers.Writer):
"""Formats this writer supports."""
config_section = 'pseudoxml writer'
- config_section_dependencies = ('writers',)
+ config_section_dependencies = ('writers',) # type: Tuple[unicode]
output = None
"""Final translated form of `document`."""
def __init__(self, builder):
+ # type: (Builder) -> None
writers.Writer.__init__(self)
self.builder = builder
def translate(self):
+ # type: () -> None
self.output = self.document.pformat()
def supports(self, format):
+ # type: (unicode) -> bool
"""This writer supports all format-specific elements."""
return True
diff --git a/test-reqs.txt b/test-reqs.txt
index 1877886c1..e06513a01 100644
--- a/test-reqs.txt
+++ b/test-reqs.txt
@@ -16,3 +16,4 @@ imagesize
requests
html5lib
enum34
+typing
diff --git a/tests/etree13/ElementPath.py b/tests/etree13/ElementPath.py
new file mode 100644
index 000000000..8cf1ab578
--- /dev/null
+++ b/tests/etree13/ElementPath.py
@@ -0,0 +1,226 @@
+#
+# ElementTree
+# $Id$
+#
+# limited xpath support for element trees
+#
+# history:
+# 2003-05-23 fl created
+# 2003-05-28 fl added support for // etc
+# 2003-08-27 fl fixed parsing of periods in element names
+# 2007-09-10 fl new selection engine
+#
+# Copyright (c) 2003-2007 by Fredrik Lundh. All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+##
+# Implementation module for XPath support. There's usually no reason
+# to import this module directly; the <b>ElementTree</b> does this for
+# you, if needed.
+##
+
+import re
+
+xpath_tokenizer = re.compile(
+ r"("
+ r"'[^']*'|\"[^\"]*\"|"
+ r"::|"
+ r"//?|"
+ r"\.\.|"
+ r"\(\)|"
+ r"[/.*:\[\]\(\)@=])|"
+ r"((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|"
+ r"\s+"
+ ).findall
+
+def prepare_tag(next, token):
+ tag = token[1]
+ def select(context, result):
+ for elem in result:
+ for e in elem:
+ if e.tag == tag:
+ yield e
+ return select
+
+def prepare_star(next, token):
+ def select(context, result):
+ for elem in result:
+ for e in elem:
+ yield e
+ return select
+
+def prepare_dot(next, token):
+ def select(context, result):
+ for elem in result:
+ yield elem
+ return select
+
+def prepare_iter(next, token):
+ token = next()
+ if token[0] == "*":
+ tag = "*"
+ elif not token[0]:
+ tag = token[1]
+ else:
+ raise SyntaxError
+ def select(context, result):
+ for elem in result:
+ for e in elem.iter(tag):
+ if e is not elem:
+ yield e
+ return select
+
+def prepare_dot_dot(next, token):
+ def select(context, result):
+ parent_map = context.parent_map
+ if parent_map is None:
+ context.parent_map = parent_map = {}
+ for p in context.root.iter():
+ for e in p:
+ parent_map[e] = p
+ for elem in result:
+ if elem in parent_map:
+ yield parent_map[elem]
+ return select
+
+def prepare_predicate(next, token):
+ # this one should probably be refactored...
+ token = next()
+ if token[0] == "@":
+ # attribute
+ token = next()
+ if token[0]:
+ raise SyntaxError("invalid attribute predicate")
+ key = token[1]
+ token = next()
+ if token[0] == "]":
+ def select(context, result):
+ for elem in result:
+ if elem.get(key) is not None:
+ yield elem
+ elif token[0] == "=":
+ value = next()[0]
+ if value[:1] == "'" or value[:1] == '"':
+ value = value[1:-1]
+ else:
+ raise SyntaxError("invalid comparision target")
+ token = next()
+ def select(context, result):
+ for elem in result:
+ if elem.get(key) == value:
+ yield elem
+ if token[0] != "]":
+ raise SyntaxError("invalid attribute predicate")
+ elif not token[0]:
+ tag = token[1]
+ token = next()
+ if token[0] != "]":
+ raise SyntaxError("invalid node predicate")
+ def select(context, result):
+ for elem in result:
+ if elem.find(tag) is not None:
+ yield elem
+ else:
+ raise SyntaxError("invalid predicate")
+ return select
+
+ops = {
+ "": prepare_tag,
+ "*": prepare_star,
+ ".": prepare_dot,
+ "..": prepare_dot_dot,
+ "//": prepare_iter,
+ "[": prepare_predicate,
+ }
+
+_cache = {}
+
+class _SelectorContext:
+ parent_map = None
+ def __init__(self, root):
+ self.root = root
+
+# --------------------------------------------------------------------
+
+##
+# Find first matching object.
+
+def find(elem, path):
+ try:
+ return next(findall(elem, path))
+ except StopIteration:
+ return None
+
+##
+# Find all matching objects.
+
+def findall(elem, path):
+ # compile selector pattern
+ try:
+ selector = _cache[path]
+ except KeyError:
+ if len(_cache) > 100:
+ _cache.clear()
+ if path[:1] == "/":
+ raise SyntaxError("cannot use absolute path on element")
+ stream = iter(xpath_tokenizer(path))
+ next_ = lambda: next(stream); token = next_()
+ selector = []
+ while 1:
+ try:
+ selector.append(ops[token[0]](next_, token))
+ except StopIteration:
+ raise SyntaxError("invalid path")
+ try:
+ token = next_()
+ if token[0] == "/":
+ token = next_()
+ except StopIteration:
+ break
+ _cache[path] = selector
+ # execute selector pattern
+ result = [elem]
+ context = _SelectorContext(elem)
+ for select in selector:
+ result = select(context, result)
+ return result
+
+##
+# Find text for first matching object.
+
+def findtext(elem, path, default=None):
+ try:
+ elem = next(findall(elem, path))
+ return elem.text
+ except StopIteration:
+ return default
diff --git a/tests/etree13/ElementTree.py b/tests/etree13/ElementTree.py
new file mode 100644
index 000000000..134abf313
--- /dev/null
+++ b/tests/etree13/ElementTree.py
@@ -0,0 +1,1553 @@
+#
+# ElementTree
+# $Id$
+#
+# light-weight XML support for Python 2.2 and later.
+#
+# history:
+# 2001-10-20 fl created (from various sources)
+# 2001-11-01 fl return root from parse method
+# 2002-02-16 fl sort attributes in lexical order
+# 2002-04-06 fl TreeBuilder refactoring, added PythonDoc markup
+# 2002-05-01 fl finished TreeBuilder refactoring
+# 2002-07-14 fl added basic namespace support to ElementTree.write
+# 2002-07-25 fl added QName attribute support
+# 2002-10-20 fl fixed encoding in write
+# 2002-11-24 fl changed default encoding to ascii; fixed attribute encoding
+# 2002-11-27 fl accept file objects or file names for parse/write
+# 2002-12-04 fl moved XMLTreeBuilder back to this module
+# 2003-01-11 fl fixed entity encoding glitch for us-ascii
+# 2003-02-13 fl added XML literal factory
+# 2003-02-21 fl added ProcessingInstruction/PI factory
+# 2003-05-11 fl added tostring/fromstring helpers
+# 2003-05-26 fl added ElementPath support
+# 2003-07-05 fl added makeelement factory method
+# 2003-07-28 fl added more well-known namespace prefixes
+# 2003-08-15 fl fixed typo in ElementTree.findtext (Thomas Dartsch)
+# 2003-09-04 fl fall back on emulator if ElementPath is not installed
+# 2003-10-31 fl markup updates
+# 2003-11-15 fl fixed nested namespace bug
+# 2004-03-28 fl added XMLID helper
+# 2004-06-02 fl added default support to findtext
+# 2004-06-08 fl fixed encoding of non-ascii element/attribute names
+# 2004-08-23 fl take advantage of post-2.1 expat features
+# 2004-09-03 fl made Element class visible; removed factory
+# 2005-02-01 fl added iterparse implementation
+# 2005-03-02 fl fixed iterparse support for pre-2.2 versions
+# 2005-11-12 fl added tostringlist/fromstringlist helpers
+# 2006-07-05 fl merged in selected changes from the 1.3 sandbox
+# 2006-07-05 fl removed support for 2.1 and earlier
+# 2007-06-21 fl added deprecation/future warnings
+# 2007-08-25 fl added doctype hook, added parser version attribute etc
+# 2007-08-26 fl added new serializer code (better namespace handling, etc)
+# 2007-08-27 fl warn for broken /tag searches on tree level
+# 2007-09-02 fl added html/text methods to serializer (experimental)
+# 2007-09-05 fl added method argument to tostring/tostringlist
+# 2007-09-06 fl improved error handling
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh. All rights reserved.
+#
+# fredrik@pythonware.com
+# http://www.pythonware.com
+#
+# --------------------------------------------------------------------
+# The ElementTree toolkit is
+#
+# Copyright (c) 1999-2007 by Fredrik Lundh
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of
+# Secret Labs AB or the author not be used in advertising or publicity
+# pertaining to distribution of the software without specific, written
+# prior permission.
+#
+# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
+# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
+# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
+# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
+# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
+# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
+# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+# OF THIS SOFTWARE.
+# --------------------------------------------------------------------
+
+from __future__ import generators
+from __future__ import absolute_import
+
+from six import string_types
+
+
+__all__ = [
+ # public symbols
+ "Comment",
+ "dump",
+ "Element", "ElementTree",
+ "fromstring", "fromstringlist",
+ "iselement", "iterparse",
+ "parse", "ParseError",
+ "PI", "ProcessingInstruction",
+ "QName",
+ "SubElement",
+ "tostring", "tostringlist",
+ "TreeBuilder",
+ "VERSION",
+ "XML",
+ "XMLParser", "XMLTreeBuilder",
+ ]
+
+##
+# The <b>Element</b> type is a flexible container object, designed to
+# store hierarchical data structures in memory. The type can be
+# described as a cross between a list and a dictionary.
+# <p>
+# Each element has a number of properties associated with it:
+# <ul>
+# <li>a <i>tag</i>. This is a string identifying what kind of data
+# this element represents (the element type, in other words).</li>
+# <li>a number of <i>attributes</i>, stored in a Python dictionary.</li>
+# <li>a <i>text</i> string.</li>
+# <li>an optional <i>tail</i> string.</li>
+# <li>a number of <i>child elements</i>, stored in a Python sequence</li>
+# </ul>
+#
+# To create an element instance, use the {@link #Element} constructor
+# or the {@link #SubElement} factory function.
+# <p>
+# The {@link #ElementTree} class can be used to wrap an element
+# structure, and convert it from and to XML.
+##
+
+import sys, re
+
+class _SimpleElementPath(object):
+ # emulate pre-1.2 find/findtext/findall behaviour
+ def find(self, element, tag):
+ for elem in element:
+ if elem.tag == tag:
+ return elem
+ return None
+ def findtext(self, element, tag, default=None):
+ for elem in element:
+ if elem.tag == tag:
+ return elem.text or ""
+ return default
+ def findall(self, element, tag):
+ if tag[:3] == ".//":
+ return element.getiterator(tag[3:])
+ result = []
+ for elem in element:
+ if elem.tag == tag:
+ result.append(elem)
+ return result
+
+try:
+ from . import ElementPath
+except ImportError:
+ # FIXME: issue warning in this case?
+ ElementPath = _SimpleElementPath()
+
+VERSION = "1.3a2"
+
+class ParseError(SyntaxError):
+ pass
+
+# --------------------------------------------------------------------
+
+##
+# Checks if an object appears to be a valid element object.
+#
+# @param An element instance.
+# @return A true value if this is an element object.
+# @defreturn flag
+
+def iselement(element):
+ # FIXME: not sure about this; might be a better idea to look
+ # for tag/attrib/text attributes
+ return isinstance(element, Element) or hasattr(element, "tag")
+
+##
+# Element class. This class defines the Element interface, and
+# provides a reference implementation of this interface.
+# <p>
+# The element name, attribute names, and attribute values can be
+# either 8-bit ASCII strings or Unicode strings.
+#
+# @param tag The element name.
+# @param attrib An optional dictionary, containing element attributes.
+# @param **extra Additional attributes, given as keyword arguments.
+# @see Element
+# @see SubElement
+# @see Comment
+# @see ProcessingInstruction
+
+class Element(object):
+ # <tag attrib>text<child/>...</tag>tail
+
+ ##
+ # (Attribute) Element tag.
+
+ tag = None
+
+ ##
+ # (Attribute) Element attribute dictionary. Where possible, use
+ # {@link #Element.get},
+ # {@link #Element.set},
+ # {@link #Element.keys}, and
+ # {@link #Element.items} to access
+ # element attributes.
+
+ attrib = None
+
+ ##
+ # (Attribute) Text before first subelement. This is either a
+ # string or the value None, if there was no text.
+
+ text = None
+
+ ##
+ # (Attribute) Text after this element's end tag, but before the
+ # next sibling element's start tag. This is either a string or
+ # the value None, if there was no text.
+
+ tail = None # text after end tag, if any
+
+ def __init__(self, tag, attrib={}, **extra):
+ attrib = attrib.copy()
+ attrib.update(extra)
+ self.tag = tag
+ self.attrib = attrib
+ self._children = []
+
+ def __repr__(self):
+ return "<Element %s at %x>" % (repr(self.tag), id(self))
+
+ ##
+ # Creates a new element object of the same type as this element.
+ #
+ # @param tag Element tag.
+ # @param attrib Element attributes, given as a dictionary.
+ # @return A new element instance.
+
+ def makeelement(self, tag, attrib):
+ return Element(tag, attrib)
+
+ ##
+ # Returns the number of subelements.
+ #
+ # @return The number of subelements.
+
+ def __len__(self):
+ return len(self._children)
+
+ def __bool__(self):
+ import warnings
+ warnings.warn(
+ "The behavior of this method will change in future versions. "
+ "Use specific 'len(elem)' or 'elem is not None' test instead.",
+ FutureWarning
+ )
+ return len(self._children) != 0 # emulate old behaviour
+ __nonzero__ = __bool__ # for python2 compatibility
+
+ ##
+ # Returns the given subelement.
+ #
+ # @param index What subelement to return.
+ # @return The given subelement.
+ # @exception IndexError If the given element does not exist.
+
+ def __getitem__(self, index):
+ return self._children[index]
+
+ ##
+ # Replaces the given subelement.
+ #
+ # @param index What subelement to replace.
+ # @param element The new element value.
+ # @exception IndexError If the given element does not exist.
+ # @exception AssertionError If element is not a valid object.
+
+ def __setitem__(self, index, element):
+ assert iselement(element)
+ self._children[index] = element
+
+ ##
+ # Deletes the given subelement.
+ #
+ # @param index What subelement to delete.
+ # @exception IndexError If the given element does not exist.
+
+ def __delitem__(self, index):
+ del self._children[index]
+
+ ##
+ # Returns a list containing subelements in the given range.
+ #
+ # @param start The first subelement to return.
+ # @param stop The first subelement that shouldn't be returned.
+ # @return A sequence object containing subelements.
+
+ def __getslice__(self, start, stop):
+ return self._children[start:stop]
+
+ ##
+ # Replaces a number of subelements with elements from a sequence.
+ #
+ # @param start The first subelement to replace.
+ # @param stop The first subelement that shouldn't be replaced.
+ # @param elements A sequence object with zero or more elements.
+ # @exception AssertionError If a sequence member is not a valid object.
+
+ def __setslice__(self, start, stop, elements):
+ for element in elements:
+ assert iselement(element)
+ self._children[start:stop] = list(elements)
+
+ ##
+ # Deletes a number of subelements.
+ #
+ # @param start The first subelement to delete.
+ # @param stop The first subelement to leave in there.
+
+ def __delslice__(self, start, stop):
+ del self._children[start:stop]
+
+ ##
+ # Adds a subelement to the end of this element.
+ #
+ # @param element The element to add.
+ # @exception AssertionError If a sequence member is not a valid object.
+
+ def append(self, element):
+ assert iselement(element)
+ self._children.append(element)
+
+ ##
+ # Appends subelements from a sequence.
+ #
+ # @param elements A sequence object with zero or more elements.
+ # @exception AssertionError If a subelement is not a valid object.
+ # @since 1.3
+
+ def extend(self, elements):
+ for element in elements:
+ assert iselement(element)
+ self._children.extend(elements)
+
+ ##
+ # Inserts a subelement at the given position in this element.
+ #
+ # @param index Where to insert the new subelement.
+ # @exception AssertionError If the element is not a valid object.
+
+ def insert(self, index, element):
+ assert iselement(element)
+ self._children.insert(index, element)
+
+ ##
+ # Removes a matching subelement. Unlike the <b>find</b> methods,
+ # this method compares elements based on identity, not on tag
+ # value or contents.
+ #
+ # @param element What element to remove.
+ # @exception ValueError If a matching element could not be found.
+ # @exception AssertionError If the element is not a valid object.
+
+ def remove(self, element):
+ assert iselement(element)
+ self._children.remove(element)
+
+ ##
+ # (Deprecated) Returns all subelements. The elements are returned
+ # in document order.
+ #
+ # @return A list of subelements.
+ # @defreturn list of Element instances
+
+ def getchildren(self):
+ import warnings
+ warnings.warn(
+ "This method will be removed in future versions. "
+ "Use 'list(elem)' or iteration over elem instead.",
+ DeprecationWarning
+ )
+ return self._children
+
+ ##
+ # Finds the first matching subelement, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @return The first matching element, or None if no element was found.
+ # @defreturn Element or None
+
+ def find(self, path):
+ return ElementPath.find(self, path)
+
+ ##
+ # Finds text for the first matching subelement, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @param default What to return if the element was not found.
+ # @return The text content of the first matching element, or the
+ # default value no element was found. Note that if the element
+ # has is found, but has no text content, this method returns an
+ # empty string.
+ # @defreturn string
+
+ def findtext(self, path, default=None):
+ return ElementPath.findtext(self, path, default)
+
+ ##
+ # Finds all matching subelements, by tag name or path.
+ #
+ # @param path What element to look for.
+ # @return A list or iterator containing all matching elements,
+ # in document order.
+ # @defreturn list of Element instances
+
+ def findall(self, path):
+ return ElementPath.findall(self, path)
+
+ ##
+ # Resets an element. This function removes all subelements, clears
+ # all attributes, and sets the text and tail attributes to None.
+
+ def clear(self):
+ self.attrib.clear()
+ self._children = []
+ self.text = self.tail = None
+
+ ##
+ # Gets an element attribute.
+ #
+ # @param key What attribute to look for.
+ # @param default What to return if the attribute was not found.
+ # @return The attribute value, or the default value, if the
+ # attribute was not found.
+ # @defreturn string or None
+
+ def get(self, key, default=None):
+ return self.attrib.get(key, default)
+
+ ##
+ # Sets an element attribute.
+ #
+ # @param key What attribute to set.
+ # @param value The attribute value.
+
+ def set(self, key, value):
+ self.attrib[key] = value
+
+ ##
+ # Gets a list of attribute names. The names are returned in an
+ # arbitrary order (just like for an ordinary Python dictionary).
+ #
+ # @return A list of element attribute names.
+ # @defreturn list of strings
+
+ def keys(self):
+ return self.attrib.keys()
+
+ ##
+ # Gets element attributes, as a sequence. The attributes are
+ # returned in an arbitrary order.
+ #
+ # @return A list of (name, value) tuples for all attributes.
+ # @defreturn list of (string, string) tuples
+
+ def items(self):
+ return self.attrib.items()
+
+ ##
+ # Creates a tree iterator. The iterator loops over this element
+ # and all subelements, in document order, and returns all elements
+ # with a matching tag.
+ # <p>
+ # If the tree structure is modified during iteration, new or removed
+ # elements may or may not be included. To get a stable set, use the
+ # list() function on the iterator, and loop over the resulting list.
+ #
+ # @param tag What tags to look for (default is to return all elements).
+ # @return An iterator containing all the matching elements.
+ # @defreturn iterator
+
+ def iter(self, tag=None):
+ if tag == "*":
+ tag = None
+ if tag is None or self.tag == tag:
+ yield self
+ for e in self._children:
+ for e in e.iter(tag):
+ yield e
+
+ # compatibility (FIXME: preserve list behaviour too? see below)
+ getiterator = iter
+
+ # def getiterator(self, tag=None):
+ # return list(tag)
+
+ ##
+ # Creates a text iterator. The iterator loops over this element
+ # and all subelements, in document order, and returns all inner
+ # text.
+ #
+ # @return An iterator containing all inner text.
+ # @defreturn iterator
+
+ def itertext(self):
+ if self.text:
+ yield self.text
+ for e in self:
+ for s in e.itertext():
+ yield s
+ if e.tail:
+ yield e.tail
+
# compatibility: older ElementTree releases exposed the element type
# under these private names; kept so existing isinstance checks work
_Element = _ElementInterface = Element
+
##
# Subelement factory.  Creates a new element via the parent's
# makeelement() and appends it to the parent.
# <p>
# The element name, attribute names, and attribute values can be
# either 8-bit ASCII strings or Unicode strings.
#
# @param parent The parent element.
# @param tag The subelement name.
# @param attrib An optional dictionary, containing element attributes.
# @param **extra Additional attributes, given as keyword arguments.
# @return An element instance.
# @defreturn Element

def SubElement(parent, tag, attrib={}, **extra):
    # copy first so the caller's dictionary (and the shared default)
    # is never mutated
    merged = dict(attrib)
    merged.update(extra)
    element = parent.makeelement(tag, merged)
    parent.append(element)
    return element
+
##
# Comment element factory.  Creates a special element that the standard
# serializer writes out as an XML comment; the element's tag is the
# Comment function itself, which is how the serializer recognizes it.
#
# @param text A string containing the comment string.
# @return An element instance, representing a comment.
# @defreturn Element

def Comment(text=None):
    comment = Element(Comment)
    comment.text = text
    return comment
+
##
# PI element factory.  Creates a special element that the standard
# serializer writes out as an XML processing instruction; the element's
# tag is this function itself, which is how the serializer recognizes it.
#
# @param target A string containing the PI target.
# @param text A string containing the PI contents, if any.
# @return An element instance, representing a PI.
# @defreturn Element

def ProcessingInstruction(target, text=None):
    pi = Element(ProcessingInstruction)
    content = target
    if text:
        content = "%s %s" % (content, text)
    pi.text = content
    return pi

PI = ProcessingInstruction
+
##
# QName wrapper.  This can be used to wrap a QName attribute value, in
# order to get proper namespace handling on output.
#
# @param text A string containing the QName value, in the form {uri}local,
#     or, if the tag argument is given, the URI part of a QName.
# @param tag Optional tag.  If given, the first argument is interpreted as
#     an URI, and this argument is interpreted as a local name.
# @return An opaque object, representing the QName.

class QName(object):
    def __init__(self, text_or_uri, tag=None):
        if tag:
            text_or_uri = "{%s}%s" % (text_or_uri, tag)
        self.text = text_or_uri
    def __str__(self):
        return self.text
    def __hash__(self):
        return hash(self.text)
    def _cmp_key(self, other):
        # compare against another QName's text, or against a plain string
        return other.text if isinstance(other, QName) else other
    # Rich comparisons: Python 3 ignores __cmp__ (and has no cmp()
    # builtin), so without these QName comparison is broken on 3.x.
    def __eq__(self, other):
        return self.text == self._cmp_key(other)
    def __ne__(self, other):
        return self.text != self._cmp_key(other)
    def __lt__(self, other):
        return self.text < self._cmp_key(other)
    def __le__(self, other):
        return self.text <= self._cmp_key(other)
    def __gt__(self, other):
        return self.text > self._cmp_key(other)
    def __ge__(self, other):
        return self.text >= self._cmp_key(other)
    def __cmp__(self, other):
        # Python 2 only (cmp() does not exist on Python 3)
        if isinstance(other, QName):
            return cmp(self.text, other.text)
        return cmp(self.text, other)
+
+# --------------------------------------------------------------------
+
##
# ElementTree wrapper class.  This class represents an entire element
# hierarchy, and adds some extra support for serialization to and from
# standard XML.
#
# @param element Optional root element.
# @keyparam file Optional file handle or file name.  If given, the
#     tree is initialized with the contents of this XML file.

class ElementTree(object):

    def __init__(self, element=None, file=None):
        assert element is None or iselement(element)
        self._root = element # first node
        if file:
            # a file argument is parsed eagerly; its root replaces `element`
            self.parse(file)

    ##
    # Gets the root element for this tree.
    #
    # @return An element instance.
    # @defreturn Element

    def getroot(self):
        return self._root

    ##
    # Replaces the root element for this tree.  This discards the
    # current contents of the tree, and replaces it with the given
    # element.  Use with care.
    #
    # @param element An element instance.

    def _setroot(self, element):
        assert iselement(element)
        self._root = element

    ##
    # Loads an external XML document into this element tree.
    #
    # @param source A file name or file object.
    # @keyparam parser An optional parser instance.  If not given, the
    #     standard {@link XMLParser} parser is used.
    # @return The document root element.
    # @defreturn Element

    def parse(self, source, parser=None):
        if not hasattr(source, "read"):
            # NOTE(review): a file opened here is never explicitly
            # closed; pass an open file object if that matters
            source = open(source, "rb")
        if not parser:
            parser = XMLParser(target=TreeBuilder())
        while 1:
            # feed the parser in 32k chunks to keep memory use bounded
            data = source.read(32768)
            if not data:
                break
            parser.feed(data)
        self._root = parser.close()
        return self._root

    ##
    # Creates a tree iterator for the root element.  The iterator loops
    # over all elements in this tree, in document order.
    #
    # @param tag What tags to look for (default is to return all elements)
    # @return An iterator.
    # @defreturn iterator

    def iter(self, tag=None):
        assert self._root is not None
        return self._root.iter(tag)

    # old name, kept for compatibility with pre-1.3 code
    getiterator = iter

    ##
    # Finds the first toplevel element with given tag.
    # Same as getroot().find(path).
    #
    # @param path What element to look for.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None

    def find(self, path):
        assert self._root is not None
        if path[:1] == "/":
            # leading "/" was mishandled in 1.3 and earlier; rewrite to a
            # root-relative path and tell the caller about the change
            path = "." + path
            import warnings
            warnings.warn(
                "This search is broken in 1.3 and earlier; if you rely "
                "on the current behaviour, change it to %r" % path,
                FutureWarning
                )
        return self._root.find(path)

    ##
    # Finds the element text for the first toplevel element with given
    # tag.  Same as getroot().findtext(path).
    #
    # @param path What toplevel element to look for.
    # @param default What to return if the element was not found.
    # @return The text content of the first matching element, or the
    #     default value no element was found.  Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string

    def findtext(self, path, default=None):
        assert self._root is not None
        if path[:1] == "/":
            # same leading-"/" rewrite as in find() above
            path = "." + path
            import warnings
            warnings.warn(
                "This search is broken in 1.3 and earlier; if you rely "
                "on the current behaviour, change it to %r" % path,
                FutureWarning
                )
        return self._root.findtext(path, default)

    ##
    # Finds all toplevel elements with the given tag.
    # Same as getroot().findall(path).
    #
    # @param path What element to look for.
    # @return A list or iterator containing all matching elements,
    #    in document order.
    # @defreturn list of Element instances

    def findall(self, path):
        assert self._root is not None
        if path[:1] == "/":
            # same leading-"/" rewrite as in find() above
            path = "." + path
            import warnings
            warnings.warn(
                "This search is broken in 1.3 and earlier; if you rely "
                "on the current behaviour, change it to %r" % path,
                FutureWarning
                )
        return self._root.findall(path)

    ##
    # Writes the element tree to a file, as XML.
    #
    # @param file A file name, or a file object opened for writing.
    # @keyparam encoding Optional output encoding (default is US-ASCII).
    # @keyparam method Optional output method ("xml", "html" or "text";
    #     default is "xml").
    # @keyparam xml_declaration Controls if an XML declaration should
    #     be added to the file.  Use False for never, True for always,
    #     None for only if not US-ASCII or UTF-8.  None is default.

    def write(self, file,
              # keyword arguments
              encoding="us-ascii",
              xml_declaration=None,
              default_namespace=None,
              method=None):
        assert self._root is not None
        if not hasattr(file, "write"):
            file = open(file, "wb")
        write = file.write
        if not method:
            method = "xml"
        if not encoding:
            encoding = "us-ascii"
        elif xml_declaration or (xml_declaration is None and
                                 encoding not in ("utf-8", "us-ascii")):
            # NOTE: because this is an elif, a declaration is only ever
            # written when the caller passed a non-empty encoding
            write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
        if method == "text":
            _serialize_text(write, self._root, encoding)
        else:
            # qname/namespace maps are computed once for the whole tree
            # and then shared by the recursive serializer
            qnames, namespaces = _namespaces(
                self._root, encoding, default_namespace
                )
            if method == "xml":
                _serialize_xml(
                    write, self._root, encoding, qnames, namespaces
                    )
            elif method == "html":
                _serialize_html(
                    write, self._root, encoding, qnames, namespaces
                    )
            else:
                raise ValueError("unknown method %r" % method)
+
+# --------------------------------------------------------------------
+# serialization support
+
def _namespaces(elem, encoding, default_namespace=None):
    # identify namespaces used in this tree; returns (qnames, namespaces)
    # where qnames maps every tag/attribute qname seen in the tree to its
    # encoded "prefix:local" serialized form, and namespaces maps each
    # namespace uri to the prefix chosen for it

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        # the default namespace serializes with an empty prefix
        namespaces[default_namespace] = ""

    def encode(text):
        return text.encode(encoding)

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                # "{uri}local" form: look up (or invent) a prefix for uri
                uri, tag = qname[1:].split("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        # invent "ns0", "ns1", ... for unknown uris
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = encode("%s:%s" % (prefix, tag))
                else:
                    qnames[qname] = encode(tag) # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
                    raise ValueError(
                        "cannot use non-qualified names with "
                        "default_namespace option"
                        )
                qnames[qname] = encode(qname)
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    try:
        iterate = elem.iter
    except AttributeError:
        iterate = elem.getiterator # cET compatibility
    for elem in iterate():
        tag = elem.tag
        if isinstance(tag, QName) and tag.text not in qnames:
            add_qname(tag.text)
        # NOTE(review): string_types presumably comes from a py2/py3
        # compat import earlier in this file -- confirm
        elif isinstance(tag, string_types):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            # Comment/PI elements use the factory function as the tag
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
+
def _serialize_xml(write, elem, encoding, qnames, namespaces):
    # Recursively serialize *elem* as XML through the *write* callable.
    # qnames maps raw tag/attribute names to their encoded serialized
    # form; namespaces (uri -> prefix) is passed only for the outermost
    # call, so xmlns declarations are emitted once, on the root element.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # tag maps to None: emit only the content, no surrounding tag
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_xml(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                items = sorted(items) # lexical order
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        # QName attribute values serialize as prefix:local
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib(v, encoding)
                    write(" %s=\"%s\"" % (qnames[k], v))
                if namespaces:
                    items = namespaces.items()
                    items = sorted(items, key=lambda x: x[1]) # sort on prefix
                    for v, k in items:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
            if text or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text, encoding))
                for e in elem:
                    # children never re-emit namespace declarations
                    _serialize_xml(write, e, encoding, qnames, None)
                write("</" + tag + ">")
            else:
                # no content at all: use the short empty-element form
                write(" />")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
+
# Tags serialized as empty (void) HTML elements: they never get a
# closing tag.  BUGFIX: the original tuple was missing the comma between
# "meta" and "param", silently fusing them into the single bogus entry
# "metaparam", so <meta> and <param> were wrongly given closing tags.
HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr",
              "img", "input", "isindex", "link", "meta", "param")

try:
    HTML_EMPTY = set(HTML_EMPTY)
except NameError:
    # ancient Python without the set builtin: fall back to the tuple
    pass
+
def _serialize_html(write, elem, encoding, qnames, namespaces):
    # Recursively serialize *elem* as HTML through the *write* callable.
    # Differs from _serialize_xml in three ways: the start tag is always
    # closed with ">", <script>/<style> content is not escaped, and tags
    # in HTML_EMPTY get no closing tag.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _escape_cdata(text, encoding))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _escape_cdata(text, encoding))
    else:
        tag = qnames[tag]
        if tag is None:
            # tag maps to None: emit only the content, no surrounding tag
            if text:
                write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
        else:
            write("<" + tag)
            items = elem.items()
            if items or namespaces:
                items = sorted(items) # lexical order
                for k, v in items:
                    if isinstance(k, QName):
                        k = k.text
                    if isinstance(v, QName):
                        v = qnames[v.text]
                    else:
                        v = _escape_attrib_html(v, encoding)
                    # FIXME: handle boolean attributes
                    write(" %s=\"%s\"" % (qnames[k], v))
                if namespaces:
                    items = namespaces.items()
                    items = sorted(items, key=lambda x: x[1]) # sort on prefix
                    for v, k in items:
                        if k:
                            k = ":" + k
                        write(" xmlns%s=\"%s\"" % (
                            k.encode(encoding),
                            _escape_attrib(v, encoding)
                            ))
            write(">")
            tag = tag.lower()
            if text:
                if tag == "script" or tag == "style":
                    # script/style bodies are emitted verbatim (encoded only)
                    write(_encode(text, encoding))
                else:
                    write(_escape_cdata(text, encoding))
            for e in elem:
                _serialize_html(write, e, encoding, qnames, None)
            if tag not in HTML_EMPTY:
                write("</" + tag + ">")
    if elem.tail:
        write(_escape_cdata(elem.tail, encoding))
+
+def _serialize_text(write, elem, encoding):
+ for part in elem.itertext():
+ write(part.encode(encoding))
+ if elem.tail:
+ write(elem.tail.encode(encoding))
+
+##
+# Registers a namespace prefix. The registry is global, and any
+# existing mapping for either the given prefix or the namespace URI
+# will be removed.
+#
+# @param prefix Namespace prefix.
+# @param uri Namespace uri. Tags and attributes in this namespace
+# will be serialized with the given prefix, if at all possible.
+# @raise ValueError If the prefix is reserved, or is otherwise
+# invalid.
+
+def register_namespace(prefix, uri):
+ if re.match(r"ns\d+$", prefix):
+ raise ValueError("Prefix format reserved for internal use")
+ for k, v in _namespace_map.items():
+ if k == uri or v == prefix:
+ del _namespace_map[k]
+ _namespace_map[uri] = prefix
+
+_namespace_map = {
+ # "well-known" namespace prefixes
+ "http://www.w3.org/XML/1998/namespace": "xml",
+ "http://www.w3.org/1999/xhtml": "html",
+ "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
+ "http://schemas.xmlsoap.org/wsdl/": "wsdl",
+ # xml schema
+ "http://www.w3.org/2001/XMLSchema": "xs",
+ "http://www.w3.org/2001/XMLSchema-instance": "xsi",
+ # dublic core
+ "http://purl.org/dc/elements/1.1/": "dc",
+}
+
+def _raise_serialization_error(text):
+ raise TypeError(
+ "cannot serialize %r (type %s)" % (text, type(text).__name__)
+ )
+
+def _encode(text, encoding):
+ try:
+ return text.encode(encoding, "xmlcharrefreplace")
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_cdata(text, encoding):
+ # escape character data
+ try:
+ # it's worth avoiding do-nothing calls for strings that are
+ # shorter than 500 character, or so. assume that's, by far,
+ # the most common case in most applications.
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if "<" in text:
+ text = text.replace("<", "&lt;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ return text.encode(encoding, "xmlcharrefreplace")
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_attrib(text, encoding):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if "<" in text:
+ text = text.replace("<", "&lt;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ if "\"" in text:
+ text = text.replace("\"", "&quot;")
+ if "\n" in text:
+ text = text.replace("\n", "&#10;")
+ return text.encode(encoding, "xmlcharrefreplace")
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+def _escape_attrib_html(text, encoding):
+ # escape attribute value
+ try:
+ if "&" in text:
+ text = text.replace("&", "&amp;")
+ if ">" in text:
+ text = text.replace(">", "&gt;")
+ if "\"" in text:
+ text = text.replace("\"", "&quot;")
+ return text.encode(encoding, "xmlcharrefreplace")
+ except (TypeError, AttributeError):
+ _raise_serialization_error(text)
+
+# --------------------------------------------------------------------
+
##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @return An encoded string containing the XML data.
# @defreturn string

def tostring(element, encoding=None, method=None):
    # collect the serializer's write() calls into a list of fragments
    fragments = []
    class _Writer(object):
        pass
    sink = _Writer()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding, method=method)
    return "".join(fragments)
+
##
# Generates a string representation of an XML element, including all
# subelements.  The string is returned as a sequence of string fragments.
#
# @param element An Element instance.
# @return A sequence object containing the XML data.
# @defreturn sequence
# @since 1.3

def tostringlist(element, encoding=None):
    # collect the serializer's write() calls into a list of fragments
    fragments = []
    class _Writer(object):
        pass
    sink = _Writer()
    sink.write = fragments.append
    ElementTree(element).write(sink, encoding)
    # FIXME: merge small fragments into larger parts
    return fragments
+
##
# Writes an element tree or element structure to sys.stdout.  This
# function should be used for debugging only.
# <p>
# The exact output format is implementation dependent.  In this
# version, it's written as an ordinary XML file.
#
# @param elem An element tree or an individual element.

def dump(elem):
    # debugging helper: serialize to stdout, ensuring a trailing newline
    tree = elem if isinstance(elem, ElementTree) else ElementTree(elem)
    tree.write(sys.stdout)
    tail = tree.getroot().tail
    if not (tail and tail.endswith("\n")):
        sys.stdout.write("\n")
+
+# --------------------------------------------------------------------
+# parsing
+
##
# Parses an XML document into an element tree.
#
# @param source A filename or file object containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An ElementTree instance.

def parse(source, parser=None):
    document = ElementTree()
    document.parse(source, parser)
    return document
+
##
# Parses an XML document into an element tree incrementally, and reports
# what's going on to the user.
#
# @param source A filename or file object containing XML data.
# @param events A list of events to report back.  If omitted, only "end"
#     events are reported.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return A (event, elem) iterator.

def iterparse(source, events=None, parser=None):
    stream = source if hasattr(source, "read") else open(source, "rb")
    xml_parser = parser or XMLParser(target=TreeBuilder())
    return _IterParseIterator(stream, events, xml_parser)
+
class _IterParseIterator(object):
    # Iterator returned by iterparse(): pulls data from the source in
    # chunks, lets expat fire events into a buffer, and hands buffered
    # (event, elem) pairs back to the caller one at a time.

    def __init__(self, source, events, parser):
        self._file = source
        self._events = []   # buffered (event, elem) pairs
        self._index = 0     # read position inside the buffer
        self.root = self._root = None
        self._parser = parser
        # wire up the parser for event reporting
        parser = self._parser._parser
        append = self._events.append
        if events is None:
            events = ["end"]
        for event in events:
            if event == "start":
                try:
                    # prefer the list-based expat attribute protocol;
                    # setting these raises AttributeError if unsupported
                    parser.ordered_attributes = 1
                    parser.specified_attributes = 1
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start_list):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
                except AttributeError:
                    def handler(tag, attrib_in, event=event, append=append,
                                start=self._parser._start):
                        append((event, start(tag, attrib_in)))
                    parser.StartElementHandler = handler
            elif event == "end":
                def handler(tag, event=event, append=append,
                            end=self._parser._end):
                    append((event, end(tag)))
                parser.EndElementHandler = handler
            elif event == "start-ns":
                def handler(prefix, uri, event=event, append=append):
                    try:
                        # narrow to a byte/ascii string when possible
                        uri = uri.encode("ascii")
                    except UnicodeError:
                        pass
                    append((event, (prefix or "", uri)))
                parser.StartNamespaceDeclHandler = handler
            elif event == "end-ns":
                def handler(prefix, event=event, append=append):
                    append((event, None))
                parser.EndNamespaceDeclHandler = handler

    def __next__(self):
        while 1:
            try:
                item = self._events[self._index]
            except IndexError:
                # buffer exhausted: either we are done, or we need to
                # feed the parser another chunk to generate more events
                if self._parser is None:
                    self.root = self._root
                    raise StopIteration
                # load event buffer
                del self._events[:]
                self._index = 0
                data = self._file.read(16384)
                if data:
                    self._parser.feed(data)
                else:
                    # end of input: finalize the tree, drop the parser
                    self._root = self._parser.close()
                    self._parser = None
            else:
                self._index = self._index + 1
                return item

    next = __next__ # Python 2 compatibility

    def __iter__(self):
        return self
+
##
# Parses an XML document from a string constant.  This function can
# be used to embed "XML literals" in Python code.
#
# @param text A string containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element

def XML(text, parser=None):
    xml_parser = parser or XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    return xml_parser.close()
+
##
# Parses an XML document from a string constant, and also returns
# a dictionary which maps from element id:s to elements.
#
# @param text A string containing XML data.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return A tuple containing an Element instance and a dictionary.
# @defreturn (Element, dictionary)

def XMLID(text, parser=None):
    xml_parser = parser or XMLParser(target=TreeBuilder())
    xml_parser.feed(text)
    root = xml_parser.close()
    by_id = {}
    for elem in root.getiterator():
        elem_id = elem.get("id")
        if elem_id:
            by_id[elem_id] = elem
    return root, by_id
+
##
# Parses an XML document from a string constant.  Same as {@link #XML};
# provided under the name used by the cElementTree/lxml APIs.
#
# @def fromstring(text)
# @param source A string containing XML data.
# @return An Element instance.
# @defreturn Element

fromstring = XML
+
##
# Parses an XML document from a sequence of string fragments.
#
# @param sequence A list or other sequence containing XML data fragments.
# @param parser An optional parser instance.  If not given, the
#     standard {@link XMLParser} parser is used.
# @return An Element instance.
# @defreturn Element
# @since 1.3

def fromstringlist(sequence, parser=None):
    xml_parser = parser or XMLParser(target=TreeBuilder())
    for fragment in sequence:
        xml_parser.feed(fragment)
    return xml_parser.close()
+
# --------------------------------------------------------------------

##
# Generic element structure builder.  This builder converts a sequence
# of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link
# #TreeBuilder.end} method calls to a well-formed element structure.
# <p>
# You can use this class to build an element structure using a custom XML
# parser, or a parser for some other XML-like format.
#
# @param element_factory Optional element factory.  This factory
#    is called to create new Element instances, as necessary.

class TreeBuilder(object):

    def __init__(self, element_factory=None):
        self._data = [] # data collector
        self._elem = [] # element stack
        self._last = None # last element
        self._tail = None # true if we're after an end tag
        if element_factory is None:
            element_factory = Element
        self._factory = element_factory

    ##
    # Flushes the builder buffers, and returns the toplevel document
    # element.
    #
    # @return An Element instance.
    # @defreturn Element

    def close(self):
        assert len(self._elem) == 0, "missing end tags"
        # "is not None" (identity) instead of "!= None": PEP 8, and it
        # cannot be fooled by a factory element defining __eq__
        assert self._last is not None, "missing toplevel element"
        return self._last

    def _flush(self):
        # attach buffered character data to the most recent element:
        # as its tail if we just closed it, otherwise as its text
        if self._data:
            if self._last is not None:
                text = "".join(self._data)
                if self._tail:
                    assert self._last.tail is None, "internal error (tail)"
                    self._last.tail = text
                else:
                    assert self._last.text is None, "internal error (text)"
                    self._last.text = text
            self._data = []

    ##
    # Adds text to the current element.
    #
    # @param data A string.  This should be either an 8-bit string
    #    containing ASCII text, or a Unicode string.

    def data(self, data):
        self._data.append(data)

    ##
    # Opens a new element.
    #
    # @param tag The element name.
    # @param attrs A dictionary containing element attributes.
    # @return The opened element.
    # @defreturn Element

    def start(self, tag, attrs):
        self._flush()
        self._last = elem = self._factory(tag, attrs)
        if self._elem:
            self._elem[-1].append(elem)
        self._elem.append(elem)
        self._tail = 0
        return elem

    ##
    # Closes the current element.
    #
    # @param tag The element name.
    # @return The closed element.
    # @defreturn Element

    def end(self, tag):
        self._flush()
        self._last = self._elem.pop()
        assert self._last.tag == tag,\
               "end tag mismatch (expected %s, got %s)" % (
                   self._last.tag, tag)
        self._tail = 1
        return self._last
+
+##
+# Element structure builder for XML source data, based on the
+# <b>expat</b> parser.
+#
+# @keyparam target Target object. If omitted, the builder uses an
+# instance of the standard {@link #TreeBuilder} class.
+# @keyparam html Predefine HTML entities. This flag is not supported
+# by the current implementation.
+# @keyparam encoding Optional encoding. If given, the value overrides
+# the encoding specified in the XML file.
+# @see #ElementTree
+# @see #TreeBuilder
+
class XMLParser(object):
    # Element structure builder for XML source data, based on the expat
    # parser: translates expat callbacks into calls on the target
    # TreeBuilder-like object.

    def __init__(self, html=0, target=None, encoding=None):
        try:
            from xml.parsers import expat
        except ImportError:
            try:
                import pyexpat; expat = pyexpat
            except ImportError:
                raise ImportError(
                    "No module named expat; use SimpleXMLTreeBuilder instead"
                    )
        # "}" namespace separator makes expat report "{uri}local" names
        parser = expat.ParserCreate(encoding, "}")
        if target is None:
            target = TreeBuilder()
        # underscored names are provided for compatibility only
        self.parser = self._parser = parser
        self.target = self._target = target
        self._error = expat.error
        self._names = {} # name memo cache
        # callbacks
        parser.DefaultHandlerExpand = self._default
        parser.StartElementHandler = self._start
        parser.EndElementHandler = self._end
        parser.CharacterDataHandler = self._data
        # let expat do the buffering, if supported
        try:
            self._parser.buffer_text = 1
        except AttributeError:
            pass
        # use new-style attribute handling, if supported
        try:
            self._parser.ordered_attributes = 1
            self._parser.specified_attributes = 1
            parser.StartElementHandler = self._start_list
        except AttributeError:
            pass
        self._doctype = None   # accumulates doctype tokens while inside <!DOCTYPE ...>
        self.entity = {}       # entity name -> replacement text, filled by callers
        try:
            self.version = "Expat %d.%d.%d" % expat.version_info
        except AttributeError:
            pass # unknown

    def _raiseerror(self, value):
        # wrap an expat error in a ParseError carrying code and position
        err = ParseError(value)
        err.code = value.code
        err.position = value.lineno, value.offset
        raise err

    if sys.version_info >= (3, 0):
        def _fixtext(self, text):
            # Python 3: text is already str
            return text
    else:
        def _fixtext(self, text):
            # convert text string to ascii, if possible
            try:
                return text.encode("ascii")
            except UnicodeError:
                return text

    def _fixname(self, key):
        # expand qname, and convert name string to ascii, if possible;
        # results are memoized in self._names
        try:
            name = self._names[key]
        except KeyError:
            name = key
            if "}" in name:
                # expat reported "uri}local"; restore the "{uri}local" form
                name = "{" + name
            self._names[key] = name = self._fixtext(name)
        return name

    def _start(self, tag, attrib_in):
        # start-tag callback (dict-based attribute protocol)
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        for key, value in attrib_in.items():
            attrib[fixname(key)] = fixtext(value)
        return self.target.start(tag, attrib)

    def _start_list(self, tag, attrib_in):
        # start-tag callback (flat [name, value, ...] attribute protocol)
        fixname = self._fixname
        fixtext = self._fixtext
        tag = fixname(tag)
        attrib = {}
        if attrib_in:
            for i in range(0, len(attrib_in), 2):
                attrib[fixname(attrib_in[i])] = fixtext(attrib_in[i+1])
        return self.target.start(tag, attrib)

    def _data(self, text):
        # character-data callback
        return self.target.data(self._fixtext(text))

    def _end(self, tag):
        # end-tag callback
        return self.target.end(self._fixname(tag))

    def _default(self, text):
        # catch-all callback: handles undefined entities and scans
        # doctype declarations for name/pubid/system information
        prefix = text[:1]
        if prefix == "&":
            # deal with undefined entities
            try:
                self.target.data(self.entity[text[1:-1]])
            except KeyError:
                from xml.parsers import expat
                err = expat.error(
                    "undefined entity %s: line %d, column %d" %
                    (text, self._parser.ErrorLineNumber,
                     self._parser.ErrorColumnNumber)
                    )
                err.code = 11 # XML_ERROR_UNDEFINED_ENTITY
                err.lineno = self._parser.ErrorLineNumber
                err.offset = self._parser.ErrorColumnNumber
                raise err
        elif prefix == "<" and text[:9] == "<!DOCTYPE":
            self._doctype = [] # inside a doctype declaration
        elif self._doctype is not None:
            # parse doctype contents
            if prefix == ">":
                self._doctype = None
                return
            text = text.strip()
            if not text:
                return
            self._doctype.append(text)
            n = len(self._doctype)
            if n > 2:
                # expect [name, "PUBLIC", pubid, system] or
                # [name, "SYSTEM", system]
                type = self._doctype[1]
                if type == "PUBLIC" and n == 4:
                    name, type, pubid, system = self._doctype
                elif type == "SYSTEM" and n == 3:
                    name, type, system = self._doctype
                    pubid = None
                else:
                    return
                if pubid:
                    pubid = pubid[1:-1]   # strip surrounding quotes
                if hasattr(self.target, "doctype"):
                    self.target.doctype(name, pubid, system[1:-1])
                self._doctype = None

    ##
    # Feeds data to the parser.
    #
    # @param data Encoded data.

    def feed(self, data):
        try:
            self._parser.Parse(data, 0)
        except self._error as v:
            self._raiseerror(v)

    ##
    # Finishes feeding data to the parser.
    #
    # @return An element structure.
    # @defreturn Element

    def close(self):
        try:
            self._parser.Parse("", 1) # end of data
        except self._error as v:
            self._raiseerror(v)
        tree = self.target.close()
        del self.target, self._parser # get rid of circular references
        return tree

# compatibility: old name for the expat-based parser
XMLTreeBuilder = XMLParser
diff --git a/tests/root/Makefile b/tests/root/Makefile
index 7d5162fe7..85a93bc54 100644
--- a/tests/root/Makefile
+++ b/tests/root/Makefile
@@ -4,12 +4,9 @@
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
-PAPER =
# Internal variables.
-PAPEROPT_a4 = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d _build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+ALLSPHINXOPTS = -d _build/doctrees $(SPHINXOPTS) .
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck
@@ -18,7 +15,7 @@ help:
@echo " html to make standalone HTML files"
@echo " pickle to make pickle files (usable by e.g. sphinx-web)"
@echo " htmlhelp to make HTML files and an HTML help project"
- @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latex to make LaTeX files"
@echo " changes to make an overview over all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
diff --git a/tests/root/conf.py b/tests/root/conf.py
index f2684e33f..a23aec482 100644
--- a/tests/root/conf.py
+++ b/tests/root/conf.py
@@ -84,8 +84,8 @@ tags.add('confpytag')
# -- extension API
from docutils import nodes
+from docutils.parsers.rst import Directive
from sphinx import addnodes
-from sphinx.util.compat import Directive
def userdesc_parse(env, sig, signode):
diff --git a/tests/root/footnote.txt b/tests/root/footnote.txt
index a040f10f3..dc5e0b0da 100644
--- a/tests/root/footnote.txt
+++ b/tests/root/footnote.txt
@@ -33,7 +33,7 @@ footnotes in table
* - name [#]_
- desription
* - VIDIOC_CROPCAP
- - Information about VIDIOC_CROPCAP
+ - Information about VIDIOC_CROPCAP [#]_
footenotes
--------------------
@@ -50,6 +50,8 @@ footenotes
.. [bar] cite
-.. [#] footnotes in table caption
+.. [#] footnote in table caption
-.. [#] footnotes in table
+.. [#] footnote in table header
+
+.. [#] footnote in table not in header
diff --git a/tests/roots/test-build-html-translator/conf.py b/tests/roots/test-build-html-translator/conf.py
new file mode 100644
index 000000000..2a9301380
--- /dev/null
+++ b/tests/roots/test-build-html-translator/conf.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+from sphinx.writers.html import HTMLTranslator
+
+project = 'test'
+master_doc = 'index'
+
+
+class ConfHTMLTranslator(HTMLTranslator):
+ depart_with_node = 0
+
+ def depart_admonition(self, node=None):
+ if node is not None:
+ self.depart_with_node += 1
+ HTMLTranslator.depart_admonition(self, node)
+
+
+def setup(app):
+ app.set_translator('html', ConfHTMLTranslator)
diff --git a/tests/roots/test-build-html-translator/index.rst b/tests/roots/test-build-html-translator/index.rst
new file mode 100644
index 000000000..1610d2b45
--- /dev/null
+++ b/tests/roots/test-build-html-translator/index.rst
@@ -0,0 +1,24 @@
+=======================
+Test HTML admonitions
+=======================
+
+.. seealso:: test
+
+.. note:: test
+
+.. warning:: test
+
+.. attention:: test
+
+.. caution:: test
+
+.. danger:: test
+
+.. error:: test
+
+.. hint:: test
+
+.. important:: test
+
+.. tip:: test
+
diff --git a/tests/roots/test-directive-code/dedent.rst b/tests/roots/test-directive-code/dedent.rst
deleted file mode 100644
index 9ec1c0eeb..000000000
--- a/tests/roots/test-directive-code/dedent.rst
+++ /dev/null
@@ -1,35 +0,0 @@
-Dedent
-======
-
-Literal Include
----------------
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 0
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 1
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 2
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 3
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 4
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 10-11
- :dedent: 1000
diff --git a/tests/roots/test-directive-code/dedent_code.rst b/tests/roots/test-directive-code/dedent_code.rst
deleted file mode 100644
index 3e8dacd69..000000000
--- a/tests/roots/test-directive-code/dedent_code.rst
+++ /dev/null
@@ -1,53 +0,0 @@
-Dedent
-======
-
-Code blocks
------------
-
-.. code-block:: ruby
- :linenos:
- :dedent: 0
-
- def ruby?
- false
- end
-
-.. code-block:: ruby
- :linenos:
- :dedent: 1
-
- def ruby?
- false
- end
-
-.. code-block:: ruby
- :linenos:
- :dedent: 2
-
- def ruby?
- false
- end
-
-.. code-block:: ruby
- :linenos:
- :dedent: 3
-
- def ruby?
- false
- end
-
-.. code-block:: ruby
- :linenos:
- :dedent: 4
-
- def ruby?
- false
- end
-
-.. code-block:: ruby
- :linenos:
- :dedent: 1000
-
- def ruby?
- false
- end
diff --git a/tests/roots/test-directive-code/lineno_match.rst b/tests/roots/test-directive-code/lineno_match.rst
deleted file mode 100644
index 42987609a..000000000
--- a/tests/roots/test-directive-code/lineno_match.rst
+++ /dev/null
@@ -1,26 +0,0 @@
-Literal Includes with Line Numbers Matching
-===========================================
-
-.. literalinclude:: literal.inc
- :language: python
- :pyobject: Bar
- :lineno-match:
-
-.. literalinclude:: literal.inc
- :language: python
- :lines: 5-6,7,8-9
- :lineno-match:
-
-.. literalinclude:: literal.inc
- :language: python
- :start-after: pass
- :lineno-match:
-
-.. literalinclude:: literal.inc
- :language: python
- :start-at: class Bar:
- :end-at: pass
- :lineno-match:
-
-.. literalinclude:: empty.inc
- :lineno-match:
diff --git a/tests/roots/test-directive-code/lineno_start.rst b/tests/roots/test-directive-code/lineno_start.rst
deleted file mode 100644
index 1beaabbf1..000000000
--- a/tests/roots/test-directive-code/lineno_start.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Literal Includes with Line Numbers Starting from 200
-====================================================
-
-.. literalinclude:: literal.inc
- :language: python
- :lineno-start: 200
diff --git a/tests/roots/test-directive-code/linenos.rst b/tests/roots/test-directive-code/linenos.rst
index 2f64498d8..a8e5b69cd 100644
--- a/tests/roots/test-directive-code/linenos.rst
+++ b/tests/roots/test-directive-code/linenos.rst
@@ -4,3 +4,15 @@ Literal Includes with Line Numbers
.. literalinclude:: literal.inc
:language: python
:linenos:
+
+.. literalinclude:: literal.inc
+ :language: python
+ :lineno-start: 200
+
+.. literalinclude:: literal.inc
+ :language: python
+ :lines: 5-9
+ :lineno-match:
+
+.. literalinclude:: empty.inc
+ :lineno-match:
diff --git a/tests/roots/test-directive-code/literal-diff.inc b/tests/roots/test-directive-code/literal-diff.inc
new file mode 100644
index 000000000..dc2ec3116
--- /dev/null
+++ b/tests/roots/test-directive-code/literal-diff.inc
@@ -0,0 +1,14 @@
+# Literally included file using Python highlighting
+# -*- coding: utf-8 -*-
+
+foo = "Including Unicode characters: üöä"
+
+class Foo:
+ pass
+
+class Bar:
+ def baz(self):
+ pass
+
+# comment after Bar class
+def bar(): pass
diff --git a/tests/roots/test-directive-code/py-decorators.inc b/tests/roots/test-directive-code/py-decorators.inc
new file mode 100644
index 000000000..b1864ab8d
--- /dev/null
+++ b/tests/roots/test-directive-code/py-decorators.inc
@@ -0,0 +1,16 @@
+# Literally included file using Python highlighting
+# -*- coding: utf-8 -*-
+
+@class_decorator
+@other_decorator()
+class TheClass(object):
+
+ @method_decorator
+ @other_decorator()
+ def the_method():
+ pass
+
+@function_decorator
+@other_decorator()
+def the_function():
+ pass
diff --git a/tests/roots/test-directive-code/py-decorators.rst b/tests/roots/test-directive-code/py-decorators.rst
new file mode 100644
index 000000000..31417f590
--- /dev/null
+++ b/tests/roots/test-directive-code/py-decorators.rst
@@ -0,0 +1,17 @@
+py-decorators
+=============
+
+Various decorators
+------------------
+
+.. literalinclude:: py-decorators.inc
+ :name: literal_include_pydecorators_1
+ :pyobject: TheClass
+
+.. literalinclude:: py-decorators.inc
+ :name: literal_include_pydecorators_2
+ :pyobject: TheClass.the_method
+
+.. literalinclude:: py-decorators.inc
+ :name: literal_include_pydecorators_3
+ :pyobject: the_function
diff --git a/tests/roots/test-directive-code/target.py b/tests/roots/test-directive-code/target.py
index 61ad717ec..a5c678648 100644
--- a/tests/roots/test-directive-code/target.py
+++ b/tests/roots/test-directive-code/target.py
@@ -20,3 +20,8 @@ def block_start_with_comment():
def block_start_with_blank():
return 1
+
+
+class Qux:
+ def quux(self):
+ pass
diff --git a/tests/roots/test-doctest/doctest.txt b/tests/roots/test-doctest/doctest.txt
index 053601f3c..e45bc2721 100644
--- a/tests/roots/test-doctest/doctest.txt
+++ b/tests/roots/test-doctest/doctest.txt
@@ -69,7 +69,7 @@ Special directives
>>> squared(2)
4
-* options for testcode/testoutput blocks
+* options for doctest/testcode/testoutput blocks
.. testcode::
:hide:
@@ -82,6 +82,20 @@ Special directives
Output text.
+ .. doctest::
+ :pyversion: >= 2.0
+
+ >>> a = 3
+ >>> a
+ 3
+
+ .. doctest::
+ :pyversion: < 2.0
+
+ >>> a = 3
+ >>> a
+ 4
+
* grouping
.. testsetup:: group1
diff --git a/tests/roots/test-ext-viewcode/spam/mod1.py b/tests/roots/test-ext-viewcode/spam/mod1.py
index 7133fc829..94fceff7a 100644
--- a/tests/roots/test-ext-viewcode/spam/mod1.py
+++ b/tests/roots/test-ext-viewcode/spam/mod1.py
@@ -2,6 +2,10 @@
mod1
"""
+def decorator(f):
+ return f
+
+@decorator
def func1(a, b):
"""
this is func1
@@ -9,6 +13,7 @@ def func1(a, b):
return a, b
+@decorator
class Class1(object):
"""
this is Class1
diff --git a/tests/roots/test-ext-viewcode/spam/mod2.py b/tests/roots/test-ext-viewcode/spam/mod2.py
index 79834b665..5953c94fe 100644
--- a/tests/roots/test-ext-viewcode/spam/mod2.py
+++ b/tests/roots/test-ext-viewcode/spam/mod2.py
@@ -2,6 +2,10 @@
mod2
"""
+def decorator(f):
+ return f
+
+@decorator
def func2(a, b):
"""
this is func2
@@ -9,6 +13,7 @@ def func2(a, b):
return a, b
+@decorator
class Class2(object):
"""
this is Class2
diff --git a/tests/roots/test-latex-table/complex.rst b/tests/roots/test-latex-table/complex.rst
new file mode 100644
index 000000000..f3f927a3e
--- /dev/null
+++ b/tests/roots/test-latex-table/complex.rst
@@ -0,0 +1,35 @@
+complex tables
+==============
+
+grid table
+----------
+
++---------+---------+---------+
+| header1 | header2 | header3 |
++=========+=========+=========+
+| cell1-1 | cell1-2 | cell1-3 |
++---------+ +---------+
+| cell2-1 | | cell2-3 |
++ +---------+---------+
+| | cell3-2 |
++---------+ |
+| cell4-1 | |
++---------+---------+---------+
+| cell5-1 |
++---------+---------+---------+
+
+complex spanning cell
+---------------------
+
+table having ...
+
+* consecutive multirow at top of row (1-1 and 1-2)
+* consecutive multirow at end of row (1-4 and 1-5)
+
++-----------+-----------+-----------+-----------+-----------+
+| | | cell1-3 | | |
+| | +-----------+ | cell1-5 |
+| cell1-1 | cell1-2 | | cell1-4 | |
+| | | cell2-3 | +-----------+
+| | | | | cell3-5 |
++-----------+-----------+-----------+-----------+-----------+
diff --git a/tests/roots/test-latex-table/conf.py b/tests/roots/test-latex-table/conf.py
new file mode 100644
index 000000000..31e7a6ed4
--- /dev/null
+++ b/tests/roots/test-latex-table/conf.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+
+master_doc = 'index'
+
+latex_documents = [
+ (master_doc, 'test.tex', 'The basic Sphinx documentation for testing', 'Sphinx', 'report')
+]
diff --git a/tests/roots/test-latex-table/index.rst b/tests/roots/test-latex-table/index.rst
new file mode 100644
index 000000000..80dd11064
--- /dev/null
+++ b/tests/roots/test-latex-table/index.rst
@@ -0,0 +1,8 @@
+test-latex-table
+================
+
+.. toctree::
+
+ tabular
+ longtable
+ complex
diff --git a/tests/roots/test-latex-table/longtable.rst b/tests/roots/test-latex-table/longtable.rst
new file mode 100644
index 000000000..316dab775
--- /dev/null
+++ b/tests/roots/test-latex-table/longtable.rst
@@ -0,0 +1,132 @@
+longtables
+==========
+
+longtable
+---------
+
+.. table::
+ :class: longtable
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+longtable having :widths: option
+--------------------------------
+
+.. table::
+ :class: longtable
+ :widths: 30,70
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+longtable having :align: option
+-------------------------------
+
+.. table::
+ :align: right
+ :class: longtable
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+longtable with tabularcolumn
+----------------------------
+
+.. tabularcolumns:: |c|c|
+
+.. table::
+ :class: longtable
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+longtable having caption
+------------------------
+
+.. list-table:: caption for longtable
+ :class: longtable
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - cell1-1
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+longtable having verbatim
+-------------------------
+
+.. list-table::
+ :class: longtable
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - ::
+
+ hello world
+
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+longtable having both :widths: and problematic cell
+---------------------------------------------------
+
+.. list-table::
+ :class: longtable
+ :header-rows: 1
+ :widths: 30,70
+
+ * - header1
+ - header2
+ * - + item1
+ + item2
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+longtable having problematic cell
+---------------------------------
+
+.. list-table::
+ :class: longtable
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - + item1
+ + item2
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
diff --git a/tests/roots/test-latex-table/tabular.rst b/tests/roots/test-latex-table/tabular.rst
new file mode 100644
index 000000000..5577c496e
--- /dev/null
+++ b/tests/roots/test-latex-table/tabular.rst
@@ -0,0 +1,135 @@
+tabular and tabulary
+====================
+
+simple table
+------------
+
+======= =======
+header1 header2
+======= =======
+cell1-1 cell1-2
+cell2-1 cell2-2
+cell3-1 cell3-2
+======= =======
+
+table having :widths: option
+----------------------------
+
+.. table::
+ :widths: 30,70
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+table having :align: option (tabulary)
+--------------------------------------
+
+.. table::
+ :align: right
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+table having :align: option (tabular)
+-------------------------------------
+
+.. table::
+ :align: left
+ :widths: 30,70
+
+ ======= =======
+ header1 header2
+ ======= =======
+ cell1-1 cell1-2
+ cell2-1 cell2-2
+ cell3-1 cell3-2
+ ======= =======
+
+table with tabularcolumn
+------------------------
+
+.. tabularcolumns:: |c|c|
+
+======= =======
+header1 header2
+======= =======
+cell1-1 cell1-2
+cell2-1 cell2-2
+cell3-1 cell3-2
+======= =======
+
+table having caption
+--------------------
+
+.. list-table:: caption for table
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - cell1-1
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+table having verbatim
+---------------------
+
+.. list-table::
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - ::
+
+ hello world
+
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+table having both :widths: and problematic cell
+-----------------------------------------------
+
+.. list-table::
+ :header-rows: 1
+ :widths: 30,70
+
+ * - header1
+ - header2
+ * - + item1
+ + item2
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
+
+table having problematic cell
+-----------------------------
+
+.. list-table::
+ :header-rows: 1
+
+ * - header1
+ - header2
+ * - + item1
+ + item2
+ - cell1-2
+ * - cell2-1
+ - cell2-2
+ * - cell3-1
+ - cell3-2
diff --git a/tests/run.py b/tests/run.py
index cce06c407..71a41c7c1 100755
--- a/tests/run.py
+++ b/tests/run.py
@@ -24,6 +24,7 @@ sys.path.insert(0, os.path.abspath(os.path.join(testroot, os.path.pardir)))
# filter warnings of test dependencies
warnings.filterwarnings('ignore', category=DeprecationWarning, module='site') # virtualenv
warnings.filterwarnings('ignore', category=ImportWarning, module='backports')
+warnings.filterwarnings('ignore', category=ImportWarning, module='pytest_cov')
warnings.filterwarnings('ignore', category=PendingDeprecationWarning, module=r'_pytest\..*')
# check dependencies before testing
diff --git a/tests/test_api_translator.py b/tests/test_api_translator.py
index c2661ef12..d987a1b57 100644
--- a/tests/test_api_translator.py
+++ b/tests/test_api_translator.py
@@ -25,22 +25,13 @@ def teardown_module():
@pytest.mark.sphinx('html')
def test_html_translator(app, status, warning):
- # no set_translator(), no html_translator_class
+ # no set_translator()
translator_class = app.builder.translator_class
assert translator_class
assert translator_class.__name__ == 'SmartyPantsHTMLTranslator'
@pytest.mark.sphinx('html', confoverrides={
- 'html_translator_class': 'translator.ExtHTMLTranslator'})
-def test_html_with_html_translator_class(app, status, warning):
- # no set_translator(), but html_translator_class
- translator_class = app.builder.translator_class
- assert translator_class
- assert translator_class.__name__ == 'ExtHTMLTranslator'
-
-
-@pytest.mark.sphinx('html', confoverrides={
'html_use_smartypants': False})
def test_html_with_smartypants(app, status, warning):
# no set_translator(), html_use_smartypants=False
@@ -51,18 +42,7 @@ def test_html_with_smartypants(app, status, warning):
@pytest.mark.sphinx('html', testroot='api-set-translator')
def test_html_with_set_translator_for_html_(app, status, warning):
- # use set_translator(), no html_translator_class
- translator_class = app.builder.translator_class
- assert translator_class
- assert translator_class.__name__ == 'ConfHTMLTranslator'
-
-
-@pytest.mark.sphinx('html', testroot='api-set-translator', confoverrides={
- 'html_translator_class': 'translator.ExtHTMLTranslator'})
-def test_html_with_set_translator_for_html_and_html_translator_class(
- app, status, warning):
- # use set_translator() and html_translator_class.
- # set_translator() is given priority over html_translator_clas.
+ # use set_translator()
translator_class = app.builder.translator_class
assert translator_class
assert translator_class.__name__ == 'ConfHTMLTranslator'
diff --git a/tests/test_apidoc.py b/tests/test_apidoc.py
index 221a88d56..358df4283 100644
--- a/tests/test_apidoc.py
+++ b/tests/test_apidoc.py
@@ -173,3 +173,16 @@ def test_multibyte_parameters(make_app, apidoc):
app.build()
print(app._status.getvalue())
print(app._warning.getvalue())
+
+
+@pytest.mark.apidoc(
+ coderoot=(rootdir / 'root'),
+ options=['--ext-mathjax'],
+)
+def test_extension_parsed(make_app, apidoc):
+ outdir = apidoc.outdir
+ assert (outdir / 'conf.py').isfile()
+
+ with open(outdir / 'conf.py') as f:
+ rst = f.read()
+ assert "sphinx.ext.mathjax" in rst
diff --git a/tests/test_application.py b/tests/test_application.py
index c1e07aade..d285aa03d 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -49,40 +49,6 @@ def test_emit_with_nonascii_name_node(app, status, warning):
app.emit('my_event', node)
-def test_output(app, status, warning):
- # info with newline
- status.truncate(0) # __init__ writes to status
- status.seek(0)
- app.info("Nothing here...")
- assert status.getvalue() == "Nothing here...\n"
- # info without newline
- status.truncate(0)
- status.seek(0)
- app.info("Nothing here...", True)
- assert status.getvalue() == "Nothing here..."
-
- # warning
- old_count = app._warncount
- app.warn("Bad news!")
- assert strip_escseq(warning.getvalue()) == "WARNING: Bad news!\n"
- assert app._warncount == old_count + 1
-
-
-def test_output_with_unencodable_char(app, status, warning):
-
- class StreamWriter(codecs.StreamWriter):
- def write(self, object):
- self.stream.write(object.encode('cp1252').decode('cp1252'))
-
- app._status = StreamWriter(status)
-
- # info with UnicodeEncodeError
- status.truncate(0)
- status.seek(0)
- app.info(u"unicode \u206d...")
- assert status.getvalue() == "unicode ?...\n"
-
-
def test_extensions(app, status, warning):
app.setup_extension('shutil')
assert strip_escseq(warning.getvalue()).startswith("WARNING: extension 'shutil'")
diff --git a/tests/test_build_epub.py b/tests/test_build_epub.py
new file mode 100644
index 000000000..cf0e2857a
--- /dev/null
+++ b/tests/test_build_epub.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+"""
+    test_build_epub
+    ~~~~~~~~~~~~~~~
+
+    Test the epub builder and check output against XPath.
+
+    :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from xml.etree import ElementTree
+
+import pytest
+
+
+class EPUBElementTree(object):
+    """Test helper for content.opf and toc.ncx"""
+ namespaces = {
+ 'idpf': 'http://www.idpf.org/2007/opf',
+ 'dc': 'http://purl.org/dc/elements/1.1/',
+ 'ibooks': 'http://vocabulary.itunes.apple.com/rdf/ibooks/vocabulary-extensions-1.0/',
+ 'ncx': 'http://www.daisy.org/z3986/2005/ncx/',
+ 'xhtml': 'http://www.w3.org/1999/xhtml',
+ 'epub': 'http://www.idpf.org/2007/ops'
+ }
+
+ def __init__(self, tree):
+ self.tree = tree
+
+ @classmethod
+ def fromstring(cls, string):
+ return cls(ElementTree.fromstring(string))
+
+ def find(self, match):
+ ret = self.tree.find(match, namespaces=self.namespaces)
+ if ret is not None:
+ return self.__class__(ret)
+ else:
+ return ret
+
+ def findall(self, match):
+ ret = self.tree.findall(match, namespaces=self.namespaces)
+ return [self.__class__(e) for e in ret]
+
+ def __getattr__(self, name):
+ return getattr(self.tree, name)
+
+ def __iter__(self):
+ for child in self.tree:
+ yield self.__class__(child)
+
+
+@pytest.mark.sphinx('epub', testroot='basic')
+def test_build_epub(app):
+ app.build()
+ assert (app.outdir / 'mimetype').text() == 'application/epub+zip'
+ assert (app.outdir / 'META-INF' / 'container.xml').exists()
+
+ # toc.ncx
+ toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').text())
+ assert toc.find("./ncx:docTitle/ncx:text").text == 'Python documentation'
+
+ # toc.ncx / head
+ meta = list(toc.find("./ncx:head"))
+ assert meta[0].attrib == {'name': 'dtb:uid', 'content': 'unknown'}
+ assert meta[1].attrib == {'name': 'dtb:depth', 'content': '1'}
+ assert meta[2].attrib == {'name': 'dtb:totalPageCount', 'content': '0'}
+ assert meta[3].attrib == {'name': 'dtb:maxPageNumber', 'content': '0'}
+
+ # toc.ncx / navMap
+ navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
+ assert len(navpoints) == 1
+ assert navpoints[0].attrib == {'id': 'navPoint1', 'playOrder': '1'}
+ assert navpoints[0].find("./ncx:content").attrib == {'src': 'index.xhtml'}
+
+ navlabel = navpoints[0].find("./ncx:navLabel/ncx:text")
+ assert navlabel.text == 'The basic Sphinx documentation for testing'
+
+ # content.opf
+ opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
+
+ # content.opf / metadata
+ metadata = opf.find("./idpf:metadata")
+ assert metadata.find("./dc:language").text == 'en'
+ assert metadata.find("./dc:title").text == 'Python documentation'
+ assert metadata.find("./dc:description").text == 'unknown'
+ assert metadata.find("./dc:creator").text == 'unknown'
+ assert metadata.find("./dc:contributor").text == 'unknown'
+ assert metadata.find("./dc:publisher").text == 'unknown'
+ assert metadata.find("./dc:rights").text is None
+ assert metadata.find("./idpf:meta[@property='ibooks:version']").text is None
+ assert metadata.find("./idpf:meta[@property='ibooks:specified-fonts']").text == 'true'
+ assert metadata.find("./idpf:meta[@property='ibooks:binding']").text == 'true'
+ assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
+
+ # content.opf / manifest
+ manifest = opf.find("./idpf:manifest")
+ items = list(manifest)
+ assert items[0].attrib == {'id': 'ncx',
+ 'href': 'toc.ncx',
+ 'media-type': 'application/x-dtbncx+xml'}
+ assert items[1].attrib == {'id': 'nav',
+ 'href': 'nav.xhtml',
+ 'media-type': 'application/xhtml+xml',
+ 'properties': 'nav'}
+ assert items[2].attrib == {'id': 'epub-0',
+ 'href': 'genindex.xhtml',
+ 'media-type': 'application/xhtml+xml'}
+ assert items[3].attrib == {'id': 'epub-1',
+ 'href': 'index.xhtml',
+ 'media-type': 'application/xhtml+xml'}
+
+ for i, item in enumerate(items[2:]):
+ # items are named as epub-NN
+ assert item.get('id') == 'epub-%d' % i
+
+ # content.opf / spine
+ spine = opf.find("./idpf:spine")
+ itemrefs = list(spine)
+ assert spine.get('toc') == 'ncx'
+ assert spine.get('page-progression-direction') == 'ltr'
+ assert itemrefs[0].get('idref') == 'epub-1'
+ assert itemrefs[1].get('idref') == 'epub-0'
+
+ # content.opf / guide
+ reference = opf.find("./idpf:guide/idpf:reference")
+ assert reference.get('type') == 'toc'
+ assert reference.get('title') == 'Table of Contents'
+ assert reference.get('href') == 'index.xhtml'
+
+ # nav.xhtml
+ nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').text())
+ assert nav.attrib == {'lang': 'en',
+ '{http://www.w3.org/XML/1998/namespace}lang': 'en'}
+ assert nav.find("./xhtml:head/xhtml:title").text == 'Table of Contents'
+
+ # nav.xhtml / nav
+ navlist = nav.find("./xhtml:body/xhtml:nav")
+ toc = navlist.findall("./xhtml:ol/xhtml:li")
+ assert navlist.find("./xhtml:h1").text == 'Table of Contents'
+ assert len(toc) == 1
+ assert toc[0].find("./xhtml:a").get("href") == 'index.xhtml'
+ assert toc[0].find("./xhtml:a").text == 'The basic Sphinx documentation for testing'
+
+
+@pytest.mark.sphinx('epub', testroot='footnotes',
+ confoverrides={'epub_cover': ('_images/rimg.png', None)})
+def test_epub_cover(app):
+ app.build()
+
+ # content.opf / metadata
+ opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
+ cover_image = opf.find("./idpf:manifest/idpf:item[@href='%s']" % app.config.epub_cover[0])
+ cover = opf.find("./idpf:metadata/idpf:meta[@name='cover']")
+ assert cover
+ assert cover.get('content') == cover_image.get('id')
+
+
+@pytest.mark.sphinx('epub', testroot='toctree')
+def test_nested_toc(app):
+ app.build()
+
+ # toc.ncx
+ toc = EPUBElementTree.fromstring((app.outdir / 'toc.ncx').text())
+ assert toc.find("./ncx:docTitle/ncx:text").text == 'Python documentation'
+
+ # toc.ncx / navPoint
+ def navinfo(elem):
+ label = elem.find("./ncx:navLabel/ncx:text")
+ content = elem.find("./ncx:content")
+ return (elem.get('id'), elem.get('playOrder'),
+ content.get('src'), label.text)
+
+ navpoints = toc.findall("./ncx:navMap/ncx:navPoint")
+ assert len(navpoints) == 4
+ assert navinfo(navpoints[0]) == ('navPoint1', '1', 'index.xhtml',
+ "Welcome to Sphinx Tests's documentation!")
+ assert navpoints[0].findall("./ncx:navPoint") == []
+
+ # toc.ncx / nested navPoints
+ assert navinfo(navpoints[1]) == ('navPoint2', '2', 'foo.xhtml', 'foo')
+ navchildren = navpoints[1].findall("./ncx:navPoint")
+ assert len(navchildren) == 4
+ assert navinfo(navchildren[0]) == ('navPoint3', '2', 'foo.xhtml', 'foo')
+ assert navinfo(navchildren[1]) == ('navPoint4', '3', 'quux.xhtml', 'quux')
+ assert navinfo(navchildren[2]) == ('navPoint5', '4', 'foo.xhtml#foo-1', 'foo.1')
+ assert navinfo(navchildren[3]) == ('navPoint8', '6', 'foo.xhtml#foo-2', 'foo.2')
+
+ # nav.xhtml / nav
+ def navinfo(elem):
+ anchor = elem.find("./xhtml:a")
+ return (anchor.get('href'), anchor.text)
+
+ nav = EPUBElementTree.fromstring((app.outdir / 'nav.xhtml').text())
+ toc = nav.findall("./xhtml:body/xhtml:nav/xhtml:ol/xhtml:li")
+ assert len(toc) == 4
+ assert navinfo(toc[0]) == ('index.xhtml',
+ "Welcome to Sphinx Tests's documentation!")
+ assert toc[0].findall("./xhtml:ol") == []
+
+ # nav.xhtml / nested toc
+ assert navinfo(toc[1]) == ('foo.xhtml', 'foo')
+ tocchildren = toc[1].findall("./xhtml:ol/xhtml:li")
+ assert len(tocchildren) == 3
+ assert navinfo(tocchildren[0]) == ('quux.xhtml', 'quux')
+ assert navinfo(tocchildren[1]) == ('foo.xhtml#foo-1', 'foo.1')
+ assert navinfo(tocchildren[2]) == ('foo.xhtml#foo-2', 'foo.2')
+
+ grandchild = tocchildren[1].findall("./xhtml:ol/xhtml:li")
+ assert len(grandchild) == 1
+ assert navinfo(grandchild[0]) == ('foo.xhtml#foo-1-1', 'foo.1-1')
+
+
+@pytest.mark.sphinx('epub', testroot='basic')
+def test_epub_writing_mode(app):
+ # horizontal (default)
+ app.build()
+
+ # horizontal / page-progression-direction
+ opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
+ assert opf.find("./idpf:spine").get('page-progression-direction') == 'ltr'
+
+ # horizontal / ibooks:scroll-axis
+ metadata = opf.find("./idpf:metadata")
+ assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'vertical'
+
+ # horizontal / writing-mode (CSS)
+ css = (app.outdir / '_static' / 'epub.css').text()
+ assert 'writing-mode: horizontal-tb;' in css
+
+ # vertical
+ app.config.epub_writing_mode = 'vertical'
+    (app.outdir / 'index.xhtml').unlink()  # forcibly rebuild
+ app.build()
+
+ # vertical / page-progression-direction
+ opf = EPUBElementTree.fromstring((app.outdir / 'content.opf').text())
+ assert opf.find("./idpf:spine").get('page-progression-direction') == 'rtl'
+
+ # vertical / ibooks:scroll-axis
+ metadata = opf.find("./idpf:metadata")
+ assert metadata.find("./idpf:meta[@property='ibooks:scroll-axis']").text == 'horizontal'
+
+ # vertical / writing-mode (CSS)
+ css = (app.outdir / '_static' / 'epub.css').text()
+ assert 'writing-mode: vertical-rl;' in css
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index f27832c82..def6722c3 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -16,6 +16,7 @@ from itertools import cycle, chain
from six import PY3
from sphinx import __display_version__
+from sphinx.util.inventory import InventoryFile
from util import remove_unicode_literals, strip_escseq
import xml.etree.cElementTree as ElementTree
from html5lib import getTreeBuilder, HTMLParser
@@ -39,10 +40,10 @@ with "\\?": b?'here: >>>(\\\\|/)xbb<<<((\\\\|/)r)?'
"""
HTML_WARNINGS = ENV_WARNINGS + """\
-%(root)s/index.rst:\\d+: WARNING: no matching candidate for image URI u'foo.\\*'
-%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
%(root)s/index.rst:\\d+: WARNING: unknown option: &option
%(root)s/index.rst:\\d+: WARNING: citation not found: missing
+%(root)s/index.rst:\\d+: WARNING: no matching candidate for image URI u'foo.\\*'
+%(root)s/index.rst:\\d+: WARNING: Could not lex literal_block as "c". Highlighting skipped.
"""
if PY3:
@@ -109,9 +110,9 @@ def check_xpath(etree, fname, path, check, be_found=True):
# Since pygments-2.1.1, empty <span> tag is inserted at top of
# highlighting block
if len(node) == 1 and node[0].tag == 'span' and node[0].text is None:
- return node[0].tail
- else:
- return ''
+ if node[0].tail is not None:
+ return node[0].tail
+ return ''
rex = re.compile(check)
if be_found:
@@ -346,7 +347,7 @@ def test_static_output(app):
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
'perl'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
- '\+p'),
+ '\\+p'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-objc']/code/span",
'--ObjC\\+\\+'),
(".//a[@class='reference internal'][@href='#cmdoption-perl-plugin-option']/code/span",
@@ -415,18 +416,19 @@ def test_static_output(app):
(".//li/a", "double"),
],
'footnote.html': [
- (".//a[@class='footnote-reference'][@href='#id7'][@id='id1']", r"\[1\]"),
- (".//a[@class='footnote-reference'][@href='#id8'][@id='id2']", r"\[2\]"),
+ (".//a[@class='footnote-reference'][@href='#id8'][@id='id1']", r"\[1\]"),
+ (".//a[@class='footnote-reference'][@href='#id9'][@id='id2']", r"\[2\]"),
(".//a[@class='footnote-reference'][@href='#foo'][@id='id3']", r"\[3\]"),
(".//a[@class='reference internal'][@href='#bar'][@id='id4']", r"\[bar\]"),
- (".//a[@class='footnote-reference'][@href='#id9'][@id='id5']", r"\[4\]"),
- (".//a[@class='footnote-reference'][@href='#id10'][@id='id6']", r"\[5\]"),
+ (".//a[@class='footnote-reference'][@href='#id10'][@id='id5']", r"\[4\]"),
+ (".//a[@class='footnote-reference'][@href='#id11'][@id='id6']", r"\[5\]"),
(".//a[@class='fn-backref'][@href='#id1']", r"\[1\]"),
(".//a[@class='fn-backref'][@href='#id2']", r"\[2\]"),
(".//a[@class='fn-backref'][@href='#id3']", r"\[3\]"),
(".//a[@class='fn-backref'][@href='#id4']", r"\[bar\]"),
(".//a[@class='fn-backref'][@href='#id5']", r"\[4\]"),
(".//a[@class='fn-backref'][@href='#id6']", r"\[5\]"),
+ (".//a[@class='fn-backref'][@href='#id7']", r"\[6\]"),
],
'otherext.html': [
(".//h1", "Generated section"),
@@ -440,6 +442,10 @@ def test_html_output(app, cached_etree_parse, fname, expect):
app.build()
check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
+@pytest.mark.sphinx('html', testroot='build-html-translator')
+def test_html_translator(app):
+ app.build()
+ assert app.builder.docwriter.visitor.depart_with_node == 10
@pytest.mark.parametrize("fname,expect", flat_dict({
'index.html': [
@@ -1148,3 +1154,29 @@ def test_html_entity(app):
content = (app.outdir / 'index.html').text()
for entity in re.findall(r'&([a-z]+);', content, re.M):
assert entity not in valid_entities
+
+
+@pytest.mark.sphinx('html', testroot='basic')
+def test_html_inventory(app):
+ app.builder.build_all()
+ with open(app.outdir / 'objects.inv', 'rb') as f:
+ invdata = InventoryFile.load(f, 'http://example.com', os.path.join)
+ assert set(invdata.keys()) == {'std:label', 'std:doc'}
+ assert set(invdata['std:label'].keys()) == {'modindex', 'genindex', 'search'}
+ assert invdata['std:label']['modindex'] == ('Python',
+ '',
+ 'http://example.com/py-modindex.html',
+ 'Module Index')
+ assert invdata['std:label']['genindex'] == ('Python',
+ '',
+ 'http://example.com/genindex.html',
+ 'Index')
+ assert invdata['std:label']['search'] == ('Python',
+ '',
+ 'http://example.com/search.html',
+ 'Search Page')
+ assert set(invdata['std:doc'].keys()) == {'index'}
+ assert invdata['std:doc']['index'] == ('Python',
+ '',
+ 'http://example.com/index.html',
+ 'The basic Sphinx documentation for testing')
diff --git a/tests/test_build_html5.py b/tests/test_build_html5.py
new file mode 100644
index 000000000..b491b6306
--- /dev/null
+++ b/tests/test_build_html5.py
@@ -0,0 +1,330 @@
+# -*- coding: utf-8 -*-
+"""
+ test_build_html5
+ ~~~~~~~~~~~~~~~~
+
+ Test the HTML5 writer and check output against XPath.
+
+    This code is a digest to reduce test running time.
+ Complete test code is here:
+
+ https://github.com/sphinx-doc/sphinx/pull/2805/files
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import re
+from itertools import cycle, chain
+import xml.etree.cElementTree as ElementTree
+
+from six import PY3
+import pytest
+from html5lib import getTreeBuilder, HTMLParser
+
+from sphinx import __display_version__
+from sphinx.util.docutils import is_html5_writer_available
+
+from util import remove_unicode_literals, strip_escseq, skip_unless
+from test_build_html import flat_dict, tail_check, check_xpath
+
+TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
+HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
+
+
+etree_cache = {}
+
+@skip_unless(is_html5_writer_available())
+@pytest.fixture(scope='module')
+def cached_etree_parse():
+ def parse(fname):
+ if fname in etree_cache:
+ return etree_cache[fname]
+ with (fname).open('rb') as fp:
+ etree = HTML_PARSER.parse(fp)
+ etree_cache.clear()
+ etree_cache[fname] = etree
+ return etree
+ yield parse
+ etree_cache.clear()
+
+
+@skip_unless(is_html5_writer_available())
+@pytest.mark.parametrize("fname,expect", flat_dict({
+ 'images.html': [
+ (".//img[@src='_images/img.png']", ''),
+ (".//img[@src='_images/img1.png']", ''),
+ (".//img[@src='_images/simg.png']", ''),
+ (".//img[@src='_images/svgimg.svg']", ''),
+ (".//a[@href='_sources/images.txt']", ''),
+ ],
+ 'subdir/images.html': [
+ (".//img[@src='../_images/img1.png']", ''),
+ (".//img[@src='../_images/rimg.png']", ''),
+ ],
+ 'subdir/includes.html': [
+ (".//a[@href='../_downloads/img.png']", ''),
+ (".//img[@src='../_images/img.png']", ''),
+ (".//p", 'This is an include file.'),
+ (".//pre/span", 'line 1'),
+ (".//pre/span", 'line 2'),
+ ],
+ 'includes.html': [
+ (".//pre", u'Max Strauß'),
+ (".//a[@href='_downloads/img.png']", ''),
+ (".//a[@href='_downloads/img1.png']", ''),
+ (".//pre/span", u'"quotes"'),
+ (".//pre/span", u"'included'"),
+ (".//pre/span[@class='s2']", u'üöä'),
+ (".//div[@class='inc-pyobj1 highlight-text']//pre",
+ r'^class Foo:\n pass\n\s*$'),
+ (".//div[@class='inc-pyobj2 highlight-text']//pre",
+ r'^ def baz\(\):\n pass\n\s*$'),
+ (".//div[@class='inc-lines highlight-text']//pre",
+ r'^class Foo:\n pass\nclass Bar:\n$'),
+ (".//div[@class='inc-startend highlight-text']//pre",
+ u'^foo = "Including Unicode characters: üöä"\\n$'),
+ (".//div[@class='inc-preappend highlight-text']//pre",
+ r'(?m)^START CODE$'),
+ (".//div[@class='inc-pyobj-dedent highlight-python']//span",
+ r'def'),
+ (".//div[@class='inc-tab3 highlight-text']//pre",
+ r'-| |-'),
+ (".//div[@class='inc-tab8 highlight-python']//pre/span",
+ r'-| |-'),
+ ],
+ 'autodoc.html': [
+ (".//dt[@id='test_autodoc.Class']", ''),
+ (".//dt[@id='test_autodoc.function']/em", r'\*\*kwds'),
+ (".//dd/p", r'Return spam\.'),
+ ],
+ 'extapi.html': [
+ (".//strong", 'from function: Foo'),
+ (".//strong", 'from class: Bar'),
+ ],
+ 'markup.html': [
+ (".//title", 'set by title directive'),
+ (".//p/em", 'Section author: Georg Brandl'),
+ (".//p/em", 'Module author: Georg Brandl'),
+ # created by the meta directive
+ (".//meta[@name='author'][@content='Me']", ''),
+ (".//meta[@name='keywords'][@content='docs, sphinx']", ''),
+ # a label created by ``.. _label:``
+ (".//div[@id='label']", ''),
+ # code with standard code blocks
+ (".//pre", '^some code$'),
+ # an option list
+ (".//span[@class='option']", '--help'),
+ # admonitions
+ (".//p[@class='admonition-title']", 'My Admonition'),
+ (".//div[@class='admonition note']/p", 'Note text.'),
+ (".//div[@class='admonition warning']/p", 'Warning text.'),
+ # inline markup
+ (".//li/p/strong", r'^command\\n$'),
+ (".//li/p/strong", r'^program\\n$'),
+ (".//li/p/em", r'^dfn\\n$'),
+ (".//li/p/code/span[@class='pre']", r'^kbd\\n$'),
+ (".//li/p/span", u'File \N{TRIANGULAR BULLET} Close'),
+ (".//li/p/code/span[@class='pre']", '^a/$'),
+ (".//li/p/code/em/span[@class='pre']", '^varpart$'),
+ (".//li/p/code/em/span[@class='pre']", '^i$'),
+ (".//a[@href='https://www.python.org/dev/peps/pep-0008']"
+ "[@class='pep reference external']/strong", 'PEP 8'),
+ (".//a[@href='https://www.python.org/dev/peps/pep-0008']"
+ "[@class='pep reference external']/strong",
+ 'Python Enhancement Proposal #8'),
+ (".//a[@href='https://tools.ietf.org/html/rfc1.html']"
+ "[@class='rfc reference external']/strong", 'RFC 1'),
+ (".//a[@href='https://tools.ietf.org/html/rfc1.html']"
+ "[@class='rfc reference external']/strong", 'Request for Comments #1'),
+ (".//a[@href='objects.html#envvar-HOME']"
+ "[@class='reference internal']/code/span[@class='pre']", 'HOME'),
+ (".//a[@href='#with']"
+ "[@class='reference internal']/code/span[@class='pre']", '^with$'),
+ (".//a[@href='#grammar-token-try_stmt']"
+ "[@class='reference internal']/code/span", '^statement$'),
+ (".//a[@href='#some-label'][@class='reference internal']/span", '^here$'),
+ (".//a[@href='#some-label'][@class='reference internal']/span", '^there$'),
+ (".//a[@href='subdir/includes.html']"
+ "[@class='reference internal']/span", 'Including in subdir'),
+ (".//a[@href='objects.html#cmdoption-python-c']"
+ "[@class='reference internal']/code/span[@class='pre']", '-c'),
+ # abbreviations
+ (".//abbr[@title='abbreviation']", '^abbr$'),
+ # version stuff
+ (".//div[@class='versionadded']/p/span", 'New in version 0.6: '),
+ (".//div[@class='versionadded']/p/span",
+ tail_check('First paragraph of versionadded')),
+ (".//div[@class='versionchanged']/p/span",
+ tail_check('First paragraph of versionchanged')),
+ (".//div[@class='versionchanged']/p",
+ 'Second paragraph of versionchanged'),
+ # footnote reference
+ (".//a[@class='footnote-reference brackets']", r'1'),
+ # created by reference lookup
+ (".//a[@href='contents.html#ref1']", ''),
+ # ``seealso`` directive
+ (".//div/p[@class='admonition-title']", 'See also'),
+ # a ``hlist`` directive
+ (".//table[@class='hlist']/tbody/tr/td/ul/li/p", '^This$'),
+ # a ``centered`` directive
+ (".//p[@class='centered']/strong", 'LICENSE'),
+ # a glossary
+ (".//dl/dt[@id='term-boson']", 'boson'),
+ # a production list
+ (".//pre/strong", 'try_stmt'),
+ (".//pre/a[@href='#grammar-token-try1_stmt']/code/span", 'try1_stmt'),
+ # tests for ``only`` directive
+ (".//p", 'A global substitution.'),
+ (".//p", 'In HTML.'),
+ (".//p", 'In both.'),
+ (".//p", 'Always present'),
+ # tests for ``any`` role
+ (".//a[@href='#with']/span", 'headings'),
+ (".//a[@href='objects.html#func_without_body']/code/span", 'objects'),
+ ],
+ 'objects.html': [
+ (".//dt[@id='mod.Cls.meth1']", ''),
+ (".//dt[@id='errmod.Error']", ''),
+ (".//dt/code", r'long\(parameter,\s* list\)'),
+ (".//dt/code", 'another one'),
+ (".//a[@href='#mod.Cls'][@class='reference internal']", ''),
+ (".//dl[@class='userdesc']", ''),
+ (".//dt[@id='userdesc-myobj']", ''),
+ (".//a[@href='#userdesc-myobj'][@class='reference internal']", ''),
+ # docfields
+ (".//a[@class='reference internal'][@href='#TimeInt']/em", 'TimeInt'),
+ (".//a[@class='reference internal'][@href='#Time']", 'Time'),
+ (".//a[@class='reference internal'][@href='#errmod.Error']/strong", 'Error'),
+ # C references
+ (".//span[@class='pre']", 'CFunction()'),
+ (".//a[@href='#c.Sphinx_DoSomething']", ''),
+ (".//a[@href='#c.SphinxStruct.member']", ''),
+ (".//a[@href='#c.SPHINX_USE_PYTHON']", ''),
+ (".//a[@href='#c.SphinxType']", ''),
+ (".//a[@href='#c.sphinx_global']", ''),
+ # test global TOC created by toctree()
+ (".//ul[@class='current']/li[@class='toctree-l1 current']/a[@href='#']",
+ 'Testing object descriptions'),
+ (".//li[@class='toctree-l1']/a[@href='markup.html']",
+ 'Testing various markup'),
+ # test unknown field names
+ (".//dt[@class='field-odd']", 'Field_name'),
+ (".//dt[@class='field-even']", 'Field_name all lower'),
+ (".//dt[@class='field-odd']", 'FIELD_NAME'),
+ (".//dt[@class='field-even']", 'FIELD_NAME ALL CAPS'),
+ (".//dt[@class='field-odd']", 'Field_Name'),
+ (".//dt[@class='field-even']", 'Field_Name All Word Caps'),
+ (".//dt[@class='field-odd']", 'Field_name'),
+ (".//dt[@class='field-even']", 'Field_name First word cap'),
+ (".//dt[@class='field-odd']", 'FIELd_name'),
+ (".//dt[@class='field-even']", 'FIELd_name PARTial caps'),
+ # custom sidebar
+ (".//h4", 'Custom sidebar'),
+ # docfields
+ (".//dd[@class='field-odd']/p/strong", '^moo$'),
+ (".//dd[@class='field-odd']/p/strong", tail_check(r'\(Moo\) .* Moo')),
+ (".//dd[@class='field-odd']/ul/li/p/strong", '^hour$'),
+ (".//dd[@class='field-odd']/ul/li/p/em", '^DuplicateType$'),
+ (".//dd[@class='field-odd']/ul/li/p/em", tail_check(r'.* Some parameter')),
+ # others
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
+ 'perl'),
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-arg-p']/code/span",
+ '\\+p'),
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-objc']/code/span",
+ '--ObjC\\+\\+'),
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-plugin-option']/code/span",
+ '--plugin.option'),
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-arg-create-auth-token']"
+ "/code/span",
+ 'create-auth-token'),
+ (".//a[@class='reference internal'][@href='#cmdoption-perl-arg-arg']/code/span",
+ 'arg'),
+ (".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
+ 'hg'),
+ (".//a[@class='reference internal'][@href='#cmdoption-hg-arg-commit']/code/span",
+ 'commit'),
+ (".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
+ 'git'),
+ (".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
+ 'commit'),
+ (".//a[@class='reference internal'][@href='#cmdoption-git-commit-p']/code/span",
+ '-p'),
+ ],
+ 'contents.html': [
+ (".//meta[@name='hc'][@content='hcval']", ''),
+ (".//meta[@name='hc_co'][@content='hcval_co']", ''),
+ (".//meta[@name='testopt'][@content='testoverride']", ''),
+ (".//dt[@class='label']/span[@class='brackets']", r'Ref1'),
+ (".//dt[@class='label']", ''),
+ (".//li[@class='toctree-l1']/a", 'Testing various markup'),
+ (".//li[@class='toctree-l2']/a", 'Inline markup'),
+ (".//title", 'Sphinx <Tests>'),
+ (".//div[@class='footer']", 'Georg Brandl & Team'),
+ (".//a[@href='http://python.org/']"
+ "[@class='reference external']", ''),
+ (".//li/p/a[@href='genindex.html']/span", 'Index'),
+ (".//li/p/a[@href='py-modindex.html']/span", 'Module Index'),
+ (".//li/p/a[@href='search.html']/span", 'Search Page'),
+ # custom sidebar only for contents
+ (".//h4", 'Contents sidebar'),
+ # custom JavaScript
+ (".//script[@src='file://moo.js']", ''),
+ # URL in contents
+ (".//a[@class='reference external'][@href='http://sphinx-doc.org/']",
+ 'http://sphinx-doc.org/'),
+ (".//a[@class='reference external'][@href='http://sphinx-doc.org/latest/']",
+ 'Latest reference'),
+ # Indirect hyperlink targets across files
+ (".//a[@href='markup.html#some-label'][@class='reference internal']/span",
+ '^indirect hyperref$'),
+ ],
+ 'bom.html': [
+ (".//title", " File with UTF-8 BOM"),
+ ],
+ 'extensions.html': [
+ (".//a[@href='http://python.org/dev/']", "http://python.org/dev/"),
+ (".//a[@href='http://bugs.python.org/issue1000']", "issue 1000"),
+ (".//a[@href='http://bugs.python.org/issue1042']", "explicit caption"),
+ ],
+ '_static/statictmpl.html': [
+ (".//project", 'Sphinx <Tests>'),
+ ],
+ 'genindex.html': [
+ # index entries
+ (".//a/strong", "Main"),
+ (".//a/strong", "[1]"),
+ (".//a/strong", "Other"),
+ (".//a", "entry"),
+ (".//li/a", "double"),
+ ],
+ 'footnote.html': [
+ (".//a[@class='footnote-reference brackets'][@href='#id8'][@id='id1']", r"1"),
+ (".//a[@class='footnote-reference brackets'][@href='#id9'][@id='id2']", r"2"),
+ (".//a[@class='footnote-reference brackets'][@href='#foo'][@id='id3']", r"3"),
+ (".//a[@class='reference internal'][@href='#bar'][@id='id4']", r"\[bar\]"),
+ (".//a[@class='footnote-reference brackets'][@href='#id10'][@id='id5']", r"4"),
+ (".//a[@class='footnote-reference brackets'][@href='#id11'][@id='id6']", r"5"),
+ (".//a[@class='fn-backref'][@href='#id1']", r"1"),
+ (".//a[@class='fn-backref'][@href='#id2']", r"2"),
+ (".//a[@class='fn-backref'][@href='#id3']", r"3"),
+ (".//a[@class='fn-backref'][@href='#id4']", r"bar"),
+ (".//a[@class='fn-backref'][@href='#id5']", r"4"),
+ (".//a[@class='fn-backref'][@href='#id6']", r"5"),
+ (".//a[@class='fn-backref'][@href='#id7']", r"6"),
+ ],
+ 'otherext.html': [
+ (".//h1", "Generated section"),
+ (".//a[@href='_sources/otherext.foo.txt']", ''),
+ ]
+}))
+@pytest.mark.sphinx('html', tags=['testtag'], confoverrides={
+ 'html_context.hckey_co': 'hcval_co',
+ 'html_experimental_html5_writer': True})
+@pytest.mark.test_params(shared_result='test_build_html5_output')
+def test_html5_output(app, cached_etree_parse, fname, expect):
+ app.build()
+ print(app.outdir / fname)
+ check_xpath(cached_etree_parse(app.outdir / fname), fname, *expect)
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index a8179284c..b2cfd6cbb 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -14,12 +14,14 @@ import os
import re
from itertools import product
from subprocess import Popen, PIPE
+from shutil import copyfile
from six import PY3
import pytest
from sphinx.errors import SphinxError
from sphinx.util.osutil import cd, ensuredir
+from sphinx.util import docutils
from sphinx.writers.latex import LaTeXTranslator
from util import SkipTest, remove_unicode_literals, strip_escseq, skip_if
@@ -66,6 +68,9 @@ def compile_latex_document(app):
with cd(app.outdir):
try:
ensuredir(app.config.latex_engine)
+ # keep a copy of latex file for this engine in case test fails
+ copyfile('SphinxTests.tex',
+ app.config.latex_engine + '/SphinxTests.tex')
p = Popen([app.config.latex_engine,
'--interaction=nonstopmode',
'-output-directory=%s' % app.config.latex_engine,
@@ -479,12 +484,16 @@ def test_footnote(app, status, warning):
'{\\phantomsection\\label{\\detokenize{footnote:bar}} '
'\ncite\n}') in result
assert '\\caption{Table caption \\sphinxfootnotemark[4]' in result
- assert 'name \\sphinxfootnotemark[5]' in result
- assert ('\\end{threeparttable}\n\n%\n'
- '\\begin{footnotetext}[4]\sphinxAtStartFootnote\n'
- 'footnotes in table caption\n%\n\\end{footnotetext}%\n'
- '\\begin{footnotetext}[5]\sphinxAtStartFootnote\n'
- 'footnotes in table\n%\n\\end{footnotetext}') in result
+ assert ('\\hline%\n\\begin{footnotetext}[4]\\sphinxAtStartFootnote\n'
+ 'footnote in table caption\n%\n\\end{footnotetext}\\ignorespaces %\n'
+ '\\begin{footnotetext}[5]\\sphinxAtStartFootnote\n'
+ 'footnote in table header\n%\n\\end{footnotetext}\\ignorespaces \n'
+ 'VIDIOC\\_CROPCAP\n&\n') in result
+ assert ('Information about VIDIOC\\_CROPCAP %\n'
+ '\\begin{footnote}[6]\\sphinxAtStartFootnote\n'
+ 'footnote in table not in header\n%\n\\end{footnote}\n\\\\\n\\hline\n'
+ '\\end{tabulary}\n\\end{threeparttable}\n'
+ '\\par\n\\sphinxattableend\\end{savenotes}\n') in result
@pytest.mark.sphinx('latex', testroot='footnotes')
@@ -506,21 +515,25 @@ def test_reference_in_caption_and_codeblock_in_footnote(app, status, warning):
'%\n\\begin{footnotetext}[4]\\sphinxAtStartFootnote\n'
'Footnote in section\n%\n\\end{footnotetext}') in result
assert ('\\caption{This is the figure caption with a footnote to '
- '\\sphinxfootnotemark[6].}\label{\\detokenize{index:id27}}\end{figure}\n'
+ '\\sphinxfootnotemark[6].}\\label{\\detokenize{index:id27}}\\end{figure}\n'
'%\n\\begin{footnotetext}[6]\\sphinxAtStartFootnote\n'
'Footnote in caption\n%\n\\end{footnotetext}')in result
assert ('\\caption{footnote \\sphinxfootnotemark[7] '
'in caption of normal table}\\label{\\detokenize{index:id28}}') in result
assert ('\\caption{footnote \\sphinxfootnotemark[8] '
- 'in caption \sphinxfootnotemark[9] of longtable}') in result
- assert ('\end{longtable}\n\n%\n\\begin{footnotetext}[8]'
- '\sphinxAtStartFootnote\n'
- 'Foot note in longtable\n%\n\\end{footnotetext}' in result)
+ 'in caption \\sphinxfootnotemark[9] of longtable\\strut}') in result
+ assert ('\\endlastfoot\n%\n\\begin{footnotetext}[8]\\sphinxAtStartFootnote\n'
+ 'Foot note in longtable\n%\n\\end{footnotetext}\\ignorespaces %\n'
+ '\\begin{footnotetext}[9]\\sphinxAtStartFootnote\n'
+ 'Second footnote in caption of longtable\n') in result
assert ('This is a reference to the code-block in the footnote:\n'
- '{\hyperref[\\detokenize{index:codeblockinfootnote}]'
+ '{\\hyperref[\\detokenize{index:codeblockinfootnote}]'
'{\\sphinxcrossref{\\DUrole{std,std-ref}{I am in a footnote}}}}') in result
- assert ('&\nThis is one more footnote with some code in it '
- '\\sphinxfootnotemark[10].\n\\\\') in result
+ assert ('&\nThis is one more footnote with some code in it %\n'
+ '\\begin{footnote}[10]\\sphinxAtStartFootnote\n'
+ 'Third footnote in longtable\n') in result
+ assert ('\\end{sphinxVerbatim}\n\\let\\sphinxVerbatimTitle\\empty\n'
+ '\\let\\sphinxLiteralBlockLabel\\empty\n%\n\\end{footnote}.\n') in result
assert '\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]' in result
@@ -560,7 +573,8 @@ def test_latex_show_urls_is_inline(app, status, warning):
'(http://sphinx-doc.org/)}] \\leavevmode\nDescription' in result)
assert ('\\item[{Footnote in term \\sphinxfootnotemark[5]}] '
'\\leavevmode%\n\\begin{footnotetext}[5]\\sphinxAtStartFootnote\n'
- 'Footnote in term\n%\n\\end{footnotetext}\nDescription') in result
+ 'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces \n'
+ 'Description') in result
assert ('\\item[{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist} '
'(http://sphinx-doc.org/)}] \\leavevmode\nDescription') in result
assert '\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result
@@ -606,15 +620,16 @@ def test_latex_show_urls_is_footnote(app, status, warning):
'{URL in term}\\sphinxfootnotemark[8]}] '
'\\leavevmode%\n\\begin{footnotetext}[8]\\sphinxAtStartFootnote\n'
'\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
- '\\end{footnotetext}\nDescription') in result
+ '\\end{footnotetext}\\ignorespaces \nDescription') in result
assert ('\\item[{Footnote in term \\sphinxfootnotemark[10]}] '
'\\leavevmode%\n\\begin{footnotetext}[10]\\sphinxAtStartFootnote\n'
- 'Footnote in term\n%\n\\end{footnotetext}\nDescription') in result
+ 'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces \n'
+ 'Description') in result
assert ('\\item[{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}'
'\\sphinxfootnotemark[9]}] '
'\\leavevmode%\n\\begin{footnotetext}[9]\\sphinxAtStartFootnote\n'
'\\sphinxnolinkurl{http://sphinx-doc.org/}\n%\n'
- '\\end{footnotetext}\nDescription') in result
+ '\\end{footnotetext}\\ignorespaces \nDescription') in result
assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
assert ('\\sphinxhref{mailto:sphinx-dev@googlegroups.com}'
'{sphinx-dev@googlegroups.com}\n') in result
@@ -654,7 +669,8 @@ def test_latex_show_urls_is_no(app, status, warning):
'\\leavevmode\nDescription') in result
assert ('\\item[{Footnote in term \\sphinxfootnotemark[5]}] '
'\\leavevmode%\n\\begin{footnotetext}[5]\\sphinxAtStartFootnote\n'
- 'Footnote in term\n%\n\\end{footnotetext}\nDescription') in result
+ 'Footnote in term\n%\n\\end{footnotetext}\\ignorespaces \n'
+ 'Description') in result
assert ('\\item[{\\sphinxhref{http://sphinx-doc.org/}{Term in deflist}}] '
'\\leavevmode\nDescription') in result
assert ('\\sphinxurl{https://github.com/sphinx-doc/sphinx}\n' in result)
@@ -814,3 +830,200 @@ def test_maxlistdepth_at_ten(app, status, warning):
print(status.getvalue())
print(warning.getvalue())
compile_latex_document(app)
+
+
+@pytest.mark.skipif(docutils.__version_info__ < (0, 13),
+ reason='docutils-0.13 or above is required')
+@pytest.mark.sphinx('latex', testroot='latex-table')
+@pytest.mark.test_params(shared_result='test_latex_table')
+def test_latex_table_tabulars(app, status, warning):
+ app.builder.build_all()
+ result = (app.outdir / 'test.tex').text(encoding='utf8')
+ tables = {}
+ for chap in re.split(r'\\section{', result)[1:]:
+ sectname, content = chap.split('}', 1)
+ tables[sectname] = content.strip()
+
+ # simple_table
+ table = tables['simple table']
+ assert ('\\begin{savenotes}\\sphinxattablestart\n\\centering\n'
+ '\\begin{tabulary}{\\linewidth}[t]{|T|T|}' in table)
+ assert ('\\hline\n'
+ '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &'
+ '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax' in table)
+ assert ('\\hline\ncell1-1\n&\ncell1-2\n\\\\' in table)
+ assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table)
+ assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table)
+ assert ('\\hline\n\\end{tabulary}\n\\par\n'
+ '\\sphinxattableend\\end{savenotes}' in table)
+
+ # table having :widths: option
+ table = tables['table having :widths: option']
+ assert ('\\begin{savenotes}\\sphinxattablestart\n\\centering\n'
+ '\\begin{tabular}[t]{|\\X{30}{100}|\\X{70}{100}|}' in table)
+ assert ('\\hline\n\\end{tabular}\n\\par\n'
+ '\\sphinxattableend\\end{savenotes}' in table)
+
+ # table having :align: option (tabulary)
+ table = tables['table having :align: option (tabulary)']
+ assert ('\\begin{savenotes}\\sphinxattablestart\n\\raggedleft\n'
+ '\\begin{tabulary}{\\linewidth}[t]{|T|T|}\n' in table)
+ assert ('\\hline\n\\end{tabulary}\n\\par\n'
+ '\\sphinxattableend\\end{savenotes}' in table)
+
+ # table having :align: option (tabular)
+ table = tables['table having :align: option (tabular)']
+ assert ('\\begin{savenotes}\\sphinxattablestart\n\\raggedright\n'
+ '\\begin{tabular}[t]{|\\X{30}{100}|\\X{70}{100}|}\n' in table)
+ assert ('\\hline\n\\end{tabular}\n\\par\n'
+ '\\sphinxattableend\\end{savenotes}' in table)
+
+ # table with tabularcolumn
+ table = tables['table with tabularcolumn']
+ assert ('\\begin{tabulary}{\\linewidth}[t]{|c|c|}' in table)
+
+ # table having caption
+ table = tables['table having caption']
+ assert ('\\begin{savenotes}\\sphinxattablestart\n\\centering\n'
+ '\\begin{threeparttable}\n\\capstart\\caption{caption for table}'
+ '\\label{\\detokenize{tabular:id1}}' in table)
+ assert ('\\begin{tabulary}{\\linewidth}[t]{|T|T|}' in table)
+ assert ('\\hline\n\\end{tabulary}\n\\end{threeparttable}'
+ '\n\\par\n\\sphinxattableend\\end{savenotes}' in table)
+
+ # table having verbatim
+ table = tables['table having verbatim']
+ assert ('\\begin{tabular}[t]{|*{2}{\\X{1}{2}|}}\n\\hline' in table)
+
+ # table having problematic cell
+ table = tables['table having problematic cell']
+ assert ('\\begin{tabular}[t]{|*{2}{\\X{1}{2}|}}\n\\hline' in table)
+
+ # table having both :widths: and problematic cell
+ table = tables['table having both :widths: and problematic cell']
+ assert ('\\begin{tabular}[t]{|\\X{30}{100}|\\X{70}{100}|}' in table)
+
+
+@pytest.mark.skipif(docutils.__version_info__ < (0, 13),
+ reason='docutils-0.13 or above is required')
+@pytest.mark.sphinx('latex', testroot='latex-table')
+@pytest.mark.test_params(shared_result='test_latex_table')
+def test_latex_table_longtable(app, status, warning):
+ app.builder.build_all()
+ result = (app.outdir / 'test.tex').text(encoding='utf8')
+ tables = {}
+ for chap in re.split(r'\\section{', result)[1:]:
+ sectname, content = chap.split('}', 1)
+ tables[sectname] = content.strip()
+
+ # longtable
+ table = tables['longtable']
+ assert ('\\begin{savenotes}\\sphinxatlongtablestart'
+ '\\begin{longtable}{|l|l|}\n\\hline' in table)
+ assert ('\\hline\n'
+ '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &'
+ '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n'
+ '\\hline\n\\endfirsthead' in table)
+ assert ('\\multicolumn{2}{c}%\n'
+ '{\\makebox[0pt]{\\sphinxtablecontinued{\\tablename\\ \\thetable{} -- '
+ 'continued from previous page}}}\\\\\n\\hline\n'
+ '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &'
+ '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax \\\\\n'
+ '\\hline\n\\endhead' in table)
+ assert ('\\hline\n\\multicolumn{2}{r}'
+ '{\\makebox[0pt][r]{\\sphinxtablecontinued{Continued on next page}}}\\\\\n'
+ '\\endfoot\n\n\\endlastfoot' in table)
+ assert ('\ncell1-1\n&\ncell1-2\n\\\\' in table)
+ assert ('\\hline\ncell2-1\n&\ncell2-2\n\\\\' in table)
+ assert ('\\hline\ncell3-1\n&\ncell3-2\n\\\\' in table)
+ assert ('\\hline\n\\end{longtable}\\sphinxatlongtableend\\end{savenotes}' in table)
+
+ # longtable having :widths: option
+ table = tables['longtable having :widths: option']
+ assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table)
+
+ # longtable having :align: option
+ table = tables['longtable having :align: option']
+ assert ('\\begin{longtable}[r]{|l|l|}\n' in table)
+ assert ('\\hline\n\\end{longtable}' in table)
+
+ # longtable with tabularcolumn
+ table = tables['longtable with tabularcolumn']
+ assert ('\\begin{longtable}{|c|c|}' in table)
+
+ # longtable having caption
+ table = tables['longtable having caption']
+ assert ('\\begin{longtable}{|l|l|}\n\\caption{caption for longtable\\strut}'
+ '\\label{\\detokenize{longtable:id1}}'
+ '\\\\*[\\sphinxlongtablecapskipadjust]\n\\hline' in table)
+
+ # longtable having verbatim
+ table = tables['longtable having verbatim']
+ assert ('\\begin{longtable}{|*{2}{\\X{1}{2}|}}\n\\hline' in table)
+
+ # longtable having problematic cell
+ table = tables['longtable having problematic cell']
+ assert ('\\begin{longtable}{|*{2}{\\X{1}{2}|}}\n\\hline' in table)
+
+ # longtable having both :widths: and problematic cell
+ table = tables['longtable having both :widths: and problematic cell']
+ assert ('\\begin{longtable}{|\\X{30}{100}|\\X{70}{100}|}' in table)
+
+
+@pytest.mark.skipif(docutils.__version_info__ < (0, 13),
+ reason='docutils-0.13 or above is required')
+@pytest.mark.sphinx('latex', testroot='latex-table')
+@pytest.mark.test_params(shared_result='test_latex_table')
+def test_latex_table_complex_tables(app, status, warning):
+ app.builder.build_all()
+ result = (app.outdir / 'test.tex').text(encoding='utf8')
+ tables = {}
+ for chap in re.split(r'\\section{', result)[1:]:
+ sectname, content = chap.split('}', 1)
+ tables[sectname] = content.strip()
+
+ # grid table
+ table = tables['grid table']
+ assert ('\\begin{tabulary}{\\linewidth}[t]{|T|T|T|}' in table)
+ assert ('\\hline\n'
+ '\\sphinxstylethead{\\relax \nheader1\n\\unskip}\\relax &'
+ '\\sphinxstylethead{\\relax \nheader2\n\\unskip}\\relax &'
+ '\\sphinxstylethead{\\relax \nheader3\n\\unskip}\\relax \\\\' in table)
+ assert ('\\hline\ncell1-1\n&\\sphinxmultirow{2}{5}{%\n\\begin{varwidth}[t]'
+ '{\\sphinxcolwidth{1}{3}}\n'
+ 'cell1-2\n\\par\n' in table)
+ assert ('\\cline{1-1}\\cline{3-3}\\sphinxmultirow{2}{7}{%\n' in table)
+ assert ('&\\sphinxtablestrut{5}&\ncell2-3\n\\\\\n'
+ '\\cline{2-3}\\sphinxtablestrut{7}&\\sphinxstartmulticolumn{2}%\n'
+ '\\sphinxmultirow{2}{9}{%\n\\begin{varwidth}' in table)
+ assert ('\\cline{1-1}\ncell4-1\n&\\multicolumn{2}{l|}'
+ '{\\sphinxtablestrut{9}}\\\\' in table)
+ assert ('\\hline\\sphinxstartmulticolumn{3}%\n'
+ in table)
+
+ # complex spanning cell
+ table = tables['complex spanning cell']
+ assert ('\\begin{tabulary}{\\linewidth}[t]{|T|T|T|T|T|}' in table)
+ assert ('\\sphinxmultirow{3}{1}{%\n'
+ '\\begin{varwidth}[t]{\\sphinxcolwidth{1}{5}}\n'
+ 'cell1-1\n\\par\n\\vskip-\\baselineskip\\strut\\end{varwidth}%\n'
+ '}%\n'
+ '&\\sphinxmultirow{3}{2}{%\n'
+ '\\begin{varwidth}[t]{\\sphinxcolwidth{1}{5}}\n'
+ 'cell1-2\n\\par\n\\vskip-\\baselineskip\\strut\\end{varwidth}%\n'
+ '}%\n&\ncell1-3\n&\\sphinxmultirow{3}{4}{%\n'
+ '\\begin{varwidth}[t]{\\sphinxcolwidth{1}{5}}\n'
+ 'cell1-4\n\\par\n\\vskip-\\baselineskip\\strut\\end{varwidth}%\n'
+ '}%\n'
+ '&\\sphinxmultirow{2}{5}{%\n'
+ '\\begin{varwidth}[t]{\\sphinxcolwidth{1}{5}}\n'
+ 'cell1-5\n'
+ in table)
+ assert ('\\cline{3-3}\\sphinxtablestrut{1}&\\sphinxtablestrut{2}&'
+ '\\sphinxmultirow{2}{6}{%\n'
+ in table)
+ assert ('&\\sphinxtablestrut{4}&\\sphinxtablestrut{5}\\\\\n'
+ '\\cline{5-5}\\sphinxtablestrut{1}&\\sphinxtablestrut{2}&'
+ '\\sphinxtablestrut{6}&\\sphinxtablestrut{4}&\ncell3-5\n'
+ '\\\\\n\\hline\n\\end{tabulary}\n'
+ in table)
diff --git a/tests/test_config.py b/tests/test_config.py
index 4d53e3405..a026f3d2e 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -90,7 +90,8 @@ def test_extension_values(app, status, warning):
assert 'already present' in str(excinfo.value)
-def test_errors_warnings(tempdir):
+@mock.patch("sphinx.config.logger")
+def test_errors_warnings(logger, tempdir):
# test the error for syntax errors in the config file
(tempdir / 'conf.py').write_text(u'project = \n', encoding='ascii')
with pytest.raises(ConfigError) as excinfo:
@@ -102,8 +103,9 @@ def test_errors_warnings(tempdir):
u'# -*- coding: utf-8\n\nproject = u"Jägermeister"\n',
encoding='utf-8')
cfg = Config(tempdir, 'conf.py', {}, None)
- cfg.init_values(lambda warning: 1 / 0)
+ cfg.init_values()
assert cfg.project == u'Jägermeister'
+ assert logger.called is False
# test the warning for bytestrings with non-ascii content
# bytestrings with non-ascii content are a syntax error in python3 so we
@@ -113,13 +115,10 @@ def test_errors_warnings(tempdir):
(tempdir / 'conf.py').write_text(
u'# -*- coding: latin-1\nproject = "fooä"\n', encoding='latin-1')
cfg = Config(tempdir, 'conf.py', {}, None)
- warned = [False]
- def warn(msg):
- warned[0] = True
-
- cfg.check_unicode(warn)
- assert warned[0]
+ assert logger.warning.called is False
+ cfg.check_unicode()
+ assert logger.warning.called is True
def test_errors_if_setup_is_not_callable(tempdir, make_app):
@@ -157,14 +156,16 @@ def test_needs_sphinx(make_app):
make_app(confoverrides={'needs_sphinx': '2'}) # NG: greater
-def test_config_eol(tempdir):
+@mock.patch("sphinx.config.logger")
+def test_config_eol(logger, tempdir):
# test config file's eol patterns: LF, CRLF
configfile = tempdir / 'conf.py'
for eol in (b'\n', b'\r\n'):
configfile.write_bytes(b'project = "spam"' + eol)
cfg = Config(tempdir, 'conf.py', {}, None)
- cfg.init_values(lambda warning: 1 / 0)
+ cfg.init_values()
assert cfg.project == u'spam'
+ assert logger.called is False
@pytest.mark.sphinx(confoverrides={'master_doc': 123,
diff --git a/tests/test_directive_code.py b/tests/test_directive_code.py
index fad1fba1c..5a91b4aab 100644
--- a/tests/test_directive_code.py
+++ b/tests/test_directive_code.py
@@ -11,7 +11,234 @@
import pytest
-from util import etree_parse
+from sphinx.config import Config
+from sphinx.directives.code import LiteralIncludeReader
+from util import etree_parse, rootdir
+
+TESTROOT_PATH = rootdir / 'roots' / 'test-directive-code'
+LITERAL_INC_PATH = TESTROOT_PATH / 'literal.inc'
+DUMMY_CONFIG = Config(None, None, {}, '')
+
+
+def test_LiteralIncludeReader():
+ options = {'lineno-match': True}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == LITERAL_INC_PATH.text()
+ assert lines == 14
+ assert reader.lineno_start == 1
+
+
+def test_LiteralIncludeReader_lineno_start():
+ options = {'lineno-start': 5}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == LITERAL_INC_PATH.text()
+ assert lines == 14
+ assert reader.lineno_start == 5
+
+
+def test_LiteralIncludeReader_pyobject1():
+ options = {'lineno-match': True, 'pyobject': 'Foo'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("class Foo:\n"
+ " pass\n")
+ assert reader.lineno_start == 6
+
+
+def test_LiteralIncludeReader_pyobject2():
+ options = {'pyobject': 'Bar'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("class Bar:\n"
+ " def baz():\n"
+ " pass\n")
+ assert reader.lineno_start == 1 # no lineno-match
+
+
+def test_LiteralIncludeReader_pyobject3():
+ options = {'pyobject': 'Bar.baz'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (" def baz():\n"
+ " pass\n")
+
+
+def test_LiteralIncludeReader_pyobject_and_lines():
+ options = {'pyobject': 'Bar', 'lines': '2-'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (" def baz():\n"
+ " pass\n")
+
+
+def test_LiteralIncludeReader_lines1():
+ options = {'lines': '1-4'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (u"# Literally included file using Python highlighting\n"
+ u"# -*- coding: utf-8 -*-\n"
+ u"\n"
+ u"foo = \"Including Unicode characters: üöä\"\n")
+
+
+def test_LiteralIncludeReader_lines2():
+ options = {'lines': '1,4,6'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (u"# Literally included file using Python highlighting\n"
+ u"foo = \"Including Unicode characters: üöä\"\n"
+ u"class Foo:\n")
+
+
+def test_LiteralIncludeReader_lines_and_lineno_match1():
+ options = {'lines': '4-6', 'lineno-match': True}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (u"foo = \"Including Unicode characters: üöä\"\n"
+ u"\n"
+ u"class Foo:\n")
+ assert reader.lineno_start == 4
+
+
+@pytest.mark.sphinx() # init locale for errors
+def test_LiteralIncludeReader_lines_and_lineno_match2(app, status, warning):
+ options = {'lines': '1,4,6', 'lineno-match': True}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ with pytest.raises(ValueError):
+ content, lines = reader.read()
+
+
+@pytest.mark.sphinx() # init locale for errors
+def test_LiteralIncludeReader_lines_and_lineno_match3(app, status, warning):
+ options = {'lines': '100-', 'lineno-match': True}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ with pytest.raises(ValueError):
+ content, lines = reader.read()
+
+
+def test_LiteralIncludeReader_start_at():
+ options = {'lineno-match': True, 'start-at': 'Foo', 'end-at': 'Bar'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("class Foo:\n"
+ " pass\n"
+ "\n"
+ "class Bar:\n")
+ assert reader.lineno_start == 6
+
+
+def test_LiteralIncludeReader_start_after():
+ options = {'lineno-match': True, 'start-after': 'Foo', 'end-before': 'Bar'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (" pass\n"
+ "\n")
+ assert reader.lineno_start == 7
+
+
+def test_LiteralIncludeReader_start_after_and_lines():
+ options = {'lineno-match': True, 'lines': '6-',
+ 'start-after': 'coding', 'end-before': 'comment'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("\n"
+ "class Bar:\n"
+ " def baz():\n"
+ " pass\n"
+ "\n")
+ assert reader.lineno_start == 8
+
+
+def test_LiteralIncludeReader_start_at_and_lines():
+ options = {'lines': '2, 3, 5', 'start-at': 'foo', 'end-before': '#'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("\n"
+ "class Foo:\n"
+ "\n")
+ assert reader.lineno_start == 1
+
+
+def test_LiteralIncludeReader_prepend():
+ options = {'lines': '1', 'prepend': 'Hello', 'append': 'Sphinx'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("Hello\n"
+ "# Literally included file using Python highlighting\n"
+ "Sphinx\n")
+
+
+def test_LiteralIncludeReader_dedent():
+ # dedent: 2
+ options = {'lines': '10-12', 'dedent': 2}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == (" def baz():\n"
+ " pass\n"
+ "\n")
+
+ # dedent: 4
+ options = {'lines': '10-12', 'dedent': 4}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("def baz():\n"
+ " pass\n"
+ "\n")
+
+ # dedent: 6
+ options = {'lines': '10-12', 'dedent': 6}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("f baz():\n"
+ " pass\n"
+ "\n")
+
+
+def test_LiteralIncludeReader_tabwidth():
+ # tab-width: 4
+ options = {'tab-width': 4, 'pyobject': 'Qux'}
+ reader = LiteralIncludeReader(TESTROOT_PATH / 'target.py', options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("class Qux:\n"
+ " def quux(self):\n"
+ " pass\n")
+
+ # tab-width: 8
+ options = {'tab-width': 8, 'pyobject': 'Qux'}
+ reader = LiteralIncludeReader(TESTROOT_PATH / 'target.py', options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("class Qux:\n"
+ " def quux(self):\n"
+ " pass\n")
+
+
+def test_LiteralIncludeReader_tabwidth_dedent():
+ options = {'tab-width': 4, 'dedent': 4, 'pyobject': 'Qux.quux'}
+ reader = LiteralIncludeReader(TESTROOT_PATH / 'target.py', options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("def quux(self):\n"
+ " pass\n")
+
+
+def test_LiteralIncludeReader_diff():
+ options = {'diff': TESTROOT_PATH / 'literal-diff.inc'}
+ reader = LiteralIncludeReader(LITERAL_INC_PATH, options, DUMMY_CONFIG)
+ content, lines = reader.read()
+ assert content == ("--- " + TESTROOT_PATH + "/literal-diff.inc\n"
+ "+++ " + TESTROOT_PATH + "/literal.inc\n"
+ "@@ -7,8 +7,8 @@\n"
+ " pass\n"
+ " \n"
+ " class Bar:\n"
+ "- def baz(self):\n"
+ "+ def baz():\n"
+ " pass\n"
+ " \n"
+ "-# comment after Bar class\n"
+ "+# comment after Bar class definition\n"
+ " def bar(): pass\n")
@pytest.mark.sphinx('xml', testroot='directive-code')
@@ -30,25 +257,6 @@ def test_code_block(app, status, warning):
assert actual == expect
-@pytest.mark.sphinx('xml', testroot='directive-code')
-def test_code_block_dedent(app, status, warning):
- app.builder.build(['dedent_code'])
- et = etree_parse(app.outdir / 'dedent_code.xml')
- blocks = et.findall('./section/section/literal_block')
-
- for i in range(5): # 0-4
- actual = blocks[i].text
- indent = " " * (4 - i)
- expect = (
- indent + "def ruby?\n" +
- indent + " false\n" +
- indent + "end"
- )
- assert (i, actual) == (i, expect)
-
- assert blocks[5].text == '\n\n' # dedent: 1000
-
-
@pytest.mark.sphinx('html', testroot='directive-code')
def test_code_block_caption_html(app, status, warning):
app.builder.build(['caption'])
@@ -67,7 +275,7 @@ def test_code_block_caption_latex(app, status, warning):
latex = (app.outdir / 'Python.tex').text(encoding='utf-8')
caption = '\\sphinxSetupCaptionForVerbatim{caption \\sphinxstyleemphasis{test} rb}'
label = '\\def\\sphinxLiteralBlockLabel{\\label{\\detokenize{caption:id1}}}'
- link = '\hyperref[\\detokenize{caption:name-test-rb}]' \
+ link = '\\hyperref[\\detokenize{caption:name-test-rb}]' \
'{Listing \\ref{\\detokenize{caption:name-test-rb}}}'
assert caption in latex
assert label in latex
@@ -104,24 +312,6 @@ def test_literal_include(app, status, warning):
@pytest.mark.sphinx('xml', testroot='directive-code')
-def test_literal_include_dedent(app, status, warning):
- literal_src = (app.srcdir / 'literal.inc').text(encoding='utf-8')
- literal_lines = [l[4:] for l in literal_src.split('\n')[9:11]]
-
- app.builder.build(['dedent'])
- et = etree_parse(app.outdir / 'dedent.xml')
- blocks = et.findall('./section/section/literal_block')
-
- for i in range(5): # 0-4
- actual = blocks[i].text
- indent = ' ' * (4 - i)
- expect = '\n'.join(indent + l for l in literal_lines) + '\n'
- assert (i, actual) == (i, expect)
-
- assert blocks[5].text == '\n\n' # dedent: 1000
-
-
-@pytest.mark.sphinx('xml', testroot='directive-code')
def test_literal_include_block_start_with_comment_or_brank(app, status, warning):
app.builder.build(['python'])
et = etree_parse(app.outdir / 'python.xml')
@@ -149,86 +339,48 @@ def test_literal_include_block_start_with_comment_or_brank(app, status, warning)
def test_literal_include_linenos(app, status, warning):
app.builder.build(['linenos'])
html = (app.outdir / 'linenos.html').text(encoding='utf-8')
- linenos = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- ' 1\n'
- ' 2\n'
- ' 3\n'
- ' 4\n'
- ' 5\n'
- ' 6\n'
- ' 7\n'
- ' 8\n'
- ' 9\n'
- '10\n'
- '11\n'
- '12\n'
- '13\n'
- '14</pre></div></td>')
- assert linenos in html
-
-
-@pytest.mark.sphinx('html', testroot='directive-code')
-def test_literal_include_lineno_start(app, status, warning):
- app.builder.build(['lineno_start'])
- html = (app.outdir / 'lineno_start.html').text(encoding='utf-8')
- linenos = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- '200\n'
- '201\n'
- '202\n'
- '203\n'
- '204\n'
- '205\n'
- '206\n'
- '207\n'
- '208\n'
- '209\n'
- '210\n'
- '211\n'
- '212\n'
- '213</pre></div></td>')
- assert linenos in html
-
-@pytest.mark.sphinx('html', testroot='directive-code')
-def test_literal_include_lineno_match(app, status, warning):
- app.builder.build(['lineno_match'])
- html = (app.outdir / 'lineno_match.html').text(encoding='utf-8')
- pyobject = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- ' 9\n'
- '10\n'
- '11</pre></div></td>')
-
- assert pyobject in html
-
- lines = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- '5\n'
- '6\n'
- '7\n'
- '8\n'
- '9</pre></div></td>')
- assert lines in html
-
- start_after = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- ' 8\n'
- ' 9\n'
- '10\n'
- '11\n'
- '12\n'
- '13\n'
- '14</pre></div></td>')
- assert start_after in html
-
- start_at_end_at = (
- '<td class="linenos"><div class="linenodiv"><pre>'
- ' 9\n'
- '10\n'
- '11</pre></div></td>')
- assert start_at_end_at in html
+ # :linenos:
+ assert ('<td class="linenos"><div class="linenodiv"><pre>'
+ ' 1\n'
+ ' 2\n'
+ ' 3\n'
+ ' 4\n'
+ ' 5\n'
+ ' 6\n'
+ ' 7\n'
+ ' 8\n'
+ ' 9\n'
+ '10\n'
+ '11\n'
+ '12\n'
+ '13\n'
+ '14</pre></div></td>' in html)
+
+ # :lineno-start:
+ assert ('<td class="linenos"><div class="linenodiv"><pre>'
+ '200\n'
+ '201\n'
+ '202\n'
+ '203\n'
+ '204\n'
+ '205\n'
+ '206\n'
+ '207\n'
+ '208\n'
+ '209\n'
+ '210\n'
+ '211\n'
+ '212\n'
+ '213</pre></div></td>' in html)
+
+ # :lineno-match:
+ assert ('<td class="linenos"><div class="linenodiv"><pre>'
+ '5\n'
+ '6\n'
+ '7\n'
+ '8\n'
+ '9</pre></div></td>' in html)
@pytest.mark.sphinx('latex', testroot='directive-code')
@@ -263,7 +415,7 @@ def test_literalinclude_caption_latex(app, status, warning):
latex = (app.outdir / 'Python.tex').text(encoding='utf-8')
caption = '\\sphinxSetupCaptionForVerbatim{caption \\sphinxstylestrong{test} py}'
label = '\\def\\sphinxLiteralBlockLabel{\\label{\\detokenize{caption:id2}}}'
- link = '\hyperref[\\detokenize{caption:name-test-py}]' \
+ link = '\\hyperref[\\detokenize{caption:name-test-py}]' \
'{Listing \\ref{\\detokenize{caption:name-test-py}}}'
assert caption in latex
assert label in latex
@@ -302,3 +454,45 @@ def test_literalinclude_classes(app, status, warning):
assert len(literalinclude) > 0
assert 'bar baz' == literalinclude[0].get('classes')
assert 'literal_include' == literalinclude[0].get('names')
+
+
+@pytest.mark.sphinx('xml', testroot='directive-code')
+def test_literalinclude_pydecorators(app, status, warning):
+ app.builder.build(['py-decorators'])
+ et = etree_parse(app.outdir / 'py-decorators.xml')
+ secs = et.findall('./section/section')
+
+ literal_include = secs[0].findall('literal_block')
+ assert len(literal_include) == 3
+
+ actual = literal_include[0].text
+ expect = (
+ '@class_decorator\n'
+ '@other_decorator()\n'
+ 'class TheClass(object):\n'
+ '\n'
+ ' @method_decorator\n'
+ ' @other_decorator()\n'
+ ' def the_method():\n'
+ ' pass\n'
+ )
+ assert actual == expect
+
+ actual = literal_include[1].text
+ expect = (
+ ' @method_decorator\n'
+ ' @other_decorator()\n'
+ ' def the_method():\n'
+ ' pass\n'
+ )
+ assert actual == expect
+
+ actual = literal_include[2].text
+ expect = (
+ '@function_decorator\n'
+ '@other_decorator()\n'
+ 'def the_function():\n'
+ ' pass\n'
+ )
+ assert actual == expect
+
diff --git a/tests/test_domain_cpp.py b/tests/test_domain_cpp.py
index cf5a5ea8a..49d8e3206 100644
--- a/tests/test_domain_cpp.py
+++ b/tests/test_domain_cpp.py
@@ -56,7 +56,7 @@ def check(name, input, idv1output=None, idv2output=None, output=None):
parentNode = addnodes.desc()
signode = addnodes.desc_signature(input, '')
parentNode += signode
- ast.describe_signature(signode, 'lastIsName', symbol)
+ ast.describe_signature(signode, 'lastIsName', symbol, options={})
if idv2output:
idv2output = "_CPPv2" + idv2output
@@ -524,12 +524,12 @@ def test_build_domain_cpp_with_add_function_parentheses_is_True(app, status, war
('', 'MyEnum')
]
parenPatterns = [
- ('ref function without parens ', 'paren_1\(\)'),
- ('ref function with parens ', 'paren_2\(\)'),
+ ('ref function without parens ', r'paren_1\(\)'),
+ ('ref function with parens ', r'paren_2\(\)'),
('ref function without parens, explicit title ', 'paren_3_title'),
('ref function with parens, explicit title ', 'paren_4_title'),
- ('ref op call without parens ', 'paren_5::operator\(\)\(\)'),
- ('ref op call with parens ', 'paren_6::operator\(\)\(\)'),
+ ('ref op call without parens ', r'paren_5::operator\(\)\(\)'),
+ ('ref op call with parens ', r'paren_6::operator\(\)\(\)'),
('ref op call without parens, explicit title ', 'paren_7_title'),
('ref op call with parens, explicit title ', 'paren_8_title')
]
@@ -570,8 +570,8 @@ def test_build_domain_cpp_with_add_function_parentheses_is_False(app, status, wa
('ref function with parens ', 'paren_2'),
('ref function without parens, explicit title ', 'paren_3_title'),
('ref function with parens, explicit title ', 'paren_4_title'),
- ('ref op call without parens ', 'paren_5::operator\(\)'),
- ('ref op call with parens ', 'paren_6::operator\(\)'),
+ ('ref op call without parens ', r'paren_5::operator\(\)'),
+ ('ref op call with parens ', r'paren_6::operator\(\)'),
('ref op call without parens, explicit title ', 'paren_7_title'),
('ref op call with parens, explicit title ', 'paren_8_title')
]
diff --git a/tests/test_environment.py b/tests/test_environment.py
index 65c82691d..4133a28fd 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -15,44 +15,38 @@ from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.builders.latex import LaTeXBuilder
app = env = None
-warnings = []
def setup_module():
global app, env
app = SphinxTestApp(srcdir='root-envtest')
env = app.env
- env.set_warnfunc(lambda *args, **kwargs: warnings.append(args))
def teardown_module():
app.cleanup()
-def warning_emitted(file, text):
- for warning in warnings:
- if len(warning) == 2 and file in warning[1] and text in warning[0]:
- return True
- return False
-
-
# Tests are run in the order they appear in the file, therefore we can
# afford to not run update() in the setup but in its own test
def test_first_update():
- updated = env.update(app.config, app.srcdir, app.doctreedir, app)
+ updated = env.update(app.config, app.srcdir, app.doctreedir)
assert set(updated) == env.found_docs == set(env.all_docs)
# test if exclude_patterns works ok
assert 'subdir/excluded' not in env.found_docs
def test_images():
- assert warning_emitted('images', 'image file not readable: foo.png')
- assert warning_emitted('images', 'nonlocal image URI found: '
- 'http://www.python.org/logo.png')
+ assert ('image file not readable: foo.png'
+ in app._warning.getvalue())
+ assert ('nonlocal image URI found: http://www.python.org/logo.png'
+ in app._warning.getvalue())
tree = env.get_doctree('images')
htmlbuilder = StandaloneHTMLBuilder(app)
+ htmlbuilder.set_environment(app.env)
+ htmlbuilder.init()
htmlbuilder.imgpath = 'dummy'
htmlbuilder.post_process_images(tree)
assert set(htmlbuilder.images.keys()) == \
@@ -62,6 +56,8 @@ def test_images():
set(['img.png', 'img1.png', 'simg.png', 'svgimg.svg', 'img.foo.png'])
latexbuilder = LaTeXBuilder(app)
+ latexbuilder.set_environment(app.env)
+ latexbuilder.init()
latexbuilder.post_process_images(tree)
assert set(latexbuilder.images.keys()) == \
set(['subdir/img.png', 'subdir/simg.png', 'img.png', 'img.pdf',
@@ -79,7 +75,7 @@ def test_second_update():
# the contents.txt toctree; otherwise section numbers would shift
(root / 'autodoc.txt').unlink()
(root / 'new.txt').write_text('New file\n========\n')
- updated = env.update(app.config, app.srcdir, app.doctreedir, app)
+ updated = env.update(app.config, app.srcdir, app.doctreedir)
# "includes" and "images" are in there because they contain references
# to nonexisting downloadable or image files, which are given another
# chance to exist
@@ -95,7 +91,7 @@ def test_env_read_docs():
app.connect('env-before-read-docs', on_env_read_docs_1)
- read_docnames = env.update(app.config, app.srcdir, app.doctreedir, app)
+ read_docnames = env.update(app.config, app.srcdir, app.doctreedir)
assert len(read_docnames) > 2 and read_docnames == sorted(read_docnames)
def on_env_read_docs_2(app, env, docnames):
@@ -103,7 +99,7 @@ def test_env_read_docs():
app.connect('env-before-read-docs', on_env_read_docs_2)
- read_docnames = env.update(app.config, app.srcdir, app.doctreedir, app)
+ read_docnames = env.update(app.config, app.srcdir, app.doctreedir)
assert len(read_docnames) == 2
diff --git a/tests/test_environment_indexentries.py b/tests/test_environment_indexentries.py
index fae5eb4e9..b9de151cc 100644
--- a/tests/test_environment_indexentries.py
+++ b/tests/test_environment_indexentries.py
@@ -11,7 +11,7 @@
from collections import namedtuple
from sphinx import locale
-from sphinx.environment.managers.indexentries import IndexEntries
+from sphinx.environment.adapters.indexentries import IndexEntries
import mock
diff --git a/tests/test_ext_doctest.py b/tests/test_ext_doctest.py
index fec9a44d0..705f6262a 100644
--- a/tests/test_ext_doctest.py
+++ b/tests/test_ext_doctest.py
@@ -9,6 +9,7 @@
:license: BSD, see LICENSE for details.
"""
import pytest
+from sphinx.ext.doctest import compare_version
cleanup_called = 0
@@ -25,6 +26,21 @@ def test_build(app, status, warning):
assert cleanup_called == 3, 'testcleanup did not get executed enough times'
+def test_compare_version():
+ assert compare_version('3.3', '3.4', '<') is True
+ assert compare_version('3.3', '3.2', '<') is False
+ assert compare_version('3.3', '3.4', '<=') is True
+ assert compare_version('3.3', '3.2', '<=') is False
+ assert compare_version('3.3', '3.3', '==') is True
+ assert compare_version('3.3', '3.4', '==') is False
+ assert compare_version('3.3', '3.2', '>=') is True
+ assert compare_version('3.3', '3.4', '>=') is False
+ assert compare_version('3.3', '3.2', '>') is True
+ assert compare_version('3.3', '3.4', '>') is False
+ with pytest.raises(ValueError):
+ compare_version('3.3', '3.4', '+')
+
+
def cleanup_call():
global cleanup_called
cleanup_called += 1
diff --git a/tests/test_ext_graphviz.py b/tests/test_ext_graphviz.py
index ecf18f9b9..6affda7ab 100644
--- a/tests/test_ext_graphviz.py
+++ b/tests/test_ext_graphviz.py
@@ -20,8 +20,8 @@ def test_graphviz_html(app, status, warning):
app.builder.build_all()
content = (app.outdir / 'index.html').text()
- html = ('<div class="figure" .*?>\s*<img .*?/>\s*<p class="caption">'
- '<span class="caption-text">caption of graph</span>.*</p>\s*</div>')
+ html = (r'<div class="figure" .*?>\s*<img .*?/>\s*<p class="caption">'
+ r'<span class="caption-text">caption of graph</span>.*</p>\s*</div>')
assert re.search(html, content, re.S)
html = 'Hello <img .*?/>\n graphviz world'
@@ -30,8 +30,8 @@ def test_graphviz_html(app, status, warning):
html = '<img src=".*?" alt="digraph {\n bar -&gt; baz\n}" />'
assert re.search(html, content, re.M)
- html = ('<div class="figure align-right" .*?>\s*<img .*?/>\s*<p class="caption">'
- '<span class="caption-text">on right</span>.*</p>\s*</div>')
+ html = (r'<div class="figure align-right" .*?>\s*<img .*?/>\s*<p class="caption">'
+ r'<span class="caption-text">on right</span>.*</p>\s*</div>')
assert re.search(html, content, re.S)
@@ -41,16 +41,16 @@ def test_graphviz_latex(app, status, warning):
app.builder.build_all()
content = (app.outdir / 'SphinxTests.tex').text()
- macro = ('\\\\begin{figure}\[htbp\]\n\\\\centering\n\\\\capstart\n\n'
- '\\\\includegraphics{graphviz-\w+.pdf}\n'
+ macro = ('\\\\begin{figure}\\[htbp\\]\n\\\\centering\n\\\\capstart\n\n'
+ '\\\\includegraphics{graphviz-\\w+.pdf}\n'
'\\\\caption{caption of graph}\\\\label{.*}\\\\end{figure}')
assert re.search(macro, content, re.S)
- macro = 'Hello \\\\includegraphics{graphviz-\w+.pdf} graphviz world'
+ macro = 'Hello \\\\includegraphics{graphviz-\\w+.pdf} graphviz world'
assert re.search(macro, content, re.S)
macro = ('\\\\begin{wrapfigure}{r}{0pt}\n\\\\centering\n'
- '\\\\includegraphics{graphviz-\w+.pdf}\n'
+ '\\\\includegraphics{graphviz-\\w+.pdf}\n'
'\\\\caption{on right}\\\\label{.*}\\\\end{wrapfigure}')
assert re.search(macro, content, re.S)
diff --git a/tests/test_ext_inheritance_diagram.py b/tests/test_ext_inheritance_diagram.py
index 249a7e035..5c4e5b673 100644
--- a/tests/test_ext_inheritance_diagram.py
+++ b/tests/test_ext_inheritance_diagram.py
@@ -24,7 +24,7 @@ def test_inheritance_diagram_html(app, status, warning):
content = (app.outdir / 'index.html').text()
pattern = ('<div class="figure" id="id1">\n'
- '<img src="_images/inheritance-\w+.png" alt="Inheritance diagram of test.Foo" '
+ '<img src="_images/inheritance-\\w+.png" alt="Inheritance diagram of test.Foo" '
'class="inheritance"/>\n<p class="caption"><span class="caption-text">'
'Test Foo!</span><a class="headerlink" href="#id1" '
'title="Permalink to this image">\xb6</a></p>')
diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py
index 869da42e1..86e56fd47 100644
--- a/tests/test_ext_intersphinx.py
+++ b/tests/test_ext_intersphinx.py
@@ -9,11 +9,8 @@
:license: BSD, see LICENSE for details.
"""
-import posixpath
import unittest
-import zlib
-from six import BytesIO
from docutils import nodes
import mock
import pytest
@@ -22,71 +19,16 @@ from io import BytesIO
from sphinx import addnodes
from sphinx.ext.intersphinx import setup as intersphinx_setup
-from sphinx.ext.intersphinx import read_inventory, \
- load_mappings, missing_reference, _strip_basic_auth, \
- _get_safe_url, fetch_inventory, INVENTORY_FILENAME, \
- debug
-
-
-inventory_v1 = '''\
-# Sphinx inventory version 1
-# Project: foo
-# Version: 1.0
-module mod foo.html
-module.cls class foo.html
-'''.encode('utf-8')
-
-inventory_v2 = '''\
-# Sphinx inventory version 2
-# Project: foo
-# Version: 2.0
-# The remainder of this file is compressed with zlib.
-'''.encode('utf-8') + zlib.compress('''\
-module1 py:module 0 foo.html#module-module1 Long Module desc
-module2 py:module 0 foo.html#module-$ -
-module1.func py:function 1 sub/foo.html#$ -
-CFunc c:function 2 cfunc.html#CFunc -
-a term std:term -1 glossary.html#term-a-term -
-a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
-'''.encode('utf-8'))
-
-
-def test_read_inventory_v1():
- f = BytesIO(inventory_v1)
- invdata = read_inventory(f, '/util', posixpath.join)
- assert invdata['py:module']['module'] == \
- ('foo', '1.0', '/util/foo.html#module-module', '-')
- assert invdata['py:class']['module.cls'] == \
- ('foo', '1.0', '/util/foo.html#module.cls', '-')
-
-
-def test_read_inventory_v2():
- f = BytesIO(inventory_v2)
- invdata1 = read_inventory(f, '/util', posixpath.join)
-
- # try again with a small buffer size to test the chunking algorithm
- f = BytesIO(inventory_v2)
- invdata2 = read_inventory(f, '/util', posixpath.join, bufsize=5)
-
- assert invdata1 == invdata2
-
- assert len(invdata1['py:module']) == 2
- assert invdata1['py:module']['module1'] == \
- ('foo', '2.0', '/util/foo.html#module-module1', 'Long Module desc')
- assert invdata1['py:module']['module2'] == \
- ('foo', '2.0', '/util/foo.html#module-module2', '-')
- assert invdata1['py:function']['module1.func'][2] == \
- '/util/sub/foo.html#module1.func'
- assert invdata1['c:function']['CFunc'][2] == '/util/cfunc.html#CFunc'
- assert invdata1['std:term']['a term'][2] == \
- '/util/glossary.html#term-a-term'
- assert invdata1['std:term']['a term including:colon'][2] == \
- '/util/glossary.html#term-a-term-including-colon'
-
-
-@mock.patch('sphinx.ext.intersphinx.read_inventory')
+from sphinx.ext.intersphinx import (
+ load_mappings, missing_reference, _strip_basic_auth,
+ _get_safe_url, fetch_inventory, INVENTORY_FILENAME, debug
+)
+from test_util_inventory import inventory_v2
+
+
+@mock.patch('sphinx.ext.intersphinx.InventoryFile')
@mock.patch('sphinx.ext.intersphinx._read_from_url')
-def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status, warning):
+def test_fetch_inventory_redirection(_read_from_url, InventoryFile, app, status, warning):
intersphinx_setup(app)
_read_from_url().readline.return_value = '# Sphinx inventory version 2'.encode('utf-8')
@@ -94,7 +36,7 @@ def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status
_read_from_url().url = 'http://hostname/' + INVENTORY_FILENAME
fetch_inventory(app, 'http://hostname/', 'http://hostname/' + INVENTORY_FILENAME)
assert 'intersphinx inventory has moved' not in status.getvalue()
- assert read_inventory.call_args[0][1] == 'http://hostname/'
+ assert InventoryFile.load.call_args[0][1] == 'http://hostname/'
# same uri and inv, redirected
status.seek(0)
@@ -105,7 +47,7 @@ def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status
assert status.getvalue() == ('intersphinx inventory has moved: '
'http://hostname/%s -> http://hostname/new/%s\n' %
(INVENTORY_FILENAME, INVENTORY_FILENAME))
- assert read_inventory.call_args[0][1] == 'http://hostname/new'
+ assert InventoryFile.load.call_args[0][1] == 'http://hostname/new'
# different uri and inv, not redirected
status.seek(0)
@@ -114,7 +56,7 @@ def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status
fetch_inventory(app, 'http://hostname/', 'http://hostname/new/' + INVENTORY_FILENAME)
assert 'intersphinx inventory has moved' not in status.getvalue()
- assert read_inventory.call_args[0][1] == 'http://hostname/'
+ assert InventoryFile.load.call_args[0][1] == 'http://hostname/'
# different uri and inv, redirected
status.seek(0)
@@ -125,7 +67,7 @@ def test_fetch_inventory_redirection(_read_from_url, read_inventory, app, status
assert status.getvalue() == ('intersphinx inventory has moved: '
'http://hostname/new/%s -> http://hostname/other/%s\n' %
(INVENTORY_FILENAME, INVENTORY_FILENAME))
- assert read_inventory.call_args[0][1] == 'http://hostname/'
+ assert InventoryFile.load.call_args[0][1] == 'http://hostname/'
def test_missing_reference(tempdir, app, status, warning):
@@ -216,6 +158,10 @@ def test_missing_reference(tempdir, app, status, warning):
rn = reference_check('py', 'mod', 'py3krelparent:module1', 'foo', refdoc='sub/dir/test')
assert rn['refuri'] == '../../../../py3k/foo.html#module-module1'
+ # check refs of standard domain
+ rn = reference_check('std', 'doc', 'docname', 'docname')
+ assert rn['refuri'] == 'https://docs.python.org/docname.html'
+
def test_load_mappings_warnings(tempdir, app, status, warning):
"""
diff --git a/tests/test_ext_math.py b/tests/test_ext_math.py
index acf43fd3b..296bba94f 100644
--- a/tests/test_ext_math.py
+++ b/tests/test_ext_math.py
@@ -45,8 +45,8 @@ def test_imgmath_png(app, status, warning):
raise SkipTest('dvipng command "dvipng" is not available')
content = (app.outdir / 'index.html').text()
- html = ('<div class="math">\s*<p>\s*<img src="_images/math/\w+.png"'
- '\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
+ html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.png"'
+ r'\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
assert re.search(html, content, re.S)
@@ -61,8 +61,8 @@ def test_imgmath_svg(app, status, warning):
raise SkipTest('dvisvgm command "dvisvgm" is not available')
content = (app.outdir / 'index.html').text()
- html = ('<div class="math">\s*<p>\s*<img src="_images/math/\w+.svg"'
- '\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
+ html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.svg"'
+ r'\s*alt="a\^2\+b\^2=c\^2"/>\s*</p>\s*</div>')
assert re.search(html, content, re.S)
diff --git a/tests/test_ext_napoleon_docstring.py b/tests/test_ext_napoleon_docstring.py
index a894f108f..e71d517fe 100644
--- a/tests/test_ext_napoleon_docstring.py
+++ b/tests/test_ext_napoleon_docstring.py
@@ -284,7 +284,7 @@ Construct a new XBlock.
This class should only be used by runtimes.
Arguments:
- runtime (:class:`~typing.Dict`\[:class:`int`,:class:`str`\]): Use it to
+ runtime (:class:`~typing.Dict`\\[:class:`int`,:class:`str`\\]): Use it to
access the environment. It is available in XBlock code
as ``self.runtime``.
@@ -304,7 +304,7 @@ This class should only be used by runtimes.
:param runtime: Use it to
access the environment. It is available in XBlock code
as ``self.runtime``.
-:type runtime: :class:`~typing.Dict`\[:class:`int`,:class:`str`\]
+:type runtime: :class:`~typing.Dict`\\[:class:`int`,:class:`str`\\]
:param field_data: Interface used by the XBlock
fields to access their data from wherever it is persisted.
:type field_data: :class:`FieldData`
diff --git a/tests/test_ext_viewcode.py b/tests/test_ext_viewcode.py
index ba781c2be..4a3fb550f 100644
--- a/tests/test_ext_viewcode.py
+++ b/tests/test_ext_viewcode.py
@@ -30,6 +30,7 @@ def test_viewcode(app, status, warning):
assert result.count('href="_modules/spam/mod2.html#func2"') == 2
assert result.count('href="_modules/spam/mod1.html#Class1"') == 2
assert result.count('href="_modules/spam/mod2.html#Class2"') == 2
+ assert result.count('@decorator') == 1
@pytest.mark.sphinx(testroot='ext-viewcode', tags=['test_linkcode'])
diff --git a/tests/test_highlighting.py b/tests/test_highlighting.py
index 6d45e9423..938181fe1 100644
--- a/tests/test_highlighting.py
+++ b/tests/test_highlighting.py
@@ -9,9 +9,9 @@
:license: BSD, see LICENSE for details.
"""
+import mock
from pygments.lexer import RegexLexer
from pygments.token import Text, Name
-from pygments.filters import ErrorToken
from pygments.formatters.html import HtmlFormatter
from sphinx.highlighting import PygmentsBridge
@@ -86,7 +86,8 @@ def test_trim_doctest_flags():
PygmentsBridge.html_formatter = HtmlFormatter
-def test_default_highlight():
+@mock.patch('sphinx.highlighting.logger')
+def test_default_highlight(logger):
bridge = PygmentsBridge('html')
# default: highlights as python3
@@ -104,8 +105,8 @@ def test_default_highlight():
'<span class="s2">&quot;Hello sphinx world&quot;</span>\n</pre></div>\n')
# python3: raises error if highlighting failed
- try:
- ret = bridge.highlight_block('reST ``like`` text', 'python3')
- assert False, "highlight_block() does not raise any exceptions"
- except ErrorToken:
- pass # raise parsing error
+ ret = bridge.highlight_block('reST ``like`` text', 'python3')
+ logger.warning.assert_called_with('Could not lex literal_block as "%s". '
+ 'Highlighting skipped.', 'python3',
+ type='misc', subtype='highlighting_failure',
+ location=None)
diff --git a/tests/test_intl.py b/tests/test_intl.py
index 0d9b189ca..f03c771c9 100644
--- a/tests/test_intl.py
+++ b/tests/test_intl.py
@@ -21,7 +21,8 @@ from six import string_types
import pytest
from util import (
- path, assert_re_search, assert_not_re_search, assert_startswith, assert_node, etree_parse
+ path, etree_parse, strip_escseq,
+ assert_re_search, assert_not_re_search, assert_startswith, assert_node
)
@@ -496,7 +497,7 @@ def test_gettext_buildr_ignores_only_directive(app):
def test_gettext_dont_rebuild_mo(make_app, app_params, build_mo):
# --- don't rebuild by .mo mtime
def get_number_of_update_targets(app_):
- updated = app_.env.update(app_.config, app_.srcdir, app_.doctreedir, app_)
+ updated = app_.env.update(app_.config, app_.srcdir, app_.doctreedir)
return len(updated)
# setup new directory
@@ -679,12 +680,12 @@ def test_html_rebuild_mo(app):
app.build()
# --- rebuild by .mo mtime
app.builder.build_update()
- updated = app.env.update(app.config, app.srcdir, app.doctreedir, app)
+ updated = app.env.update(app.config, app.srcdir, app.doctreedir)
assert len(updated) == 0
mtime = (app.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').stat().st_mtime
(app.srcdir / 'xx' / 'LC_MESSAGES' / 'bom.mo').utime((mtime + 5, mtime + 5))
- updated = app.env.update(app.config, app.srcdir, app.doctreedir, app)
+ updated = app.env.update(app.config, app.srcdir, app.doctreedir)
assert len(updated) == 1
@@ -1170,4 +1171,4 @@ def test_image_glob_intl_using_figure_language_filename(app):
def getwarning(warnings):
- return warnings.getvalue().replace(os.sep, '/')
+ return strip_escseq(warnings.getvalue().replace(os.sep, '/'))
diff --git a/tests/test_pycode.py b/tests/test_pycode.py
new file mode 100644
index 000000000..710e11341
--- /dev/null
+++ b/tests/test_pycode.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+"""
+ test_pycode
+ ~~~~~~~~~~~
+
+ Test pycode.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.pycode import ModuleAnalyzer
+
+
+def test_ModuleAnalyzer_find_tags():
+ code = ('class Foo(object):\n' # line: 1
+ ' """class Foo!"""\n'
+ ' def __init__(self):\n'
+ ' pass\n'
+ '\n'
+ ' def bar(self, arg1, arg2=True, *args, **kwargs):\n'
+ ' """method Foo.bar"""\n'
+ ' pass\n'
+ '\n'
+ ' class Baz(object):\n'
+ ' def __init__(self):\n' # line: 11
+ ' pass\n'
+ '\n'
+ 'def qux():\n'
+            '    """function qux"""\n'
+ ' pass\n'
+ '\n'
+ '@decorator\n'
+ 'def quux():\n'
+ ' pass\n')
+ analyzer = ModuleAnalyzer.for_string(code, 'module')
+ tags = analyzer.find_tags()
+ assert set(tags.keys()) == {'Foo', 'Foo.__init__', 'Foo.bar',
+ 'Foo.Baz', 'Foo.Baz.__init__', 'qux', 'quux'}
+ assert tags['Foo'] == ('class', 1, 13) # type, start, end
+ assert tags['Foo.__init__'] == ('def', 3, 5)
+ assert tags['Foo.bar'] == ('def', 6, 9)
+ assert tags['Foo.Baz'] == ('class', 10, 13)
+ assert tags['Foo.Baz.__init__'] == ('def', 11, 13)
+ assert tags['qux'] == ('def', 14, 17)
+ assert tags['quux'] == ('def', 18, 21) # decorator
+
+
+def test_ModuleAnalyzer_find_attr_docs():
+ code = ('class Foo(object):\n'
+ ' """class Foo!"""\n'
+ ' #: comment before attr1\n'
+ ' attr1 = None\n'
+ ' attr2 = None # attribute comment for attr2 (without colon)\n'
+ ' attr3 = None #: attribute comment for attr3\n'
+ ' attr4 = None #: long attribute comment\n'
+ ' #: for attr4\n'
+ ' #: comment before attr5\n'
+ ' attr5 = None #: attribute comment for attr5\n'
+ ' attr6, attr7 = 1, 2 #: this comment is ignored\n'
+ '\n'
+ ' def __init__(self):\n'
+ ' self.attr8 = None #: first attribute comment (ignored)\n'
+ ' self.attr8 = None #: attribute comment for attr8\n'
+ ' #: comment before attr9\n'
+ ' self.attr9 = None #: comment after attr9\n'
+ ' "string after attr9"\n'
+ '\n'
+ ' def bar(self, arg1, arg2=True, *args, **kwargs):\n'
+ ' """method Foo.bar"""\n'
+ ' pass\n'
+ '\n'
+ 'def baz():\n'
+ ' """function baz"""\n'
+ ' pass\n')
+ analyzer = ModuleAnalyzer.for_string(code, 'module')
+ docs = analyzer.find_attr_docs()
+ assert set(docs) == {('Foo', 'attr1'),
+ ('Foo', 'attr3'),
+ ('Foo', 'attr4'),
+ ('Foo', 'attr5'),
+ ('Foo', 'attr8'),
+ ('Foo', 'attr9')}
+ assert docs[('Foo', 'attr1')] == ['comment before attr1', '']
+ assert docs[('Foo', 'attr3')] == ['attribute comment for attr3', '']
+ assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
+ assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
+ assert docs[('Foo', 'attr5')] == ['attribute comment for attr5', '']
+ assert docs[('Foo', 'attr8')] == ['attribute comment for attr8', '']
+ assert docs[('Foo', 'attr9')] == ['string after attr9', '']
diff --git a/tests/test_setup_command.py b/tests/test_setup_command.py
index 1eb7c8fc1..70c6b796f 100644
--- a/tests/test_setup_command.py
+++ b/tests/test_setup_command.py
@@ -64,6 +64,24 @@ def test_build_sphinx(setup_command):
assert proc.returncode == 0
+@pytest.mark.setup_command('-b', 'html,man')
+def test_build_sphinx_multiple_builders(setup_command):
+ proc = setup_command.proc
+ out, err = proc.communicate()
+ print(out)
+ print(err)
+ assert proc.returncode == 0
+
+
+@pytest.mark.setup_command('-b', 'html,bar')
+def test_build_sphinx_multiple_invalid_builders(setup_command):
+ proc = setup_command.proc
+ out, err = proc.communicate()
+ print(out)
+ print(err)
+ assert proc.returncode == 1
+
+
@pytest.fixture
def nonascii_srcdir(request, setup_command):
mb_name = u'\u65e5\u672c\u8a9e'
diff --git a/tests/test_util.py b/tests/test_util.py
index 18c2bcf77..0a481db2a 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -10,11 +10,15 @@
"""
import pytest
+from mock import patch
+from sphinx.util import logging
from sphinx.util import (
- encode_uri, parselinenos, split_docinfo
+ display_chunk, encode_uri, parselinenos, split_docinfo, status_iterator
)
+from util import strip_escseq
+
def test_encode_uri():
expected = (u'https://ru.wikipedia.org/wiki/%D0%A1%D0%B8%D1%81%D1%82%D0%B5%D0%BC%D0%B0_'
@@ -53,6 +57,47 @@ def test_splitdocinfo():
assert content == '\nHello world.\n'
+def test_display_chunk():
+ assert display_chunk('hello') == 'hello'
+ assert display_chunk(['hello']) == 'hello'
+ assert display_chunk(['hello', 'sphinx', 'world']) == 'hello .. world'
+ assert display_chunk(('hello',)) == 'hello'
+ assert display_chunk(('hello', 'sphinx', 'world')) == 'hello .. world'
+
+
+@pytest.mark.sphinx('dummy')
+@patch('sphinx.util.console._tw', 40) # terminal width = 40
+def test_status_iterator(app, status, warning):
+ logging.setup(app, status, warning)
+
+ # test for old_status_iterator
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... '))
+ output = strip_escseq(status.getvalue())
+ assert 'testing ... hello sphinx world \n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+ # test for status_iterator (verbosity=0)
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
+ length=3, verbosity=0))
+ output = strip_escseq(status.getvalue())
+ assert 'testing ... [ 33%] hello \r' in output
+ assert 'testing ... [ 66%] sphinx \r' in output
+ assert 'testing ... [100%] world \r\n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+ # test for status_iterator (verbosity=1)
+ status.truncate(0)
+ yields = list(status_iterator(['hello', 'sphinx', 'world'], 'testing ... ',
+ length=3, verbosity=1))
+ output = strip_escseq(status.getvalue())
+ assert 'testing ... [ 33%] hello\n' in output
+ assert 'testing ... [ 66%] sphinx\n' in output
+ assert 'testing ... [100%] world\n\n' in output
+ assert yields == ['hello', 'sphinx', 'world']
+
+
def test_parselinenos():
assert parselinenos('1,2,3', 10) == [0, 1, 2]
assert parselinenos('4, 5, 6', 10) == [3, 4, 5]
@@ -60,9 +105,13 @@ def test_parselinenos():
assert parselinenos('7-9', 10) == [6, 7, 8]
assert parselinenos('7-', 10) == [6, 7, 8, 9]
assert parselinenos('1,7-', 10) == [0, 6, 7, 8, 9]
+ assert parselinenos('7-7', 10) == [6]
+ assert parselinenos('11-', 10) == [10]
with pytest.raises(ValueError):
parselinenos('1-2-3', 10)
with pytest.raises(ValueError):
parselinenos('abc-def', 10)
with pytest.raises(ValueError):
parselinenos('-', 10)
+ with pytest.raises(ValueError):
+ parselinenos('3-1', 10)
diff --git a/tests/test_util_i18n.py b/tests/test_util_i18n.py
index 6f82eda8c..a155afe35 100644
--- a/tests/test_util_i18n.py
+++ b/tests/test_util_i18n.py
@@ -51,7 +51,7 @@ def test_catalog_outdated(tempdir):
def test_catalog_write_mo(tempdir):
(tempdir / 'test.po').write_text('#')
cat = i18n.CatalogInfo(tempdir, 'test', 'utf-8')
- cat.write_mo('en', lambda *a, **kw: None)
+ cat.write_mo('en')
assert os.path.exists(cat.mo_path)
with open(cat.mo_path, 'rb') as f:
assert read_mo(f) is not None
@@ -159,15 +159,6 @@ def test_get_catalogs_with_compact(tempdir):
def test_format_date():
date = datetime.date(2016, 2, 7)
- # default format
- format = None
- assert i18n.format_date(format, date=date) == 'Feb 7, 2016'
- assert i18n.format_date(format, date=date, language='') == 'Feb 7, 2016'
- assert i18n.format_date(format, date=date, language='unknown') == 'Feb 7, 2016'
- assert i18n.format_date(format, date=date, language='en') == 'Feb 7, 2016'
- assert i18n.format_date(format, date=date, language='ja') == '2016/02/07'
- assert i18n.format_date(format, date=date, language='de') == '07.02.2016'
-
# strftime format
format = '%B %d, %Y'
assert i18n.format_date(format, date=date) == 'February 07, 2016'
@@ -177,15 +168,6 @@ def test_format_date():
assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016'
assert i18n.format_date(format, date=date, language='de') == 'Februar 07, 2016'
- # LDML format
- format = 'MMM dd, YYYY'
- assert i18n.format_date(format, date=date) == 'Feb 07, 2016'
- assert i18n.format_date(format, date=date, language='') == 'Feb 07, 2016'
- assert i18n.format_date(format, date=date, language='unknown') == 'Feb 07, 2016'
- assert i18n.format_date(format, date=date, language='en') == 'Feb 07, 2016'
- assert i18n.format_date(format, date=date, language='ja') == u'2月 07, 2016'
- assert i18n.format_date(format, date=date, language='de') == 'Feb. 07, 2016'
-
# raw string
format = 'Mon Mar 28 12:37:08 2016, commit 4367aef'
assert i18n.format_date(format, date=date) == format
diff --git a/tests/test_util_inventory.py b/tests/test_util_inventory.py
new file mode 100644
index 000000000..6114ef513
--- /dev/null
+++ b/tests/test_util_inventory.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""
+ test_util_inventory
+ ~~~~~~~~~~~~~~~~~~~
+
+ Test inventory util functions.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import zlib
+import posixpath
+
+from six import BytesIO
+
+from sphinx.ext.intersphinx import InventoryFile
+
+inventory_v1 = '''\
+# Sphinx inventory version 1
+# Project: foo
+# Version: 1.0
+module mod foo.html
+module.cls class foo.html
+'''.encode('utf-8')
+
+inventory_v2 = '''\
+# Sphinx inventory version 2
+# Project: foo
+# Version: 2.0
+# The remainder of this file is compressed with zlib.
+'''.encode('utf-8') + zlib.compress('''\
+module1 py:module 0 foo.html#module-module1 Long Module desc
+module2 py:module 0 foo.html#module-$ -
+module1.func py:function 1 sub/foo.html#$ -
+CFunc c:function 2 cfunc.html#CFunc -
+a term std:term -1 glossary.html#term-a-term -
+docname std:doc -1 docname.html -
+a term including:colon std:term -1 glossary.html#term-a-term-including-colon -
+'''.encode('utf-8'))
+
+
+def test_read_inventory_v1():
+ f = BytesIO(inventory_v1)
+ invdata = InventoryFile.load(f, '/util', posixpath.join)
+ assert invdata['py:module']['module'] == \
+ ('foo', '1.0', '/util/foo.html#module-module', '-')
+ assert invdata['py:class']['module.cls'] == \
+ ('foo', '1.0', '/util/foo.html#module.cls', '-')
+
+
+def test_read_inventory_v2():
+ f = BytesIO(inventory_v2)
+ invdata = InventoryFile.load(f, '/util', posixpath.join)
+
+ assert len(invdata['py:module']) == 2
+ assert invdata['py:module']['module1'] == \
+ ('foo', '2.0', '/util/foo.html#module-module1', 'Long Module desc')
+ assert invdata['py:module']['module2'] == \
+ ('foo', '2.0', '/util/foo.html#module-module2', '-')
+ assert invdata['py:function']['module1.func'][2] == \
+ '/util/sub/foo.html#module1.func'
+ assert invdata['c:function']['CFunc'][2] == '/util/cfunc.html#CFunc'
+ assert invdata['std:term']['a term'][2] == \
+ '/util/glossary.html#term-a-term'
+ assert invdata['std:term']['a term including:colon'][2] == \
+ '/util/glossary.html#term-a-term-including-colon'
diff --git a/tests/test_util_logging.py b/tests/test_util_logging.py
index d8ab27b47..ca46b8328 100644
--- a/tests/test_util_logging.py
+++ b/tests/test_util_logging.py
@@ -10,7 +10,96 @@
"""
from __future__ import print_function
+import codecs
+from docutils import nodes
+
+from sphinx.errors import SphinxWarning
+from sphinx.util import logging
+from sphinx.util.console import colorize
from sphinx.util.logging import is_suppressed_warning
+from sphinx.util.parallel import ParallelTasks
+
+import pytest
+from util import strip_escseq
+
+
+def test_info_and_warning(app, status, warning):
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.debug('message1')
+ logger.info('message2')
+ logger.warning('message3')
+ logger.critical('message4')
+ logger.error('message5')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+ assert 'message5' not in status.getvalue()
+
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' not in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert 'message4' in warning.getvalue()
+ assert 'message5' in warning.getvalue()
+
+
+def test_verbosity_filter(app, status, warning):
+ # verbosity = 0: INFO
+ app.verbosity = 0
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' not in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+ # verbosity = 1: VERBOSE
+ app.verbosity = 1
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' not in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+ # verbosity = 2: DEBUG
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1')
+ logger.verbose('message2')
+ logger.debug('message3')
+
+ assert 'message1' in status.getvalue()
+ assert 'message2' in status.getvalue()
+ assert 'message3' in status.getvalue()
+ assert 'message4' not in status.getvalue()
+
+
+def test_nonl_info_log(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.info('message1', nonl=True)
+ logger.info('message2')
+ logger.info('message3')
+
+ assert 'message1message2\nmessage3' in status.getvalue()
def test_is_suppressed_warning():
@@ -24,3 +113,159 @@ def test_is_suppressed_warning():
assert is_suppressed_warning("files", "stylesheet", suppress_warnings) is True
assert is_suppressed_warning("rest", "syntax", suppress_warnings) is False
assert is_suppressed_warning("rest", "duplicated_labels", suppress_warnings) is True
+
+
+def test_suppress_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ app._warncount = 0 # force reset
+
+ app.config.suppress_warnings = []
+ warning.truncate(0)
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message1' in warning.getvalue()
+ assert 'message2' in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 3
+
+ app.config.suppress_warnings = ['test']
+ warning.truncate(0)
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' not in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 4
+
+ app.config.suppress_warnings = ['test.logging']
+ warning.truncate(0)
+ logger.warning('message1', type='test', subtype='logging')
+ logger.warning('message2', type='test', subtype='crash')
+ logger.warning('message3', type='actual', subtype='logging')
+ assert 'message1' not in warning.getvalue()
+ assert 'message2' in warning.getvalue()
+ assert 'message3' in warning.getvalue()
+ assert app._warncount == 6
+
+
+def test_warningiserror(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ # if False, warning is not error
+ app.warningiserror = False
+ logger.warning('message')
+
+ # if True, warning raises SphinxWarning exception
+ app.warningiserror = True
+ with pytest.raises(SphinxWarning):
+ logger.warning('message')
+
+
+def test_warning_location(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1', location='index')
+ assert 'index.txt: WARNING: message1' in warning.getvalue()
+
+ logger.warning('message2', location=('index', 10))
+ assert 'index.txt:10: WARNING: message2' in warning.getvalue()
+
+ logger.warning('message3', location=None)
+ assert colorize('darkred', 'WARNING: message3') in warning.getvalue()
+
+ node = nodes.Node()
+ node.source, node.line = ('index.txt', 10)
+ logger.warning('message4', location=node)
+ assert 'index.txt:10: WARNING: message4' in warning.getvalue()
+
+ node.source, node.line = ('index.txt', None)
+ logger.warning('message5', location=node)
+ assert 'index.txt:: WARNING: message5' in warning.getvalue()
+
+ node.source, node.line = (None, 10)
+ logger.warning('message6', location=node)
+ assert '<unknown>:10: WARNING: message6' in warning.getvalue()
+
+ node.source, node.line = (None, None)
+ logger.warning('message7', location=node)
+ assert colorize('darkred', 'WARNING: message7') in warning.getvalue()
+
+
+def test_pending_warnings(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ logger.warning('message1')
+ with logging.pending_warnings():
+        # not logged yet (buffered) in here
+ logger.warning('message2')
+ logger.warning('message3')
+ assert 'WARNING: message1' in warning.getvalue()
+ assert 'WARNING: message2' not in warning.getvalue()
+ assert 'WARNING: message3' not in warning.getvalue()
+
+ # actually logged as ordered
+ assert 'WARNING: message2\nWARNING: message3' in strip_escseq(warning.getvalue())
+
+
+def test_colored_logs(app, status, warning):
+ app.verbosity = 2
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ # default colors
+ logger.debug('message1')
+ logger.verbose('message2')
+ logger.info('message3')
+ logger.warning('message4')
+ logger.critical('message5')
+ logger.error('message6')
+
+ assert colorize('darkgray', 'message1') in status.getvalue()
+ assert 'message2\n' in status.getvalue() # not colored
+ assert 'message3\n' in status.getvalue() # not colored
+ assert colorize('darkred', 'WARNING: message4') in warning.getvalue()
+ assert 'WARNING: message5\n' in warning.getvalue() # not colored
+ assert 'WARNING: message6\n' in warning.getvalue() # not colored
+
+ # color specification
+ logger.debug('message7', color='white')
+ logger.info('message8', color='red')
+ assert colorize('white', 'message7') in status.getvalue()
+ assert colorize('red', 'message8') in status.getvalue()
+
+
+def test_logging_in_ParallelTasks(app, status, warning):
+ logging.setup(app, status, warning)
+ logger = logging.getLogger(__name__)
+
+ def child_process():
+ logger.info('message1')
+ logger.warning('message2', location='index')
+
+ tasks = ParallelTasks(1)
+ tasks.add_task(child_process)
+ tasks.join()
+ assert 'message1' in status.getvalue()
+ assert 'index.txt: WARNING: message2' in warning.getvalue()
+
+
+def test_output_with_unencodable_char(app, status, warning):
+ class StreamWriter(codecs.StreamWriter):
+ def write(self, object):
+ self.stream.write(object.encode('cp1252').decode('cp1252'))
+
+ logging.setup(app, StreamWriter(status), warning)
+ logger = logging.getLogger(__name__)
+
+ # info with UnicodeEncodeError
+ status.truncate(0)
+ status.seek(0)
+ logger.info(u"unicode \u206d...")
+ assert status.getvalue() == "unicode ?...\n"
diff --git a/tests/test_util_rst.py b/tests/test_util_rst.py
index 695263fad..5fce6e3eb 100644
--- a/tests/test_util_rst.py
+++ b/tests/test_util_rst.py
@@ -12,5 +12,5 @@ from sphinx.util.rst import escape
def test_escape():
- assert escape(':ref:`id`') == '\:ref\:\`id\`'
- assert escape('footnote [#]_') == 'footnote \[\#\]\_'
+ assert escape(':ref:`id`') == r'\:ref\:\`id\`'
+ assert escape('footnote [#]_') == r'footnote \[\#\]\_'
diff --git a/tox.ini b/tox.ini
index b3f084a4d..1d138d49f 100644
--- a/tox.ini
+++ b/tox.ini
@@ -47,6 +47,20 @@ deps=
{[testenv]deps}
[testenv:py35]
+deps=
+ mypy
+ typed_ast
+ {[testenv]deps}
commands=
{envpython} -Wall tests/run.py {posargs}
sphinx-build -q -W -b html -d {envtmpdir}/doctrees doc {envtmpdir}/html
+
+[testenv:mypy]
+deps=
+ mypy
+commands=
+ mypy sphinx/
+
+[testenv:docs]
+commands=
+ python setup.py build_sphinx
diff --git a/utils/bump_version.py b/utils/bump_version.py
index 617c33c0c..95db65ea7 100755
--- a/utils/bump_version.py
+++ b/utils/bump_version.py
@@ -29,9 +29,9 @@ def bump_version(path, version_info):
with open(path, 'r+') as f:
body = f.read()
- body = re.sub("(?<=__version__ = ')[^']+", version, body)
- body = re.sub("(?<=__released__ = ')[^']+", release, body)
- body = re.sub("(?<=version_info = )\(.*\)", str(version_info), body)
+ body = re.sub(r"(?<=__version__ = ')[^']+", version, body)
+ body = re.sub(r"(?<=__released__ = ')[^']+", release, body)
+ body = re.sub(r"(?<=version_info = )\(.*\)", str(version_info), body)
f.seek(0)
f.truncate(0)
@@ -39,23 +39,23 @@ def bump_version(path, version_info):
def parse_version(version):
- matched = re.search('^(\d+)\.(\d+)$', version)
+ matched = re.search(r'^(\d+)\.(\d+)$', version)
if matched:
major, minor = matched.groups()
return (int(major), int(minor), 0, 'final', 0)
- matched = re.search('^(\d+)\.(\d+)\.(\d+)$', version)
+ matched = re.search(r'^(\d+)\.(\d+)\.(\d+)$', version)
if matched:
major, minor, rev = matched.groups()
return (int(major), int(minor), int(rev), 'final', 0)
- matched = re.search('^(\d+)\.(\d+)\s*(a|b|alpha|beta)(\d+)$', version)
+ matched = re.search(r'^(\d+)\.(\d+)\s*(a|b|alpha|beta)(\d+)$', version)
if matched:
major, minor, typ, relver = matched.groups()
release = RELEASE_TYPE.get(typ, typ)
return (int(major), int(minor), 0, release, int(relver))
- matched = re.search('^(\d+)\.(\d+)\.(\d+)\s*(a|b|alpha|beta)(\d+)$', version)
+ matched = re.search(r'^(\d+)\.(\d+)\.(\d+)\s*(a|b|alpha|beta)(\d+)$', version)
if matched:
major, minor, rev, typ, relver = matched.groups()
release = RELEASE_TYPE.get(typ, typ)
@@ -90,7 +90,7 @@ class Changes(object):
def fetch_version(self):
with open(self.path) as f:
version = f.readline().strip()
- matched = re.search('^Release (.*) \((.*)\)$', version)
+ matched = re.search(r'^Release (.*) \((.*)\)$', version)
if matched is None:
raise RuntimeError('Unknown CHANGES format: %s' % version)
diff --git a/utils/check_sources.py b/utils/check_sources.py
index 06b4a21dc..3895ee1d6 100755
--- a/utils/check_sources.py
+++ b/utils/check_sources.py
@@ -46,6 +46,7 @@ copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
(name_mail_re, name_mail_re))
not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
+noqa_re = re.compile(r'#\s+NOQA\s*$', re.I)
misspellings = ["developement", "adress", # ALLOW-MISSPELLING
"verificate", "informations"] # ALLOW-MISSPELLING
@@ -81,6 +82,8 @@ def check_syntax(fn, lines):
@checker('.py')
def check_style(fn, lines):
for lno, line in enumerate(lines):
+ if noqa_re.search(line):
+ continue
if len(line.rstrip('\n')) > 95:
yield lno + 1, "line too long"
if line.strip().startswith('#'):