summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.appveyor.yml14
-rw-r--r--.circleci/config.yml2
-rw-r--r--.gitignore4
-rw-r--r--.travis.yml66
-rw-r--r--AUTHORS2
-rw-r--r--CHANGES109
-rw-r--r--CONTRIBUTING.rst89
-rw-r--r--EXAMPLES453
-rw-r--r--MANIFEST.in3
-rw-r--r--Makefile70
-rw-r--r--README.rst133
-rw-r--r--doc/Makefile2
-rw-r--r--doc/_static/conf.py.txt11
-rw-r--r--doc/_themes/sphinx13/layout.html2
-rw-r--r--doc/builders.rst20
-rw-r--r--doc/conf.py2
-rw-r--r--doc/config.rst83
-rw-r--r--doc/contents.rst2
-rw-r--r--doc/domains.rst72
-rw-r--r--doc/ext/autodoc.rst18
-rw-r--r--doc/ext/autosummary.rst5
-rw-r--r--doc/ext/inheritance.rst10
-rw-r--r--doc/ext/math.rst19
-rw-r--r--doc/extdev/markupapi.rst8
-rw-r--r--doc/invocation.rst547
-rw-r--r--doc/latex.rst38
-rw-r--r--doc/man/index.rst22
-rw-r--r--doc/man/sphinx-apidoc.rst141
-rw-r--r--doc/man/sphinx-autogen.rst93
-rw-r--r--doc/man/sphinx-build.rst351
-rw-r--r--doc/man/sphinx-quickstart.rst160
-rw-r--r--doc/markup/code.rst2
-rw-r--r--doc/markup/inline.rst11
-rw-r--r--doc/setuptools.rst20
-rw-r--r--doc/theming.rst19
-rw-r--r--doc/tutorial.rst10
-rw-r--r--mypy.ini7
-rw-r--r--setup.cfg39
-rw-r--r--setup.py73
-rwxr-xr-xsphinx-apidoc.py15
-rwxr-xr-xsphinx-autogen.py15
-rwxr-xr-xsphinx-build.py15
-rwxr-xr-xsphinx-quickstart.py15
-rw-r--r--sphinx/__init__.py82
-rw-r--r--sphinx/__main__.py2
-rw-r--r--sphinx/apidoc.py435
-rw-r--r--sphinx/application.py132
-rw-r--r--sphinx/builders/__init__.py15
-rw-r--r--sphinx/builders/_epub_base.py5
-rw-r--r--sphinx/builders/changes.py2
-rw-r--r--sphinx/builders/epub2.py100
-rw-r--r--sphinx/builders/epub3.py31
-rw-r--r--sphinx/builders/html.py21
-rw-r--r--sphinx/builders/latex.py26
-rw-r--r--sphinx/builders/qthelp.py8
-rw-r--r--sphinx/cmd/__init__.py10
-rw-r--r--sphinx/cmd/build.py42
-rw-r--r--sphinx/cmd/quickstart.py668
-rw-r--r--sphinx/cmdline.py307
-rw-r--r--sphinx/config.py9
-rw-r--r--sphinx/deprecation.py6
-rw-r--r--sphinx/directives/code.py17
-rw-r--r--sphinx/domains/__init__.py29
-rw-r--r--sphinx/domains/cpp.py2583
-rw-r--r--sphinx/domains/python.py8
-rw-r--r--sphinx/environment/__init__.py178
-rw-r--r--sphinx/ext/apidoc.py444
-rw-r--r--sphinx/ext/autodoc/__init__.py (renamed from sphinx/ext/autodoc.py)394
-rw-r--r--sphinx/ext/autodoc/importer.py146
-rw-r--r--sphinx/ext/autodoc/inspector.py184
-rw-r--r--sphinx/ext/autosummary/__init__.py7
-rw-r--r--sphinx/ext/autosummary/generate.py86
-rw-r--r--sphinx/ext/imgmath.py4
-rw-r--r--sphinx/ext/inheritance_diagram.py41
-rw-r--r--sphinx/ext/intersphinx.py2
-rw-r--r--sphinx/ext/jsmath.py4
-rw-r--r--sphinx/ext/mathbase.py62
-rw-r--r--sphinx/ext/mathjax.py4
-rw-r--r--sphinx/ext/pngmath.py4
-rw-r--r--sphinx/ext/viewcode.py9
-rw-r--r--sphinx/extension.py2
-rw-r--r--sphinx/io.py246
-rw-r--r--sphinx/locale/__init__.py24
-rw-r--r--sphinx/make_mode.py3
-rw-r--r--sphinx/parsers.py25
-rw-r--r--sphinx/pycode/Grammar-py2.txt135
-rw-r--r--sphinx/pycode/Grammar-py3.txt143
-rw-r--r--sphinx/pycode/__init__.py321
-rw-r--r--sphinx/pycode/nodes.py212
-rw-r--r--sphinx/pycode/parser.py471
-rw-r--r--sphinx/pycode/pgen2/__init__.py4
-rw-r--r--sphinx/pycode/pgen2/driver.py154
-rw-r--r--sphinx/pycode/pgen2/grammar.py178
-rw-r--r--sphinx/pycode/pgen2/literals.py100
-rw-r--r--sphinx/pycode/pgen2/parse.c4544
-rw-r--r--sphinx/pycode/pgen2/parse.py206
-rw-r--r--sphinx/pycode/pgen2/parse.pyx165
-rw-r--r--sphinx/pycode/pgen2/pgen.py403
-rwxr-xr-xsphinx/pycode/pgen2/token.py86
-rw-r--r--sphinx/pycode/pgen2/tokenize.py441
-rw-r--r--sphinx/quickstart.py715
-rw-r--r--sphinx/registry.py148
-rw-r--r--sphinx/search/__init__.py2
-rw-r--r--sphinx/search/zh.py6
-rw-r--r--sphinx/templates/epub2/container.xml6
-rw-r--r--sphinx/templates/epub2/content.opf_t37
-rw-r--r--sphinx/templates/epub2/mimetype1
-rw-r--r--sphinx/templates/epub2/toc.ncx_t15
-rw-r--r--sphinx/templates/quickstart/Makefile_t7
-rw-r--r--sphinx/templates/quickstart/conf.py_t47
-rw-r--r--sphinx/templates/quickstart/make.bat_t9
-rw-r--r--sphinx/testing/util.py162
-rw-r--r--sphinx/texinputs/footnotehyper-sphinx.sty5
-rw-r--r--sphinx/texinputs/sphinx.sty447
-rw-r--r--sphinx/texinputs/sphinxhowto.cls1
-rw-r--r--sphinx/themes/basic/layout.html7
-rw-r--r--sphinx/themes/basic/static/websupport.js2
-rw-r--r--sphinx/themes/basic/theme.conf1
-rw-r--r--sphinx/themes/bizstyle/static/css3-mediaqueries_src.js2
-rw-r--r--sphinx/theming.py5
-rw-r--r--sphinx/transforms/i18n.py9
-rw-r--r--sphinx/transforms/post_transforms/__init__.py3
-rw-r--r--sphinx/transforms/post_transforms/images.py4
-rw-r--r--sphinx/util/__init__.py10
-rw-r--r--sphinx/util/compat.py48
-rw-r--r--sphinx/util/docutils.py28
-rw-r--r--sphinx/util/inspect.py339
-rw-r--r--sphinx/util/nodes.py26
-rw-r--r--sphinx/util/requests.py2
-rw-r--r--sphinx/util/rst.py29
-rw-r--r--sphinx/util/stemmer/__init__.py2
-rw-r--r--sphinx/versioning.py32
-rw-r--r--sphinx/writers/html.py17
-rw-r--r--sphinx/writers/html5.py17
-rw-r--r--sphinx/writers/latex.py83
-rw-r--r--test-reqs.txt20
-rw-r--r--tests/conftest.py42
-rw-r--r--tests/roots/test-basic/index.rst3
-rw-r--r--tests/roots/test-domain-cpp/index.rst6
-rw-r--r--tests/roots/test-domain-py/module.rst2
-rw-r--r--tests/roots/test-ext-autodoc/target/__init__.py225
-rw-r--r--tests/roots/test-ext-autosummary/autosummary_importfail.py4
-rw-r--r--tests/roots/test-ext-autosummary/contents.rst5
-rw-r--r--tests/roots/test-ext-inheritance_diagram/index.rst2
-rw-r--r--tests/roots/test-ext-intersphinx-cppdomain/index.rst2
-rw-r--r--tests/roots/test-ext-math/index.rst2
-rw-r--r--tests/roots/test-ext-math/page.rst9
-rw-r--r--tests/roots/test-extensions/conf.py4
-rw-r--r--tests/roots/test-extensions/read_parallel.py4
-rw-r--r--tests/roots/test-extensions/read_serial.py4
-rw-r--r--tests/roots/test-extensions/write_parallel.py4
-rw-r--r--tests/roots/test-extensions/write_serial.py4
-rw-r--r--tests/roots/test-latex-numfig/conf.py12
-rw-r--r--tests/roots/test-latex-numfig/index.rst9
-rw-r--r--tests/roots/test-latex-numfig/indexhowto.rst10
-rw-r--r--tests/roots/test-latex-numfig/indexmanual.rst13
-rw-r--r--tests/roots/test-latex-table/expects/gridtable.tex8
-rw-r--r--tests/roots/test-latex-table/expects/longtable.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_align.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_caption.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_problematic_cell.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_stub_columns_and_problematic_cell.tex28
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_verbatim.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_widths.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_having_widths_and_problematic_cell.tex12
-rw-r--r--tests/roots/test-latex-table/expects/longtable_with_tabularcolumn.tex12
-rw-r--r--tests/roots/test-latex-table/expects/simple_table.tex6
-rw-r--r--tests/roots/test-latex-table/expects/table_having_caption.tex6
-rw-r--r--tests/roots/test-latex-table/expects/table_having_problematic_cell.tex6
-rw-r--r--tests/roots/test-latex-table/expects/table_having_stub_columns_and_problematic_cell.tex20
-rw-r--r--tests/roots/test-latex-table/expects/table_having_verbatim.tex6
-rw-r--r--tests/roots/test-latex-table/expects/table_having_widths.tex6
-rw-r--r--tests/roots/test-latex-table/expects/table_having_widths_and_problematic_cell.tex6
-rw-r--r--tests/roots/test-latex-table/expects/tabular_having_widths.tex6
-rw-r--r--tests/roots/test-latex-table/expects/tabularcolumn.tex6
-rw-r--r--tests/roots/test-latex-table/expects/tabulary_having_widths.tex6
-rw-r--r--tests/roots/test-root/autodoc.txt4
-rw-r--r--tests/roots/test-root/autodoc_target.py225
-rw-r--r--tests/roots/test-root/conf.py3
-rw-r--r--tests/roots/test-theming/test_theme/test-theme/theme.conf1
-rwxr-xr-xtests/run.py68
-rw-r--r--tests/test_application.py36
-rw-r--r--tests/test_autodoc.py460
-rw-r--r--tests/test_build.py7
-rw-r--r--tests/test_build_epub.py2
-rw-r--r--tests/test_build_html.py25
-rw-r--r--tests/test_build_html5.py11
-rw-r--r--tests/test_build_latex.py72
-rw-r--r--tests/test_build_qthelp.py28
-rw-r--r--tests/test_build_texinfo.py4
-rw-r--r--tests/test_build_text.py5
-rw-r--r--tests/test_config.py12
-rw-r--r--tests/test_directive_only.py1
-rw-r--r--tests/test_docutilsconf.py5
-rw-r--r--tests/test_domain_cpp.py617
-rw-r--r--tests/test_domain_py.py11
-rw-r--r--tests/test_environment.py2
-rw-r--r--tests/test_ext_apidoc.py (renamed from tests/test_apidoc.py)16
-rw-r--r--tests/test_ext_autosummary.py26
-rw-r--r--tests/test_ext_coverage.py16
-rw-r--r--tests/test_ext_graphviz.py2
-rw-r--r--tests/test_ext_inheritance_diagram.py28
-rw-r--r--tests/test_ext_intersphinx.py8
-rw-r--r--tests/test_ext_math.py99
-rw-r--r--tests/test_ext_todo.py4
-rw-r--r--tests/test_intl.py1
-rw-r--r--tests/test_io.py118
-rw-r--r--tests/test_pycode.py88
-rw-r--r--tests/test_pycode_parser.py301
-rw-r--r--tests/test_quickstart.py74
-rw-r--r--tests/test_theming.py18
-rw-r--r--tests/test_util.py26
-rw-r--r--tests/test_util_docstrings.py65
-rw-r--r--tests/test_util_images.py16
-rw-r--r--tests/test_util_inspect.py428
-rw-r--r--tests/test_util_inventory.py2
-rw-r--r--tests/test_versioning.py2
-rw-r--r--tox.ini93
-rw-r--r--utils/__init__.py0
-rwxr-xr-xutils/check_sources.py259
-rw-r--r--utils/checks.py111
-rw-r--r--utils/jssplitter_generator.py4
-rwxr-xr-xutils/reindent.py320
-rw-r--r--utils/release-checklist1
224 files changed, 9738 insertions, 13615 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
index 3012267c2..d2c5d0d95 100644
--- a/.appveyor.yml
+++ b/.appveyor.yml
@@ -6,20 +6,20 @@ environment:
matrix:
- PYTHON: 27
- DOCUTILS: 0.12
+ DOCUTILS: 0.13.1
TEST_IGNORE: --ignore py35
- PYTHON: 27
- DOCUTILS: 0.13.1
+ DOCUTILS: 0.14
TEST_IGNORE: --ignore py35
- PYTHON: 36
- DOCUTILS: 0.13.1
+ DOCUTILS: 0.14
- PYTHON: 36-x64
- DOCUTILS: 0.13.1
+ DOCUTILS: 0.14
install:
- C:\Python%PYTHON%\python.exe -m pip install -U pip setuptools
- - C:\Python%PYTHON%\python.exe -m pip install docutils==%DOCUTILS%
- - C:\Python%PYTHON%\python.exe -m pip install -r test-reqs.txt
+ - C:\Python%PYTHON%\python.exe -m pip install docutils==%DOCUTILS% mock
+ - C:\Python%PYTHON%\python.exe -m pip install .[test,websupport]
# No automatic build, just run python tests
build: off
@@ -39,7 +39,7 @@ test_script:
if (-not $test_ignore) { $test_ignore = '' }
$tests = $env:TEST
if (-not $tests) { $tests = '' }
- & "C:\Python$($env:PYTHON)\python.exe" run.py $test_ignore.Split(' ') --junitxml .junit.xml $tests.Split(' ')
+ & "C:\Python$($env:PYTHON)\python.exe" -m pytest $test_ignore.Split(' ') --junitxml .junit.xml $tests.Split(' ')
Pop-Location
if ($LastExitCode -eq 1) { Write-Host "Test Failures Occurred, leaving for test result parsing" }
elseif ($LastExitCode -ne 0) { Write-Host "Other Error Occurred, aborting"; exit $LastExitCode }
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 1bbcb4884..f4d4415f1 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -6,4 +6,6 @@ jobs:
working_directory: /sphinx
steps:
- checkout
+ - run: /python3.4/bin/pip install -U pip setuptools
+ - run: /python3.4/bin/pip install -U .[test,websupport]
- run: make test PYTHON=/python3.4/bin/python
diff --git a/.gitignore b/.gitignore
index 8ba227c7a..5d1026c5e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,11 +5,15 @@
.dir-locals.el
.cache/
+.idea
.mypy_cache/
.ropeproject/
TAGS
.tags
.tox
+.venv
+.coverage
+htmlcov
.DS_Store
sphinx/pycode/Grammar*pickle
distribute-*
diff --git a/.travis.yml b/.travis.yml
index c78db3d4e..2bd437436 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,48 +1,50 @@
language: python
sudo: false
dist: trusty
-cache:
- directories:
- - $HOME/.cache/pip
-python:
- - "pypy-5.4.1"
- - "3.6"
- - "3.5"
- - "3.4"
- - "2.7"
- - "nightly"
+cache: pip
+
env:
global:
- - TEST='-v --durations 25'
- PYTHONFAULTHANDLER=x
- PYTHONWARNINGS=all
- SKIP_LATEX_BUILD=1
- matrix:
- - DOCUTILS=0.12
- - DOCUTILS=0.13.1
+
matrix:
- exclude:
- - python: "3.4"
- env: DOCUTILS=0.12
- - python: "3.5"
- env: DOCUTILS=0.12
- - python: "3.6"
- env: DOCUTILS=0.12
- - python: nightly
- env: DOCUTILS=0.12
- - python: "pypy-5.4.1"
- env: DOCUTILS=0.12
+ include:
+ - python: 'pypy'
+ env: TOXENV=pypy
+ - python: '2.7'
+ env:
+ - TOXENV=du13
+ - PYTEST_ADDOPTS = --cov sphinx --cov-append --cov-config setup.cfg
+ - python: '3.4'
+ env: TOXENV=py34
+ - python: '3.5'
+ env: TOXENV=py35
+ - python: '3.6'
+ env:
+ - TOXENV=py36
+ - PYTEST_ADDOPTS = --cov sphinx --cov-append --cov-config setup.cfg
+ - python: 'nightly'
+ env: TOXENV=py37
+ - python: '3.6'
+ env: TOXENV=docs
+ - python: '3.6'
+ env: TOXENV=mypy
+ - python: '2.7'
+ env: TOXENV=flake8
+
addons:
apt:
packages:
- graphviz
- imagemagick
+
install:
- - pip install -U pip setuptools
- - pip install docutils==$DOCUTILS
- - pip install -r test-reqs.txt
- - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then python3.6 -m pip install mypy typed-ast; fi
+ - pip install -U tox codecov
+
script:
- - flake8
- - if [[ $TRAVIS_PYTHON_VERSION == '3.6' ]]; then make style-check type-check test-async; fi
- - if [[ $TRAVIS_PYTHON_VERSION != '3.6' ]]; then make test; fi
+ - tox -- -v
+
+after_success:
+ - codecov
diff --git a/AUTHORS b/AUTHORS
index 13ce2df9f..f4ce16164 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -58,7 +58,7 @@ Other contributors, listed alphabetically, are:
* Stefan Seefeld -- toctree improvements
* Gregory Szorc -- performance improvements
* Taku Shimizu -- epub3 builder
-* Antonio Valentino -- qthelp builder
+* Antonio Valentino -- qthelp builder, docstring inheritance
* Filip Vavera -- napoleon todo directive
* Pauli Virtanen -- autodoc improvements, autosummary extension
* Stefan van der Walt -- autosummary extension
diff --git a/CHANGES b/CHANGES
index 6d70f8e5f..bd6a92728 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,3 +1,112 @@
+Release 1.7 (in development)
+============================
+
+Incompatible changes
+--------------------
+
+* #3668: The arguments has changed of main functions for each command
+* #3893: Unknown html_theme_options throw warnings instead of errors
+* #3927: Python parameter/variable types should match classes, not all objects
+* #3962: sphinx-apidoc now recognizes given directory as an implicit namespace
+ package when ``--implicit-namespaces`` option given, not subdirectories of
+ given directory.
+* #3929: apidoc: Move sphinx.apidoc to sphinx.ext.apidoc
+* #4226: apidoc: Generate new style makefile (make-mode)
+* #4274: sphinx-build returns 2 as an exit code on argument error
+
+Deprecated
+----------
+
+* using a string value for :confval:`html_sidebars` is deprecated and only list
+ values will be accepted at 2.0.
+* ``format_annotation()`` and ``formatargspec()`` is deprecated. Please use
+ ``sphinx.util.inspect.Signature`` instead.
+
+Features added
+--------------
+
+* C++, handle ``decltype(auto)``.
+* #2406: C++, add proper parsing of expressions, including linking of identifiers.
+* C++, add a ``cpp:expr`` role for inserting inline C++ expressions or types.
+* C++, support explicit member instantiations with shorthand ``template`` prefix.
+* C++, make function parameters linkable, like template params.
+* #3638: Allow to change a label of reference to equation using
+ ``math_eqref_format``
+
+* Now :confval:`suppress_warnings` accepts following configurations:
+
+ - ``ref.python`` (ref: #3866)
+* #3872: Add latex key to configure literal blocks caption position in PDF
+ output (refs #3792, #1723)
+* In case of missing docstring try to retrieve doc from base classes (ref: #3140)
+* #4023: Clarify error message when any role has more than one target.
+* #3973: epub: allow to override build date
+* #3972: epub: Sort manifest entries by filename
+* #4052: viewcode: Sort before highlighting module code
+* #1448: qthelp: Add new config value; :confval:`qthelp_namespace`
+* #4140: html themes: Make body tag inheritable
+* #4168: improve zh search with jieba
+* HTML themes can set up default sidebars through ``theme.conf``
+* #3160: html: Use ``<kdb>`` to represent ``:kbd:`` role
+* #4212: autosummary: catch all exceptions when importing modules
+* #4166: Add :confval:`math_numfig` for equation numbering by section (refs:
+ #3991, #4080). Thanks to Oliver Jahn.
+* #4311: Let LaTeX obey :confval:`numfig_secnum_depth` for figures, tables, and
+ code-blocks
+* #947: autodoc now supports ignore-module-all to ignore a module's ``__all__``
+* #4332: Let LaTeX obey :confval:`math_numfig` for equation numbering
+
+
+Features removed
+----------------
+
+* Configuration variables
+
+ - html_use_smartypants
+ - latex_keep_old_macro_names
+ - latex_elements['footer']
+
+* utility methods of ``sphinx.application.Sphinx`` class
+
+ - buildername (property)
+ - _display_chunk()
+ - old_status_iterator()
+ - status_iterator()
+ - _directive_helper()
+
+* utility methods of ``sphinx.environment.BuildEnvironment`` class
+
+ - currmodule (property)
+ - currclass (property)
+
+* epub2 builder
+* prefix and colorfunc parameter for warn()
+* ``sphinx.util.compat`` module
+* ``sphinx.util.nodes.process_only_nodes()``
+* LaTeX environment ``notice``, use ``sphinxadmonition`` instead
+* LaTeX ``\sphinxstylethead``, use ``\sphinxstyletheadfamily``
+* C++, support of function concepts. Thanks to mickk-on-cpp.
+
+
+Bugs fixed
+----------
+
+* #3882: Update the order of files for HTMLHelp and QTHelp
+* #3962: sphinx-apidoc does not recognize implicit namespace packages correctly
+* #4094: C++, allow empty template argument lists.
+* C++, also hyperlink types in the name of declarations with qualified names.
+* C++, do not add index entries for declarations inside concepts.
+* C++, support the template disambiguator for dependent names.
+* #4314: For PDF 'howto' documents, numbering of code-blocks differs from the
+ one of figures and tables
+* #4330: PDF 'howto' documents have an incoherent default LaTeX tocdepth counter
+ setting
+
+Testing
+--------
+
+* Add support for docutils 0.14
+
Release 1.6.6 (in development)
==============================
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 4438e2838..03d26c001 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -33,10 +33,10 @@ Bug Reports and Feature Requests
If you have encountered a problem with Sphinx or have an idea for a new
feature, please submit it to the `issue tracker`_ on GitHub or discuss it
-on the sphinx-dev mailing list.
+on the `sphinx-dev`_ mailing list.
For bug reports, please include the output produced during the build process
-and also the log file Sphinx creates after it encounters an un-handled
+and also the log file Sphinx creates after it encounters an unhandled
exception. The location of this file should be shown towards the end of the
error message.
@@ -45,6 +45,7 @@ issue. If possible, try to create a minimal project that produces the error
and post that instead.
.. _`issue tracker`: https://github.com/sphinx-doc/sphinx/issues
+.. _`sphinx-dev`: mailto:sphinx-dev@googlegroups.com
Contributing to Sphinx
@@ -58,7 +59,7 @@ of the core developers before it is merged into the main repository.
#. Check for open issues or open a fresh issue to start a discussion around a
feature idea or a bug.
#. If you feel uncomfortable or uncertain about an issue or your changes, feel
- free to email sphinx-dev@googlegroups.com.
+ free to email the *sphinx-dev* mailing list.
#. Fork `the repository`_ on GitHub to start making your changes to the
**master** branch for next major version, or **stable** branch for next
minor version.
@@ -98,10 +99,14 @@ These are the basic steps needed to start developing on Sphinx.
For new features or other substantial changes that should wait until the
next major release, use the ``master`` branch.
-#. Optional: setup a virtual environment. ::
+#. Setup a virtual environment.
- virtualenv ~/sphinxenv
- . ~/sphinxenv/bin/activate
+ This is not necessary for unit testing, thanks to ``tox``, but it is
+ necessary if you wish to run ``sphinx-build`` locally or run unit tests
+ without the help of ``tox``. ::
+
+ virtualenv ~/.venv
+ . ~/.venv/bin/activate
pip install -e .
#. Create a new working branch. Choose any name you like. ::
@@ -112,40 +117,53 @@ These are the basic steps needed to start developing on Sphinx.
For tips on working with the code, see the `Coding Guide`_.
-#. Test, test, test. Possible steps:
+#. Test, test, test.
+
+ Testing is best done through ``tox``, which provides a number of targets and
+ allows testing against multiple different Python environments:
+
+ * To list all possible targets::
+
+ tox -av
+
+ * To run unit tests for a specific Python version, such as 3.6::
- * Run the unit tests::
+ tox -e py36
- pip install -r test-reqs.txt
- make test
+ * To run unit tests for a specific Python version and turn on deprecation
+ warnings on so they're shown in the test output::
- * Again, it's useful to turn on deprecation warnings on so they're shown in
- the test output::
+ PYTHONWARNINGS=all tox -e py36
- PYTHONWARNINGS=all make test
+ * To run code style and type checks::
- * Build the documentation and check the output for different builders::
+ tox -e mypy
+ tox -e flake8
- cd doc
- make clean html latexpdf
+ * Arguments to ``pytest`` can be passed via ``tox``, e.g. in order to run a
+ particular test::
- * Run code style checks and type checks (type checks require mypy)::
+ tox -e py36 tests/test_module.py::test_new_feature
- make style-check
- make type-check
+ * To build the documentation::
- * Run the unit tests under different Python environments using
- :program:`tox`::
+ tox -e docs
- pip install tox
- tox -v
+ * To build the documentation in multiple formats::
- * Add a new unit test in the ``tests`` directory if you can.
+ tox -e docs -- -b html,latexpdf
+
+ You can also test by installing dependencies in your local environment. ::
+
+ pip install .[test]
+
+ New unit tests should be included in the ``tests`` directory where
+ necessary:
* For bug fixes, first add a test that fails without your changes and passes
after they are applied.
- * Tests that need a sphinx-build run should be integrated in one of the
+ * Tests that need a ``sphinx-build`` run should be integrated in one of the
existing test modules if possible. New tests that to ``@with_app`` and
then ``build_all`` for a few assertions are not good since *the test suite
should not take more than a minute to run*.
@@ -262,7 +280,7 @@ Debugging Tips
code by running the command ``make clean`` or using the
:option:`sphinx-build -E` option.
-* Use the :option:`sphinx-build -P` option to run Pdb on exceptions.
+* Use the :option:`sphinx-build -P` option to run ``pdb`` on exceptions.
* Use ``node.pformat()`` and ``node.asdom().toxml()`` to generate a printable
representation of the document structure.
@@ -299,14 +317,17 @@ There are a couple reasons that code in Sphinx might be deprecated:
no longer needs to support the older version of Python that doesn't include
the library, the library will be deprecated in Sphinx.
-As the :ref:`deprecation-policy` describes,
-the first release of Sphinx that deprecates a feature (``A.B``) should raise a
-``RemovedInSphinxXXWarning`` (where XX is the Sphinx version where the feature
-will be removed) when the deprecated feature is invoked. Assuming we have good
-test coverage, these warnings are converted to errors when running the test
-suite with warnings enabled: ``python -Wall tests/run.py``. Thus, when adding
-a ``RemovedInSphinxXXWarning`` you need to eliminate or silence any warnings
-generated when running the tests.
+As the :ref:`deprecation-policy` describes, the first release of Sphinx that
+deprecates a feature (``A.B``) should raise a ``RemovedInSphinxXXWarning``
+(where ``XX`` is the Sphinx version where the feature will be removed) when the
+deprecated feature is invoked. Assuming we have good test coverage, these
+warnings are converted to errors when running the test suite with warnings
+enabled::
+
+ pytest -Wall
+
+Thus, when adding a ``RemovedInSphinxXXWarning`` you need to eliminate or
+silence any warnings generated when running the tests.
.. _deprecation-policy:
diff --git a/EXAMPLES b/EXAMPLES
index 9d3b24311..6bf6d0e31 100644
--- a/EXAMPLES
+++ b/EXAMPLES
@@ -12,227 +12,384 @@ interesting examples.
Documentation using the alabaster theme
---------------------------------------
+* Alabaster: https://alabaster.readthedocs.io/
+* Blinker: https://pythonhosted.org/blinker/
+* Calibre: https://manual.calibre-ebook.com/
+* Click: http://click.pocoo.org/ (customized)
+* coala: https://docs.coala.io/ (customized)
* CodePy: https://documen.tician.de/codepy/
+* Fabric: http://docs.fabfile.org/
+* Fityk: http://fityk.nieto.pl/
+* Flask: http://flask.pocoo.org/docs/
+* Flask-OpenID: https://pythonhosted.org/Flask-OpenID/
+* Invoke: http://docs.pyinvoke.org/
+* Jinja: http://jinja.pocoo.org/docs/
+* Lino: http://www.lino-framework.org/ (customized)
+* marbl: https://getmarbl.readthedocs.io/
+* MDAnalysis: http://www.mdanalysis.org/docs/ (customized)
* MeshPy: https://documen.tician.de/meshpy/
-* PyCuda: https://documen.tician.de/pycuda/
+* PyCUDA: https://documen.tician.de/pycuda/
+* PyOpenCL: https://documen.tician.de/pyopencl/
* PyLangAcq: http://pylangacq.org/
+* pytest: https://docs.pytest.org/ (customized)
+* python-apt: https://apt.alioth.debian.org/python-apt-doc/
+* PyVisfile: https://documen.tician.de/pyvisfile/
+* Requests: http://www.python-requests.org/
+* searx: https://asciimoo.github.io/searx/
+* Tablib: http://docs.python-tablib.org/
+* urllib3: https://urllib3.readthedocs.io/ (customized)
+* Werkzeug: http://werkzeug.pocoo.org/docs/ (customized)
Documentation using the classic theme
-------------------------------------
+* Advanced Generic Widgets: http://xoomer.virgilio.it/infinity77/AGW_Docs/ (customized)
+* Apache CouchDB: http://docs.couchdb.org/ (customized)
* APSW: https://rogerbinns.github.io/apsw/
-* Calibre: http://manual.calibre-ebook.com/
-* Cython: http://docs.cython.org/
+* Arb: http://arblib.org/
+* Bazaar: http://doc.bazaar.canonical.com/ (customized)
+* Beautiful Soup: https://www.crummy.com/software/BeautifulSoup/bs4/doc/
+* Blender: https://docs.blender.org/api/current/
+* Bugzilla: https://bugzilla.readthedocs.io/
+* Buildbot: https://docs.buildbot.net/latest/
+* CMake: https://cmake.org/documentation/ (customized)
+* Chaco: http://docs.enthought.com/chaco/ (customized)
* Cormoran: http://cormoran.nhopkg.org/docs/
-* Director: http://pythonhosted.org/director/
+* DEAP: https://deap.readthedocs.io/ (customized)
+* Director: https://pythonhosted.org/director/
+* EZ-Draw: https://pageperso.lif.univ-mrs.fr/~edouard.thiel/ez-draw/doc/en/html/ez-manual.html (customized)
* F2py: http://f2py.sourceforge.net/docs/
-* Genomedata:
- http://noble.gs.washington.edu/proj/genomedata/doc/1.2.2/genomedata.html
+* Generic Mapping Tools (GMT): http://gmt.soest.hawaii.edu/doc/latest/ (customized)
+* Genomedata: https://noble.gs.washington.edu/proj/genomedata/doc/1.3.3/
+* GetFEM++: http://getfem.org/ (customized)
+* Glasgow Haskell Compiler: https://downloads.haskell.org/~ghc/latest/docs/html/users_guide/ (customized)
+* Grok: http://grok.zope.org/doc/current/ (customized)
+* GROMACS: http://manual.gromacs.org/documentation/
* GSL Shell: http://www.nongnu.org/gsl-shell/
-* Hands-on Python Tutorial:
- http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/
-* Hedge: https://documen.tician.de/hedge/
+* Hands-on Python Tutorial: http://anh.cs.luc.edu/python/hands-on/3.1/handsonHtml/
+* Kaa: http://api.freevo.org/kaa-base/ (customized)
* Leo: http://leoeditor.com/
-* Lino: http://www.lino-framework.org/
+* LEPL: http://www.acooke.org/lepl/ (customized)
+* Mayavi: http://docs.enthought.com/mayavi/mayavi/ (customized)
+* MediaGoblin: https://mediagoblin.readthedocs.io/ (customized)
* mpmath: http://mpmath.org/doc/current/
+* OpenCV: http://docs.opencv.org/ (customized)
* OpenEXR: http://excamera.com/articles/26/doc/index.html
* OpenGDA: http://www.opengda.org/gdadoc/html/
-* Pioneers and Prominent Men of Utah: http://pioneers.rstebbing.com/
-* PyCantonese: http://pycantonese.org/
+* Peach^3: https://peach3.nl/doc/latest/userdoc/ (customized)
+* Plone: https://docs.plone.org/ (customized)
+* PyEMD: https://pyemd.readthedocs.io/
* Pyevolve: http://pyevolve.sourceforge.net/
-* PyMQI: http://pythonhosted.org/pymqi/
-* pySPACE: http://pyspace.github.io/pyspace/
-* Python: https://docs.python.org/3/
-* python-apt: http://apt.alioth.debian.org/python-apt-doc/
-* PyUblas: https://documen.tician.de/pyublas/
-* Ring programming language: http://ring-lang.sourceforge.net/doc/index.html
-* Scapy: http://www.secdev.org/projects/scapy/doc/
+* Pygame: https://www.pygame.org/docs/ (customized)
+* PyMQI: https://pythonhosted.org/pymqi/
+* PyQt4: http://pyqt.sourceforge.net/Docs/PyQt4/ (customized)
+* PyQt5: http://pyqt.sourceforge.net/Docs/PyQt5/ (customized)
+* Python 2: https://docs.python.org/2/
+* Python 3: https://docs.python.org/3/ (customized)
+* Python Packaging Authority: https://www.pypa.io/ (customized)
+* Ring programming language: http://ring-lang.sourceforge.net/doc/ (customized)
+* SageMath: https://doc.sagemath.org/ (customized)
* Segway: http://noble.gs.washington.edu/proj/segway/doc/1.1.0/segway.html
+* simuPOP: http://simupop.sourceforge.net/manual_release/build/userGuide.html (customized)
+* Sprox: http://sprox.org/ (customized)
* SymPy: http://docs.sympy.org/
-* WTForms: http://wtforms.simplecodes.com/docs/
+* TurboGears: https://turbogears.readthedocs.org/ (customized)
+* tvtk: http://docs.enthought.com/mayavi/tvtk/
+* Varnish: https://www.varnish-cache.org/docs/ (customized, alabaster for index)
+* Waf: https://waf.io/apidocs/
+* wxPython Phoenix: https://wxpython.org/Phoenix/docs/html/main.html (customized)
* z3c: http://www.ibiblio.org/paulcarduner/z3ctutorial/
-
-
-Documentation using a customized version of the classic theme
--------------------------------------------------------------
-
-* Advanced Generic Widgets:
- http://xoomer.virgilio.it/infinity77/AGW_Docs/index.html
-* Arb: http://fredrikj.net/arb/
-* Bazaar: http://doc.bazaar.canonical.com/en/
-* CakePHP: http://book.cakephp.org/2.0/en/index.html
-* Chaco: http://docs.enthought.com/chaco/
-* Chef: https://docs.chef.io/index.html
-* EZ-Draw: http://pageperso.lif.univ-mrs.fr/~edouard.thiel/ez-draw/doc/en/html/ez-manual.html
-* Google or-tools:
- https://or-tools.googlecode.com/svn/trunk/documentation/user_manual/index.html
-* GPAW: https://wiki.fysik.dtu.dk/gpaw/
-* Grok: http://grok.zope.org/doc/current/
-* Kaa: http://api.freevo.org/kaa-base/
-* LEPL: http://www.acooke.org/lepl/
-* Mayavi: http://docs.enthought.com/mayavi/mayavi/
-* NICOS: http://trac.frm2.tum.de/nicos/doc/nicos-master/index.html
-* NOC: http://redmine.nocproject.org/projects/noc
-* NumPy: http://docs.scipy.org/doc/numpy/reference/
-* OpenCV: http://docs.opencv.org/
-* Peach^3: http://peach3.nl/doc/latest/userdoc/
-* Pygame: http://www.pygame.org/docs/
-* Sage: http://www.sagemath.org/doc/
-* SciPy: http://docs.scipy.org/doc/scipy/reference/
-* simuPOP: http://simupop.sourceforge.net/manual_release/build/userGuide.html
-* Sprox: http://sprox.org/
-* TurboGears: http://turbogears.readthedocs.org/en/latest/
-* Varnish: https://www.varnish-cache.org/docs/
-* Zentyal: http://doc.zentyal.org/
-* Zope: http://docs.zope.org/zope2/index.html
-* zc.async: http://pythonhosted.org/zc.async/1.5.0/
-
+* zc.async: https://pythonhosted.org/zc.async/ (customized)
+* Zope: https://docs.zope.org/zope2/ (customized)
Documentation using the sphinxdoc theme
---------------------------------------
-* Fityk: http://fityk.nieto.pl/
-* MapServer: http://mapserver.org/
-* Matplotlib: http://matplotlib.org/
-* Music21: http://web.mit.edu/music21/doc/index.html
-* NetworkX: http://networkx.github.io/
-* Pweave: http://mpastell.com/pweave/
+* cartopy: http://scitools.org.uk/cartopy/docs/latest/
+* Jython: http://www.jython.org/docs/
+* Matplotlib: https://matplotlib.org/
+* MDAnalysis Tutorial: http://www.mdanalysis.org/MDAnalysisTutorial/
+* NetworkX: https://networkx.github.io/
+* PyCantonese: http://pycantonese.org/
* Pyre: http://docs.danse.us/pyre/sphinx/
+* pySPACE: https://pyspace.github.io/pyspace/
* Pysparse: http://pysparse.sourceforge.net/
* PyTango:
- http://www.esrf.eu/computing/cs/tango/tango_doc/kernel_doc/pytango/latest/index.html
-* Python Wild Magic: http://vmlaker.github.io/pythonwildmagic/
-* Reteisi: http://www.reteisi.org/contents.html
-* Sqlkit: http://sqlkit.argolinux.org/
+ http://www.esrf.eu/computing/cs/tango/tango_doc/kernel_doc/pytango/latest/
+* Python Wild Magic: https://vmlaker.github.io/pythonwildmagic/ (customized)
+* Reteisi: http://www.reteisi.org/contents.html (customized)
+* Sqlkit: http://sqlkit.argolinux.org/ (customized)
* Turbulenz: http://docs.turbulenz.com/
-* WebFaction: https://docs.webfaction.com/
+Documentation using the nature theme
+------------------------------------
+
+* Alembic: http://alembic.zzzcomputing.com/
+* Cython: http://docs.cython.org/
+* easybuild: https://easybuild.readthedocs.io/
+* jsFiddle: http://doc.jsfiddle.net/
+* libLAS: https://www.liblas.org/ (customized)
+* Lmod: https://lmod.readthedocs.io/
+* MapServer: http://mapserver.org/ (customized)
+* Pandas: https://pandas.pydata.org/pandas-docs/stable/
+* pyglet: https://pyglet.readthedocs.io/ (customized)
+* Setuptools: https://setuptools.readthedocs.io/
+* Spring Python: https://docs.spring.io/spring-python/1.2.x/sphinx/html/
+* StatsModels: http://www.statsmodels.org/ (customized)
+* Sylli: http://sylli.sourceforge.net/
Documentation using another builtin theme
-----------------------------------------
* Arcade: http://arcade.academy/ (sphinx_rtd_theme)
-* ASE: https://wiki.fysik.dtu.dk/ase/ (sphinx_rtd_theme)
-* C/C++ Development with Eclipse: http://eclipsebook.in/ (agogo)
-* ESWP3 (http://eswp3.org) (sphinx_rtd_theme)
-* Jinja: http://jinja.pocoo.org/ (scrolls)
-* jsFiddle: http://doc.jsfiddle.net/ (nature)
-* libLAS: http://www.liblas.org/ (nature)
-* Linguistica: http://linguistica-uchicago.github.io/lxa5/ (sphinx_rtd_theme)
-* MoinMoin: https://moin-20.readthedocs.io/en/latest/ (sphinx_rtd_theme)
-* MPipe: http://vmlaker.github.io/mpipe/ (sphinx13)
-* Paver: http://paver.readthedocs.io/en/latest/
-* pip: https://pip.pypa.io/en/latest/ (sphinx_rtd_theme)
+* Breathe: https://breathe.readthedocs.io/ (haiku)
+* MPipe: https://vmlaker.github.io/mpipe/ (sphinx13)
+* NLTK: http://www.nltk.org/ (agogo)
* Programmieren mit PyGTK und Glade (German):
- http://www.florian-diesch.de/doc/python-und-glade/online/ (agogo)
-* PyPubSub: http://pypubsub.readthedocs.io/ (bizstyle)
+ http://www.florian-diesch.de/doc/python-und-glade/online/ (agogo, customized)
+* PyPubSub: https://pypubsub.readthedocs.io/ (bizstyle)
+* Pylons: http://docs.pylonsproject.org/projects/pylons-webframework/ (pyramid)
* Pyramid web framework:
- http://docs.pylonsproject.org/projects/pyramid/en/latest/ (pyramid)
+ https://docs.pylonsproject.org/projects/pyramid/ (pyramid)
+* Sphinx: http://www.sphinx-doc.org/ (sphinx13) :-)
+* Valence: http://docs.valence.desire2learn.com/ (haiku, customized)
+
+Documentation using sphinx_rtd_theme
+------------------------------------
+
+* Annotator: http://docs.annotatorjs.org/
+* Ansible: https://docs.ansible.com/ (customized)
+* ASE: https://wiki.fysik.dtu.dk/ase/
+* Autofac: http://docs.autofac.org/
+* BigchainDB: https://docs.bigchaindb.com/
+* Blocks: https://blocks.readthedocs.io/
+* bootstrap-datepicker: https://bootstrap-datepicker.readthedocs.io/
+* Certbot: https://letsencrypt.readthedocs.io/
+* Chainer: https://docs.chainer.org/ (customized)
+* CherryPy: http://docs.cherrypy.org/
+* Chainer: https://docs.chainer.org/
+* CodeIgniter: https://www.codeigniter.com/user_guide/
+* Conda: https://conda.io/docs/
+* Corda: https://docs.corda.net/
+* Dask: https://dask.pydata.org/
+* Databricks: https://docs.databricks.com/ (customized)
+* Dataiku DSS: https://doc.dataiku.com/
+* edX: http://docs.edx.org/
+* Electrum: http://docs.electrum.org/
+* Elemental: http://libelemental.org/documentation/dev/
+* ESWP3: https://eswp3.readthedocs.io/
+* Ethereum Homestead: http://www.ethdocs.org/
+* Fidimag: https://fidimag.readthedocs.io/
+* Flake8: http://flake8.pycqa.org/
+* GeoNode: http://docs.geonode.org/
+* Godot: https://godot.readthedocs.io/
+* Graylog: http://docs.graylog.org/
+* GPAW: https://wiki.fysik.dtu.dk/gpaw/ (customized)
+* HDF5 for Python (h5py): http://docs.h5py.org/
+* Hyperledger Fabric: https://hyperledger-fabric.readthedocs.io/
+* Hyperledger Sawtooth: https://intelledger.github.io/
+* IdentityServer: http://docs.identityserver.io/
+* Idris: http://docs.idris-lang.org/
+* javasphinx: https://bronto-javasphinx.readthedocs.io/
+* Julia: https://julia.readthedocs.io/
+* Jupyter Notebook: https://jupyter-notebook.readthedocs.io/
+* Lasagne: https://lasagne.readthedocs.io/
+* Linguistica: https://linguistica-uchicago.github.io/lxa5/
+* Linux kernel: https://www.kernel.org/doc/html/latest/index.html
+* MathJax: https://docs.mathjax.org/
+* MDTraj: http://mdtraj.org/latest/ (customized)
+* MICrobial Community Analysis (micca): http://micca.org/docs/latest/
+* MicroPython: https://docs.micropython.org/
+* Minds: https://www.minds.org/docs/ (customized)
+* Mink: http://mink.behat.org/
+* Mockery: http://docs.mockery.io/
+* mod_wsgi: https://modwsgi.readthedocs.io/
+* MoinMoin: https://moin-20.readthedocs.io/
+* Mopidy: https://docs.mopidy.com/
+* MyHDL: http://docs.myhdl.org/
+* Nextflow: https://www.nextflow.io/docs/latest/index.html
+* NICOS: https://forge.frm2.tum.de/nicos/doc/nicos-master/ (customized)
+* Pelican: http://docs.getpelican.com/
+* picamera: https://picamera.readthedocs.io/
+* Pillow: https://pillow.readthedocs.io/
+* pip: https://pip.pypa.io/
+* Paver: https://paver.readthedocs.io/
+* peewee: http://docs.peewee-orm.com/
+* Phinx: http://docs.phinx.org/
+* phpMyAdmin: https://docs.phpmyadmin.net/
+* Pweave: http://mpastell.com/pweave/
+* PyPy: http://doc.pypy.org/
+* python-sqlparse: https://sqlparse.readthedocs.io/
+* PyVISA: https://pyvisa.readthedocs.io/
+* Read The Docs: https://docs.readthedocs.io/
+* Free your information from their silos (French):
+ http://redaction-technique.org/ (customized)
+* Releases Sphinx extension: https://releases.readthedocs.io/
+* Qtile: http://docs.qtile.org/
* Quex: http://quex.sourceforge.net/doc/html/main.html
-* Satchmo: http://docs.satchmoproject.com/en/latest/ (sphinx_rtd_theme)
-* Setuptools: https://setuptools.readthedocs.io/en/latest/ (nature)
-* SimPy: http://simpy.readthedocs.org/en/latest/
-* Spring Python: http://docs.spring.io/spring-python/1.2.x/sphinx/html/ (nature)
-* sqlparse: https://sqlparse.readthedocs.io/en/latest/ (sphinx_rtd_theme)
-* Sylli: http://sylli.sourceforge.net/ (nature)
-* Tuleap Open ALM: https://tuleap.net/doc/en/ (nature)
-* Valence: http://docs.valence.desire2learn.com/ (haiku)
+* Satchmo: http://docs.satchmoproject.com/
+* Scapy: https://scapy.readthedocs.io/
+* SimPy: http://simpy.readthedocs.io/
+* SlamData: http://docs.slamdata.com/
+* Solidity: https://solidity.readthedocs.io/
+* Sonos Controller (SoCo): http://docs.python-soco.com/
+* Sphinx AutoAPI: https://sphinx-autoapi.readthedocs.io/
+* sphinx-argparse: https://sphinx-argparse.readthedocs.io/
+* Sphinx-Gallery: https://sphinx-gallery.readthedocs.io/ (customized)
+* StarUML: http://docs.staruml.io/
+* Sublime Text Unofficial Documentation: http://docs.sublimetext.info/
+* SunPy: http://docs.sunpy.org/
+* Sylius: http://docs.sylius.org/
+* Tango Controls: https://tango-controls.readthedocs.io/ (customized)
+* Topshelf: http://docs.topshelf-project.com/
+* Theano: http://www.deeplearning.net/software/theano/
+* ThreatConnect: https://docs.threatconnect.com/
+* Tuleap: https://tuleap.net/doc/en/
+* TYPO3: https://docs.typo3.org/ (customized)
+* uWSGI: https://uwsgi-docs.readthedocs.io/
+* Wagtail: http://docs.wagtail.io/
+* Web Application Attack and Audit Framework (w3af): http://docs.w3af.org/
+* Weblate: https://docs.weblate.org/
+* x265: https://x265.readthedocs.io/
+* ZeroNet: https://zeronet.readthedocs.io/
+Documentation using sphinx_bootstrap_theme
+------------------------------------------
-Documentation using a custom theme/integrated in a site
--------------------------------------------------------
+* Bootstrap Theme: https://ryan-roemer.github.io/sphinx-bootstrap-theme/
+* C/C++ Software Development with Eclipse: http://eclipsebook.in/
+* Dataverse: http://guides.dataverse.org/
+* e-cidadania: http://e-cidadania.readthedocs.org/
+* Hangfire: http://docs.hangfire.io/
+* Hedge: https://documen.tician.de/hedge/
+* ObsPy: https://docs.obspy.org/
+* Open Dylan: https://opendylan.org/documentation/
+* Pootle: http://docs.translatehouse.org/projects/pootle/
+* PyUblas: https://documen.tician.de/pyublas/
+* seaborn: https://seaborn.pydata.org/
+
+Documentation using a custom theme or integrated in a website
+-------------------------------------------------------------
-* Blender: https://www.blender.org/api/250PythonDoc/
-* Blinker: http://discorporate.us/projects/Blinker/docs/
+* Apache Cassandra: https://cassandra.apache.org/doc/
+* Astropy: http://docs.astropy.org/
+* Bokeh: https://bokeh.pydata.org/
+* Boto 3: https://boto3.readthedocs.io/
+* CakePHP: https://book.cakephp.org/
+* CasperJS: http://docs.casperjs.org/
* Ceph: http://docs.ceph.com/docs/master/
-* Classy: http://www.pocoo.org/projects/classy/
-* DEAP: http://deap.gel.ulaval.ca/doc/0.8/index.html
+* Chef: https://docs.chef.io/
+* CKAN: http://docs.ckan.org/
+* Confluent Platform: http://docs.confluent.io/
* Django: https://docs.djangoproject.com/
-* Elemental: http://libelemental.org/documentation/dev/index.html
+* Doctrine: http://docs.doctrine-project.org/
* Enterprise Toolkit for Acrobat products:
- http://www.adobe.com/devnet-docs/acrobatetk/
-* e-cidadania: http://e-cidadania.readthedocs.org/en/latest/
-* Flask: http://flask.pocoo.org/docs/
-* Flask-OpenID: http://pythonhosted.org/Flask-OpenID/
+ https://www.adobe.com/devnet-docs/acrobatetk/
* Gameduino: http://excamera.com/sphinx/gameduino/
+* gensim: https://radimrehurek.com/gensim/
* GeoServer: http://docs.geoserver.org/
* gevent: http://www.gevent.org/
* GHC - Glasgow Haskell Compiler: http://downloads.haskell.org/~ghc/master/users-guide/
+* Guzzle: http://docs.guzzlephp.org/en/stable/
+* H2O.ai: http://docs.h2o.ai/
+* Istihza (Turkish Python documentation project): https://belgeler.yazbel.com/python-istihza/
+* Kombu: http://docs.kombu.me/
* Lasso: http://lassoguide.com/
-* Manage documentation such as source code (fr): http://redaction-technique.org/
-* MathJax: http://docs.mathjax.org/en/latest/
+* Mako: http://docs.makotemplates.org/
* MirrorBrain: http://mirrorbrain.org/docs/
+* MongoDB: https://docs.mongodb.com/
+* Music21: http://web.mit.edu/music21/doc/
* MyHDL: http://docs.myhdl.org/en/latest/
-* nose: http://nose.readthedocs.org/en/latest/
+* nose: https://nose.readthedocs.io/
+* ns-3: https://www.nsnam.org/documentation/
+* NumPy: https://docs.scipy.org/doc/numpy/reference/
* ObjectListView: http://objectlistview.sourceforge.net/python/
-* Open ERP: https://doc.odoo.com/
+* OpenERP: https://doc.odoo.com/
* OpenCV: http://docs.opencv.org/
-* Open Dylan: http://opendylan.org/documentation/
* OpenLayers: http://docs.openlayers.org/
+* OpenTURNS: http://openturns.github.io/openturns/master/
+* Open vSwitch: http://docs.openvswitch.org/
+* PlatformIO: http://docs.platformio.org/
* PyEphem: http://rhodesmill.org/pyephem/
-* German Plone user manual: http://www.hasecke.com/plone-benutzerhandbuch/
+* Pygments: http://pygments.org/docs/
+* Plone User Manual (German): https://www.hasecke.com/plone-benutzerhandbuch/4.0/
* PSI4: http://www.psicode.org/psi4manual/master/index.html
-* Pylons: http://docs.pylonsproject.org/projects/pylons-webframework/en/latest/
* PyMOTW: https://pymotw.com/2/
-* python-aspectlib: http://python-aspectlib.readthedocs.org/en/latest/
- (`sphinx-py3doc-enhanced-theme`_)
-* QGIS: http://qgis.org/en/docs/index.html
-* qooxdoo: http://manual.qooxdoo.org/current/
+* python-aspectlib: https://python-aspectlib.readthedocs.io/
+ (`sphinx_py3doc_enhanced_theme <https://pypi.python.org/pypi/sphinx_py3doc_enhanced_theme>`__)
+* QGIS: https://qgis.org/en/docs/index.html
+* qooxdoo: http://www.qooxdoo.org/current/
* Roundup: http://www.roundup-tracker.org/
-* Seaborn: https://stanford.edu/~mwaskom/software/seaborn/
+* SaltStack: https://docs.saltstack.com/
+* scikit-learn: http://scikit-learn.org/stable/
+* SciPy: https://docs.scipy.org/doc/scipy/reference/
+* Scrapy: https://doc.scrapy.org/
+* Seaborn: https://seaborn.pydata.org/
* Selenium: http://docs.seleniumhq.org/docs/
* Self: http://www.selflanguage.org/
-* Substance D: http://docs.pylonsproject.org/projects/substanced/en/latest/
-* SQLAlchemy: http://www.sqlalchemy.org/docs/
-* Sylius: http://docs.sylius.org/
+* Substance D: https://docs.pylonsproject.org/projects/substanced/
+* Sulu: http://docs.sulu.io/
+* SQLAlchemy: https://docs.sqlalchemy.org/
* tinyTiM: http://tinytim.sourceforge.net/docs/2.0/
-* Ubuntu packaging guide: http://packaging.ubuntu.com/html/
-* Werkzeug: http://werkzeug.pocoo.org/docs/
-* WFront: http://discorporate.us/projects/WFront/
-
-.. _sphinx-py3doc-enhanced-theme: https://pypi.python.org/pypi/sphinx_py3doc_enhanced_theme
-
+* Twisted: http://twistedmatrix.com/documents/current/
+* Ubuntu Packaging Guide: http://packaging.ubuntu.com/html/
+* WebFaction: https://docs.webfaction.com/
+* WTForms: https://wtforms.readthedocs.io/
Homepages and other non-documentation sites
-------------------------------------------
-* A personal page: http://www.dehlia.in/
-* Benoit Boissinot: http://bboissin.appspot.com/
-* The Wine Cellar Book: http://www.thewinecellarbook.com/doc/en/
-* UC Berkeley Advanced Control Systems course:
- http://msc.berkeley.edu/tomizuka/me233spring13/
-
+* Arizona State University PHY494/PHY598/CHM598 Simulation approaches to Bio-
+ and Nanophysics:
+ https://becksteinlab.physics.asu.edu/pages/courses/2013/SimBioNano/ (classic)
+* Benoit Boissinot: https://bboissin.appspot.com/ (classic, customized)
+* Computer Networks, Parallelization, and Simulation Laboratory (CNPSLab):
+ https://lab.miletic.net/ (sphinx_rtd_theme)
+* Deep Learning Tutorials: http://www.deeplearning.net/tutorial/ (sphinxdoc)
+* Loyola University Chicago COMP 339-439 Distributed Systems course:
+ http://books.cs.luc.edu/distributedsystems/ (sphinx_bootstrap_theme)
+* Pylearn2: http://www.deeplearning.net/software/pylearn2/ (sphinxdoc, customized)
+* SciPy Cookbook: https://scipy-cookbook.readthedocs.io/ (sphinx_rtd_theme)
+* The Wine Cellar Book: https://www.thewinecellarbook.com/doc/en/ (sphinxdoc)
+* Thomas Cokelaer's Python, Sphinx and reStructuredText tutorials:
+ http://thomas-cokelaer.info/tutorials/ (standard)
+* UC Berkeley ME233 Advanced Control Systems II course:
+ https://berkeley-me233.github.io/ (sphinxdoc)
Books produced using Sphinx
---------------------------
-* "The ``repoze.bfg`` Web Application Framework":
- http://www.amazon.com/repoze-bfg-Web-Application-Framework-Version/dp/0615345379
-* A Theoretical Physics Reference book: http://www.theoretical-physics.net/
-* "Simple and Steady Way of Learning for Software Engineering" (in Japanese):
- http://www.amazon.co.jp/dp/477414259X/
+* "Die Wahrheit des Sehens. Der DEKALOG von Krzysztof Kieślowski":
+ https://literatur.hasecke.com/post/die-wahrheit-des-sehens-dekalog-kieslowski/
* "Expert Python Programming":
https://www.packtpub.com/application-development/expert-python-programming
* "Expert Python Programming" (Japanese translation):
- http://www.amazon.co.jp/dp/4048686291/
+ https://www.amazon.co.jp/dp/4048686291/
+* "LassoGuide": http://www.lassosoft.com/Lasso-Documentation
+* "Learning Sphinx" (in Japanese):
+ https://www.oreilly.co.jp/books/9784873116488/
+* "Mercurial: the definitive guide (Second edition)":
+ https://book.mercurial-scm.org/
+* "Pioneers and Prominent Men of Utah": http://pioneers.rstebbing.com/
* "Pomodoro Technique Illustrated" (Japanese translation):
- http://www.amazon.co.jp/dp/4048689525/
+ https://www.amazon.co.jp/dp/4048689525/
* "Python Professional Programming" (in Japanese):
http://www.amazon.co.jp/dp/4798032948/
-* "Die Wahrheit des Sehens. Der DEKALOG von Krzysztof Kieślowski":
- http://www.hasecke.eu/Dekalog/
-* The "Varnish Book":
- http://book.varnish-software.com/4.0/
-* "Learning Sphinx" (in Japanese):
- http://www.oreilly.co.jp/books/9784873116488/
-* "LassoGuide":
- http://www.lassosoft.com/Lasso-Documentation
-* "Software-Dokumentation mit Sphinx": http://www.amazon.de/dp/1497448689/
-
+* "The ``repoze.bfg`` Web Application Framework":
+ https://www.amazon.com/repoze-bfg-Web-Application-Framework-Version/dp/0615345379
+* "Simple and Steady Way of Learning for Software Engineering" (in Japanese):
+ https://www.amazon.co.jp/dp/477414259X/
+* "Software-Dokumentation mit Sphinx": https://www.amazon.de/dp/1497448689/
+* "Theoretical Physics Reference": http://www.theoretical-physics.net/
+* "The Varnish Book":
+ https://info.varnish-software.com/the-varnish-book
-Thesis using Sphinx
--------------------
+Theses produced using Sphinx
+----------------------------
-* "A Web-Based System for Comparative Analysis of OpenStreetMap Data
- by the Use of CouchDB":
+* "A Web-Based System for Comparative Analysis of OpenStreetMap Data by the Use
+ of CouchDB":
https://www.yumpu.com/et/document/view/11722645/masterthesis-markusmayr-0542042
+* "Content Conditioning and Distribution for Dynamic Virtual Worlds":
+ https://www.cs.princeton.edu/research/techreps/TR-941-12
+* "The Sphinx Thesis Resource": https://jterrace.github.io/sphinxtr/
diff --git a/MANIFEST.in b/MANIFEST.in
index d478724e7..a5699c23c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,7 +12,6 @@ include sphinx-autogen.py
include sphinx-build.py
include sphinx-quickstart.py
include sphinx-apidoc.py
-include test-reqs.txt
include tox.ini
include sphinx/locale/.tx/config
@@ -20,13 +19,11 @@ recursive-include sphinx/templates *
recursive-include sphinx/texinputs *
recursive-include sphinx/texinputs_win *
recursive-include sphinx/themes *
-recursive-include sphinx/pycode/pgen2 *.c *.pyx
recursive-include sphinx/locale *.js *.pot *.po *.mo
recursive-include sphinx/search/non-minified-js *.js
recursive-include sphinx/ext/autosummary/templates *
recursive-include tests *
recursive-include utils *
-include sphinx/pycode/Grammar-py*
recursive-include doc *
prune doc/_build
diff --git a/Makefile b/Makefile
index a20df8f39..67699363f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,67 +1,41 @@
PYTHON ?= python
-.PHONY: all style-check type-check clean clean-pyc clean-patchfiles clean-backupfiles \
- clean-generated pylint reindent test covertest build
-
-DONT_CHECK = -i .ropeproject \
- -i .tox \
- -i build \
- -i dist \
- -i doc/_build \
- -i sphinx/pycode/pgen2 \
- -i sphinx/search/da.py \
- -i sphinx/search/de.py \
- -i sphinx/search/en.py \
- -i sphinx/search/es.py \
- -i sphinx/search/fi.py \
- -i sphinx/search/fr.py \
- -i sphinx/search/hu.py \
- -i sphinx/search/it.py \
- -i sphinx/search/ja.py \
- -i sphinx/search/nl.py \
- -i sphinx/search/no.py \
- -i sphinx/search/pt.py \
- -i sphinx/search/ro.py \
- -i sphinx/search/ru.py \
- -i sphinx/search/sv.py \
- -i sphinx/search/tr.py \
- -i sphinx/style/jquery.js \
- -i sphinx/util/smartypants.py \
- -i tests/build \
- -i tests/path.py \
- -i tests/roots/test-directive-code/target.py \
- -i tests/roots/test-warnings/undecodable.rst \
- -i tests/test_autodoc_py35.py \
- -i tests/typing_test_data.py \
- -i utils/convert.py
-
+.PHONY: all
all: clean-pyc clean-backupfiles style-check type-check test
+.PHONY: style-check
style-check:
- @PYTHONWARNINGS=all $(PYTHON) utils/check_sources.py $(DONT_CHECK) .
+ @flake8
+.PHONY: type-check
type-check:
mypy sphinx/
+.PHONY: clean
clean: clean-pyc clean-pycache clean-patchfiles clean-backupfiles clean-generated clean-testfiles clean-buildfiles clean-mypyfiles
+.PHONY: clean-pyc
clean-pyc:
find . -name '*.pyc' -exec rm -f {} +
find . -name '*.pyo' -exec rm -f {} +
+.PHONY: clean-pycache
clean-pycache:
find . -name __pycache__ -exec rm -rf {} +
+.PHONY: clean-patchfiles
clean-patchfiles:
find . -name '*.orig' -exec rm -f {} +
find . -name '*.rej' -exec rm -f {} +
+.PHONY: clean-backupfiles
clean-backupfiles:
find . -name '*~' -exec rm -f {} +
find . -name '*.bak' -exec rm -f {} +
find . -name '*.swp' -exec rm -f {} +
find . -name '*.swo' -exec rm -f {} +
+.PHONY: clean-generated
clean-generated:
find . -name '.DS_Store' -exec rm -f {} +
rm -rf Sphinx.egg-info/
@@ -70,32 +44,48 @@ clean-generated:
rm -f utils/*3.py*
rm -f utils/regression_test.js
+.PHONY: clean-testfiles
clean-testfiles:
rm -rf tests/.coverage
rm -rf tests/build
rm -rf .tox/
rm -rf .cache/
+.PHONY: clean-buildfiles
clean-buildfiles:
rm -rf build
+.PHONY: clean-mypyfiles
clean-mypyfiles:
rm -rf .mypy_cache/
+.PHONY: pylint
pylint:
@pylint --rcfile utils/pylintrc sphinx
+.PHONY: reindent
reindent:
- @$(PYTHON) utils/reindent.py -r -n .
+ @echo "This target no longer does anything and will be removed imminently"
+.PHONY: test
test:
- @cd tests; $(PYTHON) run.py --ignore py35 -v $(TEST)
+ @$(PYTHON) -m pytest -v $(TEST)
+.PHONY: test-async
test-async:
- @cd tests; $(PYTHON) run.py -v $(TEST)
+ @echo "This target no longer does anything and will be removed imminently"
+.PHONY: covertest
covertest:
- @cd tests; $(PYTHON) run.py -v --cov=sphinx --junitxml=.junit.xml $(TEST)
+ @$(PYTHON) -m pytest -v --cov=sphinx --junitxml=.junit.xml $(TEST)
+.PHONY: build
build:
@$(PYTHON) setup.py build
+
+.PHONY: docs
+docs:
+ifndef target
+	$(info You need to provide a target variable, e.g. `make docs target=html`.)
+endif
+ $(MAKE) -C doc $(target)
diff --git a/README.rst b/README.rst
index 1e027ec8e..2d841f78e 100644
--- a/README.rst
+++ b/README.rst
@@ -1,45 +1,106 @@
+========
+ Sphinx
+========
+
.. image:: https://img.shields.io/pypi/v/sphinx.svg
:target: https://pypi.python.org/pypi/Sphinx
+ :alt: Package on PyPi
+
.. image:: https://readthedocs.org/projects/sphinx/badge/
:target: http://www.sphinx-doc.org/
:alt: Documentation Status
+
.. image:: https://travis-ci.org/sphinx-doc/sphinx.svg?branch=master
:target: https://travis-ci.org/sphinx-doc/sphinx
+ :alt: Build Status (Travis CI)
+
+.. image:: https://ci.appveyor.com/api/projects/status/github/sphinx-doc/sphinx?branch=master&svg=true
+ :target: https://ci.appveyor.com/project/sphinxdoc/sphinx
+ :alt: Build Status (AppVeyor)
+
+.. image:: https://circleci.com/gh/sphinx-doc/sphinx.svg?style=shield
+ :target: https://circleci.com/gh/sphinx-doc/sphinx
+ :alt: Build Status (CircleCI)
+
+.. image:: https://codecov.io/gh/sphinx-doc/sphinx/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/sphinx-doc/sphinx
+ :alt: Code Coverage Status (Codecov)
+
+Sphinx is a tool that makes it easy to create intelligent and beautiful
+documentation for Python projects (or other documents consisting of multiple
+reStructuredText sources), written by Georg Brandl. It was originally created
+for the new Python documentation, and has excellent facilities for Python
+project documentation, but C/C++ is supported as well, and more languages are
+planned.
+
+Sphinx uses reStructuredText as its markup language, and many of its strengths
+come from the power and straightforwardness of reStructuredText and its parsing
+and translating suite, the Docutils.
+
+Among its features are the following:
+
+* Output formats: HTML (including derivative formats such as HTML Help, Epub
+ and Qt Help), plain text, manual pages and LaTeX or direct PDF output
+ using rst2pdf
+* Extensive cross-references: semantic markup and automatic links
+ for functions, classes, glossary terms and similar pieces of information
+* Hierarchical structure: easy definition of a document tree, with automatic
+ links to siblings, parents and children
+* Automatic indices: general index as well as a module index
+* Code handling: automatic highlighting using the Pygments highlighter
+* Flexible HTML output using the Jinja 2 templating engine
+* Various extensions are available, e.g. for automatic testing of snippets
+ and inclusion of appropriately formatted docstrings
+* Setuptools integration
+
+For more information, refer to `the documentation`__.
+
+.. __: http://www.sphinx-doc.org/
+
+Installation
+============
-=================
-README for Sphinx
-=================
-
-This is the Sphinx documentation generator, see http://www.sphinx-doc.org/.
+Sphinx is published on `PyPI`__ and can be installed from there::
+ pip install -U sphinx
-Installing
-==========
+We also publish beta releases::
-Install from PyPI to use stable version::
+ pip install -U --pre sphinx
- pip install -U sphinx
+If you wish to install `Sphinx` for development purposes, refer to `the
+contributors guide`__.
-Install from PyPI to use beta version::
+__ https://pypi.python.org/pypi/Sphinx
+__ CONTRIBUTING.rst
- pip install -U --pre sphinx
+Documentation
+=============
-Install from newest dev version in stable branch::
+Documentation is available from `sphinx-doc.org`__.
- pip install git+https://github.com/sphinx-doc/sphinx@stable
+__ http://www.sphinx-doc.org/
-Install from newest dev version in master branch::
+Testing
+=======
- pip install git+https://github.com/sphinx-doc/sphinx
+Continuous testing is provided by `Travis`__ (for unit tests and style checks
+on Linux), `AppVeyor`__ (for unit tests on Windows), and `CircleCI`__ (for
+large processes like TeX compilation).
-Install from cloned source::
+For information on running tests locally, refer to `the contributors guide`__.
- pip install .
+__ https://travis-ci.org/sphinx-doc/sphinx
+__ https://ci.appveyor.com/project/sphinxdoc/sphinx
+__ https://circleci.com/gh/sphinx-doc/sphinx
+__ CONTRIBUTING.rst
-Install from cloned source as editable::
+Contributing
+============
- pip install -e .
+Refer to `the contributors guide`__.
+__ CONTRIBUTING.rst
Release signatures
==================
@@ -48,37 +109,3 @@ Releases are signed with following keys:
* `498D6B9E <https://pgp.mit.edu/pks/lookup?op=vindex&search=0x102C2C17498D6B9E>`_
* `5EBA0E07 <https://pgp.mit.edu/pks/lookup?op=vindex&search=0x1425F8CE5EBA0E07>`_
-
-Reading the docs
-================
-
-You can read them online at <http://www.sphinx-doc.org/>.
-
-Or, after installing::
-
- cd doc
- make html
-
-Then, direct your browser to ``_build/html/index.html``.
-
-Testing
-=======
-
-To run the tests with the interpreter available as ``python``, use::
-
- make test
-
-If you want to use a different interpreter, e.g. ``python3``, use::
-
- PYTHON=python3 make test
-
-Continuous testing runs on travis: https://travis-ci.org/sphinx-doc/sphinx
-
-
-Contributing
-============
-
-See `CONTRIBUTING.rst`__
-
-.. __: CONTRIBUTING.rst
-
diff --git a/doc/Makefile b/doc/Makefile
index d0e4e297b..c54236be0 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -3,7 +3,7 @@
# You can set these variables from the command line.
SPHINXOPTS =
-SPHINXBUILD = python ../sphinx-build.py
+SPHINXBUILD = python ../sphinx/cmd/build.py
SPHINXPROJ = sphinx
SOURCEDIR = .
BUILDDIR = _build
diff --git a/doc/_static/conf.py.txt b/doc/_static/conf.py.txt
index be0c846db..50c7bb782 100644
--- a/doc/_static/conf.py.txt
+++ b/doc/_static/conf.py.txt
@@ -167,11 +167,6 @@ html_static_path = ['_static']
#
# html_last_updated_fmt = None
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#
-# html_use_smartypants = True
-
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
@@ -280,12 +275,6 @@ latex_documents = [
#
# latex_appendices = []
-# If false, will not define \strong, \code, \titleref, \crossref ... but only
-# \sphinxstrong, ..., \sphinxtitleref, ... to help avoid clash with user added
-# packages.
-#
-# latex_keep_old_macro_names = True
-
# If false, no module index is generated.
#
# latex_domain_indices = True
diff --git a/doc/_themes/sphinx13/layout.html b/doc/_themes/sphinx13/layout.html
index cd870fe7b..ce6f08daa 100644
--- a/doc/_themes/sphinx13/layout.html
+++ b/doc/_themes/sphinx13/layout.html
@@ -14,7 +14,7 @@
{% block sidebar2 %}{% endblock %}
{% block extrahead %}
- <link href='http://fonts.googleapis.com/css?family=Open+Sans:300,400,700'
+ <link href='https://fonts.googleapis.com/css?family=Open+Sans:300,400,700'
rel='stylesheet' type='text/css' />
{{ super() }}
{%- if not embedded %}
diff --git a/doc/builders.rst b/doc/builders.rst
index 534428732..ff56881af 100644
--- a/doc/builders.rst
+++ b/doc/builders.rst
@@ -125,26 +125,6 @@ The builder's "name" must be given to the **-b** command-line option of
.. autoattribute:: supported_image_types
-.. module:: sphinx.builders.epub2
-.. class:: Epub2Builder
-
- This builder produces the same output as the standalone HTML builder, but
- also generates an *epub* file for ebook readers. See :ref:`epub-faq` for
- details about it. For definition of the epub format, have a look at
- `<http://idpf.org/epub>`_ or `<https://en.wikipedia.org/wiki/EPUB>`_.
- The builder creates *EPUB 2* files.
-
- .. autoattribute:: name
-
- .. autoattribute:: format
-
- .. autoattribute:: supported_image_types
-
- .. deprecated:: 1.5
-
- Since Sphinx-1.5, the epub3 builder is used for the default builder of epub.
- Now EpubBuilder is renamed to epub2.
-
.. module:: sphinx.builders.epub3
.. class:: Epub3Builder
diff --git a/doc/conf.py b/doc/conf.py
index 1f19b53a2..fa82cbfb7 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -84,6 +84,8 @@ man_pages = [
'template generator', '', 1),
('man/sphinx-apidoc', 'sphinx-apidoc', 'Sphinx API doc generator tool',
'', 1),
+ ('man/sphinx-autogen', 'sphinx-autogen', 'Generate autodoc stub pages',
+ '', 1),
]
texinfo_documents = [
diff --git a/doc/config.rst b/doc/config.rst
index 1f222451d..1ef455803 100644
--- a/doc/config.rst
+++ b/doc/config.rst
@@ -59,6 +59,9 @@ Important points to note:
Note that the current builder tag is not available in ``conf.py``, as it is
created *after* the builder is initialized.
+.. seealso:: Additional configurations, such as adding stylesheets,
+ javascripts, builders, etc. can be made through the :doc:`/extdev/appapi`.
+
General configuration
---------------------
@@ -135,12 +138,10 @@ General configuration
- ``'library/xml.rst'`` -- ignores the ``library/xml.rst`` file (replaces
entry in :confval:`unused_docs`)
- - ``'library/xml'`` -- ignores the ``library/xml`` directory (replaces entry
- in :confval:`exclude_trees`)
+ - ``'library/xml'`` -- ignores the ``library/xml`` directory
- ``'library/xml*'`` -- ignores all files and directories starting with
``library/xml``
- - ``'**/.svn'`` -- ignores all ``.svn`` directories (replaces entry in
- :confval:`exclude_dirnames`)
+ - ``'**/.svn'`` -- ignores all ``.svn`` directories
:confval:`exclude_patterns` is also consulted when looking for static files
in :confval:`html_static_path` and :confval:`html_extra_path`.
@@ -245,6 +246,7 @@ General configuration
* ref.citation
* ref.footnote
* ref.doc
+ * ref.python
* misc.highlighting_failure
* toc.secnum
* epub.unknown_project_files
@@ -339,20 +341,19 @@ General configuration
starting at ``1``.
- if ``1`` (default) numbers will be ``x.1``, ``x.2``, ... with ``x``
the section number (top level sectioning; no ``x.`` if no section).
- This naturally applies only if section numbering has been activated via
+ This naturally applies only if section numbering has been activated via
the ``:numbered:`` option of the :rst:dir:`toctree` directive.
- ``2`` means that numbers will be ``x.y.1``, ``x.y.2``, ... if located in
a sub-section (but still ``x.1``, ``x.2``, ... if located directly under a
section and ``1``, ``2``, ... if not in any top level section.)
- etc...
- .. note::
-
- The LaTeX builder currently ignores this configuration setting. It will
- obey it at Sphinx 1.7.
-
.. versionadded:: 1.3
+ .. versionchanged:: 1.7
+ The LaTeX builder obeys this setting (if :confval:`numfig` is set to
+ ``True``).
+
.. confval:: tls_verify
If true, Sphinx verifies server certifications. Default is ``True``.
@@ -824,14 +825,19 @@ that use Sphinx's HTMLWriter class.
to include. If all or some of the default sidebars are to be included,
they must be put into this list as well.
- The default sidebars (for documents that don't match any pattern) are:
- ``['localtoc.html', 'relations.html', 'sourcelink.html',
+ The default sidebars (for documents that don't match any pattern) are
+ defined by theme itself. Builtin themes are using these templates by
+ default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
'searchbox.html']``.
* If a value is a single string, it specifies a custom sidebar to be added
between the ``'sourcelink.html'`` and ``'searchbox.html'`` entries. This
is for compatibility with Sphinx versions before 1.0.
+ .. deprecated:: 1.7
+
+ a single string value for ``html_sidebars`` will be removed in 2.0
+
Builtin sidebar templates that can be rendered are:
* **localtoc.html** -- a fine-grained table of contents of the current
@@ -1452,10 +1458,6 @@ the `Dublin Core metadata <http://dublincore.org/>`_.
a chapter, but can be confusing because it mixes entries of different
depth in one list. The default value is ``True``.
- .. note::
-
- ``epub3`` builder ignores ``epub_tocdup`` option(always ``False``)
-
.. confval:: epub_tocscope
This setting control the scope of the epub table of contents. The setting
@@ -1665,26 +1667,6 @@ These options influence LaTeX output. See further :doc:`latex`.
value selected the ``'inline'`` display. For backwards compatibility,
``True`` is still accepted.
-.. confval:: latex_keep_old_macro_names
-
- If ``True`` the ``\strong``, ``\code``, ``\bfcode``, ``\email``,
- ``\tablecontinued``, ``\titleref``, ``\menuselection``, ``\accelerator``,
- ``\crossref``, ``\termref``, and ``\optional`` text styling macros are
- pre-defined by Sphinx and may be user-customized by some
- ``\renewcommand``'s inserted either via ``'preamble'`` key or :dudir:`raw
- <raw-data-pass-through>` directive. If ``False``, only ``\sphinxstrong``,
- etc... macros are defined (and may be redefined by user).
-
- The default is ``False`` as it prevents macro name conflicts caused by
- latex packages. For example (``lualatex`` or ``xelatex``) ``fontspec v2.6``
- has its own ``\strong`` macro.
-
- .. versionadded:: 1.4.5
- .. versionchanged:: 1.6
- Default was changed from ``True`` to ``False``.
- .. deprecated:: 1.6
- This setting will be removed at Sphinx 1.7.
-
.. confval:: latex_use_latex_multicolumn
The default is ``False``: it means that Sphinx's own macros are used for
@@ -2134,6 +2116,35 @@ These options influence Texinfo output.
.. versionadded:: 1.1
+.. _qthelp-options:
+
+Options for QtHelp output
+--------------------------
+
+These options influence qthelp output. As this builder derives from the HTML
+builder, the HTML options also apply where appropriate.
+
+.. confval:: qthelp_basename
+
+ The basename for the qthelp file. It defaults to the :confval:`project` name.
+
+.. confval:: qthelp_namespace
+
+ The namespace for the qthelp file. It defaults to
+ ``org.sphinx.<project_name>.<project_version>``.
+
+.. confval:: qthelp_theme
+
+ The HTML theme for the qthelp output.
+ This defaults to ``'nonav'``.
+
+.. confval:: qthelp_theme_options
+
+ A dictionary of options that influence the look and feel of the selected
+ theme. These are theme-specific. For the options understood by the builtin
+ themes, see :ref:`this section <builtin-themes>`.
+
+
Options for the linkcheck builder
---------------------------------
diff --git a/doc/contents.rst b/doc/contents.rst
index 36eed649e..ab8d09eb0 100644
--- a/doc/contents.rst
+++ b/doc/contents.rst
@@ -9,7 +9,7 @@ Sphinx documentation contents
intro
tutorial
- invocation
+ man/index
rest
markup/index
domains
diff --git a/doc/domains.rst b/doc/domains.rst
index 2dc01c0b9..5bed02cf4 100644
--- a/doc/domains.rst
+++ b/doc/domains.rst
@@ -544,6 +544,7 @@ defined in the documentation:
Reference a C-language variable.
+.. _cpp-domain:
The C++ Domain
--------------
@@ -567,7 +568,7 @@ a visibility statement (``public``, ``private`` or ``protected``).
.. cpp:class:: OuterScope::MyClass : public MyBase, MyOtherBase
- A template class can be declared::
+ A class template can be declared::
.. cpp:class:: template<typename T, std::size_t N> std::array
@@ -719,35 +720,47 @@ a visibility statement (``public``, ``private`` or ``protected``).
.. rst:directive:: .. cpp:concept:: template-parameter-list name
- .. cpp:concept:: template-parameter-list name()
.. warning:: The support for concepts is experimental. It is based on the
- Concepts Technical Specification, and the features may change as the TS evolves.
+ current draft standard and the Concepts Technical Specification.
+ The features may change as they evolve.
- Describe a variable concept or a function concept. Both must have exactly 1
- template parameter list. The name may be a nested name. Examples::
+ Describe a concept. It must have exactly 1 template parameter list. The name may be a
+ nested name. Example::
.. cpp:concept:: template<typename It> std::Iterator
Proxy to an element of a notional sequence that can be compared,
indirected, or incremented.
- .. cpp:concept:: template<typename Cont> std::Container()
+ **Notation**
- Holder of elements, to which it can provide access via
- :cpp:concept:`Iterator` s.
+ .. cpp:var:: It r
- They will render as follows:
+ An lvalue.
+
+ **Valid Expressions**
+
+ - :cpp:expr:`*r`, when :cpp:expr:`r` is dereferenceable.
+ - :cpp:expr:`++r`, with return type :cpp:expr:`It&`, when :cpp:expr:`r` is incrementable.
+
+ This will render as follows:
.. cpp:concept:: template<typename It> std::Iterator
Proxy to an element of a notional sequence that can be compared,
indirected, or incremented.
- .. cpp:concept:: template<typename Cont> std::Container()
+ **Notation**
+
+ .. cpp:var:: It r
+
+ An lvalue.
- Holder of elements, to which it can provide access via
- :cpp:concept:`Iterator` s.
+ **Valid Expressions**
+
+ - :cpp:expr:`*r`, when :cpp:expr:`r` is dereferenceable.
+ - :cpp:expr:`++r`, with return type :cpp:expr:`It&`, when :cpp:expr:`r` is incrementable.
Options
.......
@@ -762,8 +775,9 @@ Some directives support options:
Constrained Templates
~~~~~~~~~~~~~~~~~~~~~
-.. warning:: The support for constrained templates is experimental. It is based on the
- Concepts Technical Specification, and the features may change as the TS evolves.
+.. warning:: The support for concepts is experimental. It is based on the
+ current draft standard and the Concepts Technical Specification.
+ The features may change as they evolve.
.. note:: Sphinx does not currently support ``requires`` clauses.
@@ -811,6 +825,30 @@ compatibility. E.g., ``Iterator{A, B, C}`` will be accepted as an introduction
even though it would not be valid C++.
+Inline Expressions and Types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. rst:role:: cpp:expr
+
+ A role for inserting a C++ expression or type as inline text.
+ For example::
+
+ .. cpp:var:: int a = 42
+
+ .. cpp:function:: int f(int i)
+
+ An expression: :cpp:expr:`a * f(a)`.
+ A type: :cpp:expr:`const MySortedContainer<int>&`.
+
+ will be rendered as follows:
+
+ .. cpp:var:: int a = 42
+
+ .. cpp:function:: int f(int i)
+
+ An expression: :cpp:expr:`a * f(a)`.
+ A type: :cpp:expr:`const MySortedContainer<int>&`.
+
Namespacing
~~~~~~~~~~~~~~~~~
@@ -846,7 +884,7 @@ directive.
.. cpp:function:: std::size_t size() const
- declares ``size`` as a member function of the template class ``std::vector``.
+ declares ``size`` as a member function of the class template ``std::vector``.
Equivalently this could have been declared using::
.. cpp:class:: template<typename T> \
@@ -926,7 +964,7 @@ These roles link to the given declaration types:
.. admonition:: Note on References with Templates Parameters/Arguments
Sphinx's syntax to give references a custom title can interfere with
- linking to template classes, if nothing follows the closing angle
+ linking to class templates, if nothing follows the closing angle
bracket, i.e. if the link looks like this: ``:cpp:class:`MyClass<int>```.
This is interpreted as a link to ``int`` with a title of ``MyClass``.
In this case, please escape the opening angle bracket with a backslash,
@@ -965,7 +1003,7 @@ In general the reference must include the template paraemter declarations, e.g.,
Currently the lookup only succeed if the template parameter identifiers are equal strings. That is,
``template\<typename UOuter> Wrapper::Outer`` will not work.
-The inner template class can not be directly referenced, unless the current namespace
+The inner class template can not be directly referenced, unless the current namespace
is changed or the following shorthand is used.
If a template parameter list is omitted, then the lookup will assume either a template or a non-template,
but not a partial template specialisation.
diff --git a/doc/ext/autodoc.rst b/doc/ext/autodoc.rst
index 1f1892dbf..09098f39c 100644
--- a/doc/ext/autodoc.rst
+++ b/doc/ext/autodoc.rst
@@ -103,8 +103,10 @@ inserting them into the page source under a suitable :rst:dir:`py:module`,
will document all non-private member functions and properties (that is,
those whose name doesn't start with ``_``).
- For modules, ``__all__`` will be respected when looking for members; the
- order of the members will also be the order in ``__all__``.
+ For modules, ``__all__`` will be respected when looking for members unless
+ you give the ``ignore-module-all`` flag option. Without
+ ``ignore-module-all``, the order of the members will also be the order in
+ ``__all__``.
You can also give an explicit list of members; only these will then be
documented::
@@ -339,7 +341,7 @@ There are also new config values that you can set:
This value is a list of autodoc directive flags that should be automatically
applied to all autodoc directives. The supported flags are ``'members'``,
``'undoc-members'``, ``'private-members'``, ``'special-members'``,
- ``'inherited-members'`` and ``'show-inheritance'``.
+ ``'inherited-members'``, ``'show-inheritance'`` and ``'ignore-module-all'``.
If you set one of these flags in this config value, you can use a negated
form, :samp:`'no-{flag}'`, in an autodoc directive, to disable it once.
@@ -393,6 +395,16 @@ There are also new config values that you can set:
If ``False`` is given, autodoc forcely suppresses the error if the imported
module emits warnings. By default, ``True``.
+.. confval:: autodoc_inherit_docstrings
+
+ This value controls the docstrings inheritance.
+ If set to True the docstring for classes or methods, if not explicitly set,
+ is inherited from parents.
+
+ The default is ``True``.
+
+ .. versionadded:: 1.7
+
Docstring preprocessing
-----------------------
diff --git a/doc/ext/autosummary.rst b/doc/ext/autosummary.rst
index d2e94e7da..c35ba50a5 100644
--- a/doc/ext/autosummary.rst
+++ b/doc/ext/autosummary.rst
@@ -125,6 +125,9 @@ text of the form::
If the ``-o`` option is not given, the script will place the output files in the
directories specified in the ``:toctree:`` options.
+For more information, refer to the :doc:`sphinx-autogen documentation
+</man/sphinx-autogen>`
+
Generating stub pages automatically
-----------------------------------
@@ -234,7 +237,7 @@ Additionally, the following filters are available
.. function:: escape(s)
Escape any special characters in the text to be used in formatting RST
- contexts. For instance, this prevents asterisks making things bolt. This
+ contexts. For instance, this prevents asterisks making things bold. This
replaces the builtin Jinja `escape filter`_ that does html-escaping.
.. function:: underline(s, line='=')
diff --git a/doc/ext/inheritance.rst b/doc/ext/inheritance.rst
index bd287aa49..231b5fdaa 100644
--- a/doc/ext/inheritance.rst
+++ b/doc/ext/inheritance.rst
@@ -66,3 +66,13 @@ New config values are:
.. confval:: inheritance_edge_attrs
A dictionary of graphviz edge attributes for inheritance diagrams.
+
+.. confval:: inheritance_alias
+
+ Allows mapping the full qualified name of the class to custom values
+ (useful when exposing the underlying path of a class is not desirable,
+ e.g. it's a private class and should not be instantiated by the user).
+
+ For example::
+
+ inheritance_alias = {'_pytest.Magic': 'pytest.Magic'}
diff --git a/doc/ext/math.rst b/doc/ext/math.rst
index ca53f3a8e..4097bb29e 100644
--- a/doc/ext/math.rst
+++ b/doc/ext/math.rst
@@ -37,6 +37,22 @@ or use Python raw strings (``r"raw"``).
Set this option to ``True`` if you want all displayed math to be numbered.
The default is ``False``.
+.. confval:: math_eqref_format
+
+ A string used to format the labels of references to equations.
+ As a special character, ``{number}`` will be replaced with the equation number.
+
+ Example: ``'Eq.{number}'`` is rendered as ``Eq.10``
+
+.. confval:: math_numfig
+
+ If ``True``, displayed math equations are numbered across pages when
+ :confval:`numfig` is enabled. The :confval:`numfig_secnum_depth` setting
+ is respected. The :rst:role:`eq`, not :rst:role:`numref`, role
+ must be used to reference equation numbers. Default is ``True``.
+
+ .. versionadded:: 1.7
+
:mod:`.mathbase` defines these new markup elements:
.. rst:role:: math
@@ -95,8 +111,7 @@ or use Python raw strings (``r"raw"``).
.. rst:role:: eq
- Role for cross-referencing equations via their label. This currently works
- only within the same document. Example::
+ Role for cross-referencing equations via their label. Example::
.. math:: e^{i\pi} + 1 = 0
:label: euler
diff --git a/doc/extdev/markupapi.rst b/doc/extdev/markupapi.rst
index 533660c84..df23f164d 100644
--- a/doc/extdev/markupapi.rst
+++ b/doc/extdev/markupapi.rst
@@ -70,14 +70,6 @@ using :meth:`.Sphinx.add_directive` or :meth:`.Sphinx.add_directive_to_domain`.
The absolute line number on which the directive appeared. This is not
always a useful value; use :attr:`srcline` instead.
- .. attribute:: src
-
- The source file of the directive.
-
- .. attribute:: srcline
-
- The line number in the source file on which the directive appeared.
-
.. attribute:: content_offset
Internal offset of the directive content. Used when calling
diff --git a/doc/invocation.rst b/doc/invocation.rst
deleted file mode 100644
index 6cb16e919..000000000
--- a/doc/invocation.rst
+++ /dev/null
@@ -1,547 +0,0 @@
-.. default-role:: any
-
-.. _invocation:
-
-Invocation of sphinx-quickstart
-===============================
-
-The :program:`sphinx-quickstart` script generates a Sphinx documentation set.
-It is called like this:
-
-.. code-block:: console
-
- $ sphinx-quickstart [options] [projectdir]
-
-where *projectdir* is the Sphinx documentation set directory in which you want
-to place. If you omit *projectdir*, files are generated into current directory
-by default.
-
-The :program:`sphinx-quickstart` script has several options:
-
-.. program:: sphinx-quickstart
-
-.. option:: -q, --quiet
-
- Quiet mode that will skips interactive wizard to specify options.
- This option requires `-p`, `-a` and `-v` options.
-
-.. option:: -h, --help, --version
-
- Display usage summary or Sphinx version.
-
-
-Structure options
------------------
-
-.. option:: --sep
-
- If specified, separate source and build directories.
-
-.. option:: --dot=DOT
-
- Inside the root directory, two more directories will be created;
- "_templates" for custom HTML templates and "_static" for custom stylesheets
- and other static files. You can enter another prefix (such as ".") to
- replace the underscore.
-
-Project basic options
----------------------
-
-.. option:: -p PROJECT, --project=PROJECT
-
- Project name will be set. (see :confval:`project`).
-
-.. option:: -a AUTHOR, --author=AUTHOR
-
- Author names. (see :confval:`copyright`).
-
-.. option:: -v VERSION
-
- Version of project. (see :confval:`version`).
-
-.. option:: -r RELEASE, --release=RELEASE
-
- Release of project. (see :confval:`release`).
-
-.. option:: -l LANGUAGE, --language=LANGUAGE
-
- Document language. (see :confval:`language`).
-
-.. option:: --suffix=SUFFIX
-
- Source file suffix. (see :confval:`source_suffix`).
-
-.. option:: --master=MASTER
-
- Master document name. (see :confval:`master_doc`).
-
-.. option:: --epub
-
- Use epub.
-
-Extension options
------------------
-
-.. option:: --ext-autodoc
-
- Enable `sphinx.ext.autodoc` extension.
-
-.. option:: --ext-doctest
-
- Enable `sphinx.ext.doctest` extension.
-
-.. option:: --ext-intersphinx
-
- Enable `sphinx.ext.intersphinx` extension.
-
-.. option:: --ext-todo
-
- Enable `sphinx.ext.todo` extension.
-
-.. option:: --ext-coverage
-
- Enable `sphinx.ext.coverage` extension.
-
-.. option:: --ext-imgmath
-
- Enable `sphinx.ext.imgmath` extension.
-
-.. option:: --ext-mathjax
-
- Enable `sphinx.ext.mathjax` extension.
-
-.. option:: --ext-ifconfig
-
- Enable `sphinx.ext.ifconfig` extension.
-
-.. option:: --ext-viewcode
-
- Enable `sphinx.ext.viewcode` extension.
-
-.. option:: --extensions=EXTENSIONS
-
- Enable arbitary extensions.
-
-
-Makefile and Batchfile creation options
----------------------------------------
-
-.. option:: --use-make-mode, --no-use-make-mode
-
- Makefile/make.bat uses (or not use) make-mode. Default is use.
-
- .. versionchanged:: 1.5
- make-mode is default.
-
-.. option:: --makefile, --no-makefile
-
- Create (or not create) makefile.
-
-.. option:: --batchfile, --no-batchfile
-
- Create (or not create) batchfile
-
-
-.. versionadded:: 1.3
- Add various options for sphinx-quickstart invocation.
-
-Project templating
-------------------
-
-.. option:: -t, --templatedir=TEMPLATEDIR
-
- Template directory for template files. You can modify the templates of
- sphinx project files generated by quickstart. Following Jinja2 template
- files are allowed:
-
- * master_doc.rst_t
- * conf.py_t
- * Makefile_t
- * Makefile.new_t
- * make.bat_t
- * make.bat.new_t
-
- In detail, please refer the system template files Sphinx provides.
- (sphinx/templates/quickstart)
-
-.. option:: -d NAME=VALUE
-
- Define a template variable
-
-.. versionadded:: 1.5
- Project templating options for sphinx-quickstart
-
-
-Invocation of sphinx-build
-==========================
-
-The :program:`sphinx-build` script builds a Sphinx documentation set. It is
-called like this:
-
-.. code-block:: console
-
- $ sphinx-build [options] sourcedir builddir [filenames]
-
-where *sourcedir* is the :term:`source directory`, and *builddir* is the
-directory in which you want to place the built documentation. Most of the time,
-you don't need to specify any *filenames*.
-
-The :program:`sphinx-build` script has several options:
-
-.. program:: sphinx-build
-
-.. option:: -b buildername
-
- The most important option: it selects a builder. The most common builders
- are:
-
- **html**
- Build HTML pages. This is the default builder.
-
- **dirhtml**
- Build HTML pages, but with a single directory per document. Makes for
- prettier URLs (no ``.html``) if served from a webserver.
-
- **singlehtml**
- Build a single HTML with the whole content.
-
- **htmlhelp**, **qthelp**, **devhelp**, **epub**
- Build HTML files with additional information for building a documentation
- collection in one of these formats.
-
- **applehelp**
- Build an Apple Help Book. Requires :program:`hiutil` and
- :program:`codesign`, which are not Open Source and presently only
- available on Mac OS X 10.6 and higher.
-
- **latex**
- Build LaTeX sources that can be compiled to a PDF document using
- :program:`pdflatex`.
-
- **man**
- Build manual pages in groff format for UNIX systems.
-
- **texinfo**
- Build Texinfo files that can be processed into Info files using
- :program:`makeinfo`.
-
- **text**
- Build plain text files.
-
- **gettext**
- Build gettext-style message catalogs (``.pot`` files).
-
- **doctest**
- Run all doctests in the documentation, if the :mod:`~sphinx.ext.doctest`
- extension is enabled.
-
- **linkcheck**
- Check the integrity of all external links.
-
- **xml**
- Build Docutils-native XML files.
-
- **pseudoxml**
- Build compact pretty-printed "pseudo-XML" files displaying the
- internal structure of the intermediate document trees.
-
- See :ref:`builders` for a list of all builders shipped with Sphinx.
- Extensions can add their own builders.
-
-.. option:: -a
-
- If given, always write all output files. The default is to only write output
- files for new and changed source files. (This may not apply to all
- builders.)
-
-.. option:: -E
-
- Don't use a saved :term:`environment` (the structure caching all
- cross-references), but rebuild it completely. The default is to only read
- and parse source files that are new or have changed since the last run.
-
-.. option:: -t tag
-
- Define the tag *tag*. This is relevant for :rst:dir:`only` directives that
- only include their content if this tag is set.
-
- .. versionadded:: 0.6
-
-.. option:: -d path
-
- Since Sphinx has to read and parse all source files before it can write an
- output file, the parsed source files are cached as "doctree pickles".
- Normally, these files are put in a directory called :file:`.doctrees` under
- the build directory; with this option you can select a different cache
- directory (the doctrees can be shared between all builders).
-
-.. option:: -j N
-
- Distribute the build over *N* processes in parallel, to make building on
- multiprocessor machines more effective. Note that not all parts and not all
- builders of Sphinx can be parallelized.
-
- .. versionadded:: 1.2
- This option should be considered *experimental*.
-
-.. option:: -c path
-
- Don't look for the :file:`conf.py` in the source directory, but use the given
- configuration directory instead. Note that various other files and paths
- given by configuration values are expected to be relative to the
- configuration directory, so they will have to be present at this location
- too.
-
- .. versionadded:: 0.3
-
-.. option:: -C
-
- Don't look for a configuration file; only take options via the ``-D`` option.
-
- .. versionadded:: 0.5
-
-.. option:: -D setting=value
-
- Override a configuration value set in the :file:`conf.py` file. The value
- must be a number, string, list or dictionary value.
-
- For lists, you can separate elements with a comma like this: ``-D
- html_theme_path=path1,path2``.
-
- For dictionary values, supply the setting name and key like this:
- ``-D latex_elements.docclass=scrartcl``.
-
- For boolean values, use ``0`` or ``1`` as the value.
-
- .. versionchanged:: 0.6
- The value can now be a dictionary value.
-
- .. versionchanged:: 1.3
- The value can now also be a list value.
-
-.. option:: -A name=value
-
- Make the *name* assigned to *value* in the HTML templates.
-
- .. versionadded:: 0.5
-
-.. option:: -n
-
- Run in nit-picky mode. Currently, this generates warnings for all missing
- references. See the config value :confval:`nitpick_ignore` for a way to
- exclude some references as "known missing".
-
-.. option:: -N
-
- Do not emit colored output.
-
-.. option:: -v
-
- Increase verbosity (loglevel). This option can be given up to three times
- to get more debug logging output. It implies :option:`-T`.
-
- .. versionadded:: 1.2
-
-.. option:: -q
-
- Do not output anything on standard output, only write warnings and errors to
- standard error.
-
-.. option:: -Q
-
- Do not output anything on standard output, also suppress warnings. Only
- errors are written to standard error.
-
-.. option:: -w file
-
- Write warnings (and errors) to the given file, in addition to standard error.
-
-.. option:: -W
-
- Turn warnings into errors. This means that the build stops at the first
- warning and ``sphinx-build`` exits with exit status 1.
-
-.. option:: -T
-
- Display the full traceback when an unhandled exception occurs. Otherwise,
- only a summary is displayed and the traceback information is saved to a file
- for further analysis.
-
- .. versionadded:: 1.2
-
-.. option:: -P
-
- (Useful for debugging only.) Run the Python debugger, :mod:`pdb`, if an
- unhandled exception occurs while building.
-
-.. option:: -h, --help, --version
-
- Display usage summary or Sphinx version.
-
- .. versionadded:: 1.2
-
-You can also give one or more filenames on the command line after the source and
-build directories. Sphinx will then try to build only these output files (and
-their dependencies).
-
-Environment variables
----------------------
-
-The :program:`sphinx-build` refers to the following environment variables:
-
-.. describe:: MAKE
-
- A path to the make command. A command name is also allowed.
- :program:`sphinx-build` uses it to invoke the sub-build process in make-mode.
-
-Makefile options
-----------------
-
-The :file:`Makefile` and :file:`make.bat` files created by
-:program:`sphinx-quickstart` usually run :program:`sphinx-build` only with the
-:option:`-b` and :option:`-d` options. However, they support the following
-variables to customize behavior:
-
-.. describe:: PAPER
-
- The value for the ``"papersize"`` key of :confval:`latex_elements`.
-
-.. describe:: SPHINXBUILD
-
- The command to use instead of ``sphinx-build``.
-
-.. describe:: BUILDDIR
-
- The build directory to use instead of the one chosen in
- :program:`sphinx-quickstart`.
-
-.. describe:: SPHINXOPTS
-
- Additional options for :program:`sphinx-build`.
-
-.. _when-deprecation-warnings-are-displayed:
-
-Deprecation Warnings
---------------------
-
-If any deprecation warnings like ``RemovedInSphinxXXXWarning`` are displayed
-when building a user's document, some Sphinx extension is using deprecated
-features. In that case, please report it to the author of the extension.
-
-To disable the deprecation warnings, please set ``PYTHONWARNINGS=`` environment
-variable to your environment. For example:
-
-* ``PYTHONWARNINGS= make html`` (Linux/Mac)
-* ``export PYTHONWARNINGS=`` and do ``make html`` (Linux/Mac)
-* ``set PYTHONWARNINGS=`` and do ``make html`` (Windows)
-* modify your Makefile/make.bat and set the environment variable
-
-
-.. _invocation-apidoc:
-
-Invocation of sphinx-apidoc
-===========================
-
-The :program:`sphinx-apidoc` generates completely automatic API documentation
-for a Python package. It is called like this:
-
-.. code-block:: console
-
- $ sphinx-apidoc [options] -o outputdir packagedir [pathnames]
-
-where *packagedir* is the path to the package to document, and *outputdir* is
-the directory where the generated sources are placed. Any *pathnames* given
-are paths to be excluded from generation.
-
-.. warning::
-
- ``sphinx-apidoc`` generates reST files that use :mod:`sphinx.ext.autodoc` to
- document all found modules. If any modules have side effects on import,
- these will be executed by ``autodoc`` when ``sphinx-build`` is run.
-
- If you document scripts (as opposed to library modules), make sure their main
- routine is protected by a ``if __name__ == '__main__'`` condition.
-
-
-The :program:`sphinx-apidoc` script has several options:
-
-.. program:: sphinx-apidoc
-
-.. option:: -o outputdir
-
- Gives the directory in which to place the generated output.
-
-.. option:: -f, --force
-
- Normally, sphinx-apidoc does not overwrite any files. Use this option to
- force the overwrite of all files that it generates.
-
-.. option:: -n, --dry-run
-
- With this option given, no files will be written at all.
-
-.. option:: -s suffix
-
- This option selects the file name suffix of output files. By default, this
- is ``rst``.
-
-.. option:: -d maxdepth
-
- This sets the maximum depth of the table of contents, if one is generated.
-
-.. option:: -l, --follow-links
-
- This option makes sphinx-apidoc follow symbolic links when recursing the
- filesystem to discover packages and modules. You may need it if you want
- to generate documentation from a source directory managed by
- `collective.recipe.omelette
- <https://pypi.python.org/pypi/collective.recipe.omelette/>`_.
- By default, symbolic links are skipped.
-
- .. versionadded:: 1.2
-
-.. option:: -T, --no-toc
-
- This prevents the generation of a table-of-contents file ``modules.rst``.
- This has no effect when :option:`--full` is given.
-
-.. option:: -F, --full
-
- This option makes sphinx-apidoc create a full Sphinx project, using the same
- mechanism as :program:`sphinx-quickstart`. Most configuration values are set
- to default values, but you can influence the most important ones using the
- following options.
-
-.. option:: --implicit-namespaces
-
- By default sphinx-apidoc processes sys.path searching for modules only.
- Python 3.3 introduced :pep:`420` implicit namespaces that allow module path
- structures such as ``foo/bar/module.py`` or ``foo/bar/baz/__init__.py``
- (notice that ``bar`` and ``foo`` are namespaces, not modules).
-
- Specifying this option interprets paths recursively according to PEP-0420.
-
-.. option:: -M
-
- This option makes sphinx-apidoc put module documentation before submodule
- documentation.
-
-.. option:: -a
-
- Append module_path to sys.path.
-
-.. option:: -H project
-
- Sets the project name to put in generated files (see :confval:`project`).
-
-.. option:: -A author
-
- Sets the author name(s) to put in generated files (see :confval:`copyright`).
-
-.. option:: -V version
-
- Sets the project version to put in generated files (see :confval:`version`).
-
-.. option:: -R release
-
- Sets the project release to put in generated files (see :confval:`release`).
diff --git a/doc/latex.rst b/doc/latex.rst
index 2197efcd6..87117c164 100644
--- a/doc/latex.rst
+++ b/doc/latex.rst
@@ -190,14 +190,25 @@ The available styling options
default ``true``. Tells whether long lines in :rst:dir:`code-block`\ 's
contents should wrap.
+``literalblockcappos``
+ default ``t`` for "top". Decides the caption position. Alternative is
+ ``b`` ("bottom").
+
+ .. versionadded:: 1.7
+
``verbatimhintsturnover``
- default ``false``. If ``true``, code-blocks display "continued on next
+ default ``true``. If ``true``, code-blocks display "continued on next
page", "continued from previous page" hints in case of pagebreaks.
.. versionadded:: 1.6.3
- the default will change to ``true`` at 1.7 and horizontal positioning
- of continuation hints (currently right aligned only) will be
- customizable.
+ .. versionchanged:: 1.7
+ the default changed from ``false`` to ``true``.
+
+``verbatimcontinuedalign``, ``verbatimcontinuesalign``
+ default ``c``. Horizontal position relative to the framed contents:
+ either ``l`` (left aligned), ``r`` (right aligned) or ``c`` (centered).
+
+ .. versionadded:: 1.7
``parsedliteralwraps``
default ``true``. Tells whether long lines in :dudir:`parsed-literal`\ 's
@@ -356,26 +367,15 @@ Macros
- text styling commands ``\sphinx<foo>`` with ``<foo>`` being one of
``strong``, ``bfcode``, ``email``, ``tablecontinued``, ``titleref``,
``menuselection``, ``accelerator``, ``crossref``, ``termref``, ``optional``.
- The non-prefixed macros will still be defined if option
- :confval:`latex_keep_old_macro_names` has been set to ``True`` (default is
- ``False``), in which case the prefixed macros expand to the non-prefixed
- ones.
.. versionadded:: 1.4.5
Use of ``\sphinx`` prefixed macro names to limit possibilities of conflict
with LaTeX packages.
- .. versionchanged:: 1.6
- The default value of :confval:`latex_keep_old_macro_names` changes to
- ``False``, and even if set to ``True``, if a non-prefixed macro
- already exists at ``sphinx.sty`` loading time, only the ``\sphinx``
- prefixed one will be defined. The setting will be removed at 1.7.
-
- more text styling: ``\sphinxstyle<bar>`` with ``<bar>`` one of
``indexentry``, ``indexextra``, ``indexpageref``, ``topictitle``,
- ``sidebartitle``, ``othertitle``, ``sidebarsubtitle``, ``thead``,
- ``theadfamily``, ``emphasis``, ``literalemphasis``, ``strong``,
- ``literalstrong``, ``abbreviation``, ``literalintitle``, ``codecontinued``,
- ``codecontinues``.
+ ``sidebartitle``, ``othertitle``, ``sidebarsubtitle``, ``theadfamily``,
+ ``emphasis``, ``literalemphasis``, ``strong``, ``literalstrong``,
+ ``abbreviation``, ``literalintitle``, ``codecontinued``, ``codecontinues``
.. versionadded:: 1.5
these macros were formerly hard-coded as non customizable ``\texttt``,
@@ -383,8 +383,6 @@ Macros
.. versionadded:: 1.6
``\sphinxstyletheadfamily`` which defaults to ``\sffamily`` and allows
multiple paragraphs in header cells of tables.
- .. deprecated:: 1.6
- macro ``\sphinxstylethead`` is deprecated at 1.6 and will be removed at 1.7.
.. versionadded:: 1.6.3
``\sphinxstylecodecontinued`` and ``\sphinxstylecodecontinues``.
- by default the Sphinx style file ``sphinx.sty`` executes the command
diff --git a/doc/man/index.rst b/doc/man/index.rst
new file mode 100644
index 000000000..c2ca3f065
--- /dev/null
+++ b/doc/man/index.rst
@@ -0,0 +1,22 @@
+Man Pages
+=========
+
+These are the applications provided as part of Sphinx.
+
+Core Applications
+-----------------
+
+.. toctree::
+ :maxdepth: 3
+
+ sphinx-quickstart
+ sphinx-build
+
+Additional Applications
+-----------------------
+
+.. toctree::
+ :maxdepth: 3
+
+ sphinx-apidoc
+ sphinx-autogen
diff --git a/doc/man/sphinx-apidoc.rst b/doc/man/sphinx-apidoc.rst
index be0c3d3a3..9a13f1401 100644
--- a/doc/man/sphinx-apidoc.rst
+++ b/doc/man/sphinx-apidoc.rst
@@ -1,23 +1,21 @@
-:orphan:
-
-sphinx-apidoc manual page
-=========================
+sphinx-apidoc
+=============
Synopsis
--------
**sphinx-apidoc** [*options*] -o <*outputdir*> <*sourcedir*> [*pathnames* ...]
-
Description
-----------
:program:`sphinx-apidoc` is a tool for automatic generation of Sphinx sources
-that, using the autodoc extension, document a whole package in the style of
-other automatic API documentation tools.
+that, using the :rst:dir:`autodoc` extension, document a whole package in the
+style of other automatic API documentation tools.
-*sourcedir* must point to a Python package. Any *pathnames* given are paths to
-be excluded from the generation.
+*sourcedir* is the path to a Python package to document, and *outputdir* is the
+directory where the generated sources are placed. Any *pathnames* given are
+paths to be excluded from the generation.
.. warning::
@@ -28,42 +26,107 @@ be excluded from the generation.
If you document scripts (as opposed to library modules), make sure their main
routine is protected by a ``if __name__ == '__main__'`` condition.
-
Options
-------
--o <outputdir> Directory to place the output files. If it does not exist,
- it is created.
--f, --force Usually, apidoc does not overwrite files, unless this option
- is given.
--l, --follow-links Follow symbolic links.
--n, --dry-run If given, apidoc does not create any files.
--s <suffix> Suffix for the source files generated, default is ``rst``.
--d <maxdepth> Maximum depth for the generated table of contents file.
--T, --no-toc Do not create a table of contents file.
--F, --full If given, a full Sphinx project is generated (``conf.py``,
- ``Makefile`` etc.) using sphinx-quickstart.
--e, --separate Put each module file in its own page.
--E, --no-headings Don't create headings for the modules/packages
--P, --private Include "_private" modules
-
-These options are used with ``-F``:
-
--a Append module_path to sys.path.
--H <project> Project name to put into the configuration.
--A <author> Author name(s) to put into the configuration.
--V <version> Project version.
--R <release> Project release.
+.. program:: sphinx-apidoc
+.. option:: -o <outputdir>
-See also
---------
+ Directory to place the output files. If it does not exist, it is created.
+
+.. option:: -f, --force
+
+ Force overwriting of any existing generated files.
+
+.. option:: -l, --follow-links
+
+ Follow symbolic links.
+
+.. option:: -n, --dry-run
+
+ Do not create any files.
+
+.. option:: -s <suffix>
+
+ Suffix for the source files generated. Defaults to ``rst``.
+
+.. option:: -d <maxdepth>
+
+ Maximum depth for the generated table of contents file.
+
+.. option:: -T, --no-toc
+
+ Do not create a table of contents file. Ignored when :option:`--full` is
+ provided.
+
+.. option:: -F, --full
+
+ Generate a full Sphinx project (``conf.py``, ``Makefile`` etc.) using
+ the same mechanism as :program:`sphinx-quickstart`.
+
+.. option:: -e, --separate
+
+ Put documentation for each module on its own page.
+
+ .. versionadded:: 1.2
+
+.. option:: -E, --no-headings
+
+ Do not create headings for the modules/packages. This is useful, for
+ example, when docstrings already contain headings.
-:manpage:`sphinx-build(1)`
+.. option:: -P, --private
+ Include "_private" modules.
-Author
-------
+ .. versionadded:: 1.2
+
+.. option:: --implicit-namespaces
+
+ By default sphinx-apidoc processes sys.path searching for modules only.
+ Python 3.3 introduced :pep:`420` implicit namespaces that allow module path
+ structures such as ``foo/bar/module.py`` or ``foo/bar/baz/__init__.py``
+ (notice that ``bar`` and ``foo`` are namespaces, not modules).
+
+ Interpret paths recursively according to PEP-0420.
+
+.. option:: -M, --module-first
+
+ Put module documentation before submodule documentation.
+
+These options are used when :option:`--full` is specified:
+
+.. option:: -a
+
+ Append module_path to sys.path.
+
+.. option:: -H <project>
+
+ Sets the project name to put in generated files (see :confval:`project`).
+
+.. option:: -A <author>
+
+ Sets the author name(s) to put in generated files (see
+ :confval:`copyright`).
+
+.. option:: -V <version>
+
+ Sets the project version to put in generated files (see :confval:`version`).
+
+.. option:: -R <release>
+
+ Sets the project release to put in generated files (see :confval:`release`).
+
+Environment
+-----------
+
+.. envvar:: SPHINX_APIDOC_OPTIONS
+
+ A comma-separated list of options to append to generated ``automodule``
+ directives. Defaults to ``members,undoc-members,show-inheritance``.
+
+See also
+--------
-Etienne Desautels, <etienne.desautels@gmail.com>, Georg Brandl
-<georg@python.org> et al.
+:manpage:`sphinx-build(1)`, :manpage:`sphinx-autogen(1)`
diff --git a/doc/man/sphinx-autogen.rst b/doc/man/sphinx-autogen.rst
new file mode 100644
index 000000000..49a8220d0
--- /dev/null
+++ b/doc/man/sphinx-autogen.rst
@@ -0,0 +1,93 @@
+sphinx-autogen
+==============
+
+Synopsis
+--------
+
+**sphinx-autogen** [*options*] <sourcefile> ...
+
+Description
+-----------
+
+:program:`sphinx-autogen` is a tool for automatic generation of Sphinx sources
+that, using the :rst:dir:`autodoc` extension, document items included in
+:rst:dir:`autosummary` listing(s).
+
+*sourcefile* is the path to one or more reStructuredText documents containing
+:rst:dir:`autosummary` entries with the ``:toctree:`` option set. *sourcefile*
+can be an :py:mod:`fnmatch`-style pattern.
+
+Options
+-------
+
+.. program:: sphinx-autogen
+
+.. option:: -o <outputdir>
+
+ Directory to place the output file. If it does not exist, it is created.
+ Defaults to the value passed to the ``:toctree:`` option.
+
+.. option:: -s <suffix>, --suffix <suffix>
+
+ Default suffix to use for generated files. Defaults to ``rst``.
+
+.. option:: -t <templates>, --templates <templates>
+
+ Custom template directory. Defaults to ``None``.
+
+.. option:: -i, --imported-members
+
+ Document imported members.
+
+Example
+-------
+
+Given the following directory structure::
+
+ docs
+ ├── index.rst
+ └── ...
+ foobar
+ ├── foo
+ │ └── __init__.py
+ └── bar
+ ├── __init__.py
+ └── baz
+ └── __init__.py
+
+and assuming ``docs/index.rst`` contained the following:
+
+.. code-block:: rst
+
+ Modules
+ =======
+
+ .. autosummary::
+ :toctree: modules
+
+ foobar.foo
+ foobar.bar
+ foobar.bar.baz
+
+If you run the following:
+
+.. code-block:: bash
+
+ $ sphinx-autogen docs/index.rst
+
+then the following stub files will be created in ``docs``::
+
+ docs
+ ├── index.rst
+ └── modules
+ ├── foobar.bar.rst
+ ├── foobar.bar.baz.rst
+ └── foobar.foo.rst
+
+and each of those files will contain a :rst:dir:`autodoc` directive and some
+other information.
+
+See also
+--------
+
+:manpage:`sphinx-build(1)`, :manpage:`sphinx-apidoc(1)`
diff --git a/doc/man/sphinx-build.rst b/doc/man/sphinx-build.rst
index 13564ff4d..46f213989 100644
--- a/doc/man/sphinx-build.rst
+++ b/doc/man/sphinx-build.rst
@@ -1,132 +1,307 @@
-:orphan:
-
-sphinx-build manual page
-========================
+sphinx-build
+============
Synopsis
--------
-**sphinx-build** [*options*] <*sourcedir*> <*outdir*> [*filenames* ...]
-
+**sphinx-build** [*options*] <*sourcedir*> <*outputdir*> [*filenames* ...]
Description
-----------
:program:`sphinx-build` generates documentation from the files in
-``<sourcedir>`` and places it in the ``<outdir>``.
+``<sourcedir>`` and places it in the ``<outputdir>``.
:program:`sphinx-build` looks for ``<sourcedir>/conf.py`` for the configuration
settings. :manpage:`sphinx-quickstart(1)` may be used to generate template
files, including ``conf.py``.
-:program:`sphinx-build` can create documentation in different formats. A format
-is selected by specifying the builder name on the command line; it defaults to
-HTML. Builders can also perform other tasks related to documentation
-processing.
+:program:`sphinx-build` can create documentation in different formats. A
+format is selected by specifying the builder name on the command line; it
+defaults to HTML. Builders can also perform other tasks related to
+documentation processing.
By default, everything that is outdated is built. Output only for selected
files can be built by specifying individual filenames.
-List of available builders:
+For a list of available builders, refer to :option:`sphinx-build -b`.
-html
- HTML file generation. This is the default builder.
+Options
+-------
-dirhtml
- HTML file generation with every HTML file named "index.html" in a separate
- directory.
+.. program:: sphinx-build
-singlehtml
- HTML file generation with all content in a single HTML file.
+.. option:: -b buildername
-htmlhelp
- Generates files for CHM (compiled help files) generation.
+ The most important option: it selects a builder. The most common builders
+ are:
-qthelp
- Generates files for Qt help collection generation.
+ **html**
+ Build HTML pages. This is the default builder.
-devhelp
- Generates files for the GNOME Devhelp help viewer.
+ **dirhtml**
+ Build HTML pages, but with a single directory per document. Makes for
+ prettier URLs (no ``.html``) if served from a webserver.
-latex
- Generates LaTeX output that can be compiled to a PDF document.
+ **singlehtml**
+ Build a single HTML with the whole content.
-man
- Generates manual pages.
+ **htmlhelp**, **qthelp**, **devhelp**, **epub**
+ Build HTML files with additional information for building a documentation
+ collection in one of these formats.
-texinfo
- Generates Texinfo output that can be processed by :program:`makeinfo` to
- generate an Info document.
+ **applehelp**
+ Build an Apple Help Book. Requires :program:`hiutil` and
+ :program:`codesign`, which are not Open Source and presently only
+ available on Mac OS X 10.6 and higher.
-epub
- Generates an ePub e-book version of the HTML output.
+ **latex**
+ Build LaTeX sources that can be compiled to a PDF document using
+ :program:`pdflatex`.
-text
- Generates a plain-text version of the documentation.
+ **man**
+ Build manual pages in groff format for UNIX systems.
-gettext
- Generates Gettext message catalogs for content translation.
+ **texinfo**
+ Build Texinfo files that can be processed into Info files using
+ :program:`makeinfo`.
-changes
- Generates HTML files listing changed/added/deprecated items for
- the current version of the documented project.
+ **text**
+ Build plain text files.
-linkcheck
- Checks the integrity of all external links in the source.
+ **gettext**
+ Build gettext-style message catalogs (``.pot`` files).
-pickle / json
- Generates serialized HTML files for use in web applications.
+ **doctest**
+ Run all doctests in the documentation, if the :mod:`~sphinx.ext.doctest`
+ extension is enabled.
-xml
- Generates Docutils-native XML files.
+ **linkcheck**
+ Check the integrity of all external links.
-pseudoxml
- Generates compact pretty-printed "pseudo-XML" files displaying the
- internal structure of the intermediate document trees.
+ **xml**
+ Build Docutils-native XML files.
+ **pseudoxml**
+ Build compact pretty-printed "pseudo-XML" files displaying the
+ internal structure of the intermediate document trees.
-Options
--------
+ See :ref:`builders` for a list of all builders shipped with Sphinx.
+ Extensions can add their own builders.
--b <builder> Builder to use; defaults to html. See the full list
- of builders above.
--a Generate output for all files; without this option only
- output for new and changed files is generated.
--E Ignore cached files, forces to re-read all source files
- from disk.
--d <path> Path to cached files; defaults to <outdir>/.doctrees.
--j <N> Build in parallel with N processes where possible.
--c <path> Locate the conf.py file in the specified path instead of
- <sourcedir>.
--C Specify that no conf.py file at all is to be used.
- Configuration can only be set with the -D option.
--D <setting=value> Override a setting from the configuration file.
--t <tag> Define *tag* for use in "only" blocks.
--A <name=value> Pass a value into the HTML templates (only for HTML
- builders).
--n Run in nit-picky mode, warn about all missing references.
--v Increase verbosity (can be repeated).
--N Prevent colored output.
--q Quiet operation, just print warnings and errors on stderr.
--Q Very quiet operation, don't print anything except for
- errors.
--w <file> Write warnings and errors into the given file, in addition
- to stderr.
--W Turn warnings into errors.
--T Show full traceback on exception.
--P Run Pdb on exception.
+.. _make_mode:
+.. option:: -M buildername
-See also
---------
+ Alternative to :option:`-b`. Uses the Sphinx :program:`make_mode` module,
+ which provides the same build functionality as a default :ref:`Makefile or
+ Make.bat <makefile_options>`. In addition to all Sphinx
+ :ref:`builders <builders>`, the following build pipelines are available:
-:manpage:`sphinx-quickstart(1)`
+ **latexpdf**
+ Build LaTeX files and run them through :program:`pdflatex`, or as per
+ :confval:`latex_engine` setting.
+ If :confval:`language` is set to ``'ja'``, will use automatically
+ the :program:`platex/dvipdfmx` latex to PDF pipeline.
+
+ **info**
+ Build Texinfo files and run them through :program:`makeinfo`.
+
+ .. important::
+ Sphinx only recognizes the ``-M`` option if it is placed first.
+
+ .. versionadded:: 1.2.1
+
+.. option:: -a
+
+ If given, always write all output files. The default is to only write output
+ files for new and changed source files. (This may not apply to all
+ builders.)
+
+.. option:: -E
+
+ Don't use a saved :term:`environment` (the structure caching all
+ cross-references), but rebuild it completely. The default is to only read
+ and parse source files that are new or have changed since the last run.
+
+.. option:: -t tag
+
+ Define the tag *tag*. This is relevant for :rst:dir:`only` directives that
+ only include their content if this tag is set.
+
+ .. versionadded:: 0.6
+
+.. option:: -d path
+
+ Since Sphinx has to read and parse all source files before it can write an
+ output file, the parsed source files are cached as "doctree pickles".
+ Normally, these files are put in a directory called :file:`.doctrees` under
+ the build directory; with this option you can select a different cache
+ directory (the doctrees can be shared between all builders).
+
+.. option:: -j N
+
+ Distribute the build over *N* processes in parallel, to make building on
+ multiprocessor machines more effective. Note that not all parts and not all
+ builders of Sphinx can be parallelized.
+
+ .. versionadded:: 1.2
+ This option should be considered *experimental*.
+
+.. option:: -c path
+
+ Don't look for the :file:`conf.py` in the source directory, but use the given
+ configuration directory instead. Note that various other files and paths
+ given by configuration values are expected to be relative to the
+ configuration directory, so they will have to be present at this location
+ too.
+
+ .. versionadded:: 0.3
+
+.. option:: -C
+
+ Don't look for a configuration file; only take options via the ``-D`` option.
+
+ .. versionadded:: 0.5
+
+.. option:: -D setting=value
+
+ Override a configuration value set in the :file:`conf.py` file. The value
+ must be a number, string, list or dictionary value.
+
+ For lists, you can separate elements with a comma like this: ``-D
+ html_theme_path=path1,path2``.
+
+ For dictionary values, supply the setting name and key like this:
+ ``-D latex_elements.docclass=scrartcl``.
+
+ For boolean values, use ``0`` or ``1`` as the value.
+
+ .. versionchanged:: 0.6
+ The value can now be a dictionary value.
+
+ .. versionchanged:: 1.3
+ The value can now also be a list value.
+
+.. option:: -A name=value
+
+ Make the *name* assigned to *value* in the HTML templates.
+
+ .. versionadded:: 0.5
+
+.. option:: -n
+
+ Run in nit-picky mode. Currently, this generates warnings for all missing
+ references. See the config value :confval:`nitpick_ignore` for a way to
+ exclude some references as "known missing".
+
+.. option:: -N
-Author
-------
+ Do not emit colored output.
-Georg Brandl <georg@python.org>, Armin Ronacher <armin.ronacher@active-4.com> et
-al.
+.. option:: -v
-This manual page was initially written by Mikhail Gusarov
-<dottedmag@dottedmag.net>, for the Debian project.
+ Increase verbosity (loglevel). This option can be given up to three times
+ to get more debug logging output. It implies :option:`-T`.
+
+ .. versionadded:: 1.2
+
+.. option:: -q
+
+ Do not output anything on standard output, only write warnings and errors to
+ standard error.
+
+.. option:: -Q
+
+ Do not output anything on standard output, also suppress warnings. Only
+ errors are written to standard error.
+
+.. option:: -w file
+
+ Write warnings (and errors) to the given file, in addition to standard error.
+
+.. option:: -W
+
+ Turn warnings into errors. This means that the build stops at the first
+ warning and ``sphinx-build`` exits with exit status 1.
+
+.. option:: -T
+
+ Display the full traceback when an unhandled exception occurs. Otherwise,
+ only a summary is displayed and the traceback information is saved to a file
+ for further analysis.
+
+ .. versionadded:: 1.2
+
+.. option:: -P
+
+ (Useful for debugging only.) Run the Python debugger, :mod:`pdb`, if an
+ unhandled exception occurs while building.
+
+.. option:: -h, --help, --version
+
+ Display usage summary or Sphinx version.
+
+ .. versionadded:: 1.2
+
+You can also give one or more filenames on the command line after the source
+and build directories. Sphinx will then try to build only these output files
+(and their dependencies).
+
+Environment Variables
+---------------------
+
+The :program:`sphinx-build` refers to the following environment variables:
+
+.. describe:: MAKE
+
+ A path to the make command. A command name is also allowed.
+ :program:`sphinx-build` uses it to invoke the sub-build process in make-mode.
+
+.. _makefile_options:
+
+.. rubric:: Makefile Options
+
+The :file:`Makefile` and :file:`make.bat` files created by
+:program:`sphinx-quickstart` usually run :program:`sphinx-build` only with the
+:option:`-b` and :option:`-d` options. However, they support the following
+variables to customize behavior:
+
+.. describe:: PAPER
+
+ The value for the ``"papersize"`` key of :confval:`latex_elements`.
+
+.. describe:: SPHINXBUILD
+
+ The command to use instead of ``sphinx-build``.
+
+.. describe:: BUILDDIR
+
+ The build directory to use instead of the one chosen in
+ :program:`sphinx-quickstart`.
+
+.. describe:: SPHINXOPTS
+
+ Additional options for :program:`sphinx-build`.
+
+.. _when-deprecation-warnings-are-displayed:
+
+Deprecation Warnings
+--------------------
+
+If any deprecation warnings like ``RemovedInSphinxXXXWarning`` are displayed
+when building a user's document, some Sphinx extension is using deprecated
+features. In that case, please report it to the author of the extension.
+
+To disable the deprecation warnings, please set ``PYTHONWARNINGS=`` environment
+variable to your environment. For example:
+
+* ``PYTHONWARNINGS= make html`` (Linux/Mac)
+* ``export PYTHONWARNINGS=`` and do ``make html`` (Linux/Mac)
+* ``set PYTHONWARNINGS=`` and do ``make html`` (Windows)
+* modify your Makefile/make.bat and set the environment variable
+
+See also
+--------
+
+:manpage:`sphinx-quickstart(1)`
diff --git a/doc/man/sphinx-quickstart.rst b/doc/man/sphinx-quickstart.rst
index 172772610..c4bbc531b 100644
--- a/doc/man/sphinx-quickstart.rst
+++ b/doc/man/sphinx-quickstart.rst
@@ -1,14 +1,11 @@
-:orphan:
-
-sphinx-quickstart manual page
-=============================
+sphinx-quickstart
+=================
Synopsis
--------
**sphinx-quickstart**
-
Description
-----------
@@ -16,18 +13,153 @@ Description
about your project and then generates a complete documentation directory and
sample Makefile to be used with :manpage:`sphinx-build(1)`.
+Options
+-------
-See also
---------
+.. program:: sphinx-quickstart
-:manpage:`sphinx-build(1)`
+.. option:: -q, --quiet
+
+ Quiet mode that skips the interactive wizard used to specify options.
+ This option requires the `-p`, `-a` and `-v` options.
+
+.. option:: -h, --help, --version
+
+ Display usage summary or Sphinx version.
+
+.. rubric:: Structure Options
+
+.. option:: --sep
+
+ If specified, separate source and build directories.
+
+.. option:: --dot=DOT
+
+ Inside the root directory, two more directories will be created:
+ "_templates" for custom HTML templates and "_static" for custom stylesheets
+ and other static files. You can enter another prefix (such as ".") to
+ replace the underscore.
+
+.. rubric:: Project Basic Options
+
+.. option:: -p PROJECT, --project=PROJECT
+
+ Project name will be set. (see :confval:`project`).
+
+.. option:: -a AUTHOR, --author=AUTHOR
+
+ Author names. (see :confval:`copyright`).
+
+.. option:: -v VERSION
+
+ Version of project. (see :confval:`version`).
+
+.. option:: -r RELEASE, --release=RELEASE
+
+ Release of project. (see :confval:`release`).
+
+.. option:: -l LANGUAGE, --language=LANGUAGE
+
+ Document language. (see :confval:`language`).
+
+.. option:: --suffix=SUFFIX
+
+ Source file suffix. (see :confval:`source_suffix`).
+
+.. option:: --master=MASTER
+
+ Master document name. (see :confval:`master_doc`).
+
+.. option:: --epub
+
+ Use epub.
+
+.. rubric:: Extension Options
+
+.. option:: --ext-autodoc
+
+ Enable `sphinx.ext.autodoc` extension.
+
+.. option:: --ext-doctest
+
+ Enable `sphinx.ext.doctest` extension.
-Author
-------
+.. option:: --ext-intersphinx
-Georg Brandl <georg@python.org>, Armin Ronacher <armin.ronacher@active-4.com> et
-al.
+ Enable `sphinx.ext.intersphinx` extension.
-This manual page was initially written by Mikhail Gusarov
-<dottedmag@dottedmag.net>, for the Debian project.
+.. option:: --ext-todo
+
+ Enable `sphinx.ext.todo` extension.
+
+.. option:: --ext-coverage
+
+ Enable `sphinx.ext.coverage` extension.
+
+.. option:: --ext-imgmath
+
+ Enable `sphinx.ext.imgmath` extension.
+
+.. option:: --ext-mathjax
+
+ Enable `sphinx.ext.mathjax` extension.
+
+.. option:: --ext-ifconfig
+
+ Enable `sphinx.ext.ifconfig` extension.
+
+.. option:: --ext-viewcode
+
+ Enable `sphinx.ext.viewcode` extension.
+
+.. option:: --extensions=EXTENSIONS
+
+ Enable arbitrary extensions.
+
+.. rubric:: Makefile and Batchfile Creation Options
+
+.. option:: --use-make-mode, --no-use-make-mode
+
+ :file:`Makefile/make.bat` uses (or doesn't use) :ref:`make-mode <make_mode>`.
+ Default is ``use``, which generates a more concise :file:`Makefile/make.bat`.
+
+ .. versionchanged:: 1.5
+ make-mode is default.
+
+.. option:: --makefile, --no-makefile
+
+ Create (or do not create) a makefile.
+
+.. option:: --batchfile, --no-batchfile
+
+ Create (or do not create) a batchfile.
+
+.. rubric:: Project templating
+
+.. versionadded:: 1.5
+ Project templating options for sphinx-quickstart
+
+.. option:: -t, --templatedir=TEMPLATEDIR
+
+ Template directory for template files. You can modify the templates of
+ the sphinx project files generated by quickstart. The following Jinja2
+ template files are allowed:
+
+ * ``master_doc.rst_t``
+ * ``conf.py_t``
+ * ``Makefile_t``
+ * ``Makefile.new_t``
+ * ``make.bat_t``
+ * ``make.bat.new_t``
+
+ For details, please refer to the system template files that Sphinx
+ provides (``sphinx/templates/quickstart``).
+
+.. option:: -d NAME=VALUE
+
+ Define a template variable.
+
+See also
+--------
+
+:manpage:`sphinx-build(1)`
diff --git a/doc/markup/code.rst b/doc/markup/code.rst
index 0021af501..3b14bd6e2 100644
--- a/doc/markup/code.rst
+++ b/doc/markup/code.rst
@@ -96,7 +96,7 @@ switch on line numbers for the individual block::
Some more Ruby code.
The first line number can be selected with the ``lineno-start`` option. If
-present, ``linenos`` is automatically activated as well.
+present, ``linenos`` is automatically activated as well::
.. code-block:: ruby
:lineno-start: 10
diff --git a/doc/markup/inline.rst b/doc/markup/inline.rst
index 7ed3e7207..4d14a653d 100644
--- a/doc/markup/inline.rst
+++ b/doc/markup/inline.rst
@@ -63,7 +63,7 @@ Cross-referencing anything
by :rst:role:`doc`, :rst:role:`ref` or :rst:role:`option`.
Custom objects added to the standard domain by extensions (see
- :meth:`.add_object_type`) are also searched.
+ :meth:`Sphinx.add_object_type`) are also searched.
* Then, it looks for objects (targets) in all loaded domains. It is up to
the domains how specific a match must be. For example, in the Python
@@ -145,10 +145,15 @@ Cross-referencing arbitrary locations
The same works for tables that are given an explicit caption using the
:dudir:`table` directive.
- * Labels that aren't placed before a section title can still be referenced
- to, but you must give the link an explicit title, using this syntax:
+ * Labels that aren't placed before a section title can still be referenced,
+ but you must give the link an explicit title, using this syntax:
``:ref:`Link title <label-name>```.
+ .. note::
+
+ Reference labels must start with an underscore. When referencing a
+ label, the underscore must be omitted (see examples above).
+
Using :rst:role:`ref` is advised over standard reStructuredText links to
sections (like ```Section title`_``) because it works across files, when
section headings are changed, and for all builders that support
diff --git a/doc/setuptools.rst b/doc/setuptools.rst
index dab25fc59..8d759f985 100644
--- a/doc/setuptools.rst
+++ b/doc/setuptools.rst
@@ -32,15 +32,23 @@ For instance, from ``setup.py``::
'build_sphinx': {
'project': ('setup.py', name),
'version': ('setup.py', version),
- 'release': ('setup.py', release)}},
+ 'release': ('setup.py', release),
+ 'source_dir': ('setup.py', 'doc')}},
)
+.. note::
+
+ If you set Sphinx options directly in the ``setup()`` command, replace
+ hyphens in variable names with underscores. In the example above,
+ ``source-dir`` becomes ``source_dir``.
+
Or add this section in ``setup.cfg``::
[build_sphinx]
project = 'My project'
version = 1.2
release = 1.2.0
+ source-dir = 'doc'
Once configured, call this by calling the relevant command on ``setup.py``::
@@ -74,7 +82,9 @@ Options for setuptools integration
.. confval:: source-dir
The target source directory. This can be relative to the ``setup.py`` or
- ``setup.cfg`` file, or it can be absolute. Default is ``''``.
+ ``setup.cfg`` file, or it can be absolute. It defaults to ``./doc`` or
+ ``./docs`` if either contains a file named ``conf.py`` (checking ``./doc``
+ first); otherwise it defaults to the current directory.
This can also be set by passing the `-s` flag to ``setup.py``:
@@ -85,13 +95,13 @@ Options for setuptools integration
.. confval:: build-dir
The target build directory. This can be relative to the ``setup.py`` or
- ``setup.cfg`` file, or it can be absolute. Default is ``''``.
+ ``setup.cfg`` file, or it can be absolute. Default is ``./build/sphinx``.
.. confval:: config-dir
Location of the configuration directory. This can be relative to the
- ``setup.py`` or ``setup.cfg`` file, or it can be absolute. Default is
- ``''``.
+ ``setup.py`` or ``setup.cfg`` file, or it can be absolute. Default is to use
+ `source-dir`.
This can also be set by passing the `-c` flag to ``setup.py``:
diff --git a/doc/theming.rst b/doc/theming.rst
index c8cad2ba2..34bca9607 100644
--- a/doc/theming.rst
+++ b/doc/theming.rst
@@ -171,9 +171,15 @@ These themes are:
- **bodyfont** (CSS font-family): Font for normal text.
- **headfont** (CSS font-family): Font for headings.
-* **sphinxdoc** -- The theme used for this documentation. It features a sidebar
- on the right side. There are currently no options beyond *nosidebar* and
- *sidebarwidth*.
+* **sphinxdoc** -- The theme originally used by this documentation. It features
+ a sidebar on the right side. There are currently no options beyond
+ *nosidebar* and *sidebarwidth*.
+
+ .. note::
+
+ The Sphinx documentation now uses
+ `an adjusted version of the sphinxdoc theme
+ <https://github.com/sphinx-doc/sphinx/tree/master/doc/_themes/sphinx13>`_.
* **scrolls** -- A more lightweight theme, based on `the Jinja documentation
<http://jinja.pocoo.org/>`_. The following color options are available:
@@ -270,6 +276,7 @@ Python :mod:`ConfigParser` module) and has the following structure:
inherit = base theme
stylesheet = main CSS name
pygments_style = stylename
+ sidebars = localtoc.html, relations.html, sourcelink.html, searchbox.html
[options]
variable = default value
@@ -289,10 +296,16 @@ Python :mod:`ConfigParser` module) and has the following structure:
highlighting. This can be overridden by the user in the
:confval:`pygments_style` config value.
+* The **sidebars** setting gives the comma separated list of sidebar templates
+ for constructing sidebars. This can be overridden by the user in the
+ :confval:`html_sidebars` config value.
+
* The **options** section contains pairs of variable names and default values.
These options can be overridden by the user in :confval:`html_theme_options`
and are accessible from all templates as ``theme_<name>``.
+.. versionadded:: 1.7
+ sidebar settings
.. _distribute-your-theme:
diff --git a/doc/tutorial.rst b/doc/tutorial.rst
index 4dcee9e1d..243d44d03 100644
--- a/doc/tutorial.rst
+++ b/doc/tutorial.rst
@@ -36,7 +36,7 @@ configuration values from a few questions it asks you. Just run ::
and answer its questions. (Be sure to say yes to the "autodoc" extension.)
There is also an automatic "API documentation" generator called
-:program:`sphinx-apidoc`; see :ref:`invocation-apidoc` for details.
+:program:`sphinx-apidoc`; see :doc:`/man/sphinx-apidoc` for details.
Defining document structure
@@ -126,8 +126,8 @@ directory in which you want to place the built documentation.
The :option:`-b <sphinx-build -b>` option selects a builder; in this example
Sphinx will build HTML files.
-|more| See :ref:`invocation` for all options that :program:`sphinx-build`
-supports.
+|more| Refer to the :program:`sphinx-build man page <sphinx-build>` for all
+options that :program:`sphinx-build` supports.
However, :program:`sphinx-quickstart` script creates a :file:`Makefile` and a
:file:`make.bat` which make life even easier for you: with them you only need
@@ -317,8 +317,8 @@ More topics to be covered
.. rubric:: Footnotes
.. [#] This is the usual layout. However, :file:`conf.py` can also live in
- another directory, the :term:`configuration directory`. See
- :ref:`invocation`.
+ another directory, the :term:`configuration directory`. Refer to the
+ :program:`sphinx-build man page <sphinx-build>` for more information.
.. |more| image:: more.png
:align: middle
diff --git a/mypy.ini b/mypy.ini
deleted file mode 100644
index ec12ea587..000000000
--- a/mypy.ini
+++ /dev/null
@@ -1,7 +0,0 @@
-[mypy]
-python_version = 2.7
-ignore_missing_imports = True
-follow_imports = skip
-incremental = True
-check_untyped_defs = True
-warn_unused_ignores = True
diff --git a/setup.cfg b/setup.cfg
index a5433c6b0..0ce6282dc 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,20 @@
+[metadata]
+license_file = LICENSE
+
[egg_info]
tag_build = .dev
tag_date = true
+[bdist_wheel]
+universal = 1
+
[aliases]
release = egg_info -Db ''
upload = upload --sign --identity=36580288
+[build_sphinx]
+warning-is-error = 1
+
[extract_messages]
mapping_file = babel.cfg
output_file = sphinx/locale/sphinx.pot
@@ -20,13 +29,31 @@ output_dir = sphinx/locale/
domain = sphinx
directory = sphinx/locale/
-[wheel]
-universal = 1
-
[flake8]
max-line-length = 95
ignore = E116,E241,E251,E741
-exclude = .git,.tox,tests/*,build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py
+exclude = .git,.tox,.venv,tests/*,build/*,doc/_build/*,sphinx/search/*,sphinx/pycode/pgen2/*,doc/ext/example*.py
-[build_sphinx]
-warning-is-error = 1
+[mypy]
+python_version = 2.7
+show_column_numbers = True
+show_error_context = True
+ignore_missing_imports = True
+follow_imports = skip
+incremental = True
+check_untyped_defs = True
+warn_unused_ignores = True
+
+[coverage:run]
+branch = True
+source = sphinx
+
+[coverage:report]
+exclude_lines =
+ # Have to re-enable the standard pragma
+ pragma: no cover
+ # Don't complain if tests don't hit defensive assertion code:
+ raise NotImplementedError
+ # Don't complain if non-runnable code isn't run:
+ if __name__ == .__main__.:
+ignore_errors = True
diff --git a/setup.py b/setup.py
index e5453f645..6b7de9129 100644
--- a/setup.py
+++ b/setup.py
@@ -8,34 +8,8 @@ from distutils.cmd import Command
import sphinx
-long_desc = '''
-Sphinx is a tool that makes it easy to create intelligent and beautiful
-documentation for Python projects (or other documents consisting of multiple
-reStructuredText sources), written by Georg Brandl. It was originally created
-for the new Python documentation, and has excellent facilities for Python
-project documentation, but C/C++ is supported as well, and more languages are
-planned.
-
-Sphinx uses reStructuredText as its markup language, and many of its strengths
-come from the power and straightforwardness of reStructuredText and its parsing
-and translating suite, the Docutils.
-
-Among its features are the following:
-
-* Output formats: HTML (including derivative formats such as HTML Help, Epub
- and Qt Help), plain text, manual pages and LaTeX or direct PDF output
- using rst2pdf
-* Extensive cross-references: semantic markup and automatic links
- for functions, classes, glossary terms and similar pieces of information
-* Hierarchical structure: easy definition of a document tree, with automatic
- links to siblings, parents and children
-* Automatic indices: general index as well as a module index
-* Code handling: automatic highlighting using the Pygments highlighter
-* Flexible HTML output using the Jinja 2 templating engine
-* Various extensions are available, e.g. for automatic testing of snippets
- and inclusion of appropriately formatted docstrings
-* Setuptools integration
-'''
+with open('README.rst') as f:
+ long_desc = f.read()
if sys.version_info < (2, 7) or (3, 0) <= sys.version_info < (3, 4):
print('ERROR: Sphinx requires at least Python 2.7 or 3.4 to run.')
@@ -68,20 +42,21 @@ extras_require = {
'whoosh>=2.0',
],
'test': [
+ 'mock',
'pytest',
- 'mock', # it would be better for 'test:python_version in 2.7'
- 'simplejson', # better: 'test:platform_python_implementation=="PyPy"'
+ 'pytest-cov',
'html5lib',
+ 'flake8',
+ ],
+ 'test:python_version<"3"': [
+ 'enum34',
+ ],
+ 'test:python_version>="3"': [
+ 'mypy',
+ 'typed_ast',
],
}
-# for sdist installation with pip-1.5.6
-if sys.platform == 'win32':
- requires.append('colorama>=0.3.5')
-
-if sys.version_info < (3, 5):
- requires.append('typing')
-
# Provide a "compile_catalog" command that also creates the translated
# JavaScript files if Babel is available.
@@ -90,10 +65,7 @@ cmdclass = {}
try:
from babel.messages.pofile import read_po
from babel.messages.frontend import compile_catalog
- try:
- from simplejson import dump
- except ImportError:
- from json import dump
+ from json import dump
except ImportError:
pass
else:
@@ -225,7 +197,13 @@ setup(
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: Implementation :: CPython',
+ 'Programming Language :: Python :: Implementation :: PyPy',
'Framework :: Sphinx',
'Framework :: Sphinx :: Extension',
'Framework :: Sphinx :: Theme',
@@ -239,14 +217,21 @@ setup(
include_package_data=True,
entry_points={
'console_scripts': [
- 'sphinx-build = sphinx:main',
- 'sphinx-quickstart = sphinx.quickstart:main',
- 'sphinx-apidoc = sphinx.apidoc:main',
+ 'sphinx-build = sphinx.cmd.build:main',
+ 'sphinx-quickstart = sphinx.cmd.quickstart:main',
+ 'sphinx-apidoc = sphinx.ext.apidoc:main',
'sphinx-autogen = sphinx.ext.autosummary.generate:main',
],
'distutils.commands': [
'build_sphinx = sphinx.setup_command:BuildDoc',
],
+ # consider moving this to 'flake8:local-plugins' once flake8 3.5.0 is
+ # in the wild:
+ # http://flake8.pycqa.org/en/latest/user/configuration.html\
+ # #using-local-plugins
+ 'flake8.extension': [
+ 'X101 = utils.checks:sphinx_has_header',
+ ],
},
install_requires=requires,
extras_require=extras_require,
diff --git a/sphinx-apidoc.py b/sphinx-apidoc.py
deleted file mode 100755
index 8279feaaa..000000000
--- a/sphinx-apidoc.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Sphinx - Python documentation toolchain
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-
-if __name__ == '__main__':
- from sphinx.apidoc import main
- sys.exit(main(sys.argv))
diff --git a/sphinx-autogen.py b/sphinx-autogen.py
deleted file mode 100755
index 6c10f0e64..000000000
--- a/sphinx-autogen.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Sphinx - Python documentation toolchain
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-
-if __name__ == '__main__':
- from sphinx.ext.autosummary.generate import main
- sys.exit(main(sys.argv))
diff --git a/sphinx-build.py b/sphinx-build.py
deleted file mode 100755
index 1b8d14082..000000000
--- a/sphinx-build.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Sphinx - Python documentation toolchain
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-
-if __name__ == '__main__':
- from sphinx import main
- sys.exit(main(sys.argv))
diff --git a/sphinx-quickstart.py b/sphinx-quickstart.py
deleted file mode 100755
index 81d6b6696..000000000
--- a/sphinx-quickstart.py
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Sphinx - Python documentation toolchain
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import sys
-
-if __name__ == '__main__':
- from sphinx.quickstart import main
- sys.exit(main(sys.argv))
diff --git a/sphinx/__init__.py b/sphinx/__init__.py
index b8a62f9e8..c84e2672f 100644
--- a/sphinx/__init__.py
+++ b/sphinx/__init__.py
@@ -15,15 +15,12 @@
from __future__ import absolute_import
import os
-import sys
import warnings
from os import path
+from .cmd import build
from .deprecation import RemovedInNextVersionWarning
-
-if False:
- # For type annotation
- from typing import List # NOQA
+from .deprecation import RemovedInSphinx20Warning
# by default, all DeprecationWarning under sphinx package will be emit.
# Users can avoid this by using environment variable: PYTHONWARNINGS=
@@ -34,13 +31,13 @@ if 'PYTHONWARNINGS' not in os.environ:
warnings.filterwarnings('ignore', "'U' mode is deprecated",
DeprecationWarning, module='docutils.io')
-__version__ = '1.6.6+'
-__released__ = '1.6.6' # used when Sphinx builds its own docs
+__version__ = '1.7+'
+__released__ = '1.7' # used when Sphinx builds its own docs
# version info for better programmatic use
# possible values for 3rd element: 'alpha', 'beta', 'rc', 'final'
# 'final' has 0 as the last element
-version_info = (1, 6, 6, 'beta', 0)
+version_info = (1, 7, 0, 'beta', 0)
package_dir = path.abspath(path.dirname(__file__))
@@ -63,62 +60,19 @@ if __version__.endswith('+'):
pass
-def main(argv=sys.argv):
- # type: (List[str]) -> int
- if sys.argv[1:2] == ['-M']:
- return make_main(argv)
- else:
- return build_main(argv)
-
-
-def build_main(argv=sys.argv):
- # type: (List[str]) -> int
- """Sphinx build "main" command-line entry."""
- if (sys.version_info[:3] < (2, 7, 0) or
- (3, 0, 0) <= sys.version_info[:3] < (3, 4, 0)):
- sys.stderr.write('Error: Sphinx requires at least Python 2.7 or 3.4 to run.\n')
- return 1
- try:
- from sphinx import cmdline
- except ImportError:
- err = sys.exc_info()[1]
- errstr = str(err)
- if errstr.lower().startswith('no module named'):
- whichmod = errstr[16:]
- hint = ''
- if whichmod.startswith('docutils'):
- whichmod = 'Docutils library'
- elif whichmod.startswith('jinja'):
- whichmod = 'Jinja2 library'
- elif whichmod == 'roman':
- whichmod = 'roman module (which is distributed with Docutils)'
- hint = ('This can happen if you upgraded docutils using\n'
- 'easy_install without uninstalling the old version'
- 'first.\n')
- else:
- whichmod += ' module'
- sys.stderr.write('Error: The %s cannot be found. '
- 'Did you install Sphinx and its dependencies '
- 'correctly?\n' % whichmod)
- if hint:
- sys.stderr.write(hint)
- return 1
- raise
-
- import sphinx.util.docutils
- if sphinx.util.docutils.__version_info__ < (0, 10):
- sys.stderr.write('Error: Sphinx requires at least Docutils 0.10 to '
- 'run.\n')
- return 1
- return cmdline.main(argv) # type: ignore
-
-
-def make_main(argv=sys.argv):
- # type: (List[str]) -> int
- """Sphinx build "make mode" entry."""
- from sphinx import make_mode
- return make_mode.run_make_mode(argv[2:]) # type: ignore
+def main(*args, **kwargs):
+ warnings.warn(
+ '`sphinx.main()` has moved to `sphinx.cmd.build.main()`.',
+ RemovedInSphinx20Warning,
+ stacklevel=2,
+ )
+ return build.main(*args, **kwargs)
if __name__ == '__main__':
- sys.exit(main(sys.argv))
+ warnings.warn(
+ '`sphinx` has moved to `sphinx.build`.',
+ RemovedInSphinx20Warning,
+ stacklevel=2,
+ )
+ build.main()
diff --git a/sphinx/__main__.py b/sphinx/__main__.py
index 06b1812f2..fbac1c4f7 100644
--- a/sphinx/__main__.py
+++ b/sphinx/__main__.py
@@ -11,4 +11,4 @@
import sys
from sphinx import main
-sys.exit(main(sys.argv))
+sys.exit(main(sys.argv[1:]))
diff --git a/sphinx/apidoc.py b/sphinx/apidoc.py
index 8f9694cdd..4e20fb7e4 100644
--- a/sphinx/apidoc.py
+++ b/sphinx/apidoc.py
@@ -3,435 +3,32 @@
sphinx.apidoc
~~~~~~~~~~~~~
- Parses a directory tree looking for Python modules and packages and creates
- ReST files appropriately to create code documentation with Sphinx. It also
- creates a modules index (named modules.<suffix>).
-
- This is derived from the "sphinx-autopackage" script, which is:
- Copyright 2008 Société des arts technologiques (SAT),
- http://www.sat.qc.ca/
+ This file has moved to :py:mod:`sphinx.ext.apidoc`.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-
-import os
-import sys
-import optparse
-from os import path
-from six import binary_type
-from fnmatch import fnmatch
-
-from sphinx import __display_version__
-from sphinx.quickstart import EXTENSIONS
-from sphinx.util import rst
-from sphinx.util.osutil import FileAvoidWrite, ensuredir, walk
-
-if False:
- # For type annotation
- from typing import Any, List, Tuple # NOQA
-
-# automodule options
-if 'SPHINX_APIDOC_OPTIONS' in os.environ:
- OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
-else:
- OPTIONS = [
- 'members',
- 'undoc-members',
- # 'inherited-members', # disabled because there's a bug in sphinx
- 'show-inheritance',
- ]
-
-INITPY = '__init__.py'
-PY_SUFFIXES = set(['.py', '.pyx'])
-
-
-def makename(package, module):
- # type: (unicode, unicode) -> unicode
- """Join package and module with a dot."""
- # Both package and module can be None/empty.
- if package:
- name = package
- if module:
- name += '.' + module
- else:
- name = module
- return name
-
-
-def write_file(name, text, opts):
- # type: (unicode, unicode, Any) -> None
- """Write the output file for module/package <name>."""
- fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
- if opts.dryrun:
- print('Would create file %s.' % fname)
- return
- if not opts.force and path.isfile(fname):
- print('File %s already exists, skipping.' % fname)
- else:
- print('Creating file %s.' % fname)
- with FileAvoidWrite(fname) as f:
- f.write(text)
-
-
-def format_heading(level, text, escape=True):
- # type: (int, unicode, bool) -> unicode
- """Create a heading of <level> [1, 2 or 3 supported]."""
- if escape:
- text = rst.escape(text)
- underlining = ['=', '-', '~', ][level - 1] * len(text)
- return '%s\n%s\n\n' % (text, underlining)
-
-
-def format_directive(module, package=None):
- # type: (unicode, unicode) -> unicode
- """Create the automodule directive and add the options."""
- directive = '.. automodule:: %s\n' % makename(package, module)
- for option in OPTIONS:
- directive += ' :%s:\n' % option
- return directive
-
-
-def create_module_file(package, module, opts):
- # type: (unicode, unicode, Any) -> None
- """Build the text of the file and write the file."""
- if not opts.noheadings:
- text = format_heading(1, '%s module' % module)
- else:
- text = ''
- # text += format_heading(2, ':mod:`%s` Module' % module)
- text += format_directive(module, package)
- write_file(makename(package, module), text, opts)
-
-
-def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace):
- # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool) -> None
- """Build the text of the file and write the file."""
- text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
- % makename(master_package, subroot))
-
- if opts.modulefirst and not is_namespace:
- text += format_directive(subroot, master_package)
- text += '\n'
-
- # build a list of directories that are szvpackages (contain an INITPY file)
- subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
- # if there are some package directories, add a TOC for theses subpackages
- if subs:
- text += format_heading(2, 'Subpackages')
- text += '.. toctree::\n\n'
- for sub in subs:
- text += ' %s.%s\n' % (makename(master_package, subroot), sub)
- text += '\n'
-
- submods = [path.splitext(sub)[0] for sub in py_files
- if not shall_skip(path.join(root, sub), opts) and
- sub != INITPY]
- if submods:
- text += format_heading(2, 'Submodules')
- if opts.separatemodules:
- text += '.. toctree::\n\n'
- for submod in submods:
- modfile = makename(master_package, makename(subroot, submod))
- text += ' %s\n' % modfile
-
- # generate separate file for this module
- if not opts.noheadings:
- filetext = format_heading(1, '%s module' % modfile)
- else:
- filetext = ''
- filetext += format_directive(makename(subroot, submod),
- master_package)
- write_file(modfile, filetext, opts)
- else:
- for submod in submods:
- modfile = makename(master_package, makename(subroot, submod))
- if not opts.noheadings:
- text += format_heading(2, '%s module' % modfile)
- text += format_directive(makename(subroot, submod),
- master_package)
- text += '\n'
- text += '\n'
-
- if not opts.modulefirst and not is_namespace:
- text += format_heading(2, 'Module contents')
- text += format_directive(subroot, master_package)
-
- write_file(makename(master_package, subroot), text, opts)
-
-
-def create_modules_toc_file(modules, opts, name='modules'):
- # type: (List[unicode], Any, unicode) -> None
- """Create the module's index."""
- text = format_heading(1, '%s' % opts.header, escape=False)
- text += '.. toctree::\n'
- text += ' :maxdepth: %s\n\n' % opts.maxdepth
-
- modules.sort()
- prev_module = '' # type: unicode
- for module in modules:
- # look if the module is a subpackage and, if yes, ignore it
- if module.startswith(prev_module + '.'):
- continue
- prev_module = module
- text += ' %s\n' % module
-
- write_file(name, text, opts)
-
-
-def shall_skip(module, opts):
- # type: (unicode, Any) -> bool
- """Check if we want to skip this module."""
- # skip if the file doesn't exist and not using implicit namespaces
- if not opts.implicit_namespaces and not path.exists(module):
- return True
-
- # skip it if there is nothing (or just \n or \r\n) in the file
- if path.exists(module) and path.getsize(module) <= 2:
- return True
-
- # skip if it has a "private" name and this is selected
- filename = path.basename(module)
- if filename != '__init__.py' and filename.startswith('_') and \
- not opts.includeprivate:
- return True
- return False
-
-
-def recurse_tree(rootpath, excludes, opts):
- # type: (unicode, List[unicode], Any) -> List[unicode]
- """
- Look for every file in the directory tree and create the corresponding
- ReST files.
- """
- # check if the base directory is a package and get its name
- if INITPY in os.listdir(rootpath):
- root_package = rootpath.split(path.sep)[-1]
- else:
- # otherwise, the base is a directory with packages
- root_package = None
-
- toplevels = []
- followlinks = getattr(opts, 'followlinks', False)
- includeprivate = getattr(opts, 'includeprivate', False)
- implicit_namespaces = getattr(opts, 'implicit_namespaces', False)
- for root, subs, files in walk(rootpath, followlinks=followlinks):
- # document only Python module files (that aren't excluded)
- py_files = sorted(f for f in files
- if path.splitext(f)[1] in PY_SUFFIXES and
- not is_excluded(path.join(root, f), excludes))
- is_pkg = INITPY in py_files
- is_namespace = INITPY not in py_files and implicit_namespaces
- if is_pkg:
- py_files.remove(INITPY)
- py_files.insert(0, INITPY)
- elif root != rootpath:
- # only accept non-package at toplevel unless using implicit namespaces
- if not implicit_namespaces:
- del subs[:]
- continue
- # remove hidden ('.') and private ('_') directories, as well as
- # excluded dirs
- if includeprivate:
- exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
- else:
- exclude_prefixes = ('.', '_')
- subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
- not is_excluded(path.join(root, sub), excludes))
-
- if is_pkg or is_namespace:
- # we are in a package with something to document
- if subs or len(py_files) > 1 or not shall_skip(path.join(root, INITPY), opts):
- subpackage = root[len(rootpath):].lstrip(path.sep).\
- replace(path.sep, '.')
- # if this is not a namespace or
- # a namespace and there is something there to document
- if not is_namespace or len(py_files) > 0:
- create_package_file(root, root_package, subpackage,
- py_files, opts, subs, is_namespace)
- toplevels.append(makename(root_package, subpackage))
- else:
- # if we are at the root level, we don't require it to be a package
- assert root == rootpath and root_package is None
- for py_file in py_files:
- if not shall_skip(path.join(rootpath, py_file), opts):
- module = path.splitext(py_file)[0]
- create_module_file(root_package, module, opts)
- toplevels.append(module)
-
- return toplevels
-
-
-def normalize_excludes(rootpath, excludes):
- # type: (unicode, List[unicode]) -> List[unicode]
- """Normalize the excluded directory list."""
- return [path.abspath(exclude) for exclude in excludes]
-
-
-def is_excluded(root, excludes):
- # type: (unicode, List[unicode]) -> bool
- """Check if the directory is in the exclude list.
-
- Note: by having trailing slashes, we avoid common prefix issues, like
- e.g. an exlude "foo" also accidentally excluding "foobar".
- """
- for exclude in excludes:
- if fnmatch(root, exclude):
- return True
- return False
-
-
-def main(argv=sys.argv):
- # type: (List[str]) -> int
- """Parse and check the command line arguments."""
- parser = optparse.OptionParser(
- usage="""\
-usage: %prog [options] -o <output_path> <module_path> [exclude_pattern, ...]
-
-Look recursively in <module_path> for Python modules and packages and create
-one reST file with automodule directives per package in the <output_path>.
-
-The <exclude_pattern>s can be file and/or directory patterns that will be
-excluded from generation.
-
-Note: By default this script will not overwrite already created files.""")
-
- parser.add_option('-o', '--output-dir', action='store', dest='destdir',
- help='Directory to place all output', default='')
- parser.add_option('-d', '--maxdepth', action='store', dest='maxdepth',
- help='Maximum depth of submodules to show in the TOC '
- '(default: 4)', type='int', default=4)
- parser.add_option('-f', '--force', action='store_true', dest='force',
- help='Overwrite existing files')
- parser.add_option('-l', '--follow-links', action='store_true',
- dest='followlinks', default=False,
- help='Follow symbolic links. Powerful when combined '
- 'with collective.recipe.omelette.')
- parser.add_option('-n', '--dry-run', action='store_true', dest='dryrun',
- help='Run the script without creating files')
- parser.add_option('-e', '--separate', action='store_true',
- dest='separatemodules',
- help='Put documentation for each module on its own page')
- parser.add_option('-P', '--private', action='store_true',
- dest='includeprivate',
- help='Include "_private" modules')
- parser.add_option('-T', '--no-toc', action='store_true', dest='notoc',
- help='Don\'t create a table of contents file')
- parser.add_option('-E', '--no-headings', action='store_true',
- dest='noheadings',
- help='Don\'t create headings for the module/package '
- 'packages (e.g. when the docstrings already contain '
- 'them)')
- parser.add_option('-M', '--module-first', action='store_true',
- dest='modulefirst',
- help='Put module documentation before submodule '
- 'documentation')
- parser.add_option('--implicit-namespaces', action='store_true',
- dest='implicit_namespaces',
- help='Interpret module paths according to PEP-0420 '
- 'implicit namespaces specification')
- parser.add_option('-s', '--suffix', action='store', dest='suffix',
- help='file suffix (default: rst)', default='rst')
- parser.add_option('-F', '--full', action='store_true', dest='full',
- help='Generate a full project with sphinx-quickstart')
- parser.add_option('-a', '--append-syspath', action='store_true',
- dest='append_syspath',
- help='Append module_path to sys.path, used when --full is given')
- parser.add_option('-H', '--doc-project', action='store', dest='header',
- help='Project name (default: root module name)')
- parser.add_option('-A', '--doc-author', action='store', dest='author',
- type='str',
- help='Project author(s), used when --full is given')
- parser.add_option('-V', '--doc-version', action='store', dest='version',
- help='Project version, used when --full is given')
- parser.add_option('-R', '--doc-release', action='store', dest='release',
- help='Project release, used when --full is given, '
- 'defaults to --doc-version')
- parser.add_option('--version', action='store_true', dest='show_version',
- help='Show version information and exit')
- group = parser.add_option_group('Extension options')
- for ext in EXTENSIONS:
- group.add_option('--ext-' + ext, action='store_true',
- dest='ext_' + ext, default=False,
- help='enable %s extension' % ext)
-
- (opts, args) = parser.parse_args(argv[1:])
-
- if opts.show_version:
- print('Sphinx (sphinx-apidoc) %s' % __display_version__)
- return 0
- if not args:
- parser.error('A package path is required.')
+import warnings
- rootpath, excludes = args[0], args[1:]
- if not opts.destdir:
- parser.error('An output directory is required.')
- if opts.header is None:
- opts.header = path.abspath(rootpath).split(path.sep)[-1]
- if opts.suffix.startswith('.'):
- opts.suffix = opts.suffix[1:]
- if not path.isdir(rootpath):
- print('%s is not a directory.' % rootpath, file=sys.stderr)
- sys.exit(1)
- if not opts.dryrun:
- ensuredir(opts.destdir)
- rootpath = path.abspath(rootpath)
- excludes = normalize_excludes(rootpath, excludes)
- modules = recurse_tree(rootpath, excludes, opts)
- if opts.full:
- from sphinx import quickstart as qs
- modules.sort()
- prev_module = '' # type: unicode
- text = ''
- for module in modules:
- if module.startswith(prev_module + '.'):
- continue
- prev_module = module
- text += ' %s\n' % module
- d = dict(
- path = opts.destdir,
- sep = False,
- dot = '_',
- project = opts.header,
- author = opts.author or 'Author',
- version = opts.version or '',
- release = opts.release or opts.version or '',
- suffix = '.' + opts.suffix,
- master = 'index',
- epub = True,
- ext_autodoc = True,
- ext_viewcode = True,
- ext_todo = True,
- makefile = True,
- batchfile = True,
- mastertocmaxdepth = opts.maxdepth,
- mastertoctree = text,
- language = 'en',
- module_path = rootpath,
- append_syspath = opts.append_syspath,
- )
- enabled_exts = {'ext_' + ext: getattr(opts, 'ext_' + ext)
- for ext in EXTENSIONS if getattr(opts, 'ext_' + ext)}
- d.update(enabled_exts)
+from sphinx.deprecation import RemovedInSphinx20Warning
+from sphinx.ext.apidoc import main as _main
- if isinstance(opts.header, binary_type):
- d['project'] = d['project'].decode('utf-8')
- if isinstance(opts.author, binary_type):
- d['author'] = d['author'].decode('utf-8')
- if isinstance(opts.version, binary_type):
- d['version'] = d['version'].decode('utf-8')
- if isinstance(opts.release, binary_type):
- d['release'] = d['release'].decode('utf-8')
- if not opts.dryrun:
- qs.generate(d, silent=True, overwrite=opts.force)
- elif not opts.notoc:
- create_modules_toc_file(modules, opts)
- return 0
+def main(*args, **kwargs):
+    # type: (Any, Any) -> int
+    """Deprecated entry point kept for backward compatibility.
+
+    Delegates to ``sphinx.ext.apidoc.main()`` and warns callers to migrate
+    before Sphinx 2.0.
+    """
+    warnings.warn(
+        '`sphinx.apidoc.main()` has moved to `sphinx.ext.apidoc.main()`.',
+        RemovedInSphinx20Warning,
+        stacklevel=2,
+    )
+    # Propagate the exit status: the pre-move implementation returned an
+    # int (``return 0``), so the shim must not silently drop the result.
+    return _main(*args, **kwargs)
# So program can be started with "python -m sphinx.apidoc ..."
if __name__ == "__main__":
+ warnings.warn(
+ '`sphinx.apidoc` has moved to `sphinx.ext.apidoc`.',
+ RemovedInSphinx20Warning,
+ stacklevel=2,
+ )
main()
diff --git a/sphinx/application.py b/sphinx/application.py
index d23d97fbd..9195f11af 100644
--- a/sphinx/application.py
+++ b/sphinx/application.py
@@ -19,7 +19,7 @@ import posixpath
from os import path
from collections import deque
-from six import iteritems
+from six import iteritems, itervalues
from six.moves import cStringIO
from docutils import nodes
@@ -29,20 +29,18 @@ import sphinx
from sphinx import package_dir, locale
from sphinx.config import Config
from sphinx.errors import ConfigError, ExtensionError, VersionRequirementError
-from sphinx.deprecation import RemovedInSphinx17Warning, RemovedInSphinx20Warning
+from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.environment import BuildEnvironment
from sphinx.events import EventManager
from sphinx.extension import verify_required_extensions
-from sphinx.io import SphinxStandaloneReader
from sphinx.locale import __
from sphinx.registry import SphinxComponentRegistry
from sphinx.util import pycompat # noqa: F401
from sphinx.util import import_object
from sphinx.util import logging
-from sphinx.util import status_iterator, old_status_iterator, display_chunk
from sphinx.util.tags import Tags
from sphinx.util.osutil import ENOENT, ensuredir
-from sphinx.util.console import bold, darkgreen # type: ignore
+from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import is_html5_writer_available, directive_helper
from sphinx.util.i18n import find_catalog_source_files
@@ -55,12 +53,13 @@ if False:
from sphinx.domains import Domain, Index # NOQA
from sphinx.environment.collectors import EnvironmentCollector # NOQA
from sphinx.extension import Extension # NOQA
+ from sphinx.roles import XRefRole # NOQA
from sphinx.theming import Theme # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
builtin_extensions = (
'sphinx.builders.applehelp',
'sphinx.builders.changes',
- 'sphinx.builders.epub2',
'sphinx.builders.epub3',
'sphinx.builders.devhelp',
'sphinx.builders.dummy',
@@ -85,6 +84,7 @@ builtin_extensions = (
'sphinx.directives.code',
'sphinx.directives.other',
'sphinx.directives.patches',
+ 'sphinx.io',
'sphinx.parsers',
'sphinx.roles',
'sphinx.transforms.post_transforms',
@@ -121,7 +121,6 @@ class Sphinx(object):
self.env = None # type: BuildEnvironment
self.registry = SphinxComponentRegistry()
self.enumerable_nodes = {} # type: Dict[nodes.Node, Tuple[unicode, Callable]] # NOQA
- self.post_transforms = [] # type: List[Transform]
self.html_themes = {} # type: Dict[unicode, unicode]
self.srcdir = srcdir
@@ -316,13 +315,6 @@ class Sphinx(object):
for node, settings in iteritems(self.enumerable_nodes):
self.env.get_domain('std').enumerable_nodes[node] = settings # type: ignore
- @property
- def buildername(self):
- # type: () -> unicode
- warnings.warn('app.buildername is deprecated. Please use app.builder.name instead',
- RemovedInSphinx17Warning)
- return self.builder.name
-
# ---- main "build" method -------------------------------------------------
def build(self, force_all=False, filenames=None):
@@ -341,9 +333,9 @@ class Sphinx(object):
status = (self.statuscode == 0 and
__('succeeded') or __('finished with problems'))
if self._warncount:
- logger.info(bold(__('build %s, %s warning%s.') %
- (status, self._warncount,
- self._warncount != 1 and 's' or '')))
+ logger.info(bold(__('build %s, %s warning.',
+ 'build %s, %s warnings.', self._warncount) %
+ (status, self._warncount)))
else:
logger.info(bold(__('build %s.') % status))
except Exception as err:
@@ -358,31 +350,15 @@ class Sphinx(object):
self.builder.cleanup()
# ---- logging handling ----------------------------------------------------
- def warn(self, message, location=None, prefix=None,
- type=None, subtype=None, colorfunc=None):
- # type: (unicode, unicode, unicode, unicode, unicode, Callable) -> None
+ def warn(self, message, location=None, type=None, subtype=None):
+ # type: (unicode, unicode, unicode, unicode) -> None
"""Emit a warning.
If *location* is given, it should either be a tuple of (docname, lineno)
or a string describing the location of the warning as well as possible.
- *prefix* usually should not be changed.
-
*type* and *subtype* are used to suppress warnings with :confval:`suppress_warnings`.
-
- .. note::
-
- For warnings emitted during parsing, you should use
- :meth:`.BuildEnvironment.warn` since that will collect all
- warnings during parsing for later output.
"""
- if prefix:
- warnings.warn('prefix option of warn() is now deprecated.',
- RemovedInSphinx17Warning)
- if colorfunc:
- warnings.warn('colorfunc option of warn() is now deprecated.',
- RemovedInSphinx17Warning)
-
warnings.warn('app.warning() is now deprecated. Use sphinx.util.logging instead.',
RemovedInSphinx20Warning)
logger.warning(message, type=type, subtype=subtype, location=location)
@@ -419,34 +395,6 @@ class Sphinx(object):
RemovedInSphinx20Warning)
logger.debug(message, *args, **kwargs)
- def _display_chunk(chunk):
- # type: (Any) -> unicode
- warnings.warn('app._display_chunk() is now deprecated. '
- 'Use sphinx.util.display_chunk() instead.',
- RemovedInSphinx17Warning)
- return display_chunk(chunk)
-
- def old_status_iterator(self, iterable, summary, colorfunc=darkgreen,
- stringify_func=display_chunk):
- # type: (Iterable, unicode, Callable, Callable[[Any], unicode]) -> Iterator
- warnings.warn('app.old_status_iterator() is now deprecated. '
- 'Use sphinx.util.status_iterator() instead.',
- RemovedInSphinx17Warning)
- for item in old_status_iterator(iterable, summary,
- color="darkgreen", stringify_func=stringify_func):
- yield item
-
- # new version with progress info
- def status_iterator(self, iterable, summary, colorfunc=darkgreen, length=0,
- stringify_func=_display_chunk):
- # type: (Iterable, unicode, Callable, int, Callable[[Any], unicode]) -> Iterable
- warnings.warn('app.status_iterator() is now deprecated. '
- 'Use sphinx.util.status_iterator() instead.',
- RemovedInSphinx17Warning)
- for item in status_iterator(iterable, summary, length=length, verbosity=self.verbosity,
- color="darkgreen", stringify_func=stringify_func):
- yield item
-
# ---- general extensibility interface -------------------------------------
def setup_extension(self, extname):
@@ -496,7 +444,6 @@ class Sphinx(object):
def add_builder(self, builder):
# type: (Type[Builder]) -> None
- logger.debug('[app] adding builder: %r', builder)
self.registry.add_builder(builder)
def add_config_value(self, name, default, rebuild, types=()):
@@ -516,7 +463,6 @@ class Sphinx(object):
def set_translator(self, name, translator_class):
# type: (unicode, Type[nodes.NodeVisitor]) -> None
- logger.info(bold(__('Change of translator for the %s builder.') % name))
self.registry.add_translator(name, translator_class)
def add_node(self, node, **kwds):
@@ -568,13 +514,6 @@ class Sphinx(object):
self.enumerable_nodes[node] = (figtype, title_getter)
self.add_node(node, **kwds)
- def _directive_helper(self, obj, has_content=None, argument_spec=None, **option_spec):
- # type: (Any, bool, Tuple[int, int, bool], Any) -> Any
- warnings.warn('_directive_helper() is now deprecated. '
- 'Please use sphinx.util.docutils.directive_helper() instead.',
- RemovedInSphinx17Warning)
- return directive_helper(obj, has_content, argument_spec, **option_spec)
-
def add_directive(self, name, obj, content=None, arguments=None, **options):
# type: (unicode, Any, bool, Tuple[int, int, bool], Any) -> None
logger.debug('[app] adding directive: %r',
@@ -612,39 +551,30 @@ class Sphinx(object):
def add_domain(self, domain):
# type: (Type[Domain]) -> None
- logger.debug('[app] adding domain: %r', domain)
self.registry.add_domain(domain)
def override_domain(self, domain):
# type: (Type[Domain]) -> None
- logger.debug('[app] overriding domain: %r', domain)
self.registry.override_domain(domain)
def add_directive_to_domain(self, domain, name, obj,
has_content=None, argument_spec=None, **option_spec):
# type: (unicode, unicode, Any, bool, Any, Any) -> None
- logger.debug('[app] adding directive to domain: %r',
- (domain, name, obj, has_content, argument_spec, option_spec))
self.registry.add_directive_to_domain(domain, name, obj,
has_content, argument_spec, **option_spec)
def add_role_to_domain(self, domain, name, role):
- # type: (unicode, unicode, Any) -> None
- logger.debug('[app] adding role to domain: %r', (domain, name, role))
+ # type: (unicode, unicode, Union[RoleFunction, XRefRole]) -> None
self.registry.add_role_to_domain(domain, name, role)
def add_index_to_domain(self, domain, index):
# type: (unicode, Type[Index]) -> None
- logger.debug('[app] adding index to domain: %r', (domain, index))
self.registry.add_index_to_domain(domain, index)
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[]):
# type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None
- logger.debug('[app] adding object type: %r',
- (directivename, rolename, indextemplate, parse_node,
- ref_nodeclass, objname, doc_field_types))
self.registry.add_object_type(directivename, rolename, indextemplate, parse_node,
ref_nodeclass, objname, doc_field_types)
@@ -661,21 +591,16 @@ class Sphinx(object):
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname=''):
# type: (unicode, unicode, unicode, nodes.Node, unicode) -> None
- logger.debug('[app] adding crossref type: %r',
- (directivename, rolename, indextemplate, ref_nodeclass,
- objname))
self.registry.add_crossref_type(directivename, rolename,
indextemplate, ref_nodeclass, objname)
def add_transform(self, transform):
# type: (Type[Transform]) -> None
- logger.debug('[app] adding transform: %r', transform)
- SphinxStandaloneReader.transforms.append(transform)
+ self.registry.add_transform(transform)
def add_post_transform(self, transform):
# type: (Type[Transform]) -> None
- logger.debug('[app] adding post transform: %r', transform)
- self.post_transforms.append(transform)
+ self.registry.add_post_transform(transform)
def add_javascript(self, filename):
# type: (unicode) -> None
@@ -736,7 +661,6 @@ class Sphinx(object):
def add_source_parser(self, suffix, parser):
# type: (unicode, Parser) -> None
- logger.debug('[app] adding search source_parser: %r, %r', suffix, parser)
self.registry.add_source_parser(suffix, parser)
def add_env_collector(self, collector):
@@ -749,6 +673,34 @@ class Sphinx(object):
logger.debug('[app] adding HTML theme: %r, %r', name, theme_path)
self.html_themes[name] = theme_path
+ # ---- other methods -------------------------------------------------
+ def is_parallel_allowed(self, typ):
+ # type: (unicode) -> bool
+ """Check parallel processing is allowed or not.
+
+ ``typ`` is a type of processing; ``'read'`` or ``'write'``.
+ """
+ if typ == 'read':
+ attrname = 'parallel_read_safe'
+ elif typ == 'write':
+ attrname = 'parallel_write_safe'
+ else:
+ raise ValueError('parallel type %s is not supported' % typ)
+
+ for ext in itervalues(self.extensions):
+ allowed = getattr(ext, attrname, None)
+ if allowed is None:
+ logger.warning(__("the %s extension does not declare if it is safe "
+ "for parallel %sing, assuming it isn't - please "
+ "ask the extension author to check and make it "
+ "explicit"), ext.name, typ)
+ logger.warning('doing serial %s', typ)
+ return False
+ elif not allowed:
+ return False
+
+ return True
+
class TemplateBridge(object):
"""
diff --git a/sphinx/builders/__init__.py b/sphinx/builders/__init__.py
index 4b977cbc2..a1e360d2f 100644
--- a/sphinx/builders/__init__.py
+++ b/sphinx/builders/__init__.py
@@ -17,7 +17,6 @@ try:
except ImportError:
multiprocessing = None
-from six import itervalues
from docutils import nodes
from sphinx.deprecation import RemovedInSphinx20Warning
@@ -90,9 +89,6 @@ class Builder(object):
self.tags.add(self.name)
self.tags.add("format_%s" % self.format)
self.tags.add("builder_%s" % self.name)
- # compatibility aliases
- self.status_iterator = app.status_iterator
- self.old_status_iterator = app.old_status_iterator
# images that need to be copied over (source -> dest)
self.images = {} # type: Dict[unicode, unicode]
@@ -374,15 +370,10 @@ class Builder(object):
docnames = set(docnames) & self.env.found_docs
# determine if we can write in parallel
- self.parallel_ok = False
if parallel_available and self.app.parallel > 1 and self.allow_parallel:
- self.parallel_ok = True
- for extension in itervalues(self.app.extensions):
- if not extension.parallel_write_safe:
- logger.warning('the %s extension is not safe for parallel '
- 'writing, doing serial write', extension.name)
- self.parallel_ok = False
- break
+ self.parallel_ok = self.app.is_parallel_allowed('write')
+ else:
+ self.parallel_ok = False
# create a task executor to use for misc. "finish-up" tasks
# if self.parallel_ok:
diff --git a/sphinx/builders/_epub_base.py b/sphinx/builders/_epub_base.py
index 411ea7067..f4cb8afa0 100644
--- a/sphinx/builders/_epub_base.py
+++ b/sphinx/builders/_epub_base.py
@@ -12,8 +12,8 @@
import os
import re
from os import path
+from sphinx.util.i18n import format_date
from zipfile import ZIP_DEFLATED, ZIP_STORED, ZipFile
-from datetime import datetime
from collections import namedtuple
try:
@@ -486,7 +486,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
metadata['copyright'] = self.esc(self.config.epub_copyright)
metadata['scheme'] = self.esc(self.config.epub_scheme)
metadata['id'] = self.esc(self.config.epub_identifier)
- metadata['date'] = self.esc(datetime.utcnow().strftime("%Y-%m-%d"))
+ metadata['date'] = self.esc(format_date("%Y-%m-%d"))
metadata['manifest_items'] = []
metadata['spines'] = []
metadata['guides'] = []
@@ -513,6 +513,7 @@ class EpubBuilder(StandaloneHTMLBuilder):
if not self.use_index:
self.ignored_files.append('genindex' + self.out_suffix)
for root, dirs, files in os.walk(outdir):
+ dirs.sort()
for fn in sorted(files):
filename = path.join(root, fn)[olen:]
if filename in self.ignored_files:
diff --git a/sphinx/builders/changes.py b/sphinx/builders/changes.py
index f18076700..5309649c6 100644
--- a/sphinx/builders/changes.py
+++ b/sphinx/builders/changes.py
@@ -73,8 +73,6 @@ class ChangesBuilder(Builder):
ttext = self.typemap[type]
context = content.replace('\n', ' ')
if descname and docname.startswith('c-api'):
- if not descname:
- continue
if context:
entry = '<b>%s</b>: <i>%s:</i> %s' % (descname, ttext,
context)
diff --git a/sphinx/builders/epub2.py b/sphinx/builders/epub2.py
deleted file mode 100644
index a6dcc8568..000000000
--- a/sphinx/builders/epub2.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.builders.epub2
- ~~~~~~~~~~~~~~~~~~~~~
-
- Build epub2 files.
- Originally derived from qthelp.py.
-
- :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-import warnings
-from os import path
-
-from sphinx import package_dir
-from sphinx.builders import _epub_base
-from sphinx.util.osutil import make_filename
-from sphinx.deprecation import RemovedInSphinx17Warning
-
-if False:
- # For type annotation
- from typing import Any, Dict # NOQA
- from sphinx.application import Sphinx # NOQA
-
-
-DOCTYPE = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
- "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'''
-
-
-# The epub publisher
-
-class Epub2Builder(_epub_base.EpubBuilder):
- """
- Builder that outputs epub files.
-
- It creates the metainfo files container.opf, toc.ncx, mimetype, and
- META-INF/container.xml. Afterwards, all necessary files are zipped to an
- epub file.
- """
- name = 'epub2'
-
- template_dir = path.join(package_dir, 'templates', 'epub2')
- doctype = DOCTYPE
-
- # Finish by building the epub file
- def handle_finish(self):
- # type: () -> None
- """Create the metainfo files and finally the epub."""
- self.get_toc()
- self.build_mimetype(self.outdir, 'mimetype')
- self.build_container(self.outdir, 'META-INF/container.xml')
- self.build_content(self.outdir, 'content.opf')
- self.build_toc(self.outdir, 'toc.ncx')
- self.build_epub(self.outdir, self.config.epub_basename + '.epub')
-
-
-def emit_deprecation_warning(app):
- # type: (Sphinx) -> None
- if app.builder.__class__ is Epub2Builder:
- warnings.warn('epub2 builder is deprecated. Please use epub3 builder instead.',
- RemovedInSphinx17Warning)
-
-
-def setup(app):
- # type: (Sphinx) -> Dict[unicode, Any]
- app.setup_extension('sphinx.builders.html')
- app.add_builder(Epub2Builder)
- app.connect('builder-inited', emit_deprecation_warning)
-
- # config values
- app.add_config_value('epub_basename', lambda self: make_filename(self.project), None)
- app.add_config_value('epub_theme', 'epub', 'html')
- app.add_config_value('epub_theme_options', {}, 'html')
- app.add_config_value('epub_title', lambda self: self.html_title, 'html')
- app.add_config_value('epub_author', 'unknown', 'html')
- app.add_config_value('epub_language', lambda self: self.language or 'en', 'html')
- app.add_config_value('epub_publisher', 'unknown', 'html')
- app.add_config_value('epub_copyright', lambda self: self.copyright, 'html')
- app.add_config_value('epub_identifier', 'unknown', 'html')
- app.add_config_value('epub_scheme', 'unknown', 'html')
- app.add_config_value('epub_uid', 'unknown', 'env')
- app.add_config_value('epub_cover', (), 'env')
- app.add_config_value('epub_guide', (), 'env')
- app.add_config_value('epub_pre_files', [], 'env')
- app.add_config_value('epub_post_files', [], 'env')
- app.add_config_value('epub_exclude_files', [], 'env')
- app.add_config_value('epub_tocdepth', 3, 'env')
- app.add_config_value('epub_tocdup', True, 'env')
- app.add_config_value('epub_tocscope', 'default', 'env')
- app.add_config_value('epub_fix_images', False, 'env')
- app.add_config_value('epub_max_image_width', 0, 'env')
- app.add_config_value('epub_show_urls', 'inline', 'html')
- app.add_config_value('epub_use_index', lambda self: self.html_use_index, 'html')
-
- return {
- 'version': 'builtin',
- 'parallel_read_safe': True,
- 'parallel_write_safe': True,
- }
diff --git a/sphinx/builders/epub3.py b/sphinx/builders/epub3.py
index fb2a71b34..92c55c880 100644
--- a/sphinx/builders/epub3.py
+++ b/sphinx/builders/epub3.py
@@ -11,7 +11,6 @@
"""
from os import path
-from datetime import datetime
from collections import namedtuple
from sphinx import package_dir
@@ -19,6 +18,8 @@ from sphinx.config import string_classes, ENUM
from sphinx.builders import _epub_base
from sphinx.util import logging, xmlname_checker
from sphinx.util.fileutil import copy_asset_file
+from sphinx.util.i18n import format_date
+from sphinx.util.osutil import make_filename
if False:
# For type annotation
@@ -129,7 +130,7 @@ class Epub3Builder(_epub_base.EpubBuilder):
metadata['contributor'] = self.esc(self.config.epub_contributor)
metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode)
metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode)
- metadata['date'] = self.esc(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ"))
+ metadata['date'] = self.esc(format_date("%Y-%m-%dT%H:%M:%SZ"))
metadata['version'] = self.esc(self.config.version)
return metadata
@@ -222,12 +223,32 @@ class Epub3Builder(_epub_base.EpubBuilder):
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
-
- app.setup_extension('sphinx.builders.epub2')
-
app.add_builder(Epub3Builder)
# config values
+ app.add_config_value('epub_basename', lambda self: make_filename(self.project), None)
+ app.add_config_value('epub_theme', 'epub', 'html')
+ app.add_config_value('epub_theme_options', {}, 'html')
+ app.add_config_value('epub_title', lambda self: self.html_title, 'html')
+ app.add_config_value('epub_author', 'unknown', 'html')
+ app.add_config_value('epub_language', lambda self: self.language or 'en', 'html')
+ app.add_config_value('epub_publisher', 'unknown', 'html')
+ app.add_config_value('epub_copyright', lambda self: self.copyright, 'html')
+ app.add_config_value('epub_identifier', 'unknown', 'html')
+ app.add_config_value('epub_scheme', 'unknown', 'html')
+ app.add_config_value('epub_uid', 'unknown', 'env')
+ app.add_config_value('epub_cover', (), 'env')
+ app.add_config_value('epub_guide', (), 'env')
+ app.add_config_value('epub_pre_files', [], 'env')
+ app.add_config_value('epub_post_files', [], 'env')
+ app.add_config_value('epub_exclude_files', [], 'env')
+ app.add_config_value('epub_tocdepth', 3, 'env')
+ app.add_config_value('epub_tocdup', True, 'env')
+ app.add_config_value('epub_tocscope', 'default', 'env')
+ app.add_config_value('epub_fix_images', False, 'env')
+ app.add_config_value('epub_max_image_width', 0, 'env')
+ app.add_config_value('epub_show_urls', 'inline', 'html')
+ app.add_config_value('epub_use_index', lambda self: self.html_use_index, 'html')
app.add_config_value('epub_description', 'unknown', 'epub3', string_classes)
app.add_config_value('epub_contributor', 'unknown', 'epub3', string_classes)
app.add_config_value('epub_writing_mode', 'horizontal', 'epub3',
diff --git a/sphinx/builders/html.py b/sphinx/builders/html.py
index 68f38320b..f9c9420c2 100644
--- a/sphinx/builders/html.py
+++ b/sphinx/builders/html.py
@@ -864,9 +864,21 @@ class StandaloneHTMLBuilder(Builder):
def has_wildcard(pattern):
# type: (unicode) -> bool
return any(char in pattern for char in '*?[')
- sidebars = None
+ sidebars = self.theme.get_config('theme', 'sidebars', None)
matched = None
customsidebar = None
+
+ # default sidebars settings for selected theme
+ theme_default_sidebars = self.theme.get_config('theme', 'sidebars', None)
+ if theme_default_sidebars:
+ sidebars = [name.strip() for name in theme_default_sidebars.split(',')]
+ elif self.theme.name == 'alabaster':
+ # provide default settings for alabaster (for compatibility)
+ # Note: this will be removed before Sphinx-2.0
+ sidebars = ['about.html', 'navigation.html', 'relation.html',
+ 'searchbox.html', 'donate.html']
+
+ # user sidebar settings
for pattern, patsidebars in iteritems(self.config.html_sidebars):
if patmatch(pagename, pattern):
if matched:
@@ -881,6 +893,7 @@ class StandaloneHTMLBuilder(Builder):
continue
matched = pattern
sidebars = patsidebars
+
if sidebars is None:
# keep defaults
pass
@@ -888,6 +901,11 @@ class StandaloneHTMLBuilder(Builder):
# 0.x compatible mode: insert custom sidebar before searchbox
customsidebar = sidebars
sidebars = None
+ warnings.warn('Now html_sidebars only allows list of sidebar '
+ 'templates as a value. Support for a string value '
+ 'will be removed at Sphinx-2.0.',
+ RemovedInSphinx20Warning)
+
ctx['sidebars'] = sidebars
ctx['customsidebar'] = customsidebar
@@ -1366,7 +1384,6 @@ def setup(app):
app.add_config_value('html_static_path', [], 'html')
app.add_config_value('html_extra_path', [], 'html')
app.add_config_value('html_last_updated_fmt', None, 'html', string_classes)
- app.add_config_value('html_use_smartypants', None, 'html')
app.add_config_value('html_sidebars', {}, 'html')
app.add_config_value('html_additional_pages', {}, 'html')
app.add_config_value('html_domain_indices', True, 'html', [list])
diff --git a/sphinx/builders/latex.py b/sphinx/builders/latex.py
index d93b0ab99..8fdb2fa49 100644
--- a/sphinx/builders/latex.py
+++ b/sphinx/builders/latex.py
@@ -10,7 +10,6 @@
"""
import os
-import warnings
from os import path
from six import text_type
@@ -21,7 +20,6 @@ from docutils.utils import new_document
from docutils.frontend import OptionParser
from sphinx import package_dir, addnodes, highlighting
-from sphinx.deprecation import RemovedInSphinx17Warning
from sphinx.config import string_classes, ENUM
from sphinx.errors import SphinxError, ConfigError
from sphinx.locale import _
@@ -260,26 +258,6 @@ class LaTeXBuilder(Builder):
def validate_config_values(app):
# type: (Sphinx) -> None
- if app.config.latex_toplevel_sectioning not in (None, 'part', 'chapter', 'section'):
- logger.warning('invalid latex_toplevel_sectioning, ignored: %s',
- app.config.latex_toplevel_sectioning)
- app.config.latex_toplevel_sectioning = None # type: ignore
-
- if 'footer' in app.config.latex_elements:
- if 'postamble' in app.config.latex_elements:
- logger.warning("latex_elements['footer'] conflicts with "
- "latex_elements['postamble'], ignored.")
- else:
- warnings.warn("latex_elements['footer'] is deprecated. "
- "Use latex_elements['preamble'] instead.",
- RemovedInSphinx17Warning)
- app.config.latex_elements['postamble'] = app.config.latex_elements['footer']
-
- if app.config.latex_keep_old_macro_names:
- warnings.warn("latex_keep_old_macro_names is deprecated. "
- "LaTeX markup since Sphinx 1.4.5 uses only prefixed macro names.",
- RemovedInSphinx17Warning)
-
for document in app.config.latex_documents:
try:
text_type(document[2])
@@ -330,9 +308,9 @@ def setup(app):
None)
app.add_config_value('latex_logo', None, None, string_classes)
app.add_config_value('latex_appendices', [], None)
- app.add_config_value('latex_keep_old_macro_names', False, None)
app.add_config_value('latex_use_latex_multicolumn', False, None)
- app.add_config_value('latex_toplevel_sectioning', None, None, [str])
+ app.add_config_value('latex_toplevel_sectioning', None, None,
+ ENUM('part', 'chapter', 'section'))
app.add_config_value('latex_domain_indices', True, None, [list])
app.add_config_value('latex_show_urls', 'no', None)
app.add_config_value('latex_show_pagerefs', False, None)
diff --git a/sphinx/builders/qthelp.py b/sphinx/builders/qthelp.py
index 07efd02ab..2f56792a9 100644
--- a/sphinx/builders/qthelp.py
+++ b/sphinx/builders/qthelp.py
@@ -21,6 +21,7 @@ from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
+from sphinx.config import string_classes
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.util import force_decode, logging
from sphinx.util.osutil import make_filename
@@ -199,7 +200,11 @@ class QtHelpBuilder(StandaloneHTMLBuilder):
# it seems that the "namespace" may not contain non-alphanumeric
# characters, and more than one successive dot, or leading/trailing
# dots, are also forbidden
- nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
+ if self.config.qthelp_namespace:
+ nspace = self.config.qthelp_namespace
+ else:
+ nspace = 'org.sphinx.%s.%s' % (outname, self.config.version)
+
nspace = re.sub('[^a-zA-Z0-9.]', '', nspace)
nspace = re.sub(r'\.+', '.', nspace).strip('.')
nspace = nspace.lower()
@@ -328,6 +333,7 @@ def setup(app):
app.add_builder(QtHelpBuilder)
app.add_config_value('qthelp_basename', lambda self: make_filename(self.project), None)
+ app.add_config_value('qthelp_namespace', None, 'html', string_classes)
app.add_config_value('qthelp_theme', 'nonav', 'html')
app.add_config_value('qthelp_theme_options', {}, 'html')
diff --git a/sphinx/cmd/__init__.py b/sphinx/cmd/__init__.py
new file mode 100644
index 000000000..9ffb9e612
--- /dev/null
+++ b/sphinx/cmd/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.cmd
+ ~~~~~~~~~~
+
+ Modules for command line executables.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
diff --git a/sphinx/cmd/build.py b/sphinx/cmd/build.py
new file mode 100644
index 000000000..6c9d6e3e9
--- /dev/null
+++ b/sphinx/cmd/build.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.cmd.build
+ ~~~~~~~~~~~~~~~~
+
+ Build documentation from a provided source.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+
+if False:
+ # For type annotation
+ from typing import List # NOQA
+
+
+def build_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
+ """Sphinx build "main" command-line entry."""
+ from sphinx import cmdline
+ return cmdline.main(argv) # type: ignore
+
+
+def make_main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
+ """Sphinx build "make mode" entry."""
+ from sphinx import make_mode
+ return make_mode.run_make_mode(argv[1:]) # type: ignore
+
+
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
+ if sys.argv[1:2] == ['-M']:
+ return make_main(argv)
+ else:
+ return build_main(argv)
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/sphinx/cmd/quickstart.py b/sphinx/cmd/quickstart.py
new file mode 100644
index 000000000..3051520cb
--- /dev/null
+++ b/sphinx/cmd/quickstart.py
@@ -0,0 +1,668 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.cmd.quickstart
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Quickly setup documentation source to work with Sphinx.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from __future__ import print_function
+from __future__ import absolute_import
+
+import argparse
+import os
+import re
+import sys
+import time
+from collections import OrderedDict
+from io import open
+from os import path
+
+# try to import readline, unix specific enhancement
+try:
+ import readline
+ if readline.__doc__ and 'libedit' in readline.__doc__:
+ readline.parse_and_bind("bind ^I rl_complete")
+ else:
+ readline.parse_and_bind("tab: complete")
+except ImportError:
+ pass
+
+from six import PY2, PY3, text_type, binary_type
+from six.moves import input
+from six.moves.urllib.parse import quote as urlquote
+from docutils.utils import column_width
+
+from sphinx import __display_version__, package_dir
+from sphinx.util.osutil import ensuredir, make_filename
+from sphinx.util.console import ( # type: ignore
+ purple, bold, red, turquoise, nocolor, color_terminal
+)
+from sphinx.util.template import SphinxRenderer
+from sphinx.util import texescape
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, List, Pattern, Union # NOQA
+
+TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
+
+EXTENSIONS = OrderedDict([
+ ('autodoc', 'automatically insert docstrings from modules'),
+ ('doctest', 'automatically test code snippets in doctest blocks'),
+ ('intersphinx', 'link between Sphinx documentation of different projects'),
+ ('todo', 'write "todo" entries that can be shown or hidden on build'),
+ ('coverage', 'checks for documentation coverage'),
+ ('imgmath', 'include math, rendered as PNG or SVG images'),
+ ('mathjax', 'include math, rendered in the browser by MathJax'),
+ ('ifconfig', 'conditional inclusion of content based on config values'),
+ ('viewcode',
+ 'include links to the source code of documented Python objects'),
+ ('githubpages',
+ 'create .nojekyll file to publish the document on GitHub pages'),
+])
+
+DEFAULTS = {
+ 'path': '.',
+ 'sep': False,
+ 'dot': '_',
+ 'language': None,
+ 'suffix': '.rst',
+ 'master': 'index',
+ 'epub': False,
+ 'makefile': True,
+ 'batchfile': True,
+}
+
+PROMPT_PREFIX = '> '
+
+
+# function to get input from terminal -- overridden by the test suite
+def term_input(prompt):
+ # type: (unicode) -> unicode
+ print(prompt, end='')
+ return input('')
+
+
+class ValidationError(Exception):
+ """Raised for validation errors."""
+
+
+def is_path(x):
+ # type: (unicode) -> unicode
+ x = path.expanduser(x)
+ if path.exists(x) and not path.isdir(x):
+ raise ValidationError("Please enter a valid path name.")
+ return x
+
+
+def allow_empty(x):
+ # type: (unicode) -> unicode
+ return x
+
+
+def nonempty(x):
+ # type: (unicode) -> unicode
+ if not x:
+ raise ValidationError("Please enter some text.")
+ return x
+
+
+def choice(*l):
+ # type: (unicode) -> Callable[[unicode], unicode]
+ def val(x):
+ # type: (unicode) -> unicode
+ if x not in l:
+ raise ValidationError('Please enter one of %s.' % ', '.join(l))
+ return x
+ return val
+
+
+def boolean(x):
+ # type: (unicode) -> bool
+ if x.upper() not in ('Y', 'YES', 'N', 'NO'):
+ raise ValidationError("Please enter either 'y' or 'n'.")
+ return x.upper() in ('Y', 'YES')
+
+
+def suffix(x):
+ # type: (unicode) -> unicode
+ if not (x[0:1] == '.' and len(x) > 1):
+ raise ValidationError("Please enter a file suffix, "
+ "e.g. '.rst' or '.txt'.")
+ return x
+
+
+def ok(x):
+ # type: (unicode) -> unicode
+ return x
+
+
+def term_decode(text):
+ # type: (Union[bytes,unicode]) -> unicode
+ if isinstance(text, text_type):
+ return text
+
+ # Use the known encoding, if possible
+ if TERM_ENCODING:
+ return text.decode(TERM_ENCODING)
+
+ # If ascii is safe, use it with no warning
+ if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
+ return text.decode('ascii')
+
+ print(turquoise('* Note: non-ASCII characters entered '
+ 'and terminal encoding unknown -- assuming '
+ 'UTF-8 or Latin-1.'))
+ try:
+ return text.decode('utf-8')
+ except UnicodeDecodeError:
+ return text.decode('latin1')
+
+
+def do_prompt(text, default=None, validator=nonempty):
+ # type: (unicode, unicode, Callable[[unicode], Any]) -> Union[unicode, bool]
+ while True:
+ if default is not None:
+ prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
+ else:
+ prompt = PROMPT_PREFIX + text + ': '
+ if PY2:
+ # for Python 2.x, try to get a Unicode string out of it
+ if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
+ != prompt:
+ if TERM_ENCODING:
+ prompt = prompt.encode(TERM_ENCODING)
+ else:
+ print(turquoise('* Note: non-ASCII default value provided '
+ 'and terminal encoding unknown -- assuming '
+ 'UTF-8 or Latin-1.'))
+ try:
+ prompt = prompt.encode('utf-8')
+ except UnicodeEncodeError:
+ prompt = prompt.encode('latin1')
+ prompt = purple(prompt)
+ x = term_input(prompt).strip()
+ if default and not x:
+ x = default
+ x = term_decode(x)
+ try:
+ x = validator(x)
+ except ValidationError as err:
+ print(red('* ' + str(err)))
+ continue
+ break
+ return x
+
+
+def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
+ # type: (unicode, Pattern) -> unicode
+ # remove Unicode literal prefixes
+ if PY3:
+ return rex.sub('\\1', source)
+ else:
+ return source
+
+
+class QuickstartRenderer(SphinxRenderer):
+ def __init__(self, templatedir):
+ # type: (unicode) -> None
+ self.templatedir = templatedir or ''
+ super(QuickstartRenderer, self).__init__()
+
+ def render(self, template_name, context):
+ # type: (unicode, Dict) -> unicode
+ user_template = path.join(self.templatedir, path.basename(template_name))
+ if self.templatedir and path.exists(user_template):
+ return self.render_from_file(user_template, context)
+ else:
+ return super(QuickstartRenderer, self).render(template_name, context)
+
+
+def ask_user(d):
+ # type: (Dict) -> None
+ """Ask the user for quickstart values missing from *d*.
+
+ Values are:
+
+ * path: root path
+ * sep: separate source and build dirs (bool)
+ * dot: replacement for dot in _templates etc.
+ * project: project name
+ * author: author names
+ * version: version of project
+ * release: release of project
+ * language: document language
+ * suffix: source file suffix
+ * master: master document name
+ * epub: use epub (bool)
+ * extensions: extensions to use (list)
+ * makefile: make Makefile
+ * batchfile: make command file
+ """
+
+ print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
+ print('''
+Please enter values for the following settings (just press Enter to
+accept a default value, if one is given in brackets).''')
+
+ if 'path' in d:
+ print(bold('''
+Selected root path: %s''' % d['path']))
+ else:
+ print('''
+Enter the root path for documentation.''')
+ d['path'] = do_prompt('Root path for the documentation', '.', is_path)
+
+ while path.isfile(path.join(d['path'], 'conf.py')) or \
+ path.isfile(path.join(d['path'], 'source', 'conf.py')):
+ print()
+ print(bold('Error: an existing conf.py has been found in the '
+ 'selected root path.'))
+ print('sphinx-quickstart will not overwrite existing Sphinx projects.')
+ print()
+ d['path'] = do_prompt('Please enter a new root path (or just Enter '
+ 'to exit)', '', is_path)
+ if not d['path']:
+ sys.exit(1)
+
+ if 'sep' not in d:
+ print('''
+You have two options for placing the build directory for Sphinx output.
+Either, you use a directory "_build" within the root path, or you separate
+"source" and "build" directories within the root path.''')
+ d['sep'] = do_prompt('Separate source and build directories (y/n)',
+ 'n', boolean)
+
+ if 'dot' not in d:
+ print('''
+Inside the root directory, two more directories will be created; "_templates"
+for custom HTML templates and "_static" for custom stylesheets and other static
+files. You can enter another prefix (such as ".") to replace the underscore.''')
+ d['dot'] = do_prompt('Name prefix for templates and static dir', '_', ok)
+
+ if 'project' not in d:
+ print('''
+The project name will occur in several places in the built documentation.''')
+ d['project'] = do_prompt('Project name')
+ if 'author' not in d:
+ d['author'] = do_prompt('Author name(s)')
+
+ if 'version' not in d:
+ print('''
+Sphinx has the notion of a "version" and a "release" for the
+software. Each version can have multiple releases. For example, for
+Python the version is something like 2.5 or 3.0, while the release is
+something like 2.5.1 or 3.0a1. If you don't need this dual structure,
+just set both to the same value.''')
+ d['version'] = do_prompt('Project version', '', allow_empty)
+ if 'release' not in d:
+ d['release'] = do_prompt('Project release', d['version'], allow_empty)
+
+ if 'language' not in d:
+ print('''
+If the documents are to be written in a language other than English,
+you can select a language here by its language code. Sphinx will then
+translate text that it generates into that language.
+
+For a list of supported codes, see
+http://sphinx-doc.org/config.html#confval-language.''')
+ d['language'] = do_prompt('Project language', 'en')
+ if d['language'] == 'en':
+ d['language'] = None
+
+ if 'suffix' not in d:
+ print('''
+The file name suffix for source files. Commonly, this is either ".txt"
+or ".rst". Only files with this suffix are considered documents.''')
+ d['suffix'] = do_prompt('Source file suffix', '.rst', suffix)
+
+ if 'master' not in d:
+ print('''
+One document is special in that it is considered the top node of the
+"contents tree", that is, it is the root of the hierarchical structure
+of the documents. Normally, this is "index", but if your "index"
+document is a custom template, you can also set this to another filename.''')
+ d['master'] = do_prompt('Name of your master document (without suffix)',
+ 'index')
+
+ while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
+ path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
+ print()
+ print(bold('Error: the master file %s has already been found in the '
+ 'selected root path.' % (d['master'] + d['suffix'])))
+ print('sphinx-quickstart will not overwrite the existing file.')
+ print()
+ d['master'] = do_prompt('Please enter a new file name, or rename the '
+ 'existing file and press Enter', d['master'])
+
+ if 'epub' not in d:
+ print('''
+Sphinx can also add configuration for epub output:''')
+ d['epub'] = do_prompt('Do you want to use the epub builder (y/n)',
+ 'n', boolean)
+
+ if 'extensions' not in d:
+ print('Indicate which of the following Sphinx extensions should be '
+ 'enabled:')
+ d['extensions'] = []
+ for name, description in EXTENSIONS.items():
+ if do_prompt('%s: %s (y/n)' % (name, description), 'n', boolean):
+ d['extensions'].append('sphinx.ext.%s' % name)
+
+ # Handle conflicting options
+ if set(['sphinx.ext.imgmath', 'sphinx.ext.mathjax']).issubset(
+ d['extensions']):
+ print('Note: imgmath and mathjax cannot be enabled at the same '
+ 'time. imgmath has been deselected.')
+ d['extensions'].remove('sphinx.ext.imgmath')
+
+ if 'makefile' not in d:
+ print('''
+A Makefile and a Windows command file can be generated for you so that you
+only have to run e.g. `make html' instead of invoking sphinx-build
+directly.''')
+ d['makefile'] = do_prompt('Create Makefile? (y/n)', 'y', boolean)
+
+ if 'batchfile' not in d:
+ d['batchfile'] = do_prompt('Create Windows command file? (y/n)',
+ 'y', boolean)
+ print()
+
+
+def generate(d, overwrite=True, silent=False, templatedir=None):
+ # type: (Dict, bool, bool, unicode) -> None
+ """Generate project based on values in *d*."""
+ template = QuickstartRenderer(templatedir=templatedir)
+
+ texescape.init()
+
+ if 'mastertoctree' not in d:
+ d['mastertoctree'] = ''
+ if 'mastertocmaxdepth' not in d:
+ d['mastertocmaxdepth'] = 2
+
+ d['PY3'] = PY3
+ d['project_fn'] = make_filename(d['project'])
+ d['project_url'] = urlquote(d['project'].encode('idna'))
+ d['project_manpage'] = d['project_fn'].lower()
+ d['now'] = time.asctime()
+ d['project_underline'] = column_width(d['project']) * '='
+ d.setdefault('extensions', [])
+ d['copyright'] = time.strftime('%Y') + ', ' + d['author']
+ d['author_texescaped'] = text_type(d['author']).\
+ translate(texescape.tex_escape_map)
+ d['project_doc'] = d['project'] + ' Documentation'
+ d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
+ translate(texescape.tex_escape_map)
+
+ # escape backslashes and single quotes in strings that are put into
+ # a Python string literal
+ for key in ('project', 'project_doc', 'project_doc_texescaped',
+ 'author', 'author_texescaped', 'copyright',
+ 'version', 'release', 'master'):
+ d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
+
+ if not path.isdir(d['path']):
+ ensuredir(d['path'])
+
+ srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
+
+ ensuredir(srcdir)
+ if d['sep']:
+ builddir = path.join(d['path'], 'build')
+ d['exclude_patterns'] = ''
+ else:
+ builddir = path.join(srcdir, d['dot'] + 'build')
+ exclude_patterns = map(repr, [
+ d['dot'] + 'build',
+ 'Thumbs.db', '.DS_Store',
+ ])
+ d['exclude_patterns'] = ', '.join(exclude_patterns)
+ ensuredir(builddir)
+ ensuredir(path.join(srcdir, d['dot'] + 'templates'))
+ ensuredir(path.join(srcdir, d['dot'] + 'static'))
+
+ def write_file(fpath, content, newline=None):
+ # type: (unicode, unicode, unicode) -> None
+ if overwrite or not path.isfile(fpath):
+ if 'quiet' not in d:
+ print('Creating file %s.' % fpath)
+ with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
+ f.write(content)
+ else:
+ if 'quiet' not in d:
+ print('File %s already exists, skipping.' % fpath)
+
+ conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
+ if not conf_path or not path.isfile(conf_path):
+ conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
+ with open(conf_path) as f:
+ conf_text = convert_python_source(f.read())
+
+ write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
+
+ masterfile = path.join(srcdir, d['master'] + d['suffix'])
+ write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
+
+ if d.get('make_mode') is True:
+ makefile_template = 'quickstart/Makefile.new_t'
+ batchfile_template = 'quickstart/make.bat.new_t'
+ else:
+ makefile_template = 'quickstart/Makefile_t'
+ batchfile_template = 'quickstart/make.bat_t'
+
+ if d['makefile'] is True:
+ d['rsrcdir'] = d['sep'] and 'source' or '.'
+ d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
+ # use binary mode, to avoid writing \r\n on Windows
+ write_file(path.join(d['path'], 'Makefile'),
+ template.render(makefile_template, d), u'\n')
+
+ if d['batchfile'] is True:
+ d['rsrcdir'] = d['sep'] and 'source' or '.'
+ d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
+ write_file(path.join(d['path'], 'make.bat'),
+ template.render(batchfile_template, d), u'\r\n')
+
+ if silent:
+ return
+ print()
+ print(bold('Finished: An initial directory structure has been created.'))
+ print('''
+You should now populate your master file %s and create other documentation
+source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
+Use the Makefile to build the docs, like so:
+ make builder
+''' or '''\
+Use the sphinx-build command to build the docs, like so:
+ sphinx-build -b builder %s %s
+''' % (srcdir, builddir)) + '''\
+where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
+''')
+
+
+def valid_dir(d):
+ # type: (Dict) -> bool
+ dir = d['path']
+ if not path.exists(dir):
+ return True
+ if not path.isdir(dir):
+ return False
+
+ if set(['Makefile', 'make.bat']) & set(os.listdir(dir)): # type: ignore
+ return False
+
+ if d['sep']:
+ dir = os.path.join('source', dir)
+ if not path.exists(dir):
+ return True
+ if not path.isdir(dir):
+ return False
+
+ reserved_names = [
+ 'conf.py',
+ d['dot'] + 'static',
+ d['dot'] + 'templates',
+ d['master'] + d['suffix'],
+ ]
+ if set(reserved_names) & set(os.listdir(dir)): # type: ignore
+ return False
+
+ return True
+
+
+def get_parser():
+ # type: () -> argparse.ArgumentParser
+ parser = argparse.ArgumentParser(
+ usage='%(prog)s [OPTIONS] <PROJECT_DIR>',
+ epilog="For more information, visit <http://sphinx-doc.org/>.",
+ description="""
+Generate required files for a Sphinx project.
+
+sphinx-quickstart is an interactive tool that asks some questions about your
+project and then generates a complete documentation directory and sample
+Makefile to be used with sphinx-build.
+""")
+
+ parser.add_argument('-q', '--quiet', action='store_true', dest='quiet',
+ default=False,
+ help='quiet mode')
+ parser.add_argument('--version', action='version', dest='show_version',
+ version='%%(prog)s %s' % __display_version__)
+
+ parser.add_argument('path', metavar='PROJECT_DIR', default='.',
+ help='output path')
+
+ group = parser.add_argument_group('Structure options')
+ group.add_argument('--sep', action='store_true',
+ help='if specified, separate source and build dirs')
+ group.add_argument('--dot', metavar='DOT',
+ help='replacement for dot in _templates etc.')
+
+ group = parser.add_argument_group('Project basic options')
+ group.add_argument('-p', '--project', metavar='PROJECT', dest='project',
+ help='project name')
+ group.add_argument('-a', '--author', metavar='AUTHOR', dest='author',
+ help='author names')
+ group.add_argument('-v', metavar='VERSION', dest='version', default='',
+ help='version of project')
+ group.add_argument('-r', '--release', metavar='RELEASE', dest='release',
+ help='release of project')
+ group.add_argument('-l', '--language', metavar='LANGUAGE', dest='language',
+ help='document language')
+ group.add_argument('--suffix', metavar='SUFFIX',
+ help='source file suffix')
+ group.add_argument('--master', metavar='MASTER',
+ help='master document name')
+ group.add_argument('--epub', action='store_true', default=False,
+ help='use epub')
+
+ group = parser.add_argument_group('Extension options')
+ for ext in EXTENSIONS:
+ group.add_argument('--ext-%s' % ext, action='append_const',
+ const='sphinx.ext.%s' % ext, dest='extensions',
+ help='enable %s extension' % ext)
+ group.add_argument('--extensions', metavar='EXTENSIONS', dest='extensions',
+ action='append', help='enable arbitrary extensions')
+
+ group = parser.add_argument_group('Makefile and Batchfile creation')
+ group.add_argument('--makefile', action='store_true', dest='makefile',
+ help='create makefile')
+ group.add_argument('--no-makefile', action='store_false', dest='makefile',
+ help='do not create makefile')
+ group.add_argument('--batchfile', action='store_true', dest='batchfile',
+ help='create batchfile')
+ group.add_argument('--no-batchfile', action='store_false',
+ dest='batchfile',
+ help='do not create batchfile')
+ group.add_argument('-m', '--use-make-mode', action='store_true',
+ dest='make_mode', default=True,
+ help='use make-mode for Makefile/make.bat')
+ group.add_argument('-M', '--no-use-make-mode', action='store_false',
+ dest='make_mode',
+ help='do not use make-mode for Makefile/make.bat')
+
+ group = parser.add_argument_group('Project templating')
+ group.add_argument('-t', '--templatedir', metavar='TEMPLATEDIR',
+ dest='templatedir',
+ help='template directory for template files')
+ group.add_argument('-d', metavar='NAME=VALUE', action='append',
+ dest='variables',
+ help='define a template variable')
+
+ return parser
+
+
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
+ if not color_terminal():
+ nocolor()
+
+ # parse options
+ parser = get_parser()
+ try:
+ args = parser.parse_args(argv)
+ except SystemExit as err:
+ return err.code
+
+ d = vars(args)
+ # delete None or False value
+ d = dict((k, v) for k, v in d.items() if not (v is None or v is False))
+
+ try:
+ if 'quiet' in d:
+ if not set(['project', 'author']).issubset(d):
+                print('''"quiet" is specified, but "project" or \
+"author" is missing.''')
+ return 1
+
+ if set(['quiet', 'project', 'author']).issubset(d):
+ # quiet mode with all required params satisfied, use default
+ d.setdefault('version', '')
+ d.setdefault('release', d['version'])
+ d2 = DEFAULTS.copy()
+ d2.update(d)
+ d = d2
+
+ if not valid_dir(d):
+ print()
+ print(bold('Error: specified path is not a directory, or sphinx'
+ ' files already exist.'))
+                print('sphinx-quickstart only generates into an empty'
+                      ' directory. Please specify a new root path.')
+ return 1
+ else:
+ ask_user(d)
+ except (KeyboardInterrupt, EOFError):
+ print()
+ print('[Interrupted.]')
+ return 130 # 128 + SIGINT
+
+ # decode values in d if value is a Python string literal
+ for key, value in d.items():
+ if isinstance(value, binary_type):
+ d[key] = term_decode(value)
+
+ # handle use of CSV-style extension values
+ d.setdefault('extensions', [])
+ for ext in d['extensions'][:]:
+ if ',' in ext:
+ d['extensions'].remove(ext)
+ d['extensions'].extend(ext.split(','))
+
+ for variable in d.get('variables', []):
+ try:
+ name, value = variable.split('=')
+ d[name] = value
+ except ValueError:
+ print('Invalid template variable: %s' % variable)
+
+ generate(d, templatedir=args.templatedir)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/sphinx/cmdline.py b/sphinx/cmdline.py
index c9a0dd4c6..779ba142d 100644
--- a/sphinx/cmdline.py
+++ b/sphinx/cmdline.py
@@ -10,14 +10,13 @@
"""
from __future__ import print_function
+import argparse
import sys
-import optparse
import traceback
from os import path
-from six import text_type, binary_type
-
from docutils.utils import SystemMessage
+from six import text_type, binary_type
from sphinx import __display_version__
from sphinx.errors import SphinxError
@@ -33,39 +32,9 @@ if False:
from typing import Any, IO, List, Union # NOQA
-USAGE = """\
-Sphinx v%s
-Usage: %%prog [options] sourcedir outdir [filenames...]
-
-Filename arguments:
- without -a and without filenames, write new and changed files.
- with -a, write all files.
- with filenames, write these.
-""" % __display_version__
-
-EPILOG = """\
-For more information, visit <http://sphinx-doc.org/>.
-"""
-
-
-class MyFormatter(optparse.IndentedHelpFormatter):
- def format_usage(self, usage):
- # type: (Any) -> Any
- return usage
-
- def format_help(self, formatter):
- # type: (Any) -> unicode
- result = [] # type: List[unicode]
- if self.description: # type: ignore
- result.append(self.format_description(formatter))
- if self.option_list: # type: ignore
- result.append(self.format_option_help(formatter)) # type: ignore
- return "\n".join(result)
-
-
-def handle_exception(app, opts, exception, stderr=sys.stderr):
+def handle_exception(app, args, exception, stderr=sys.stderr):
# type: (Sphinx, Any, Union[Exception, KeyboardInterrupt], IO) -> None
- if opts.pdb:
+ if args.pdb:
import pdb
print(red('Exception occurred while building, starting debugger:'),
file=stderr)
@@ -73,7 +42,7 @@ def handle_exception(app, opts, exception, stderr=sys.stderr):
pdb.post_mortem(sys.exc_info()[2])
else:
print(file=stderr)
- if opts.verbosity or opts.traceback:
+ if args.verbosity or args.traceback:
traceback.print_exc(None, stderr)
print(file=stderr)
if isinstance(exception, KeyboardInterrupt):
@@ -114,119 +83,135 @@ def handle_exception(app, opts, exception, stderr=sys.stderr):
file=stderr)
-def main(argv):
+def get_parser():
+ # type: () -> argparse.ArgumentParser
+ parser = argparse.ArgumentParser(
+ usage='usage: %(prog)s [OPTIONS] SOURCEDIR OUTPUTDIR [FILENAMES...]',
+ epilog='For more information, visit <http://sphinx-doc.org/>.',
+ description="""
+Generate documentation from source files.
+
+sphinx-build generates documentation from the files in SOURCEDIR and places it
+in OUTPUTDIR. It looks for 'conf.py' in SOURCEDIR for the configuration
+settings. The 'sphinx-quickstart' tool may be used to generate template files,
+including 'conf.py'.
+
+sphinx-build can create documentation in different formats. A format is
+selected by specifying the builder name on the command line; it defaults to
+HTML. Builders can also perform other tasks related to documentation
+processing.
+
+By default, everything that is outdated is built. Output only for selected
+files can be built by specifying individual filenames.
+""")
+
+ parser.add_argument('--version', action='version', dest='show_version',
+ version='%%(prog)s %s' % __display_version__)
+
+ parser.add_argument('sourcedir',
+ help='path to documentation source files')
+ parser.add_argument('outputdir',
+ help='path to output directory')
+ parser.add_argument('filenames', nargs='*',
+ help='a list of specific files to rebuild. Ignored '
+ 'if -a is specified')
+
+ group = parser.add_argument_group('general options')
+ group.add_argument('-b', metavar='BUILDER', dest='builder',
+ default='html',
+ help='builder to use (default: html)')
+ group.add_argument('-a', action='store_true', dest='force_all',
+ help='write all files (default: only write new and '
+ 'changed files)')
+ group.add_argument('-E', action='store_true', dest='freshenv',
+ help='don\'t use a saved environment, always read '
+ 'all files')
+ group.add_argument('-d', metavar='PATH', dest='doctreedir',
+ help='path for the cached environment and doctree '
+ 'files (default: OUTPUTDIR/.doctrees)')
+ group.add_argument('-j', metavar='N', default=1, type=int, dest='jobs',
+ help='build in parallel with N processes where '
+ 'possible')
+
+ group = parser.add_argument_group('build configuration options')
+ group.add_argument('-c', metavar='PATH', dest='confdir',
+ help='path where configuration file (conf.py) is '
+ 'located (default: same as SOURCEDIR)')
+ group.add_argument('-C', action='store_true', dest='noconfig',
+ help='use no config file at all, only -D options')
+ group.add_argument('-D', metavar='setting=value', action='append',
+ dest='define', default=[],
+ help='override a setting in configuration file')
+ group.add_argument('-A', metavar='name=value', action='append',
+ dest='htmldefine', default=[],
+ help='pass a value into HTML templates')
+ group.add_argument('-t', metavar='TAG', action='append',
+ dest='tags', default=[],
+ help='define tag: include "only" blocks with TAG')
+ group.add_argument('-n', action='store_true', dest='nitpicky',
+ help='nit-picky mode, warn about all missing '
+ 'references')
+
+ group = parser.add_argument_group('console output options')
+ group.add_argument('-v', action='count', dest='verbosity', default=0,
+ help='increase verbosity (can be repeated)')
+ group.add_argument('-q', action='store_true', dest='quiet',
+ help='no output on stdout, just warnings on stderr')
+ group.add_argument('-Q', action='store_true', dest='really_quiet',
+ help='no output at all, not even warnings')
+ group.add_argument('--color', action='store_const', const='yes',
+ default='auto',
+ help='do emit colored output (default: auto-detect)')
+ group.add_argument('-N', '--no-color', dest='color', action='store_const',
+ const='no',
+ help='do not emit colored output (default: '
+ 'auto-detect)')
+ group.add_argument('-w', metavar='FILE', dest='warnfile',
+ help='write warnings (and errors) to given file')
+ group.add_argument('-W', action='store_true', dest='warningiserror',
+ help='turn warnings into errors')
+ group.add_argument('-T', action='store_true', dest='traceback',
+ help='show full traceback on exception')
+ group.add_argument('-P', action='store_true', dest='pdb',
+ help='run Pdb on exception')
+
+ return parser
+
+
+def main(argv=sys.argv[1:]): # type: ignore
# type: (List[unicode]) -> int
- parser = optparse.OptionParser(USAGE, epilog=EPILOG, formatter=MyFormatter())
- parser.add_option('--version', action='store_true', dest='version',
- help='show version information and exit')
-
- group = parser.add_option_group('General options')
- group.add_option('-b', metavar='BUILDER', dest='builder', default='html',
- help='builder to use; default is html')
- group.add_option('-a', action='store_true', dest='force_all',
- help='write all files; default is to only write new and '
- 'changed files')
- group.add_option('-E', action='store_true', dest='freshenv',
- help='don\'t use a saved environment, always read '
- 'all files')
- group.add_option('-d', metavar='PATH', default=None, dest='doctreedir',
- help='path for the cached environment and doctree files '
- '(default: outdir/.doctrees)')
- group.add_option('-j', metavar='N', default=1, type='int', dest='jobs',
- help='build in parallel with N processes where possible')
- # this option never gets through to this point (it is intercepted earlier)
- # group.add_option('-M', metavar='BUILDER', dest='make_mode',
- # help='"make" mode -- as used by Makefile, like '
- # '"sphinx-build -M html"')
-
- group = parser.add_option_group('Build configuration options')
- group.add_option('-c', metavar='PATH', dest='confdir',
- help='path where configuration file (conf.py) is located '
- '(default: same as sourcedir)')
- group.add_option('-C', action='store_true', dest='noconfig',
- help='use no config file at all, only -D options')
- group.add_option('-D', metavar='setting=value', action='append',
- dest='define', default=[],
- help='override a setting in configuration file')
- group.add_option('-A', metavar='name=value', action='append',
- dest='htmldefine', default=[],
- help='pass a value into HTML templates')
- group.add_option('-t', metavar='TAG', action='append',
- dest='tags', default=[],
- help='define tag: include "only" blocks with TAG')
- group.add_option('-n', action='store_true', dest='nitpicky',
- help='nit-picky mode, warn about all missing references')
-
- group = parser.add_option_group('Console output options')
- group.add_option('-v', action='count', dest='verbosity', default=0,
- help='increase verbosity (can be repeated)')
- group.add_option('-q', action='store_true', dest='quiet',
- help='no output on stdout, just warnings on stderr')
- group.add_option('-Q', action='store_true', dest='really_quiet',
- help='no output at all, not even warnings')
- group.add_option('--color', dest='color',
- action='store_const', const='yes', default='auto',
- help='Do emit colored output (default: auto-detect)')
- group.add_option('-N', '--no-color', dest='color',
- action='store_const', const='no',
- help='Do not emit colored output (default: auto-detect)')
- group.add_option('-w', metavar='FILE', dest='warnfile',
- help='write warnings (and errors) to given file')
- group.add_option('-W', action='store_true', dest='warningiserror',
- help='turn warnings into errors')
- group.add_option('-T', action='store_true', dest='traceback',
- help='show full traceback on exception')
- group.add_option('-P', action='store_true', dest='pdb',
- help='run Pdb on exception')
-
- # parse options
- try:
- opts, args = parser.parse_args(list(argv[1:]))
- except SystemExit as err:
- return err.code
- # handle basic options
- if opts.version:
- print('Sphinx (sphinx-build) %s' % __display_version__)
- return 0
+ parser = get_parser()
+ args = parser.parse_args(argv)
# get paths (first and second positional argument)
try:
- srcdir = abspath(args[0])
- confdir = abspath(opts.confdir or srcdir)
- if opts.noconfig:
+ srcdir = abspath(args.sourcedir)
+ confdir = abspath(args.confdir or srcdir)
+ if args.noconfig:
confdir = None
+
if not path.isdir(srcdir):
- print('Error: Cannot find source directory `%s\'.' % srcdir,
- file=sys.stderr)
- return 1
- if not opts.noconfig and not path.isfile(path.join(confdir, 'conf.py')):
- print('Error: Config directory doesn\'t contain a conf.py file.',
- file=sys.stderr)
- return 1
- outdir = abspath(args[1])
+ parser.error('cannot find source directory (%s)' % srcdir)
+ if not args.noconfig and not path.isfile(path.join(confdir, 'conf.py')):
+ parser.error("config directory doesn't contain a conf.py file "
+ "(%s)" % confdir)
+
+ outdir = abspath(args.outputdir)
if srcdir == outdir:
- print('Error: source directory and destination directory are same.',
- file=sys.stderr)
- return 1
- except IndexError:
- parser.print_help()
- return 1
+ parser.error('source directory and destination directory are same')
except UnicodeError:
- print(
- 'Error: Multibyte filename not supported on this filesystem '
- 'encoding (%r).' % fs_encoding, file=sys.stderr)
- return 1
+ parser.error('multibyte filename not supported on this filesystem '
+ 'encoding (%r)' % fs_encoding)
# handle remaining filename arguments
- filenames = args[2:]
- errored = False
+ filenames = args.filenames
+ missing_files = []
for filename in filenames:
if not path.isfile(filename):
- print('Error: Cannot find file %r.' % filename, file=sys.stderr)
- errored = True
- if errored:
- return 1
+ missing_files.append(filename)
+ if missing_files:
+ parser.error('cannot find files %r' % missing_files)
# likely encoding used for command-line arguments
try:
@@ -235,41 +220,39 @@ def main(argv):
except Exception:
likely_encoding = None
- if opts.force_all and filenames:
- print('Error: Cannot combine -a option and filenames.', file=sys.stderr)
- return 1
+ if args.force_all and filenames:
+ parser.error('cannot combine -a option and filenames')
- if opts.color == 'no' or (opts.color == 'auto' and not color_terminal()):
+ if args.color == 'no' or (args.color == 'auto' and not color_terminal()):
nocolor()
- doctreedir = abspath(opts.doctreedir or path.join(outdir, '.doctrees'))
+ doctreedir = abspath(args.doctreedir or path.join(outdir, '.doctrees'))
status = sys.stdout
warning = sys.stderr
error = sys.stderr
- if opts.quiet:
+ if args.quiet:
status = None
- if opts.really_quiet:
+
+ if args.really_quiet:
status = warning = None
- if warning and opts.warnfile:
+
+ if warning and args.warnfile:
try:
- warnfp = open(opts.warnfile, 'w')
+ warnfp = open(args.warnfile, 'w')
except Exception as exc:
- print('Error: Cannot open warning file %r: %s' %
- (opts.warnfile, exc), file=sys.stderr)
- sys.exit(1)
+ parser.error('cannot open warning file %r: %s' % (
+ args.warnfile, exc))
warning = Tee(warning, warnfp) # type: ignore
error = warning
confoverrides = {}
- for val in opts.define:
+ for val in args.define:
try:
key, val = val.split('=', 1)
except ValueError:
- print('Error: -D option argument must be in the form name=value.',
- file=sys.stderr)
- return 1
+ parser.error('-D option argument must be in the form name=value')
if likely_encoding and isinstance(val, binary_type):
try:
val = val.decode(likely_encoding)
@@ -277,13 +260,11 @@ def main(argv):
pass
confoverrides[key] = val
- for val in opts.htmldefine:
+ for val in args.htmldefine:
try:
key, val = val.split('=')
except ValueError:
- print('Error: -A option argument must be in the form name=value.',
- file=sys.stderr)
- return 1
+ parser.error('-A option argument must be in the form name=value')
try:
val = int(val)
except ValueError:
@@ -294,17 +275,17 @@ def main(argv):
pass
confoverrides['html_context.%s' % key] = val
- if opts.nitpicky:
+ if args.nitpicky:
confoverrides['nitpicky'] = True
app = None
try:
with patch_docutils(), docutils_namespace():
- app = Sphinx(srcdir, confdir, outdir, doctreedir, opts.builder,
- confoverrides, status, warning, opts.freshenv,
- opts.warningiserror, opts.tags, opts.verbosity, opts.jobs)
- app.build(opts.force_all, filenames)
+ app = Sphinx(srcdir, confdir, outdir, doctreedir, args.builder,
+ confoverrides, status, warning, args.freshenv,
+ args.warningiserror, args.tags, args.verbosity, args.jobs)
+ app.build(args.force_all, filenames)
return app.statuscode
except (Exception, KeyboardInterrupt) as exc:
- handle_exception(app, opts, exc, error)
- return 1
+ handle_exception(app, args, exc, error)
+ return 2
diff --git a/sphinx/config.py b/sphinx/config.py
index d3468b0a5..49422427f 100644
--- a/sphinx/config.py
+++ b/sphinx/config.py
@@ -24,7 +24,7 @@ from sphinx.util.pycompat import execfile_, NoneType
if False:
# For type annotation
- from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple # NOQA
+ from typing import Any, Callable, Dict, Iterable, Iterator, List, Tuple, Union # NOQA
from sphinx.util.tags import Tags # NOQA
logger = logging.getLogger(__name__)
@@ -63,8 +63,11 @@ class ENUM(object):
self.candidates = candidates
def match(self, value):
- # type: (unicode) -> bool
- return value in self.candidates
+ # type: (Union[unicode,List,Tuple]) -> bool
+ if isinstance(value, (list, tuple)):
+ return all(item in self.candidates for item in value)
+ else:
+ return value in self.candidates
string_classes = [text_type] # type: List
diff --git a/sphinx/deprecation.py b/sphinx/deprecation.py
index 93395fa7d..e28e0f916 100644
--- a/sphinx/deprecation.py
+++ b/sphinx/deprecation.py
@@ -10,11 +10,11 @@
"""
-class RemovedInSphinx17Warning(DeprecationWarning):
+class RemovedInSphinx18Warning(DeprecationWarning):
pass
-class RemovedInSphinx18Warning(PendingDeprecationWarning):
+class RemovedInSphinx19Warning(PendingDeprecationWarning):
pass
@@ -22,4 +22,4 @@ class RemovedInSphinx20Warning(PendingDeprecationWarning):
pass
-RemovedInNextVersionWarning = RemovedInSphinx17Warning
+RemovedInNextVersionWarning = RemovedInSphinx18Warning
diff --git a/sphinx/directives/code.py b/sphinx/directives/code.py
index 9758cbbe8..b951e704d 100644
--- a/sphinx/directives/code.py
+++ b/sphinx/directives/code.py
@@ -256,7 +256,7 @@ class LiteralIncludeReader(object):
else:
start = tags[pyobject][1]
end = tags[pyobject][2]
- lines = lines[start - 1:end - 1]
+ lines = lines[start - 1:end]
if 'lineno-match' in self.options:
self.lineno_start = start
@@ -311,11 +311,11 @@ class LiteralIncludeReader(object):
self.lineno_start += lineno
return lines[lineno:]
+
+ if inclusive is True:
+ raise ValueError('start-after pattern not found: %s' % start)
else:
- if inclusive is True:
- raise ValueError('start-after pattern not found: %s' % start)
- else:
- raise ValueError('start-at pattern not found: %s' % start)
+ raise ValueError('start-at pattern not found: %s' % start)
return lines
@@ -340,11 +340,10 @@ class LiteralIncludeReader(object):
return []
else:
return lines[:lineno]
+ if inclusive is True:
+ raise ValueError('end-at pattern not found: %s' % end)
else:
- if inclusive is True:
- raise ValueError('end-at pattern not found: %s' % end)
- else:
- raise ValueError('end-before pattern not found: %s' % end)
+ raise ValueError('end-before pattern not found: %s' % end)
return lines
diff --git a/sphinx/domains/__init__.py b/sphinx/domains/__init__.py
index a6d28a06d..c68d37472 100644
--- a/sphinx/domains/__init__.py
+++ b/sphinx/domains/__init__.py
@@ -141,7 +141,7 @@ class Domain(object):
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
- object_types = {} # type: Dict[unicode, Any]
+ object_types = {} # type: Dict[unicode, ObjType]
#: directive name -> directive class
directives = {} # type: Dict[unicode, Any]
#: role name -> role callable
@@ -161,6 +161,17 @@ class Domain(object):
def __init__(self, env):
# type: (BuildEnvironment) -> None
self.env = env # type: BuildEnvironment
+ self._role_cache = {} # type: Dict[unicode, Callable]
+ self._directive_cache = {} # type: Dict[unicode, Callable]
+ self._role2type = {} # type: Dict[unicode, List[unicode]]
+ self._type2role = {} # type: Dict[unicode, unicode]
+
+ # convert class variables to instance one (to enhance through API)
+ self.object_types = dict(self.object_types)
+ self.directives = dict(self.directives)
+ self.roles = dict(self.roles)
+ self.indices = list(self.indices)
+
if self.name not in env.domaindata:
assert isinstance(self.initial_data, dict)
new_data = copy.deepcopy(self.initial_data)
@@ -170,10 +181,6 @@ class Domain(object):
self.data = env.domaindata[self.name]
if self.data['version'] != self.data_version:
raise IOError('data of %r domain out of date' % self.label)
- self._role_cache = {} # type: Dict[unicode, Callable]
- self._directive_cache = {} # type: Dict[unicode, Callable]
- self._role2type = {} # type: Dict[unicode, List[unicode]]
- self._type2role = {} # type: Dict[unicode, unicode]
for name, obj in iteritems(self.object_types):
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
@@ -181,6 +188,18 @@ class Domain(object):
self.objtypes_for_role = self._role2type.get # type: Callable[[unicode], List[unicode]] # NOQA
self.role_for_objtype = self._type2role.get # type: Callable[[unicode], unicode]
+ def add_object_type(self, name, objtype):
+ # type: (unicode, ObjType) -> None
+ """Add an object type."""
+ self.object_types[name] = objtype
+ if objtype.roles:
+ self._type2role[name] = objtype.roles[0]
+ else:
+ self._type2role[name] = ''
+
+ for role in objtype.roles:
+ self._role2type.setdefault(role, []).append(name)
+
def role(self, name):
# type: (unicode) -> Callable
"""Return a role adapter function that always gives the registered
diff --git a/sphinx/domains/cpp.py b/sphinx/domains/cpp.py
index fa9eaa5e8..39955d4f4 100644
--- a/sphinx/domains/cpp.py
+++ b/sphinx/domains/cpp.py
@@ -18,6 +18,7 @@ from docutils import nodes
from docutils.parsers.rst import Directive, directives
from sphinx import addnodes
+from sphinx.environment import NoUri
from sphinx.roles import XRefRole
from sphinx.locale import l_, _
from sphinx.domains import Domain, ObjType
@@ -27,6 +28,7 @@ from sphinx.util.nodes import make_refnode
from sphinx.util.pycompat import UnicodeMixin
from sphinx.util.docfields import Field, GroupedField
+
if False:
# For type annotation
from typing import Any, Callable, Dict, Iterator, List, Match, Pattern, Tuple, Union # NOQA
@@ -47,7 +49,7 @@ logger = logging.getLogger(__name__)
It is not the actual old code, but a replication of the behaviour.
- v2: 1.3 <= version < now
Standardised mangling scheme from
- http://mentorembedded.github.io/cxx-abi/abi.html#mangling
+ http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling
though not completely implemented.
All versions are generated and attached to elements. The newest is used for
the index. All of the versions should work as permalinks.
@@ -94,7 +96,7 @@ logger = logging.getLogger(__name__)
"class" "..."[opt] identifier[opt]
| "template" "<" template-parameter-list ">"
"class" identifier[opt] "=" id-expression
- # also, from C++17 we can have "typname" in template templates
+ # also, from C++17 we can have "typename" in template templates
templateDeclPrefix ->
"template" "<" template-parameter-list ">"
@@ -222,13 +224,12 @@ logger = logging.getLogger(__name__)
concept_object:
goal:
just a declaration of the name (for now)
- either a variable concept or function concept
grammar: only a single template parameter list, and the nested name
may not have any template argument lists
"template" "<" template-parameter-list ">"
- nested-name-specifier "()"[opt]
+ nested-name-specifier
type_object:
goal:
@@ -288,8 +289,15 @@ logger = logging.getLogger(__name__)
nested-name
"""
+# TODO: support hex, oct, etc. work
+_integer_literal_re = re.compile(r'[1-9][0-9]*')
+_octal_literal_re = re.compile(r'0[0-7]*')
+_hex_literal_re = re.compile(r'0[xX][0-7a-fA-F][0-7a-fA-F]*')
+_binary_literal_re = re.compile(r'0[bB][01][01]*')
+_integer_suffix_re = re.compile(r'')
+_float_literal_re = re.compile(r'[+-]?[0-9]*\.[0-9]+')
_identifier_re = re.compile(r'(~?\b[a-zA-Z_][a-zA-Z0-9_]*)\b')
-_whitespace_re = re.compile(r'\s+(?u)')
+_whitespace_re = re.compile(r'(?u)\s+')
_string_re = re.compile(r"[LuU8]?('([^'\\]*(?:\\.[^'\\]*)*)'"
r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S)
_visibility_re = re.compile(r'\b(public|private|protected)\b')
@@ -301,6 +309,12 @@ _operator_re = re.compile(r'''(?x)
| (<<|>>)=? | && | \|\|
| [!<>=/*%+|&^~-]=?
''')
+_fold_operator_re = re.compile(r'''(?x)
+ ->\* | \.\* | \,
+ | (<<|>>)=? | && | \|\|
+ | !=
+ | [<>=/*%+|&^~-]=?
+''')
# see http://en.cppreference.com/w/cpp/keyword
_keywords = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'auto', 'bitand', 'bitor',
@@ -318,6 +332,9 @@ _keywords = [
'while', 'xor', 'xor_eq'
]
+_max_id = 3
+_id_prefix = [None, '', '_CPPv2', '_CPPv3']
+
# ------------------------------------------------------------------------------
# Id v1 constants
# ------------------------------------------------------------------------------
@@ -393,10 +410,9 @@ _id_operator_v1 = {
} # type: Dict[unicode, unicode]
# ------------------------------------------------------------------------------
-# Id v2 constants
+# Id v > 1 constants
# ------------------------------------------------------------------------------
-_id_prefix_v2 = '_CPPv2'
_id_fundamental_v2 = {
# not all of these are actually parsed as fundamental types, TODO: do that
'void': 'v',
@@ -443,6 +459,7 @@ _id_operator_v2 = {
'delete': 'dl',
'delete[]': 'da',
# the arguments will make the difference between unary and binary
+ # in operator definitions
# '+(unary)' : 'ps',
# '-(unary)' : 'ng',
# '&(unary)' : 'ad',
@@ -484,8 +501,36 @@ _id_operator_v2 = {
'->*': 'pm',
'->': 'pt',
'()': 'cl',
- '[]': 'ix'
+ '[]': 'ix',
+ '.*': 'ds' # this one is not overloadable, but we need it for expressions
} # type: Dict[unicode, unicode]
+_id_operator_unary_v2 = {
+ '++': 'pp_',
+ '--': 'mm_',
+ '*': 'de',
+ '&': 'ad',
+ '+': 'ps',
+ '-': 'ng',
+ '!': 'nt',
+ '~': 'co'
+}
+# these are ordered by preceedence
+_expression_bin_ops = [
+ ['||'],
+ ['&&'],
+ ['|'],
+ ['^'],
+ ['&'],
+ ['==', '!='],
+ ['<=', '>=', '<', '>'],
+ ['<<', '>>'],
+ ['+', '-'],
+ ['*', '/', '%'],
+ ['.*', '->*']
+]
+_expression_unary_ops = ["++", "--", "*", "&", "+", "-", "!", "~"]
+_expression_assignment_ops = ["=", "*=", "/=", "%=", "+=", "-=",
+ ">>=", "<<=", "&=", "^=", "|="]
class NoOldIdError(UnicodeMixin, Exception):
@@ -510,12 +555,12 @@ class DefinitionError(UnicodeMixin, Exception):
class _DuplicateSymbolError(UnicodeMixin, Exception):
- def __init__(self, symbol, candSymbol):
- # type: (Symbol, Symbol) -> None
+ def __init__(self, symbol, declaration):
+ # type: (Symbol, Any) -> None
assert symbol
- assert candSymbol
+ assert declaration
self.symbol = symbol
- self.candSymbol = candSymbol
+ self.declaration = declaration
def __unicode__(self):
# type: () -> unicode
@@ -546,16 +591,6 @@ class ASTBase(UnicodeMixin):
"""Clone a definition expression node."""
return deepcopy(self)
- def get_id_v1(self):
- # type: () -> unicode
- """Return the v1 id for the node."""
- raise NotImplementedError(repr(self))
-
- def get_id_v2(self):
- # type: () -> unicode
- """Return the v2 id for the node."""
- raise NotImplementedError(repr(self))
-
def get_name(self):
# type: () -> unicode
"""Return the name.
@@ -584,6 +619,10 @@ def _verify_description_mode(mode):
raise Exception("Description mode '%s' is invalid." % mode)
+################################################################################
+# Attributes
+################################################################################
+
class ASTCPPAttribute(ASTBase):
def __init__(self, arg):
# type: (unicode) -> None
@@ -672,21 +711,479 @@ class ASTParenAttribute(ASTBase):
signode.append(nodes.Text(txt, txt))
+################################################################################
+# Expressions and Literals
+################################################################################
+
+class ASTPointerLiteral(ASTBase):
+ def __unicode__(self):
+ return u'nullptr'
+
+ def get_id(self, version):
+ return 'LDnE'
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('nullptr'))
+
+
+class ASTBooleanLiteral(ASTBase):
+ def __init__(self, value):
+ self.value = value
+
+ def __unicode__(self):
+ if self.value:
+ return u'true'
+ else:
+ return u'false'
+
+ def get_id(self, version):
+ if self.value:
+ return 'L1E'
+ else:
+ return 'L0E'
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text(text_type(self)))
+
+
+class ASTNumberLiteral(ASTBase):
+ def __init__(self, data):
+ # type: (unicode) -> None
+ self.data = data
+
+ def __unicode__(self):
+ return self.data
+
+ def get_id(self, version):
+ return "L%sE" % self.data
+
+ def describe_signature(self, signode, mode, env, symbol):
+ txt = text_type(self)
+ signode.append(nodes.Text(txt, txt))
+
+
+class ASTStringLiteral(ASTBase):
+ def __init__(self, data):
+ # type: (unicode) -> None
+ self.data = data
+
+ def __unicode__(self):
+ return self.data
+
+ def get_id(self, version):
+ # note: the length is not really correct with escaping
+ return "LA%d_KcE" % (len(self.data) - 2)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ txt = text_type(self)
+ signode.append(nodes.Text(txt, txt))
+
+
+class ASTParenExpr(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return '(' + text_type(self.expr) + ')'
+
+ def get_id(self, version):
+ return self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('(', '('))
+ self.expr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')', ')'))
+
+
+class ASTFoldExpr(ASTBase):
+ def __init__(self, leftExpr, op, rightExpr):
+ assert leftExpr is not None or rightExpr is not None
+ self.leftExpr = leftExpr
+ self.op = op
+ self.rightExpr = rightExpr
+
+ def __unicode__(self):
+ res = [u'(']
+ if self.leftExpr:
+ res.append(text_type(self.leftExpr))
+ res.append(u' ')
+ res.append(text_type(self.op))
+ res.append(u' ')
+ res.append(u'...')
+ if self.rightExpr:
+ res.append(u' ')
+ res.append(text_type(self.op))
+ res.append(u' ')
+ res.append(text_type(self.rightExpr))
+ res.append(u')')
+ return u''.join(res)
+
+ def get_id(self, version):
+ assert version >= 3
+ if version == 3:
+ return text_type(self)
+ # TODO: find the right mangling scheme
+ assert False
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('('))
+ if self.leftExpr:
+ self.leftExpr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(' '))
+ signode.append(nodes.Text(self.op))
+ signode.append(nodes.Text(' '))
+ signode.append(nodes.Text('...'))
+ if self.rightExpr:
+ signode.append(nodes.Text(' '))
+ signode.append(nodes.Text(self.op))
+ signode.append(nodes.Text(' '))
+ self.rightExpr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
+class ASTBinOpExpr(ASTBase):
+ def __init__(self, exprs, ops):
+ assert len(exprs) > 0
+ assert len(exprs) == len(ops) + 1
+ self.exprs = exprs
+ self.ops = ops
+
+ def __unicode__(self):
+ res = []
+ res.append(text_type(self.exprs[0]))
+ for i in range(1, len(self.exprs)):
+ res.append(' ')
+ res.append(self.ops[i - 1])
+ res.append(' ')
+ res.append(text_type(self.exprs[i]))
+ return u''.join(res)
+
+ def get_id(self, version):
+ assert version >= 2
+ res = []
+ for i in range(len(self.ops)):
+ res.append(_id_operator_v2[self.ops[i]])
+ res.append(self.exprs[i].get_id(version))
+ res.append(self.exprs[-1].get_id(version))
+ return u''.join(res)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ self.exprs[0].describe_signature(signode, mode, env, symbol)
+ for i in range(1, len(self.exprs)):
+ signode.append(nodes.Text(' '))
+ signode.append(nodes.Text(self.ops[i - 1]))
+ signode.append(nodes.Text(' '))
+ self.exprs[i].describe_signature(signode, mode, env, symbol)
+
+
+class ASTAssignmentExpr(ASTBase):
+ def __init__(self, exprs, ops):
+ assert len(exprs) > 0
+ assert len(exprs) == len(ops) + 1
+ self.exprs = exprs
+ self.ops = ops
+
+ def __unicode__(self):
+ res = []
+ res.append(text_type(self.exprs[0]))
+ for i in range(1, len(self.exprs)):
+ res.append(' ')
+ res.append(self.ops[i - 1])
+ res.append(' ')
+ res.append(text_type(self.exprs[i]))
+ return u''.join(res)
+
+ def get_id(self, version):
+ res = []
+ for i in range(len(self.ops)):
+ res.append(_id_operator_v2[self.ops[i]])
+ res.append(self.exprs[i].get_id(version))
+ res.append(self.exprs[-1].get_id(version))
+ return u''.join(res)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ self.exprs[0].describe_signature(signode, mode, env, symbol)
+ for i in range(1, len(self.exprs)):
+ signode.append(nodes.Text(' '))
+ signode.append(nodes.Text(self.ops[i - 1]))
+ signode.append(nodes.Text(' '))
+ self.exprs[i].describe_signature(signode, mode, env, symbol)
+
+
+class ASTCastExpr(ASTBase):
+ def __init__(self, typ, expr):
+ self.typ = typ
+ self.expr = expr
+
+ def __unicode__(self):
+ res = [u'(']
+ res.append(text_type(self.typ))
+ res.append(u')')
+ res.append(text_type(self.expr))
+ return u''.join(res)
+
+ def get_id(self, version):
+ return 'cv' + self.typ.get_id(version) + self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('('))
+ self.typ.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+ self.expr.describe_signature(signode, mode, env, symbol)
+
+
+class ASTUnaryOpExpr(ASTBase):
+ def __init__(self, op, expr):
+ self.op = op
+ self.expr = expr
+
+ def __unicode__(self):
+ return text_type(self.op) + text_type(self.expr)
+
+ def get_id(self, version):
+ return _id_operator_unary_v2[self.op] + self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text(self.op))
+ self.expr.describe_signature(signode, mode, env, symbol)
+
+
+class ASTSizeofParamPack(ASTBase):
+ def __init__(self, identifier):
+ self.identifier = identifier
+
+ def __unicode__(self):
+ return "sizeof...(" + text_type(self.identifier) + ")"
+
+ def get_id(self, version):
+ return 'sZ' + self.identifier.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('sizeof...('))
+ self.identifier.describe_signature(signode, mode, env,
+ symbol=symbol, prefix="", templateArgs="")
+ signode.append(nodes.Text(')'))
+
+
+class ASTSizeofType(ASTBase):
+ def __init__(self, typ):
+ self.typ = typ
+
+ def __unicode__(self):
+ return "sizeof(" + text_type(self.typ) + ")"
+
+ def get_id(self, version):
+ return 'st' + self.typ.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('sizeof('))
+ self.typ.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
+class ASTSizeofExpr(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return "sizeof " + text_type(self.expr)
+
+ def get_id(self, version):
+ return 'sz' + self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('sizeof '))
+ self.expr.describe_signature(signode, mode, env, symbol)
+
+
+class ASTAlignofExpr(ASTBase):
+ def __init__(self, typ):
+ self.typ = typ
+
+ def __unicode__(self):
+ return "alignof(" + text_type(self.typ) + ")"
+
+ def get_id(self, version):
+ return 'at' + self.typ.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('alignof('))
+ self.typ.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
+class ASTNoexceptExpr(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return "noexcept(" + text_type(self.expr) + ")"
+
+ def get_id(self, version):
+ return 'nx' + self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('noexcept('))
+ self.expr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
+class ASTPostfixCallExpr(ASTBase):
+ def __init__(self, exprs):
+ self.exprs = exprs
+
+ def __unicode__(self):
+ res = [u'(']
+ first = True
+ for e in self.exprs:
+ if not first:
+ res.append(u', ')
+ first = False
+ res.append(text_type(e))
+ res.append(u')')
+ return u''.join(res)
+
+ def get_id(self, idPrefix, version):
+ res = ['cl', idPrefix]
+ for e in self.exprs:
+ res.append(e.get_id(version))
+ res.append('E')
+ return u''.join(res)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('('))
+ first = True
+ for e in self.exprs:
+ if not first:
+ signode.append(nodes.Text(', '))
+ first = False
+ e.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
+class ASTPostfixArray(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return u'[' + text_type(self.expr) + ']'
+
+ def get_id(self, idPrefix, version):
+ return 'ix' + idPrefix + self.expr.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('['))
+ self.expr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(']'))
+
+
+class ASTPostfixInc(ASTBase):
+ def __unicode__(self):
+ return u'++'
+
+ def get_id(self, idPrefix, version):
+ return 'pp' + idPrefix
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('++'))
+
+
+class ASTPostfixDec(ASTBase):
+ def __unicode__(self):
+ return u'--'
+
+ def get_id(self, idPrefix, version):
+ return 'mm' + idPrefix
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('--'))
+
+
+class ASTPostfixMember(ASTBase):
+ def __init__(self, name):
+ self.name = name
+
+ def __unicode__(self):
+ return u'.' + text_type(self.name)
+
+ def get_id(self, idPrefix, version):
+ return 'dt' + idPrefix + self.name.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('.'))
+ self.name.describe_signature(signode, 'noneIsName', env, symbol)
+
+
+class ASTPostfixMemberOfPointer(ASTBase):
+ def __init__(self, name):
+ self.name = name
+
+ def __unicode__(self):
+ return u'->' + text_type(self.name)
+
+ def get_id(self, idPrefix, version):
+ return 'pt' + idPrefix + self.name.get_id(version)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('->'))
+ self.name.describe_signature(signode, 'noneIsName', env, symbol)
+
+
+class ASTPostfixExpr(ASTBase):
+ def __init__(self, prefix, postFixes):
+ assert len(postFixes) > 0
+ self.prefix = prefix
+ self.postFixes = postFixes
+
+ def __unicode__(self):
+ res = [text_type(self.prefix)]
+ for p in self.postFixes:
+ res.append(text_type(p))
+ return u''.join(res)
+
+ def get_id(self, version):
+ id = self.prefix.get_id(version)
+ for p in self.postFixes:
+ id = p.get_id(id, version)
+ return id
+
+ def describe_signature(self, signode, mode, env, symbol):
+ self.prefix.describe_signature(signode, mode, env, symbol)
+ for p in self.postFixes:
+ p.describe_signature(signode, mode, env, symbol)
+
+
+class ASTFallbackExpr(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return self.expr
+
+ def get_id(self, version):
+ return text_type(self.expr)
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode += nodes.Text(self.expr)
+
+
+################################################################################
+# The Rest
+################################################################################
+
class ASTIdentifier(ASTBase):
def __init__(self, identifier):
# type: (unicode) -> None
assert identifier is not None
self.identifier = identifier
- def get_id_v1(self):
- # type: () -> unicode
- if self.identifier == 'size_t':
- return 's'
- else:
- return self.identifier
-
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ if self.identifier == 'size_t':
+ return 's'
+ else:
+ return self.identifier
if self.identifier == "std":
return 'St'
elif self.identifier[0] == "~":
@@ -699,17 +1196,16 @@ class ASTIdentifier(ASTBase):
# type: () -> unicode
return self.identifier
- def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
+ def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
+ # type: (Any, unicode, BuildEnvironment, unicode, unicode, Symbol) -> None
_verify_description_mode(mode)
if mode == 'markType':
- targetText = prefix + self.identifier
+ targetText = prefix + self.identifier + templateArgs
pnode = addnodes.pending_xref('', refdomain='cpp',
- reftype='typeOrConcept',
+ reftype='identifier',
reftarget=targetText, modname=None,
classname=None)
key = symbol.get_lookup_key()
- assert key
pnode['cpp:parent_key'] = key
pnode += nodes.Text(self.identifier)
signode += pnode
@@ -736,8 +1232,9 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
# type: () -> unicode
return self.identifier
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ assert version >= 2
# this is not part of the normal name mangling in C++
res = []
if self.parameterPack:
@@ -772,7 +1269,7 @@ class ASTTemplateKeyParamPackIdDefault(ASTBase):
if self.identifier:
if not self.parameterPack:
signode += nodes.Text(' ')
- self.identifier.describe_signature(signode, mode, env, '', symbol)
+ self.identifier.describe_signature(signode, mode, env, '', '', symbol)
if self.default:
signode += nodes.Text(' = ')
self.default.describe_signature(signode, 'markType', env, symbol)
@@ -788,20 +1285,25 @@ class ASTTemplateParamType(ASTBase):
def name(self):
# type: () -> ASTNestedName
id = self.get_identifier()
- return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
+ return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
+
+ @property
+ def isPack(self):
+ return self.data.parameterPack
def get_identifier(self):
# type: () -> unicode
return self.data.get_identifier()
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
# this is not part of the normal name mangling in C++
+ assert version >= 2
if symbol:
# the anchor will be our parent
- return symbol.parent.declaration.get_id_v2(prefixed=None)
+ return symbol.parent.declaration.get_id(version, prefixed=False)
else:
- return self.data.get_id_v2()
+ return self.data.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -812,6 +1314,44 @@ class ASTTemplateParamType(ASTBase):
self.data.describe_signature(signode, mode, env, symbol)
+class ASTTemplateParamConstrainedTypeWithInit(ASTBase):
+ def __init__(self, type, init):
+ # type: (Any, Any) -> None
+ assert type
+ self.type = type
+ self.init = init
+
+ @property
+ def name(self):
+ # type: () -> ASTNestedName
+ return self.type.name
+
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ # this is not part of the normal name mangling in C++
+ assert version >= 2
+ if symbol:
+ # the anchor will be our parent
+ return symbol.parent.declaration.get_id(version, prefixed=False)
+ else:
+ return self.type.get_id(version)
+
+ def __unicode__(self):
+ # type: () -> unicode
+ res = text_type(self.type)
+ if self.init:
+ res += " = "
+ res += text_type(self.init)
+ return res
+
+ def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ self.type.describe_signature(signode, mode, env, symbol)
+ if self.init:
+ signode += nodes.Text(" = ")
+ self.init.describe_signature(signode, mode, env, symbol)
+
+
class ASTTemplateParamTemplateType(ASTBase):
def __init__(self, nestedParams, data):
# type: (Any, Any) -> None
@@ -824,20 +1364,21 @@ class ASTTemplateParamTemplateType(ASTBase):
def name(self):
# type: () -> ASTNestedName
id = self.get_identifier()
- return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
+ return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
def get_identifier(self):
# type: () -> unicode
return self.data.get_identifier()
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
- return symbol.parent.declaration.get_id_v2(prefixed=None)
+ return symbol.parent.declaration.get_id(version, prefixed=None)
else:
- return self.nestedParams.get_id_v2() + self.data.get_id_v2()
+ return self.nestedParams.get_id(version) + self.data.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -860,27 +1401,28 @@ class ASTTemplateParamNonType(ASTBase):
def name(self):
# type: () -> ASTNestedName
id = self.get_identifier()
- return ASTNestedName([ASTNestedNameElement(id, None)], rooted=False)
+ return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
def get_identifier(self):
# type: () -> unicode
name = self.param.name
if name:
assert len(name.names) == 1
- assert name.names[0].identifier
+ assert name.names[0].identOrOp
assert not name.names[0].templateArgs
- return name.names[0].identifier
+ return name.names[0].identOrOp
else:
return None
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
- return symbol.parent.declaration.get_id_v2(prefixed=None)
+ return symbol.parent.declaration.get_id(version, prefixed=None)
else:
- return '_' + self.param.get_id_v2()
+ return '_' + self.param.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -898,12 +1440,13 @@ class ASTTemplateParams(ASTBase):
self.params = params
self.isNested = False # whether it's a template template param
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ assert version >= 2
res = []
res.append("I")
for param in self.params:
- res.append(param.get_id_v2())
+ res.append(param.get_id(version))
res.append("E")
return ''.join(res)
@@ -947,26 +1490,38 @@ class ASTTemplateIntroductionParameter(ASTBase):
self.identifier = identifier
self.parameterPack = parameterPack
+ @property
+ def name(self):
+ # type: () -> ASTNestedName
+ id = self.get_identifier()
+ return ASTNestedName([ASTNestedNameElement(id, None)], [False], rooted=False)
+
+ @property
+ def isPack(self):
+ return self.parameterPack
+
def get_identifier(self):
# type: () -> unicode
return self.identifier
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ assert version >= 2
# this is not part of the normal name mangling in C++
if symbol:
# the anchor will be our parent
- return symbol.parent.declaration.get_id_v2(prefixed=None)
+ return symbol.parent.declaration.get_id(version, prefixed=None)
else:
if self.parameterPack:
return 'Dp'
else:
return '0' # we need to put something
- def get_id_v2_as_arg(self):
- # type: () -> unicode
+ def get_id_as_arg(self, version):
+ # type: (int) -> unicode
+ assert version >= 2
# used for the implicit requires clause
- res = self.identifier.get_id_v2()
+ res = self.identifier.get_id(version)
if self.parameterPack:
return u'sp' + res
else:
@@ -984,7 +1539,7 @@ class ASTTemplateIntroductionParameter(ASTBase):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
if self.parameterPack:
signode += nodes.Text('...')
- self.identifier.describe_signature(signode, mode, env, '', symbol)
+ self.identifier.describe_signature(signode, mode, env, '', '', symbol)
class ASTTemplateIntroduction(ASTBase):
@@ -994,22 +1549,21 @@ class ASTTemplateIntroduction(ASTBase):
self.concept = concept
self.params = params
- # id_v1 does not exist
-
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ assert version >= 2
# first do the same as a normal template parameter list
res = []
res.append("I")
for param in self.params:
- res.append(param.get_id_v2())
+ res.append(param.get_id(version))
res.append("E")
# let's use X expr E, which is otherwise for constant template args
res.append("X")
- res.append(self.concept.get_id_v2())
+ res.append(self.concept.get_id(version))
res.append("I")
for param in self.params:
- res.append(param.get_id_v2_as_arg())
+ res.append(param.get_id_as_arg(version))
res.append("E")
res.append("E")
return ''.join(res)
@@ -1043,18 +1597,16 @@ class ASTTemplateIntroduction(ASTBase):
class ASTTemplateDeclarationPrefix(ASTBase):
def __init__(self, templates):
# type: (List[Any]) -> None
- assert templates is not None
- assert len(templates) > 0
+ # template is None means it's an explicit instantiation of a variable
self.templates = templates
- # id_v1 does not exist
-
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ assert version >= 2
# this is not part of a normal name mangling system
res = []
for t in self.templates:
- res.append(t.get_id_v2())
+ res.append(t.get_id(version))
return u''.join(res)
def __unicode__(self):
@@ -1080,19 +1632,16 @@ class ASTOperatorBuildIn(ASTBase):
# type: () -> bool
return True
- def get_id_v1(self):
- # type: () -> unicode
- if self.op not in _id_operator_v1:
- raise Exception('Internal error: Build-in operator "%s" can not '
- 'be mapped to an id.' % self.op)
- return _id_operator_v1[self.op]
-
- def get_id_v2(self):
- # type: () -> unicode
- if self.op not in _id_operator_v2:
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ ids = _id_operator_v1
+ else:
+ ids = _id_operator_v2
+ if self.op not in ids:
raise Exception('Internal error: Build-in operator "%s" can not '
'be mapped to an id.' % self.op)
- return _id_operator_v2[self.op]
+ return ids[self.op]
def __unicode__(self):
# type: () -> unicode
@@ -1101,8 +1650,8 @@ class ASTOperatorBuildIn(ASTBase):
else:
return u'operator' + self.op
- def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
+ def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
+ # type: (addnodes.desc_signature, unicode, Any, unicode, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1120,13 +1669,12 @@ class ASTOperatorType(ASTBase):
# type: () -> bool
return True
- def get_id_v1(self):
- # type: () -> unicode
- return u'castto-%s-operator' % self.type.get_id_v1()
-
- def get_id_v2(self):
- # type: () -> unicode
- return u'cv' + self.type.get_id_v2()
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ return u'castto-%s-operator' % self.type.get_id(version)
+ else:
+ return u'cv' + self.type.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -1136,8 +1684,8 @@ class ASTOperatorType(ASTBase):
# type: () -> unicode
return text_type(self)
- def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
+ def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
+ # type: (addnodes.desc_signature, unicode, Any, unicode, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1155,20 +1703,19 @@ class ASTOperatorLiteral(ASTBase):
# type: () -> bool
return True
- def get_id_v1(self):
- # type: () -> unicode
- raise NoOldIdError()
-
- def get_id_v2(self):
- # type: () -> unicode
- return u'li' + self.identifier.get_id_v2()
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ else:
+ return u'li' + self.identifier.get_id(version)
def __unicode__(self):
# type: () -> unicode
return u'operator""' + text_type(self.identifier)
- def describe_signature(self, signode, mode, env, prefix, symbol):
- # type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
+ def describe_signature(self, signode, mode, env, prefix, templateArgs, symbol):
+ # type: (addnodes.desc_signature, unicode, Any, unicode, unicode, Symbol) -> None
_verify_description_mode(mode)
identifier = text_type(self)
if mode == 'lastIsName':
@@ -1186,43 +1733,39 @@ class ASTTemplateArgConstant(ASTBase):
# type: () -> unicode
return text_type(self.value)
- def get_id_v1(self):
- # type: () -> unicode
- return text_type(self).replace(u' ', u'-')
-
- def get_id_v2(self):
- # type: () -> unicode
- # TODO: doing this properly needs parsing of expressions, let's just
- # juse it verbatim for now
- return u'X' + text_type(self) + u'E'
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ return text_type(self).replace(u' ', u'-')
+ if version == 2:
+ return u'X' + text_type(self) + u'E'
+ return u'X' + self.value.get_id(version) + u'E'
def describe_signature(self, signode, mode, env, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
- signode += nodes.Text(text_type(self))
+ self.value.describe_signature(signode, mode, env, symbol)
class ASTTemplateArgs(ASTBase):
def __init__(self, args):
# type: (List[Any]) -> None
assert args is not None
- assert len(args) > 0
self.args = args
- def get_id_v1(self):
- # type: () -> unicode
- res = [] # type: List[unicode]
- res.append(':')
- res.append(u'.'.join(a.get_id_v1() for a in self.args))
- res.append(':')
- return u''.join(res)
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ res = [] # type: List[unicode]
+ res.append(':')
+ res.append(u'.'.join(a.get_id(version) for a in self.args))
+ res.append(':')
+ return u''.join(res)
- def get_id_v2(self):
- # type: () -> unicode
res = []
res.append('I')
for a in self.args:
- res.append(a.get_id_v2())
+ res.append(a.get_id(version))
res.append('E')
return u''.join(res)
@@ -1245,48 +1788,44 @@ class ASTTemplateArgs(ASTBase):
class ASTNestedNameElement(ASTBase):
- def __init__(self, identifier, templateArgs):
+ def __init__(self, identOrOp, templateArgs):
# type: (Any, Any) -> None
- self.identifier = identifier
+ self.identOrOp = identOrOp
self.templateArgs = templateArgs
def is_operator(self):
# type: () -> bool
return False
- def get_id_v1(self):
- # type: () -> unicode
- res = self.identifier.get_id_v1()
- if self.templateArgs:
- res += self.templateArgs.get_id_v1()
- return res
-
- def get_id_v2(self):
- # type: () -> unicode
- res = self.identifier.get_id_v2()
+ def get_id(self, version):
+ # type: (int) -> unicode
+ res = self.identOrOp.get_id(version)
if self.templateArgs:
- res += self.templateArgs.get_id_v2()
+ res += self.templateArgs.get_id(version)
return res
def __unicode__(self):
# type: () -> unicode
- res = text_type(self.identifier)
+ res = text_type(self.identOrOp)
if self.templateArgs:
res += text_type(self.templateArgs)
return res
def describe_signature(self, signode, mode, env, prefix, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, unicode, Symbol) -> None
- self.identifier.describe_signature(signode, mode, env, prefix, symbol)
- if self.templateArgs:
+ tArgs = text_type(self.templateArgs) if self.templateArgs is not None else ''
+ self.identOrOp.describe_signature(signode, mode, env, prefix, tArgs, symbol)
+ if self.templateArgs is not None:
self.templateArgs.describe_signature(signode, mode, env, symbol)
class ASTNestedName(ASTBase):
- def __init__(self, names, rooted):
- # type: (List[Any], bool) -> None
+ def __init__(self, names, templates, rooted):
+ # type: (List[Any], List[bool], bool) -> None
assert len(names) > 0
self.names = names
+ self.templates = templates
+ assert len(self.names) == len(self.templates)
self.rooted = rooted
@property
@@ -1304,22 +1843,20 @@ class ASTNestedName(ASTBase):
count += 1
return count
- def get_id_v1(self):
- # type: () -> unicode
- tt = text_type(self)
- if tt in _id_shorthands_v1:
- return _id_shorthands_v1[tt]
- else:
- return u'::'.join(n.get_id_v1() for n in self.names)
-
- def get_id_v2(self, modifiers=""):
- # type: (unicode) -> unicode
+ def get_id(self, version, modifiers=''):
+ # type: (int, unicode) -> unicode
+ if version == 1:
+ tt = text_type(self)
+ if tt in _id_shorthands_v1:
+ return _id_shorthands_v1[tt]
+ else:
+ return u'::'.join(n.get_id(version) for n in self.names)
res = [] # type: List[unicode]
if len(self.names) > 1 or len(modifiers) > 0:
res.append('N')
res.append(modifiers)
for n in self.names:
- res.append(n.get_id_v2())
+ res.append(n.get_id(version))
if len(self.names) > 1 or len(modifiers) > 0:
res.append('E')
return u''.join(res)
@@ -1329,44 +1866,62 @@ class ASTNestedName(ASTBase):
res = [] # type: List[unicode]
if self.rooted:
res.append('')
- for n in self.names:
- res.append(text_type(n))
+ for i in range(len(self.names)):
+ n = self.names[i]
+ t = self.templates[i]
+ if t:
+ res.append("template " + text_type(n))
+ else:
+ res.append(text_type(n))
return '::'.join(res)
def describe_signature(self, signode, mode, env, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
# just print the name part, with template args, not template params
- if mode == 'lastIsName':
- addname = [] # type: List[unicode]
- if self.rooted:
- addname.append('')
- for n in self.names[:-1]:
- addname.append(text_type(n))
- addname = '::'.join(addname) # type: ignore
- if len(self.names) > 1:
- addname += '::'
- signode += addnodes.desc_addname(addname, addname)
- self.names[-1].describe_signature(signode, mode, env, '', symbol)
- elif mode == 'noneIsName':
+ if mode == 'noneIsName':
signode += nodes.Text(text_type(self))
elif mode == 'param':
name = text_type(self)
signode += nodes.emphasis(name, name)
- elif mode == 'markType':
- # each element should be a pending xref targeting the complete
+ elif mode == 'markType' or mode == 'lastIsName':
+ # Each element should be a pending xref targeting the complete
# prefix. however, only the identifier part should be a link, such
# that template args can be a link as well.
+ # For 'lastIsName' we should also prepend template parameter lists.
+ templateParams = [] # type: List[Any]
+ if mode == 'lastIsName':
+ assert symbol is not None
+ if symbol.declaration.templatePrefix is not None:
+ templateParams = symbol.declaration.templatePrefix.templates
+ iTemplateParams = 0
+ templateParamsPrefix = u''
prefix = '' # type: unicode
first = True
- for name in self.names:
+ names = self.names[:-1] if mode == 'lastIsName' else self.names
+ for i in range(len(names)):
+ name = names[i]
+ template = self.templates[i]
if not first:
signode += nodes.Text('::')
prefix += '::'
+ if template:
+ signode += nodes.Text("template ")
first = False
if name != '':
- name.describe_signature(signode, mode, env, prefix, symbol) # type: ignore
+ if (name.templateArgs and # type: ignore
+ iTemplateParams < len(templateParams)):
+ templateParamsPrefix += text_type(templateParams[iTemplateParams])
+ iTemplateParams += 1
+ name.describe_signature(signode, 'markType', # type: ignore
+ env, templateParamsPrefix + prefix, symbol)
prefix += text_type(name)
+ if mode == 'lastIsName':
+ if len(self.names) > 1:
+ signode += addnodes.desc_addname('::', '::')
+ if self.templates[-1]:
+ signode += nodes.Text("template ")
+ self.names[-1].describe_signature(signode, mode, env, '', symbol)
else:
raise Exception('Unknown description mode: %s' % mode)
@@ -1380,18 +1935,17 @@ class ASTTrailingTypeSpecFundamental(ASTBase):
# type: () -> unicode
return self.name
- def get_id_v1(self):
- # type: () -> unicode
- res = []
- for a in self.name.split(' '):
- if a in _id_fundamental_v1:
- res.append(_id_fundamental_v1[a])
- else:
- res.append(a)
- return u'-'.join(res)
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ res = []
+ for a in self.name.split(' '):
+ if a in _id_fundamental_v1:
+ res.append(_id_fundamental_v1[a])
+ else:
+ res.append(a)
+ return u'-'.join(res)
- def get_id_v2(self):
- # type: () -> unicode
if self.name not in _id_fundamental_v2:
raise Exception(
'Semi-internal error: Fundamental type "%s" can not be mapped '
@@ -1415,13 +1969,9 @@ class ASTTrailingTypeSpecName(ASTBase):
# type: () -> Any
return self.nestedName
- def get_id_v1(self):
- # type: () -> unicode
- return self.nestedName.get_id_v1()
-
- def get_id_v2(self):
- # type: () -> unicode
- return self.nestedName.get_id_v2()
+ def get_id(self, version):
+ # type: (int) -> unicode
+ return self.nestedName.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -1440,25 +1990,55 @@ class ASTTrailingTypeSpecName(ASTBase):
self.nestedName.describe_signature(signode, mode, env, symbol=symbol)
+class ASTTrailingTypeSpecDecltypeAuto(ASTBase):
+ def __unicode__(self):
+ return u'decltype(auto)'
+
+ def get_id(self, version):
+ if version == 1:
+ raise NoOldIdError()
+ return 'Dc'
+
+ def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
+ signode.append(nodes.Text(text_type(self)))
+
+
+class ASTTrailingTypeSpecDecltype(ASTBase):
+ def __init__(self, expr):
+ self.expr = expr
+
+ def __unicode__(self):
+ return u'decltype(' + text_type(self.expr) + ')'
+
+ def get_id(self, version):
+ if version == 1:
+ raise NoOldIdError()
+ return 'DT' + self.expr.get_id(version) + "E"
+
+ def describe_signature(self, signode, mode, env, symbol):
+ signode.append(nodes.Text('decltype('))
+ self.expr.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text(')'))
+
+
class ASTFunctionParameter(ASTBase):
def __init__(self, arg, ellipsis=False):
# type: (Any, bool) -> None
self.arg = arg
self.ellipsis = ellipsis
- def get_id_v1(self):
- # type: () -> unicode
- if self.ellipsis:
- return 'z'
- else:
- return self.arg.get_id_v1()
-
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ # this is not part of the normal name mangling in C++
+ if symbol:
+ # the anchor will be our parent
+ return symbol.parent.declaration.get_id(version, prefixed=None)
+ # else, do the usual
if self.ellipsis:
return 'z'
else:
- return self.arg.get_id_v2()
+ return self.arg.get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -1489,49 +2069,38 @@ class ASTParametersQualifiers(ASTBase):
self.final = final
self.initializer = initializer
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- res = []
- if self.volatile:
- res.append('V')
- if self.const:
- res.append('C')
- if self.refQual == '&&':
- res.append('O')
- elif self.refQual == '&':
- res.append('R')
- return u''.join(res)
-
- def get_param_id_v1(self):
- # type: () -> unicode
- if len(self.args) == 0:
- return ''
- else:
- return u'__' + u'.'.join(a.get_id_v1() for a in self.args)
-
- # Id v2 ------------------------------------------------------------------
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.args
- def get_modifiers_id_v2(self):
- # type: () -> unicode
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
res = []
if self.volatile:
res.append('V')
if self.const:
- res.append('K')
+ if version == 1:
+ res.append('C')
+ else:
+ res.append('K')
if self.refQual == '&&':
res.append('O')
elif self.refQual == '&':
res.append('R')
return u''.join(res)
- def get_param_id_v2(self):
- # type: () -> unicode
+ def get_param_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ if len(self.args) == 0:
+ return ''
+ else:
+ return u'__' + u'.'.join(a.get_id(version) for a in self.args)
if len(self.args) == 0:
return 'v'
else:
- return u''.join(a.get_id_v2() for a in self.args)
+ return u''.join(a.get_id(version) for a in self.args)
def __unicode__(self):
# type: () -> unicode
@@ -1698,24 +2267,22 @@ class ASTDeclSpecs(ASTBase):
# type: () -> unicode
return self.trailingTypeSpec.name
- def get_id_v1(self):
- # type: () -> unicode
- res = []
- res.append(self.trailingTypeSpec.get_id_v1())
- if self.allSpecs.volatile:
- res.append('V')
- if self.allSpecs.const:
- res.append('C')
- return u''.join(res)
-
- def get_id_v2(self):
- # type: () -> unicode
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ res = []
+ res.append(self.trailingTypeSpec.get_id(version))
+ if self.allSpecs.volatile:
+ res.append('V')
+ if self.allSpecs.const:
+ res.append('C')
+ return u''.join(res)
res = []
if self.leftSpecs.volatile or self.rightSpecs.volatile:
res.append('V')
if self.leftSpecs.const or self.rightSpecs.volatile:
res.append('K')
- res.append(self.trailingTypeSpec.get_id_v2())
+ res.append(self.trailingTypeSpec.get_id(version))
return u''.join(res)
def __unicode__(self):
@@ -1766,25 +2333,35 @@ class ASTDeclSpecs(ASTBase):
class ASTArray(ASTBase):
def __init__(self, size):
- # type: (unicode) -> None
self.size = size
def __unicode__(self):
# type: () -> unicode
- return u''.join(['[', text_type(self.size), ']'])
-
- def get_id_v1(self):
- # type: () -> unicode
- return u'A'
+ if self.size:
+ return u''.join(['[', text_type(self.size), ']'])
+ else:
+ return u'[]'
- def get_id_v2(self):
- # type: () -> unicode
- # TODO: this should maybe be done differently
- return u'A' + text_type(self.size) + u'_'
+ def get_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ return u'A'
+ if version == 2:
+ if self.size:
+ return u'A' + text_type(self.size) + u'_'
+ else:
+ return u'A_'
+ if self.size:
+ return u'A' + self.size.get_id(version) + u'_'
+ else:
+ return u'A_'
- def describe_signature(self, signode, mode, env):
+ def describe_signature(self, signode, mode, env, symbol):
_verify_description_mode(mode)
- signode += nodes.Text(text_type(self))
+ signode.append(nodes.Text("["))
+ if self.size:
+ self.size.describe_signature(signode, mode, env, symbol)
+ signode.append(nodes.Text("]"))
class ASTDeclaratorPtr(ASTBase):
@@ -1800,6 +2377,11 @@ class ASTDeclaratorPtr(ASTBase):
# type: () -> unicode
return self.next.name
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.next.function_params
+
def require_space_after_declSpecs(self):
# type: () -> bool
# TODO: if has paramPack, then False ?
@@ -1820,38 +2402,26 @@ class ASTDeclaratorPtr(ASTBase):
res.append(text_type(self.next))
return u''.join(res)
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v1()
-
- def get_param_id_v1(self):
- # type: () -> unicode
- return self.next.get_param_id_v1()
-
- def get_ptr_suffix_id_v1(self):
- # type: () -> unicode
- res = 'P'
- if self.volatile:
- res += 'V'
- if self.const:
- res += 'C'
- return res + self.next.get_ptr_suffix_id_v1()
-
- # Id v2 ------------------------------------------------------------------
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
+ return self.next.get_modifiers_id(version)
- def get_modifiers_id_v2(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v2()
+ def get_param_id(self, version):
+ # type: (int) -> unicode
+ return self.next.get_param_id(version)
- def get_param_id_v2(self):
- # type: () -> unicode
- return self.next.get_param_id_v2()
+ def get_ptr_suffix_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ res = ['P']
+ if self.volatile:
+ res.append('V')
+ if self.const:
+ res.append('C')
+ res.append(self.next.get_ptr_suffix_id(version))
+ return u''.join(res)
- def get_ptr_suffix_id_v2(self):
- # type: () -> unicode
- res = [self.next.get_ptr_suffix_id_v2()] # type: List[unicode]
+ res = [self.next.get_ptr_suffix_id(version)]
res.append('P')
if self.volatile:
res.append('V')
@@ -1859,8 +2429,8 @@ class ASTDeclaratorPtr(ASTBase):
res.append('C')
return u''.join(res)
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
# ReturnType *next, so we are part of the return type of 'next
res = ['P'] # type: List[unicode]
if self.volatile:
@@ -1868,9 +2438,7 @@ class ASTDeclaratorPtr(ASTBase):
if self.const:
res.append('C')
res.append(returnTypeId)
- return self.next.get_type_id_v2(returnTypeId=u''.join(res))
-
- # ------------------------------------------------------------------------
+ return self.next.get_type_id(version, returnTypeId=u''.join(res))
def is_function_type(self):
# type: () -> bool
@@ -1906,6 +2474,11 @@ class ASTDeclaratorRef(ASTBase):
# type: () -> unicode
return self.next.name
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.next.function_params
+
def require_space_after_declSpecs(self):
# type: () -> bool
return self.next.require_space_after_declSpecs()
@@ -1914,40 +2487,26 @@ class ASTDeclaratorRef(ASTBase):
# type: () -> unicode
return '&' + text_type(self.next)
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v1()
-
- def get_param_id_v1(self): # only the parameters (if any)
- # type: () -> unicode
- return self.next.get_param_id_v1()
-
- def get_ptr_suffix_id_v1(self):
- # type: () -> unicode
- return u'R' + self.next.get_ptr_suffix_id_v1()
-
- # Id v2 ------------------------------------------------------------------
-
- def get_modifiers_id_v2(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v2()
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
+ return self.next.get_modifiers_id(version)
- def get_param_id_v2(self): # only the parameters (if any)
- # type: () -> unicode
- return self.next.get_param_id_v2()
+ def get_param_id(self, version): # only the parameters (if any)
+ # type: (int) -> unicode
+ return self.next.get_param_id(version)
- def get_ptr_suffix_id_v2(self):
- # type: () -> unicode
- return self.next.get_ptr_suffix_id_v2() + u'R'
+ def get_ptr_suffix_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ return u'R' + self.next.get_ptr_suffix_id(version)
+ else:
+ return self.next.get_ptr_suffix_id(version) + u'R'
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
+ assert version >= 2
# ReturnType &next, so we are part of the return type of 'next
- return self.next.get_type_id_v2(returnTypeId=u'R' + returnTypeId)
-
- # ------------------------------------------------------------------------
+ return self.next.get_type_id(version, returnTypeId=u'R' + returnTypeId)
def is_function_type(self):
# type: () -> bool
@@ -1971,6 +2530,11 @@ class ASTDeclaratorParamPack(ASTBase):
# type: () -> unicode
return self.next.name
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.next.function_params
+
def require_space_after_declSpecs(self):
# type: () -> bool
return False
@@ -1982,39 +2546,26 @@ class ASTDeclaratorParamPack(ASTBase):
res = ' ' + res
return '...' + res
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v1()
-
- def get_param_id_v1(self): # only the parameters (if any)
- # type: () -> unicode
- return self.next.get_param_id_v1()
-
- def get_ptr_suffix_id_v1(self):
- # type: () -> unicode
- return 'Dp' + self.next.get_ptr_suffix_id_v2()
-
- # Id v2 ------------------------------------------------------------------
-
- def get_modifiers_id_v2(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v2()
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
+ return self.next.get_modifiers_id(version)
- def get_param_id_v2(self): # only the parameters (if any)
- return self.next.get_param_id_v2()
+ def get_param_id(self, version): # only the parameters (if any)
+ # type: (int) -> unicode
+ return self.next.get_param_id(version)
- def get_ptr_suffix_id_v2(self):
- # type: () -> unicode
- return self.next.get_ptr_suffix_id_v2() + u'Dp'
+ def get_ptr_suffix_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ return 'Dp' + self.next.get_ptr_suffix_id(version)
+ else:
+ return self.next.get_ptr_suffix_id(version) + u'Dp'
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
+ assert version >= 2
# ReturnType... next, so we are part of the return type of 'next
- return self.next.get_type_id_v2(returnTypeId=u'Dp' + returnTypeId)
-
- # ------------------------------------------------------------------------
+ return self.next.get_type_id(version, returnTypeId=u'Dp' + returnTypeId)
def is_function_type(self):
# type: () -> bool
@@ -2044,6 +2595,11 @@ class ASTDeclaratorMemPtr(ASTBase):
# type: () -> unicode
return self.next.name
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.next.function_params
+
def require_space_after_declSpecs(self):
# type: () -> bool
return True
@@ -2062,37 +2618,31 @@ class ASTDeclaratorMemPtr(ASTBase):
res.append(text_type(self.next))
return ''.join(res)
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- raise NoOldIdError()
-
- def get_param_id_v1(self): # only the parameters (if any)
- # type: () -> unicode
- raise NoOldIdError()
-
- def get_ptr_suffix_id_v1(self):
- # type: () -> unicode
- raise NoOldIdError()
-
- # Id v2 ------------------------------------------------------------------
-
- def get_modifiers_id_v2(self):
- # type: () -> unicode
- return self.next.get_modifiers_id_v2()
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ else:
+ return self.next.get_modifiers_id(version)
- def get_param_id_v2(self): # only the parameters (if any)
- # type: () -> unicode
- return self.next.get_param_id_v2()
+ def get_param_id(self, version): # only the parameters (if any)
+ # type: (int) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ else:
+ return self.next.get_param_id(version)
- def get_ptr_suffix_id_v2(self):
- # type: () -> unicode
- raise NotImplementedError()
- return self.next.get_ptr_suffix_id_v2() + u'Dp'
+ def get_ptr_suffix_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ else:
+ raise NotImplementedError()
+ return self.next.get_ptr_suffix_id(version) + u'Dp'
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
+ assert version >= 2
# ReturnType name::* next, so we are part of the return type of next
nextReturnTypeId = '' # type: unicode
if self.volatile:
@@ -2100,11 +2650,9 @@ class ASTDeclaratorMemPtr(ASTBase):
if self.const:
nextReturnTypeId += 'K'
nextReturnTypeId += 'M'
- nextReturnTypeId += self.className.get_id_v2()
+ nextReturnTypeId += self.className.get_id(version)
nextReturnTypeId += returnTypeId
- return self.next.get_type_id_v2(nextReturnTypeId)
-
- # ------------------------------------------------------------------------
+ return self.next.get_type_id(version, nextReturnTypeId)
def is_function_type(self):
# type: () -> bool
@@ -2144,6 +2692,11 @@ class ASTDeclaratorParen(ASTBase):
# type: () -> unicode
return self.inner.name
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.inner.function_params
+
def require_space_after_declSpecs(self):
# type: () -> bool
return True
@@ -2156,44 +2709,30 @@ class ASTDeclaratorParen(ASTBase):
res.append(text_type(self.next))
return ''.join(res)
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self):
- # type: () -> unicode
- return self.inner.get_modifiers_id_v1()
-
- def get_param_id_v1(self): # only the parameters (if any)
- # type: () -> unicode
- return self.inner.get_param_id_v1()
-
- def get_ptr_suffix_id_v1(self):
- # type: () -> unicode
- raise NoOldIdError() # TODO: was this implemented before?
- return self.next.get_ptr_suffix_id_v2() + \
- self.inner.get_ptr_suffix_id_v2()
-
- # Id v2 ------------------------------------------------------------------
-
- def get_modifiers_id_v2(self):
- # type: () -> unicode
- return self.inner.get_modifiers_id_v2()
+ def get_modifiers_id(self, version):
+ # type: (int) -> unicode
+ return self.inner.get_modifiers_id(version)
- def get_param_id_v2(self): # only the parameters (if any)
- # type: () -> unicode
- return self.inner.get_param_id_v2()
+ def get_param_id(self, version): # only the parameters (if any)
+ # type: (int) -> unicode
+ return self.inner.get_param_id(version)
- def get_ptr_suffix_id_v2(self):
- # type: () -> unicode
- return self.inner.get_ptr_suffix_id_v2() + \
- self.next.get_ptr_suffix_id_v2()
+ def get_ptr_suffix_id(self, version):
+ # type: (int) -> unicode
+ if version == 1:
+ raise NoOldIdError() # TODO: was this implemented before?
+ return self.next.get_ptr_suffix_id(version) + \
+ self.inner.get_ptr_suffix_id(version)
+ else:
+ return self.inner.get_ptr_suffix_id(version) + \
+ self.next.get_ptr_suffix_id(version)
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
+ assert version >= 2
# ReturnType (inner)next, so 'inner' returns everything outside
- nextId = self.next.get_type_id_v2(returnTypeId)
- return self.inner.get_type_id_v2(returnTypeId=nextId)
-
- # ------------------------------------------------------------------------
+ nextId = self.next.get_type_id(version, returnTypeId)
+ return self.inner.get_type_id(version, returnTypeId=nextId)
def is_function_type(self):
# type: () -> bool
@@ -2220,58 +2759,41 @@ class ASTDeclaratorNameParamQual(ASTBase):
# type: () -> unicode
return self.declId
- # Id v1 ------------------------------------------------------------------
-
- def get_modifiers_id_v1(self): # only the modifiers for a function, e.g.,
- # type: () -> unicode
- # cv-qualifiers
- if self.paramQual:
- return self.paramQual.get_modifiers_id_v1()
- raise Exception(
- "This should only be called on a function: %s" % text_type(self))
-
- def get_param_id_v1(self): # only the parameters (if any)
- # type: () -> unicode
- if self.paramQual:
- return self.paramQual.get_param_id_v1()
- else:
- return ''
-
- def get_ptr_suffix_id_v1(self): # only the array specifiers
- # type: () -> unicode
- return u''.join(a.get_id_v1() for a in self.arrayOps)
-
- # Id v2 ------------------------------------------------------------------
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.paramQual.function_params
- def get_modifiers_id_v2(self): # only the modifiers for a function, e.g.,
- # type: () -> unicode
+ def get_modifiers_id(self, version): # only the modifiers for a function, e.g.,
+ # type: (int) -> unicode
# cv-qualifiers
if self.paramQual:
- return self.paramQual.get_modifiers_id_v2()
+ return self.paramQual.get_modifiers_id(version)
raise Exception(
"This should only be called on a function: %s" % text_type(self))
- def get_param_id_v2(self): # only the parameters (if any)
- # type: () -> unicode
+ def get_param_id(self, version): # only the parameters (if any)
+ # type: (int) -> unicode
if self.paramQual:
- return self.paramQual.get_param_id_v2()
+ return self.paramQual.get_param_id(version)
else:
return ''
- def get_ptr_suffix_id_v2(self): # only the array specifiers
- # type: () -> unicode
- return u''.join(a.get_id_v2() for a in self.arrayOps)
+ def get_ptr_suffix_id(self, version): # only the array specifiers
+ # type: (int) -> unicode
+ return u''.join(a.get_id(version) for a in self.arrayOps)
- def get_type_id_v2(self, returnTypeId):
- # type: (unicode) -> unicode
+ def get_type_id(self, version, returnTypeId):
+ # type: (int, unicode) -> unicode
+ assert version >= 2
res = []
# TOOD: can we actually have both array ops and paramQual?
- res.append(self.get_ptr_suffix_id_v2())
+ res.append(self.get_ptr_suffix_id(version))
if self.paramQual:
- res.append(self.get_modifiers_id_v2())
+ res.append(self.get_modifiers_id(version))
res.append('F')
res.append(returnTypeId)
- res.append(self.get_param_id_v2())
+ res.append(self.get_param_id(version))
res.append('E')
else:
res.append(returnTypeId)
@@ -2304,24 +2826,24 @@ class ASTDeclaratorNameParamQual(ASTBase):
if self.declId:
self.declId.describe_signature(signode, mode, env, symbol)
for op in self.arrayOps:
- op.describe_signature(signode, mode, env)
+ op.describe_signature(signode, mode, env, symbol)
if self.paramQual:
self.paramQual.describe_signature(signode, mode, env, symbol)
class ASTInitializer(ASTBase):
def __init__(self, value):
- # type: (unicode) -> None
self.value = value
def __unicode__(self):
# type: () -> unicode
return u''.join([' = ', text_type(self.value)])
- def describe_signature(self, signode, mode):
- # type: (addnodes.desc_signature, unicode) -> None
+ def describe_signature(self, signode, mode, env, symbol):
+ # type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
- signode += nodes.Text(text_type(self))
+ signode.append(nodes.Text(' = '))
+ self.value.describe_signature(signode, 'markType', env, symbol)
class ASTType(ASTBase):
@@ -2338,49 +2860,53 @@ class ASTType(ASTBase):
name = self.decl.name
return name
- def get_id_v1(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- res = []
- if objectType: # needs the name
- if objectType == 'function': # also modifiers
- res.append(symbol.get_full_nested_name().get_id_v1())
- res.append(self.decl.get_param_id_v1())
- res.append(self.decl.get_modifiers_id_v1())
- if (self.declSpecs.leftSpecs.constexpr or
- (self.declSpecs.rightSpecs and
- self.declSpecs.rightSpecs.constexpr)):
- res.append('CE')
- elif objectType == 'type': # just the name
- res.append(symbol.get_full_nested_name().get_id_v1())
- else:
- print(objectType)
- assert False
- else: # only type encoding
- if self.decl.is_function_type():
- raise NoOldIdError()
- res.append(self.declSpecs.get_id_v1())
- res.append(self.decl.get_ptr_suffix_id_v1())
- res.append(self.decl.get_param_id_v1())
- return u''.join(res)
+ @property
+ def function_params(self):
+ # type: () -> Any
+ return self.decl.function_params
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ if version == 1:
+ res = []
+ if objectType: # needs the name
+ if objectType == 'function': # also modifiers
+ res.append(symbol.get_full_nested_name().get_id(version))
+ res.append(self.decl.get_param_id(version))
+ res.append(self.decl.get_modifiers_id(version))
+ if (self.declSpecs.leftSpecs.constexpr or
+ (self.declSpecs.rightSpecs and
+ self.declSpecs.rightSpecs.constexpr)):
+ res.append('CE')
+ elif objectType == 'type': # just the name
+ res.append(symbol.get_full_nested_name().get_id(version))
+ else:
+ print(objectType)
+ assert False
+ else: # only type encoding
+ if self.decl.is_function_type():
+ raise NoOldIdError()
+ res.append(self.declSpecs.get_id(version))
+ res.append(self.decl.get_ptr_suffix_id(version))
+ res.append(self.decl.get_param_id(version))
+ return u''.join(res)
+ # other versions
res = []
if objectType: # needs the name
if objectType == 'function': # also modifiers
- modifiers = self.decl.get_modifiers_id_v2()
- res.append(symbol.get_full_nested_name().get_id_v2(modifiers))
- res.append(self.decl.get_param_id_v2())
+ modifiers = self.decl.get_modifiers_id(version)
+ res.append(symbol.get_full_nested_name().get_id(version, modifiers))
+ res.append(self.decl.get_param_id(version))
elif objectType == 'type': # just the name
- res.append(symbol.get_full_nested_name().get_id_v2())
+ res.append(symbol.get_full_nested_name().get_id(version))
else:
print(objectType)
assert False
else: # only type encoding
# the 'returnType' of a non-function type is simply just the last
# type, i.e., for 'int*' it is 'int'
- returnTypeId = self.declSpecs.get_id_v2()
- typeId = self.decl.get_type_id_v2(returnTypeId)
+ returnTypeId = self.declSpecs.get_id(version)
+ typeId = self.decl.get_type_id(version, returnTypeId)
res.append(typeId)
return u''.join(res)
@@ -2426,20 +2952,14 @@ class ASTTypeWithInit(ASTBase):
# type: () -> unicode
return self.type.name
- def get_id_v1(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- if objectType == 'member':
- return symbol.get_full_nested_name().get_id_v1() + u'__' \
- + self.type.get_id_v1()
- else:
- return self.type.get_id_v1(objectType)
-
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- if objectType == 'member':
- return symbol.get_full_nested_name().get_id_v2()
- else:
- return self.type.get_id_v2()
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ if objectType != 'member':
+ return self.type.get_id(version, objectType)
+ if version == 1:
+ return symbol.get_full_nested_name().get_id(version) + u'__' \
+ + self.type.get_id(version)
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -2452,9 +2972,9 @@ class ASTTypeWithInit(ASTBase):
def describe_signature(self, signode, mode, env, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
- self.type.describe_signature(signode, mode, env, symbol=symbol)
+ self.type.describe_signature(signode, mode, env, symbol)
if self.init:
- self.init.describe_signature(signode, mode)
+ self.init.describe_signature(signode, mode, env, symbol)
class ASTTypeUsing(ASTBase):
@@ -2463,13 +2983,11 @@ class ASTTypeUsing(ASTBase):
self.name = name
self.type = type
- def get_id_v1(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- raise NoOldIdError()
-
- def get_id_v2(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v2()
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -2494,10 +3012,9 @@ class ASTTypeUsing(ASTBase):
class ASTConcept(ASTBase):
- def __init__(self, nestedName, isFunction, initializer):
- # type: (Any, bool, Any) -> None
+ def __init__(self, nestedName, initializer):
+ # type: (Any, Any) -> None
self.nestedName = nestedName
- self.isFunction = isFunction # otherwise it's a variable concept
self.initializer = initializer
@property
@@ -2505,31 +3022,24 @@ class ASTConcept(ASTBase):
# type: () -> unicode
return self.nestedName
- def get_id_v1(self, objectType=None, symbol=None):
- # type: (unicode, Symbol) -> unicode
- raise NoOldIdError()
-
- def get_id_v2(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v2()
+ def get_id(self, version, objectType=None, symbol=None):
+ # type: (int, unicode, Symbol) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
res = text_type(self.nestedName)
- if self.isFunction:
- res += "()"
if self.initializer:
res += text_type(self.initializer)
return res
def describe_signature(self, signode, mode, env, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
- signode += nodes.Text(text_type("bool "))
self.nestedName.describe_signature(signode, mode, env, symbol)
- if self.isFunction:
- signode += nodes.Text("()")
if self.initializer:
- self.initializer.describe_signature(signode, mode)
+ self.initializer.describe_signature(signode, mode, env, symbol)
class ASTBaseClass(ASTBase):
@@ -2575,13 +3085,9 @@ class ASTClass(ASTBase):
self.final = final
self.bases = bases
- def get_id_v1(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v1()
-
- def get_id_v2(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v2()
+ def get_id(self, version, objectType, symbol):
+ # type: (int, unicode, Symbol) -> unicode
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -2621,13 +3127,11 @@ class ASTEnum(ASTBase):
self.scoped = scoped
self.underlyingType = underlyingType
- def get_id_v1(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- raise NoOldIdError()
-
- def get_id_v2(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v2()
+ def get_id(self, version, objectType, symbol):
+ # type: (int, unicode, Symbol) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -2658,13 +3162,11 @@ class ASTEnumerator(ASTBase):
self.name = name
self.init = init
- def get_id_v1(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- raise NoOldIdError()
-
- def get_id_v2(self, objectType, symbol): # type: ignore
- # type: (unicode, Symbol) -> unicode
- return symbol.get_full_nested_name().get_id_v2()
+ def get_id(self, version, objectType, symbol):
+ # type: (int, unicode, Symbol) -> unicode
+ if version == 1:
+ raise NoOldIdError()
+ return symbol.get_full_nested_name().get_id(version)
def __unicode__(self):
# type: () -> unicode
@@ -2677,9 +3179,9 @@ class ASTEnumerator(ASTBase):
def describe_signature(self, signode, mode, env, symbol):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Symbol) -> None
_verify_description_mode(mode)
- self.name.describe_signature(signode, mode, env, symbol=symbol)
+ self.name.describe_signature(signode, mode, env, symbol)
if self.init:
- self.init.describe_signature(signode, 'noneIsName')
+ self.init.describe_signature(signode, 'markType', env, symbol)
class ASTDeclaration(ASTBase):
@@ -2709,30 +3211,36 @@ class ASTDeclaration(ASTBase):
# type: () -> unicode
return self.declaration.name
- def get_id_v1(self):
- # type: () -> unicode
- if self.templatePrefix:
- raise NoOldIdError()
- if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
- return self.enumeratorScopedSymbol.declaration.get_id_v1()
- return self.declaration.get_id_v1(self.objectType, self.symbol)
+ @property
+ def function_params(self):
+ # type: () -> Any
+ if self.objectType != 'function':
+ return None
+ return self.declaration.function_params
- def get_id_v2(self, prefixed=True):
- # type: (bool) -> unicode
+ def get_id(self, version, prefixed=True):
+ # type: (int, bool) -> unicode
+ if version == 1:
+ if self.templatePrefix:
+ raise NoOldIdError()
+ if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
+ return self.enumeratorScopedSymbol.declaration.get_id(version)
+ return self.declaration.get_id(version, self.objectType, self.symbol)
+ # version >= 2
if self.objectType == 'enumerator' and self.enumeratorScopedSymbol:
- return self.enumeratorScopedSymbol.declaration.get_id_v2(prefixed)
+ return self.enumeratorScopedSymbol.declaration.get_id(version, prefixed)
if prefixed:
- res = [_id_prefix_v2]
+ res = [_id_prefix[version]]
else:
res = []
if self.templatePrefix:
- res.append(self.templatePrefix.get_id_v2())
- res.append(self.declaration.get_id_v2(self.objectType, self.symbol))
+ res.append(self.templatePrefix.get_id(version))
+ res.append(self.declaration.get_id(version, self.objectType, self.symbol))
return u''.join(res)
def get_newest_id(self):
# type: () -> unicode
- return self.get_id_v2()
+ return self.get_id(_max_id, True)
def __unicode__(self):
# type: () -> unicode
@@ -2748,15 +3256,15 @@ class ASTDeclaration(ASTBase):
def describe_signature(self, signode, mode, env, options):
# type: (addnodes.desc_signature, unicode, BuildEnvironment, Dict) -> None
_verify_description_mode(mode)
+ assert self.symbol
# The caller of the domain added a desc_signature node.
# Always enable multiline:
signode['is_multiline'] = True
# Put each line in a desc_signature_line node.
mainDeclNode = addnodes.desc_signature_line()
mainDeclNode.sphinx_cpp_tagname = 'declarator'
- mainDeclNode['add_permalink'] = True
+ mainDeclNode['add_permalink'] = not self.symbol.isRedeclaration
- assert self.symbol
if self.templatePrefix:
self.templatePrefix.describe_signature(signode, mode, env,
symbol=self.symbol,
@@ -2787,8 +3295,7 @@ class ASTDeclaration(ASTBase):
mainDeclNode += addnodes.desc_annotation('enumerator ', 'enumerator ')
else:
assert False
- self.declaration.describe_signature(mainDeclNode, mode, env,
- symbol=self.symbol)
+ self.declaration.describe_signature(mainDeclNode, mode, env, self.symbol)
class ASTNamespace(ASTBase):
@@ -2803,27 +3310,25 @@ class Symbol(object):
# type: () -> None
if not self.parent:
# parent == None means global scope, so declaration means a parent
- assert not self.identifier
+ assert not self.identOrOp
assert not self.templateParams
assert not self.templateArgs
assert not self.declaration
assert not self.docname
else:
- if not self.identifier:
- # in case it's an operator
- assert self.declaration
if self.declaration:
assert self.docname
- def __init__(self, parent, identifier,
+ def __init__(self, parent, identOrOp,
templateParams, templateArgs, declaration, docname):
# type: (Any, Any, Any, Any, Any, unicode) -> None
self.parent = parent
- self.identifier = identifier
+ self.identOrOp = identOrOp
self.templateParams = templateParams # template<templateParams>
self.templateArgs = templateArgs # identifier<templateArgs>
self.declaration = declaration
self.docname = docname
+ self.isRedeclaration = False
self._assert_invariants()
self.children = [] # type: List[Any]
@@ -2837,14 +3342,30 @@ class Symbol(object):
for p in self.templateParams.params:
if not p.get_identifier():
continue
- # only add a declaration if we our selfs from a declaration
+ # only add a declaration if we our selfs are from a declaration
if declaration:
decl = ASTDeclaration('templateParam', None, None, p)
else:
decl = None
nne = ASTNestedNameElement(p.get_identifier(), None)
- nn = ASTNestedName([nne], rooted=False)
+ nn = ASTNestedName([nne], [False], rooted=False)
self._add_symbols(nn, [], decl, docname)
+ # add symbols for function parameters, if any
+ if declaration is not None and declaration.function_params is not None:
+ for p in declaration.function_params:
+ if p.arg is None:
+ continue
+ nn = p.arg.name
+ if nn is None:
+ continue
+ # (comparing to the template params: we have checked that we are a declaration)
+ decl = ASTDeclaration('functionParam', None, None, p)
+ assert not nn.rooted
+ assert len(nn.names) == 1
+ identOrOp = nn.names[0].identOrOp
+ Symbol(parent=self, identOrOp=identOrOp,
+ templateParams=None, templateArgs=None,
+ declaration=decl, docname=docname)
def _fill_empty(self, declaration, docname):
# type: (Any, unicode) -> None
@@ -2866,12 +3387,7 @@ class Symbol(object):
if sChild.declaration and sChild.docname == docname:
sChild.declaration = None
sChild.docname = None
- # Just remove operators, because there is no identification if
- # they got removed.
- # Don't remove other symbols because they may be used in namespace
- # directives.
- if sChild.identifier or sChild.declaration:
- newChildren.append(sChild)
+ newChildren.append(sChild)
self.children = newChildren
def get_all_symbols(self):
@@ -2883,9 +3399,6 @@ class Symbol(object):
def get_lookup_key(self):
# type: () -> List[Tuple[ASTNestedNameElement, Any]]
- if not self.parent:
- # specialise for the root
- return None
symbols = []
s = self
while s.parent:
@@ -2894,39 +3407,51 @@ class Symbol(object):
symbols.reverse()
key = []
for s in symbols:
- if s.identifier:
- nne = ASTNestedNameElement(s.identifier, s.templateArgs)
- else:
- assert s.declaration
- nne = s.declaration.name.names[-1]
+ nne = ASTNestedNameElement(s.identOrOp, s.templateArgs)
key.append((nne, s.templateParams))
return key
def get_full_nested_name(self):
# type: () -> ASTNestedName
names = []
+ templates = []
for nne, templateParams in self.get_lookup_key():
names.append(nne)
- return ASTNestedName(names, rooted=False)
+ templates.append(False)
+ return ASTNestedName(names, templates, rooted=False)
- def _find_named_symbol(self, identifier, templateParams,
- templateArgs, operator,
+ def _find_named_symbol(self, identOrOp, templateParams, templateArgs,
templateShorthand, matchSelf):
- # type: (Any, Any, Any, Any, Any, bool) -> Symbol
- assert (identifier is None) != (operator is None)
+ # type: (Any, Any, Any, Any, bool) -> Symbol
+
+ def isSpecialization():
+ # the names of the template parameters must be given exactly as args
+ # and params that are packs must in the args be the name expanded
+ if len(templateParams.params) != len(templateArgs.args):
+ return True
+ for i in range(len(templateParams.params)):
+ param = templateParams.params[i]
+ arg = templateArgs.args[i]
+ # TODO: doing this by string manipulation is probably not the most efficient
+ paramName = text_type(param.name)
+ argTxt = text_type(arg)
+ isArgPackExpansion = argTxt.endswith('...')
+ if param.isPack != isArgPackExpansion:
+ return True
+ argName = argTxt[:-3] if isArgPackExpansion else argTxt
+ if paramName != argName:
+ return True
+ return False
+ if templateParams is not None and templateArgs is not None:
+ # If both are given, but it's not a specialization, then do lookup as if
+ # there is no argument list.
+ # For example: template<typename T> int A<T>::var;
+ if not isSpecialization():
+ templateArgs = None
def matches(s):
- if s.identifier != identifier:
+ if s.identOrOp != identOrOp:
return False
- if not s.identifier:
- if not s.declaration:
- return False
- assert operator
- name = s.declaration.name.names[-1]
- if not name.is_operator():
- return False
- if text_type(name) != text_type(operator):
- return False
if (s.templateParams is None) != (templateParams is None):
if templateParams is not None:
# we query with params, they must match params
@@ -2967,10 +3492,7 @@ class Symbol(object):
names = nestedName.names
iTemplateDecl = 0
for name in names[:-1]:
- # there shouldn't be anything inside an operator
- # (other than template parameters, which are not added this way, right?)
- assert not name.is_operator()
- identifier = name.identifier
+ identOrOp = name.identOrOp
templateArgs = name.templateArgs
if templateArgs:
assert iTemplateDecl < len(templateDecls)
@@ -2978,27 +3500,20 @@ class Symbol(object):
iTemplateDecl += 1
else:
templateParams = None
- symbol = parentSymbol._find_named_symbol(identifier,
+ symbol = parentSymbol._find_named_symbol(identOrOp,
templateParams,
templateArgs,
- operator=None,
templateShorthand=False,
matchSelf=False)
if symbol is None:
- symbol = Symbol(parent=parentSymbol, identifier=identifier,
+ symbol = Symbol(parent=parentSymbol, identOrOp=identOrOp,
templateParams=templateParams,
templateArgs=templateArgs, declaration=None,
docname=None)
parentSymbol = symbol
name = names[-1]
- if name.is_operator():
- identifier = None
- templateArgs = None
- operator = name
- else:
- identifier = name.identifier
- templateArgs = name.templateArgs
- operator = None
+ identOrOp = name.identOrOp
+ templateArgs = name.templateArgs
if iTemplateDecl < len(templateDecls):
if iTemplateDecl + 1 != len(templateDecls):
print(text_type(templateDecls))
@@ -3008,10 +3523,9 @@ class Symbol(object):
else:
assert iTemplateDecl == len(templateDecls)
templateParams = None
- symbol = parentSymbol._find_named_symbol(identifier,
+ symbol = parentSymbol._find_named_symbol(identOrOp,
templateParams,
templateArgs,
- operator,
templateShorthand=False,
matchSelf=False)
if symbol:
@@ -3026,25 +3540,29 @@ class Symbol(object):
# .. class:: Test
symbol._fill_empty(declaration, docname)
return symbol
- # It may simply be a functin overload, so let's compare ids.
- candSymbol = Symbol(parent=parentSymbol, identifier=identifier,
+ # It may simply be a function overload, so let's compare ids.
+ isRedeclaration = True
+ candSymbol = Symbol(parent=parentSymbol, identOrOp=identOrOp,
templateParams=templateParams,
templateArgs=templateArgs,
declaration=declaration,
docname=docname)
- newId = declaration.get_newest_id()
- oldId = symbol.declaration.get_newest_id()
- if newId != oldId:
- # we already inserted the symbol, so return the new one
- symbol = candSymbol
- else:
+ if declaration.objectType == "function":
+ newId = declaration.get_newest_id()
+ oldId = symbol.declaration.get_newest_id()
+ if newId != oldId:
+ # we already inserted the symbol, so return the new one
+ symbol = candSymbol
+ isRedeclaration = False
+ if isRedeclaration:
# Redeclaration of the same symbol.
# Let the new one be there, but raise an error to the client
# so it can use the real symbol as subscope.
# This will probably result in a duplicate id warning.
- raise _DuplicateSymbolError(symbol, candSymbol)
+ candSymbol.isRedeclaration = True
+ raise _DuplicateSymbolError(symbol, declaration)
else:
- symbol = Symbol(parent=parentSymbol, identifier=identifier,
+ symbol = Symbol(parent=parentSymbol, identOrOp=identOrOp,
templateParams=templateParams,
templateArgs=templateArgs,
declaration=declaration,
@@ -3055,22 +3573,9 @@ class Symbol(object):
# type: (Any, List[unicode], BuildEnvironment) -> None
assert other is not None
for otherChild in other.children:
- if not otherChild.identifier:
- if not otherChild.declaration:
- print("Problem in symbol tree merging")
- print("OtherChild.dump:")
- print(otherChild.dump(0))
- print("Other.dump:")
- print(other.dump(0))
- assert otherChild.declaration
- operator = otherChild.declaration.name.names[-1]
- assert operator.is_operator()
- else:
- operator = None
- ourChild = self._find_named_symbol(otherChild.identifier,
+ ourChild = self._find_named_symbol(otherChild.identOrOp,
otherChild.templateParams,
otherChild.templateArgs,
- operator,
templateShorthand=False,
matchSelf=False)
if ourChild is None:
@@ -3115,12 +3620,12 @@ class Symbol(object):
templateDecls = []
return self._add_symbols(nestedName, templateDecls, declaration, docname)
- def find_identifier(self, identifier, matchSelf):
+ def find_identifier(self, identOrOp, matchSelf):
# type: (Any, bool) -> Symbol
- if matchSelf and self.identifier and self.identifier == identifier:
+ if matchSelf and self.identOrOp == identOrOp:
return self
for s in self.children:
- if s.identifier and s.identifier == identifier:
+ if s.identOrOp == identOrOp:
return s
return None
@@ -3128,16 +3633,10 @@ class Symbol(object):
# type: (List[Tuple[Any, Any]]) -> Symbol
s = self
for name, templateParams in key:
- if name.is_operator():
- identifier = None
- templateArgs = None
- operator = name
- else:
- identifier = name.identifier
- templateArgs = name.templateArgs
- operator = None
- s = s._find_named_symbol(identifier, templateParams,
- templateArgs, operator,
+ identOrOp = name.identOrOp
+ templateArgs = name.templateArgs
+ s = s._find_named_symbol(identOrOp,
+ templateParams, templateArgs,
templateShorthand=False,
matchSelf=False)
if not s:
@@ -3161,60 +3660,51 @@ class Symbol(object):
firstName = names[0]
if not firstName.is_operator():
while parentSymbol.parent:
- if parentSymbol.find_identifier(firstName.identifier,
+ if parentSymbol.find_identifier(firstName.identOrOp,
matchSelf=matchSelf):
# if we are in the scope of a constructor but wants to reference the class
# we need to walk one extra up
if (len(names) == 1 and typ == 'class' and matchSelf and
- parentSymbol.parent and parentSymbol.parent.identifier and
- parentSymbol.parent.identifier == firstName.identifier):
+ parentSymbol.parent and
+ parentSymbol.parent.identOrOp == firstName.identOrOp):
pass
else:
break
parentSymbol = parentSymbol.parent
-
iTemplateDecl = 0
for iName in range(len(names)):
name = names[iName]
if iName + 1 == len(names):
- if name.is_operator():
- identifier = None
- templateArgs = None
- operator = name
- else:
- identifier = name.identifier
- templateArgs = name.templateArgs
- operator = None
+ identOrOp = name.identOrOp
+ templateArgs = name.templateArgs
if iTemplateDecl < len(templateDecls):
assert iTemplateDecl + 1 == len(templateDecls)
templateParams = templateDecls[iTemplateDecl]
else:
assert iTemplateDecl == len(templateDecls)
templateParams = None
- symbol = parentSymbol._find_named_symbol(identifier,
- templateParams,
- templateArgs,
- operator,
+ symbol = parentSymbol._find_named_symbol(identOrOp,
+ templateParams, templateArgs,
templateShorthand=templateShorthand,
matchSelf=matchSelf)
- if symbol:
+ if symbol is not None:
return symbol
- else:
- return None
+ # try without template params and args
+ symbol = parentSymbol._find_named_symbol(identOrOp,
+ None, None,
+ templateShorthand=templateShorthand,
+ matchSelf=matchSelf)
+ return symbol
else:
- # there shouldn't be anything inside an operator
- assert not name.is_operator()
- identifier = name.identifier
+ identOrOp = name.identOrOp
templateArgs = name.templateArgs
if templateArgs and iTemplateDecl < len(templateDecls):
templateParams = templateDecls[iTemplateDecl]
iTemplateDecl += 1
else:
templateParams = None
- symbol = parentSymbol._find_named_symbol(identifier,
- templateParams,
- templateArgs,
- operator=None,
+ symbol = parentSymbol._find_named_symbol(identOrOp,
+ templateParams, templateArgs,
templateShorthand=templateShorthand,
matchSelf=matchSelf)
if symbol is None:
@@ -3237,8 +3727,8 @@ class Symbol(object):
res.append(text_type(self.templateParams))
res.append('\n')
res.append('\t' * indent)
- if self.identifier:
- res.append(text_type(self.identifier))
+ if self.identOrOp:
+ res.append(text_type(self.identOrOp))
else:
res.append(text_type(self.declaration))
if self.templateArgs:
@@ -3278,6 +3768,9 @@ class DefinitionParser(object):
self.end = len(self.definition)
self.last_match = None # type: Match
self._previous_state = (0, None) # type: Tuple[int, Match]
+ self.otherErrors = [] # type: List[DefinitionError]
+ # in our tests the following is set to False to capture bad parsing
+ self.allowFallbackExpressionParsing = True
self.warnEnv = warnEnv
self.config = config
@@ -3285,7 +3778,10 @@ class DefinitionParser(object):
def _make_multi_error(self, errors, header):
# type: (List[Any], unicode) -> DefinitionError
if len(errors) == 1:
- return DefinitionError(header + '\n' + errors[0][0].description)
+ if len(header) > 0:
+ return DefinitionError(header + '\n' + errors[0][0].description)
+ else:
+ return DefinitionError(errors[0][0].description)
result = [header, '\n']
for e in errors:
if len(e[1]) > 0:
@@ -3310,10 +3806,16 @@ class DefinitionParser(object):
def fail(self, msg):
# type: (unicode) -> None
+ errors = []
indicator = '-' * self.pos + '^'
- raise DefinitionError(
+ exMain = DefinitionError(
'Invalid definition: %s [error at %d]\n %s\n %s' %
(msg, self.pos, self.definition, indicator))
+ errors.append((exMain, "Main error"))
+ for err in self.otherErrors:
+ errors.append((err, "Potential other error"))
+ self.otherErrors = []
+ raise self._make_multi_error(errors, '')
def warn(self, msg):
# type: (unicode) -> None
@@ -3399,6 +3901,25 @@ class DefinitionParser(object):
if not self.eof:
self.fail('Expected end of definition.')
+ def _parse_string(self):
+ if self.current_char != '"':
+ return None
+ startPos = self.pos
+ self.pos += 1
+ escape = False
+ while True:
+ if self.eof:
+ self.fail("Unexpected end during inside string.")
+ elif self.current_char == '"' and not escape:
+ self.pos += 1
+ break
+ elif self.current_char == '\\':
+ escape = True
+ else:
+ escape = False
+ self.pos += 1
+ return self.definition[startPos:self.pos]
+
def _parse_balanced_token_seq(self, end):
# type: (List[unicode]) -> unicode
# TODO: add handling of string literals and similar
@@ -3480,11 +4001,381 @@ class DefinitionParser(object):
return None
- def _parse_expression(self, end):
- # type: (List[unicode]) -> unicode
+ def _parse_literal(self):
+ # -> integer-literal
+ # | character-literal
+ # | floating-literal
+ # | string-literal
+ # | boolean-literal -> "false" | "true"
+ # | pointer-literal -> "nullptr"
+ # | user-defined-literal
+ self.skip_ws()
+ if self.skip_word('nullptr'):
+ return ASTPointerLiteral()
+ if self.skip_word('true'):
+ return ASTBooleanLiteral(True)
+ if self.skip_word('false'):
+ return ASTBooleanLiteral(False)
+ for regex in [_float_literal_re, _binary_literal_re, _hex_literal_re,
+ _integer_literal_re, _octal_literal_re]:
+ pos = self.pos
+ if self.match(regex):
+ while self.current_char in 'uUlLfF':
+ self.pos += 1
+ return ASTNumberLiteral(self.definition[pos:self.pos])
+
+ string = self._parse_string()
+ if string is not None:
+ return ASTStringLiteral(string)
+ # TODO: char lit
+ # TODO: user-defined lit
+ return None
+
+ def _parse_fold_or_paren_expression(self):
+ # "(" expression ")"
+ # fold-expression
+ # -> ( cast-expression fold-operator ... )
+ # | ( ... fold-operator cast-expression )
+    #  | ( cast-expression fold-operator ... fold-operator cast-expression )
+ if self.current_char != '(':
+ return None
+ self.pos += 1
+ self.skip_ws()
+ if self.skip_string_and_ws("..."):
+ # ( ... fold-operator cast-expression )
+ if not self.match(_fold_operator_re):
+ self.fail("Expected fold operator after '...' in fold expression.")
+ op = self.matched_text
+ rightExpr = self._parse_cast_expression()
+ if not self.skip_string(')'):
+ self.fail("Expected ')' in end of fold expression.")
+ return ASTFoldExpr(None, op, rightExpr)
+ # TODO: actually try to parse fold expression
+ # fall back to a paren expression
+ res = self._parse_expression(inTemplate=False)
+ self.skip_ws()
+ if not self.skip_string(')'):
+ self.fail("Expected ')' in end of fold expression or parenthesized expression.")
+ return ASTParenExpr(res)
+
+ def _parse_primary_expression(self):
+ # literal
+ # "this"
+ # lambda-expression
+ # "(" expression ")"
+ # fold-expression
+ # id-expression -> we parse this with _parse_nested_name
+ self.skip_ws()
+ res = self._parse_literal()
+ if res is not None:
+ return res
+ # TODO: try 'this' and lambda expression
+ res = self._parse_fold_or_paren_expression()
+ if res is not None:
+ return res
+ return self._parse_nested_name()
+
+ def _parse_postfix_expression(self):
+ # -> primary
+ # | postfix "[" expression "]"
+ # | postfix "[" braced-init-list [opt] "]"
+ # | postfix "(" expression-list [opt] ")"
+ # | postfix "." "template" [opt] id-expression
+ # | postfix "->" "template" [opt] id-expression
+ # | postfix "." pseudo-destructor-name
+ # | postfix "->" pseudo-destructor-name
+ # | postfix "++"
+ # | postfix "--"
+ # | simple-type-specifier "(" expression-list [opt] ")"
+ # | simple-type-specifier braced-init-list
+ # | typename-specifier "(" expression-list [opt] ")"
+ # | typename-specifier braced-init-list
+ # | "dynamic_cast" "<" type-id ">" "(" expression ")"
+ # | "static_cast" "<" type-id ">" "(" expression ")"
+ # | "reinterpret_cast" "<" type-id ">" "(" expression ")"
+ # | "const_cast" "<" type-id ">" "(" expression ")"
+ # | "typeid" "(" expression ")"
+ # | "typeid" "(" type-id ")"
+
+ # TODO: try the productions with prefixes:
+ # dynamic_cast, static_cast, reinterpret_cast, const_cast, typeid
+ prefixType = None
+ pos = self.pos
+ try:
+ prefix = self._parse_primary_expression()
+ prefixType = 'expr'
+ except DefinitionError as eOuter:
+ self.pos = pos
+ try:
+ # we are potentially casting, so save parens for us
+ # TODO: hmm, would we need to try both with operatorCast and with None?
+ prefix = self._parse_type(False, 'operatorCast')
+ prefixType = 'typeOperatorCast'
+ # | simple-type-specifier "(" expression-list [opt] ")"
+ # | simple-type-specifier braced-init-list
+ # | typename-specifier "(" expression-list [opt] ")"
+ # | typename-specifier braced-init-list
+ self.skip_ws()
+ if self.current_char != '(' and self.current_char != '{':
+ self.fail("Expecting '(' or '{' after type in cast expression.")
+ except DefinitionError as eInner:
+ self.pos = pos
+ header = "Error in postfix expression, expected primary expression or type."
+ errors = []
+ errors.append((eOuter, "If primary expression"))
+ errors.append((eInner, "If type"))
+ raise self._make_multi_error(errors, header)
+ # and now parse postfixes
+ postFixes = []
+ while True:
+ self.skip_ws()
+ if prefixType == 'expr':
+ if self.skip_string_and_ws('['):
+ expr = self._parse_expression(inTemplate=False)
+ self.skip_ws()
+ if not self.skip_string(']'):
+ self.fail("Expected ']' in end of postfix expression.")
+ postFixes.append(ASTPostfixArray(expr))
+ continue
+ if self.skip_string('.'):
+ if self.skip_string('*'):
+ # don't steal the dot
+ self.pos -= 2
+ else:
+ name = self._parse_nested_name()
+ postFixes.append(ASTPostfixMember(name)) # type: ignore
+ continue
+ if self.skip_string('->'):
+ if self.skip_string('*'):
+ # don't steal the arrow
+ self.pos -= 3
+ else:
+ name = self._parse_nested_name()
+ postFixes.append(ASTPostfixMemberOfPointer(name)) # type: ignore
+ continue
+ if self.skip_string('++'):
+ postFixes.append(ASTPostfixInc()) # type: ignore
+ continue
+ if self.skip_string('--'):
+ postFixes.append(ASTPostfixDec()) # type: ignore
+ continue
+ if self.skip_string_and_ws('('):
+                # TODO: handle braced-init-list
+ exprs = []
+ self.skip_ws()
+ if not self.skip_string(')'):
+ while True:
+ self.skip_ws()
+ expr = self._parse_expression(inTemplate=False)
+ exprs.append(expr)
+ self.skip_ws()
+ if self.skip_string(')'):
+ break
+ if not self.skip_string(','):
+ self.fail("Error in cast or call, expected ',' or ')'.")
+ postFixes.append(ASTPostfixCallExpr(exprs)) # type: ignore
+ continue
+ break
+ if len(postFixes) == 0:
+ return prefix
+ else:
+ return ASTPostfixExpr(prefix, postFixes)
+
+ def _parse_unary_expression(self):
+ # -> postfix
+ # | "++" cast
+ # | "--" cast
+ # | unary-operator cast -> (* | & | + | - | ! | ~) cast
+ # The rest:
+ # | "sizeof" unary
+ # | "sizeof" "(" type-id ")"
+ # | "sizeof" "..." "(" identifier ")"
+ # | "alignof" "(" type-id ")"
+ # | noexcept-expression -> noexcept "(" expression ")"
+ # | new-expression
+ # | delete-expression
+ self.skip_ws()
+ for op in _expression_unary_ops:
+ # TODO: hmm, should we be able to backtrack here?
+ if self.skip_string(op):
+ expr = self._parse_cast_expression()
+ return ASTUnaryOpExpr(op, expr)
+ if self.skip_word_and_ws('sizeof'):
+ if self.skip_string_and_ws('...'):
+ if not self.skip_string_and_ws('('):
+ self.fail("Expecting '(' after 'sizeof...'.")
+ if not self.match(_identifier_re):
+ self.fail("Expecting identifier for 'sizeof...'.")
+ ident = ASTIdentifier(self.matched_text)
+ self.skip_ws()
+ if not self.skip_string(")"):
+ self.fail("Expecting ')' to end 'sizeof...'.")
+ return ASTSizeofParamPack(ident)
+ if self.skip_string_and_ws('('):
+ typ = self._parse_type(named=False)
+ self.skip_ws()
+ if not self.skip_string(')'):
+ self.fail("Expecting ')' to end 'sizeof'.")
+ return ASTSizeofType(typ)
+ expr = self._parse_unary_expression()
+ return ASTSizeofExpr(expr)
+ if self.skip_word_and_ws('alignof'):
+ if not self.skip_string_and_ws('('):
+ self.fail("Expecting '(' after 'alignof'.")
+ typ = self._parse_type(named=False)
+ self.skip_ws()
+ if not self.skip_string(')'):
+ self.fail("Expecting ')' to end 'alignof'.")
+ return ASTAlignofExpr(typ)
+ if self.skip_word_and_ws('noexcept'):
+ if not self.skip_string_and_ws('('):
+ self.fail("Expecting '(' after 'noexcept'.")
+ expr = self._parse_expression(inTemplate=False)
+ self.skip_ws()
+ if not self.skip_string(')'):
+ self.fail("Expecting ')' to end 'noexcept'.")
+ return ASTNoexceptExpr(expr)
+ # TODO: the rest
+ return self._parse_postfix_expression()
+
+ def _parse_cast_expression(self):
+ # -> unary | "(" type-id ")" cast
+ pos = self.pos
+ self.skip_ws()
+ if self.skip_string('('):
+ try:
+ typ = self._parse_type(False)
+ if not self.skip_string(')'):
+ raise DefinitionError("Expected ')' in cast expression.")
+ expr = self._parse_cast_expression()
+ return ASTCastExpr(typ, expr)
+ except DefinitionError as exCast:
+ self.pos = pos
+ try:
+ return self._parse_unary_expression()
+ except DefinitionError as exUnary:
+ errs = []
+ errs.append((exCast, "If type cast expression"))
+ errs.append((exUnary, "If unary expression"))
+ raise self._make_multi_error(errs, "Error in cast expression.")
+ else:
+ return self._parse_unary_expression()
+
+ def _parse_logical_or_expression(self, inTemplate):
+ # logical-or = logical-and ||
+ # logical-and = inclusive-or &&
+ # inclusive-or = exclusive-or |
+ # exclusive-or = and ^
+ # and = equality &
+ # equality = relational ==, !=
+ # relational = shift <, >, <=, >=
+ # shift = additive <<, >>
+ # additive = multiplicative +, -
+ # multiplicative = pm *, /, %
+ # pm = cast .*, ->*
+ def _parse_bin_op_expr(self, opId, inTemplate):
+ if opId + 1 == len(_expression_bin_ops):
+ def parser(inTemplate):
+ return self._parse_cast_expression()
+ else:
+ def parser(inTemplate):
+ return _parse_bin_op_expr(self, opId + 1, inTemplate=inTemplate)
+ exprs = []
+ ops = []
+ exprs.append(parser(inTemplate=inTemplate))
+ while True:
+ self.skip_ws()
+ if inTemplate and self.current_char == '>':
+ break
+ pos = self.pos
+ oneMore = False
+ for op in _expression_bin_ops[opId]:
+ if not self.skip_string(op):
+ continue
+ if op == '&' and self.current_char == '&':
+ # don't split the && 'token'
+ self.pos -= 1
+ # and btw. && has lower precedence, so we are done
+ break
+ try:
+ expr = parser(inTemplate=inTemplate)
+ exprs.append(expr)
+ ops.append(op)
+ oneMore = True
+ break
+ except DefinitionError:
+ self.pos = pos
+ if not oneMore:
+ break
+ return ASTBinOpExpr(exprs, ops)
+ return _parse_bin_op_expr(self, 0, inTemplate=inTemplate)
+
+ def _parse_conditional_expression_tail(self, orExprHead):
+ # -> "?" expression ":" assignment-expression
+ return None
+
+ def _parse_assignment_expression(self, inTemplate):
+ # -> conditional-expression
+ # | logical-or-expression assignment-operator initializer-clause
+ # | throw-expression
+ # TODO: parse throw-expression: "throw" assignment-expression [opt]
+ # if not a throw expression, then:
+ # -> conditional-expression ->
+ # logical-or-expression
+ # | logical-or-expression "?" expression ":" assignment-expression
+ # | logical-or-expression assignment-operator initializer-clause
+ exprs = []
+ ops = []
+ orExpr = self._parse_logical_or_expression(inTemplate=inTemplate)
+ exprs.append(orExpr)
+ # TODO: handle ternary with _parse_conditional_expression_tail
+ while True:
+ oneMore = False
+ self.skip_ws()
+ for op in _expression_assignment_ops:
+ if not self.skip_string(op):
+ continue
+ expr = self._parse_logical_or_expression(False)
+ exprs.append(expr)
+ ops.append(op)
+ oneMore = True
+ if not oneMore:
+ break
+ if len(ops) == 0:
+ return orExpr
+ else:
+ return ASTAssignmentExpr(exprs, ops)
+
+ def _parse_constant_expression(self, inTemplate):
+ # -> conditional-expression
+ orExpr = self._parse_logical_or_expression(inTemplate=inTemplate)
+ # TODO: use _parse_conditional_expression_tail
+ return orExpr
+
+ def _parse_expression(self, inTemplate):
+ # -> assignment-expression
+        #  | expression "," assignment-expression
+ # TODO: actually parse the second production
+ return self._parse_assignment_expression(inTemplate=inTemplate)
+
+ def _parse_expression_fallback(self, end, parser, allow=True):
# Stupidly "parse" an expression.
# 'end' should be a list of characters which ends the expression.
- assert end
+
+ # first try to use the provided parser
+ prevPos = self.pos
+ try:
+ return parser()
+ except DefinitionError as e:
+ # some places (e.g., template parameters) we really don't want to use fallback,
+ # and for testing we may want to globally disable it
+ if not allow or not self.allowFallbackExpressionParsing:
+ raise
+ self.warn("Parsing of expression failed. Using fallback parser."
+ " Error was:\n%s" % e.description)
+ self.pos = prevPos
+ # and then the fallback scanning
+ assert end is not None
self.skip_ws()
startPos = self.pos
if self.match(_string_re):
@@ -3501,11 +4392,11 @@ class DefinitionParser(object):
elif len(symbols) > 0 and self.current_char == symbols[-1]:
symbols.pop()
self.pos += 1
- if self.eof:
+ if len(end) > 0 and self.eof:
self.fail("Could not find end of expression starting at %d."
% startPos)
value = self.definition[startPos:self.pos].strip()
- return value.strip()
+ return ASTFallbackExpr(value.strip())
def _parse_operator(self):
# type: () -> Any
@@ -3543,8 +4434,10 @@ class DefinitionParser(object):
def _parse_template_argument_list(self):
# type: () -> ASTTemplateArgs
self.skip_ws()
- if not self.skip_string('<'):
+ if not self.skip_string_and_ws('<'):
return None
+ if self.skip_string('>'):
+ return ASTTemplateArgs([])
prevErrors = []
templateArgs = [] # type: List
while 1:
@@ -3565,7 +4458,9 @@ class DefinitionParser(object):
prevErrors.append((e, "If type argument"))
self.pos = pos
try:
- value = self._parse_expression(end=[',', '>'])
+ def parser():
+ return self._parse_constant_expression(inTemplate=True)
+ value = self._parse_expression_fallback([',', '>'], parser)
self.skip_ws()
if self.skip_string('>'):
parsedEnd = True
@@ -3586,7 +4481,8 @@ class DefinitionParser(object):
def _parse_nested_name(self, memberPointer=False):
# type: (bool) -> ASTNestedName
- names = []
+ names = [] # type: List[Any]
+ templates = [] # type: List[bool]
self.skip_ws()
rooted = False
@@ -3594,14 +4490,17 @@ class DefinitionParser(object):
rooted = True
while 1:
self.skip_ws()
- if self.skip_word_and_ws('template'):
- self.fail("'template' in nested name not implemented.")
- elif self.skip_word_and_ws('operator'):
- op = self._parse_operator()
- names.append(op)
+ if len(names) > 0:
+ template = self.skip_word_and_ws('template')
+ else:
+ template = False
+ templates.append(template)
+ if self.skip_word_and_ws('operator'):
+ identOrOp = self._parse_operator()
else:
if not self.match(_identifier_re):
if memberPointer and len(names) > 0:
+ templates.pop()
break
self.fail("Expected identifier in nested name.")
identifier = self.matched_text
@@ -3609,16 +4508,24 @@ class DefinitionParser(object):
if identifier in _keywords:
self.fail("Expected identifier in nested name, "
"got keyword: %s" % identifier)
+ identOrOp = ASTIdentifier(identifier)
+ # try greedily to get template arguments,
+ # but otherwise a < might be because we are in an expression
+ pos = self.pos
+ try:
templateArgs = self._parse_template_argument_list()
- identifier = ASTIdentifier(identifier) # type: ignore
- names.append(ASTNestedNameElement(identifier, templateArgs))
+ except DefinitionError as ex:
+ self.pos = pos
+ templateArgs = None
+ self.otherErrors.append(ex)
+ names.append(ASTNestedNameElement(identOrOp, templateArgs))
self.skip_ws()
if not self.skip_string('::'):
if memberPointer:
self.fail("Expected '::' in pointer to member (function).")
break
- return ASTNestedName(names, rooted)
+ return ASTNestedName(names, templates, rooted)
def _parse_trailing_type_spec(self):
# type: () -> Any
@@ -3653,7 +4560,17 @@ class DefinitionParser(object):
# decltype
self.skip_ws()
if self.skip_word_and_ws('decltype'):
- self.fail('"decltype(.)" in trailing_type_spec not implemented')
+ if not self.skip_string_and_ws('('):
+ self.fail("Expected '(' after 'decltype'.")
+ if self.skip_word_and_ws('auto'):
+ if not self.skip_string(')'):
+ self.fail("Expected ')' after 'decltype(auto'.")
+ return ASTTrailingTypeSpecDecltypeAuto()
+ expr = self._parse_expression(inTemplate=False)
+ self.skip_ws()
+ if not self.skip_string(')'):
+ self.fail("Expected ')' after 'decltype(<expr>'.")
+ return ASTTrailingTypeSpecDecltype(expr)
# prefixed
prefix = None
@@ -3686,7 +4603,7 @@ class DefinitionParser(object):
self.fail('Expected ")" after "..." in '
'parameters_and_qualifiers.')
break
- # note: it seems that function arguments can always sbe named,
+ # note: it seems that function arguments can always be named,
# even in function pointers and similar.
arg = self._parse_type_with_init(outer=None, named='single')
# TODO: parse default parameters # TODO: didn't we just do that?
@@ -3873,7 +4790,7 @@ class DefinitionParser(object):
if self.match(_identifier_re):
identifier = ASTIdentifier(self.matched_text)
nne = ASTNestedNameElement(identifier, None)
- declId = ASTNestedName([nne], rooted=False)
+ declId = ASTNestedName([nne], [False], rooted=False)
# if it's a member pointer, we may have '::', which should be an error
self.skip_ws()
if self.current_char == ':':
@@ -3888,9 +4805,16 @@ class DefinitionParser(object):
while 1:
self.skip_ws()
if typed and self.skip_string('['):
- value = self._parse_expression(end=[']'])
- res = self.skip_string(']')
- assert res
+ self.skip_ws()
+ if self.skip_string(']'):
+ arrayOps.append(ASTArray(None))
+ continue
+
+ def parser():
+ return self._parse_expression(inTemplate=False)
+ value = self._parse_expression_fallback([']'], parser)
+ if not self.skip_string(']'):
+ self.fail("Expected ']' in end of array operator.")
arrayOps.append(ASTArray(value))
continue
else:
@@ -4000,19 +4924,28 @@ class DefinitionParser(object):
header = "Error in declarator or parameters and qualifiers"
raise self._make_multi_error(prevErrors, header)
- def _parse_initializer(self, outer=None):
- # type: (unicode) -> ASTInitializer
+ def _parse_initializer(self, outer=None, allowFallback=True):
+ # type: (unicode, bool) -> ASTInitializer
self.skip_ws()
# TODO: support paren and brace initialization for memberObject
if not self.skip_string('='):
return None
else:
if outer == 'member':
- value = self.read_rest().strip() # type: unicode
+ def parser():
+ return self._parse_assignment_expression(inTemplate=False)
+ value = self._parse_expression_fallback([], parser,
+ allow=allowFallback)
elif outer == 'templateParam':
- value = self._parse_expression(end=[',', '>'])
+ def parser():
+ return self._parse_assignment_expression(inTemplate=True)
+ value = self._parse_expression_fallback([',', '>'], parser,
+ allow=allowFallback)
elif outer is None: # function parameter
- value = self._parse_expression(end=[',', ')'])
+ def parser():
+ return self._parse_assignment_expression(inTemplate=False)
+ value = self._parse_expression_fallback([',', ')'], parser,
+ allow=allowFallback)
else:
self.fail("Internal error, initializer for outer '%s' not "
"implemented." % outer)
@@ -4081,9 +5014,8 @@ class DefinitionParser(object):
else:
# For testing purposes.
# do it again to get the proper traceback (how do you
- # relieable save a traceback when an exception is
+ # reliably save a traceback when an exception is
# constructed?)
- pass
self.pos = startPos
typed = True
declSpecs = self._parse_decl_specs(outer=outer, typed=typed)
@@ -4103,12 +5035,48 @@ class DefinitionParser(object):
return ASTType(declSpecs, decl)
def _parse_type_with_init(self, named, outer):
- # type: (Union[bool, unicode], unicode) -> ASTTypeWithInit
+ # type: (Union[bool, unicode], unicode) -> Any
if outer:
assert outer in ('type', 'member', 'function', 'templateParam')
type = self._parse_type(outer=outer, named=named)
- init = self._parse_initializer(outer=outer)
- return ASTTypeWithInit(type, init)
+ if outer != 'templateParam':
+ init = self._parse_initializer(outer=outer)
+ return ASTTypeWithInit(type, init)
+ # it could also be a constrained type parameter, e.g., C T = int&
+ pos = self.pos
+ eExpr = None
+ try:
+ init = self._parse_initializer(outer=outer, allowFallback=False)
+ # note: init may be None if there is no =
+ if init is None:
+ return ASTTypeWithInit(type, None)
+ # we parsed an expression, so we must have a , or a >,
+ # otherwise the expression didn't get everything
+ self.skip_ws()
+ if self.current_char != ',' and self.current_char != '>':
+ # pretend it didn't happen
+ self.pos = pos
+ init = None
+ else:
+ # we assume that it was indeed an expression
+ return ASTTypeWithInit(type, init)
+ except DefinitionError as e:
+ self.pos = pos
+ eExpr = e
+ if not self.skip_string("="):
+ return ASTTypeWithInit(type, None)
+ try:
+ typeInit = self._parse_type(named=False, outer=None)
+ return ASTTemplateParamConstrainedTypeWithInit(type, typeInit)
+ except DefinitionError as eType:
+ if eExpr is None:
+ raise eType
+ errs = []
+ errs.append((eExpr, "If default is an expression"))
+ errs.append((eType, "If default is a type"))
+ msg = "Error in non-type template parameter"
+ msg += " or constrianted template paramter."
+ raise self._make_multi_error(errs, msg)
def _parse_type_using(self):
# type: () -> ASTTypeUsing
@@ -4122,20 +5090,9 @@ class DefinitionParser(object):
def _parse_concept(self):
# type: () -> ASTConcept
nestedName = self._parse_nested_name()
- isFunction = False
-
self.skip_ws()
- if self.skip_string('('):
- isFunction = True
- self.skip_ws()
- if not self.skip_string(')'):
- self.fail("Expected ')' in function concept declaration.")
-
initializer = self._parse_initializer('member')
- if initializer and isFunction:
- self.fail("Function concept with initializer.")
-
- return ASTConcept(nestedName, isFunction, initializer)
+ return ASTConcept(nestedName, initializer)
def _parse_class(self):
# type: () -> ASTClass
@@ -4186,7 +5143,11 @@ class DefinitionParser(object):
init = None
if self.skip_string('='):
self.skip_ws()
- init = ASTInitializer(self.read_rest())
+
+ def parser():
+ return self._parse_constant_expression(inTemplate=False)
+ initVal = self._parse_expression_fallback([], parser)
+ init = ASTInitializer(initVal)
return ASTEnumerator(name, init)
def _parse_template_parameter_list(self):
@@ -4239,13 +5200,14 @@ class DefinitionParser(object):
param = ASTTemplateParamType(data)
templateParams.append(param)
else:
- # declare a non-type parameter
+ # declare a non-type parameter, or constrained type parameter
pos = self.pos
try:
param = self._parse_type_with_init('maybe', 'templateParam')
templateParams.append(ASTTemplateParamNonType(param))
except DefinitionError as e:
- prevErrors.append((e, "If non-type template parameter"))
+ msg = "If non-type template parameter or constrained template parameter"
+ prevErrors.append((e, msg))
self.pos = pos
self.skip_ws()
if self.skip_string('>'):
@@ -4307,7 +5269,13 @@ class DefinitionParser(object):
# the saved position is only used to provide a better error message
pos = self.pos
if self.skip_word("template"):
- params = self._parse_template_parameter_list() # type: Any
+ try:
+ params = self._parse_template_parameter_list() # type: Any
+ except DefinitionError as e:
+ if objectType == 'member' and len(templates) == 0:
+ return ASTTemplateDeclarationPrefix(None)
+ else:
+ raise e
else:
params = self._parse_template_introduction()
if not params:
@@ -4324,20 +5292,25 @@ class DefinitionParser(object):
return ASTTemplateDeclarationPrefix(templates)
def _check_template_consistency(self, nestedName, templatePrefix,
- fullSpecShorthand):
- # type: (Any, Any, bool) -> ASTTemplateDeclarationPrefix
+ fullSpecShorthand, isMember=False):
+ # type: (Any, Any, Any, bool) -> ASTTemplateDeclarationPrefix
numArgs = nestedName.num_templates()
+ isMemberInstantiation = False
if not templatePrefix:
numParams = 0
else:
- numParams = len(templatePrefix.templates)
+ if isMember and templatePrefix.templates is None:
+ numParams = 0
+ isMemberInstantiation = True
+ else:
+ numParams = len(templatePrefix.templates)
if numArgs + 1 < numParams:
self.fail("Too few template argument lists comapred to parameter"
" lists. Argument lists: %d, Parameter lists: %d."
% (numArgs, numParams))
if numArgs > numParams:
numExtra = numArgs - numParams
- if not fullSpecShorthand:
+ if not fullSpecShorthand and not isMemberInstantiation:
msg = "Too many template argument lists compared to parameter" \
" lists. Argument lists: %d, Parameter lists: %d," \
" Extra empty parameters lists prepended: %d." \
@@ -4351,7 +5324,7 @@ class DefinitionParser(object):
newTemplates = []
for i in range(numExtra):
newTemplates.append(ASTTemplateParams([]))
- if templatePrefix:
+ if templatePrefix and not isMemberInstantiation:
newTemplates.extend(templatePrefix.templates)
templatePrefix = ASTTemplateDeclarationPrefix(newTemplates)
return templatePrefix
@@ -4406,7 +5379,8 @@ class DefinitionParser(object):
assert False
templatePrefix = self._check_template_consistency(declaration.name,
templatePrefix,
- fullSpecShorthand=False)
+ fullSpecShorthand=False,
+ isMember=objectType == 'member')
return ASTDeclaration(objectType, visibility,
templatePrefix, declaration)
@@ -4433,11 +5407,31 @@ class DefinitionParser(object):
res.objectType = 'xref' # type: ignore
return res
+ def parse_expression(self):
+ pos = self.pos
+ try:
+ expr = self._parse_expression(False)
+ self.skip_ws()
+ self.assert_end()
+ except DefinitionError as exExpr:
+ self.pos = pos
+ try:
+ expr = self._parse_type(False)
+ self.skip_ws()
+ self.assert_end()
+ except DefinitionError as exType:
+ header = "Error when parsing (type) expression."
+ errs = []
+ errs.append((exExpr, "If expression:"))
+ errs.append((exType, "If type:"))
+ raise self._make_multi_error(errs, header)
+ return expr
+
def _make_phony_error_name():
# type: () -> ASTNestedName
nne = ASTNestedNameElement(ASTIdentifier("PhonyNameDueToError"), None)
- return ASTNestedName([nne], rooted=False)
+ return ASTNestedName([nne], [False], rooted=False)
class CPPObject(ObjectDescription):
@@ -4472,7 +5466,7 @@ class CPPObject(ObjectDescription):
# then add the name to the parent scope
symbol = ast.symbol
assert symbol
- assert symbol.identifier is not None
+ assert symbol.identOrOp is not None
assert symbol.templateParams is None
assert symbol.templateArgs is None
parentSymbol = symbol.parent
@@ -4485,7 +5479,7 @@ class CPPObject(ObjectDescription):
if parentDecl is None:
# the parent is not explicitly declared
# TODO: we could warn, but it could be a style to just assume
- # enumerator parnets to be scoped
+ # enumerator parents to be scoped
return
if parentDecl.objectType != 'enum':
# TODO: maybe issue a warning, enumerators in non-enums is weird,
@@ -4495,13 +5489,13 @@ class CPPObject(ObjectDescription):
return
targetSymbol = parentSymbol.parent
- s = targetSymbol.find_identifier(symbol.identifier, matchSelf=False)
+ s = targetSymbol.find_identifier(symbol.identOrOp, matchSelf=False)
if s is not None:
# something is already declared with that name
return
declClone = symbol.declaration.clone()
declClone.enumeratorScopedSymbol = symbol
- Symbol(parent=targetSymbol, identifier=symbol.identifier,
+ Symbol(parent=targetSymbol, identOrOp=symbol.identOrOp,
templateParams=None, templateArgs=None,
declaration=declClone,
docname=self.env.docname)
@@ -4509,14 +5503,15 @@ class CPPObject(ObjectDescription):
def add_target_and_index(self, ast, sig, signode):
# type: (Any, unicode, addnodes.desc_signature) -> None
# general note: name must be lstrip(':')'ed, to remove "::"
- try:
- id_v1 = ast.get_id_v1()
- except NoOldIdError:
- id_v1 = None
- id_v2 = ast.get_id_v2()
- # store them in reverse order, so the newest is first
- ids = [id_v2, id_v1]
-
+ ids = []
+ for i in range(1, _max_id + 1):
+ try:
+ id = ast.get_id(version=i)
+ ids.append(id)
+ except NoOldIdError:
+ assert i < _max_id
+ # let's keep the newest first
+ ids = list(reversed(ids))
newestId = ids[0]
assert newestId # shouldn't be None
if not re.compile(r'^[a-zA-Z0-9_]*$').match(newestId):
@@ -4524,13 +5519,25 @@ class CPPObject(ObjectDescription):
'report as bug (id=%s).' % (text_type(ast), newestId))
name = text_type(ast.symbol.get_full_nested_name()).lstrip(':')
- strippedName = name
- for prefix in self.env.config.cpp_index_common_prefix:
- if name.startswith(prefix):
- strippedName = strippedName[len(prefix):]
+ # Add index entry, but not if it's a declaration inside a concept
+ isInConcept = False
+ s = ast.symbol.parent
+ while s is not None:
+ decl = s.declaration
+ s = s.parent
+ if decl is None:
+ continue
+ if decl.objectType == 'concept':
+ isInConcept = True
break
- indexText = self.get_index_text(strippedName)
- self.indexnode['entries'].append(('single', indexText, newestId, '', None))
+ if not isInConcept:
+ strippedName = name
+ for prefix in self.env.config.cpp_index_common_prefix:
+ if name.startswith(prefix):
+ strippedName = strippedName[len(prefix):]
+ break
+ indexText = self.get_index_text(strippedName)
+ self.indexnode['entries'].append(('single', indexText, newestId, '', None))
if newestId not in self.state.document.ids:
# if the name is not unique, the first one will win
@@ -4589,6 +5596,7 @@ class CPPObject(ObjectDescription):
# Assume we are actually in the old symbol,
# instead of the newly created duplicate.
self.env.temp_data['cpp:last_symbol'] = e.symbol
+ self.warn("Duplicate declaration.")
if ast.objectType == 'enumerator':
self._add_enumerator_to_parent(ast)
@@ -4825,17 +5833,39 @@ class CPPXRefRole(XRefRole):
return title, target
+class CPPExprRole(object):
+ def __call__(self, typ, rawtext, text, lineno, inliner, options={}, content=[]):
+ class Warner(object):
+ def warn(self, msg):
+ inliner.reporter.warning(msg, line=lineno)
+
+ env = inliner.document.settings.env
+ parser = DefinitionParser(text, Warner(), env.config)
+ try:
+ ast = parser.parse_expression()
+ except DefinitionError as ex:
+ Warner().warn('Unparseable C++ expression: %r\n%s'
+ % (text, text_type(ex.description)))
+ return [nodes.literal(text)], []
+ parentSymbol = env.temp_data.get('cpp:parent_symbol', None)
+ if parentSymbol is None:
+ parentSymbol = env.domaindata['cpp']['root_symbol']
+ p = nodes.literal()
+ ast.describe_signature(p, 'markType', env, parentSymbol)
+ return [p], []
+
+
class CPPDomain(Domain):
"""C++ language domain."""
name = 'cpp'
label = 'C++'
object_types = {
- 'class': ObjType(l_('class'), 'class', 'type', 'typeOrConcept'),
- 'function': ObjType(l_('function'), 'function', 'func', 'type', 'typeOrConcept'),
+ 'class': ObjType(l_('class'), 'class', 'type', 'identifier'),
+ 'function': ObjType(l_('function'), 'function', 'func', 'type', 'identifier'),
'member': ObjType(l_('member'), 'member', 'var'),
- 'type': ObjType(l_('type'), 'type', 'typeOrConcept'),
- 'concept': ObjType(l_('concept'), 'concept', 'typeOrConcept'),
- 'enum': ObjType(l_('enum'), 'enum', 'type', 'typeOrConcept'),
+ 'type': ObjType(l_('type'), 'type', 'identifier'),
+ 'concept': ObjType(l_('concept'), 'concept', 'identifier'),
+ 'enum': ObjType(l_('enum'), 'enum', 'type', 'identifier'),
'enumerator': ObjType(l_('enumerator'), 'enumerator')
}
@@ -4863,7 +5893,8 @@ class CPPDomain(Domain):
'type': CPPXRefRole(),
'concept': CPPXRefRole(),
'enum': CPPXRefRole(),
- 'enumerator': CPPXRefRole()
+ 'enumerator': CPPXRefRole(),
+ 'expr': CPPExprRole()
}
initial_data = {
'root_symbol': Symbol(None, None, None, None, None, None),
@@ -4907,6 +5938,7 @@ class CPPDomain(Domain):
def _resolve_xref_inner(self, env, fromdocname, builder, typ,
target, node, contnode, emitWarnings=True):
# type: (BuildEnvironment, unicode, Builder, unicode, unicode, nodes.Node, nodes.Node, bool) -> nodes.Node # NOQA
+
class Warner(object):
def warn(self, msg):
if emitWarnings:
@@ -4956,6 +5988,9 @@ class CPPDomain(Domain):
templateShorthand=True,
matchSelf=True)
if s is None or s.declaration is None:
+ txtName = text_type(name)
+ if txtName.startswith('std::') or txtName == 'std':
+ raise NoUri()
return None, None
if typ.startswith('cpp:'):
@@ -4965,7 +6000,7 @@ class CPPDomain(Domain):
declTyp = s.declaration.objectType
def checkType():
- if typ == 'any':
+ if typ == 'any' or typ == 'identifier':
return True
if declTyp == 'templateParam':
return True
diff --git a/sphinx/domains/python.py b/sphinx/domains/python.py
index 432e9b542..fa96590b3 100644
--- a/sphinx/domains/python.py
+++ b/sphinx/domains/python.py
@@ -177,11 +177,11 @@ class PyObject(ObjectDescription):
PyTypedField('parameter', label=l_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwarg', 'kwparam'),
- typerolename='obj', typenames=('paramtype', 'type'),
+ typerolename='class', typenames=('paramtype', 'type'),
can_collapse=True),
PyTypedField('variable', label=l_('Variables'), rolename='obj',
names=('var', 'ivar', 'cvar'),
- typerolename='obj', typenames=('vartype',),
+ typerolename='class', typenames=('vartype',),
can_collapse=True),
PyGroupedField('exceptions', label=l_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
@@ -189,7 +189,7 @@ class PyObject(ObjectDescription):
Field('returnvalue', label=l_('Returns'), has_arg=False,
names=('returns', 'return')),
PyField('returntype', label=l_('Return type'), has_arg=False,
- names=('rtype',), bodyrolename='obj'),
+ names=('rtype',), bodyrolename='class'),
]
allow_nesting = False
@@ -843,7 +843,7 @@ class PythonDomain(Domain):
elif len(matches) > 1:
logger.warning('more than one target found for cross-reference %r: %s',
target, ', '.join(match[0] for match in matches),
- location=node)
+ type='ref', subtype='python', location=node)
name, obj = matches[0]
if obj[1] == 'module':
diff --git a/sphinx/environment/__init__.py b/sphinx/environment/__init__.py
index 65a73b019..b38e94d68 100644
--- a/sphinx/environment/__init__.py
+++ b/sphinx/environment/__init__.py
@@ -14,7 +14,6 @@ import os
import sys
import time
import types
-import codecs
import fnmatch
import warnings
from os import path
@@ -24,17 +23,13 @@ from collections import defaultdict
from six import BytesIO, itervalues, class_types, next
from six.moves import cPickle as pickle
-from docutils.io import NullOutput
-from docutils.core import Publisher
from docutils.utils import Reporter, get_source_line, normalize_language_tag
from docutils.utils.smartquotes import smartchars
-from docutils.parsers.rst import roles
-from docutils.parsers.rst.languages import en as english
from docutils.frontend import OptionParser
-from sphinx import addnodes
-from sphinx.io import SphinxStandaloneReader, SphinxDummyWriter, SphinxFileInput
-from sphinx.util import logging
+from sphinx import addnodes, versioning
+from sphinx.io import read_doc
+from sphinx.util import logging, rst
from sphinx.util import get_matching_docs, FilenameUniqDict, status_iterator
from sphinx.util.nodes import is_translatable
from sphinx.util.osutil import SEP, ensuredir
@@ -45,10 +40,8 @@ from sphinx.util.matching import compile_matchers
from sphinx.util.parallel import ParallelTasks, parallel_available, make_chunks
from sphinx.util.websupport import is_commentable
from sphinx.errors import SphinxError, ExtensionError
-from sphinx.locale import __
from sphinx.transforms import SphinxTransformer
-from sphinx.versioning import add_uids, merge_doctrees
-from sphinx.deprecation import RemovedInSphinx17Warning, RemovedInSphinx20Warning
+from sphinx.deprecation import RemovedInSphinx20Warning
from sphinx.environment.adapters.indexentries import IndexEntries
from sphinx.environment.adapters.toctree import TocTree
@@ -82,8 +75,6 @@ default_settings = {
ENV_VERSION = 52 + (sys.version_info[0] - 2)
-dummy_reporter = Reporter('', 4, 4)
-
versioning_conditions = {
'none': False,
'text': is_translatable,
@@ -110,7 +101,12 @@ class BuildEnvironment(object):
@staticmethod
def load(f, app=None):
# type: (IO, Sphinx) -> BuildEnvironment
- env = pickle.load(f)
+ try:
+ env = pickle.load(f)
+ except Exception as exc:
+ # This can happen for example when the pickle is from a
+ # different version of Sphinx.
+ raise IOError(exc)
if env.version != ENV_VERSION:
raise IOError('build environment version not current')
if app:
@@ -354,9 +350,8 @@ class BuildEnvironment(object):
for suffix in self.config.source_suffix:
if fnmatch.fnmatch(filename, '*' + suffix):
return filename[:-len(suffix)]
- else:
- # the file does not have docname
- return None
+ # the file does not have docname
+ return None
def doc2path(self, docname, base=True, suffix=None):
# type: (unicode, Union[bool, unicode], unicode) -> unicode
@@ -562,21 +557,10 @@ class BuildEnvironment(object):
self.app.emit('env-before-read-docs', self, docnames)
# check if we should do parallel or serial read
- par_ok = False
if parallel_available and len(docnames) > 5 and self.app.parallel > 1:
- for ext in itervalues(self.app.extensions):
- if ext.parallel_read_safe is None:
- logger.warning(__('the %s extension does not declare if it is safe '
- 'for parallel reading, assuming it isn\'t - please '
- 'ask the extension author to check and make it '
- 'explicit'), ext.name)
- logger.warning('doing serial read')
- break
- elif ext.parallel_read_safe is False:
- break
- else:
- # all extensions support parallel-read
- par_ok = True
+ par_ok = self.app.is_parallel_allowed('read')
+ else:
+ par_ok = False
if par_ok:
self._read_parallel(docnames, self.app, nproc=self.app.parallel)
@@ -644,25 +628,9 @@ class BuildEnvironment(object):
# --------- SINGLE FILE READING --------------------------------------------
- def warn_and_replace(self, error):
- # type: (Any) -> Tuple
- """Custom decoding error handler that warns and replaces."""
- linestart = error.object.rfind(b'\n', 0, error.start)
- lineend = error.object.find(b'\n', error.start)
- if lineend == -1:
- lineend = len(error.object)
- lineno = error.object.count(b'\n', 0, error.start) + 1
- logger.warning('undecodable source characters, replacing with "?": %r',
- (error.object[linestart + 1:error.start] + b'>>>' +
- error.object[error.start:error.end] + b'<<<' +
- error.object[error.end:lineend]),
- location=(self.docname, lineno))
- return (u'?', error.end)
-
- def read_doc(self, docname, app=None):
- # type: (unicode, Sphinx) -> None
- """Parse a file and add/update inventory entries for the doctree."""
-
+ def prepare_settings(self, docname):
+ # type: (unicode) -> None
+ """Prepare to set up environment for reading."""
self.temp_data['docname'] = docname
# defaults to the global default, but can be re-set in a document
self.temp_data['default_role'] = self.config.default_role
@@ -678,13 +646,6 @@ class BuildEnvironment(object):
self.settings['language_code'] = language
if 'smart_quotes' not in self.settings:
self.settings['smart_quotes'] = True
- if self.config.html_use_smartypants is not None:
- warnings.warn("html_use_smartypants option is deprecated. Smart "
- "quotes are on by default; if you want to disable "
- "or customize them, use the smart_quotes option in "
- "docutils.conf.",
- RemovedInSphinx17Warning)
- self.settings['smart_quotes'] = self.config.html_use_smartypants
# confirm selected language supports smart_quotes or not
for tag in normalize_language_tag(language):
@@ -693,40 +654,19 @@ class BuildEnvironment(object):
else:
self.settings['smart_quotes'] = False
+ def read_doc(self, docname, app=None):
+ # type: (unicode, Sphinx) -> None
+ """Parse a file and add/update inventory entries for the doctree."""
+ self.prepare_settings(docname)
+
docutilsconf = path.join(self.srcdir, 'docutils.conf')
# read docutils.conf from source dir, not from current dir
OptionParser.standard_config_files[1] = docutilsconf
if path.isfile(docutilsconf):
self.note_dependency(docutilsconf)
- with sphinx_domains(self):
- if self.config.default_role:
- role_fn, messages = roles.role(self.config.default_role, english,
- 0, dummy_reporter)
- if role_fn:
- roles._roles[''] = role_fn
- else:
- logger.warning('default role %s not found', self.config.default_role,
- location=docname)
-
- codecs.register_error('sphinx', self.warn_and_replace) # type: ignore
-
- # publish manually
- reader = SphinxStandaloneReader(self.app,
- parsers=self.app.registry.get_source_parsers())
- pub = Publisher(reader=reader,
- writer=SphinxDummyWriter(),
- destination_class=NullOutput)
- pub.set_components(None, 'restructuredtext', None)
- pub.process_programmatic_settings(None, self.settings, None)
- src_path = self.doc2path(docname)
- source = SphinxFileInput(app, self, source=None, source_path=src_path,
- encoding=self.config.source_encoding)
- pub.source = source
- pub.settings._source = src_path
- pub.set_destination(None, None)
- pub.publish()
- doctree = pub.document
+ with sphinx_domains(self), rst.default_role(docname, self.config.default_role):
+ doctree = read_doc(self.app, self, self.doc2path(docname))
# post-processing
for domain in itervalues(self.domains):
@@ -744,41 +684,14 @@ class BuildEnvironment(object):
time.time(), path.getmtime(self.doc2path(docname)))
if self.versioning_condition:
- old_doctree = None
- if self.versioning_compare:
- # get old doctree
- try:
- with open(self.doc2path(docname,
- self.doctreedir, '.doctree'), 'rb') as f:
- old_doctree = pickle.load(f)
- except EnvironmentError:
- pass
-
# add uids for versioning
- if not self.versioning_compare or old_doctree is None:
- list(add_uids(doctree, self.versioning_condition))
- else:
- list(merge_doctrees(
- old_doctree, doctree, self.versioning_condition))
-
- # make it picklable
- doctree.reporter = None
- doctree.transformer = None
- doctree.settings.warning_stream = None
- doctree.settings.env = None
- doctree.settings.record_dependencies = None
+ versioning.prepare(doctree)
# cleanup
self.temp_data.clear()
self.ref_context.clear()
- roles._roles.pop('', None) # if a document has set a local default role
- # save the parsed doctree
- doctree_filename = self.doc2path(docname, self.doctreedir,
- '.doctree')
- ensuredir(path.dirname(doctree_filename))
- with open(doctree_filename, 'wb') as f:
- pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
+ self.write_doctree(docname, doctree)
# utilities to use while reading a document
@@ -788,24 +701,6 @@ class BuildEnvironment(object):
"""Returns the docname of the document currently being parsed."""
return self.temp_data['docname']
- @property
- def currmodule(self):
- # type: () -> None
- """Backwards compatible alias. Will be removed."""
- warnings.warn('env.currmodule is deprecated. '
- 'Use env.ref_context["py:module"] instead.',
- RemovedInSphinx17Warning)
- return self.ref_context.get('py:module')
-
- @property
- def currclass(self):
- # type: () -> None
- """Backwards compatible alias. Will be removed."""
- warnings.warn('env.currclass is deprecated. '
- 'Use env.ref_context["py:class"] instead.',
- RemovedInSphinx17Warning)
- return self.ref_context.get('py:class')
-
def new_serialno(self, category=''):
# type: (unicode) -> int
"""Return a serial number, e.g. for index entry targets.
@@ -857,7 +752,7 @@ class BuildEnvironment(object):
file relations from it.
"""
warnings.warn('env.note_toctree() is deprecated. '
- 'Use sphinx.environment.adapters.toctre.TocTree instead.',
+ 'Use sphinx.environment.adapters.toctree.TocTree instead.',
RemovedInSphinx20Warning)
TocTree(self).note(docname, toctreenode)
@@ -900,6 +795,21 @@ class BuildEnvironment(object):
doctree.reporter = Reporter(self.doc2path(docname), 2, 5, stream=WarningStream())
return doctree
+ def write_doctree(self, docname, doctree):
+ # type: (unicode, nodes.Node) -> None
+ """Write the doctree to a file."""
+ # make it picklable
+ doctree.reporter = None
+ doctree.transformer = None
+ doctree.settings.warning_stream = None
+ doctree.settings.env = None
+ doctree.settings.record_dependencies = None
+
+ doctree_filename = self.doc2path(docname, self.doctreedir, '.doctree')
+ ensuredir(path.dirname(doctree_filename))
+ with open(doctree_filename, 'wb') as f:
+ pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
+
def get_and_resolve_doctree(self, docname, builder, doctree=None,
prune_toctrees=True, includehidden=False):
# type: (unicode, Builder, nodes.Node, bool, bool) -> nodes.Node
@@ -956,7 +866,7 @@ class BuildEnvironment(object):
transformer = SphinxTransformer(doctree)
transformer.set_environment(self)
- transformer.add_transforms(self.app.post_transforms)
+ transformer.add_transforms(self.app.registry.get_post_transforms())
transformer.apply_transforms()
finally:
self.temp_data = backup
diff --git a/sphinx/ext/apidoc.py b/sphinx/ext/apidoc.py
new file mode 100644
index 000000000..d99f852f1
--- /dev/null
+++ b/sphinx/ext/apidoc.py
@@ -0,0 +1,444 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.ext.apidoc
+ ~~~~~~~~~~~~~~~~~
+
+ Parses a directory tree looking for Python modules and packages and creates
+ ReST files appropriately to create code documentation with Sphinx. It also
+ creates a modules index (named modules.<suffix>).
+
+ This is derived from the "sphinx-autopackage" script, which is:
+ Copyright 2008 Société des arts technologiques (SAT),
+ http://www.sat.qc.ca/
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+from os import path
+from six import binary_type
+from fnmatch import fnmatch
+
+from sphinx import __display_version__
+from sphinx.cmd.quickstart import EXTENSIONS
+from sphinx.util import rst
+from sphinx.util.osutil import FileAvoidWrite, ensuredir, walk
+
+if False:
+ # For type annotation
+ from typing import Any, List, Tuple # NOQA
+
+# automodule options
+if 'SPHINX_APIDOC_OPTIONS' in os.environ:
+ OPTIONS = os.environ['SPHINX_APIDOC_OPTIONS'].split(',')
+else:
+ OPTIONS = [
+ 'members',
+ 'undoc-members',
+ # 'inherited-members', # disabled because there's a bug in sphinx
+ 'show-inheritance',
+ ]
+
+INITPY = '__init__.py'
+PY_SUFFIXES = set(['.py', '.pyx'])
+
+
+def makename(package, module):
+ # type: (unicode, unicode) -> unicode
+ """Join package and module with a dot."""
+ # Both package and module can be None/empty.
+ if package:
+ name = package
+ if module:
+ name += '.' + module
+ else:
+ name = module
+ return name
+
+
+def write_file(name, text, opts):
+ # type: (unicode, unicode, Any) -> None
+ """Write the output file for module/package <name>."""
+ fname = path.join(opts.destdir, '%s.%s' % (name, opts.suffix))
+ if opts.dryrun:
+ print('Would create file %s.' % fname)
+ return
+ if not opts.force and path.isfile(fname):
+ print('File %s already exists, skipping.' % fname)
+ else:
+ print('Creating file %s.' % fname)
+ with FileAvoidWrite(fname) as f:
+ f.write(text)
+
+
+def format_heading(level, text, escape=True):
+ # type: (int, unicode, bool) -> unicode
+ """Create a heading of <level> [1, 2 or 3 supported]."""
+ if escape:
+ text = rst.escape(text)
+ underlining = ['=', '-', '~', ][level - 1] * len(text)
+ return '%s\n%s\n\n' % (text, underlining)
+
+
+def format_directive(module, package=None):
+ # type: (unicode, unicode) -> unicode
+ """Create the automodule directive and add the options."""
+ directive = '.. automodule:: %s\n' % makename(package, module)
+ for option in OPTIONS:
+ directive += ' :%s:\n' % option
+ return directive
+
+
+def create_module_file(package, module, opts):
+ # type: (unicode, unicode, Any) -> None
+ """Build the text of the file and write the file."""
+ if not opts.noheadings:
+ text = format_heading(1, '%s module' % module)
+ else:
+ text = ''
+ # text += format_heading(2, ':mod:`%s` Module' % module)
+ text += format_directive(module, package)
+ write_file(makename(package, module), text, opts)
+
+
+def create_package_file(root, master_package, subroot, py_files, opts, subs, is_namespace):
+ # type: (unicode, unicode, unicode, List[unicode], Any, List[unicode], bool) -> None
+ """Build the text of the file and write the file."""
+ text = format_heading(1, ('%s package' if not is_namespace else "%s namespace")
+ % makename(master_package, subroot))
+
+ if opts.modulefirst and not is_namespace:
+ text += format_directive(subroot, master_package)
+ text += '\n'
+
+    # build a list of directories that are packages (contain an INITPY file)
+ subs = [sub for sub in subs if path.isfile(path.join(root, sub, INITPY))]
+    # if there are some package directories, add a TOC for these subpackages
+ if subs:
+ text += format_heading(2, 'Subpackages')
+ text += '.. toctree::\n\n'
+ for sub in subs:
+ text += ' %s.%s\n' % (makename(master_package, subroot), sub)
+ text += '\n'
+
+ submods = [path.splitext(sub)[0] for sub in py_files
+ if not shall_skip(path.join(root, sub), opts) and
+ sub != INITPY]
+ if submods:
+ text += format_heading(2, 'Submodules')
+ if opts.separatemodules:
+ text += '.. toctree::\n\n'
+ for submod in submods:
+ modfile = makename(master_package, makename(subroot, submod))
+ text += ' %s\n' % modfile
+
+ # generate separate file for this module
+ if not opts.noheadings:
+ filetext = format_heading(1, '%s module' % modfile)
+ else:
+ filetext = ''
+ filetext += format_directive(makename(subroot, submod),
+ master_package)
+ write_file(modfile, filetext, opts)
+ else:
+ for submod in submods:
+ modfile = makename(master_package, makename(subroot, submod))
+ if not opts.noheadings:
+ text += format_heading(2, '%s module' % modfile)
+ text += format_directive(makename(subroot, submod),
+ master_package)
+ text += '\n'
+ text += '\n'
+
+ if not opts.modulefirst and not is_namespace:
+ text += format_heading(2, 'Module contents')
+ text += format_directive(subroot, master_package)
+
+ write_file(makename(master_package, subroot), text, opts)
+
+
+def create_modules_toc_file(modules, opts, name='modules'):
+ # type: (List[unicode], Any, unicode) -> None
+ """Create the module's index."""
+ text = format_heading(1, '%s' % opts.header, escape=False)
+ text += '.. toctree::\n'
+ text += ' :maxdepth: %s\n\n' % opts.maxdepth
+
+ modules.sort()
+ prev_module = '' # type: unicode
+ for module in modules:
+ # look if the module is a subpackage and, if yes, ignore it
+ if module.startswith(prev_module + '.'):
+ continue
+ prev_module = module
+ text += ' %s\n' % module
+
+ write_file(name, text, opts)
+
+
+def shall_skip(module, opts):
+ # type: (unicode, Any) -> bool
+ """Check if we want to skip this module."""
+ # skip if the file doesn't exist and not using implicit namespaces
+ if not opts.implicit_namespaces and not path.exists(module):
+ return True
+
+ # skip it if there is nothing (or just \n or \r\n) in the file
+ if path.exists(module) and path.getsize(module) <= 2:
+ return True
+
+ # skip if it has a "private" name and this is selected
+ filename = path.basename(module)
+ if filename != '__init__.py' and filename.startswith('_') and \
+ not opts.includeprivate:
+ return True
+ return False
+
+
+def recurse_tree(rootpath, excludes, opts):
+ # type: (unicode, List[unicode], Any) -> List[unicode]
+ """
+ Look for every file in the directory tree and create the corresponding
+ ReST files.
+ """
+ followlinks = getattr(opts, 'followlinks', False)
+ includeprivate = getattr(opts, 'includeprivate', False)
+ implicit_namespaces = getattr(opts, 'implicit_namespaces', False)
+
+ # check if the base directory is a package and get its name
+ if INITPY in os.listdir(rootpath) or implicit_namespaces:
+ root_package = rootpath.split(path.sep)[-1]
+ else:
+ # otherwise, the base is a directory with packages
+ root_package = None
+
+ toplevels = []
+ for root, subs, files in walk(rootpath, followlinks=followlinks):
+ # document only Python module files (that aren't excluded)
+ py_files = sorted(f for f in files
+ if path.splitext(f)[1] in PY_SUFFIXES and
+ not is_excluded(path.join(root, f), excludes))
+ is_pkg = INITPY in py_files
+ is_namespace = INITPY not in py_files and implicit_namespaces
+ if is_pkg:
+ py_files.remove(INITPY)
+ py_files.insert(0, INITPY)
+ elif root != rootpath:
+ # only accept non-package at toplevel unless using implicit namespaces
+ if not implicit_namespaces:
+ del subs[:]
+ continue
+ # remove hidden ('.') and private ('_') directories, as well as
+ # excluded dirs
+ if includeprivate:
+ exclude_prefixes = ('.',) # type: Tuple[unicode, ...]
+ else:
+ exclude_prefixes = ('.', '_')
+ subs[:] = sorted(sub for sub in subs if not sub.startswith(exclude_prefixes) and
+ not is_excluded(path.join(root, sub), excludes))
+
+ if is_pkg or is_namespace:
+ # we are in a package with something to document
+ if subs or len(py_files) > 1 or not shall_skip(path.join(root, INITPY), opts):
+ subpackage = root[len(rootpath):].lstrip(path.sep).\
+ replace(path.sep, '.')
+ # if this is not a namespace or
+ # a namespace and there is something there to document
+ if not is_namespace or len(py_files) > 0:
+ create_package_file(root, root_package, subpackage,
+ py_files, opts, subs, is_namespace)
+ toplevels.append(makename(root_package, subpackage))
+ else:
+ # if we are at the root level, we don't require it to be a package
+ assert root == rootpath and root_package is None
+ for py_file in py_files:
+ if not shall_skip(path.join(rootpath, py_file), opts):
+ module = path.splitext(py_file)[0]
+ create_module_file(root_package, module, opts)
+ toplevels.append(module)
+
+ return toplevels
+
+
+def is_excluded(root, excludes):
+ # type: (unicode, List[unicode]) -> bool
+ """Check if the directory is in the exclude list.
+
+ Note: by having trailing slashes, we avoid common prefix issues, like
+    e.g. an exclude "foo" also accidentally excluding "foobar".
+ """
+ for exclude in excludes:
+ if fnmatch(root, exclude):
+ return True
+ return False
+
+
+def get_parser():
+ # type: () -> argparse.ArgumentParser
+ parser = argparse.ArgumentParser(
+ usage='usage: %(prog)s [OPTIONS] -o <OUTPUT_PATH> <MODULE_PATH> '
+ '[EXCLUDE_PATTERN, ...]',
+ epilog='For more information, visit <http://sphinx-doc.org/>.',
+ description="""
+Look recursively in <MODULE_PATH> for Python modules and packages and create
+one reST file with automodule directives per package in the <OUTPUT_PATH>.
+
+The <EXCLUDE_PATTERN>s can be file and/or directory patterns that will be
+excluded from generation.
+
+Note: By default this script will not overwrite already created files.""")
+
+ parser.add_argument('--version', action='version', dest='show_version',
+ version='%%(prog)s %s' % __display_version__)
+
+ parser.add_argument('module_path',
+ help='path to module to document')
+ parser.add_argument('exclude_pattern', nargs='*',
+ help='fnmatch-style file and/or directory patterns '
+ 'to exclude from generation')
+
+ parser.add_argument('-o', '--output-dir', action='store', dest='destdir',
+ required=True,
+ help='directory to place all output')
+ parser.add_argument('-d', '--maxdepth', action='store', dest='maxdepth',
+ type=int, default=4,
+ help='maximum depth of submodules to show in the TOC '
+ '(default: 4)')
+ parser.add_argument('-f', '--force', action='store_true', dest='force',
+ help='overwrite existing files')
+ parser.add_argument('-l', '--follow-links', action='store_true',
+ dest='followlinks', default=False,
+ help='follow symbolic links. Powerful when combined '
+ 'with collective.recipe.omelette.')
+ parser.add_argument('-n', '--dry-run', action='store_true', dest='dryrun',
+ help='run the script without creating files')
+ parser.add_argument('-e', '--separate', action='store_true',
+ dest='separatemodules',
+ help='put documentation for each module on its own page')
+ parser.add_argument('-P', '--private', action='store_true',
+ dest='includeprivate',
+ help='include "_private" modules')
+ parser.add_argument('-T', '--no-toc', action='store_true', dest='notoc',
+ help="don't create a table of contents file")
+ parser.add_argument('-E', '--no-headings', action='store_true',
+ dest='noheadings',
+ help="don't create headings for the module/package "
+ "packages (e.g. when the docstrings already "
+ "contain them)")
+ parser.add_argument('-M', '--module-first', action='store_true',
+ dest='modulefirst',
+ help='put module documentation before submodule '
+ 'documentation')
+ parser.add_argument('--implicit-namespaces', action='store_true',
+ dest='implicit_namespaces',
+ help='interpret module paths according to PEP-0420 '
+ 'implicit namespaces specification')
+ parser.add_argument('-s', '--suffix', action='store', dest='suffix',
+ default='rst',
+ help='file suffix (default: rst)')
+ parser.add_argument('-F', '--full', action='store_true', dest='full',
+ help='generate a full project with sphinx-quickstart')
+ parser.add_argument('-a', '--append-syspath', action='store_true',
+ dest='append_syspath',
+ help='append module_path to sys.path, used when --full is given')
+ parser.add_argument('-H', '--doc-project', action='store', dest='header',
+ help='project name (default: root module name)')
+ parser.add_argument('-A', '--doc-author', action='store', dest='author',
+ help='project author(s), used when --full is given')
+ parser.add_argument('-V', '--doc-version', action='store', dest='version',
+ help='project version, used when --full is given')
+ parser.add_argument('-R', '--doc-release', action='store', dest='release',
+ help='project release, used when --full is given, '
+ 'defaults to --doc-version')
+
+ group = parser.add_argument_group('extension options')
+ for ext in EXTENSIONS:
+ group.add_argument('--ext-%s' % ext, action='append_const',
+ const='sphinx.ext.%s' % ext, dest='extensions',
+ help='enable %s extension' % ext)
+
+ return parser
+
+
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> int
+ """Parse and check the command line arguments."""
+ parser = get_parser()
+ args = parser.parse_args(argv)
+
+ rootpath = path.abspath(args.module_path)
+
+ # normalize opts
+
+ if args.header is None:
+ args.header = rootpath.split(path.sep)[-1]
+ if args.suffix.startswith('.'):
+ args.suffix = args.suffix[1:]
+ if not path.isdir(rootpath):
+ print('%s is not a directory.' % rootpath, file=sys.stderr)
+ sys.exit(1)
+ if not args.dryrun:
+ ensuredir(args.destdir)
+ excludes = [path.abspath(exclude) for exclude in args.exclude_pattern]
+ modules = recurse_tree(rootpath, excludes, args)
+
+ if args.full:
+ from sphinx.cmd import quickstart as qs
+ modules.sort()
+ prev_module = '' # type: unicode
+ text = ''
+ for module in modules:
+ if module.startswith(prev_module + '.'):
+ continue
+ prev_module = module
+ text += ' %s\n' % module
+ d = dict(
+ path = args.destdir,
+ sep = False,
+ dot = '_',
+ project = args.header,
+ author = args.author or 'Author',
+ version = args.version or '',
+ release = args.release or args.version or '',
+ suffix = '.' + args.suffix,
+ master = 'index',
+ epub = True,
+ extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
+ 'sphinx.ext.todo'],
+ makefile = True,
+ batchfile = True,
+ make_mode = True,
+ mastertocmaxdepth = args.maxdepth,
+ mastertoctree = text,
+ language = 'en',
+ module_path = rootpath,
+ append_syspath = args.append_syspath,
+ )
+ if args.extensions:
+ d['extensions'].extend(args.extensions)
+
+ if isinstance(args.header, binary_type):
+ d['project'] = d['project'].decode('utf-8')
+ if isinstance(args.author, binary_type):
+ d['author'] = d['author'].decode('utf-8')
+ if isinstance(args.version, binary_type):
+ d['version'] = d['version'].decode('utf-8')
+ if isinstance(args.release, binary_type):
+ d['release'] = d['release'].decode('utf-8')
+
+ if not args.dryrun:
+ qs.generate(d, silent=True, overwrite=args.force)
+ elif not args.notoc:
+ create_modules_toc_file(modules, args)
+
+ return 0
+
+
+# So program can be started with "python -m sphinx.ext.apidoc ..."
+if __name__ == "__main__":
+ main()
diff --git a/sphinx/ext/autodoc.py b/sphinx/ext/autodoc/__init__.py
index e04b4a09d..5c1a71ea9 100644
--- a/sphinx/ext/autodoc.py
+++ b/sphinx/ext/autodoc/__init__.py
@@ -15,11 +15,8 @@ import re
import sys
import inspect
import traceback
-import warnings
-from types import FunctionType, MethodType, ModuleType
-from six import PY2, iterkeys, iteritems, itervalues, text_type, class_types, \
- string_types, StringIO
+from six import PY2, iterkeys, iteritems, itervalues, text_type, class_types, string_types
from docutils import nodes
from docutils.utils import assemble_option_dict
@@ -27,31 +24,27 @@ from docutils.parsers.rst import Directive
from docutils.statemachine import ViewList
import sphinx
+from sphinx.ext.autodoc.importer import mock, import_module
+from sphinx.ext.autodoc.importer import _MockImporter # to keep compatibility # NOQA
+from sphinx.ext.autodoc.inspector import format_annotation, formatargspec # to keep compatibility # NOQA
from sphinx.util import rpartition, force_decode
from sphinx.locale import _
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.application import ExtensionError
from sphinx.util import logging
from sphinx.util.nodes import nested_parse_with_titles
-from sphinx.util.inspect import getargspec, isdescriptor, safe_getmembers, \
+from sphinx.util.inspect import Signature, isdescriptor, safe_getmembers, \
safe_getattr, object_description, is_builtin_class_method, \
- isenumclass, isenumattribute
+ isenumclass, isenumattribute, getdoc
from sphinx.util.docstrings import prepare_docstring
if False:
# For type annotation
+ from types import ModuleType # NOQA
from typing import Any, Callable, Dict, Iterator, List, Sequence, Set, Tuple, Type, Union # NOQA
from docutils.utils import Reporter # NOQA
from sphinx.application import Sphinx # NOQA
-try:
- if sys.version_info >= (3,):
- import typing
- else:
- typing = None
-except ImportError:
- typing = None
-
logger = logging.getLogger(__name__)
# This type isn't exposed directly in any modules, but can be found
@@ -106,103 +99,6 @@ class Options(dict):
return None
-class _MockObject(object):
- """Used by autodoc_mock_imports."""
-
- def __init__(self, *args, **kwargs):
- # type: (Any, Any) -> None
- pass
-
- def __len__(self):
- # type: () -> int
- return 0
-
- def __contains__(self, key):
- # type: (str) -> bool
- return False
-
- def __iter__(self):
- # type: () -> None
- pass
-
- def __getitem__(self, key):
- # type: (str) -> _MockObject
- return self
-
- def __getattr__(self, key):
- # type: (str) -> _MockObject
- return self
-
- def __call__(self, *args, **kw):
- # type: (Any, Any) -> Any
- if args and type(args[0]) in [FunctionType, MethodType]:
- # Appears to be a decorator, pass through unchanged
- return args[0]
- return self
-
-
-class _MockModule(ModuleType):
- """Used by autodoc_mock_imports."""
- __file__ = '/dev/null'
-
- def __init__(self, name, loader):
- # type: (str, _MockImporter) -> None
- self.__name__ = self.__package__ = name
- self.__loader__ = loader
- self.__all__ = [] # type: List[str]
- self.__path__ = [] # type: List[str]
-
- def __getattr__(self, name):
- # type: (str) -> _MockObject
- o = _MockObject()
- o.__module__ = self.__name__
- return o
-
-
-class _MockImporter(object):
-
- def __init__(self, names):
- # type: (List[str]) -> None
- self.base_packages = set() # type: Set[str]
- for n in names:
- # Convert module names:
- # ['a.b.c', 'd.e']
- # to a set of base packages:
- # set(['a', 'd'])
- self.base_packages.add(n.split('.')[0])
- self.mocked_modules = [] # type: List[str]
- # enable hook by adding itself to meta_path
- sys.meta_path = sys.meta_path + [self]
-
- def disable(self):
- # remove `self` from `sys.meta_path` to disable import hook
- sys.meta_path = [i for i in sys.meta_path if i is not self]
- # remove mocked modules from sys.modules to avoid side effects after
- # running auto-documenter
- for m in self.mocked_modules:
- if m in sys.modules:
- del sys.modules[m]
-
- def find_module(self, name, path=None):
- # type: (str, str) -> Any
- base_package = name.split('.')[0]
- if base_package in self.base_packages:
- return self
- return None
-
- def load_module(self, name):
- # type: (str) -> ModuleType
- if name in sys.modules:
- # module has already been imported, return it
- return sys.modules[name]
- else:
- logger.debug('[autodoc] adding a mock module %s!', name)
- module = _MockModule(name, self)
- sys.modules[name] = module
- self.mocked_modules.append(name)
- return module
-
-
ALL = object()
INSTANCEATTR = object()
@@ -359,162 +255,6 @@ def between(marker, what=None, keepempty=False, exclude=False):
return process
-def format_annotation(annotation):
- # type: (Any) -> str
- """Return formatted representation of a type annotation.
-
- Show qualified names for types and additional details for types from
- the ``typing`` module.
-
- Displaying complex types from ``typing`` relies on its private API.
- """
- if typing and isinstance(annotation, typing.TypeVar):
- return annotation.__name__
- if annotation == Ellipsis:
- return '...'
- if not isinstance(annotation, type):
- return repr(annotation)
-
- qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
- if annotation else repr(annotation))
-
- if annotation.__module__ == 'builtins':
- return annotation.__qualname__ # type: ignore
- elif typing:
- if hasattr(typing, 'GenericMeta') and \
- isinstance(annotation, typing.GenericMeta):
- # In Python 3.5.2+, all arguments are stored in __args__,
- # whereas __parameters__ only contains generic parameters.
- #
- # Prior to Python 3.5.2, __args__ is not available, and all
- # arguments are in __parameters__.
- params = None
- if hasattr(annotation, '__args__'):
- if annotation.__args__ is None or len(annotation.__args__) <= 2:
- params = annotation.__args__
- else: # typing.Callable
- args = ', '.join(format_annotation(a) for a in annotation.__args__[:-1])
- result = format_annotation(annotation.__args__[-1])
- return '%s[[%s], %s]' % (qualified_name, args, result)
- elif hasattr(annotation, '__parameters__'):
- params = annotation.__parameters__
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif hasattr(typing, 'UnionMeta') and \
- isinstance(annotation, typing.UnionMeta) and \
- hasattr(annotation, '__union_params__'):
- params = annotation.__union_params__
- if params is not None:
- param_str = ', '.join(format_annotation(p) for p in params)
- return '%s[%s]' % (qualified_name, param_str)
- elif hasattr(typing, 'CallableMeta') and \
- isinstance(annotation, typing.CallableMeta) and \
- getattr(annotation, '__args__', None) is not None and \
- hasattr(annotation, '__result__'):
- # Skipped in the case of plain typing.Callable
- args = annotation.__args__
- if args is None:
- return qualified_name
- elif args is Ellipsis:
- args_str = '...'
- else:
- formatted_args = (format_annotation(a) for a in args)
- args_str = '[%s]' % ', '.join(formatted_args)
- return '%s[%s, %s]' % (qualified_name,
- args_str,
- format_annotation(annotation.__result__))
- elif hasattr(typing, 'TupleMeta') and \
- isinstance(annotation, typing.TupleMeta) and \
- hasattr(annotation, '__tuple_params__') and \
- hasattr(annotation, '__tuple_use_ellipsis__'):
- params = annotation.__tuple_params__
- if params is not None:
- param_strings = [format_annotation(p) for p in params]
- if annotation.__tuple_use_ellipsis__:
- param_strings.append('...')
- return '%s[%s]' % (qualified_name,
- ', '.join(param_strings))
- return qualified_name
-
-
-def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
- kwonlyargs=(), kwonlydefaults={}, annotations={}):
- # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
- """Return a string representation of an ``inspect.FullArgSpec`` tuple.
-
- An enhanced version of ``inspect.formatargspec()`` that handles typing
- annotations better.
- """
-
- def format_arg_with_annotation(name):
- # type: (str) -> str
- if name in annotations:
- return '%s: %s' % (name, format_annotation(get_annotation(name)))
- return name
-
- def get_annotation(name):
- # type: (str) -> str
- value = annotations[name]
- if isinstance(value, string_types):
- return introspected_hints.get(name, value)
- else:
- return value
-
- introspected_hints = (typing.get_type_hints(function) # type: ignore
- if typing and hasattr(function, '__code__') else {})
-
- fd = StringIO()
- fd.write('(')
-
- formatted = []
- defaults_start = len(args) - len(defaults) if defaults else len(args)
-
- for i, arg in enumerate(args):
- arg_fd = StringIO()
- if isinstance(arg, list):
- # support tupled arguments list (only for py2): def foo((x, y))
- arg_fd.write('(')
- arg_fd.write(format_arg_with_annotation(arg[0]))
- for param in arg[1:]:
- arg_fd.write(', ')
- arg_fd.write(format_arg_with_annotation(param))
- arg_fd.write(')')
- else:
- arg_fd.write(format_arg_with_annotation(arg))
- if defaults and i >= defaults_start:
- arg_fd.write(' = ' if arg in annotations else '=')
- arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varargs:
- formatted.append('*' + format_arg_with_annotation(varargs))
-
- if kwonlyargs:
- if not varargs:
- formatted.append('*')
-
- for kwarg in kwonlyargs:
- arg_fd = StringIO()
- arg_fd.write(format_arg_with_annotation(kwarg))
- if kwonlydefaults and kwarg in kwonlydefaults:
- arg_fd.write(' = ' if kwarg in annotations else '=')
- arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore
- formatted.append(arg_fd.getvalue())
-
- if varkw:
- formatted.append('**' + format_arg_with_annotation(varkw))
-
- fd.write(', '.join(formatted))
- fd.write(')')
-
- if 'return' in annotations:
- fd.write(' -> ')
- fd.write(format_annotation(get_annotation('return')))
-
- return fd.getvalue()
-
-
class Documenter(object):
"""
A Documenter knows how to autodocument a single object type. When
@@ -649,48 +389,53 @@ class Documenter(object):
self.modname, '.'.join(self.objpath))
# always enable mock import hook
# it will do nothing if autodoc_mock_imports is empty
- import_hook = _MockImporter(self.env.config.autodoc_mock_imports)
- try:
- logger.debug('[autodoc] import %s', self.modname)
- with warnings.catch_warnings():
- warnings.filterwarnings("ignore", category=ImportWarning)
- with logging.skip_warningiserror(not self.env.config.autodoc_warningiserror):
- __import__(self.modname)
- parent = None
- obj = self.module = sys.modules[self.modname]
- logger.debug('[autodoc] => %r', obj)
- for part in self.objpath:
- parent = obj
- logger.debug('[autodoc] getattr(_, %r)', part)
- obj = self.get_attr(obj, part)
+ with mock(self.env.config.autodoc_mock_imports):
+ try:
+ logger.debug('[autodoc] import %s', self.modname)
+ obj = import_module(self.modname, self.env.config.autodoc_warningiserror)
+ parent = None
+ self.module = obj
logger.debug('[autodoc] => %r', obj)
- self.object_name = part
- self.parent = parent
- self.object = obj
- return True
- # this used to only catch SyntaxError, ImportError and AttributeError,
- # but importing modules with side effects can raise all kinds of errors
- except (Exception, SystemExit) as e:
- if self.objpath:
- errmsg = 'autodoc: failed to import %s %r from module %r' % \
- (self.objtype, '.'.join(self.objpath), self.modname)
- else:
- errmsg = 'autodoc: failed to import %s %r' % \
- (self.objtype, self.fullname)
- if isinstance(e, SystemExit):
- errmsg += ('; the module executes module level statement ' +
- 'and it might call sys.exit().')
- else:
- errmsg += '; the following exception was raised:\n%s' % \
- traceback.format_exc()
- if PY2:
- errmsg = errmsg.decode('utf-8') # type: ignore
- logger.debug(errmsg)
- self.directive.warn(errmsg)
- self.env.note_reread()
- return False
- finally:
- import_hook.disable()
+ for part in self.objpath:
+ parent = obj
+ logger.debug('[autodoc] getattr(_, %r)', part)
+ obj = self.get_attr(obj, part)
+ logger.debug('[autodoc] => %r', obj)
+ self.object_name = part
+ self.parent = parent
+ self.object = obj
+ return True
+ except (AttributeError, ImportError) as exc:
+ if self.objpath:
+ errmsg = 'autodoc: failed to import %s %r from module %r' % \
+ (self.objtype, '.'.join(self.objpath), self.modname)
+ else:
+ errmsg = 'autodoc: failed to import %s %r' % \
+ (self.objtype, self.fullname)
+
+ if isinstance(exc, ImportError):
+ # import_module() raises ImportError having real exception obj and
+ # traceback
+ real_exc, traceback_msg = exc.args
+ if isinstance(real_exc, SystemExit):
+ errmsg += ('; the module executes module level statement ' +
+ 'and it might call sys.exit().')
+ elif isinstance(real_exc, ImportError):
+ errmsg += ('; the following exception was raised:\n%s' %
+ real_exc.args[0])
+ else:
+ errmsg += ('; the following exception was raised:\n%s' %
+ traceback_msg)
+ else:
+ errmsg += ('; the following exception was raised:\n%s' %
+ traceback.format_exc())
+
+ if PY2:
+ errmsg = errmsg.decode('utf-8') # type: ignore
+ logger.debug(errmsg)
+ self.directive.warn(errmsg)
+ self.env.note_reread()
+ return False
def get_real_modname(self):
# type: () -> str
@@ -785,6 +530,8 @@ class Documenter(object):
# type: (unicode, int) -> List[List[unicode]]
"""Decode and return lines of the docstring(s) for the object."""
docstring = self.get_attr(self.object, '__doc__', None)
+ if docstring is None and self.env.config.autodoc_inherit_docstrings:
+ docstring = getdoc(self.object)
# make sure we have Unicode docstrings, then sanitize and split
# into lines
if isinstance(docstring, text_type):
@@ -942,6 +689,9 @@ class Documenter(object):
isattr = False
doc = self.get_attr(member, '__doc__', None)
+ if doc is None and self.env.config.autodoc_inherit_docstrings:
+ doc = getdoc(member)
+
# if the member __doc__ is the same as self's __doc__, it's just
# inherited and therefore not the member's doc
cls = self.get_attr(member, '__class__', None)
@@ -1148,7 +898,7 @@ class ModuleDocumenter(Documenter):
'platform': identity, 'deprecated': bool_option,
'member-order': identity, 'exclude-members': members_set_option,
'private-members': bool_option, 'special-members': members_option,
- 'imported-members': bool_option,
+ 'imported-members': bool_option, 'ignore-module-all': bool_option
} # type: Dict[unicode, Callable]
@classmethod
@@ -1190,7 +940,8 @@ class ModuleDocumenter(Documenter):
def get_object_members(self, want_all):
# type: (bool) -> Tuple[bool, List[Tuple[unicode, object]]]
if want_all:
- if not hasattr(self.object, '__all__'):
+ if (self.options.ignore_module_all or not
+ hasattr(self.object, '__all__')):
# for implicit module members, check __module__ to avoid
# documenting imported objects
return True, safe_getmembers(self.object)
@@ -1364,7 +1115,7 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
# cannot introspect arguments of a C function or method
return None
try:
- argspec = getargspec(self.object)
+ args = Signature(self.object).format_args()
except TypeError:
if (is_builtin_class_method(self.object, '__new__') and
is_builtin_class_method(self.object, '__init__')):
@@ -1374,12 +1125,10 @@ class FunctionDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # typ
# typing) we try to use the constructor signature as function
# signature without the first argument.
try:
- argspec = getargspec(self.object.__new__)
+ args = Signature(self.object.__new__, bound_method=True).format_args()
except TypeError:
- argspec = getargspec(self.object.__init__)
- if argspec[0]:
- del argspec[0][0]
- args = formatargspec(self.object, *argspec)
+ args = Signature(self.object.__init__, bound_method=True).format_args()
+
# escape backslashes for reST
args = args.replace('\\', '\\\\')
return args
@@ -1431,14 +1180,11 @@ class ClassDocumenter(DocstringSignatureMixin, ModuleLevelDocumenter): # type:
not(inspect.ismethod(initmeth) or inspect.isfunction(initmeth)):
return None
try:
- argspec = getargspec(initmeth)
+ return Signature(initmeth, bound_method=True).format_args()
except TypeError:
# still not possible: happens e.g. for old-style classes
# with __init__ in C
return None
- if argspec[0] and argspec[0][0] in ('cls', 'self'):
- del argspec[0][0]
- return formatargspec(initmeth, *argspec)
def format_signature(self):
# type: () -> unicode
@@ -1636,10 +1382,7 @@ class MethodDocumenter(DocstringSignatureMixin, ClassLevelDocumenter): # type:
inspect.ismethoddescriptor(self.object):
# can never get arguments of a C function or method
return None
- argspec = getargspec(self.object)
- if argspec[0] and argspec[0][0] in ('cls', 'self'):
- del argspec[0][0]
- args = formatargspec(self.object, *argspec)
+ args = Signature(self.object, bound_method=True).format_args()
# escape backslashes for reST
args = args.replace('\\', '\\\\')
return args
@@ -1786,7 +1529,7 @@ class AutoDirective(Directive):
# flags that can be given in autodoc_default_flags
_default_flags = set([
'members', 'undoc-members', 'inherited-members', 'show-inheritance',
- 'private-members', 'special-members',
+ 'private-members', 'special-members', 'ignore-module-all'
])
# standard docutils directive settings
@@ -1901,6 +1644,7 @@ def setup(app):
app.add_config_value('autodoc_docstring_signature', True, True)
app.add_config_value('autodoc_mock_imports', [], True)
app.add_config_value('autodoc_warningiserror', True, True)
+ app.add_config_value('autodoc_inherit_docstrings', True, True)
app.add_event('autodoc-process-docstring')
app.add_event('autodoc-process-signature')
app.add_event('autodoc-skip-member')
diff --git a/sphinx/ext/autodoc/importer.py b/sphinx/ext/autodoc/importer.py
new file mode 100644
index 000000000..5c28f490d
--- /dev/null
+++ b/sphinx/ext/autodoc/importer.py
@@ -0,0 +1,146 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.ext.autodoc.importer
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Importer utilities for autodoc
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+import warnings
+import traceback
+import contextlib
+from types import FunctionType, MethodType, ModuleType
+
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Any, Generator, List, Set # NOQA
+
+logger = logging.getLogger(__name__)
+
+
+class _MockObject(object):
+ """Used by autodoc_mock_imports."""
+
+ def __init__(self, *args, **kwargs):
+ # type: (Any, Any) -> None
+ pass
+
+ def __len__(self):
+ # type: () -> int
+ return 0
+
+ def __contains__(self, key):
+ # type: (str) -> bool
+ return False
+
+ def __iter__(self):
+ # type: () -> None
+ pass
+
+ def __getitem__(self, key):
+ # type: (str) -> _MockObject
+ return self
+
+ def __getattr__(self, key):
+ # type: (str) -> _MockObject
+ return self
+
+ def __call__(self, *args, **kw):
+ # type: (Any, Any) -> Any
+ if args and type(args[0]) in [FunctionType, MethodType]:
+ # Appears to be a decorator, pass through unchanged
+ return args[0]
+ return self
+
+
+class _MockModule(ModuleType):
+ """Used by autodoc_mock_imports."""
+ __file__ = '/dev/null'
+
+ def __init__(self, name, loader):
+ # type: (str, _MockImporter) -> None
+ self.__name__ = self.__package__ = name
+ self.__loader__ = loader
+ self.__all__ = [] # type: List[str]
+ self.__path__ = [] # type: List[str]
+
+ def __getattr__(self, name):
+ # type: (str) -> _MockObject
+ o = _MockObject()
+ o.__module__ = self.__name__
+ return o
+
+
+class _MockImporter(object):
+ def __init__(self, names):
+ # type: (List[str]) -> None
+ self.base_packages = set() # type: Set[str]
+ for n in names:
+ # Convert module names:
+ # ['a.b.c', 'd.e']
+ # to a set of base packages:
+ # set(['a', 'd'])
+ self.base_packages.add(n.split('.')[0])
+ self.mocked_modules = [] # type: List[str]
+ # enable hook by adding itself to meta_path
+ sys.meta_path = sys.meta_path + [self]
+
+ def disable(self):
+ # remove `self` from `sys.meta_path` to disable import hook
+ sys.meta_path = [i for i in sys.meta_path if i is not self]
+ # remove mocked modules from sys.modules to avoid side effects after
+ # running auto-documenter
+ for m in self.mocked_modules:
+ if m in sys.modules:
+ del sys.modules[m]
+
+ def find_module(self, name, path=None):
+ # type: (str, str) -> Any
+ base_package = name.split('.')[0]
+ if base_package in self.base_packages:
+ return self
+ return None
+
+ def load_module(self, name):
+ # type: (str) -> ModuleType
+ if name in sys.modules:
+ # module has already been imported, return it
+ return sys.modules[name]
+ else:
+ logger.debug('[autodoc] adding a mock module %s!', name)
+ module = _MockModule(name, self)
+ sys.modules[name] = module
+ self.mocked_modules.append(name)
+ return module
+
+
+@contextlib.contextmanager
+def mock(names):
+ # type: (List[str]) -> Generator
+ try:
+ importer = _MockImporter(names)
+ yield
+ finally:
+ importer.disable()
+
+
+def import_module(modname, warningiserror=False):
+ """
+ Call __import__(modname), convert exceptions to ImportError
+ """
+ try:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=ImportWarning)
+ with logging.skip_warningiserror(not warningiserror):
+ __import__(modname)
+ return sys.modules[modname]
+ except BaseException as exc:
+ # Importing modules may cause any side effects, including
+ # SystemExit, so we need to catch all errors.
+ raise ImportError(exc, traceback.format_exc())
diff --git a/sphinx/ext/autodoc/inspector.py b/sphinx/ext/autodoc/inspector.py
new file mode 100644
index 000000000..50c5a9082
--- /dev/null
+++ b/sphinx/ext/autodoc/inspector.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.ext.autodoc.inspector
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Inspect utilities for autodoc
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import typing
+import warnings
+
+from six import StringIO, string_types
+
+from sphinx.deprecation import RemovedInSphinx20Warning
+from sphinx.util.inspect import object_description
+
+if False:
+ # For type annotation
+ from typing import Any, Callable, Dict, Tuple # NOQA
+
+
+def format_annotation(annotation):
+ # type: (Any) -> str
+ """Return formatted representation of a type annotation.
+
+ Show qualified names for types and additional details for types from
+ the ``typing`` module.
+
+ Displaying complex types from ``typing`` relies on its private API.
+ """
+ warnings.warn('format_annotation() is now deprecated. '
+ 'Please use sphinx.util.inspect.Signature instead.',
+ RemovedInSphinx20Warning)
+ if isinstance(annotation, typing.TypeVar): # type: ignore
+ return annotation.__name__
+ if annotation == Ellipsis:
+ return '...'
+ if not isinstance(annotation, type):
+ return repr(annotation)
+
+ qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
+ if annotation else repr(annotation))
+
+ if annotation.__module__ == 'builtins':
+ return annotation.__qualname__ # type: ignore
+ else:
+ if hasattr(typing, 'GenericMeta') and \
+ isinstance(annotation, typing.GenericMeta):
+ # In Python 3.5.2+, all arguments are stored in __args__,
+ # whereas __parameters__ only contains generic parameters.
+ #
+ # Prior to Python 3.5.2, __args__ is not available, and all
+ # arguments are in __parameters__.
+ params = None
+ if hasattr(annotation, '__args__'):
+ if annotation.__args__ is None or len(annotation.__args__) <= 2: # type: ignore # NOQA
+ params = annotation.__args__ # type: ignore
+ else: # typing.Callable
+ args = ', '.join(format_annotation(a) for a in annotation.__args__[:-1]) # type: ignore # NOQA
+ result = format_annotation(annotation.__args__[-1]) # type: ignore
+ return '%s[[%s], %s]' % (qualified_name, args, result)
+ elif hasattr(annotation, '__parameters__'):
+ params = annotation.__parameters__ # type: ignore
+ if params is not None:
+ param_str = ', '.join(format_annotation(p) for p in params)
+ return '%s[%s]' % (qualified_name, param_str)
+ elif (hasattr(typing, 'UnionMeta') and
+ isinstance(annotation, typing.UnionMeta) and # type: ignore
+ hasattr(annotation, '__union_params__')):
+ params = annotation.__union_params__
+ if params is not None:
+ param_str = ', '.join(format_annotation(p) for p in params)
+ return '%s[%s]' % (qualified_name, param_str)
+ elif (hasattr(typing, 'CallableMeta') and
+ isinstance(annotation, typing.CallableMeta) and # type: ignore
+ getattr(annotation, '__args__', None) is not None and
+ hasattr(annotation, '__result__')):
+ # Skipped in the case of plain typing.Callable
+ args = annotation.__args__
+ if args is None:
+ return qualified_name
+ elif args is Ellipsis:
+ args_str = '...'
+ else:
+ formatted_args = (format_annotation(a) for a in args)
+ args_str = '[%s]' % ', '.join(formatted_args)
+ return '%s[%s, %s]' % (qualified_name,
+ args_str,
+ format_annotation(annotation.__result__))
+ elif (hasattr(typing, 'TupleMeta') and
+ isinstance(annotation, typing.TupleMeta) and # type: ignore
+ hasattr(annotation, '__tuple_params__') and
+ hasattr(annotation, '__tuple_use_ellipsis__')):
+ params = annotation.__tuple_params__
+ if params is not None:
+ param_strings = [format_annotation(p) for p in params]
+ if annotation.__tuple_use_ellipsis__:
+ param_strings.append('...')
+ return '%s[%s]' % (qualified_name,
+ ', '.join(param_strings))
+ return qualified_name
+
+
+def formatargspec(function, args, varargs=None, varkw=None, defaults=None,
+ kwonlyargs=(), kwonlydefaults={}, annotations={}):
+ # type: (Callable, Tuple[str, ...], str, str, Any, Tuple, Dict, Dict[str, Any]) -> str
+ """Return a string representation of an ``inspect.FullArgSpec`` tuple.
+
+ An enhanced version of ``inspect.formatargspec()`` that handles typing
+ annotations better.
+ """
+ warnings.warn('formatargspec() is now deprecated. '
+ 'Please use sphinx.util.inspect.Signature instead.',
+ RemovedInSphinx20Warning)
+
+ def format_arg_with_annotation(name):
+ # type: (str) -> str
+ if name in annotations:
+ return '%s: %s' % (name, format_annotation(get_annotation(name)))
+ return name
+
+ def get_annotation(name):
+ # type: (str) -> str
+ value = annotations[name]
+ if isinstance(value, string_types):
+ return introspected_hints.get(name, value)
+ else:
+ return value
+
+ introspected_hints = (typing.get_type_hints(function) # type: ignore
+ if typing and hasattr(function, '__code__') else {})
+
+ fd = StringIO()
+ fd.write('(')
+
+ formatted = []
+ defaults_start = len(args) - len(defaults) if defaults else len(args)
+
+ for i, arg in enumerate(args):
+ arg_fd = StringIO()
+ if isinstance(arg, list):
+ # support tupled arguments list (only for py2): def foo((x, y))
+ arg_fd.write('(')
+ arg_fd.write(format_arg_with_annotation(arg[0]))
+ for param in arg[1:]:
+ arg_fd.write(', ')
+ arg_fd.write(format_arg_with_annotation(param))
+ arg_fd.write(')')
+ else:
+ arg_fd.write(format_arg_with_annotation(arg))
+ if defaults and i >= defaults_start:
+ arg_fd.write(' = ' if arg in annotations else '=')
+ arg_fd.write(object_description(defaults[i - defaults_start])) # type: ignore
+ formatted.append(arg_fd.getvalue())
+
+ if varargs:
+ formatted.append('*' + format_arg_with_annotation(varargs))
+
+ if kwonlyargs:
+ if not varargs:
+ formatted.append('*')
+
+ for kwarg in kwonlyargs:
+ arg_fd = StringIO()
+ arg_fd.write(format_arg_with_annotation(kwarg))
+ if kwonlydefaults and kwarg in kwonlydefaults:
+ arg_fd.write(' = ' if kwarg in annotations else '=')
+ arg_fd.write(object_description(kwonlydefaults[kwarg])) # type: ignore
+ formatted.append(arg_fd.getvalue())
+
+ if varkw:
+ formatted.append('**' + format_arg_with_annotation(varkw))
+
+ fd.write(', '.join(formatted))
+ fd.write(')')
+
+ if 'return' in annotations:
+ fd.write(' -> ')
+ fd.write(format_annotation(get_annotation('return')))
+
+ return fd.getvalue()
diff --git a/sphinx/ext/autosummary/__init__.py b/sphinx/ext/autosummary/__init__.py
index 08776badc..3dded11ff 100644
--- a/sphinx/ext/autosummary/__init__.py
+++ b/sphinx/ext/autosummary/__init__.py
@@ -73,6 +73,7 @@ from sphinx.environment.adapters.toctree import TocTree
from sphinx.util import import_object, rst, logging
from sphinx.pycode import ModuleAnalyzer, PycodeError
from sphinx.ext.autodoc import Options
+from sphinx.ext.autodoc.importer import import_module
if False:
# For type annotation
@@ -512,8 +513,7 @@ def _import_by_name(name):
modname = '.'.join(name_parts[:-1])
if modname:
try:
- __import__(modname)
- mod = sys.modules[modname]
+ mod = import_module(modname)
return getattr(mod, name_parts[-1]), mod, modname
except (ImportError, IndexError, AttributeError):
pass
@@ -525,9 +525,10 @@ def _import_by_name(name):
last_j = j
modname = '.'.join(name_parts[:j])
try:
- __import__(modname)
+ import_module(modname)
except ImportError:
continue
+
if modname in sys.modules:
break
diff --git a/sphinx/ext/autosummary/generate.py b/sphinx/ext/autosummary/generate.py
index 99d5d5796..4db1a93e9 100644
--- a/sphinx/ext/autosummary/generate.py
+++ b/sphinx/ext/autosummary/generate.py
@@ -19,16 +19,17 @@
"""
from __future__ import print_function
+import argparse
+import codecs
import os
+import pydoc
import re
import sys
-import pydoc
-import optparse
-import codecs
from jinja2 import FileSystemLoader, TemplateNotFound
from jinja2.sandbox import SandboxedEnvironment
+from sphinx import __display_version__
from sphinx import package_dir
from sphinx.ext.autosummary import import_by_name, get_documenter
from sphinx.jinja2glue import BuiltinTemplateLoader
@@ -59,33 +60,6 @@ if False:
from sphinx.environment import BuildEnvironment # NOQA
-def main(argv=sys.argv):
- # type: (List[str]) -> None
- usage = """%prog [OPTIONS] SOURCEFILE ..."""
- p = optparse.OptionParser(usage.strip())
- p.add_option("-o", "--output-dir", action="store", type="string",
- dest="output_dir", default=None,
- help="Directory to place all output in")
- p.add_option("-s", "--suffix", action="store", type="string",
- dest="suffix", default="rst",
- help="Default suffix for files (default: %default)")
- p.add_option("-t", "--templates", action="store", type="string",
- dest="templates", default=None,
- help="Custom template directory (default: %default)")
- p.add_option("-i", "--imported-members", action="store_true",
- dest="imported_members", default=False,
- help="Document imported members (default: %default)")
- options, args = p.parse_args(argv[1:])
-
- if len(args) < 1:
- p.error('no input files given')
-
- generate_autosummary_docs(args, options.output_dir,
- "." + options.suffix,
- template_dir=options.templates,
- imported_members=options.imported_members)
-
-
def _simple_info(msg):
# type: (unicode) -> None
print(msg)
@@ -373,5 +347,57 @@ def find_autosummary_in_lines(lines, module=None, filename=None):
return documented
+def get_parser():
+ # type: () -> argparse.ArgumentParser
+ parser = argparse.ArgumentParser(
+ usage='%(prog)s [OPTIONS] <SOURCE_FILE>...',
+ epilog='For more information, visit <http://sphinx-doc.org/>.',
+ description="""
+Generate ReStructuredText using autosummary directives.
+
+sphinx-autogen is a frontend to sphinx.ext.autosummary.generate. It generates
+the reStructuredText files from the autosummary directives contained in the
+given input files.
+
+The format of the autosummary directive is documented in the
+``sphinx.ext.autosummary`` Python module and can be read using::
+
+ pydoc sphinx.ext.autosummary
+""")
+
+ parser.add_argument('--version', action='version', dest='show_version',
+ version='%%(prog)s %s' % __display_version__)
+
+ parser.add_argument('source_file', nargs='+',
+ help='source files to generate rST files for')
+
+ parser.add_argument('-o', '--output-dir', action='store',
+ dest='output_dir',
+ help='directory to place all output in')
+ parser.add_argument('-s', '--suffix', action='store', dest='suffix',
+ default='rst',
+ help='default suffix for files (default: '
+ '%(default)s)')
+ parser.add_argument('-t', '--templates', action='store', dest='templates',
+ default=None,
+ help='custom template directory (default: '
+ '%(default)s)')
+ parser.add_argument('-i', '--imported-members', action='store_true',
+ dest='imported_members', default=False,
+ help='document imported members (default: '
+ '%(default)s)')
+
+ return parser
+
+
+def main(argv=sys.argv[1:]):
+ # type: (List[str]) -> None
+ args = get_parser().parse_args(argv)
+ generate_autosummary_docs(args.source_file, args.output_dir,
+ '.' + args.suffix,
+ template_dir=args.templates,
+ imported_members=args.imported_members)
+
+
if __name__ == '__main__':
main()
diff --git a/sphinx/ext/imgmath.py b/sphinx/ext/imgmath.py
index f4a144403..b5f67e713 100644
--- a/sphinx/ext/imgmath.py
+++ b/sphinx/ext/imgmath.py
@@ -30,6 +30,7 @@ from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT, cd
from sphinx.util.pycompat import sys_encoding
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
+from sphinx.ext.mathbase import get_node_equation_number
if False:
# For type annotation
@@ -333,7 +334,8 @@ def html_visit_displaymath(self, node):
self.body.append(self.starttag(node, 'div', CLASS='math'))
self.body.append('<p>')
if node['number']:
- self.body.append('<span class="eqno">(%s)' % node['number'])
+ number = get_node_equation_number(self, node)
+ self.body.append('<span class="eqno">(%s)' % number)
self.add_permalink_ref(node, _('Permalink to this equation'))
self.body.append('</span>')
if fname is None:
diff --git a/sphinx/ext/inheritance_diagram.py b/sphinx/ext/inheritance_diagram.py
index 34fe7bea6..8e01a1b28 100644
--- a/sphinx/ext/inheritance_diagram.py
+++ b/sphinx/ext/inheritance_diagram.py
@@ -39,10 +39,7 @@ r"""
import re
import sys
import inspect
-try:
- from hashlib import md5
-except ImportError:
- from md5 import md5
+from hashlib import md5
from six import text_type
from six.moves import builtins
@@ -58,7 +55,7 @@ from sphinx.util import force_decode
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
+ from typing import Any, Dict, List, Tuple, Dict, Optional # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.environment import BuildEnvironment # NOQA
@@ -133,8 +130,8 @@ class InheritanceGraph(object):
graphviz dot graph from them.
"""
def __init__(self, class_names, currmodule, show_builtins=False,
- private_bases=False, parts=0):
- # type: (unicode, str, bool, bool, int) -> None
+ private_bases=False, parts=0, aliases=None):
+ # type: (unicode, str, bool, bool, int, Optional[Dict[unicode, unicode]]) -> None
"""*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
@@ -143,7 +140,7 @@ class InheritanceGraph(object):
self.class_names = class_names
classes = self._import_classes(class_names, currmodule)
self.class_info = self._class_info(classes, show_builtins,
- private_bases, parts)
+ private_bases, parts, aliases)
if not self.class_info:
raise InheritanceException('No classes found for '
'inheritance diagram')
@@ -156,8 +153,8 @@ class InheritanceGraph(object):
classes.extend(import_classes(name, currmodule))
return classes
- def _class_info(self, classes, show_builtins, private_bases, parts):
- # type: (List[Any], bool, bool, int) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
+ def _class_info(self, classes, show_builtins, private_bases, parts, aliases):
+ # type: (List[Any], bool, bool, int, Optional[Dict[unicode, unicode]]) -> List[Tuple[unicode, unicode, List[unicode], unicode]] # NOQA
"""Return name and bases for all classes that are ancestors of
*classes*.
@@ -174,8 +171,8 @@ class InheritanceGraph(object):
if not private_bases and cls.__name__.startswith('_'):
return
- nodename = self.class_name(cls, parts)
- fullname = self.class_name(cls, 0)
+ nodename = self.class_name(cls, parts, aliases)
+ fullname = self.class_name(cls, 0, aliases)
# Use first line of docstring as tooltip, if available
tooltip = None
@@ -197,7 +194,7 @@ class InheritanceGraph(object):
continue
if not private_bases and base.__name__.startswith('_'):
continue
- baselist.append(self.class_name(base, parts))
+ baselist.append(self.class_name(base, parts, aliases))
if base not in all_classes:
recurse(base)
@@ -206,8 +203,8 @@ class InheritanceGraph(object):
return list(all_classes.values())
- def class_name(self, cls, parts=0):
- # type: (Any, int) -> unicode
+ def class_name(self, cls, parts=0, aliases=None):
+ # type: (Any, int, Optional[Dict[unicode, unicode]]) -> unicode
"""Given a class object, return a fully-qualified name.
This works for things I've tested in matplotlib so far, but may not be
@@ -219,9 +216,13 @@ class InheritanceGraph(object):
else:
fullname = '%s.%s' % (module, cls.__name__)
if parts == 0:
- return fullname
- name_parts = fullname.split('.')
- return '.'.join(name_parts[-parts:])
+ result = fullname
+ else:
+ name_parts = fullname.split('.')
+ result = '.'.join(name_parts[-parts:])
+ if aliases is not None and result in aliases:
+ return aliases[result]
+ return result
def get_all_class_names(self):
# type: () -> List[unicode]
@@ -339,7 +340,8 @@ class InheritanceDiagram(Directive):
graph = InheritanceGraph(
class_names, env.ref_context.get('py:module'),
parts=node['parts'],
- private_bases='private-bases' in self.options)
+ private_bases='private-bases' in self.options,
+ aliases=env.config.inheritance_alias)
except InheritanceException as err:
return [node.document.reporter.warning(err.args[0],
line=self.lineno)]
@@ -453,4 +455,5 @@ def setup(app):
app.add_config_value('inheritance_graph_attrs', {}, False)
app.add_config_value('inheritance_node_attrs', {}, False)
app.add_config_value('inheritance_edge_attrs', {}, False)
+ app.add_config_value('inheritance_alias', {}, False)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
diff --git a/sphinx/ext/intersphinx.py b/sphinx/ext/intersphinx.py
index ccd2c9321..cd39c48ab 100644
--- a/sphinx/ext/intersphinx.py
+++ b/sphinx/ext/intersphinx.py
@@ -383,4 +383,4 @@ if __name__ == '__main__':
import logging # type: ignore
logging.basicConfig()
- debug(argv=sys.argv) # type: ignore
+ debug(argv=sys.argv[1:]) # type: ignore
diff --git a/sphinx/ext/jsmath.py b/sphinx/ext/jsmath.py
index 21ec3cd23..a74f0641a 100644
--- a/sphinx/ext/jsmath.py
+++ b/sphinx/ext/jsmath.py
@@ -16,6 +16,7 @@ import sphinx
from sphinx.locale import _
from sphinx.application import ExtensionError
from sphinx.ext.mathbase import setup_math as mathbase_setup
+from sphinx.ext.mathbase import get_node_equation_number
def html_visit_math(self, node):
@@ -35,7 +36,8 @@ def html_visit_displaymath(self, node):
if i == 0:
# necessary to e.g. set the id property correctly
if node['number']:
- self.body.append('<span class="eqno">(%s)' % node['number'])
+ number = get_node_equation_number(self, node)
+ self.body.append('<span class="eqno">(%s)' % number)
self.add_permalink_ref(node, _('Permalink to this equation'))
self.body.append('</span>')
self.body.append(self.starttag(node, 'div', CLASS='math'))
diff --git a/sphinx/ext/mathbase.py b/sphinx/ext/mathbase.py
index 549ca30cd..f2e15b485 100644
--- a/sphinx/ext/mathbase.py
+++ b/sphinx/ext/mathbase.py
@@ -12,9 +12,11 @@
from docutils import nodes, utils
from docutils.parsers.rst import Directive, directives
+from sphinx.config import string_classes
from sphinx.roles import XRefRole
from sphinx.locale import __
from sphinx.domains import Domain
+from sphinx.util import logging
from sphinx.util.nodes import make_refnode, set_source_info
if False:
@@ -25,6 +27,8 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+logger = logging.getLogger(__name__)
+
class math(nodes.Inline, nodes.TextElement):
pass
@@ -80,7 +84,20 @@ class MathDomain(Domain):
newnode['target'] = target
return newnode
else:
- title = nodes.Text("(%d)" % number)
+ if env.config.math_numfig and env.config.numfig:
+ if docname in env.toc_fignumbers:
+ id = 'equation-' + target
+ number = env.toc_fignumbers[docname]['displaymath'].get(id, ())
+ number = '.'.join(map(str, number))
+ else:
+ number = ''
+ try:
+ eqref_format = env.config.math_eqref_format or "({number})"
+ title = nodes.Text(eqref_format.format(number=number))
+ except KeyError as exc:
+ logger.warning('Invalid math_eqref_format: %r', exc,
+ location=node)
+ title = nodes.Text("(%d)" % number)
return make_refnode(builder, fromdocname, docname,
"equation-" + target, title)
else:
@@ -116,6 +133,23 @@ class MathDomain(Domain):
return len(targets) + 1
+def get_node_equation_number(writer, node):
+ if writer.builder.config.math_numfig and writer.builder.config.numfig:
+ figtype = 'displaymath'
+ if writer.builder.name == 'singlehtml':
+ key = u"%s/%s" % (writer.docnames[-1], figtype)
+ else:
+ key = figtype
+
+ id = node['ids'][0]
+ number = writer.builder.fignumbers.get(key, {}).get(id, ())
+ number = '.'.join(map(str, number))
+ else:
+ number = node['number']
+
+ return number
+
+
def wrap_displaymath(math, label, numbering):
# type: (unicode, unicode, bool) -> unicode
def is_equation(part):
@@ -264,7 +298,17 @@ def latex_visit_displaymath(self, node):
def latex_visit_eqref(self, node):
# type: (nodes.NodeVisitor, eqref) -> None
label = "equation:%s:%s" % (node['docname'], node['target'])
- self.body.append('\\eqref{%s}' % label)
+ eqref_format = self.builder.config.math_eqref_format
+ if eqref_format:
+ try:
+ ref = '\\ref{%s}' % label
+ self.body.append(eqref_format.format(number=ref))
+ except KeyError as exc:
+ logger.warning('Invalid math_eqref_format: %r', exc,
+ location=node)
+ self.body.append('\\eqref{%s}' % label)
+ else:
+ self.body.append('\\eqref{%s}' % label)
raise nodes.SkipNode
@@ -320,6 +364,8 @@ def texinfo_depart_displaymath(self, node):
def setup_math(app, htmlinlinevisitors, htmldisplayvisitors):
# type: (Sphinx, Tuple[Callable, Any], Tuple[Callable, Any]) -> None
app.add_config_value('math_number_all', False, 'env')
+ app.add_config_value('math_eqref_format', None, 'env', string_classes)
+ app.add_config_value('math_numfig', True, 'env')
app.add_domain(MathDomain)
app.add_node(math, override=True,
latex=(latex_visit_math, None),
@@ -327,12 +373,12 @@ def setup_math(app, htmlinlinevisitors, htmldisplayvisitors):
man=(man_visit_math, None),
texinfo=(texinfo_visit_math, None),
html=htmlinlinevisitors)
- app.add_node(displaymath,
- latex=(latex_visit_displaymath, None),
- text=(text_visit_displaymath, None),
- man=(man_visit_displaymath, man_depart_displaymath),
- texinfo=(texinfo_visit_displaymath, texinfo_depart_displaymath),
- html=htmldisplayvisitors)
+ app.add_enumerable_node(displaymath, 'displaymath',
+ latex=(latex_visit_displaymath, None),
+ text=(text_visit_displaymath, None),
+ man=(man_visit_displaymath, man_depart_displaymath),
+ texinfo=(texinfo_visit_displaymath, texinfo_depart_displaymath),
+ html=htmldisplayvisitors)
app.add_node(eqref, latex=(latex_visit_eqref, None))
app.add_role('math', math_role)
app.add_role('eq', EqXRefRole(warn_dangling=True))
diff --git a/sphinx/ext/mathjax.py b/sphinx/ext/mathjax.py
index 7fb3b17ad..8698e2801 100644
--- a/sphinx/ext/mathjax.py
+++ b/sphinx/ext/mathjax.py
@@ -17,6 +17,7 @@ import sphinx
from sphinx.locale import _
from sphinx.errors import ExtensionError
from sphinx.ext.mathbase import setup_math as mathbase_setup
+from sphinx.ext.mathbase import get_node_equation_number
def html_visit_math(self, node):
@@ -36,7 +37,8 @@ def html_visit_displaymath(self, node):
# necessary to e.g. set the id property correctly
if node['number']:
- self.body.append('<span class="eqno">(%s)' % node['number'])
+ number = get_node_equation_number(self, node)
+ self.body.append('<span class="eqno">(%s)' % number)
self.add_permalink_ref(node, _('Permalink to this equation'))
self.body.append('</span>')
self.body.append(self.builder.config.mathjax_display[0])
diff --git a/sphinx/ext/pngmath.py b/sphinx/ext/pngmath.py
index eb1c82e8f..717d51756 100644
--- a/sphinx/ext/pngmath.py
+++ b/sphinx/ext/pngmath.py
@@ -30,6 +30,7 @@ from sphinx.util.png import read_png_depth, write_png_depth
from sphinx.util.osutil import ensuredir, ENOENT, cd
from sphinx.util.pycompat import sys_encoding
from sphinx.ext.mathbase import setup_math as mathbase_setup, wrap_displaymath
+from sphinx.ext.mathbase import get_node_equation_number
if False:
# For type annotation
@@ -242,7 +243,8 @@ def html_visit_displaymath(self, node):
self.body.append(self.starttag(node, 'div', CLASS='math'))
self.body.append('<p>')
if node['number']:
- self.body.append('<span class="eqno">(%s)</span>' % node['number'])
+ number = get_node_equation_number(self, node)
+ self.body.append('<span class="eqno">(%s)</span>' % number)
if fname is None:
# something failed -- use text-only as a bad substitute
self.body.append('<span class="math">%s</span></p>\n</div>' %
diff --git a/sphinx/ext/viewcode.py b/sphinx/ext/viewcode.py
index 2fd4479f8..d4ff7cebb 100644
--- a/sphinx/ext/viewcode.py
+++ b/sphinx/ext/viewcode.py
@@ -146,10 +146,11 @@ def collect_pages(app):
# app.builder.info(' (%d module code pages)' %
# len(env._viewcode_modules), nonl=1)
- for modname, entry in status_iterator(iteritems(env._viewcode_modules), # type: ignore
- 'highlighting module code... ', "blue",
- len(env._viewcode_modules), # type: ignore
- app.verbosity, lambda x: x[0]):
+ for modname, entry in status_iterator(
+ sorted(iteritems(env._viewcode_modules)), # type: ignore
+ 'highlighting module code... ', "blue",
+ len(env._viewcode_modules), # type: ignore
+ app.verbosity, lambda x: x[0]):
if not entry:
continue
code, tags, used, refname = entry
diff --git a/sphinx/extension.py b/sphinx/extension.py
index 8a2f945ae..98d35b5af 100644
--- a/sphinx/extension.py
+++ b/sphinx/extension.py
@@ -38,7 +38,7 @@ class Extension(object):
# The extension supports parallel write or not. The default value
# is ``True``. Sphinx writes parallelly documents even if
# the extension does not tell its status.
- self.parallel_write_safe = kwargs.pop('parallel_read_safe', True)
+ self.parallel_write_safe = kwargs.pop('parallel_write_safe', True)
def verify_required_extensions(app, requirements):
diff --git a/sphinx/io.py b/sphinx/io.py
index 8365e22e0..2d584de91 100644
--- a/sphinx/io.py
+++ b/sphinx/io.py
@@ -8,10 +8,15 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from docutils.io import FileInput
+import re
+import codecs
+
+from docutils.io import FileInput, NullOutput
+from docutils.core import Publisher
from docutils.readers import standalone
+from docutils.statemachine import StringList, string2lines
from docutils.writers import UnfilteredWriter
-from six import string_types, text_type, iteritems
+from six import text_type
from typing import Any, Union # NOQA
from sphinx.transforms import (
@@ -24,7 +29,7 @@ from sphinx.transforms.compact_bullet_list import RefOnlyBulletListTransform
from sphinx.transforms.i18n import (
PreserveTranslatableMessages, Locale, RemoveTranslatableInline,
)
-from sphinx.util import import_object, split_docinfo
+from sphinx.util import logging
from sphinx.util.docutils import LoggingReporter
if False:
@@ -38,41 +43,18 @@ if False:
from sphinx.builders import Builder # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+docinfo_re = re.compile(':\\w+:.*?')
+
+
+logger = logging.getLogger(__name__)
+
class SphinxBaseReader(standalone.Reader):
"""
- Add our source parsers
- """
- def __init__(self, app, parsers={}, *args, **kwargs):
- # type: (Sphinx, Dict[unicode, Parser], Any, Any) -> None
- standalone.Reader.__init__(self, *args, **kwargs)
- self.parser_map = {} # type: Dict[unicode, Parser]
- for suffix, parser_class in parsers.items():
- if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser') # type: ignore
- parser = parser_class()
- if hasattr(parser, 'set_application'):
- parser.set_application(app)
- self.parser_map[suffix] = parser
-
- def read(self, source, parser, settings):
- # type: (Input, Parser, Dict) -> nodes.document
- self.source = source
-
- for suffix in self.parser_map:
- if source.source_path.endswith(suffix):
- self.parser = self.parser_map[suffix]
- break
- else:
- # use special parser for unknown file-extension '*' (if exists)
- self.parser = self.parser_map.get('*')
+ A base class of readers for Sphinx.
- if not self.parser:
- self.parser = parser
- self.settings = settings
- self.input = self.source.read()
- self.parse()
- return self.document
+ This replaces reporter by Sphinx's on generating document.
+ """
def get_transforms(self):
# type: () -> List[Transform]
@@ -80,50 +62,59 @@ class SphinxBaseReader(standalone.Reader):
def new_document(self):
# type: () -> nodes.document
+ """Creates a new document object which having a special reporter object good
+ for logging.
+ """
document = standalone.Reader.new_document(self)
reporter = document.reporter
- document.reporter = LoggingReporter(reporter.source, reporter.report_level,
- reporter.halt_level, reporter.debug_flag,
- reporter.error_handler)
+ document.reporter = LoggingReporter.from_reporter(reporter)
+ document.reporter.set_source(self.source)
return document
class SphinxStandaloneReader(SphinxBaseReader):
"""
- Add our own transforms.
+ A basic document reader for Sphinx.
"""
transforms = [ApplySourceWorkaround, ExtraTranslatableNodes, PreserveTranslatableMessages,
Locale, CitationReferences, DefaultSubstitutions, MoveModuleTargets,
HandleCodeBlocks, AutoNumbering, AutoIndexUpgrader, SortIds,
RemoveTranslatableInline, PreserveTranslatableMessages, FilterSystemMessages,
- RefOnlyBulletListTransform, UnreferencedFootnotesDetector]
+ RefOnlyBulletListTransform, UnreferencedFootnotesDetector
+ ] # type: List[Transform]
+
+ def __init__(self, app, *args, **kwargs):
+ # type: (Sphinx, Any, Any) -> None
+ self.transforms = self.transforms + app.registry.get_transforms()
+ SphinxBaseReader.__init__(self, *args, **kwargs) # type: ignore
class SphinxI18nReader(SphinxBaseReader):
"""
- Replacer for document.reporter.get_source_and_line method.
+ A document reader for i18n.
- reST text lines for translation do not have the original source line number.
- This class provides the correct line numbers when reporting.
+ This returns the source line number of original text as current source line number
+ to let users know where the error happened.
+ Because the translated texts are partial and they don't have correct line numbers.
"""
+ lineno = None # type: int
transforms = [ApplySourceWorkaround, ExtraTranslatableNodes, CitationReferences,
DefaultSubstitutions, MoveModuleTargets, HandleCodeBlocks,
AutoNumbering, SortIds, RemoveTranslatableInline,
FilterSystemMessages, RefOnlyBulletListTransform,
UnreferencedFootnotesDetector]
- def __init__(self, *args, **kwargs):
- # type: (Any, Any) -> None
- SphinxBaseReader.__init__(self, *args, **kwargs)
- self.lineno = None # type: int
-
def set_lineno_for_reporter(self, lineno):
# type: (int) -> None
+ """Stores the source line number of original text."""
self.lineno = lineno
def new_document(self):
# type: () -> nodes.document
+ """Creates a new document object which having a special reporter object for
+ translation.
+ """
document = SphinxBaseReader.new_document(self)
reporter = document.reporter
@@ -136,6 +127,8 @@ class SphinxI18nReader(SphinxBaseReader):
class SphinxDummyWriter(UnfilteredWriter):
+ """Dummy writer module used for generating doctree."""
+
supported = ('html',) # needed to keep "meta" nodes
def translate(self):
@@ -143,11 +136,26 @@ class SphinxDummyWriter(UnfilteredWriter):
pass
-class SphinxFileInput(FileInput):
+def SphinxDummySourceClass(source, *args, **kwargs):
+ """Bypass source object as is to cheat Publisher."""
+ return source
+
+
+class SphinxBaseFileInput(FileInput):
+ """A base class of SphinxFileInput.
+
+ It supports to replace unknown Unicode characters to '?'. And it also emits
+ Sphinx events ``source-read`` on reading.
+ """
+
def __init__(self, app, env, *args, **kwds):
# type: (Sphinx, BuildEnvironment, Any, Any) -> None
self.app = app
self.env = env
+
+ # set up error handler
+ codecs.register_error('sphinx', self.warn_and_replace) # type: ignore
+
kwds['error_handler'] = 'sphinx' # py3: handle error on open.
FileInput.__init__(self, *args, **kwds)
@@ -159,25 +167,127 @@ class SphinxFileInput(FileInput):
def read(self):
# type: () -> unicode
- def get_parser_type(source_path):
- # type: (unicode) -> Tuple[unicode]
- for suffix, parser_class in iteritems(self.app.registry.get_source_parsers()):
- if source_path.endswith(suffix):
- if isinstance(parser_class, string_types):
- parser_class = import_object(parser_class, 'source parser') # type: ignore # NOQA
- return parser_class.supported
- else:
- return ('restructuredtext',)
+ """Reads the contents from file.
+ After reading, it emits Sphinx event ``source-read``.
+ """
data = FileInput.read(self)
- if self.app:
- arg = [data]
- self.app.emit('source-read', self.env.docname, arg)
- data = arg[0]
- docinfo, data = split_docinfo(data)
- if 'restructuredtext' in get_parser_type(self.source_path):
- if self.env.config.rst_epilog:
- data = data + '\n' + self.env.config.rst_epilog + '\n'
- if self.env.config.rst_prolog:
- data = self.env.config.rst_prolog + '\n' + data
- return docinfo + data
+
+ # emit source-read event
+ arg = [data]
+ self.app.emit('source-read', self.env.docname, arg)
+ return arg[0]
+
+ def warn_and_replace(self, error):
+ # type: (Any) -> Tuple
+ """Custom decoding error handler that warns and replaces."""
+ linestart = error.object.rfind(b'\n', 0, error.start)
+ lineend = error.object.find(b'\n', error.start)
+ if lineend == -1:
+ lineend = len(error.object)
+ lineno = error.object.count(b'\n', 0, error.start) + 1
+ logger.warning('undecodable source characters, replacing with "?": %r',
+ (error.object[linestart + 1:error.start] + b'>>>' +
+ error.object[error.start:error.end] + b'<<<' +
+ error.object[error.end:lineend]),
+ location=(self.env.docname, lineno))
+ return (u'?', error.end)
+
+
+class SphinxFileInput(SphinxBaseFileInput):
+ """A basic FileInput for Sphinx."""
+ pass
+
+
+class SphinxRSTFileInput(SphinxBaseFileInput):
+ """A reST FileInput for Sphinx.
+
+ This FileInput automatically prepends and appends text by :confval:`rst_prolog` and
+ :confval:`rst_epilog`.
+
+ .. important::
+
+ This FileInput uses an instance of ``StringList`` as a return value of ``read()``
+ method to indicate original source filename and line numbers after prepending and
+ appending.
+ For that reason, ``sphinx.parsers.RSTParser`` should be used with this to parse
+ a content correctly.
+ """
+
+ def prepend_prolog(self, text, prolog):
+ # type: (StringList, unicode) -> None
+ docinfo = self.count_docinfo_lines(text)
+ if docinfo:
+ # insert a blank line after docinfo
+ text.insert(docinfo, '', '<generated>', 0)
+ docinfo += 1
+
+ # insert prolog (after docinfo if exists)
+ for lineno, line in enumerate(prolog.splitlines()):
+ text.insert(docinfo + lineno, line, '<rst_prolog>', lineno)
+
+ text.insert(docinfo + lineno + 1, '', '<generated>', 0)
+
+ def append_epilog(self, text, epilog):
+ # type: (StringList, unicode) -> None
+ # append a blank line and rst_epilog
+ text.append('', '<generated>', 0)
+ for lineno, line in enumerate(epilog.splitlines()):
+ text.append(line, '<rst_epilog>', lineno)
+
+ def read(self):
+ # type: () -> StringList
+ inputstring = SphinxBaseFileInput.read(self)
+ lines = string2lines(inputstring, convert_whitespace=True)
+ content = StringList()
+ for lineno, line in enumerate(lines):
+ content.append(line, self.source_path, lineno)
+
+ if self.env.config.rst_prolog:
+ self.prepend_prolog(content, self.env.config.rst_prolog)
+ if self.env.config.rst_epilog:
+ self.append_epilog(content, self.env.config.rst_epilog)
+
+ return content
+
+ def count_docinfo_lines(self, content):
+ # type: (StringList) -> int
+ if len(content) == 0:
+ return 0
+ else:
+ for lineno, line in enumerate(content.data):
+ if not docinfo_re.match(line):
+ break
+ return lineno
+
+
+def read_doc(app, env, filename):
+ # type: (Sphinx, BuildEnvironment, unicode) -> nodes.document
+ """Parse a document and convert to doctree."""
+ input_class = app.registry.get_source_input(filename)
+ reader = SphinxStandaloneReader(app)
+ source = input_class(app, env, source=None, source_path=filename,
+ encoding=env.config.source_encoding)
+ parser = app.registry.create_source_parser(app, filename)
+
+ pub = Publisher(reader=reader,
+ parser=parser,
+ writer=SphinxDummyWriter(),
+ source_class=SphinxDummySourceClass,
+ destination=NullOutput())
+ pub.set_components(None, 'restructuredtext', None)
+ pub.process_programmatic_settings(None, env.settings, None)
+ pub.set_source(source, filename)
+ pub.publish()
+ return pub.document
+
+
+def setup(app):
+ app.registry.add_source_input('*', SphinxFileInput)
+ app.registry.add_source_input('restructuredtext', SphinxRSTFileInput)
+
+ return {
+ 'version': 'builtin',
+ 'parallel_read_safe': True,
+ 'parallel_write_safe': True,
+ }
diff --git a/sphinx/locale/__init__.py b/sphinx/locale/__init__.py
index 68686e3fc..e148f2c12 100644
--- a/sphinx/locale/__init__.py
+++ b/sphinx/locale/__init__.py
@@ -227,29 +227,35 @@ pairindextypes = {
translators = {} # type: Dict[unicode, Any]
if PY3:
- def _(message):
- # type: (unicode) -> unicode
+ def _(message, *args):
+ # type: (unicode, *Any) -> unicode
try:
- return translators['sphinx'].gettext(message)
+ if len(args) <= 1:
+ return translators['sphinx'].gettext(message)
+ else: # support pluralization
+ return translators['sphinx'].ngettext(message, args[0], args[1])
except KeyError:
return message
else:
- def _(message):
- # type: (unicode) -> unicode
+ def _(message, *args):
+ # type: (unicode, *Any) -> unicode
try:
- return translators['sphinx'].ugettext(message)
+ if len(args) <= 1:
+ return translators['sphinx'].ugettext(message)
+ else: # support pluralization
+ return translators['sphinx'].ungettext(message, args[0], args[1])
except KeyError:
return message
-def __(message):
- # type: (unicode) -> unicode
+def __(message, *args):
+ # type: (unicode, *Any) -> unicode
"""A dummy wrapper to i18n'ize exceptions and command line messages.
In future, the messages are translated using LC_MESSAGES or any other
locale settings.
"""
- return message
+ return message if len(args) <= 1 else args[0]
def init(locale_dirs, language, catalog='sphinx'):
diff --git a/sphinx/make_mode.py b/sphinx/make_mode.py
index f22a63b09..0bc1a797d 100644
--- a/sphinx/make_mode.py
+++ b/sphinx/make_mode.py
@@ -303,8 +303,7 @@ class Make(object):
if doctreedir is None:
doctreedir = self.builddir_join('doctrees')
- args = [sys.argv[0],
- '-b', builder,
+ args = ['-b', builder,
'-d', doctreedir,
self.srcdir,
self.builddir_join(builder)]
diff --git a/sphinx/parsers.py b/sphinx/parsers.py
index b58eefa23..48a155203 100644
--- a/sphinx/parsers.py
+++ b/sphinx/parsers.py
@@ -11,6 +11,8 @@
import docutils.parsers
import docutils.parsers.rst
+from docutils.parsers.rst import states
+from docutils.statemachine import StringList
from docutils.transforms.universal import SmartQuotes
from sphinx.transforms import SphinxSmartQuotes
@@ -18,6 +20,7 @@ from sphinx.transforms import SphinxSmartQuotes
if False:
# For type annotation
from typing import Any, Dict, List, Type # NOQA
+ from docutils import nodes # NOQA
from docutils.transforms import Transform # NOQA
from sphinx.application import Sphinx # NOQA
@@ -56,7 +59,7 @@ class Parser(docutils.parsers.Parser):
class RSTParser(docutils.parsers.rst.Parser):
- """A reST parser customized for Sphinx."""
+ """A reST parser for Sphinx."""
def get_transforms(self):
# type: () -> List[Type[Transform]]
@@ -66,6 +69,26 @@ class RSTParser(docutils.parsers.rst.Parser):
transforms.append(SphinxSmartQuotes)
return transforms
+ def parse(self, inputstring, document):
+ # type: (Any, nodes.document) -> None
+ """Parse text and generate a document tree.
+
+ This accepts StringList as an inputstring parameter.
+ It enables to handle mixed contents (cf. :confval:`rst_prolog`) correctly.
+ """
+ if isinstance(inputstring, StringList):
+ self.setup_parse(inputstring, document)
+ self.statemachine = states.RSTStateMachine(
+ state_classes=self.state_classes,
+ initial_state=self.initial_state,
+ debug=document.reporter.debug_flag)
+ # Give inputstring directly to statemachine.
+ self.statemachine.run(inputstring, document, inliner=self.inliner)
+ self.finish_parse()
+ else:
+ # otherwise, inputstring might be a string. It will be handled by superclass.
+ docutils.parsers.rst.Parser.parse(self, inputstring, document)
+
def setup(app):
# type: (Sphinx) -> Dict[unicode, Any]
diff --git a/sphinx/pycode/Grammar-py2.txt b/sphinx/pycode/Grammar-py2.txt
deleted file mode 100644
index 98bd1f22b..000000000
--- a/sphinx/pycode/Grammar-py2.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-# Grammar for Python 2.x
-
-# IMPORTANT: when copying over a new Grammar file, make sure file_input
-# is the first nonterminal in the file!
-
-# Start symbols for the grammar:
-# single_input is a single interactive statement;
-# file_input is a module or sequence of commands read from an input file;
-# eval_input is the input for the eval() and input() functions.
-# NB: compound_stmt in single_input is followed by extra NEWLINE!
-file_input: (NEWLINE | stmt)* ENDMARKER
-single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-eval_input: testlist NEWLINE* ENDMARKER
-
-decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
-decorators: decorator+
-decorated: decorators (classdef | funcdef)
-funcdef: 'def' NAME parameters ':' suite
-parameters: '(' [varargslist] ')'
-varargslist: ((fpdef ['=' test] ',')*
- ('*' NAME [',' '**' NAME] | '**' NAME) |
- fpdef ['=' test] (',' fpdef ['=' test])* [','])
-fpdef: NAME | '(' fplist ')'
-fplist: fpdef (',' fpdef)* [',']
-
-stmt: simple_stmt | compound_stmt
-simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
-small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
- import_stmt | global_stmt | exec_stmt | assert_stmt)
-expr_stmt: testlist (augassign (yield_expr|testlist) |
- ('=' (yield_expr|testlist))*)
-augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
- '<<=' | '>>=' | '**=' | '//=')
-# For normal assignments, additional restrictions enforced by the interpreter
-print_stmt: 'print' ( [ test (',' test)* [','] ] |
- '>>' test [ (',' test)+ [','] ] )
-del_stmt: 'del' exprlist
-pass_stmt: 'pass'
-flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
-break_stmt: 'break'
-continue_stmt: 'continue'
-return_stmt: 'return' [testlist]
-yield_stmt: yield_expr
-raise_stmt: 'raise' [test [',' test [',' test]]]
-import_stmt: import_name | import_from
-import_name: 'import' dotted_as_names
-import_from: ('from' ('.'* dotted_name | '.'+)
- 'import' ('*' | '(' import_as_names ')' | import_as_names))
-import_as_name: NAME ['as' NAME]
-dotted_as_name: dotted_name ['as' NAME]
-import_as_names: import_as_name (',' import_as_name)* [',']
-dotted_as_names: dotted_as_name (',' dotted_as_name)*
-dotted_name: NAME ('.' NAME)*
-global_stmt: 'global' NAME (',' NAME)*
-exec_stmt: 'exec' expr ['in' test [',' test]]
-assert_stmt: 'assert' test [',' test]
-
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
-if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-while_stmt: 'while' test ':' suite ['else' ':' suite]
-for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
-try_stmt: ('try' ':' suite
- ((except_clause ':' suite)+
- ['else' ':' suite]
- ['finally' ':' suite] |
- 'finally' ':' suite))
-with_stmt: 'with' with_item (',' with_item)* ':' suite
-with_item: test ['as' expr]
-# NB compile.c makes sure that the default except clause is last
-except_clause: 'except' [test [('as' | ',') test]]
-suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
-
-# Backward compatibility cruft to support:
-# [ x for x in lambda: True, lambda: False if x() ]
-# even while also allowing:
-# lambda x: 5 if x else 2
-# (But not a mix of the two)
-testlist_safe: old_test [(',' old_test)+ [',']]
-old_test: or_test | old_lambdef
-old_lambdef: 'lambda' [varargslist] ':' old_test
-
-test: or_test ['if' or_test 'else' test] | lambdef
-or_test: and_test ('or' and_test)*
-and_test: not_test ('and' not_test)*
-not_test: 'not' not_test | comparison
-comparison: expr (comp_op expr)*
-comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-expr: xor_expr ('|' xor_expr)*
-xor_expr: and_expr ('^' and_expr)*
-and_expr: shift_expr ('&' shift_expr)*
-shift_expr: arith_expr (('<<'|'>>') arith_expr)*
-arith_expr: term (('+'|'-') term)*
-term: factor (('*'|'/'|'%'|'//') factor)*
-factor: ('+'|'-'|'~') factor | power
-power: atom trailer* ['**' factor]
-atom: ('(' [yield_expr|testlist_comp] ')' |
- '[' [listmaker] ']' |
- '{' [dictorsetmaker] '}' |
- '`' testlist1 '`' |
- NAME | NUMBER | STRING+)
-listmaker: test ( list_for | (',' test)* [','] )
-testlist_comp: test ( comp_for | (',' test)* [','] )
-lambdef: 'lambda' [varargslist] ':' test
-trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-subscriptlist: subscript (',' subscript)* [',']
-subscript: '.' '.' '.' | test | [test] ':' [test] [sliceop]
-sliceop: ':' [test]
-exprlist: expr (',' expr)* [',']
-testlist: test (',' test)* [',']
-dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) |
- (test (comp_for | (',' test)* [','])) )
-
-classdef: 'class' NAME ['(' [testlist] ')'] ':' suite
-
-arglist: (argument ',')* (argument [',']
- |'*' test (',' argument)* [',' '**' test]
- |'**' test)
-# The reason that keywords are test nodes instead of NAME is that using NAME
-# results in an ambiguity. ast.c makes sure it's a NAME.
-argument: test [comp_for] | test '=' test
-
-list_iter: list_for | list_if
-list_for: 'for' exprlist 'in' testlist_safe [list_iter]
-list_if: 'if' old_test [list_iter]
-
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
-comp_if: 'if' old_test [comp_iter]
-
-testlist1: test (',' test)*
-
-# not used in grammar, but may appear in "node" passed from Parser to Compiler
-encoding_decl: NAME
-
-yield_expr: 'yield' [testlist]
diff --git a/sphinx/pycode/Grammar-py3.txt b/sphinx/pycode/Grammar-py3.txt
deleted file mode 100644
index d05b758ee..000000000
--- a/sphinx/pycode/Grammar-py3.txt
+++ /dev/null
@@ -1,143 +0,0 @@
-# Grammar for Python 3.x (with at least x <= 5)
-
-
-# IMPORTANT: when copying over a new Grammar file, make sure file_input
-# is the first nonterminal in the file!
-
-# Start symbols for the grammar:
-# single_input is a single interactive statement;
-# file_input is a module or sequence of commands read from an input file;
-# eval_input is the input for the eval() functions.
-# NB: compound_stmt in single_input is followed by extra NEWLINE!
-file_input: (NEWLINE | stmt)* ENDMARKER
-single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
-eval_input: testlist NEWLINE* ENDMARKER
-
-decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
-decorators: decorator+
-decorated: decorators (classdef | funcdef | async_funcdef)
-
-async_funcdef: ASYNC funcdef
-funcdef: 'def' NAME parameters ['->' test] ':' suite
-
-parameters: '(' [typedargslist] ')'
-typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [','
- ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]]
- | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef)
-tfpdef: NAME [':' test]
-varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [','
- ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]]
- | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef)
-vfpdef: NAME
-
-stmt: simple_stmt | compound_stmt
-simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
-small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt |
- import_stmt | global_stmt | nonlocal_stmt | assert_stmt)
-expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
- ('=' (yield_expr|testlist_star_expr))*)
-testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
-augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
- '<<=' | '>>=' | '**=' | '//=')
-# For normal assignments, additional restrictions enforced by the interpreter
-del_stmt: 'del' exprlist
-pass_stmt: 'pass'
-flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
-break_stmt: 'break'
-continue_stmt: 'continue'
-return_stmt: 'return' [testlist]
-yield_stmt: yield_expr
-raise_stmt: 'raise' [test ['from' test]]
-import_stmt: import_name | import_from
-import_name: 'import' dotted_as_names
-# note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS
-import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+)
- 'import' ('*' | '(' import_as_names ')' | import_as_names))
-import_as_name: NAME ['as' NAME]
-dotted_as_name: dotted_name ['as' NAME]
-import_as_names: import_as_name (',' import_as_name)* [',']
-dotted_as_names: dotted_as_name (',' dotted_as_name)*
-dotted_name: NAME ('.' NAME)*
-global_stmt: 'global' NAME (',' NAME)*
-nonlocal_stmt: 'nonlocal' NAME (',' NAME)*
-assert_stmt: 'assert' test [',' test]
-
-compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated | async_stmt
-async_stmt: ASYNC (funcdef | with_stmt | for_stmt)
-if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
-while_stmt: 'while' test ':' suite ['else' ':' suite]
-for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
-try_stmt: ('try' ':' suite
- ((except_clause ':' suite)+
- ['else' ':' suite]
- ['finally' ':' suite] |
- 'finally' ':' suite))
-with_stmt: 'with' with_item (',' with_item)* ':' suite
-with_item: test ['as' expr]
-# NB compile.c makes sure that the default except clause is last
-except_clause: 'except' [test ['as' NAME]]
-suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
-
-test: or_test ['if' or_test 'else' test] | lambdef
-test_nocond: or_test | lambdef_nocond
-lambdef: 'lambda' [varargslist] ':' test
-lambdef_nocond: 'lambda' [varargslist] ':' test_nocond
-or_test: and_test ('or' and_test)*
-and_test: not_test ('and' not_test)*
-not_test: 'not' not_test | comparison
-comparison: expr (comp_op expr)*
-# <> isn't actually a valid comparison operator in Python. It's here for the
-# sake of a __future__ import described in PEP 401 (which really works :-)
-comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
-star_expr: '*' expr
-expr: xor_expr ('|' xor_expr)*
-xor_expr: and_expr ('^' and_expr)*
-and_expr: shift_expr ('&' shift_expr)*
-shift_expr: arith_expr (('<<'|'>>') arith_expr)*
-arith_expr: term (('+'|'-') term)*
-term: factor (('*'|'@'|'/'|'%'|'//') factor)*
-factor: ('+'|'-'|'~') factor | power
-power: [AWAIT] atom trailer* ['**' factor]
-atom: ('(' [yield_expr|testlist_comp] ')' |
- '[' [testlist_comp] ']' |
- '{' [dictorsetmaker] '}' |
- NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False')
-testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
-trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
-subscriptlist: subscript (',' subscript)* [',']
-subscript: test | [test] ':' [test] [sliceop]
-sliceop: ':' [test]
-exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
-testlist: test (',' test)* [',']
-dictorsetmaker: ( ((test ':' test | '**' expr)
- (comp_for | (',' (test ':' test | '**' expr))* [','])) |
- ((test | star_expr)
- (comp_for | (',' (test | star_expr))* [','])) )
-
-classdef: 'class' NAME ['(' [arglist] ')'] ':' suite
-
-arglist: argument (',' argument)* [',']
-
-# The reason that keywords are test nodes instead of NAME is that using NAME
-# results in an ambiguity. ast.c makes sure it's a NAME.
-# "test '=' test" is really "keyword '=' test", but we have no such token.
-# These need to be in a single rule to avoid grammar that is ambiguous
-# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
-# we explicitly match '*' here, too, to give it proper precedence.
-# Illegal combinations and orderings are blocked in ast.c:
-# multiple (test comp_for) arguements are blocked; keyword unpackings
-# that precede iterable unpackings are blocked; etc.
-argument: ( test [comp_for] |
- test '=' test |
- '**' test |
- '*' test )
-
-comp_iter: comp_for | comp_if
-comp_for: 'for' exprlist 'in' or_test [comp_iter]
-comp_if: 'if' test_nocond [comp_iter]
-
-# not used in grammar, but may appear in "node" passed from Parser to Compiler
-encoding_decl: NAME
-
-yield_expr: 'yield' [yield_arg]
-yield_arg: 'from' test | testlist
diff --git a/sphinx/pycode/__init__.py b/sphinx/pycode/__init__.py
index 92d96cecf..de951a19f 100644
--- a/sphinx/pycode/__init__.py
+++ b/sphinx/pycode/__init__.py
@@ -10,174 +10,15 @@
"""
from __future__ import print_function
-import re
-import sys
-from os import path
+from six import iteritems, BytesIO, StringIO
-from six import iteritems, text_type, BytesIO, StringIO
-
-from sphinx import package_dir
from sphinx.errors import PycodeError
-from sphinx.pycode import nodes
-from sphinx.pycode.pgen2 import driver, token, tokenize, parse, literals
+from sphinx.pycode.parser import Parser
from sphinx.util import get_module_source, detect_encoding
-from sphinx.util.pycompat import TextIOWrapper
-from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
if False:
# For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
-
-
-# load the Python grammar
-_grammarfile = path.join(package_dir, 'pycode',
- 'Grammar-py%d.txt' % sys.version_info[0])
-pygrammar = driver.load_grammar(_grammarfile)
-pydriver = driver.Driver(pygrammar, convert=nodes.convert)
-
-
-# an object with attributes corresponding to token and symbol names
-class sym(object):
- pass
-
-
-for k, v in iteritems(pygrammar.symbol2number):
- setattr(sym, k, v)
-for k, v in iteritems(token.tok_name):
- setattr(sym, v, k)
-
-# a dict mapping terminal and nonterminal numbers to their names
-number2name = pygrammar.number2symbol.copy()
-number2name.update(token.tok_name)
-
-_eq = nodes.Leaf(token.EQUAL, '=')
-
-emptyline_re = re.compile(r'^\s*(#.*)?$')
-
-
-class AttrDocVisitor(nodes.NodeVisitor):
- """
- Visitor that collects docstrings for attribute assignments on toplevel and
- in classes (class attributes and attributes set in __init__).
-
- The docstrings can either be in special '#:' comments before the assignment
- or in a docstring after it.
- """
- def init(self, scope, encoding):
- self.scope = scope
- self.in_init = 0
- self.encoding = encoding
- self.namespace = [] # type: List[unicode]
- self.collected = {} # type: Dict[Tuple[unicode, unicode], unicode]
- self.tagnumber = 0
- self.tagorder = {} # type: Dict[unicode, int]
-
- def add_tag(self, name):
- name = '.'.join(self.namespace + [name])
- self.tagorder[name] = self.tagnumber
- self.tagnumber += 1
-
- def visit_classdef(self, node):
- """Visit a class."""
- self.add_tag(node[1].value)
- self.namespace.append(node[1].value)
- self.generic_visit(node)
- self.namespace.pop()
-
- def visit_funcdef(self, node):
- """Visit a function (or method)."""
- # usually, don't descend into functions -- nothing interesting there
- self.add_tag(node[1].value)
- if node[1].value == '__init__':
- # however, collect attributes set in __init__ methods
- self.in_init += 1
- self.generic_visit(node)
- self.in_init -= 1
-
- def visit_expr_stmt(self, node):
- """Visit an assignment which may have a special comment before (or
- after) it.
- """
- if _eq not in node.children:
- # not an assignment (we don't care for augmented assignments)
- return
- # look *after* the node; there may be a comment prefixing the NEWLINE
- # of the simple_stmt
- parent = node.parent
- idx = parent.children.index(node) + 1
- while idx < len(parent):
- if parent[idx].type == sym.SEMI: # type: ignore
- idx += 1
- continue # skip over semicolon
- if parent[idx].type == sym.NEWLINE: # type: ignore
- prefix = parent[idx].get_prefix()
- if not isinstance(prefix, text_type):
- prefix = prefix.decode(self.encoding)
- docstring = prepare_commentdoc(prefix)
- if docstring:
- self.add_docstring(node, docstring)
- return # don't allow docstrings both before and after
- break
- # now look *before* the node
- pnode = node[0]
- prefix = pnode.get_prefix()
- # if the assignment is the first statement on a new indentation
- # level, its preceding whitespace and comments are not assigned
- # to that token, but the first INDENT or DEDENT token
- while not prefix:
- pnode = pnode.get_prev_leaf()
- if not pnode or pnode.type not in (token.INDENT, token.DEDENT):
- break
- prefix = pnode.get_prefix()
- if not isinstance(prefix, text_type):
- prefix = prefix.decode(self.encoding)
- docstring = prepare_commentdoc(prefix)
- self.add_docstring(node, docstring)
-
- def visit_simple_stmt(self, node):
- """Visit a docstring statement which may have an assignment before."""
- if node[0].type != token.STRING:
- # not a docstring; but still need to visit children
- return self.generic_visit(node)
- prev = node.get_prev_sibling()
- if not prev:
- return
- if (prev.type == sym.simple_stmt and # type: ignore
- prev[0].type == sym.expr_stmt and _eq in prev[0].children): # type: ignore
- # need to "eval" the string because it's returned in its
- # original form
- docstring = literals.evalString(node[0].value, self.encoding)
- docstring = prepare_docstring(docstring)
- self.add_docstring(prev[0], docstring)
-
- def add_docstring(self, node, docstring):
- # add an item for each assignment target
- for i in range(0, len(node) - 1, 2):
- target = node[i]
- if self.in_init and self.number2name[target.type] == 'power':
- # maybe an attribute assignment -- check necessary conditions
- if ( # node must have two children
- len(target) != 2 or
- # first child must be "self"
- target[0].type != token.NAME or target[0].value != 'self' or
- # second child must be a "trailer" with two children
- self.number2name[target[1].type] != 'trailer' or
- len(target[1]) != 2 or
- # first child must be a dot, second child a name
- target[1][0].type != token.DOT or
- target[1][1].type != token.NAME):
- continue
- name = target[1][1].value
- elif target.type != token.NAME:
- # don't care about other complex targets
- continue
- else:
- name = target.value
- self.add_tag(name)
- if docstring:
- namespace = '.'.join(self.namespace)
- if namespace.startswith(self.scope):
- self.collected[namespace, name] = docstring
+ from typing import Any, Dict, IO, List, Tuple # NOQA
class ModuleAnalyzer(object):
@@ -195,11 +36,11 @@ class ModuleAnalyzer(object):
if ('file', filename) in cls.cache:
return cls.cache['file', filename]
try:
- fileobj = open(filename, 'rb')
+ with open(filename, 'rb') as f:
+ obj = cls(f, modname, filename)
+ cls.cache['file', filename] = obj
except Exception as err:
raise PycodeError('error opening %r' % filename, err)
- obj = cls(fileobj, modname, filename)
- cls.cache['file', filename] = obj
return obj
@classmethod
@@ -223,137 +64,59 @@ class ModuleAnalyzer(object):
return obj
def __init__(self, source, modname, srcname, decoded=False):
- # name of the module
- self.modname = modname
- # name of the source file
- self.srcname = srcname
- # file-like object yielding source lines
- self.source = source
+ # type: (IO, unicode, unicode, bool) -> None
+ self.modname = modname # name of the module
+ self.srcname = srcname # name of the source file
# cache the source code as well
- pos = self.source.tell()
+ pos = source.tell()
if not decoded:
- self.encoding = detect_encoding(self.source.readline)
- self.source.seek(pos)
- self.code = self.source.read().decode(self.encoding)
- self.source.seek(pos)
- self.source = TextIOWrapper(self.source, self.encoding)
+ self.encoding = detect_encoding(source.readline)
+ source.seek(pos)
+ self.code = source.read().decode(self.encoding)
else:
self.encoding = None
- self.code = self.source.read()
- self.source.seek(pos)
+ self.code = source.read()
- # will be filled by tokenize()
- self.tokens = None # type: List[unicode]
# will be filled by parse()
- self.parsetree = None # type: Any
- # will be filled by find_attr_docs()
- self.attr_docs = None # type: List[unicode]
+ self.attr_docs = None # type: Dict[Tuple[unicode, unicode], List[unicode]]
self.tagorder = None # type: Dict[unicode, int]
- # will be filled by find_tags()
- self.tags = None # type: List[unicode]
-
- def tokenize(self):
- """Generate tokens from the source."""
- if self.tokens is not None:
- return
- try:
- self.tokens = list(tokenize.generate_tokens(self.source.readline))
- except tokenize.TokenError as err:
- raise PycodeError('tokenizing failed', err)
- self.source.close()
+ self.tags = None # type: Dict[unicode, Tuple[unicode, int, int]]
def parse(self):
- """Parse the generated source tokens."""
- if self.parsetree is not None:
- return
- self.tokenize()
+ # type: () -> None
+ """Parse the source code."""
try:
- self.parsetree = pydriver.parse_tokens(self.tokens)
- except parse.ParseError as err:
- raise PycodeError('parsing failed', err)
+ parser = Parser(self.code, self.encoding)
+ parser.parse()
+
+ self.attr_docs = {}
+ for (scope, comment) in iteritems(parser.comments):
+ if comment:
+ self.attr_docs[scope] = comment.splitlines() + ['']
+ else:
+ self.attr_docs[scope] = ['']
- def find_attr_docs(self, scope=''):
+ self.tags = parser.definitions
+ self.tagorder = parser.deforders
+ except Exception as exc:
+ raise PycodeError('parsing %r failed: %r' % (self.srcname, exc))
+
+ def find_attr_docs(self):
+ # type: () -> Dict[Tuple[unicode, unicode], List[unicode]]
"""Find class and module-level attributes and their documentation."""
- if self.attr_docs is not None:
- return self.attr_docs
- self.parse()
- attr_visitor = AttrDocVisitor(number2name, scope, self.encoding)
- attr_visitor.visit(self.parsetree)
- self.attr_docs = attr_visitor.collected
- self.tagorder = attr_visitor.tagorder
- # now that we found everything we could in the tree, throw it away
- # (it takes quite a bit of memory for large modules)
- self.parsetree = None
- return attr_visitor.collected
+ if self.attr_docs is None:
+ self.parse()
+
+ return self.attr_docs
def find_tags(self):
+ # type: () -> Dict[unicode, Tuple[unicode, int, int]]
"""Find class, function and method definitions and their location."""
- if self.tags is not None:
- return self.tags
- self.tokenize()
- result = {}
- namespace = [] # type: List[unicode]
- stack = [] # type: List[Tuple[unicode, unicode, unicode, int]]
- indent = 0
- decopos = None
- defline = False
- expect_indent = False
- emptylines = 0
+ if self.tags is None:
+ self.parse()
- def tokeniter(ignore = (token.COMMENT,)):
- for tokentup in self.tokens:
- if tokentup[0] not in ignore:
- yield tokentup
- tokeniter = tokeniter()
- for type, tok, spos, epos, line in tokeniter: # type: ignore
- if expect_indent and type != token.NL:
- if type != token.INDENT:
- # no suite -- one-line definition
- assert stack
- dtype, fullname, startline, _ = stack.pop()
- endline = epos[0]
- namespace.pop()
- result[fullname] = (dtype, startline, endline - emptylines)
- expect_indent = False
- if tok in ('def', 'class'):
- name = next(tokeniter)[1] # type: ignore
- namespace.append(name)
- fullname = '.'.join(namespace)
- stack.append((tok, fullname, decopos or spos[0], indent))
- defline = True
- decopos = None
- elif type == token.OP and tok == '@':
- if decopos is None:
- decopos = spos[0]
- elif type == token.INDENT:
- expect_indent = False
- indent += 1
- elif type == token.DEDENT:
- indent -= 1
- # if the stacklevel is the same as it was before the last
- # def/class block, this dedent closes that block
- if stack and indent == stack[-1][3]:
- dtype, fullname, startline, _ = stack.pop()
- endline = spos[0]
- namespace.pop()
- result[fullname] = (dtype, startline, endline - emptylines)
- elif type == token.NEWLINE:
- # if this line contained a definition, expect an INDENT
- # to start the suite; if there is no such INDENT
- # it's a one-line definition
- if defline:
- defline = False
- expect_indent = True
- emptylines = 0
- elif type == token.NL:
- # count up if line is empty or comment only
- if emptyline_re.match(line):
- emptylines += 1
- else:
- emptylines = 0
- self.tags = result
- return result
+ return self.tags
if __name__ == '__main__':
diff --git a/sphinx/pycode/nodes.py b/sphinx/pycode/nodes.py
deleted file mode 100644
index 52bba1a35..000000000
--- a/sphinx/pycode/nodes.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.pycode.nodes
- ~~~~~~~~~~~~~~~~~~~
-
- Parse tree node implementations.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-
-if False:
- # For type annotation
- from typing import Callable # NOQA
-
-
-class BaseNode(object):
- """
- Node superclass for both terminal and nonterminal nodes.
- """
- parent = None # type: BaseNode
-
- def _eq(self, other):
- raise NotImplementedError
-
- def __eq__(self, other):
- if self.__class__ is not other.__class__:
- return NotImplemented
- return self._eq(other)
-
- def __ne__(self, other):
- if self.__class__ is not other.__class__:
- return NotImplemented
- return not self._eq(other)
-
- __hash__ = None # type: Callable[[object], int]
-
- def get_prev_sibling(self):
- """Return previous child in parent's children, or None."""
- if self.parent is None:
- return None
- for i, child in enumerate(self.parent.children):
- if child is self:
- if i == 0:
- return None
- return self.parent.children[i - 1]
-
- def get_next_sibling(self):
- """Return next child in parent's children, or None."""
- if self.parent is None:
- return None
- for i, child in enumerate(self.parent.children):
- if child is self:
- try:
- return self.parent.children[i + 1]
- except IndexError:
- return None
-
- def get_prev_leaf(self):
- """Return the leaf node that precedes this node in the parse tree."""
- def last_child(node):
- if isinstance(node, Leaf):
- return node
- elif not node.children:
- return None
- else:
- return last_child(node.children[-1])
- if self.parent is None:
- return None
- prev = self.get_prev_sibling()
- if isinstance(prev, Leaf):
- return prev
- elif prev is not None:
- return last_child(prev)
- return self.parent.get_prev_leaf()
-
- def get_next_leaf(self):
- """Return self if leaf, otherwise the leaf node that succeeds this
- node in the parse tree.
- """
- node = self
- while not isinstance(node, Leaf):
- assert node.children
- node = node.children[0]
- return node
-
- def get_lineno(self):
- """Return the line number which generated the invocant node."""
- return self.get_next_leaf().lineno
-
- def get_prefix(self):
- """Return the prefix of the next leaf node."""
- # only leaves carry a prefix
- return self.get_next_leaf().prefix
-
-
-class Node(BaseNode):
- """
- Node implementation for nonterminals.
- """
-
- def __init__(self, type, children, context=None):
- # type of nonterminals is >= 256
- # assert type >= 256, type
- self.type = type
- self.children = list(children)
- for ch in self.children:
- # assert ch.parent is None, repr(ch)
- ch.parent = self
-
- def __repr__(self):
- return '%s(%s, %r)' % (self.__class__.__name__,
- self.type, self.children)
-
- def __str__(self):
- """This reproduces the input source exactly."""
- return ''.join(map(str, self.children))
-
- def _eq(self, other):
- return (self.type, self.children) == (other.type, other.children)
-
- # support indexing the node directly instead of .children
-
- def __getitem__(self, index):
- return self.children[index]
-
- def __iter__(self):
- return iter(self.children)
-
- def __len__(self):
- return len(self.children)
-
-
-class Leaf(BaseNode):
- """
- Node implementation for leaf nodes (terminals).
- """
- prefix = '' # Whitespace and comments preceding this token in the input
- lineno = 0 # Line where this token starts in the input
- column = 0 # Column where this token tarts in the input
-
- def __init__(self, type, value, context=None):
- # type of terminals is below 256
- # assert 0 <= type < 256, type
- self.type = type
- self.value = value
- if context is not None:
- self.prefix, (self.lineno, self.column) = context
-
- def __repr__(self):
- return '%s(%r, %r, %r)' % (self.__class__.__name__,
- self.type, self.value, self.prefix)
-
- def __str__(self):
- """This reproduces the input source exactly."""
- return self.prefix + str(self.value)
-
- def _eq(self, other):
- """Compares two nodes for equality."""
- return (self.type, self.value) == (other.type, other.value)
-
-
-def convert(grammar, raw_node):
- """Convert raw node to a Node or Leaf instance."""
- type, value, context, children = raw_node
- if children or type in grammar.number2symbol:
- # If there's exactly one child, return that child instead of
- # creating a new node.
- if len(children) == 1:
- return children[0]
- return Node(type, children, context=context)
- else:
- return Leaf(type, value, context=context)
-
-
-def nice_repr(node, number2name, prefix=False):
- def _repr(node):
- if isinstance(node, Leaf):
- return "%s(%r)" % (number2name[node.type], node.value)
- else:
- return "%s(%s)" % (number2name[node.type],
- ', '.join(map(_repr, node.children)))
-
- def _prepr(node):
- if isinstance(node, Leaf):
- return "%s(%r, %r)" % (number2name[node.type],
- node.prefix, node.value)
- else:
- return "%s(%s)" % (number2name[node.type],
- ', '.join(map(_prepr, node.children)))
- return (prefix and _prepr or _repr)(node)
-
-
-class NodeVisitor(object):
- def __init__(self, number2name, *args):
- self.number2name = number2name
- self.init(*args)
-
- def init(self, *args):
- pass
-
- def visit(self, node):
- """Visit a node."""
- method = 'visit_' + self.number2name[node.type]
- visitor = getattr(self, method, self.generic_visit)
- return visitor(node)
-
- def generic_visit(self, node):
- """Called if no explicit visitor function exists for a node."""
- if isinstance(node, Node):
- for child in node:
- self.visit(child)
diff --git a/sphinx/pycode/parser.py b/sphinx/pycode/parser.py
new file mode 100644
index 000000000..7460dcfce
--- /dev/null
+++ b/sphinx/pycode/parser.py
@@ -0,0 +1,471 @@
+# -*- coding: utf-8 -*-
+"""
+ sphinx.pycode.parser
+ ~~~~~~~~~~~~~~~~~~~~
+
+    Utilities for parsing and analyzing Python code.
+
+    :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+import ast
+import inspect
+import tokenize
+import itertools
+from token import NAME, NEWLINE, INDENT, DEDENT, NUMBER, OP, STRING
+from tokenize import COMMENT, NL
+
+from six import PY2, text_type
+
+if False:
+ # For type annotation
+ from typing import Any, Dict, IO, List, Tuple # NOQA
+
+comment_re = re.compile(u'^\\s*#: ?(.*)\r?\n?$')
+indent_re = re.compile(u'^\\s*$')
+emptyline_re = re.compile(u'^\\s*(#.*)?$')
+
+
+def get_lvar_names(node, self=None):
+ # type: (ast.AST, ast.expr) -> List[unicode]
+ """Convert assignment-AST to variable names.
+
+    This raises `TypeError` if the assignment does not create a new variable::
+
+ ary[0] = 'foo'
+ dic["bar"] = 'baz'
+ # => TypeError
+ """
+ if self:
+ if PY2:
+ self_id = self.id # type: ignore
+ else:
+ self_id = self.arg
+
+ node_name = node.__class__.__name__
+ if node_name in ('Index', 'Num', 'Slice', 'Str', 'Subscript'):
+ raise TypeError('%r does not create new variable' % node)
+ elif node_name == 'Name':
+ if self is None or node.id == self_id: # type: ignore
+ return [node.id] # type: ignore
+ else:
+ raise TypeError('The assignment %r is not instance variable' % node)
+ elif node_name in ('Tuple', 'List'):
+ members = []
+ for elt in node.elts: # type: ignore
+ try:
+ members.extend(get_lvar_names(elt, self))
+ except TypeError:
+ pass
+ return members
+ elif node_name == 'Attribute':
+ if node.value.__class__.__name__ == 'Name' and self and node.value.id == self_id: # type: ignore # NOQA
+ # instance variable
+ return ["%s" % get_lvar_names(node.attr, self)[0]] # type: ignore
+ else:
+ raise TypeError('The assignment %r is not instance variable' % node)
+ elif node_name == 'str':
+ return [node] # type: ignore
+ elif node_name == 'Starred':
+ return get_lvar_names(node.value, self) # type: ignore
+ else:
+ raise NotImplementedError('Unexpected node name %r' % node_name)
+
+
+def dedent_docstring(s):
+ # type: (unicode) -> unicode
+ """Remove common leading indentation from docstring."""
+ def dummy():
+ # dummy function to mock `inspect.getdoc`.
+ pass
+
+ dummy.__doc__ = s # type: ignore
+ docstring = inspect.getdoc(dummy)
+ return docstring.lstrip("\r\n").rstrip("\r\n")
+
+
+class Token(object):
+ """Better token wrapper for tokenize module."""
+
+ def __init__(self, kind, value, start, end, source):
+ # type: (int, Any, Tuple[int, int], Tuple[int, int], unicode) -> None # NOQA
+ self.kind = kind
+ self.value = value
+ self.start = start
+ self.end = end
+ self.source = source
+
+ def __eq__(self, other):
+ # type: (Any) -> bool
+ if isinstance(other, int):
+ return self.kind == other
+ elif isinstance(other, str):
+ return self.value == other
+ elif isinstance(other, (list, tuple)):
+ return [self.kind, self.value] == list(other)
+ elif other is None:
+ return False
+ else:
+ raise ValueError('Unknown value: %r' % other)
+
+ def __ne__(self, other):
+ # type: (Any) -> bool
+ return not (self == other)
+
+ def match(self, *conditions):
+ # type: (Any) -> bool
+ return any(self == candidate for candidate in conditions)
+
+ def __repr__(self):
+ # type: () -> str
+ return '<Token kind=%r value=%r>' % (tokenize.tok_name[self.kind],
+ self.value.strip())
+
+
+class TokenProcessor(object):
+ def __init__(self, buffers):
+ # type: (List[unicode]) -> None
+ lines = iter(buffers)
+ self.buffers = buffers
+ self.tokens = tokenize.generate_tokens(lambda: next(lines)) # type: ignore # NOQA
+ self.current = None # type: Token
+ self.previous = None # type: Token
+
+ def get_line(self, lineno):
+ # type: (int) -> unicode
+ """Returns specified line."""
+ return self.buffers[lineno - 1]
+
+ def fetch_token(self):
+ # type: () -> Token
+ """Fetch a next token from source code.
+
+        Returns ``None`` when the token sequence is exhausted.
+ """
+ try:
+ self.previous = self.current
+ self.current = Token(*next(self.tokens))
+ except StopIteration:
+ self.current = None
+
+ return self.current
+
+ def fetch_until(self, condition):
+ # type: (Any) -> List[Token]
+ """Fetch tokens until specified token appeared.
+
+ .. note:: This also handles parenthesis well.
+ """
+ tokens = []
+ while self.fetch_token():
+ tokens.append(self.current)
+ if self.current == condition:
+ break
+ elif self.current == [OP, '(']:
+ tokens += self.fetch_until([OP, ')'])
+ elif self.current == [OP, '{']:
+ tokens += self.fetch_until([OP, '}'])
+ elif self.current == [OP, '[']:
+ tokens += self.fetch_until([OP, ']'])
+
+ return tokens
+
+
+class AfterCommentParser(TokenProcessor):
+ """Python source code parser to pick up comment after assignment.
+
+    This parser takes Python code that starts with an assignment statement,
+    and returns the comment for the variable if one exists.
+ """
+
+ def __init__(self, lines):
+ # type: (List[unicode]) -> None
+ super(AfterCommentParser, self).__init__(lines)
+ self.comment = None # type: unicode
+
+ def fetch_rvalue(self):
+ # type: () -> List[Token]
+ """Fetch right-hand value of assignment."""
+ tokens = []
+ while self.fetch_token():
+ tokens.append(self.current)
+ if self.current == [OP, '(']:
+ tokens += self.fetch_until([OP, ')'])
+ elif self.current == [OP, '{']:
+ tokens += self.fetch_until([OP, '}'])
+ elif self.current == [OP, '[']:
+ tokens += self.fetch_until([OP, ']'])
+ elif self.current == INDENT:
+ tokens += self.fetch_until(DEDENT)
+ elif self.current == [OP, ';']:
+ break
+ elif self.current.kind not in (OP, NAME, NUMBER, STRING):
+ break
+
+ return tokens
+
+ def parse(self):
+ # type: () -> None
+ """Parse the code and obtain comment after assignment."""
+ # skip lvalue (until '=' operator)
+ while self.fetch_token() != [OP, '=']:
+ assert self.current
+
+ # skip rvalue
+ self.fetch_rvalue()
+
+ if self.current == COMMENT:
+ self.comment = self.current.value
+
+
+class VariableCommentPicker(ast.NodeVisitor):
+ """Python source code parser to pick up variable comments."""
+
+ def __init__(self, buffers, encoding):
+ # type: (List[unicode], unicode) -> None
+ self.counter = itertools.count()
+ self.buffers = buffers
+ self.encoding = encoding
+ self.context = [] # type: List[unicode]
+ self.current_classes = [] # type: List[unicode]
+ self.current_function = None # type: ast.FunctionDef
+ self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
+ self.previous = None # type: ast.AST
+ self.deforders = {} # type: Dict[unicode, int]
+ super(VariableCommentPicker, self).__init__()
+
+ def add_entry(self, name):
+ # type: (unicode) -> None
+ if self.current_function:
+ if self.current_classes and self.context[-1] == "__init__":
+ # store variable comments inside __init__ method of classes
+ definition = self.context[:-1] + [name]
+ else:
+ return
+ else:
+ definition = self.context + [name]
+
+ self.deforders[".".join(definition)] = next(self.counter)
+
+ def add_variable_comment(self, name, comment):
+ # type: (unicode, unicode) -> None
+ if self.current_function:
+ if self.current_classes and self.context[-1] == "__init__":
+ # store variable comments inside __init__ method of classes
+ context = ".".join(self.context[:-1])
+ else:
+ return
+ else:
+ context = ".".join(self.context)
+
+ self.comments[(context, name)] = comment
+
+ def get_self(self):
+ # type: () -> ast.expr
+        """Returns the first argument (i.e. ``self``) if inside a function."""
+ if self.current_function and self.current_function.args.args:
+ return self.current_function.args.args[0]
+ else:
+ return None
+
+ def get_line(self, lineno):
+ # type: (int) -> unicode
+ """Returns specified line."""
+ return self.buffers[lineno - 1]
+
+ def visit(self, node):
+ # type: (ast.AST) -> None
+        """Visit a node and update ``self.previous`` to it afterwards."""
+ super(VariableCommentPicker, self).visit(node)
+ self.previous = node
+
+ def visit_Assign(self, node):
+ # type: (ast.Assign) -> None
+ """Handles Assign node and pick up a variable comment."""
+ try:
+ varnames = sum([get_lvar_names(t, self=self.get_self()) for t in node.targets], [])
+ current_line = self.get_line(node.lineno)
+ except TypeError:
+ return # this assignment is not new definition!
+
+ # check comments after assignment
+ parser = AfterCommentParser([current_line[node.col_offset:]] +
+ self.buffers[node.lineno:])
+ parser.parse()
+ if parser.comment and comment_re.match(parser.comment):
+ for varname in varnames:
+ self.add_variable_comment(varname, comment_re.sub('\\1', parser.comment))
+ self.add_entry(varname)
+ return
+
+ # check comments before assignment
+ if indent_re.match(current_line[:node.col_offset]):
+ comment_lines = []
+ for i in range(node.lineno - 1):
+ before_line = self.get_line(node.lineno - 1 - i)
+ if comment_re.match(before_line):
+ comment_lines.append(comment_re.sub('\\1', before_line))
+ else:
+ break
+
+ if comment_lines:
+ comment = dedent_docstring('\n'.join(reversed(comment_lines)))
+ for varname in varnames:
+ self.add_variable_comment(varname, comment)
+ self.add_entry(varname)
+ return
+
+ # not commented (record deforders only)
+ for varname in varnames:
+ self.add_entry(varname)
+
+ def visit_Expr(self, node):
+ # type: (ast.Expr) -> None
+ """Handles Expr node and pick up a comment if string."""
+ if (isinstance(self.previous, ast.Assign) and isinstance(node.value, ast.Str)):
+ try:
+ varnames = get_lvar_names(self.previous.targets[0], self.get_self())
+ for varname in varnames:
+ if isinstance(node.value.s, text_type):
+ docstring = node.value.s
+ else:
+ docstring = node.value.s.decode(self.encoding or 'utf-8')
+
+ self.add_variable_comment(varname, dedent_docstring(docstring))
+ self.add_entry(varname)
+ except TypeError:
+ pass # this assignment is not new definition!
+
+ def visit_ClassDef(self, node):
+ # type: (ast.ClassDef) -> None
+ """Handles ClassDef node and set context."""
+ self.current_classes.append(node.name)
+ self.add_entry(node.name)
+ self.context.append(node.name)
+ self.previous = node
+ for child in node.body:
+ self.visit(child)
+ self.context.pop()
+ self.current_classes.pop()
+
+ def visit_FunctionDef(self, node):
+ # type: (ast.FunctionDef) -> None
+ """Handles FunctionDef node and set context."""
+ if self.current_function is None:
+ self.add_entry(node.name) # should be called before setting self.current_function
+ self.context.append(node.name)
+ self.current_function = node
+ for child in node.body:
+ self.visit(child)
+ self.context.pop()
+ self.current_function = None
+
+
+class DefinitionFinder(TokenProcessor):
+ def __init__(self, lines):
+ # type: (List[unicode]) -> None
+ super(DefinitionFinder, self).__init__(lines)
+ self.decorator = None # type: Token
+ self.context = [] # type: List[unicode]
+ self.indents = [] # type: List
+ self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+
+ def add_definition(self, name, entry):
+ # type: (unicode, Tuple[unicode, int, int]) -> None
+ if self.indents and self.indents[-1][0] == 'def' and entry[0] == 'def':
+ # ignore definition of inner function
+ pass
+ else:
+ self.definitions[name] = entry
+
+ def parse(self):
+ # type: () -> None
+ while True:
+ token = self.fetch_token()
+ if token is None:
+ break
+ elif token == COMMENT:
+ pass
+ elif token == [OP, '@'] and (self.previous is None or
+ self.previous.match(NEWLINE, NL, INDENT, DEDENT)):
+ if self.decorator is None:
+ self.decorator = token
+ elif token.match([NAME, 'class']):
+ self.parse_definition('class')
+ elif token.match([NAME, 'def']):
+ self.parse_definition('def')
+ elif token == INDENT:
+ self.indents.append(('other', None, None))
+ elif token == DEDENT:
+ self.finalize_block()
+
+ def parse_definition(self, typ):
+ # type: (unicode) -> None
+ name = self.fetch_token()
+ self.context.append(name.value)
+ funcname = '.'.join(self.context)
+
+ if self.decorator:
+ start_pos = self.decorator.start[0]
+ self.decorator = None
+ else:
+ start_pos = name.start[0]
+
+ self.fetch_until([OP, ':'])
+ if self.fetch_token().match(COMMENT, NEWLINE):
+ self.fetch_until(INDENT)
+ self.indents.append((typ, funcname, start_pos))
+ else:
+ # one-liner
+ self.add_definition(funcname, (typ, start_pos, name.end[0]))
+ self.context.pop()
+
+ def finalize_block(self):
+ # type: () -> None
+ definition = self.indents.pop()
+ if definition[0] != 'other':
+ typ, funcname, start_pos = definition
+ end_pos = self.current.end[0] - 1
+ while emptyline_re.match(self.get_line(end_pos)):
+ end_pos -= 1
+
+ self.add_definition(funcname, (typ, start_pos, end_pos))
+ self.context.pop()
+
+
+class Parser(object):
+ """Python source code parser to pick up variable comments.
+
+ This is a better wrapper for ``VariableCommentPicker``.
+ """
+
+ def __init__(self, code, encoding='utf-8'):
+ # type: (unicode, unicode) -> None
+ self.code = code
+ self.encoding = encoding
+ self.comments = {} # type: Dict[Tuple[unicode, unicode], unicode]
+ self.deforders = {} # type: Dict[unicode, int]
+ self.definitions = {} # type: Dict[unicode, Tuple[unicode, int, int]]
+
+ def parse(self):
+ # type: () -> None
+ """Parse the source code."""
+ self.parse_comments()
+ self.parse_definition()
+
+ def parse_comments(self):
+ # type: () -> None
+ """Parse the code and pick up comments."""
+ tree = ast.parse(self.code.encode('utf-8'))
+ picker = VariableCommentPicker(self.code.splitlines(True), self.encoding)
+ picker.visit(tree)
+ self.comments = picker.comments
+ self.deforders = picker.deforders
+
+ def parse_definition(self):
+ # type: () -> None
+ """Parse the location of definitions from the code."""
+ parser = DefinitionFinder(self.code.splitlines(True))
+ parser.parse()
+ self.definitions = parser.definitions
diff --git a/sphinx/pycode/pgen2/__init__.py b/sphinx/pycode/pgen2/__init__.py
deleted file mode 100644
index af3904845..000000000
--- a/sphinx/pycode/pgen2/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""The pgen2 package."""
diff --git a/sphinx/pycode/pgen2/driver.py b/sphinx/pycode/pgen2/driver.py
deleted file mode 100644
index 90476ed00..000000000
--- a/sphinx/pycode/pgen2/driver.py
+++ /dev/null
@@ -1,154 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-# Modifications:
-# Copyright 2006 Google, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Parser driver.
-
-This provides a high-level interface to parse a file into a syntax tree.
-
-"""
-
-__author__ = "Guido van Rossum <guido@python.org>"
-
-__all__ = ["Driver", "load_grammar"]
-
-# Python imports
-import os
-import logging
-
-import sphinx
-
-# Pgen imports
-from sphinx.pycode.pgen2 import grammar, parse, token, tokenize, pgen
-
-
-class Driver(object):
-
- def __init__(self, grammar, convert=None, logger=None):
- self.grammar = grammar
- if logger is None:
- logger = logging.getLogger()
- self.logger = logger
- self.convert = convert
-
- def parse_tokens(self, tokens, debug=False):
- """Parse a series of tokens and return the syntax tree."""
- # X X X Move the prefix computation into a wrapper around tokenize.
- p = parse.Parser(self.grammar, self.convert)
- p.setup()
- lineno = 1
- column = 0
- type = value = start = end = line_text = None
- prefix = ""
- opmap = grammar.opmap
- for type, value, start, end, line_text in tokens:
- if start != (lineno, column):
- assert (lineno, column) <= start, ((lineno, column), start)
- s_lineno, s_column = start
- if lineno < s_lineno:
- prefix += "\n" * (s_lineno - lineno)
- lineno = s_lineno
- column = 0
- if column < s_column:
- prefix += line_text[column:s_column]
- column = s_column
- if type in (tokenize.COMMENT, tokenize.NL):
- prefix += value
- lineno, column = end
- if value.endswith("\n"):
- lineno += 1
- column = 0
- continue
- if type == token.OP:
- type = opmap[value]
- # if debug:
- # self.logger.debug("%s %r (prefix=%r)",
- # token.tok_name[type], value, prefix)
- if p.addtoken(type, value, (prefix, start)):
- # if debug:
- # self.logger.debug("Stop.")
- break
- prefix = ""
- lineno, column = end
- if value.endswith("\n"):
- lineno += 1
- column = 0
- else:
- # We never broke out -- EOF is too soon (how can this happen???)
- raise parse.ParseError("incomplete input", type, value, line_text)
- return p.rootnode
-
- def parse_stream_raw(self, stream, debug=False):
- """Parse a stream and return the syntax tree."""
- tokens = tokenize.generate_tokens(stream.readline)
- return self.parse_tokens(tokens, debug)
-
- def parse_stream(self, stream, debug=False):
- """Parse a stream and return the syntax tree."""
- return self.parse_stream_raw(stream, debug)
-
- def parse_file(self, filename, debug=False):
- """Parse a file and return the syntax tree."""
- with open(filename) as stream:
- return self.parse_stream(stream, debug)
-
- def parse_string(self, text, debug=False):
- """Parse a string and return the syntax tree."""
- tokens = tokenize.generate_tokens(generate_lines(text).next)
- return self.parse_tokens(tokens, debug)
-
-
-def generate_lines(text):
- """Generator that behaves like readline without using StringIO."""
- for line in text.splitlines(True):
- yield line
- while True:
- yield ""
-
-
-def get_compiled_path(filename):
- head, tail = os.path.splitext(filename)
- if tail == ".txt":
- tail = ""
- return "%s%s.pickle" % (head, tail)
-
-
-def compile_grammar(gt='Grammar.txt', logger=None):
- """Compile the grammer."""
- if logger is None:
- logger = logging.getLogger()
-
- logger.info("Generating grammar tables from %s", gt)
- g = pgen.generate_grammar(gt)
- gp = get_compiled_path(gt)
- logger.info("Writing grammar tables to %s", gp)
- try:
- g.dump(gp)
- except IOError as e:
- logger.info("Writing failed:"+str(e))
-
-
-def load_grammar(gt="Grammar.txt", logger=None):
- """Load the grammar (maybe from a pickle)."""
- if logger is None:
- logger = logging.getLogger()
- gp = get_compiled_path(gt)
- if not os.path.exists(gp):
- logger.info("Generating grammar tables from %s", gt)
- g = pgen.generate_grammar(gt)
- else:
- g = grammar.Grammar()
- g.load(gp)
- return g
-
-
-def _newer(a, b):
- """Inquire whether file a was written since file b."""
- if not os.path.exists(a):
- return False
- if not os.path.exists(b):
- return True
- return os.path.getmtime(a) >= os.path.getmtime(b)
diff --git a/sphinx/pycode/pgen2/grammar.py b/sphinx/pycode/pgen2/grammar.py
deleted file mode 100644
index ac276776e..000000000
--- a/sphinx/pycode/pgen2/grammar.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""This module defines the data structures used to represent a grammar.
-
-These are a bit arcane because they are derived from the data
-structures used by Python's 'pgen' parser generator.
-
-There's also a table here mapping operators to their names in the
-token module; the Python tokenize module reports all operators as the
-fallback token code OP, but the parser needs the actual token code.
-
-"""
-from __future__ import print_function
-
-# Python imports
-import pickle
-
-# Local imports
-from sphinx.pycode.pgen2 import token
-
-if False:
- # For type annotation
- from typing import Dict, List, Tuple # NOQA
-
-
-class Grammar(object):
- """Pgen parsing tables tables conversion class.
-
- Once initialized, this class supplies the grammar tables for the
- parsing engine implemented by parse.py. The parsing engine
- accesses the instance variables directly. The class here does not
- provide initialization of the tables; several subclasses exist to
- do this (see the conv and pgen modules).
-
- The load() method reads the tables from a pickle file, which is
- much faster than the other ways offered by subclasses. The pickle
- file is written by calling dump() (after loading the grammar
- tables using a subclass). The report() method prints a readable
- representation of the tables to stdout, for debugging.
-
- The instance variables are as follows:
-
- symbol2number -- a dict mapping symbol names to numbers. Symbol
- numbers are always 256 or higher, to distinguish
- them from token numbers, which are between 0 and
- 255 (inclusive).
-
- number2symbol -- a dict mapping numbers to symbol names;
- these two are each other's inverse.
-
- states -- a list of DFAs, where each DFA is a list of
- states, each state is is a list of arcs, and each
- arc is a (i, j) pair where i is a label and j is
- a state number. The DFA number is the index into
- this list. (This name is slightly confusing.)
- Final states are represented by a special arc of
- the form (0, j) where j is its own state number.
-
- dfas -- a dict mapping symbol numbers to (DFA, first)
- pairs, where DFA is an item from the states list
- above, and first is a set of tokens that can
- begin this grammar rule (represented by a dict
- whose values are always 1).
-
- labels -- a list of (x, y) pairs where x is either a token
- number or a symbol number, and y is either None
- or a string; the strings are keywords. The label
- number is the index in this list; label numbers
- are used to mark state transitions (arcs) in the
- DFAs.
-
- start -- the number of the grammar's start symbol.
-
- keywords -- a dict mapping keyword strings to arc labels.
-
- tokens -- a dict mapping token numbers to arc labels.
-
- """
-
- def __init__(self):
- self.symbol2number = {} # type: Dict[unicode, int]
- self.number2symbol = {} # type: Dict[int, unicode]
- self.states = [] # type: List[List[List[Tuple[int, int]]]]
- self.dfas = {} # type: Dict[int, Tuple[List[List[Tuple[int, int]]], unicode]]
- self.labels = [(0, "EMPTY")]
- self.keywords = {} # type: Dict[unicode, unicode]
- self.tokens = {} # type: Dict[unicode, unicode]
- self.symbol2label = {} # type: Dict[unicode, unicode]
- self.start = 256
-
- def dump(self, filename):
- """Dump the grammar tables to a pickle file."""
- f = open(filename, "wb")
- pickle.dump(self.__dict__, f, 2)
- f.close()
-
- def load(self, filename):
- """Load the grammar tables from a pickle file."""
- f = open(filename, "rb")
- d = pickle.load(f)
- f.close()
- self.__dict__.update(d)
-
- def report(self):
- """Dump the grammar tables to standard output, for debugging."""
- from pprint import pprint
- print("s2n")
- pprint(self.symbol2number)
- print("n2s")
- pprint(self.number2symbol)
- print("states")
- pprint(self.states)
- print("dfas")
- pprint(self.dfas)
- print("labels")
- pprint(self.labels)
- print("start", self.start)
-
-
-# Map from operator to number (since tokenize doesn't do this)
-
-opmap_raw = """
-( LPAR
-) RPAR
-[ LSQB
-] RSQB
-: COLON
-, COMMA
-; SEMI
-+ PLUS
-- MINUS
-* STAR
-/ SLASH
-| VBAR
-& AMPER
-< LESS
-> GREATER
-= EQUAL
-. DOT
-% PERCENT
-` BACKQUOTE
-{ LBRACE
-} RBRACE
-@ AT
-@= ATEQUAL
-== EQEQUAL
-!= NOTEQUAL
-<> NOTEQUAL
-<= LESSEQUAL
->= GREATEREQUAL
-~ TILDE
-^ CIRCUMFLEX
-<< LEFTSHIFT
->> RIGHTSHIFT
-** DOUBLESTAR
-+= PLUSEQUAL
--= MINEQUAL
-*= STAREQUAL
-/= SLASHEQUAL
-%= PERCENTEQUAL
-&= AMPEREQUAL
-|= VBAREQUAL
-^= CIRCUMFLEXEQUAL
-<<= LEFTSHIFTEQUAL
->>= RIGHTSHIFTEQUAL
-**= DOUBLESTAREQUAL
-// DOUBLESLASH
-//= DOUBLESLASHEQUAL
--> RARROW
-... ELLIPSIS
-"""
-
-opmap = {}
-for line in opmap_raw.splitlines():
- if line:
- op, name = line.split()
- opmap[op] = getattr(token, name)
diff --git a/sphinx/pycode/pgen2/literals.py b/sphinx/pycode/pgen2/literals.py
deleted file mode 100644
index 25e09b62d..000000000
--- a/sphinx/pycode/pgen2/literals.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-# Extended to handle raw and unicode literals by Georg Brandl.
-
-"""Safely evaluate Python string literals without using eval()."""
-from __future__ import print_function
-
-import re
-
-from six import text_type
-
-
-simple_escapes = {"a": "\a",
- "b": "\b",
- "f": "\f",
- "n": "\n",
- "r": "\r",
- "t": "\t",
- "v": "\v",
- "'": "'",
- '"': '"',
- "\\": "\\"}
-
-def convert_hex(x, n):
- if len(x) < n+1:
- raise ValueError("invalid hex string escape ('\\%s')" % x)
- try:
- return int(x[1:], 16)
- except ValueError:
- raise ValueError("invalid hex string escape ('\\%s')" % x)
-
-def escape(m):
- all, tail = m.group(0, 1)
- assert all.startswith("\\")
- esc = simple_escapes.get(tail)
- if esc is not None:
- return esc
- elif tail.startswith("x"):
- return chr(convert_hex(tail, 2))
- elif tail.startswith('u'):
- return unichr(convert_hex(tail, 4))
- elif tail.startswith('U'):
- return unichr(convert_hex(tail, 8))
- elif tail.startswith('N'):
- import unicodedata
- try:
- return unicodedata.lookup(tail[1:-1])
- except KeyError:
- raise ValueError("undefined character name %r" % tail[1:-1])
- else:
- try:
- return chr(int(tail, 8))
- except ValueError:
- raise ValueError("invalid octal string escape ('\\%s')" % tail)
-
-def escaperaw(m):
- all, tail = m.group(0, 1)
- if tail.startswith('u'):
- return unichr(convert_hex(tail, 4))
- elif tail.startswith('U'):
- return unichr(convert_hex(tail, 8))
- else:
- return all
-
-escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})")
-uni_escape_re = re.compile(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3}|"
- r"u[0-9a-fA-F]{0,4}|U[0-9a-fA-F]{0,8}|N\{.+?\})")
-
-def evalString(s, encoding=None):
- regex = escape_re
- repl = escape
- if encoding and not isinstance(s, text_type):
- s = s.decode(encoding)
- if s.startswith('u') or s.startswith('U'):
- regex = uni_escape_re
- s = s[1:]
- if s.startswith('r') or s.startswith('R'):
- repl = escaperaw
- s = s[1:]
- assert s.startswith("'") or s.startswith('"'), repr(s[:1])
- q = s[0]
- if s[:3] == q*3:
- q = q*3
- assert s.endswith(q), repr(s[-len(q):])
- assert len(s) >= 2*len(q)
- s = s[len(q):-len(q)]
- return regex.sub(repl, s)
-
-def test():
- for i in range(256):
- c = chr(i)
- s = repr(c)
- e = evalString(s)
- if e != c:
- print(i, c, s, e)
-
-
-if __name__ == "__main__":
- test()
diff --git a/sphinx/pycode/pgen2/parse.c b/sphinx/pycode/pgen2/parse.c
deleted file mode 100644
index 96fa6c8b2..000000000
--- a/sphinx/pycode/pgen2/parse.c
+++ /dev/null
@@ -1,4544 +0,0 @@
-/* Generated by Cython 0.12 on Fri Jan 22 10:39:58 2010 */
-
-#define PY_SSIZE_T_CLEAN
-#include "Python.h"
-#include "structmember.h"
-#ifndef Py_PYTHON_H
- #error Python headers needed to compile C extensions, please install development version of Python.
-#else
-#ifndef PY_LONG_LONG
- #define PY_LONG_LONG LONG_LONG
-#endif
-#ifndef DL_EXPORT
- #define DL_EXPORT(t) t
-#endif
-#if PY_VERSION_HEX < 0x02040000
- #define METH_COEXIST 0
- #define PyDict_CheckExact(op) (Py_TYPE(op) == &PyDict_Type)
- #define PyDict_Contains(d,o) PySequence_Contains(d,o)
-#endif
-#if PY_VERSION_HEX < 0x02050000
- typedef int Py_ssize_t;
- #define PY_SSIZE_T_MAX INT_MAX
- #define PY_SSIZE_T_MIN INT_MIN
- #define PY_FORMAT_SIZE_T ""
- #define PyInt_FromSsize_t(z) PyInt_FromLong(z)
- #define PyInt_AsSsize_t(o) PyInt_AsLong(o)
- #define PyNumber_Index(o) PyNumber_Int(o)
- #define PyIndex_Check(o) PyNumber_Check(o)
-#endif
-#if PY_VERSION_HEX < 0x02060000
- #define Py_REFCNT(ob) (((PyObject*)(ob))->ob_refcnt)
- #define Py_TYPE(ob) (((PyObject*)(ob))->ob_type)
- #define Py_SIZE(ob) (((PyVarObject*)(ob))->ob_size)
- #define PyVarObject_HEAD_INIT(type, size) \
- PyObject_HEAD_INIT(type) size,
- #define PyType_Modified(t)
-
- typedef struct {
- void *buf;
- PyObject *obj;
- Py_ssize_t len;
- Py_ssize_t itemsize;
- int readonly;
- int ndim;
- char *format;
- Py_ssize_t *shape;
- Py_ssize_t *strides;
- Py_ssize_t *suboffsets;
- void *internal;
- } Py_buffer;
-
- #define PyBUF_SIMPLE 0
- #define PyBUF_WRITABLE 0x0001
- #define PyBUF_FORMAT 0x0004
- #define PyBUF_ND 0x0008
- #define PyBUF_STRIDES (0x0010 | PyBUF_ND)
- #define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES)
- #define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES)
- #define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES)
- #define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES)
-
-#endif
-#if PY_MAJOR_VERSION < 3
- #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
-#else
- #define __Pyx_BUILTIN_MODULE_NAME "builtins"
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define Py_TPFLAGS_CHECKTYPES 0
- #define Py_TPFLAGS_HAVE_INDEX 0
-#endif
-#if (PY_VERSION_HEX < 0x02060000) || (PY_MAJOR_VERSION >= 3)
- #define Py_TPFLAGS_HAVE_NEWBUFFER 0
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyBaseString_Type PyUnicode_Type
- #define PyString_Type PyUnicode_Type
- #define PyString_CheckExact PyUnicode_CheckExact
-#else
- #define PyBytes_Type PyString_Type
- #define PyBytes_CheckExact PyString_CheckExact
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyInt_Type PyLong_Type
- #define PyInt_Check(op) PyLong_Check(op)
- #define PyInt_CheckExact(op) PyLong_CheckExact(op)
- #define PyInt_FromString PyLong_FromString
- #define PyInt_FromUnicode PyLong_FromUnicode
- #define PyInt_FromLong PyLong_FromLong
- #define PyInt_FromSize_t PyLong_FromSize_t
- #define PyInt_FromSsize_t PyLong_FromSsize_t
- #define PyInt_AsLong PyLong_AsLong
- #define PyInt_AS_LONG PyLong_AS_LONG
- #define PyInt_AsSsize_t PyLong_AsSsize_t
- #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
- #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
-#else
- #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
- #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
-#endif
-#if PY_MAJOR_VERSION >= 3
- #define PyMethod_New(func, self, klass) PyInstanceMethod_New(func)
-#endif
-#if !defined(WIN32) && !defined(MS_WINDOWS)
- #ifndef __stdcall
- #define __stdcall
- #endif
- #ifndef __cdecl
- #define __cdecl
- #endif
- #ifndef __fastcall
- #define __fastcall
- #endif
-#else
- #define _USE_MATH_DEFINES
-#endif
-#if PY_VERSION_HEX < 0x02050000
- #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),((char *)(n)))
- #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),((char *)(n)),(a))
- #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),((char *)(n)))
-#else
- #define __Pyx_GetAttrString(o,n) PyObject_GetAttrString((o),(n))
- #define __Pyx_SetAttrString(o,n,a) PyObject_SetAttrString((o),(n),(a))
- #define __Pyx_DelAttrString(o,n) PyObject_DelAttrString((o),(n))
-#endif
-#if PY_VERSION_HEX < 0x02050000
- #define __Pyx_NAMESTR(n) ((char *)(n))
- #define __Pyx_DOCSTR(n) ((char *)(n))
-#else
- #define __Pyx_NAMESTR(n) (n)
- #define __Pyx_DOCSTR(n) (n)
-#endif
-#ifdef __cplusplus
-#define __PYX_EXTERN_C extern "C"
-#else
-#define __PYX_EXTERN_C extern
-#endif
-#include <math.h>
-#define __PYX_HAVE_API__sphinx__pycode__pgen2__parse
-
-#ifdef __GNUC__
-#define INLINE __inline__
-#elif _WIN32
-#define INLINE __inline
-#else
-#define INLINE
-#endif
-
-typedef struct {PyObject **p; char *s; const long n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
-
-
-/* Type Conversion Predeclarations */
-
-#if PY_MAJOR_VERSION < 3
-#define __Pyx_PyBytes_FromString PyString_FromString
-#define __Pyx_PyBytes_FromStringAndSize PyString_FromStringAndSize
-#define __Pyx_PyBytes_AsString PyString_AsString
-#else
-#define __Pyx_PyBytes_FromString PyBytes_FromString
-#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
-#define __Pyx_PyBytes_AsString PyBytes_AsString
-#endif
-
-#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((char*)s)
-#define __Pyx_PyBytes_AsUString(s) ((unsigned char*) __Pyx_PyBytes_AsString(s))
-
-#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
-static INLINE int __Pyx_PyObject_IsTrue(PyObject*);
-static INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
-
-#if !defined(T_PYSSIZET)
-#if PY_VERSION_HEX < 0x02050000
-#define T_PYSSIZET T_INT
-#elif !defined(T_LONGLONG)
-#define T_PYSSIZET \
- ((sizeof(Py_ssize_t) == sizeof(int)) ? T_INT : \
- ((sizeof(Py_ssize_t) == sizeof(long)) ? T_LONG : -1))
-#else
-#define T_PYSSIZET \
- ((sizeof(Py_ssize_t) == sizeof(int)) ? T_INT : \
- ((sizeof(Py_ssize_t) == sizeof(long)) ? T_LONG : \
- ((sizeof(Py_ssize_t) == sizeof(PY_LONG_LONG)) ? T_LONGLONG : -1)))
-#endif
-#endif
-
-
-#if !defined(T_ULONGLONG)
-#define __Pyx_T_UNSIGNED_INT(x) \
- ((sizeof(x) == sizeof(unsigned char)) ? T_UBYTE : \
- ((sizeof(x) == sizeof(unsigned short)) ? T_USHORT : \
- ((sizeof(x) == sizeof(unsigned int)) ? T_UINT : \
- ((sizeof(x) == sizeof(unsigned long)) ? T_ULONG : -1))))
-#else
-#define __Pyx_T_UNSIGNED_INT(x) \
- ((sizeof(x) == sizeof(unsigned char)) ? T_UBYTE : \
- ((sizeof(x) == sizeof(unsigned short)) ? T_USHORT : \
- ((sizeof(x) == sizeof(unsigned int)) ? T_UINT : \
- ((sizeof(x) == sizeof(unsigned long)) ? T_ULONG : \
- ((sizeof(x) == sizeof(unsigned PY_LONG_LONG)) ? T_ULONGLONG : -1)))))
-#endif
-#if !defined(T_LONGLONG)
-#define __Pyx_T_SIGNED_INT(x) \
- ((sizeof(x) == sizeof(char)) ? T_BYTE : \
- ((sizeof(x) == sizeof(short)) ? T_SHORT : \
- ((sizeof(x) == sizeof(int)) ? T_INT : \
- ((sizeof(x) == sizeof(long)) ? T_LONG : -1))))
-#else
-#define __Pyx_T_SIGNED_INT(x) \
- ((sizeof(x) == sizeof(char)) ? T_BYTE : \
- ((sizeof(x) == sizeof(short)) ? T_SHORT : \
- ((sizeof(x) == sizeof(int)) ? T_INT : \
- ((sizeof(x) == sizeof(long)) ? T_LONG : \
- ((sizeof(x) == sizeof(PY_LONG_LONG)) ? T_LONGLONG : -1)))))
-#endif
-
-#define __Pyx_T_FLOATING(x) \
- ((sizeof(x) == sizeof(float)) ? T_FLOAT : \
- ((sizeof(x) == sizeof(double)) ? T_DOUBLE : -1))
-
-#if !defined(T_SIZET)
-#if !defined(T_ULONGLONG)
-#define T_SIZET \
- ((sizeof(size_t) == sizeof(unsigned int)) ? T_UINT : \
- ((sizeof(size_t) == sizeof(unsigned long)) ? T_ULONG : -1))
-#else
-#define T_SIZET \
- ((sizeof(size_t) == sizeof(unsigned int)) ? T_UINT : \
- ((sizeof(size_t) == sizeof(unsigned long)) ? T_ULONG : \
- ((sizeof(size_t) == sizeof(unsigned PY_LONG_LONG)) ? T_ULONGLONG : -1)))
-#endif
-#endif
-
-static INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
-static INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
-static INLINE size_t __Pyx_PyInt_AsSize_t(PyObject*);
-
-#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
-
-
-#ifdef __GNUC__
-/* Test for GCC > 2.95 */
-#if __GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))
-#define likely(x) __builtin_expect(!!(x), 1)
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#else /* __GNUC__ > 2 ... */
-#define likely(x) (x)
-#define unlikely(x) (x)
-#endif /* __GNUC__ > 2 ... */
-#else /* __GNUC__ */
-#define likely(x) (x)
-#define unlikely(x) (x)
-#endif /* __GNUC__ */
-
-static PyObject *__pyx_m;
-static PyObject *__pyx_b;
-static PyObject *__pyx_empty_tuple;
-static PyObject *__pyx_empty_bytes;
-static int __pyx_lineno;
-static int __pyx_clineno = 0;
-static const char * __pyx_cfilenm= __FILE__;
-static const char *__pyx_filename;
-static const char **__pyx_f;
-
-
-/* Type declarations */
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":31
- *
- *
- * cdef class Parser: # <<<<<<<<<<<<<<
- * cdef public object grammar
- * cdef public object rootnode
- */
-
-struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser {
- PyObject_HEAD
- struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtab;
- PyObject *grammar;
- PyObject *rootnode;
- PyObject *stack;
- PyObject *used_names;
- int _grammar_start;
- PyObject *_grammar_labels;
- PyObject *_grammar_dfas;
- PyObject *_grammar_keywords;
- PyObject *_grammar_tokens;
- PyObject *_grammar_number2symbol;
-};
-
-
-struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser {
- int (*classify)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, int, PyObject *, PyObject *);
- void (*shift)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *);
- void (*push)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *);
- void (*pop)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *);
- PyObject *(*convert)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *);
-};
-static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser;
-
-#ifndef CYTHON_REFNANNY
- #define CYTHON_REFNANNY 0
-#endif
-
-#if CYTHON_REFNANNY
- typedef struct {
- void (*INCREF)(void*, PyObject*, int);
- void (*DECREF)(void*, PyObject*, int);
- void (*GOTREF)(void*, PyObject*, int);
- void (*GIVEREF)(void*, PyObject*, int);
- void* (*SetupContext)(const char*, int, const char*);
- void (*FinishContext)(void**);
- } __Pyx_RefNannyAPIStruct;
- static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
- static __Pyx_RefNannyAPIStruct * __Pyx_RefNannyImportAPI(const char *modname) {
- PyObject *m = NULL, *p = NULL;
- void *r = NULL;
- m = PyImport_ImportModule((char *)modname);
- if (!m) goto end;
- p = PyObject_GetAttrString(m, (char *)"RefNannyAPI");
- if (!p) goto end;
- r = PyLong_AsVoidPtr(p);
- end:
- Py_XDECREF(p);
- Py_XDECREF(m);
- return (__Pyx_RefNannyAPIStruct *)r;
- }
- #define __Pyx_RefNannySetupContext(name) void *__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
- #define __Pyx_RefNannyFinishContext() __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
- #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
- #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
- #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
- #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
- #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r);} } while(0)
-#else
- #define __Pyx_RefNannySetupContext(name)
- #define __Pyx_RefNannyFinishContext()
- #define __Pyx_INCREF(r) Py_INCREF(r)
- #define __Pyx_DECREF(r) Py_DECREF(r)
- #define __Pyx_GOTREF(r)
- #define __Pyx_GIVEREF(r)
- #define __Pyx_XDECREF(r) Py_XDECREF(r)
-#endif /* CYTHON_REFNANNY */
-#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);} } while(0)
-#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r);} } while(0)
-
-static void __Pyx_RaiseDoubleKeywordsError(
- const char* func_name, PyObject* kw_name); /*proto*/
-
-static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
- Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
-
-static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name); /*proto*/
-
-static INLINE PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
- PyObject *r;
- if (!j) return NULL;
- r = PyObject_GetItem(o, j);
- Py_DECREF(j);
- return r;
-}
-
-
-#define __Pyx_GetItemInt_List(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_List_Fast(o, i, size <= sizeof(long)) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-
-static INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
- if (likely(o != Py_None)) {
- if (likely((0 <= i) & (i < PyList_GET_SIZE(o)))) {
- PyObject *r = PyList_GET_ITEM(o, i);
- Py_INCREF(r);
- return r;
- }
- else if ((-PyList_GET_SIZE(o) <= i) & (i < 0)) {
- PyObject *r = PyList_GET_ITEM(o, PyList_GET_SIZE(o) + i);
- Py_INCREF(r);
- return r;
- }
- }
- return __Pyx_GetItemInt_Generic(o, fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i));
-}
-
-#define __Pyx_GetItemInt_Tuple(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_Tuple_Fast(o, i, size <= sizeof(long)) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-
-static INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
- if (likely(o != Py_None)) {
- if (likely((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
- PyObject *r = PyTuple_GET_ITEM(o, i);
- Py_INCREF(r);
- return r;
- }
- else if ((-PyTuple_GET_SIZE(o) <= i) & (i < 0)) {
- PyObject *r = PyTuple_GET_ITEM(o, PyTuple_GET_SIZE(o) + i);
- Py_INCREF(r);
- return r;
- }
- }
- return __Pyx_GetItemInt_Generic(o, fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i));
-}
-
-
-#define __Pyx_GetItemInt(o, i, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \
- __Pyx_GetItemInt_Fast(o, i, size <= sizeof(long)) : \
- __Pyx_GetItemInt_Generic(o, to_py_func(i)))
-
-static INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int fits_long) {
- PyObject *r;
- if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
- r = PyList_GET_ITEM(o, i);
- Py_INCREF(r);
- }
- else if (PyTuple_CheckExact(o) && ((0 <= i) & (i < PyTuple_GET_SIZE(o)))) {
- r = PyTuple_GET_ITEM(o, i);
- Py_INCREF(r);
- }
- else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_item && (likely(i >= 0))) {
- r = PySequence_GetItem(o, i);
- }
- else {
- r = __Pyx_GetItemInt_Generic(o, fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i));
- }
- return r;
-}
-
-static INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
-
-static INLINE void __Pyx_RaiseTooManyValuesError(void);
-
-static PyObject *__Pyx_UnpackItem(PyObject *, Py_ssize_t index); /*proto*/
-static int __Pyx_EndUnpack(PyObject *); /*proto*/
-
-static INLINE long __Pyx_NegateNonNeg(long b) { return unlikely(b < 0) ? b : !b; }
-static INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) {
- return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b);
-}
-
-static INLINE void __Pyx_RaiseNoneNotIterableError(void);
-
-static INLINE PyObject* __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
- if (likely(PyList_CheckExact(L))) {
- if (PyList_Append(L, x) < 0) return NULL;
- Py_INCREF(Py_None);
- return Py_None; /* this is just to have an accurate signature */
- }
- else {
- PyObject *r, *m;
- m = __Pyx_GetAttrString(L, "append");
- if (!m) return NULL;
- r = PyObject_CallFunctionObjArgs(m, x, NULL);
- Py_DECREF(m);
- return r;
- }
-}
-
-#define __Pyx_SetItemInt(o, i, v, size, to_py_func) ((size <= sizeof(Py_ssize_t)) ? \
- __Pyx_SetItemInt_Fast(o, i, v, size <= sizeof(long)) : \
- __Pyx_SetItemInt_Generic(o, to_py_func(i), v))
-
-static INLINE int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
- int r;
- if (!j) return -1;
- r = PyObject_SetItem(o, j, v);
- Py_DECREF(j);
- return r;
-}
-
-static INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int fits_long) {
- if (PyList_CheckExact(o) && ((0 <= i) & (i < PyList_GET_SIZE(o)))) {
- Py_INCREF(v);
- Py_DECREF(PyList_GET_ITEM(o, i));
- PyList_SET_ITEM(o, i, v);
- return 1;
- }
- else if (Py_TYPE(o)->tp_as_sequence && Py_TYPE(o)->tp_as_sequence->sq_ass_item && (likely(i >= 0)))
- return PySequence_SetItem(o, i, v);
- else {
- PyObject *j = fits_long ? PyInt_FromLong(i) : PyLong_FromLongLong(i);
- return __Pyx_SetItemInt_Generic(o, j, v);
- }
-}
-
-static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
-
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list); /*proto*/
-
-static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name); /*proto*/
-
-static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name, const char *modname); /*proto*/
-
-static INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
-static INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
-
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb); /*proto*/
-
-static INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject *);
-
-static INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject *);
-
-static INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject *);
-
-static INLINE char __Pyx_PyInt_AsChar(PyObject *);
-
-static INLINE short __Pyx_PyInt_AsShort(PyObject *);
-
-static INLINE int __Pyx_PyInt_AsInt(PyObject *);
-
-static INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject *);
-
-static INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject *);
-
-static INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject *);
-
-static INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject *);
-
-static INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject *);
-
-static INLINE long __Pyx_PyInt_AsLong(PyObject *);
-
-static INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject *);
-
-static INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject *);
-
-static INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject *);
-
-static void __Pyx_WriteUnraisable(const char *name); /*proto*/
-
-static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
-
-static void __Pyx_AddTraceback(const char *funcname); /*proto*/
-
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
-/* Module declarations from sphinx.pycode.pgen2.parse */
-
-static PyTypeObject *__pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = 0;
-#define __Pyx_MODULE_NAME "sphinx.pycode.pgen2.parse"
-int __pyx_module_is_main_sphinx__pycode__pgen2__parse = 0;
-
-/* Implementation of sphinx.pycode.pgen2.parse */
-static PyObject *__pyx_builtin_Exception;
-static char __pyx_k_1[] = "%s: type=%r, value=%r, context=%r";
-static char __pyx_k_2[] = "_grammar_number2symbol";
-static char __pyx_k_3[] = "too much input";
-static char __pyx_k_4[] = "bad input";
-static char __pyx_k_5[] = "bad token";
-static char __pyx_k_6[] = "Parser engine for the grammar tables generated by pgen.\n\nThe grammar table must be loaded first.\n\nSee Parser/parser.c in the Python distribution for additional info on\nhow this parsing engine works.\n\n";
-static char __pyx_k_7[] = "sphinx.pycode.nodes";
-static char __pyx_k_8[] = "Exception to signal the parser is stuck.";
-static char __pyx_k_9[] = "Parser.addtoken (line 66)";
-static char __pyx_k__add[] = "add";
-static char __pyx_k__msg[] = "msg";
-static char __pyx_k__pop[] = "pop";
-static char __pyx_k__Leaf[] = "Leaf";
-static char __pyx_k__Node[] = "Node";
-static char __pyx_k__dfas[] = "dfas";
-static char __pyx_k__push[] = "push";
-static char __pyx_k__self[] = "self";
-static char __pyx_k__type[] = "type";
-static char __pyx_k__shift[] = "shift";
-static char __pyx_k__stack[] = "stack";
-static char __pyx_k__start[] = "start";
-static char __pyx_k__value[] = "value";
-static char __pyx_k__Parser[] = "Parser";
-static char __pyx_k__labels[] = "labels";
-static char __pyx_k__tokens[] = "tokens";
-static char __pyx_k__context[] = "context";
-static char __pyx_k__convert[] = "convert";
-static char __pyx_k__grammar[] = "grammar";
-static char __pyx_k____init__[] = "__init__";
-static char __pyx_k____main__[] = "__main__";
-static char __pyx_k____test__[] = "__test__";
-static char __pyx_k__addtoken[] = "addtoken";
-static char __pyx_k__classify[] = "classify";
-static char __pyx_k__keywords[] = "keywords";
-static char __pyx_k__rootnode[] = "rootnode";
-static char __pyx_k__Exception[] = "Exception";
-static char __pyx_k__ParseError[] = "ParseError";
-static char __pyx_k__used_names[] = "used_names";
-static char __pyx_k___grammar_dfas[] = "_grammar_dfas";
-static char __pyx_k__number2symbol[] = "number2symbol";
-static char __pyx_k___grammar_start[] = "_grammar_start";
-static char __pyx_k___grammar_labels[] = "_grammar_labels";
-static char __pyx_k___grammar_tokens[] = "_grammar_tokens";
-static char __pyx_k___grammar_keywords[] = "_grammar_keywords";
-static PyObject *__pyx_kp_s_1;
-static PyObject *__pyx_n_s_2;
-static PyObject *__pyx_kp_s_3;
-static PyObject *__pyx_kp_s_4;
-static PyObject *__pyx_kp_s_5;
-static PyObject *__pyx_n_s_7;
-static PyObject *__pyx_kp_s_8;
-static PyObject *__pyx_kp_u_9;
-static PyObject *__pyx_n_s__Exception;
-static PyObject *__pyx_n_s__Leaf;
-static PyObject *__pyx_n_s__Node;
-static PyObject *__pyx_n_s__ParseError;
-static PyObject *__pyx_n_s__Parser;
-static PyObject *__pyx_n_s____init__;
-static PyObject *__pyx_n_s____main__;
-static PyObject *__pyx_n_s____test__;
-static PyObject *__pyx_n_s___grammar_dfas;
-static PyObject *__pyx_n_s___grammar_keywords;
-static PyObject *__pyx_n_s___grammar_labels;
-static PyObject *__pyx_n_s___grammar_start;
-static PyObject *__pyx_n_s___grammar_tokens;
-static PyObject *__pyx_n_s__add;
-static PyObject *__pyx_n_s__addtoken;
-static PyObject *__pyx_n_s__classify;
-static PyObject *__pyx_n_s__context;
-static PyObject *__pyx_n_s__convert;
-static PyObject *__pyx_n_s__dfas;
-static PyObject *__pyx_n_s__grammar;
-static PyObject *__pyx_n_s__keywords;
-static PyObject *__pyx_n_s__labels;
-static PyObject *__pyx_n_s__msg;
-static PyObject *__pyx_n_s__number2symbol;
-static PyObject *__pyx_n_s__pop;
-static PyObject *__pyx_n_s__push;
-static PyObject *__pyx_n_s__rootnode;
-static PyObject *__pyx_n_s__self;
-static PyObject *__pyx_n_s__shift;
-static PyObject *__pyx_n_s__stack;
-static PyObject *__pyx_n_s__start;
-static PyObject *__pyx_n_s__tokens;
-static PyObject *__pyx_n_s__type;
-static PyObject *__pyx_n_s__used_names;
-static PyObject *__pyx_n_s__value;
-static PyObject *__pyx_int_0;
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22
- * """Exception to signal the parser is stuck."""
- *
- * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<<
- * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- * (msg, type, value, context))
- */
-
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static PyMethodDef __pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__ = {__Pyx_NAMESTR("__init__"), (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)};
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_self = 0;
- PyObject *__pyx_v_msg = 0;
- PyObject *__pyx_v_type = 0;
- PyObject *__pyx_v_value = 0;
- PyObject *__pyx_v_context = 0;
- PyObject *__pyx_r = NULL;
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__self,&__pyx_n_s__msg,&__pyx_n_s__type,&__pyx_n_s__value,&__pyx_n_s__context,0};
- __Pyx_RefNannySetupContext("__init__");
- __pyx_self = __pyx_self;
- if (unlikely(__pyx_kwds)) {
- Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
- PyObject* values[5] = {0,0,0,0,0};
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
- case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
- case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
- case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
- case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 0:
- values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__self);
- if (likely(values[0])) kw_args--;
- else goto __pyx_L5_argtuple_error;
- case 1:
- values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__msg);
- if (likely(values[1])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 5, 5, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- case 2:
- values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__type);
- if (likely(values[2])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 5, 5, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- case 3:
- values[3] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__value);
- if (likely(values[3])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 5, 5, 3); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- case 4:
- values[4] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__context);
- if (likely(values[4])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 5, 5, 4); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- }
- if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- __pyx_v_self = values[0];
- __pyx_v_msg = values[1];
- __pyx_v_type = values[2];
- __pyx_v_value = values[3];
- __pyx_v_context = values[4];
- } else if (PyTuple_GET_SIZE(__pyx_args) != 5) {
- goto __pyx_L5_argtuple_error;
- } else {
- __pyx_v_self = PyTuple_GET_ITEM(__pyx_args, 0);
- __pyx_v_msg = PyTuple_GET_ITEM(__pyx_args, 1);
- __pyx_v_type = PyTuple_GET_ITEM(__pyx_args, 2);
- __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 3);
- __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 4);
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__init__", 1, 5, 5, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_L3_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__");
- return NULL;
- __pyx_L4_argument_unpacking_done:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":23
- *
- * def __init__(self, msg, type, value, context):
- * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" % # <<<<<<<<<<<<<<
- * (msg, type, value, context))
- * self.msg = msg
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_builtin_Exception, __pyx_n_s____init__); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":24
- * def __init__(self, msg, type, value, context):
- * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- * (msg, type, value, context)) # <<<<<<<<<<<<<<
- * self.msg = msg
- * self.type = type
- */
- __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 24; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(__pyx_v_msg);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_msg);
- __Pyx_GIVEREF(__pyx_v_msg);
- __Pyx_INCREF(__pyx_v_type);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_type);
- __Pyx_GIVEREF(__pyx_v_type);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- __pyx_t_3 = PyNumber_Remainder(((PyObject *)__pyx_kp_s_1), __pyx_t_2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(__pyx_v_self);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_self);
- __Pyx_GIVEREF(__pyx_v_self);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_3);
- __pyx_t_3 = 0;
- __pyx_t_3 = PyObject_Call(__pyx_t_1, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 23; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":25
- * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- * (msg, type, value, context))
- * self.msg = msg # <<<<<<<<<<<<<<
- * self.type = type
- * self.value = value
- */
- if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__msg, __pyx_v_msg) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 25; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":26
- * (msg, type, value, context))
- * self.msg = msg
- * self.type = type # <<<<<<<<<<<<<<
- * self.value = value
- * self.context = context
- */
- if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__type, __pyx_v_type) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 26; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":27
- * self.msg = msg
- * self.type = type
- * self.value = value # <<<<<<<<<<<<<<
- * self.context = context
- *
- */
- if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__value, __pyx_v_value) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 27; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":28
- * self.type = type
- * self.value = value
- * self.context = context # <<<<<<<<<<<<<<
- *
- *
- */
- if (PyObject_SetAttr(__pyx_v_self, __pyx_n_s__context, __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 28; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.ParseError.__init__");
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":34
- * cdef public object grammar
- * cdef public object rootnode
- * cdef public list stack # <<<<<<<<<<<<<<
- * cdef public set used_names
- * cdef int _grammar_start
- */
-
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___get__(PyObject *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannySetupContext("__get__");
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack));
- __pyx_r = ((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack);
- goto __pyx_L0;
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
- int __pyx_r;
- __Pyx_RefNannySetupContext("__set__");
- if (!(likely(PyList_CheckExact(__pyx_v_value))||((__pyx_v_value) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected list, got %.200s", Py_TYPE(__pyx_v_value)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 34; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_INCREF(__pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack = ((PyObject *)__pyx_v_value);
-
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.stack.__set__");
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":35
- * cdef public object rootnode
- * cdef public list stack
- * cdef public set used_names # <<<<<<<<<<<<<<
- * cdef int _grammar_start
- * cdef list _grammar_labels
- */
-
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___get__(PyObject *__pyx_v_self); /*proto*/
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___get__(PyObject *__pyx_v_self) {
- PyObject *__pyx_r = NULL;
- __Pyx_RefNannySetupContext("__get__");
- __Pyx_XDECREF(__pyx_r);
- __Pyx_INCREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names));
- __pyx_r = ((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names);
- goto __pyx_L0;
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- __pyx_L0:;
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value); /*proto*/
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___set__(PyObject *__pyx_v_self, PyObject *__pyx_v_value) {
- int __pyx_r;
- __Pyx_RefNannySetupContext("__set__");
- if (!(likely(PyAnySet_CheckExact(__pyx_v_value))||((__pyx_v_value) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected set, got %.200s", Py_TYPE(__pyx_v_value)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 35; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_INCREF(__pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names = ((PyObject *)__pyx_v_value);
-
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.used_names.__set__");
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":43
- * cdef dict _grammar_number2symbol
- *
- * def __init__(self, grammar, convert=None): # <<<<<<<<<<<<<<
- * self.grammar = grammar
- * #self.convert = convert or noconvert
- */
-
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static int __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_grammar = 0;
- PyObject *__pyx_v_convert = 0;
- int __pyx_r;
- PyObject *__pyx_t_1 = NULL;
- int __pyx_t_2;
- static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__grammar,&__pyx_n_s__convert,0};
- __Pyx_RefNannySetupContext("__init__");
- if (unlikely(__pyx_kwds)) {
- Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
- PyObject* values[2] = {0,0};
- values[1] = ((PyObject *)Py_None);
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
- case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 0:
- values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__grammar);
- if (likely(values[0])) kw_args--;
- else goto __pyx_L5_argtuple_error;
- case 1:
- if (kw_args > 1) {
- PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__convert);
- if (unlikely(value)) { values[1] = value; kw_args--; }
- }
- }
- if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "__init__") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- __pyx_v_grammar = values[0];
- __pyx_v_convert = values[1];
- } else {
- __pyx_v_convert = ((PyObject *)Py_None);
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 2: __pyx_v_convert = PyTuple_GET_ITEM(__pyx_args, 1);
- case 1: __pyx_v_grammar = PyTuple_GET_ITEM(__pyx_args, 0);
- break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("__init__", 0, 1, 2, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 43; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_L3_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__");
- return -1;
- __pyx_L4_argument_unpacking_done:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":44
- *
- * def __init__(self, grammar, convert=None):
- * self.grammar = grammar # <<<<<<<<<<<<<<
- * #self.convert = convert or noconvert
- *
- */
- __Pyx_INCREF(__pyx_v_grammar);
- __Pyx_GIVEREF(__pyx_v_grammar);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar);
- __Pyx_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar);
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->grammar = __pyx_v_grammar;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":47
- * #self.convert = convert or noconvert
- *
- * self._grammar_dfas = grammar.dfas # <<<<<<<<<<<<<<
- * self._grammar_labels = grammar.labels
- * self._grammar_keywords = grammar.keywords
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__dfas); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(PyDict_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected dict, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 47; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_1);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas = ((PyObject *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":48
- *
- * self._grammar_dfas = grammar.dfas
- * self._grammar_labels = grammar.labels # <<<<<<<<<<<<<<
- * self._grammar_keywords = grammar.keywords
- * self._grammar_tokens = grammar.tokens
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__labels); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(PyList_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected list, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 48; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_1);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels = ((PyObject *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":49
- * self._grammar_dfas = grammar.dfas
- * self._grammar_labels = grammar.labels
- * self._grammar_keywords = grammar.keywords # <<<<<<<<<<<<<<
- * self._grammar_tokens = grammar.tokens
- * self._grammar_number2symbol = grammar.number2symbol
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__keywords); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(PyDict_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected dict, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 49; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_1);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_keywords = ((PyObject *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":50
- * self._grammar_labels = grammar.labels
- * self._grammar_keywords = grammar.keywords
- * self._grammar_tokens = grammar.tokens # <<<<<<<<<<<<<<
- * self._grammar_number2symbol = grammar.number2symbol
- * self._grammar_start = grammar.start
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__tokens); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(PyDict_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected dict, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 50; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_1);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_tokens = ((PyObject *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":51
- * self._grammar_keywords = grammar.keywords
- * self._grammar_tokens = grammar.tokens
- * self._grammar_number2symbol = grammar.number2symbol # <<<<<<<<<<<<<<
- * self._grammar_start = grammar.start
- *
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__number2symbol); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (!(likely(PyDict_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected dict, got %.200s", Py_TYPE(__pyx_t_1)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 51; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_1);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_number2symbol = ((PyObject *)__pyx_t_1);
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":52
- * self._grammar_tokens = grammar.tokens
- * self._grammar_number2symbol = grammar.number2symbol
- * self._grammar_start = grammar.start # <<<<<<<<<<<<<<
- *
- * def setup(self, start=None):
- */
- __pyx_t_1 = PyObject_GetAttr(__pyx_v_grammar, __pyx_n_s__start); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 52; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_start = __pyx_t_2;
-
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.__init__");
- __pyx_r = -1;
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":54
- * self._grammar_start = grammar.start
- *
- * def setup(self, start=None): # <<<<<<<<<<<<<<
- * if start is None:
- * start = self._grammar_start
- */
-
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- PyObject *__pyx_v_start = 0;
- PyObject *__pyx_v_newnode;
- PyObject *__pyx_v_stackentry;
- PyObject *__pyx_r = NULL;
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__start,0};
- __Pyx_RefNannySetupContext("setup");
- if (unlikely(__pyx_kwds)) {
- Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
- PyObject* values[1] = {0};
- values[0] = ((PyObject *)Py_None);
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 0:
- if (kw_args > 1) {
- PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s__start);
- if (unlikely(value)) { values[0] = value; kw_args--; }
- }
- }
- if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "setup") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- __pyx_v_start = values[0];
- } else {
- __pyx_v_start = ((PyObject *)Py_None);
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 1: __pyx_v_start = PyTuple_GET_ITEM(__pyx_args, 0);
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("setup", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 54; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_L3_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup");
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_INCREF(__pyx_v_start);
- __pyx_v_newnode = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_stackentry = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":55
- *
- * def setup(self, start=None):
- * if start is None: # <<<<<<<<<<<<<<
- * start = self._grammar_start
- * # Each stack entry is a tuple: (dfa, state, node).
- */
- __pyx_t_1 = (__pyx_v_start == Py_None);
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":56
- * def setup(self, start=None):
- * if start is None:
- * start = self._grammar_start # <<<<<<<<<<<<<<
- * # Each stack entry is a tuple: (dfa, state, node).
- * # A node is a tuple: (type, value, context, children),
- */
- __pyx_t_2 = PyInt_FromLong(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_start); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 56; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_v_start);
- __pyx_v_start = __pyx_t_2;
- __pyx_t_2 = 0;
- goto __pyx_L6;
- }
- __pyx_L6:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":60
- * # A node is a tuple: (type, value, context, children),
- * # where children is a list of nodes or None, and context may be None.
- * newnode = (start, None, None, []) # <<<<<<<<<<<<<<
- * stackentry = (self._grammar_dfas[start], 0, newnode)
- * self.stack = [stackentry]
- */
- __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 60; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_v_start);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_start);
- __Pyx_GIVEREF(__pyx_v_start);
- __Pyx_INCREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_3, 1, Py_None);
- __Pyx_GIVEREF(Py_None);
- __Pyx_INCREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_3, 2, Py_None);
- __Pyx_GIVEREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_3, 3, ((PyObject *)__pyx_t_2));
- __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_newnode);
- __pyx_v_newnode = __pyx_t_3;
- __pyx_t_3 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":61
- * # where children is a list of nodes or None, and context may be None.
- * newnode = (start, None, None, [])
- * stackentry = (self._grammar_dfas[start], 0, newnode) # <<<<<<<<<<<<<<
- * self.stack = [stackentry]
- * self.rootnode = None
- */
- __pyx_t_3 = PyObject_GetItem(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas), __pyx_v_start); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 61; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_0);
- __Pyx_GIVEREF(__pyx_int_0);
- __Pyx_INCREF(__pyx_v_newnode);
- PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_newnode);
- __Pyx_GIVEREF(__pyx_v_newnode);
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_stackentry);
- __pyx_v_stackentry = __pyx_t_2;
- __pyx_t_2 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":62
- * newnode = (start, None, None, [])
- * stackentry = (self._grammar_dfas[start], 0, newnode)
- * self.stack = [stackentry] # <<<<<<<<<<<<<<
- * self.rootnode = None
- * self.used_names = set() # Aliased to self.rootnode.used_names in pop()
- */
- __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 62; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- __Pyx_INCREF(__pyx_v_stackentry);
- PyList_SET_ITEM(__pyx_t_2, 0, __pyx_v_stackentry);
- __Pyx_GIVEREF(__pyx_v_stackentry);
- __Pyx_GIVEREF(((PyObject *)__pyx_t_2));
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack = __pyx_t_2;
- __pyx_t_2 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":63
- * stackentry = (self._grammar_dfas[start], 0, newnode)
- * self.stack = [stackentry]
- * self.rootnode = None # <<<<<<<<<<<<<<
- * self.used_names = set() # Aliased to self.rootnode.used_names in pop()
- *
- */
- __Pyx_INCREF(Py_None);
- __Pyx_GIVEREF(Py_None);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode);
- __Pyx_DECREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode);
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->rootnode = Py_None;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":64
- * self.stack = [stackentry]
- * self.rootnode = None
- * self.used_names = set() # Aliased to self.rootnode.used_names in pop() # <<<<<<<<<<<<<<
- *
- * def addtoken(self, int type, value, context):
- */
- __pyx_t_2 = PySet_New(0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- if (!(likely(PyAnySet_CheckExact(((PyObject *)__pyx_t_2)))||((((PyObject *)__pyx_t_2)) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected set, got %.200s", Py_TYPE(((PyObject *)__pyx_t_2))->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 64; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GIVEREF(__pyx_t_2);
- __Pyx_GOTREF(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names);
- __Pyx_DECREF(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names));
- ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->used_names = ((PyObject *)__pyx_t_2);
- __pyx_t_2 = 0;
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.setup");
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_newnode);
- __Pyx_DECREF(__pyx_v_stackentry);
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_DECREF(__pyx_v_start);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":66
- * self.used_names = set() # Aliased to self.rootnode.used_names in pop()
- *
- * def addtoken(self, int type, value, context): # <<<<<<<<<<<<<<
- * """Add a token; return True iff this is the end of the program."""
- * cdef int ilabel, i, t, state, newstate
- */
-
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
-static char __pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken[] = "Add a token; return True iff this is the end of the program.";
-static PyObject *__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
- int __pyx_v_type;
- PyObject *__pyx_v_value = 0;
- PyObject *__pyx_v_context = 0;
- int __pyx_v_ilabel;
- int __pyx_v_i;
- int __pyx_v_t;
- int __pyx_v_state;
- int __pyx_v_newstate;
- PyObject *__pyx_v_dfa;
- PyObject *__pyx_v_node;
- PyObject *__pyx_v_states;
- PyObject *__pyx_v_first;
- PyObject *__pyx_v_arcs;
- PyObject *__pyx_v_v;
- PyObject *__pyx_v_itsdfa;
- PyObject *__pyx_v_itsstates;
- PyObject *__pyx_v_itsfirst;
- PyObject *__pyx_r = NULL;
- int __pyx_t_1;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_t_6;
- PyObject *__pyx_t_7 = NULL;
- Py_ssize_t __pyx_t_8;
- int __pyx_t_9;
- int __pyx_t_10;
- PyObject *__pyx_t_11 = NULL;
- static PyObject **__pyx_pyargnames[] = {&__pyx_n_s__type,&__pyx_n_s__value,&__pyx_n_s__context,0};
- __Pyx_RefNannySetupContext("addtoken");
- if (unlikely(__pyx_kwds)) {
- Py_ssize_t kw_args = PyDict_Size(__pyx_kwds);
- PyObject* values[3] = {0,0,0};
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
- case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
- case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
- case 0: break;
- default: goto __pyx_L5_argtuple_error;
- }
- switch (PyTuple_GET_SIZE(__pyx_args)) {
- case 0:
- values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__type);
- if (likely(values[0])) kw_args--;
- else goto __pyx_L5_argtuple_error;
- case 1:
- values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__value);
- if (likely(values[1])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("addtoken", 1, 3, 3, 1); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- case 2:
- values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s__context);
- if (likely(values[2])) kw_args--;
- else {
- __Pyx_RaiseArgtupleInvalid("addtoken", 1, 3, 3, 2); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- }
- if (unlikely(kw_args > 0)) {
- if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, PyTuple_GET_SIZE(__pyx_args), "addtoken") < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- }
- __pyx_v_type = __Pyx_PyInt_AsInt(values[0]); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_v_value = values[1];
- __pyx_v_context = values[2];
- } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
- goto __pyx_L5_argtuple_error;
- } else {
- __pyx_v_type = __Pyx_PyInt_AsInt(PyTuple_GET_ITEM(__pyx_args, 0)); if (unlikely((__pyx_v_type == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_v_value = PyTuple_GET_ITEM(__pyx_args, 1);
- __pyx_v_context = PyTuple_GET_ITEM(__pyx_args, 2);
- }
- goto __pyx_L4_argument_unpacking_done;
- __pyx_L5_argtuple_error:;
- __Pyx_RaiseArgtupleInvalid("addtoken", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 66; __pyx_clineno = __LINE__; goto __pyx_L3_error;}
- __pyx_L3_error:;
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken");
- return NULL;
- __pyx_L4_argument_unpacking_done:;
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_INCREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- __pyx_v_dfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_node = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_states = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_first = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_arcs = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_v = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_itsdfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_itsstates = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_itsfirst = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":70
- * cdef int ilabel, i, t, state, newstate
- * # Map from token to label
- * ilabel = self.classify(type, value, context) # <<<<<<<<<<<<<<
- * # Loop until the token is shifted; may raise exceptions
- * while True:
- */
- __pyx_v_ilabel = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->classify(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_v_type, __pyx_v_value, __pyx_v_context);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":72
- * ilabel = self.classify(type, value, context)
- * # Loop until the token is shifted; may raise exceptions
- * while True: # <<<<<<<<<<<<<<
- * dfa, state, node = self.stack[-1]
- * states, first = dfa
- */
- while (1) {
- __pyx_t_1 = 1;
- if (!__pyx_t_1) break;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":73
- * # Loop until the token is shifted; may raise exceptions
- * while True:
- * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<<
- * states, first = dfa
- * arcs = states[state]
- */
- __pyx_t_2 = __Pyx_GetItemInt_List(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 3)) {
- PyObject* tuple = __pyx_t_2;
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_5 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_3;
- __pyx_t_3 = 0;
- __pyx_v_state = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_5;
- __pyx_t_5 = 0;
- } else {
- __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_5 = __Pyx_UnpackItem(__pyx_t_7, 2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- if (__Pyx_EndUnpack(__pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 73; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_3;
- __pyx_t_3 = 0;
- __pyx_v_state = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_5;
- __pyx_t_5 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":74
- * while True:
- * dfa, state, node = self.stack[-1]
- * states, first = dfa # <<<<<<<<<<<<<<
- * arcs = states[state]
- * # Look for a state with this label
- */
- if (PyTuple_CheckExact(__pyx_v_dfa) && likely(PyTuple_GET_SIZE(__pyx_v_dfa) == 2)) {
- PyObject* tuple = __pyx_v_dfa;
- __pyx_t_2 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_2);
- __pyx_t_5 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_v_states);
- __pyx_v_states = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_first);
- __pyx_v_first = __pyx_t_5;
- __pyx_t_5 = 0;
- } else {
- __pyx_t_4 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = __Pyx_UnpackItem(__pyx_t_4, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_5 = __Pyx_UnpackItem(__pyx_t_4, 1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- if (__Pyx_EndUnpack(__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 74; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_states);
- __pyx_v_states = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_first);
- __pyx_v_first = __pyx_t_5;
- __pyx_t_5 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":75
- * dfa, state, node = self.stack[-1]
- * states, first = dfa
- * arcs = states[state] # <<<<<<<<<<<<<<
- * # Look for a state with this label
- * for i, newstate in arcs:
- */
- __pyx_t_5 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, sizeof(int), PyInt_FromLong); if (!__pyx_t_5) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 75; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_v_arcs);
- __pyx_v_arcs = __pyx_t_5;
- __pyx_t_5 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":77
- * arcs = states[state]
- * # Look for a state with this label
- * for i, newstate in arcs: # <<<<<<<<<<<<<<
- * t, v = self._grammar_labels[i]
- * if ilabel == i:
- */
- if (PyList_CheckExact(__pyx_v_arcs) || PyTuple_CheckExact(__pyx_v_arcs)) {
- __pyx_t_8 = 0; __pyx_t_5 = __pyx_v_arcs; __Pyx_INCREF(__pyx_t_5);
- } else {
- __pyx_t_8 = -1; __pyx_t_5 = PyObject_GetIter(__pyx_v_arcs); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- }
- for (;;) {
- if (likely(PyList_CheckExact(__pyx_t_5))) {
- if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_5)) break;
- __pyx_t_2 = PyList_GET_ITEM(__pyx_t_5, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
- } else if (likely(PyTuple_CheckExact(__pyx_t_5))) {
- if (__pyx_t_8 >= PyTuple_GET_SIZE(__pyx_t_5)) break;
- __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_8); __Pyx_INCREF(__pyx_t_2); __pyx_t_8++;
- } else {
- __pyx_t_2 = PyIter_Next(__pyx_t_5);
- if (!__pyx_t_2) {
- if (unlikely(PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- break;
- }
- __Pyx_GOTREF(__pyx_t_2);
- }
- if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 2)) {
- PyObject* tuple = __pyx_t_2;
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_v_i = __pyx_t_6;
- __pyx_v_newstate = __pyx_t_9;
- } else {
- __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_9 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (__Pyx_EndUnpack(__pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 77; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __pyx_v_i = __pyx_t_9;
- __pyx_v_newstate = __pyx_t_6;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":78
- * # Look for a state with this label
- * for i, newstate in arcs:
- * t, v = self._grammar_labels[i] # <<<<<<<<<<<<<<
- * if ilabel == i:
- * # Look it up in the list of labels
- */
- __pyx_t_2 = __Pyx_GetItemInt_List(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_labels), __pyx_v_i, sizeof(int), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 2)) {
- PyObject* tuple = __pyx_t_2;
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_v_t = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_v);
- __pyx_v_v = __pyx_t_4;
- __pyx_t_4 = 0;
- } else {
- __pyx_t_7 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_7, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_7, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- if (__Pyx_EndUnpack(__pyx_t_7) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 78; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __pyx_v_t = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_v);
- __pyx_v_v = __pyx_t_4;
- __pyx_t_4 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":79
- * for i, newstate in arcs:
- * t, v = self._grammar_labels[i]
- * if ilabel == i: # <<<<<<<<<<<<<<
- * # Look it up in the list of labels
- * ## assert t < 256
- */
- __pyx_t_1 = (__pyx_v_ilabel == __pyx_v_i);
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":83
- * ## assert t < 256
- * # Shift a token; we're done with it
- * self.shift(type, value, newstate, context) # <<<<<<<<<<<<<<
- * # Pop while we are in an accept-only state
- * state = newstate
- */
- __pyx_t_2 = PyInt_FromLong(__pyx_v_type); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 83; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->shift(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_t_2, __pyx_v_value, __pyx_t_4, __pyx_v_context);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":85
- * self.shift(type, value, newstate, context)
- * # Pop while we are in an accept-only state
- * state = newstate # <<<<<<<<<<<<<<
- * while states[state] == [(0, state)]:
- * self.pop()
- */
- __pyx_v_state = __pyx_v_newstate;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":86
- * # Pop while we are in an accept-only state
- * state = newstate
- * while states[state] == [(0, state)]: # <<<<<<<<<<<<<<
- * self.pop()
- * if not self.stack:
- */
- while (1) {
- __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_states, __pyx_v_state, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_int_0);
- __Pyx_GIVEREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
- __Pyx_GIVEREF(__pyx_t_2);
- __pyx_t_2 = 0;
- __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- PyList_SET_ITEM(__pyx_t_2, 0, __pyx_t_3);
- __Pyx_GIVEREF(__pyx_t_3);
- __pyx_t_3 = 0;
- __pyx_t_3 = PyObject_RichCompare(__pyx_t_4, ((PyObject *)__pyx_t_2), Py_EQ); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 86; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (!__pyx_t_1) break;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":87
- * state = newstate
- * while states[state] == [(0, state)]:
- * self.pop() # <<<<<<<<<<<<<<
- * if not self.stack:
- * # Done parsing!
- */
- ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self));
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":88
- * while states[state] == [(0, state)]:
- * self.pop()
- * if not self.stack: # <<<<<<<<<<<<<<
- * # Done parsing!
- * return True
- */
- __pyx_t_1 = __Pyx_PyObject_IsTrue(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack)); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 88; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_10 = (!__pyx_t_1);
- if (__pyx_t_10) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":90
- * if not self.stack:
- * # Done parsing!
- * return True # <<<<<<<<<<<<<<
- * dfa, state, node = self.stack[-1]
- * states, first = dfa
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_3 = __Pyx_PyBool_FromLong(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 90; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_r = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- goto __pyx_L0;
- goto __pyx_L13;
- }
- __pyx_L13:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":91
- * # Done parsing!
- * return True
- * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<<
- * states, first = dfa
- * # Done with this token
- */
- __pyx_t_3 = __Pyx_GetItemInt_List(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- if (PyTuple_CheckExact(__pyx_t_3) && likely(PyTuple_GET_SIZE(__pyx_t_3) == 3)) {
- PyObject* tuple = __pyx_t_3;
- __pyx_t_2 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_2);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_7 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __pyx_v_state = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_7;
- __pyx_t_7 = 0;
- } else {
- __pyx_t_11 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_11)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_11);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_2 = __Pyx_UnpackItem(__pyx_t_11, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_11, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_6 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_t_7 = __Pyx_UnpackItem(__pyx_t_11, 2); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- if (__Pyx_EndUnpack(__pyx_t_11) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 91; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __pyx_v_state = __pyx_t_6;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_7;
- __pyx_t_7 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":92
- * return True
- * dfa, state, node = self.stack[-1]
- * states, first = dfa # <<<<<<<<<<<<<<
- * # Done with this token
- * return False
- */
- if (PyTuple_CheckExact(__pyx_v_dfa) && likely(PyTuple_GET_SIZE(__pyx_v_dfa) == 2)) {
- PyObject* tuple = __pyx_v_dfa;
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_7 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_v_states);
- __pyx_v_states = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_first);
- __pyx_v_first = __pyx_t_7;
- __pyx_t_7 = 0;
- } else {
- __pyx_t_4 = PyObject_GetIter(__pyx_v_dfa); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_4, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_7 = __Pyx_UnpackItem(__pyx_t_4, 1); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- if (__Pyx_EndUnpack(__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 92; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_states);
- __pyx_v_states = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_first);
- __pyx_v_first = __pyx_t_7;
- __pyx_t_7 = 0;
- }
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":94
- * states, first = dfa
- * # Done with this token
- * return False # <<<<<<<<<<<<<<
- * elif t >= 256:
- * # See if it's a symbol and if we're in its first set
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_7 = __Pyx_PyBool_FromLong(0); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 94; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_r = __pyx_t_7;
- __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- goto __pyx_L0;
- goto __pyx_L10;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":95
- * # Done with this token
- * return False
- * elif t >= 256: # <<<<<<<<<<<<<<
- * # See if it's a symbol and if we're in its first set
- * itsdfa = self._grammar_dfas[t]
- */
- __pyx_t_10 = (__pyx_v_t >= 256);
- if (__pyx_t_10) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":97
- * elif t >= 256:
- * # See if it's a symbol and if we're in its first set
- * itsdfa = self._grammar_dfas[t] # <<<<<<<<<<<<<<
- * itsstates, itsfirst = itsdfa
- * if ilabel in itsfirst:
- */
- __pyx_t_7 = __Pyx_GetItemInt(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->_grammar_dfas), __pyx_v_t, sizeof(int), PyInt_FromLong); if (!__pyx_t_7) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 97; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_v_itsdfa);
- __pyx_v_itsdfa = __pyx_t_7;
- __pyx_t_7 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":98
- * # See if it's a symbol and if we're in its first set
- * itsdfa = self._grammar_dfas[t]
- * itsstates, itsfirst = itsdfa # <<<<<<<<<<<<<<
- * if ilabel in itsfirst:
- * # Push a symbol
- */
- if (PyTuple_CheckExact(__pyx_v_itsdfa) && likely(PyTuple_GET_SIZE(__pyx_v_itsdfa) == 2)) {
- PyObject* tuple = __pyx_v_itsdfa;
- __pyx_t_7 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_7);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_v_itsstates);
- __pyx_v_itsstates = __pyx_t_7;
- __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_v_itsfirst);
- __pyx_v_itsfirst = __pyx_t_3;
- __pyx_t_3 = 0;
- } else {
- __pyx_t_4 = PyObject_GetIter(__pyx_v_itsdfa); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_7 = __Pyx_UnpackItem(__pyx_t_4, 0); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_4, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- if (__Pyx_EndUnpack(__pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 98; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_itsstates);
- __pyx_v_itsstates = __pyx_t_7;
- __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_v_itsfirst);
- __pyx_v_itsfirst = __pyx_t_3;
- __pyx_t_3 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":99
- * itsdfa = self._grammar_dfas[t]
- * itsstates, itsfirst = itsdfa
- * if ilabel in itsfirst: # <<<<<<<<<<<<<<
- * # Push a symbol
- * self.push(t, itsdfa, newstate, context)
- */
- __pyx_t_3 = PyInt_FromLong(__pyx_v_ilabel); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_10 = ((PySequence_Contains(__pyx_v_itsfirst, __pyx_t_3))); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 99; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (__pyx_t_10) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":101
- * if ilabel in itsfirst:
- * # Push a symbol
- * self.push(t, itsdfa, newstate, context) # <<<<<<<<<<<<<<
- * break # To continue the outer while loop
- * else:
- */
- __pyx_t_3 = PyInt_FromLong(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_7 = PyInt_FromLong(__pyx_v_newstate); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 101; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->push(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self), __pyx_t_3, __pyx_v_itsdfa, __pyx_t_7, __pyx_v_context);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":102
- * # Push a symbol
- * self.push(t, itsdfa, newstate, context)
- * break # To continue the outer while loop # <<<<<<<<<<<<<<
- * else:
- * if (0, state) in arcs:
- */
- goto __pyx_L9_break;
- goto __pyx_L14;
- }
- __pyx_L14:;
- goto __pyx_L10;
- }
- __pyx_L10:;
- }
- /*else*/ {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":104
- * break # To continue the outer while loop
- * else:
- * if (0, state) in arcs: # <<<<<<<<<<<<<<
- * # An accepting state, pop it and try something else
- * self.pop()
- */
- __pyx_t_7 = PyInt_FromLong(__pyx_v_state); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_int_0);
- __Pyx_GIVEREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7);
- __Pyx_GIVEREF(__pyx_t_7);
- __pyx_t_7 = 0;
- __pyx_t_10 = ((PySequence_Contains(__pyx_v_arcs, __pyx_t_3))); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 104; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (__pyx_t_10) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":106
- * if (0, state) in arcs:
- * # An accepting state, pop it and try something else
- * self.pop() # <<<<<<<<<<<<<<
- * if not self.stack:
- * # Done parsing, but another token is input
- */
- ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->__pyx_vtab)->pop(((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self));
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":107
- * # An accepting state, pop it and try something else
- * self.pop()
- * if not self.stack: # <<<<<<<<<<<<<<
- * # Done parsing, but another token is input
- * raise ParseError("too much input",
- */
- __pyx_t_10 = __Pyx_PyObject_IsTrue(((PyObject *)((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self)->stack)); if (unlikely(__pyx_t_10 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 107; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = (!__pyx_t_10);
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":109
- * if not self.stack:
- * # Done parsing, but another token is input
- * raise ParseError("too much input", # <<<<<<<<<<<<<<
- * type, value, context)
- * else:
- */
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__ParseError); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":110
- * # Done parsing, but another token is input
- * raise ParseError("too much input",
- * type, value, context) # <<<<<<<<<<<<<<
- * else:
- * # No success finding a transition
- */
- __pyx_t_7 = PyInt_FromLong(__pyx_v_type); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 110; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_3));
- PyTuple_SET_ITEM(__pyx_t_4, 0, ((PyObject *)__pyx_kp_s_3));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_3));
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_7);
- __Pyx_GIVEREF(__pyx_t_7);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- __pyx_t_7 = 0;
- __pyx_t_7 = PyObject_Call(__pyx_t_3, __pyx_t_4, NULL); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_Raise(__pyx_t_7, 0, 0);
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 109; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L16;
- }
- __pyx_L16:;
- goto __pyx_L15;
- }
- /*else*/ {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":113
- * else:
- * # No success finding a transition
- * raise ParseError("bad input", type, value, context) # <<<<<<<<<<<<<<
- *
- * cdef int classify(self, int type, value, context):
- */
- __pyx_t_7 = __Pyx_GetName(__pyx_m, __pyx_n_s__ParseError); if (unlikely(!__pyx_t_7)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_7);
- __pyx_t_4 = PyInt_FromLong(__pyx_v_type); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyTuple_New(4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_4));
- PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_kp_s_4));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_4));
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_3, 3, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- __pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_7, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_Raise(__pyx_t_4, 0, 0);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 113; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- __pyx_L15:;
- }
- __pyx_L9_break:;
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- }
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_XDECREF(__pyx_t_7);
- __Pyx_XDECREF(__pyx_t_11);
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.addtoken");
- __pyx_r = NULL;
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_dfa);
- __Pyx_DECREF(__pyx_v_node);
- __Pyx_DECREF(__pyx_v_states);
- __Pyx_DECREF(__pyx_v_first);
- __Pyx_DECREF(__pyx_v_arcs);
- __Pyx_DECREF(__pyx_v_v);
- __Pyx_DECREF(__pyx_v_itsdfa);
- __Pyx_DECREF(__pyx_v_itsstates);
- __Pyx_DECREF(__pyx_v_itsfirst);
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_DECREF(__pyx_v_value);
- __Pyx_DECREF(__pyx_v_context);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":115
- * raise ParseError("bad input", type, value, context)
- *
- * cdef int classify(self, int type, value, context): # <<<<<<<<<<<<<<
- * """Turn a token into a label. (Internal)"""
- * if type == NAME:
- */
-
-static int __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, int __pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_context) {
- int __pyx_r;
- int __pyx_t_1;
- int __pyx_t_2;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- __Pyx_RefNannySetupContext("classify");
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_INCREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":117
- * cdef int classify(self, int type, value, context):
- * """Turn a token into a label. (Internal)"""
- * if type == NAME: # <<<<<<<<<<<<<<
- * # Keep a listing of all used names
- * self.used_names.add(value)
- */
- __pyx_t_1 = (__pyx_v_type == 1);
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":119
- * if type == NAME:
- * # Keep a listing of all used names
- * self.used_names.add(value) # <<<<<<<<<<<<<<
- * # Check for reserved words
- * if value in self._grammar_keywords:
- */
- __pyx_t_2 = PySet_Add(((PyObject *)__pyx_v_self->used_names), __pyx_v_value); if (unlikely(__pyx_t_2 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 119; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":121
- * self.used_names.add(value)
- * # Check for reserved words
- * if value in self._grammar_keywords: # <<<<<<<<<<<<<<
- * return self._grammar_keywords[value]
- * if type not in self._grammar_tokens:
- */
- if (unlikely(((PyObject *)__pyx_v_self->_grammar_keywords) == Py_None)) {
- __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- } else {
- __pyx_t_1 = ((PyDict_Contains(((PyObject *)__pyx_v_self->_grammar_keywords), __pyx_v_value))); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 121; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":122
- * # Check for reserved words
- * if value in self._grammar_keywords:
- * return self._grammar_keywords[value] # <<<<<<<<<<<<<<
- * if type not in self._grammar_tokens:
- * raise ParseError("bad token", type, value, context)
- */
- __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self->_grammar_keywords), __pyx_v_value); if (!__pyx_t_3) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_3); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 122; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_r = __pyx_t_2;
- goto __pyx_L0;
- goto __pyx_L4;
- }
- __pyx_L4:;
- goto __pyx_L3;
- }
- __pyx_L3:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":123
- * if value in self._grammar_keywords:
- * return self._grammar_keywords[value]
- * if type not in self._grammar_tokens: # <<<<<<<<<<<<<<
- * raise ParseError("bad token", type, value, context)
- * return self._grammar_tokens[type]
- */
- __pyx_t_3 = PyInt_FromLong(__pyx_v_type); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- if (unlikely(((PyObject *)__pyx_v_self->_grammar_tokens) == Py_None)) {
- __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- } else {
- __pyx_t_1 = (__Pyx_NegateNonNeg(PyDict_Contains(((PyObject *)__pyx_v_self->_grammar_tokens), __pyx_t_3))); if (unlikely(__pyx_t_1 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 123; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (__pyx_t_1) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":124
- * return self._grammar_keywords[value]
- * if type not in self._grammar_tokens:
- * raise ParseError("bad token", type, value, context) # <<<<<<<<<<<<<<
- * return self._grammar_tokens[type]
- *
- */
- __pyx_t_3 = __Pyx_GetName(__pyx_m, __pyx_n_s__ParseError); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyInt_FromLong(__pyx_v_type); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_INCREF(((PyObject *)__pyx_kp_s_5));
- PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)__pyx_kp_s_5));
- __Pyx_GIVEREF(((PyObject *)__pyx_kp_s_5));
- PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_4);
- __Pyx_GIVEREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- __pyx_t_4 = 0;
- __pyx_t_4 = PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_Raise(__pyx_t_4, 0, 0);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 124; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- goto __pyx_L5;
- }
- __pyx_L5:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":125
- * if type not in self._grammar_tokens:
- * raise ParseError("bad token", type, value, context)
- * return self._grammar_tokens[type] # <<<<<<<<<<<<<<
- *
- * cdef void shift(self, type, value, newstate, context):
- */
- __pyx_t_4 = __Pyx_GetItemInt(((PyObject *)__pyx_v_self->_grammar_tokens), __pyx_v_type, sizeof(int), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_2 = __Pyx_PyInt_AsInt(__pyx_t_4); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 125; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __pyx_r = __pyx_t_2;
- goto __pyx_L0;
-
- __pyx_r = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.classify");
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_DECREF(__pyx_v_value);
- __Pyx_DECREF(__pyx_v_context);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":127
- * return self._grammar_tokens[type]
- *
- * cdef void shift(self, type, value, newstate, context): # <<<<<<<<<<<<<<
- * """Shift a token. (Internal)"""
- * cdef tuple node
- */
-
-static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_value, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) {
- PyObject *__pyx_v_node;
- PyObject *__pyx_v_dfa;
- PyObject *__pyx_v_state;
- PyObject *__pyx_v_newnode;
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_t_6;
- __Pyx_RefNannySetupContext("shift");
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_INCREF(__pyx_v_type);
- __Pyx_INCREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_newstate);
- __Pyx_INCREF(__pyx_v_context);
- __pyx_v_node = ((PyObject *)Py_None); __Pyx_INCREF(Py_None);
- __pyx_v_dfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_state = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_newnode = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":130
- * """Shift a token. (Internal)"""
- * cdef tuple node
- * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<<
- * newnode = (type, value, context, None)
- * newnode = self.convert(newnode)
- */
- __pyx_t_1 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_self->stack), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (PyTuple_CheckExact(__pyx_t_1) && likely(PyTuple_GET_SIZE(__pyx_t_1) == 3)) {
- PyObject* tuple = __pyx_t_1;
- __pyx_t_2 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_4);
- if (!(likely(PyTuple_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_v_node));
- __pyx_v_node = ((PyObject *)__pyx_t_4);
- __pyx_t_4 = 0;
- } else {
- __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_2 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- if (!(likely(PyTuple_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_t_4)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__Pyx_EndUnpack(__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 130; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_v_node));
- __pyx_v_node = ((PyObject *)__pyx_t_4);
- __pyx_t_4 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":131
- * cdef tuple node
- * dfa, state, node = self.stack[-1]
- * newnode = (type, value, context, None) # <<<<<<<<<<<<<<
- * newnode = self.convert(newnode)
- * if newnode is not None:
- */
- __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 131; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_v_type);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_type);
- __Pyx_GIVEREF(__pyx_v_type);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- __Pyx_INCREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_1, 3, Py_None);
- __Pyx_GIVEREF(Py_None);
- __Pyx_DECREF(__pyx_v_newnode);
- __pyx_v_newnode = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":132
- * dfa, state, node = self.stack[-1]
- * newnode = (type, value, context, None)
- * newnode = self.convert(newnode) # <<<<<<<<<<<<<<
- * if newnode is not None:
- * node[-1].append(newnode)
- */
- if (!(likely(PyTuple_CheckExact(__pyx_v_newnode))||((__pyx_v_newnode) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_v_newnode)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, ((PyObject *)__pyx_v_newnode)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 132; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_v_newnode);
- __pyx_v_newnode = __pyx_t_1;
- __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":133
- * newnode = (type, value, context, None)
- * newnode = self.convert(newnode)
- * if newnode is not None: # <<<<<<<<<<<<<<
- * node[-1].append(newnode)
- * self.stack[-1] = (dfa, newstate, node)
- */
- __pyx_t_6 = (__pyx_v_newnode != Py_None);
- if (__pyx_t_6) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":134
- * newnode = self.convert(newnode)
- * if newnode is not None:
- * node[-1].append(newnode) # <<<<<<<<<<<<<<
- * self.stack[-1] = (dfa, newstate, node)
- *
- */
- __pyx_t_1 = __Pyx_GetItemInt_Tuple(((PyObject *)__pyx_v_node), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = __Pyx_PyObject_Append(__pyx_t_1, __pyx_v_newnode); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 134; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- goto __pyx_L3;
- }
- __pyx_L3:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":135
- * if newnode is not None:
- * node[-1].append(newnode)
- * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<<
- *
- * cdef void push(self, type, newdfa, newstate, context):
- */
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_dfa);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_dfa);
- __Pyx_GIVEREF(__pyx_v_dfa);
- __Pyx_INCREF(__pyx_v_newstate);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_newstate);
- __Pyx_GIVEREF(__pyx_v_newstate);
- __Pyx_INCREF(((PyObject *)__pyx_v_node));
- PyTuple_SET_ITEM(__pyx_t_4, 2, ((PyObject *)__pyx_v_node));
- __Pyx_GIVEREF(((PyObject *)__pyx_v_node));
- if (__Pyx_SetItemInt(((PyObject *)__pyx_v_self->stack), -1, __pyx_t_4, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 135; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.shift");
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_node);
- __Pyx_DECREF(__pyx_v_dfa);
- __Pyx_DECREF(__pyx_v_state);
- __Pyx_DECREF(__pyx_v_newnode);
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_DECREF(__pyx_v_type);
- __Pyx_DECREF(__pyx_v_value);
- __Pyx_DECREF(__pyx_v_newstate);
- __Pyx_DECREF(__pyx_v_context);
- __Pyx_RefNannyFinishContext();
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":137
- * self.stack[-1] = (dfa, newstate, node)
- *
- * cdef void push(self, type, newdfa, newstate, context): # <<<<<<<<<<<<<<
- * """Push a nonterminal. (Internal)"""
- * dfa, state, node = self.stack[-1]
- */
-
-static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_type, PyObject *__pyx_v_newdfa, PyObject *__pyx_v_newstate, PyObject *__pyx_v_context) {
- PyObject *__pyx_v_dfa;
- PyObject *__pyx_v_state;
- PyObject *__pyx_v_node;
- PyObject *__pyx_v_newnode;
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_t_6;
- __Pyx_RefNannySetupContext("push");
- __pyx_v_dfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_state = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_node = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_newnode = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":139
- * cdef void push(self, type, newdfa, newstate, context):
- * """Push a nonterminal. (Internal)"""
- * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<<
- * newnode = (type, None, context, [])
- * self.stack[-1] = (dfa, newstate, node)
- */
- __pyx_t_1 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_self->stack), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_1) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (PyTuple_CheckExact(__pyx_t_1) && likely(PyTuple_GET_SIZE(__pyx_t_1) == 3)) {
- PyObject* tuple = __pyx_t_1;
- __pyx_t_2 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_4;
- __pyx_t_4 = 0;
- } else {
- __pyx_t_5 = PyObject_GetIter(__pyx_t_1); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_2 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- if (__Pyx_EndUnpack(__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 139; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_4;
- __pyx_t_4 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":140
- * """Push a nonterminal. (Internal)"""
- * dfa, state, node = self.stack[-1]
- * newnode = (type, None, context, []) # <<<<<<<<<<<<<<
- * self.stack[-1] = (dfa, newstate, node)
- * self.stack.append((newdfa, 0, newnode))
- */
- __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_1));
- __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 140; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_type);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_type);
- __Pyx_GIVEREF(__pyx_v_type);
- __Pyx_INCREF(Py_None);
- PyTuple_SET_ITEM(__pyx_t_4, 1, Py_None);
- __Pyx_GIVEREF(Py_None);
- __Pyx_INCREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_context);
- __Pyx_GIVEREF(__pyx_v_context);
- PyTuple_SET_ITEM(__pyx_t_4, 3, ((PyObject *)__pyx_t_1));
- __Pyx_GIVEREF(((PyObject *)__pyx_t_1));
- __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_newnode);
- __pyx_v_newnode = __pyx_t_4;
- __pyx_t_4 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":141
- * dfa, state, node = self.stack[-1]
- * newnode = (type, None, context, [])
- * self.stack[-1] = (dfa, newstate, node) # <<<<<<<<<<<<<<
- * self.stack.append((newdfa, 0, newnode))
- *
- */
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_dfa);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_dfa);
- __Pyx_GIVEREF(__pyx_v_dfa);
- __Pyx_INCREF(__pyx_v_newstate);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_v_newstate);
- __Pyx_GIVEREF(__pyx_v_newstate);
- __Pyx_INCREF(__pyx_v_node);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_node);
- __Pyx_GIVEREF(__pyx_v_node);
- if (__Pyx_SetItemInt(((PyObject *)__pyx_v_self->stack), -1, __pyx_t_4, sizeof(long), PyInt_FromLong) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 141; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":142
- * newnode = (type, None, context, [])
- * self.stack[-1] = (dfa, newstate, node)
- * self.stack.append((newdfa, 0, newnode)) # <<<<<<<<<<<<<<
- *
- * cdef void pop(self):
- */
- if (unlikely(__pyx_v_self->stack == Py_None)) {
- PyErr_SetString(PyExc_AttributeError, "'NoneType' object has no attribute 'append'"); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_INCREF(__pyx_v_newdfa);
- PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_newdfa);
- __Pyx_GIVEREF(__pyx_v_newdfa);
- __Pyx_INCREF(__pyx_int_0);
- PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_0);
- __Pyx_GIVEREF(__pyx_int_0);
- __Pyx_INCREF(__pyx_v_newnode);
- PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_newnode);
- __Pyx_GIVEREF(__pyx_v_newnode);
- __pyx_t_6 = PyList_Append(((PyObject *)__pyx_v_self->stack), __pyx_t_4); if (unlikely(__pyx_t_6 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 142; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
-
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.push");
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_dfa);
- __Pyx_DECREF(__pyx_v_state);
- __Pyx_DECREF(__pyx_v_node);
- __Pyx_DECREF(__pyx_v_newnode);
- __Pyx_RefNannyFinishContext();
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":144
- * self.stack.append((newdfa, 0, newnode))
- *
- * cdef void pop(self): # <<<<<<<<<<<<<<
- * """Pop a nonterminal. (Internal)"""
- * popdfa, popstate, popnode = self.stack.pop()
- */
-
-static void __pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self) {
- PyObject *__pyx_v_popdfa;
- PyObject *__pyx_v_popstate;
- PyObject *__pyx_v_popnode;
- PyObject *__pyx_v_newnode;
- PyObject *__pyx_v_dfa;
- PyObject *__pyx_v_state;
- PyObject *__pyx_v_node;
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- PyObject *__pyx_t_5 = NULL;
- int __pyx_t_6;
- __Pyx_RefNannySetupContext("pop");
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __pyx_v_popdfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_popstate = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_popnode = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_newnode = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_dfa = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_state = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_node = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":146
- * cdef void pop(self):
- * """Pop a nonterminal. (Internal)"""
- * popdfa, popstate, popnode = self.stack.pop() # <<<<<<<<<<<<<<
- * newnode = self.convert(popnode)
- * if newnode is not None:
- */
- __pyx_t_1 = PyObject_GetAttr(((PyObject *)__pyx_v_self->stack), __pyx_n_s__pop); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyObject_Call(__pyx_t_1, ((PyObject *)__pyx_empty_tuple), NULL); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 3)) {
- PyObject* tuple = __pyx_t_2;
- __pyx_t_1 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_1);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_popdfa);
- __pyx_v_popdfa = __pyx_t_1;
- __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_popstate);
- __pyx_v_popstate = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_popnode);
- __pyx_v_popnode = __pyx_t_4;
- __pyx_t_4 = 0;
- } else {
- __pyx_t_5 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_1 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 2); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- if (__Pyx_EndUnpack(__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 146; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(__pyx_v_popdfa);
- __pyx_v_popdfa = __pyx_t_1;
- __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_popstate);
- __pyx_v_popstate = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_popnode);
- __pyx_v_popnode = __pyx_t_4;
- __pyx_t_4 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":147
- * """Pop a nonterminal. (Internal)"""
- * popdfa, popstate, popnode = self.stack.pop()
- * newnode = self.convert(popnode) # <<<<<<<<<<<<<<
- * if newnode is not None:
- * if self.stack:
- */
- if (!(likely(PyTuple_CheckExact(__pyx_v_popnode))||((__pyx_v_popnode) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected tuple, got %.200s", Py_TYPE(__pyx_v_popnode)->tp_name), 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_2 = ((struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser *)__pyx_v_self->__pyx_vtab)->convert(__pyx_v_self, ((PyObject *)__pyx_v_popnode)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 147; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(__pyx_v_newnode);
- __pyx_v_newnode = __pyx_t_2;
- __pyx_t_2 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":148
- * popdfa, popstate, popnode = self.stack.pop()
- * newnode = self.convert(popnode)
- * if newnode is not None: # <<<<<<<<<<<<<<
- * if self.stack:
- * dfa, state, node = self.stack[-1]
- */
- __pyx_t_6 = (__pyx_v_newnode != Py_None);
- if (__pyx_t_6) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":149
- * newnode = self.convert(popnode)
- * if newnode is not None:
- * if self.stack: # <<<<<<<<<<<<<<
- * dfa, state, node = self.stack[-1]
- * node[-1].append(newnode)
- */
- __pyx_t_6 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->stack)); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 149; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__pyx_t_6) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":150
- * if newnode is not None:
- * if self.stack:
- * dfa, state, node = self.stack[-1] # <<<<<<<<<<<<<<
- * node[-1].append(newnode)
- * else:
- */
- __pyx_t_2 = __Pyx_GetItemInt_List(((PyObject *)__pyx_v_self->stack), -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- if (PyTuple_CheckExact(__pyx_t_2) && likely(PyTuple_GET_SIZE(__pyx_t_2) == 3)) {
- PyObject* tuple = __pyx_t_2;
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_4);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_1 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_4;
- __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_1;
- __pyx_t_1 = 0;
- } else {
- __pyx_t_5 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_5)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_5);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __pyx_t_4 = __Pyx_UnpackItem(__pyx_t_5, 0); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = __Pyx_UnpackItem(__pyx_t_5, 1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_1 = __Pyx_UnpackItem(__pyx_t_5, 2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (__Pyx_EndUnpack(__pyx_t_5) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 150; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
- __Pyx_DECREF(__pyx_v_dfa);
- __pyx_v_dfa = __pyx_t_4;
- __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_v_state);
- __pyx_v_state = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_node);
- __pyx_v_node = __pyx_t_1;
- __pyx_t_1 = 0;
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":151
- * if self.stack:
- * dfa, state, node = self.stack[-1]
- * node[-1].append(newnode) # <<<<<<<<<<<<<<
- * else:
- * self.rootnode = newnode
- */
- __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_node, -1, sizeof(long), PyInt_FromLong); if (!__pyx_t_2) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __pyx_t_1 = __Pyx_PyObject_Append(__pyx_t_2, __pyx_v_newnode); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 151; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- goto __pyx_L4;
- }
- /*else*/ {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":153
- * node[-1].append(newnode)
- * else:
- * self.rootnode = newnode # <<<<<<<<<<<<<<
- * self.rootnode.used_names = self.used_names
- *
- */
- __Pyx_INCREF(__pyx_v_newnode);
- __Pyx_GIVEREF(__pyx_v_newnode);
- __Pyx_GOTREF(__pyx_v_self->rootnode);
- __Pyx_DECREF(__pyx_v_self->rootnode);
- __pyx_v_self->rootnode = __pyx_v_newnode;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":154
- * else:
- * self.rootnode = newnode
- * self.rootnode.used_names = self.used_names # <<<<<<<<<<<<<<
- *
- * cdef convert(self, tuple raw_node):
- */
- if (PyObject_SetAttr(__pyx_v_self->rootnode, __pyx_n_s__used_names, ((PyObject *)__pyx_v_self->used_names)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 154; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- __pyx_L4:;
- goto __pyx_L3;
- }
- __pyx_L3:;
-
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_XDECREF(__pyx_t_5);
- __Pyx_WriteUnraisable("sphinx.pycode.pgen2.parse.Parser.pop");
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_popdfa);
- __Pyx_DECREF(__pyx_v_popstate);
- __Pyx_DECREF(__pyx_v_popnode);
- __Pyx_DECREF(__pyx_v_newnode);
- __Pyx_DECREF(__pyx_v_dfa);
- __Pyx_DECREF(__pyx_v_state);
- __Pyx_DECREF(__pyx_v_node);
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_RefNannyFinishContext();
-}
-
-/* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":156
- * self.rootnode.used_names = self.used_names
- *
- * cdef convert(self, tuple raw_node): # <<<<<<<<<<<<<<
- * type, value, context, children = raw_node
- * if children or type in self._grammar_number2symbol:
- */
-
-static PyObject *__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *__pyx_v_self, PyObject *__pyx_v_raw_node) {
- PyObject *__pyx_v_type;
- PyObject *__pyx_v_value;
- PyObject *__pyx_v_context;
- PyObject *__pyx_v_children;
- PyObject *__pyx_r = NULL;
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- int __pyx_t_5;
- int __pyx_t_6;
- int __pyx_t_7;
- Py_ssize_t __pyx_t_8;
- __Pyx_RefNannySetupContext("convert");
- __Pyx_INCREF((PyObject *)__pyx_v_self);
- __Pyx_INCREF(__pyx_v_raw_node);
- __pyx_v_type = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_value = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_context = Py_None; __Pyx_INCREF(Py_None);
- __pyx_v_children = Py_None; __Pyx_INCREF(Py_None);
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":157
- *
- * cdef convert(self, tuple raw_node):
- * type, value, context, children = raw_node # <<<<<<<<<<<<<<
- * if children or type in self._grammar_number2symbol:
- * # If there's exactly one child, return that child instead of
- */
- if (likely(((PyObject *)__pyx_v_raw_node) != Py_None) && likely(PyTuple_GET_SIZE(((PyObject *)__pyx_v_raw_node)) == 4)) {
- PyObject* tuple = ((PyObject *)__pyx_v_raw_node);
- __pyx_t_1 = PyTuple_GET_ITEM(tuple, 0); __Pyx_INCREF(__pyx_t_1);
- __pyx_t_2 = PyTuple_GET_ITEM(tuple, 1); __Pyx_INCREF(__pyx_t_2);
- __pyx_t_3 = PyTuple_GET_ITEM(tuple, 2); __Pyx_INCREF(__pyx_t_3);
- __pyx_t_4 = PyTuple_GET_ITEM(tuple, 3); __Pyx_INCREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_v_type);
- __pyx_v_type = __pyx_t_1;
- __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_v_value);
- __pyx_v_value = __pyx_t_2;
- __pyx_t_2 = 0;
- __Pyx_DECREF(__pyx_v_context);
- __pyx_v_context = __pyx_t_3;
- __pyx_t_3 = 0;
- __Pyx_DECREF(__pyx_v_children);
- __pyx_v_children = __pyx_t_4;
- __pyx_t_4 = 0;
- } else {
- __Pyx_UnpackTupleError(((PyObject *)__pyx_v_raw_node), 4);
- {__pyx_filename = __pyx_f[0]; __pyx_lineno = 157; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":158
- * cdef convert(self, tuple raw_node):
- * type, value, context, children = raw_node
- * if children or type in self._grammar_number2symbol: # <<<<<<<<<<<<<<
- * # If there's exactly one child, return that child instead of
- * # creating a new node.
- */
- __pyx_t_5 = __Pyx_PyObject_IsTrue(__pyx_v_children); if (unlikely(__pyx_t_5 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (!__pyx_t_5) {
- if (unlikely(((PyObject *)__pyx_v_self->_grammar_number2symbol) == Py_None)) {
- __Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- } else {
- __pyx_t_6 = ((PyDict_Contains(((PyObject *)__pyx_v_self->_grammar_number2symbol), __pyx_v_type))); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 158; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- }
- __pyx_t_7 = __pyx_t_6;
- } else {
- __pyx_t_7 = __pyx_t_5;
- }
- if (__pyx_t_7) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":161
- * # If there's exactly one child, return that child instead of
- * # creating a new node.
- * if len(children) == 1: # <<<<<<<<<<<<<<
- * return children[0]
- * return Node(type, children, context=context)
- */
- __pyx_t_8 = PyObject_Length(__pyx_v_children); if (unlikely(__pyx_t_8 == -1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 161; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_7 = (__pyx_t_8 == 1);
- if (__pyx_t_7) {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":162
- * # creating a new node.
- * if len(children) == 1:
- * return children[0] # <<<<<<<<<<<<<<
- * return Node(type, children, context=context)
- * else:
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_children, 0, sizeof(long), PyInt_FromLong); if (!__pyx_t_4) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 162; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_r = __pyx_t_4;
- __pyx_t_4 = 0;
- goto __pyx_L0;
- goto __pyx_L4;
- }
- __pyx_L4:;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":163
- * if len(children) == 1:
- * return children[0]
- * return Node(type, children, context=context) # <<<<<<<<<<<<<<
- * else:
- * return Leaf(type, value, context=context)
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_4 = __Pyx_GetName(__pyx_m, __pyx_n_s__Node); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_INCREF(__pyx_v_type);
- PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_type);
- __Pyx_GIVEREF(__pyx_v_type);
- __Pyx_INCREF(__pyx_v_children);
- PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_children);
- __Pyx_GIVEREF(__pyx_v_children);
- __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_n_s__context), __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_1 = PyEval_CallObjectWithKeywords(__pyx_t_4, __pyx_t_3, ((PyObject *)__pyx_t_2)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 163; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- __pyx_r = __pyx_t_1;
- __pyx_t_1 = 0;
- goto __pyx_L0;
- goto __pyx_L3;
- }
- /*else*/ {
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":165
- * return Node(type, children, context=context)
- * else:
- * return Leaf(type, value, context=context) # <<<<<<<<<<<<<<
- */
- __Pyx_XDECREF(__pyx_r);
- __pyx_t_1 = __Pyx_GetName(__pyx_m, __pyx_n_s__Leaf); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_INCREF(__pyx_v_type);
- PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_type);
- __Pyx_GIVEREF(__pyx_v_type);
- __Pyx_INCREF(__pyx_v_value);
- PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_value);
- __Pyx_GIVEREF(__pyx_v_value);
- __pyx_t_3 = PyDict_New(); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_3));
- if (PyDict_SetItem(__pyx_t_3, ((PyObject *)__pyx_n_s__context), __pyx_v_context) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_4 = PyEval_CallObjectWithKeywords(__pyx_t_1, __pyx_t_2, ((PyObject *)__pyx_t_3)); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 165; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_3)); __pyx_t_3 = 0;
- __pyx_r = __pyx_t_4;
- __pyx_t_4 = 0;
- goto __pyx_L0;
- }
- __pyx_L3:;
-
- __pyx_r = Py_None; __Pyx_INCREF(Py_None);
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- __Pyx_AddTraceback("sphinx.pycode.pgen2.parse.Parser.convert");
- __pyx_r = 0;
- __pyx_L0:;
- __Pyx_DECREF(__pyx_v_type);
- __Pyx_DECREF(__pyx_v_value);
- __Pyx_DECREF(__pyx_v_context);
- __Pyx_DECREF(__pyx_v_children);
- __Pyx_DECREF((PyObject *)__pyx_v_self);
- __Pyx_DECREF(__pyx_v_raw_node);
- __Pyx_XGIVEREF(__pyx_r);
- __Pyx_RefNannyFinishContext();
- return __pyx_r;
-}
-static struct __pyx_vtabstruct_6sphinx_6pycode_5pgen2_5parse_Parser __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser;
-
-static PyObject *__pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser(PyTypeObject *t, PyObject *a, PyObject *k) {
- struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p;
- PyObject *o = (*t->tp_alloc)(t, 0);
- if (!o) return 0;
- p = ((struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o);
- p->__pyx_vtab = __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser;
- p->grammar = Py_None; Py_INCREF(Py_None);
- p->rootnode = Py_None; Py_INCREF(Py_None);
- p->stack = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->used_names = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->_grammar_labels = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->_grammar_dfas = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->_grammar_keywords = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->_grammar_tokens = ((PyObject *)Py_None); Py_INCREF(Py_None);
- p->_grammar_number2symbol = ((PyObject *)Py_None); Py_INCREF(Py_None);
- return o;
-}
-
-static void __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) {
- struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o;
- Py_XDECREF(p->grammar);
- Py_XDECREF(p->rootnode);
- Py_XDECREF(((PyObject *)p->stack));
- Py_XDECREF(((PyObject *)p->used_names));
- Py_XDECREF(((PyObject *)p->_grammar_labels));
- Py_XDECREF(((PyObject *)p->_grammar_dfas));
- Py_XDECREF(((PyObject *)p->_grammar_keywords));
- Py_XDECREF(((PyObject *)p->_grammar_tokens));
- Py_XDECREF(((PyObject *)p->_grammar_number2symbol));
- (*Py_TYPE(o)->tp_free)(o);
-}
-
-static int __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o, visitproc v, void *a) {
- int e;
- struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o;
- if (p->grammar) {
- e = (*v)(p->grammar, a); if (e) return e;
- }
- if (p->rootnode) {
- e = (*v)(p->rootnode, a); if (e) return e;
- }
- if (p->stack) {
- e = (*v)(p->stack, a); if (e) return e;
- }
- if (p->used_names) {
- e = (*v)(p->used_names, a); if (e) return e;
- }
- if (p->_grammar_labels) {
- e = (*v)(p->_grammar_labels, a); if (e) return e;
- }
- if (p->_grammar_dfas) {
- e = (*v)(p->_grammar_dfas, a); if (e) return e;
- }
- if (p->_grammar_keywords) {
- e = (*v)(p->_grammar_keywords, a); if (e) return e;
- }
- if (p->_grammar_tokens) {
- e = (*v)(p->_grammar_tokens, a); if (e) return e;
- }
- if (p->_grammar_number2symbol) {
- e = (*v)(p->_grammar_number2symbol, a); if (e) return e;
- }
- return 0;
-}
-
-static int __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser(PyObject *o) {
- struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *p = (struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *)o;
- PyObject* tmp;
- tmp = ((PyObject*)p->grammar);
- p->grammar = Py_None; Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->rootnode);
- p->rootnode = Py_None; Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->stack);
- p->stack = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->used_names);
- p->used_names = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->_grammar_labels);
- p->_grammar_labels = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->_grammar_dfas);
- p->_grammar_dfas = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->_grammar_keywords);
- p->_grammar_keywords = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->_grammar_tokens);
- p->_grammar_tokens = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- tmp = ((PyObject*)p->_grammar_number2symbol);
- p->_grammar_number2symbol = ((PyObject *)Py_None); Py_INCREF(Py_None);
- Py_XDECREF(tmp);
- return 0;
-}
-
-static PyObject *__pyx_getprop_6sphinx_6pycode_5pgen2_5parse_6Parser_stack(PyObject *o, void *x) {
- return __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___get__(o);
-}
-
-static int __pyx_setprop_6sphinx_6pycode_5pgen2_5parse_6Parser_stack(PyObject *o, PyObject *v, void *x) {
- if (v) {
- return __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_5stack___set__(o, v);
- }
- else {
- PyErr_SetString(PyExc_NotImplementedError, "__del__");
- return -1;
- }
-}
-
-static PyObject *__pyx_getprop_6sphinx_6pycode_5pgen2_5parse_6Parser_used_names(PyObject *o, void *x) {
- return __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___get__(o);
-}
-
-static int __pyx_setprop_6sphinx_6pycode_5pgen2_5parse_6Parser_used_names(PyObject *o, PyObject *v, void *x) {
- if (v) {
- return __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_10used_names___set__(o, v);
- }
- else {
- PyErr_SetString(PyExc_NotImplementedError, "__del__");
- return -1;
- }
-}
-
-static struct PyMethodDef __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser[] = {
- {__Pyx_NAMESTR("setup"), (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_setup, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(0)},
- {__Pyx_NAMESTR("addtoken"), (PyCFunction)__pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken, METH_VARARGS|METH_KEYWORDS, __Pyx_DOCSTR(__pyx_doc_6sphinx_6pycode_5pgen2_5parse_6Parser_addtoken)},
- {0, 0, 0, 0}
-};
-
-static struct PyMemberDef __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser[] = {
- {(char *)"grammar", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, grammar), 0, 0},
- {(char *)"rootnode", T_OBJECT, offsetof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser, rootnode), 0, 0},
- {0, 0, 0, 0, 0}
-};
-
-static struct PyGetSetDef __pyx_getsets_6sphinx_6pycode_5pgen2_5parse_Parser[] = {
- {(char *)"stack", __pyx_getprop_6sphinx_6pycode_5pgen2_5parse_6Parser_stack, __pyx_setprop_6sphinx_6pycode_5pgen2_5parse_6Parser_stack, 0, 0},
- {(char *)"used_names", __pyx_getprop_6sphinx_6pycode_5pgen2_5parse_6Parser_used_names, __pyx_setprop_6sphinx_6pycode_5pgen2_5parse_6Parser_used_names, 0, 0},
- {0, 0, 0, 0, 0}
-};
-
-static PyNumberMethods __pyx_tp_as_number_Parser = {
- 0, /*nb_add*/
- 0, /*nb_subtract*/
- 0, /*nb_multiply*/
- #if PY_MAJOR_VERSION < 3
- 0, /*nb_divide*/
- #endif
- 0, /*nb_remainder*/
- 0, /*nb_divmod*/
- 0, /*nb_power*/
- 0, /*nb_negative*/
- 0, /*nb_positive*/
- 0, /*nb_absolute*/
- 0, /*nb_nonzero*/
- 0, /*nb_invert*/
- 0, /*nb_lshift*/
- 0, /*nb_rshift*/
- 0, /*nb_and*/
- 0, /*nb_xor*/
- 0, /*nb_or*/
- #if PY_MAJOR_VERSION < 3
- 0, /*nb_coerce*/
- #endif
- 0, /*nb_int*/
- #if PY_MAJOR_VERSION >= 3
- 0, /*reserved*/
- #else
- 0, /*nb_long*/
- #endif
- 0, /*nb_float*/
- #if PY_MAJOR_VERSION < 3
- 0, /*nb_oct*/
- #endif
- #if PY_MAJOR_VERSION < 3
- 0, /*nb_hex*/
- #endif
- 0, /*nb_inplace_add*/
- 0, /*nb_inplace_subtract*/
- 0, /*nb_inplace_multiply*/
- #if PY_MAJOR_VERSION < 3
- 0, /*nb_inplace_divide*/
- #endif
- 0, /*nb_inplace_remainder*/
- 0, /*nb_inplace_power*/
- 0, /*nb_inplace_lshift*/
- 0, /*nb_inplace_rshift*/
- 0, /*nb_inplace_and*/
- 0, /*nb_inplace_xor*/
- 0, /*nb_inplace_or*/
- 0, /*nb_floor_divide*/
- 0, /*nb_true_divide*/
- 0, /*nb_inplace_floor_divide*/
- 0, /*nb_inplace_true_divide*/
- #if (PY_MAJOR_VERSION >= 3) || (Py_TPFLAGS_DEFAULT & Py_TPFLAGS_HAVE_INDEX)
- 0, /*nb_index*/
- #endif
-};
-
-static PySequenceMethods __pyx_tp_as_sequence_Parser = {
- 0, /*sq_length*/
- 0, /*sq_concat*/
- 0, /*sq_repeat*/
- 0, /*sq_item*/
- 0, /*sq_slice*/
- 0, /*sq_ass_item*/
- 0, /*sq_ass_slice*/
- 0, /*sq_contains*/
- 0, /*sq_inplace_concat*/
- 0, /*sq_inplace_repeat*/
-};
-
-static PyMappingMethods __pyx_tp_as_mapping_Parser = {
- 0, /*mp_length*/
- 0, /*mp_subscript*/
- 0, /*mp_ass_subscript*/
-};
-
-static PyBufferProcs __pyx_tp_as_buffer_Parser = {
- #if PY_MAJOR_VERSION < 3
- 0, /*bf_getreadbuffer*/
- #endif
- #if PY_MAJOR_VERSION < 3
- 0, /*bf_getwritebuffer*/
- #endif
- #if PY_MAJOR_VERSION < 3
- 0, /*bf_getsegcount*/
- #endif
- #if PY_MAJOR_VERSION < 3
- 0, /*bf_getcharbuffer*/
- #endif
- #if PY_VERSION_HEX >= 0x02060000
- 0, /*bf_getbuffer*/
- #endif
- #if PY_VERSION_HEX >= 0x02060000
- 0, /*bf_releasebuffer*/
- #endif
-};
-
-PyTypeObject __pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser = {
- PyVarObject_HEAD_INIT(0, 0)
- __Pyx_NAMESTR("sphinx.pycode.pgen2.parse.Parser"), /*tp_name*/
- sizeof(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- __pyx_tp_dealloc_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- &__pyx_tp_as_number_Parser, /*tp_as_number*/
- &__pyx_tp_as_sequence_Parser, /*tp_as_sequence*/
- &__pyx_tp_as_mapping_Parser, /*tp_as_mapping*/
- 0, /*tp_hash*/
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- &__pyx_tp_as_buffer_Parser, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
- 0, /*tp_doc*/
- __pyx_tp_traverse_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_traverse*/
- __pyx_tp_clear_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_clear*/
- 0, /*tp_richcompare*/
- 0, /*tp_weaklistoffset*/
- 0, /*tp_iter*/
- 0, /*tp_iternext*/
- __pyx_methods_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_methods*/
- __pyx_members_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_members*/
- __pyx_getsets_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_getset*/
- 0, /*tp_base*/
- 0, /*tp_dict*/
- 0, /*tp_descr_get*/
- 0, /*tp_descr_set*/
- 0, /*tp_dictoffset*/
- __pyx_pf_6sphinx_6pycode_5pgen2_5parse_6Parser___init__, /*tp_init*/
- 0, /*tp_alloc*/
- __pyx_tp_new_6sphinx_6pycode_5pgen2_5parse_Parser, /*tp_new*/
- 0, /*tp_free*/
- 0, /*tp_is_gc*/
- 0, /*tp_bases*/
- 0, /*tp_mro*/
- 0, /*tp_cache*/
- 0, /*tp_subclasses*/
- 0, /*tp_weaklist*/
- 0, /*tp_del*/
- #if PY_VERSION_HEX >= 0x02060000
- 0, /*tp_version_tag*/
- #endif
-};
-
-static struct PyMethodDef __pyx_methods[] = {
- {0, 0, 0, 0}
-};
-
-static void __pyx_init_filenames(void); /*proto*/
-
-#if PY_MAJOR_VERSION >= 3
-static struct PyModuleDef __pyx_moduledef = {
- PyModuleDef_HEAD_INIT,
- __Pyx_NAMESTR("parse"),
- __Pyx_DOCSTR(__pyx_k_6), /* m_doc */
- -1, /* m_size */
- __pyx_methods /* m_methods */,
- NULL, /* m_reload */
- NULL, /* m_traverse */
- NULL, /* m_clear */
- NULL /* m_free */
-};
-#endif
-
-static __Pyx_StringTabEntry __pyx_string_tab[] = {
- {&__pyx_kp_s_1, __pyx_k_1, sizeof(__pyx_k_1), 0, 0, 1, 0},
- {&__pyx_n_s_2, __pyx_k_2, sizeof(__pyx_k_2), 0, 0, 1, 1},
- {&__pyx_kp_s_3, __pyx_k_3, sizeof(__pyx_k_3), 0, 0, 1, 0},
- {&__pyx_kp_s_4, __pyx_k_4, sizeof(__pyx_k_4), 0, 0, 1, 0},
- {&__pyx_kp_s_5, __pyx_k_5, sizeof(__pyx_k_5), 0, 0, 1, 0},
- {&__pyx_n_s_7, __pyx_k_7, sizeof(__pyx_k_7), 0, 0, 1, 1},
- {&__pyx_kp_s_8, __pyx_k_8, sizeof(__pyx_k_8), 0, 0, 1, 0},
- {&__pyx_kp_u_9, __pyx_k_9, sizeof(__pyx_k_9), 0, 1, 0, 0},
- {&__pyx_n_s__Exception, __pyx_k__Exception, sizeof(__pyx_k__Exception), 0, 0, 1, 1},
- {&__pyx_n_s__Leaf, __pyx_k__Leaf, sizeof(__pyx_k__Leaf), 0, 0, 1, 1},
- {&__pyx_n_s__Node, __pyx_k__Node, sizeof(__pyx_k__Node), 0, 0, 1, 1},
- {&__pyx_n_s__ParseError, __pyx_k__ParseError, sizeof(__pyx_k__ParseError), 0, 0, 1, 1},
- {&__pyx_n_s__Parser, __pyx_k__Parser, sizeof(__pyx_k__Parser), 0, 0, 1, 1},
- {&__pyx_n_s____init__, __pyx_k____init__, sizeof(__pyx_k____init__), 0, 0, 1, 1},
- {&__pyx_n_s____main__, __pyx_k____main__, sizeof(__pyx_k____main__), 0, 0, 1, 1},
- {&__pyx_n_s____test__, __pyx_k____test__, sizeof(__pyx_k____test__), 0, 0, 1, 1},
- {&__pyx_n_s___grammar_dfas, __pyx_k___grammar_dfas, sizeof(__pyx_k___grammar_dfas), 0, 0, 1, 1},
- {&__pyx_n_s___grammar_keywords, __pyx_k___grammar_keywords, sizeof(__pyx_k___grammar_keywords), 0, 0, 1, 1},
- {&__pyx_n_s___grammar_labels, __pyx_k___grammar_labels, sizeof(__pyx_k___grammar_labels), 0, 0, 1, 1},
- {&__pyx_n_s___grammar_start, __pyx_k___grammar_start, sizeof(__pyx_k___grammar_start), 0, 0, 1, 1},
- {&__pyx_n_s___grammar_tokens, __pyx_k___grammar_tokens, sizeof(__pyx_k___grammar_tokens), 0, 0, 1, 1},
- {&__pyx_n_s__add, __pyx_k__add, sizeof(__pyx_k__add), 0, 0, 1, 1},
- {&__pyx_n_s__addtoken, __pyx_k__addtoken, sizeof(__pyx_k__addtoken), 0, 0, 1, 1},
- {&__pyx_n_s__classify, __pyx_k__classify, sizeof(__pyx_k__classify), 0, 0, 1, 1},
- {&__pyx_n_s__context, __pyx_k__context, sizeof(__pyx_k__context), 0, 0, 1, 1},
- {&__pyx_n_s__convert, __pyx_k__convert, sizeof(__pyx_k__convert), 0, 0, 1, 1},
- {&__pyx_n_s__dfas, __pyx_k__dfas, sizeof(__pyx_k__dfas), 0, 0, 1, 1},
- {&__pyx_n_s__grammar, __pyx_k__grammar, sizeof(__pyx_k__grammar), 0, 0, 1, 1},
- {&__pyx_n_s__keywords, __pyx_k__keywords, sizeof(__pyx_k__keywords), 0, 0, 1, 1},
- {&__pyx_n_s__labels, __pyx_k__labels, sizeof(__pyx_k__labels), 0, 0, 1, 1},
- {&__pyx_n_s__msg, __pyx_k__msg, sizeof(__pyx_k__msg), 0, 0, 1, 1},
- {&__pyx_n_s__number2symbol, __pyx_k__number2symbol, sizeof(__pyx_k__number2symbol), 0, 0, 1, 1},
- {&__pyx_n_s__pop, __pyx_k__pop, sizeof(__pyx_k__pop), 0, 0, 1, 1},
- {&__pyx_n_s__push, __pyx_k__push, sizeof(__pyx_k__push), 0, 0, 1, 1},
- {&__pyx_n_s__rootnode, __pyx_k__rootnode, sizeof(__pyx_k__rootnode), 0, 0, 1, 1},
- {&__pyx_n_s__self, __pyx_k__self, sizeof(__pyx_k__self), 0, 0, 1, 1},
- {&__pyx_n_s__shift, __pyx_k__shift, sizeof(__pyx_k__shift), 0, 0, 1, 1},
- {&__pyx_n_s__stack, __pyx_k__stack, sizeof(__pyx_k__stack), 0, 0, 1, 1},
- {&__pyx_n_s__start, __pyx_k__start, sizeof(__pyx_k__start), 0, 0, 1, 1},
- {&__pyx_n_s__tokens, __pyx_k__tokens, sizeof(__pyx_k__tokens), 0, 0, 1, 1},
- {&__pyx_n_s__type, __pyx_k__type, sizeof(__pyx_k__type), 0, 0, 1, 1},
- {&__pyx_n_s__used_names, __pyx_k__used_names, sizeof(__pyx_k__used_names), 0, 0, 1, 1},
- {&__pyx_n_s__value, __pyx_k__value, sizeof(__pyx_k__value), 0, 0, 1, 1},
- {0, 0, 0, 0, 0, 0, 0}
-};
-static int __Pyx_InitCachedBuiltins(void) {
- __pyx_builtin_Exception = __Pyx_GetName(__pyx_b, __pyx_n_s__Exception); if (!__pyx_builtin_Exception) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- return 0;
- __pyx_L1_error:;
- return -1;
-}
-
-static int __Pyx_InitGlobals(void) {
- #if PY_VERSION_HEX < 0x02040000
- if (unlikely(__Pyx_Py23SetsImport() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- #endif
- if (__Pyx_InitStrings(__pyx_string_tab) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- return 0;
- __pyx_L1_error:;
- return -1;
-}
-
-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC initparse(void); /*proto*/
-PyMODINIT_FUNC initparse(void)
-#else
-PyMODINIT_FUNC PyInit_parse(void); /*proto*/
-PyMODINIT_FUNC PyInit_parse(void)
-#endif
-{
- PyObject *__pyx_t_1 = NULL;
- PyObject *__pyx_t_2 = NULL;
- PyObject *__pyx_t_3 = NULL;
- PyObject *__pyx_t_4 = NULL;
- #if CYTHON_REFNANNY
- void* __pyx_refnanny = NULL;
- __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
- if (!__Pyx_RefNanny) {
- PyErr_Clear();
- __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
- if (!__Pyx_RefNanny)
- Py_FatalError("failed to import 'refnanny' module");
- }
- __pyx_refnanny = __Pyx_RefNanny->SetupContext("PyMODINIT_FUNC PyInit_parse(void)", __LINE__, __FILE__);
- #endif
- __pyx_init_filenames();
- __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- #if PY_MAJOR_VERSION < 3
- __pyx_empty_bytes = PyString_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- #else
- __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- #endif
- /*--- Library function declarations ---*/
- /*--- Threads initialization code ---*/
- #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
- #ifdef WITH_THREAD /* Python build with threading support? */
- PyEval_InitThreads();
- #endif
- #endif
- /*--- Module creation code ---*/
- #if PY_MAJOR_VERSION < 3
- __pyx_m = Py_InitModule4(__Pyx_NAMESTR("parse"), __pyx_methods, __Pyx_DOCSTR(__pyx_k_6), 0, PYTHON_API_VERSION);
- #else
- __pyx_m = PyModule_Create(&__pyx_moduledef);
- #endif
- if (!__pyx_m) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- #if PY_MAJOR_VERSION < 3
- Py_INCREF(__pyx_m);
- #endif
- __pyx_b = PyImport_AddModule(__Pyx_NAMESTR(__Pyx_BUILTIN_MODULE_NAME));
- if (!__pyx_b) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- if (__Pyx_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- /*--- Initialize various global constants etc. ---*/
- if (unlikely(__Pyx_InitGlobals() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__pyx_module_is_main_sphinx__pycode__pgen2__parse) {
- if (__Pyx_SetAttrString(__pyx_m, "__name__", __pyx_n_s____main__) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
- }
- /*--- Builtin init code ---*/
- if (unlikely(__Pyx_InitCachedBuiltins() < 0)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- /*--- Global init code ---*/
- /*--- Function export code ---*/
- /*--- Type init code ---*/
- __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser;
- #if PY_MAJOR_VERSION >= 3
- __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.classify = (int (*)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, int, PyObject *, PyObject *))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify;
- __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.shift = (void (*)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift;
- __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.push = (void (*)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *, PyObject *, PyObject *, PyObject *))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push;
- __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.pop = (void (*)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop;
- __pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.convert = (PyObject *(*)(struct __pyx_obj_6sphinx_6pycode_5pgen2_5parse_Parser *, PyObject *))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert;
- #else
- *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.classify = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_classify;
- *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.shift = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_shift;
- *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.push = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_push;
- *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.pop = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_pop;
- *(void(**)(void))&__pyx_vtable_6sphinx_6pycode_5pgen2_5parse_Parser.convert = (void(*)(void))__pyx_f_6sphinx_6pycode_5pgen2_5parse_6Parser_convert;
- #endif
- if (PyType_Ready(&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__Pyx_SetVtable(__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser.tp_dict, __pyx_vtabptr_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- if (__Pyx_SetAttrString(__pyx_m, "Parser", (PyObject *)&__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 31; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_ptype_6sphinx_6pycode_5pgen2_5parse_Parser = &__pyx_type_6sphinx_6pycode_5pgen2_5parse_Parser;
- /*--- Type import code ---*/
- /*--- Function import code ---*/
- /*--- Execution code ---*/
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":15
- * """
- *
- * from sphinx.pycode.nodes import Node, Leaf # <<<<<<<<<<<<<<
- *
- * DEF NAME = 1
- */
- __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_1));
- __Pyx_INCREF(((PyObject *)__pyx_n_s__Node));
- PyList_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_n_s__Node));
- __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Node));
- __Pyx_INCREF(((PyObject *)__pyx_n_s__Leaf));
- PyList_SET_ITEM(__pyx_t_1, 1, ((PyObject *)__pyx_n_s__Leaf));
- __Pyx_GIVEREF(((PyObject *)__pyx_n_s__Leaf));
- __pyx_t_2 = __Pyx_Import(((PyObject *)__pyx_n_s_7), ((PyObject *)__pyx_t_1)); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_2);
- __Pyx_DECREF(((PyObject *)__pyx_t_1)); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__Node); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Node, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __pyx_t_1 = PyObject_GetAttr(__pyx_t_2, __pyx_n_s__Leaf); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__Leaf, __pyx_t_1) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 15; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":19
- * DEF NAME = 1
- *
- * class ParseError(Exception): # <<<<<<<<<<<<<<
- * """Exception to signal the parser is stuck."""
- *
- */
- __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __Pyx_INCREF(__pyx_builtin_Exception);
- PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_builtin_Exception);
- __Pyx_GIVEREF(__pyx_builtin_Exception);
- if (PyDict_SetItemString(((PyObject *)__pyx_t_2), "__doc__", ((PyObject *)__pyx_kp_s_8)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __pyx_t_3 = __Pyx_CreateClass(__pyx_t_1, ((PyObject *)__pyx_t_2), __pyx_n_s__ParseError, "sphinx.pycode.pgen2.parse"); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":22
- * """Exception to signal the parser is stuck."""
- *
- * def __init__(self, msg, type, value, context): # <<<<<<<<<<<<<<
- * Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- * (msg, type, value, context))
- */
- __pyx_t_1 = PyCFunction_New(&__pyx_mdef_6sphinx_6pycode_5pgen2_5parse_10ParseError___init__, 0); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_1);
- __pyx_t_4 = PyMethod_New(__pyx_t_1, 0, __pyx_t_3); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
- if (PyObject_SetAttr(__pyx_t_3, __pyx_n_s____init__, __pyx_t_4) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 22; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s__ParseError, __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 19; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
-
- /* "/home/gbr/devel/sphinx/sphinx/pycode/pgen2/parse.pyx":1
- * # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # <<<<<<<<<<<<<<
- * # Licensed to PSF under a Contributor Agreement.
- *
- */
- __pyx_t_2 = PyDict_New(); if (unlikely(!__pyx_t_2)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(((PyObject *)__pyx_t_2));
- __pyx_t_3 = PyObject_GetAttr(__pyx_m, __pyx_n_s__Parser); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_3);
- __pyx_t_4 = PyObject_GetAttr(__pyx_t_3, __pyx_n_s__addtoken); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_GOTREF(__pyx_t_4);
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- __pyx_t_3 = __Pyx_GetAttrString(__pyx_t_4, "__doc__");
- __Pyx_GOTREF(__pyx_t_3);
- __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
- if (PyDict_SetItem(__pyx_t_2, ((PyObject *)__pyx_kp_u_9), __pyx_t_3) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
- if (PyObject_SetAttr(__pyx_m, __pyx_n_s____test__, ((PyObject *)__pyx_t_2)) < 0) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 1; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
- __Pyx_DECREF(((PyObject *)__pyx_t_2)); __pyx_t_2 = 0;
- goto __pyx_L0;
- __pyx_L1_error:;
- __Pyx_XDECREF(__pyx_t_1);
- __Pyx_XDECREF(__pyx_t_2);
- __Pyx_XDECREF(__pyx_t_3);
- __Pyx_XDECREF(__pyx_t_4);
- if (__pyx_m) {
- __Pyx_AddTraceback("init sphinx.pycode.pgen2.parse");
- Py_DECREF(__pyx_m); __pyx_m = 0;
- } else if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ImportError, "init sphinx.pycode.pgen2.parse");
- }
- __pyx_L0:;
- __Pyx_RefNannyFinishContext();
- #if PY_MAJOR_VERSION < 3
- return;
- #else
- return __pyx_m;
- #endif
-}
-
-static const char *__pyx_filenames[] = {
- "parse.pyx",
-};
-
-/* Runtime support code */
-
-static void __pyx_init_filenames(void) {
- __pyx_f = __pyx_filenames;
-}
-
-static void __Pyx_RaiseDoubleKeywordsError(
- const char* func_name,
- PyObject* kw_name)
-{
- PyErr_Format(PyExc_TypeError,
- #if PY_MAJOR_VERSION >= 3
- "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
- #else
- "%s() got multiple values for keyword argument '%s'", func_name,
- PyString_AS_STRING(kw_name));
- #endif
-}
-
-static void __Pyx_RaiseArgtupleInvalid(
- const char* func_name,
- int exact,
- Py_ssize_t num_min,
- Py_ssize_t num_max,
- Py_ssize_t num_found)
-{
- Py_ssize_t num_expected;
- const char *number, *more_or_less;
-
- if (num_found < num_min) {
- num_expected = num_min;
- more_or_less = "at least";
- } else {
- num_expected = num_max;
- more_or_less = "at most";
- }
- if (exact) {
- more_or_less = "exactly";
- }
- number = (num_expected == 1) ? "" : "s";
- PyErr_Format(PyExc_TypeError,
- #if PY_VERSION_HEX < 0x02050000
- "%s() takes %s %d positional argument%s (%d given)",
- #else
- "%s() takes %s %zd positional argument%s (%zd given)",
- #endif
- func_name, more_or_less, num_expected, number, num_found);
-}
-
-static int __Pyx_ParseOptionalKeywords(
- PyObject *kwds,
- PyObject **argnames[],
- PyObject *kwds2,
- PyObject *values[],
- Py_ssize_t num_pos_args,
- const char* function_name)
-{
- PyObject *key = 0, *value = 0;
- Py_ssize_t pos = 0;
- PyObject*** name;
- PyObject*** first_kw_arg = argnames + num_pos_args;
-
- while (PyDict_Next(kwds, &pos, &key, &value)) {
- name = first_kw_arg;
- while (*name && (**name != key)) name++;
- if (*name) {
- values[name-argnames] = value;
- } else {
- #if PY_MAJOR_VERSION < 3
- if (unlikely(!PyString_CheckExact(key)) && unlikely(!PyString_Check(key))) {
- #else
- if (unlikely(!PyUnicode_CheckExact(key)) && unlikely(!PyUnicode_Check(key))) {
- #endif
- goto invalid_keyword_type;
- } else {
- for (name = first_kw_arg; *name; name++) {
- #if PY_MAJOR_VERSION >= 3
- if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
- PyUnicode_Compare(**name, key) == 0) break;
- #else
- if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
- _PyString_Eq(**name, key)) break;
- #endif
- }
- if (*name) {
- values[name-argnames] = value;
- } else {
- /* unexpected keyword found */
- for (name=argnames; name != first_kw_arg; name++) {
- if (**name == key) goto arg_passed_twice;
- #if PY_MAJOR_VERSION >= 3
- if (PyUnicode_GET_SIZE(**name) == PyUnicode_GET_SIZE(key) &&
- PyUnicode_Compare(**name, key) == 0) goto arg_passed_twice;
- #else
- if (PyString_GET_SIZE(**name) == PyString_GET_SIZE(key) &&
- _PyString_Eq(**name, key)) goto arg_passed_twice;
- #endif
- }
- if (kwds2) {
- if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
- } else {
- goto invalid_keyword;
- }
- }
- }
- }
- }
- return 0;
-arg_passed_twice:
- __Pyx_RaiseDoubleKeywordsError(function_name, **name);
- goto bad;
-invalid_keyword_type:
- PyErr_Format(PyExc_TypeError,
- "%s() keywords must be strings", function_name);
- goto bad;
-invalid_keyword:
- PyErr_Format(PyExc_TypeError,
- #if PY_MAJOR_VERSION < 3
- "%s() got an unexpected keyword argument '%s'",
- function_name, PyString_AsString(key));
- #else
- "%s() got an unexpected keyword argument '%U'",
- function_name, key);
- #endif
-bad:
- return -1;
-}
-
-
-static INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
- PyErr_Format(PyExc_ValueError,
- #if PY_VERSION_HEX < 0x02050000
- "need more than %d value%s to unpack", (int)index,
- #else
- "need more than %zd value%s to unpack", index,
- #endif
- (index == 1) ? "" : "s");
-}
-
-static INLINE void __Pyx_RaiseTooManyValuesError(void) {
- PyErr_SetString(PyExc_ValueError, "too many values to unpack");
-}
-
-static PyObject *__Pyx_UnpackItem(PyObject *iter, Py_ssize_t index) {
- PyObject *item;
- if (!(item = PyIter_Next(iter))) {
- if (!PyErr_Occurred()) {
- __Pyx_RaiseNeedMoreValuesError(index);
- }
- }
- return item;
-}
-
-static int __Pyx_EndUnpack(PyObject *iter) {
- PyObject *item;
- if ((item = PyIter_Next(iter))) {
- Py_DECREF(item);
- __Pyx_RaiseTooManyValuesError();
- return -1;
- }
- else if (!PyErr_Occurred())
- return 0;
- else
- return -1;
-}
-
-static INLINE void __Pyx_RaiseNoneNotIterableError(void) {
- PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
-}
-
-
-static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
- if (t == Py_None) {
- __Pyx_RaiseNoneNotIterableError();
- } else if (PyTuple_GET_SIZE(t) < index) {
- __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
- } else {
- __Pyx_RaiseTooManyValuesError();
- }
-}
-
-static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list) {
- PyObject *__import__ = 0;
- PyObject *empty_list = 0;
- PyObject *module = 0;
- PyObject *global_dict = 0;
- PyObject *empty_dict = 0;
- PyObject *list;
- __import__ = __Pyx_GetAttrString(__pyx_b, "__import__");
- if (!__import__)
- goto bad;
- if (from_list)
- list = from_list;
- else {
- empty_list = PyList_New(0);
- if (!empty_list)
- goto bad;
- list = empty_list;
- }
- global_dict = PyModule_GetDict(__pyx_m);
- if (!global_dict)
- goto bad;
- empty_dict = PyDict_New();
- if (!empty_dict)
- goto bad;
- module = PyObject_CallFunctionObjArgs(__import__,
- name, global_dict, empty_dict, list, NULL);
-bad:
- Py_XDECREF(empty_list);
- Py_XDECREF(__import__);
- Py_XDECREF(empty_dict);
- return module;
-}
-
-static PyObject *__Pyx_GetName(PyObject *dict, PyObject *name) {
- PyObject *result;
- result = PyObject_GetAttr(dict, name);
- if (!result)
- PyErr_SetObject(PyExc_NameError, name);
- return result;
-}
-
-static PyObject *__Pyx_CreateClass(
- PyObject *bases, PyObject *dict, PyObject *name, const char *modname)
-{
- PyObject *py_modname;
- PyObject *result = 0;
-
- #if PY_MAJOR_VERSION < 3
- py_modname = PyString_FromString(modname);
- #else
- py_modname = PyUnicode_FromString(modname);
- #endif
- if (!py_modname)
- goto bad;
- if (PyDict_SetItemString(dict, "__module__", py_modname) < 0)
- goto bad;
- #if PY_MAJOR_VERSION < 3
- result = PyClass_New(bases, dict, name);
- #else
- result = PyObject_CallFunctionObjArgs((PyObject *)&PyType_Type, name, bases, dict, NULL);
- #endif
-bad:
- Py_XDECREF(py_modname);
- return result;
-}
-
-static INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb) {
- PyObject *tmp_type, *tmp_value, *tmp_tb;
- PyThreadState *tstate = PyThreadState_GET();
-
- tmp_type = tstate->curexc_type;
- tmp_value = tstate->curexc_value;
- tmp_tb = tstate->curexc_traceback;
- tstate->curexc_type = type;
- tstate->curexc_value = value;
- tstate->curexc_traceback = tb;
- Py_XDECREF(tmp_type);
- Py_XDECREF(tmp_value);
- Py_XDECREF(tmp_tb);
-}
-
-static INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb) {
- PyThreadState *tstate = PyThreadState_GET();
- *type = tstate->curexc_type;
- *value = tstate->curexc_value;
- *tb = tstate->curexc_traceback;
-
- tstate->curexc_type = 0;
- tstate->curexc_value = 0;
- tstate->curexc_traceback = 0;
-}
-
-
-#if PY_MAJOR_VERSION < 3
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
- Py_XINCREF(type);
- Py_XINCREF(value);
- Py_XINCREF(tb);
- /* First, check the traceback argument, replacing None with NULL. */
- if (tb == Py_None) {
- Py_DECREF(tb);
- tb = 0;
- }
- else if (tb != NULL && !PyTraceBack_Check(tb)) {
- PyErr_SetString(PyExc_TypeError,
- "raise: arg 3 must be a traceback or None");
- goto raise_error;
- }
- /* Next, replace a missing value with None */
- if (value == NULL) {
- value = Py_None;
- Py_INCREF(value);
- }
- #if PY_VERSION_HEX < 0x02050000
- if (!PyClass_Check(type))
- #else
- if (!PyType_Check(type))
- #endif
- {
- /* Raising an instance. The value should be a dummy. */
- if (value != Py_None) {
- PyErr_SetString(PyExc_TypeError,
- "instance exception may not have a separate value");
- goto raise_error;
- }
- /* Normalize to raise <class>, <instance> */
- Py_DECREF(value);
- value = type;
- #if PY_VERSION_HEX < 0x02050000
- if (PyInstance_Check(type)) {
- type = (PyObject*) ((PyInstanceObject*)type)->in_class;
- Py_INCREF(type);
- }
- else {
- type = 0;
- PyErr_SetString(PyExc_TypeError,
- "raise: exception must be an old-style class or instance");
- goto raise_error;
- }
- #else
- type = (PyObject*) Py_TYPE(type);
- Py_INCREF(type);
- if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
- PyErr_SetString(PyExc_TypeError,
- "raise: exception class must be a subclass of BaseException");
- goto raise_error;
- }
- #endif
- }
-
- __Pyx_ErrRestore(type, value, tb);
- return;
-raise_error:
- Py_XDECREF(value);
- Py_XDECREF(type);
- Py_XDECREF(tb);
- return;
-}
-
-#else /* Python 3+ */
-
-static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb) {
- if (tb == Py_None) {
- tb = 0;
- } else if (tb && !PyTraceBack_Check(tb)) {
- PyErr_SetString(PyExc_TypeError,
- "raise: arg 3 must be a traceback or None");
- goto bad;
- }
- if (value == Py_None)
- value = 0;
-
- if (PyExceptionInstance_Check(type)) {
- if (value) {
- PyErr_SetString(PyExc_TypeError,
- "instance exception may not have a separate value");
- goto bad;
- }
- value = type;
- type = (PyObject*) Py_TYPE(value);
- } else if (!PyExceptionClass_Check(type)) {
- PyErr_SetString(PyExc_TypeError,
- "raise: exception class must be a subclass of BaseException");
- goto bad;
- }
-
- PyErr_SetObject(type, value);
-
- if (tb) {
- PyThreadState *tstate = PyThreadState_GET();
- PyObject* tmp_tb = tstate->curexc_traceback;
- if (tb != tmp_tb) {
- Py_INCREF(tb);
- tstate->curexc_traceback = tb;
- Py_XDECREF(tmp_tb);
- }
- }
-
-bad:
- return;
-}
-#endif
-
-static INLINE unsigned char __Pyx_PyInt_AsUnsignedChar(PyObject* x) {
- const unsigned char neg_one = (unsigned char)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(unsigned char) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(unsigned char)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to unsigned char" :
- "value too large to convert to unsigned char");
- }
- return (unsigned char)-1;
- }
- return (unsigned char)val;
- }
- return (unsigned char)__Pyx_PyInt_AsUnsignedLong(x);
-}
-
-static INLINE unsigned short __Pyx_PyInt_AsUnsignedShort(PyObject* x) {
- const unsigned short neg_one = (unsigned short)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(unsigned short) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(unsigned short)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to unsigned short" :
- "value too large to convert to unsigned short");
- }
- return (unsigned short)-1;
- }
- return (unsigned short)val;
- }
- return (unsigned short)__Pyx_PyInt_AsUnsignedLong(x);
-}
-
-static INLINE unsigned int __Pyx_PyInt_AsUnsignedInt(PyObject* x) {
- const unsigned int neg_one = (unsigned int)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(unsigned int) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(unsigned int)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to unsigned int" :
- "value too large to convert to unsigned int");
- }
- return (unsigned int)-1;
- }
- return (unsigned int)val;
- }
- return (unsigned int)__Pyx_PyInt_AsUnsignedLong(x);
-}
-
-static INLINE char __Pyx_PyInt_AsChar(PyObject* x) {
- const char neg_one = (char)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(char) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(char)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to char" :
- "value too large to convert to char");
- }
- return (char)-1;
- }
- return (char)val;
- }
- return (char)__Pyx_PyInt_AsLong(x);
-}
-
-static INLINE short __Pyx_PyInt_AsShort(PyObject* x) {
- const short neg_one = (short)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(short) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(short)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to short" :
- "value too large to convert to short");
- }
- return (short)-1;
- }
- return (short)val;
- }
- return (short)__Pyx_PyInt_AsLong(x);
-}
-
-static INLINE int __Pyx_PyInt_AsInt(PyObject* x) {
- const int neg_one = (int)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(int) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(int)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to int" :
- "value too large to convert to int");
- }
- return (int)-1;
- }
- return (int)val;
- }
- return (int)__Pyx_PyInt_AsLong(x);
-}
-
-static INLINE signed char __Pyx_PyInt_AsSignedChar(PyObject* x) {
- const signed char neg_one = (signed char)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(signed char) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(signed char)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to signed char" :
- "value too large to convert to signed char");
- }
- return (signed char)-1;
- }
- return (signed char)val;
- }
- return (signed char)__Pyx_PyInt_AsSignedLong(x);
-}
-
-static INLINE signed short __Pyx_PyInt_AsSignedShort(PyObject* x) {
- const signed short neg_one = (signed short)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(signed short) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(signed short)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to signed short" :
- "value too large to convert to signed short");
- }
- return (signed short)-1;
- }
- return (signed short)val;
- }
- return (signed short)__Pyx_PyInt_AsSignedLong(x);
-}
-
-static INLINE signed int __Pyx_PyInt_AsSignedInt(PyObject* x) {
- const signed int neg_one = (signed int)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
- if (sizeof(signed int) < sizeof(long)) {
- long val = __Pyx_PyInt_AsLong(x);
- if (unlikely(val != (long)(signed int)val)) {
- if (!unlikely(val == -1 && PyErr_Occurred())) {
- PyErr_SetString(PyExc_OverflowError,
- (is_unsigned && unlikely(val < 0)) ?
- "can't convert negative value to signed int" :
- "value too large to convert to signed int");
- }
- return (signed int)-1;
- }
- return (signed int)val;
- }
- return (signed int)__Pyx_PyInt_AsSignedLong(x);
-}
-
-static INLINE unsigned long __Pyx_PyInt_AsUnsignedLong(PyObject* x) {
- const unsigned long neg_one = (unsigned long)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to unsigned long");
- return (unsigned long)-1;
- }
- return (unsigned long)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to unsigned long");
- return (unsigned long)-1;
- }
- return PyLong_AsUnsignedLong(x);
- } else {
- return PyLong_AsLong(x);
- }
- } else {
- unsigned long val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (unsigned long)-1;
- val = __Pyx_PyInt_AsUnsignedLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static INLINE unsigned PY_LONG_LONG __Pyx_PyInt_AsUnsignedLongLong(PyObject* x) {
- const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to unsigned PY_LONG_LONG");
- return (unsigned PY_LONG_LONG)-1;
- }
- return (unsigned PY_LONG_LONG)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to unsigned PY_LONG_LONG");
- return (unsigned PY_LONG_LONG)-1;
- }
- return PyLong_AsUnsignedLongLong(x);
- } else {
- return PyLong_AsLongLong(x);
- }
- } else {
- unsigned PY_LONG_LONG val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (unsigned PY_LONG_LONG)-1;
- val = __Pyx_PyInt_AsUnsignedLongLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static INLINE long __Pyx_PyInt_AsLong(PyObject* x) {
- const long neg_one = (long)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to long");
- return (long)-1;
- }
- return (long)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to long");
- return (long)-1;
- }
- return PyLong_AsUnsignedLong(x);
- } else {
- return PyLong_AsLong(x);
- }
- } else {
- long val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (long)-1;
- val = __Pyx_PyInt_AsLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static INLINE PY_LONG_LONG __Pyx_PyInt_AsLongLong(PyObject* x) {
- const PY_LONG_LONG neg_one = (PY_LONG_LONG)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to PY_LONG_LONG");
- return (PY_LONG_LONG)-1;
- }
- return (PY_LONG_LONG)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to PY_LONG_LONG");
- return (PY_LONG_LONG)-1;
- }
- return PyLong_AsUnsignedLongLong(x);
- } else {
- return PyLong_AsLongLong(x);
- }
- } else {
- PY_LONG_LONG val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (PY_LONG_LONG)-1;
- val = __Pyx_PyInt_AsLongLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static INLINE signed long __Pyx_PyInt_AsSignedLong(PyObject* x) {
- const signed long neg_one = (signed long)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to signed long");
- return (signed long)-1;
- }
- return (signed long)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to signed long");
- return (signed long)-1;
- }
- return PyLong_AsUnsignedLong(x);
- } else {
- return PyLong_AsLong(x);
- }
- } else {
- signed long val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (signed long)-1;
- val = __Pyx_PyInt_AsSignedLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static INLINE signed PY_LONG_LONG __Pyx_PyInt_AsSignedLongLong(PyObject* x) {
- const signed PY_LONG_LONG neg_one = (signed PY_LONG_LONG)-1, const_zero = 0;
- const int is_unsigned = neg_one > const_zero;
-#if PY_VERSION_HEX < 0x03000000
- if (likely(PyInt_Check(x))) {
- long val = PyInt_AS_LONG(x);
- if (is_unsigned && unlikely(val < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to signed PY_LONG_LONG");
- return (signed PY_LONG_LONG)-1;
- }
- return (signed PY_LONG_LONG)val;
- } else
-#endif
- if (likely(PyLong_Check(x))) {
- if (is_unsigned) {
- if (unlikely(Py_SIZE(x) < 0)) {
- PyErr_SetString(PyExc_OverflowError,
- "can't convert negative value to signed PY_LONG_LONG");
- return (signed PY_LONG_LONG)-1;
- }
- return PyLong_AsUnsignedLongLong(x);
- } else {
- return PyLong_AsLongLong(x);
- }
- } else {
- signed PY_LONG_LONG val;
- PyObject *tmp = __Pyx_PyNumber_Int(x);
- if (!tmp) return (signed PY_LONG_LONG)-1;
- val = __Pyx_PyInt_AsSignedLongLong(tmp);
- Py_DECREF(tmp);
- return val;
- }
-}
-
-static void __Pyx_WriteUnraisable(const char *name) {
- PyObject *old_exc, *old_val, *old_tb;
- PyObject *ctx;
- __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
- #if PY_MAJOR_VERSION < 3
- ctx = PyString_FromString(name);
- #else
- ctx = PyUnicode_FromString(name);
- #endif
- __Pyx_ErrRestore(old_exc, old_val, old_tb);
- if (!ctx) {
- PyErr_WriteUnraisable(Py_None);
- } else {
- PyErr_WriteUnraisable(ctx);
- Py_DECREF(ctx);
- }
-}
-
-static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
-#if PY_VERSION_HEX < 0x03010000
- PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
-#else
- PyObject *ob = PyCapsule_New(vtable, 0, 0);
-#endif
- if (!ob)
- goto bad;
- if (PyDict_SetItemString(dict, "__pyx_vtable__", ob) < 0)
- goto bad;
- Py_DECREF(ob);
- return 0;
-bad:
- Py_XDECREF(ob);
- return -1;
-}
-
-#include "compile.h"
-#include "frameobject.h"
-#include "traceback.h"
-
-static void __Pyx_AddTraceback(const char *funcname) {
- PyObject *py_srcfile = 0;
- PyObject *py_funcname = 0;
- PyObject *py_globals = 0;
- PyCodeObject *py_code = 0;
- PyFrameObject *py_frame = 0;
-
- #if PY_MAJOR_VERSION < 3
- py_srcfile = PyString_FromString(__pyx_filename);
- #else
- py_srcfile = PyUnicode_FromString(__pyx_filename);
- #endif
- if (!py_srcfile) goto bad;
- if (__pyx_clineno) {
- #if PY_MAJOR_VERSION < 3
- py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
- #else
- py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, __pyx_clineno);
- #endif
- }
- else {
- #if PY_MAJOR_VERSION < 3
- py_funcname = PyString_FromString(funcname);
- #else
- py_funcname = PyUnicode_FromString(funcname);
- #endif
- }
- if (!py_funcname) goto bad;
- py_globals = PyModule_GetDict(__pyx_m);
- if (!py_globals) goto bad;
- py_code = PyCode_New(
- 0, /*int argcount,*/
- #if PY_MAJOR_VERSION >= 3
- 0, /*int kwonlyargcount,*/
- #endif
- 0, /*int nlocals,*/
- 0, /*int stacksize,*/
- 0, /*int flags,*/
- __pyx_empty_bytes, /*PyObject *code,*/
- __pyx_empty_tuple, /*PyObject *consts,*/
- __pyx_empty_tuple, /*PyObject *names,*/
- __pyx_empty_tuple, /*PyObject *varnames,*/
- __pyx_empty_tuple, /*PyObject *freevars,*/
- __pyx_empty_tuple, /*PyObject *cellvars,*/
- py_srcfile, /*PyObject *filename,*/
- py_funcname, /*PyObject *name,*/
- __pyx_lineno, /*int firstlineno,*/
- __pyx_empty_bytes /*PyObject *lnotab*/
- );
- if (!py_code) goto bad;
- py_frame = PyFrame_New(
- PyThreadState_GET(), /*PyThreadState *tstate,*/
- py_code, /*PyCodeObject *code,*/
- py_globals, /*PyObject *globals,*/
- 0 /*PyObject *locals*/
- );
- if (!py_frame) goto bad;
- py_frame->f_lineno = __pyx_lineno;
- PyTraceBack_Here(py_frame);
-bad:
- Py_XDECREF(py_srcfile);
- Py_XDECREF(py_funcname);
- Py_XDECREF(py_code);
- Py_XDECREF(py_frame);
-}
-
-static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
- while (t->p) {
- #if PY_MAJOR_VERSION < 3
- if (t->is_unicode) {
- *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
- } else if (t->intern) {
- *t->p = PyString_InternFromString(t->s);
- } else {
- *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
- }
- #else /* Python 3+ has unicode identifiers */
- if (t->is_unicode | t->is_str) {
- if (t->intern) {
- *t->p = PyUnicode_InternFromString(t->s);
- } else if (t->encoding) {
- *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
- } else {
- *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
- }
- } else {
- *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
- }
- #endif
- if (!*t->p)
- return -1;
- ++t;
- }
- return 0;
-}
-
-/* Type Conversion Functions */
-
-static INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
- if (x == Py_True) return 1;
- else if ((x == Py_False) | (x == Py_None)) return 0;
- else return PyObject_IsTrue(x);
-}
-
-static INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x) {
- PyNumberMethods *m;
- const char *name = NULL;
- PyObject *res = NULL;
-#if PY_VERSION_HEX < 0x03000000
- if (PyInt_Check(x) || PyLong_Check(x))
-#else
- if (PyLong_Check(x))
-#endif
- return Py_INCREF(x), x;
- m = Py_TYPE(x)->tp_as_number;
-#if PY_VERSION_HEX < 0x03000000
- if (m && m->nb_int) {
- name = "int";
- res = PyNumber_Int(x);
- }
- else if (m && m->nb_long) {
- name = "long";
- res = PyNumber_Long(x);
- }
-#else
- if (m && m->nb_int) {
- name = "int";
- res = PyNumber_Long(x);
- }
-#endif
- if (res) {
-#if PY_VERSION_HEX < 0x03000000
- if (!PyInt_Check(res) && !PyLong_Check(res)) {
-#else
- if (!PyLong_Check(res)) {
-#endif
- PyErr_Format(PyExc_TypeError,
- "__%s__ returned non-%s (type %.200s)",
- name, name, Py_TYPE(res)->tp_name);
- Py_DECREF(res);
- return NULL;
- }
- }
- else if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_TypeError,
- "an integer is required");
- }
- return res;
-}
-
-static INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
- Py_ssize_t ival;
- PyObject* x = PyNumber_Index(b);
- if (!x) return -1;
- ival = PyInt_AsSsize_t(x);
- Py_DECREF(x);
- return ival;
-}
-
-static INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
-#if PY_VERSION_HEX < 0x02050000
- if (ival <= LONG_MAX)
- return PyInt_FromLong((long)ival);
- else {
- unsigned char *bytes = (unsigned char *) &ival;
- int one = 1; int little = (int)*(unsigned char*)&one;
- return _PyLong_FromByteArray(bytes, sizeof(size_t), little, 0);
- }
-#else
- return PyInt_FromSize_t(ival);
-#endif
-}
-
-static INLINE size_t __Pyx_PyInt_AsSize_t(PyObject* x) {
- unsigned PY_LONG_LONG val = __Pyx_PyInt_AsUnsignedLongLong(x);
- if (unlikely(val == (unsigned PY_LONG_LONG)-1 && PyErr_Occurred())) {
- return (size_t)-1;
- } else if (unlikely(val != (unsigned PY_LONG_LONG)(size_t)val)) {
- PyErr_SetString(PyExc_OverflowError,
- "value too large to convert to size_t");
- return (size_t)-1;
- }
- return (size_t)val;
-}
-
-
-#endif /* Py_PYTHON_H */
diff --git a/sphinx/pycode/pgen2/parse.py b/sphinx/pycode/pgen2/parse.py
deleted file mode 100644
index 660a47e68..000000000
--- a/sphinx/pycode/pgen2/parse.py
+++ /dev/null
@@ -1,206 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-"""Parser engine for the grammar tables generated by pgen.
-
-The grammar table must be loaded first.
-
-See Parser/parser.c in the Python distribution for additional info on
-how this parsing engine works.
-
-"""
-
-# Local imports
-from sphinx.pycode.pgen2 import token
-
-if False:
- # For type annotation
- from typing import Any, List, Set, Tuple # NOQA
-
-class ParseError(Exception):
- """Exception to signal the parser is stuck."""
-
- def __init__(self, msg, type, value, context):
- Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- (msg, type, value, context))
- self.msg = msg
- self.type = type
- self.value = value
- self.context = context
-
-class Parser(object):
- """Parser engine.
-
- The proper usage sequence is:
-
- p = Parser(grammar, [converter]) # create instance
- p.setup([start]) # prepare for parsing
- <for each input token>:
- if p.addtoken(...): # parse a token; may raise ParseError
- break
- root = p.rootnode # root of abstract syntax tree
-
- A Parser instance may be reused by calling setup() repeatedly.
-
- A Parser instance contains state pertaining to the current token
- sequence, and should not be used concurrently by different threads
- to parse separate token sequences.
-
- See driver.py for how to get input tokens by tokenizing a file or
- string.
-
- Parsing is complete when addtoken() returns True; the root of the
- abstract syntax tree can then be retrieved from the rootnode
- instance variable. When a syntax error occurs, addtoken() raises
- the ParseError exception. There is no error recovery; the parser
- cannot be used after a syntax error was reported (but it can be
- reinitialized by calling setup()).
-
- """
-
- def __init__(self, grammar, convert=None):
- """Constructor.
-
- The grammar argument is a grammar.Grammar instance; see the
- grammar module for more information.
-
- The parser is not ready yet for parsing; you must call the
- setup() method to get it started.
-
- The optional convert argument is a function mapping concrete
- syntax tree nodes to abstract syntax tree nodes. If not
- given, no conversion is done and the syntax tree produced is
- the concrete syntax tree. If given, it must be a function of
- two arguments, the first being the grammar (a grammar.Grammar
- instance), and the second being the concrete syntax tree node
- to be converted. The syntax tree is converted from the bottom
- up.
-
- A concrete syntax tree node is a (type, value, context, nodes)
- tuple, where type is the node type (a token or symbol number),
- value is None for symbols and a string for tokens, context is
- None or an opaque value used for error reporting (typically a
- (lineno, offset) pair), and nodes is a list of children for
- symbols, and None for tokens.
-
- An abstract syntax tree node may be anything; this is entirely
- up to the converter function.
-
- """
- self.grammar = grammar
- self.convert = convert or (lambda grammar, node: node)
-
- def setup(self, start=None):
- """Prepare for parsing.
-
- This *must* be called before starting to parse.
-
- The optional argument is an alternative start symbol; it
- defaults to the grammar's start symbol.
-
- You can use a Parser instance to parse any number of programs;
- each time you call setup() the parser is reset to an initial
- state determined by the (implicit or explicit) start symbol.
-
- """
- if start is None:
- start = self.grammar.start
- # Each stack entry is a tuple: (dfa, state, node).
- # A node is a tuple: (type, value, context, children),
- # where children is a list of nodes or None, and context may be None.
- newnode = (start, None, None, []) # type: Tuple[unicode, unicode, unicode, List]
- stackentry = (self.grammar.dfas[start], 0, newnode)
- self.stack = [stackentry]
- self.rootnode = None # type: Any
- self.used_names = set() # type: Set[unicode]
- # Aliased to self.rootnode.used_names in pop()
-
- def addtoken(self, type, value, context):
- """Add a token; return True iff this is the end of the program."""
- # Map from token to label
- ilabel = self.classify(type, value, context)
- # Loop until the token is shifted; may raise exceptions
- while True:
- dfa, state, node = self.stack[-1]
- states, first = dfa
- arcs = states[state]
- # Look for a state with this label
- for i, newstate in arcs:
- t, v = self.grammar.labels[i]
- if ilabel == i:
- # Look it up in the list of labels
- assert t < 256
- # Shift a token; we're done with it
- self.shift(type, value, newstate, context)
- # Pop while we are in an accept-only state
- state = newstate
- while states[state] == [(0, state)]:
- self.pop()
- if not self.stack:
- # Done parsing!
- return True
- dfa, state, node = self.stack[-1]
- states, first = dfa
- # Done with this token
- return False
- elif t >= 256:
- # See if it's a symbol and if we're in its first set
- itsdfa = self.grammar.dfas[t]
- itsstates, itsfirst = itsdfa
- if ilabel in itsfirst:
- # Push a symbol
- self.push(t, self.grammar.dfas[t], newstate, context)
- break # To continue the outer while loop
- else:
- if (0, state) in arcs:
- # An accepting state, pop it and try something else
- self.pop()
- if not self.stack:
- # Done parsing, but another token is input
- raise ParseError("too much input",
- type, value, context)
- else:
- # No success finding a transition
- raise ParseError("bad input", type, value, context)
-
- def classify(self, type, value, context):
- """Turn a token into a label. (Internal)"""
- if type == token.NAME:
- # Keep a listing of all used names
- self.used_names.add(value)
- # Check for reserved words
- ilabel = self.grammar.keywords.get(value)
- if ilabel is not None:
- return ilabel
- ilabel = self.grammar.tokens.get(type)
- if ilabel is None:
- raise ParseError("bad token", type, value, context)
- return ilabel
-
- def shift(self, type, value, newstate, context):
- """Shift a token. (Internal)"""
- dfa, state, node = self.stack[-1]
- newnode = (type, value, context, None) # type: Tuple[unicode, unicode, unicode, List]
- newnode = self.convert(self.grammar, newnode)
- if newnode is not None:
- node[-1].append(newnode)
- self.stack[-1] = (dfa, newstate, node)
-
- def push(self, type, newdfa, newstate, context):
- """Push a nonterminal. (Internal)"""
- dfa, state, node = self.stack[-1]
- newnode = (type, None, context, []) # type: Tuple[unicode, unicode, unicode, List]
- self.stack[-1] = (dfa, newstate, node)
- self.stack.append((newdfa, 0, newnode))
-
- def pop(self):
- """Pop a nonterminal. (Internal)"""
- popdfa, popstate, popnode = self.stack.pop()
- newnode = self.convert(self.grammar, popnode)
- if newnode is not None:
- if self.stack:
- dfa, state, node = self.stack[-1]
- node[-1].append(newnode)
- else:
- self.rootnode = newnode
- self.rootnode.used_names = self.used_names
diff --git a/sphinx/pycode/pgen2/parse.pyx b/sphinx/pycode/pgen2/parse.pyx
deleted file mode 100644
index 9c97a4539..000000000
--- a/sphinx/pycode/pgen2/parse.pyx
+++ /dev/null
@@ -1,165 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-# Adapted from parse.py to be compiled with Cython by Georg Brandl.
-
-"""Parser engine for the grammar tables generated by pgen.
-
-The grammar table must be loaded first.
-
-See Parser/parser.c in the Python distribution for additional info on
-how this parsing engine works.
-
-"""
-
-from sphinx.pycode.nodes import Node, Leaf
-
-DEF NAME = 1
-
-class ParseError(Exception):
- """Exception to signal the parser is stuck."""
-
- def __init__(self, msg, type, value, context):
- Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
- (msg, type, value, context))
- self.msg = msg
- self.type = type
- self.value = value
- self.context = context
-
-
-cdef class Parser:
- cdef public object grammar
- cdef public object rootnode
- cdef public list stack
- cdef public set used_names
- cdef int _grammar_start
- cdef list _grammar_labels
- cdef dict _grammar_dfas
- cdef dict _grammar_keywords
- cdef dict _grammar_tokens
- cdef dict _grammar_number2symbol
-
- def __init__(self, grammar, convert=None):
- self.grammar = grammar
- #self.convert = convert or noconvert
-
- self._grammar_dfas = grammar.dfas
- self._grammar_labels = grammar.labels
- self._grammar_keywords = grammar.keywords
- self._grammar_tokens = grammar.tokens
- self._grammar_number2symbol = grammar.number2symbol
- self._grammar_start = grammar.start
-
- def setup(self, start=None):
- if start is None:
- start = self._grammar_start
- # Each stack entry is a tuple: (dfa, state, node).
- # A node is a tuple: (type, value, context, children),
- # where children is a list of nodes or None, and context may be None.
- newnode = (start, None, None, [])
- stackentry = (self._grammar_dfas[start], 0, newnode)
- self.stack = [stackentry]
- self.rootnode = None
- self.used_names = set() # Aliased to self.rootnode.used_names in pop()
-
- def addtoken(self, int type, value, context):
- """Add a token; return True iff this is the end of the program."""
- cdef int ilabel, i, t, state, newstate
- # Map from token to label
- ilabel = self.classify(type, value, context)
- # Loop until the token is shifted; may raise exceptions
- while True:
- dfa, state, node = self.stack[-1]
- states, first = dfa
- arcs = states[state]
- # Look for a state with this label
- for i, newstate in arcs:
- t, v = self._grammar_labels[i]
- if ilabel == i:
- # Look it up in the list of labels
- ## assert t < 256
- # Shift a token; we're done with it
- self.shift(type, value, newstate, context)
- # Pop while we are in an accept-only state
- state = newstate
- while states[state] == [(0, state)]:
- self.pop()
- if not self.stack:
- # Done parsing!
- return True
- dfa, state, node = self.stack[-1]
- states, first = dfa
- # Done with this token
- return False
- elif t >= 256:
- # See if it's a symbol and if we're in its first set
- itsdfa = self._grammar_dfas[t]
- itsstates, itsfirst = itsdfa
- if ilabel in itsfirst:
- # Push a symbol
- self.push(t, itsdfa, newstate, context)
- break # To continue the outer while loop
- else:
- if (0, state) in arcs:
- # An accepting state, pop it and try something else
- self.pop()
- if not self.stack:
- # Done parsing, but another token is input
- raise ParseError("too much input",
- type, value, context)
- else:
- # No success finding a transition
- raise ParseError("bad input", type, value, context)
-
- cdef int classify(self, int type, value, context):
- """Turn a token into a label. (Internal)"""
- if type == NAME:
- # Keep a listing of all used names
- self.used_names.add(value)
- # Check for reserved words
- if value in self._grammar_keywords:
- return self._grammar_keywords[value]
- if type not in self._grammar_tokens:
- raise ParseError("bad token", type, value, context)
- return self._grammar_tokens[type]
-
- cdef void shift(self, type, value, newstate, context):
- """Shift a token. (Internal)"""
- cdef tuple node
- dfa, state, node = self.stack[-1]
- newnode = (type, value, context, None)
- newnode = self.convert(newnode)
- if newnode is not None:
- node[-1].append(newnode)
- self.stack[-1] = (dfa, newstate, node)
-
- cdef void push(self, type, newdfa, newstate, context):
- """Push a nonterminal. (Internal)"""
- dfa, state, node = self.stack[-1]
- newnode = (type, None, context, [])
- self.stack[-1] = (dfa, newstate, node)
- self.stack.append((newdfa, 0, newnode))
-
- cdef void pop(self):
- """Pop a nonterminal. (Internal)"""
- popdfa, popstate, popnode = self.stack.pop()
- newnode = self.convert(popnode)
- if newnode is not None:
- if self.stack:
- dfa, state, node = self.stack[-1]
- node[-1].append(newnode)
- else:
- self.rootnode = newnode
- self.rootnode.used_names = self.used_names
-
- cdef convert(self, tuple raw_node):
- type, value, context, children = raw_node
- if children or type in self._grammar_number2symbol:
- # If there's exactly one child, return that child instead of
- # creating a new node.
- if len(children) == 1:
- return children[0]
- return Node(type, children, context=context)
- else:
- return Leaf(type, value, context=context)
diff --git a/sphinx/pycode/pgen2/pgen.py b/sphinx/pycode/pgen2/pgen.py
deleted file mode 100644
index 8d9cc786a..000000000
--- a/sphinx/pycode/pgen2/pgen.py
+++ /dev/null
@@ -1,403 +0,0 @@
-# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
-# Licensed to PSF under a Contributor Agreement.
-
-from __future__ import print_function
-
-from six import iteritems
-from collections import OrderedDict
-
-# Pgen imports
-from sphinx.pycode.pgen2 import grammar, token, tokenize
-
-if False:
- # For type annotation
- from typing import Any, Dict, List, Tuple # NOQA
-
-
-class PgenGrammar(grammar.Grammar):
- pass
-
-class ParserGenerator(object):
-
- def __init__(self, filename, stream=None):
- close_stream = None
- if stream is None:
- stream = open(filename)
- close_stream = stream.close
- self.filename = filename
- self.stream = stream
- self.generator = tokenize.generate_tokens(stream.readline)
- self.gettoken() # Initialize lookahead
- self.dfas, self.startsymbol = self.parse()
- if close_stream is not None:
- close_stream()
- self.first = {} # type: Dict[unicode, List[unicode]]
- # map from symbol name to set of tokens
- self.addfirstsets()
-
- def make_grammar(self):
- c = PgenGrammar()
- names = list(self.dfas.keys())
- names.sort()
- names.remove(self.startsymbol)
- names.insert(0, self.startsymbol)
- for name in names:
- i = 256 + len(c.symbol2number)
- c.symbol2number[name] = i
- c.number2symbol[i] = name
- for name in names:
- dfa = self.dfas[name]
- states = [] # type: List[List[Tuple[int, int]]]
- for state in dfa:
- arcs = []
- for label, next in iteritems(state.arcs):
- arcs.append((self.make_label(c, label), dfa.index(next)))
- if state.isfinal:
- arcs.append((0, dfa.index(state)))
- states.append(arcs)
- c.states.append(states)
- c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
- c.start = c.symbol2number[self.startsymbol]
- return c
-
- def make_first(self, c, name):
- rawfirst = self.first[name]
- first = {}
- for label in sorted(rawfirst):
- ilabel = self.make_label(c, label)
- ##assert ilabel not in first # X X X failed on <> ... !=
- first[ilabel] = 1
- return first
-
- def make_label(self, c, label):
- # X X X Maybe this should be a method on a subclass of converter?
- ilabel = len(c.labels)
- if label[0].isalpha():
- # Either a symbol name or a named token
- if label in c.symbol2number:
- # A symbol name (a non-terminal)
- if label in c.symbol2label:
- return c.symbol2label[label]
- else:
- c.labels.append((c.symbol2number[label], None))
- c.symbol2label[label] = ilabel
- return ilabel
- else:
- # A named token (NAME, NUMBER, STRING)
- itoken = getattr(token, label, None)
- assert isinstance(itoken, int), label
- assert itoken in token.tok_name, label
- if itoken in c.tokens:
- return c.tokens[itoken]
- else:
- c.labels.append((itoken, None))
- c.tokens[itoken] = ilabel
- return ilabel
- else:
- # Either a keyword or an operator
- assert label[0] in ('"', "'"), label
- value = eval(label)
- if value[0].isalpha():
- # A keyword
- if value in c.keywords:
- return c.keywords[value]
- else:
- c.labels.append((token.NAME, value))
- c.keywords[value] = ilabel
- return ilabel
- else:
- # An operator (any non-numeric token)
- itoken = grammar.opmap[value] # Fails if unknown token
- if itoken in c.tokens:
- return c.tokens[itoken]
- else:
- c.labels.append((itoken, None))
- c.tokens[itoken] = ilabel
- return ilabel
-
- def addfirstsets(self):
- names = list(self.dfas.keys())
- names.sort()
- for name in names:
- if name not in self.first:
- self.calcfirst(name)
- #print name, self.first[name].keys()
-
- def calcfirst(self, name):
- dfa = self.dfas[name]
- self.first[name] = None # dummy to detect left recursion
- state = dfa[0]
- totalset = {} # type: Dict[unicode, int]
- overlapcheck = {}
- for label, next in iteritems(state.arcs):
- if label in self.dfas:
- if label in self.first:
- fset = self.first[label]
- if fset is None:
- raise ValueError("recursion for rule %r" % name)
- else:
- self.calcfirst(label)
- fset = self.first[label]
- totalset.update(fset)
- overlapcheck[label] = fset
- else:
- totalset[label] = 1
- overlapcheck[label] = {label: 1}
- inverse = {} # type: Dict[unicode, unicode]
- for label, itsfirst in sorted(overlapcheck.items()):
- for symbol in sorted(itsfirst):
- if symbol in inverse:
- raise ValueError("rule %s is ambiguous; %s is in the"
- " first sets of %s as well as %s" %
- (name, symbol, label, inverse[symbol]))
- inverse[symbol] = label
- self.first[name] = totalset
-
- def parse(self):
- dfas = {}
- startsymbol = None
- # MSTART: (NEWLINE | RULE)* ENDMARKER
- while self.type != token.ENDMARKER:
- while self.type == token.NEWLINE:
- self.gettoken()
- # RULE: NAME ':' RHS NEWLINE
- name = self.expect(token.NAME)
- self.expect(token.OP, ":")
- a, z = self.parse_rhs()
- self.expect(token.NEWLINE)
- #self.dump_nfa(name, a, z)
- dfa = self.make_dfa(a, z)
- #self.dump_dfa(name, dfa)
- #oldlen = len(dfa)
- self.simplify_dfa(dfa)
- #newlen = len(dfa)
- dfas[name] = dfa
- #print name, oldlen, newlen
- if startsymbol is None:
- startsymbol = name
- return dfas, startsymbol
-
- def make_dfa(self, start, finish):
- # To turn an NFA into a DFA, we define the states of the DFA
- # to correspond to *sets* of states of the NFA. Then do some
- # state reduction. Let's represent sets as dicts with 1 for
- # values.
- assert isinstance(start, NFAState)
- assert isinstance(finish, NFAState)
- def closure(state):
- base = {} # type: Dict
- addclosure(state, base)
- return base
- def addclosure(state, base):
- assert isinstance(state, NFAState)
- if state in base:
- return
- base[state] = 1
- for label, next in state.arcs:
- if label is None:
- addclosure(next, base)
- states = [DFAState(closure(start), finish)]
- for state in states: # NB states grows while we're iterating
- arcs = {} # type: Dict[unicode, Dict]
- for nfastate in state.nfaset:
- for label, next in nfastate.arcs:
- if label is not None:
- addclosure(next, arcs.setdefault(label, {}))
- for label, nfaset in iteritems(arcs):
- for st in states:
- if st.nfaset == nfaset:
- break
- else:
- st = DFAState(nfaset, finish)
- states.append(st)
- state.addarc(st, label)
- return states # List of DFAState instances; first one is start
-
- def dump_nfa(self, name, start, finish):
- print("Dump of NFA for", name)
- todo = [start]
- for i, state in enumerate(todo):
- print(" State", i, state is finish and "(final)" or "")
- for label, next in state.arcs:
- if next in todo:
- j = todo.index(next)
- else:
- j = len(todo)
- todo.append(next)
- if label is None:
- print(" -> %d" % j)
- else:
- print(" %s -> %d" % (label, j))
-
- def dump_dfa(self, name, dfa):
- print("Dump of DFA for", name)
- for i, state in enumerate(dfa):
- print(" State", i, state.isfinal and "(final)" or "")
- for label, next in iteritems(state.arcs):
- print(" %s -> %d" % (label, dfa.index(next)))
-
- def simplify_dfa(self, dfa):
- # This is not theoretically optimal, but works well enough.
- # Algorithm: repeatedly look for two states that have the same
- # set of arcs (same labels pointing to the same nodes) and
- # unify them, until things stop changing.
-
- # dfa is a list of DFAState instances
- changes = True
- while changes:
- changes = False
- for i, state_i in enumerate(dfa):
- for j in range(i+1, len(dfa)):
- state_j = dfa[j]
- if state_i == state_j:
- #print " unify", i, j
- del dfa[j]
- for state in dfa:
- state.unifystate(state_j, state_i)
- changes = True
- break
-
- def parse_rhs(self):
- # RHS: ALT ('|' ALT)*
- a, z = self.parse_alt()
- if self.value != "|":
- return a, z
- else:
- aa = NFAState()
- zz = NFAState()
- aa.addarc(a)
- z.addarc(zz)
- while self.value == "|":
- self.gettoken()
- a, z = self.parse_alt()
- aa.addarc(a)
- z.addarc(zz)
- return aa, zz
-
- def parse_alt(self):
- # ALT: ITEM+
- a, b = self.parse_item()
- while (self.value in ("(", "[") or
- self.type in (token.NAME, token.STRING)):
- c, d = self.parse_item()
- b.addarc(c)
- b = d
- return a, b
-
- def parse_item(self):
- # ITEM: '[' RHS ']' | ATOM ['+' | '*']
- if self.value == "[":
- self.gettoken()
- a, z = self.parse_rhs()
- self.expect(token.OP, "]")
- a.addarc(z)
- return a, z
- else:
- a, z = self.parse_atom()
- value = self.value
- if value not in ("+", "*"):
- return a, z
- self.gettoken()
- z.addarc(a)
- if value == "+":
- return a, z
- else:
- return a, a
-
- def parse_atom(self):
- # ATOM: '(' RHS ')' | NAME | STRING
- if self.value == "(":
- self.gettoken()
- a, z = self.parse_rhs()
- self.expect(token.OP, ")")
- return a, z
- elif self.type in (token.NAME, token.STRING):
- a = NFAState()
- z = NFAState()
- a.addarc(z, self.value)
- self.gettoken()
- return a, z
- else:
- self.raise_error("expected (...) or NAME or STRING, got %s/%s",
- self.type, self.value)
-
- def expect(self, type, value=None):
- if self.type != type or (value is not None and self.value != value):
- self.raise_error("expected %s/%s, got %s/%s",
- type, value, self.type, self.value)
- value = self.value
- self.gettoken()
- return value
-
- def gettoken(self):
- tup = next(self.generator)
- while tup[0] in (tokenize.COMMENT, tokenize.NL):
- tup = next(self.generator)
- self.type, self.value, self.begin, self.end, self.line = tup
- #print token.tok_name[self.type], repr(self.value)
-
- def raise_error(self, msg, *args):
- if args:
- try:
- msg = msg % args
- except:
- msg = " ".join([msg] + [str(x) for x in args])
- raise SyntaxError(msg, (self.filename, self.end[0],
- self.end[1], self.line))
-
-class NFAState(object):
-
- def __init__(self):
- self.arcs = [] # type: List[Tuple[unicode, Any]]
- # list of (label, NFAState) pairs
-
- def addarc(self, next, label=None):
- assert label is None or isinstance(label, str)
- assert isinstance(next, NFAState)
- self.arcs.append((label, next))
-
- def __hash__(self):
- return hash(tuple(x[0] for x in self.arcs))
-
-class DFAState(object):
-
- def __init__(self, nfaset, final):
- assert isinstance(nfaset, dict)
- assert isinstance(next(iter(nfaset)), NFAState)
- assert isinstance(final, NFAState)
- self.nfaset = nfaset
- self.isfinal = final in nfaset
- self.arcs = OrderedDict() # type: OrderedDict
- # map from label to DFAState
-
- def __hash__(self):
- return hash(tuple(self.arcs))
-
- def addarc(self, next, label):
- assert isinstance(label, str)
- assert label not in self.arcs
- assert isinstance(next, DFAState)
- self.arcs[label] = next
-
- def unifystate(self, old, new):
- for label, next in iteritems(self.arcs):
- if next is old:
- self.arcs[label] = new
-
- def __eq__(self, other):
- # Equality test -- ignore the nfaset instance variable
- assert isinstance(other, DFAState)
- if self.isfinal != other.isfinal:
- return False
- # Can't just return self.arcs == other.arcs, because that
- # would invoke this method recursively, with cycles...
- if len(self.arcs) != len(other.arcs):
- return False
- for label, next in iteritems(self.arcs):
- if next is not other.arcs.get(label):
- return False
- return True
-
-def generate_grammar(filename="Grammar.txt"):
- p = ParserGenerator(filename)
- return p.make_grammar()
diff --git a/sphinx/pycode/pgen2/token.py b/sphinx/pycode/pgen2/token.py
deleted file mode 100755
index 73718d166..000000000
--- a/sphinx/pycode/pgen2/token.py
+++ /dev/null
@@ -1,86 +0,0 @@
-#! /usr/bin/env python
-
-"""Token constants (from "token.h")."""
-
-# Taken from Python (r53757) and modified to include some tokens
-# originally monkeypatched in by pgen2.tokenize
-
-#--start constants--
-ENDMARKER = 0
-NAME = 1
-NUMBER = 2
-STRING = 3
-NEWLINE = 4
-INDENT = 5
-DEDENT = 6
-LPAR = 7
-RPAR = 8
-LSQB = 9
-RSQB = 10
-COLON = 11
-COMMA = 12
-SEMI = 13
-PLUS = 14
-MINUS = 15
-STAR = 16
-SLASH = 17
-VBAR = 18
-AMPER = 19
-LESS = 20
-GREATER = 21
-EQUAL = 22
-DOT = 23
-PERCENT = 24
-BACKQUOTE = 25
-LBRACE = 26
-RBRACE = 27
-EQEQUAL = 28
-NOTEQUAL = 29
-LESSEQUAL = 30
-GREATEREQUAL = 31
-TILDE = 32
-CIRCUMFLEX = 33
-LEFTSHIFT = 34
-RIGHTSHIFT = 35
-DOUBLESTAR = 36
-PLUSEQUAL = 37
-MINEQUAL = 38
-STAREQUAL = 39
-SLASHEQUAL = 40
-PERCENTEQUAL = 41
-AMPEREQUAL = 42
-VBAREQUAL = 43
-CIRCUMFLEXEQUAL = 44
-LEFTSHIFTEQUAL = 45
-RIGHTSHIFTEQUAL = 46
-DOUBLESTAREQUAL = 47
-DOUBLESLASH = 48
-DOUBLESLASHEQUAL = 49
-AT = 50
-ATEQUAL = 51
-RARROW = 52
-ELLIPSIS = 53
-OP = 54
-AWAIT = 55
-ASYNC = 56
-COMMENT = 57
-NL = 58
-ERRORTOKEN = 59
-N_TOKENS = 60
-NT_OFFSET = 256
-#--end constants--
-
-tok_name = {}
-for _name, _value in list(globals().items()):
- if type(_value) is type(0):
- tok_name[_value] = _name
-
-
-def ISTERMINAL(x):
- return x < NT_OFFSET
-
-def ISNONTERMINAL(x):
- return x >= NT_OFFSET
-
-def ISEOF(x):
- return x == ENDMARKER
diff --git a/sphinx/pycode/pgen2/tokenize.py b/sphinx/pycode/pgen2/tokenize.py
deleted file mode 100644
index 5f6abdb2f..000000000
--- a/sphinx/pycode/pgen2/tokenize.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
-# All rights reserved.
-
-"""Tokenization help for Python programs.
-
-generate_tokens(readline) is a generator that breaks a stream of
-text into Python tokens. It accepts a readline-like method which is called
-repeatedly to get the next line of input (or "" for EOF). It generates
-5-tuples with these members:
-
- the token type (see token.py)
- the token (a string)
- the starting (row, column) indices of the token (a 2-tuple of ints)
- the ending (row, column) indices of the token (a 2-tuple of ints)
- the original line (string)
-
-It is designed to match the working of the Python tokenizer exactly, except
-that it produces COMMENT tokens for comments and gives type OP for all
-operators
-
-Older entry points
- tokenize_loop(readline, tokeneater)
- tokenize(readline, tokeneater=printtoken)
-are the same, except instead of generating tokens, tokeneater is a callback
-function to which the 5 fields described above are passed as 5 arguments,
-each time a new token is found.
-"""
-
-from __future__ import print_function
-
-__author__ = 'Ka-Ping Yee <ping@lfw.org>'
-__credits__ = \
- 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'
-
-import string, re
-from six import PY3
-from sphinx.pycode.pgen2.token import *
-from sphinx.pycode.pgen2 import token
-
-if False:
- # For type annotation
- from typing import List # NOQA
-
-__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
- "generate_tokens", "untokenize"]
-del token
-
-def group(*choices): return '(' + '|'.join(choices) + ')'
-def any(*choices): return group(*choices) + '*'
-def maybe(*choices): return group(*choices) + '?'
-
-Whitespace = r'[ \f\t]*'
-Comment = r'#[^\r\n]*'
-Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
-Name = r'[a-zA-Z_]\w*'
-
-Binnumber = r'0[bB][01]*'
-Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
-Octnumber = r'0[oO]?[0-7]*[lL]?'
-Decnumber = r'[1-9]\d*[lL]?'
-Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
-Exponent = r'[eE][-+]?\d+'
-Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
-Expfloat = r'\d+' + Exponent
-Floatnumber = group(Pointfloat, Expfloat)
-Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
-Number = group(Imagnumber, Floatnumber, Intnumber)
-
-# Tail end of ' string.
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
-# Tail end of " string.
-Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
-# Tail end of ''' string.
-Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
-# Tail end of """ string.
-Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
-Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
-# Single-line ' or " string.
-String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
- r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
-
-# Because of leftmost-then-longest match semantics, be sure to put the
-# longest operators first (e.g., if = came before ==, == would get
-# recognized as two instances of =).
-Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
- r"//=?", r"->",
- r"[+\-*/%&|^=<>]=?",
- r"~")
-
-Bracket = '[][(){}]'
-Special = group(r'\r?\n', r'[:;.,`@]')
-if PY3:
- Ellipsis_ = r'\.{3}'
- Special = group(Ellipsis_, Special)
-Funny = group(Operator, Bracket, Special)
-
-PlainToken = group(Number, Funny, String, Name)
-Token = Ignore + PlainToken
-
-# First (or only) line of ' or " string.
-ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
- group("'", r'\\\r?\n'),
- r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
- group('"', r'\\\r?\n'))
-PseudoExtras = group(r'\\\r?\n', Comment, Triple)
-PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
-
-tokenprog, pseudoprog, single3prog, double3prog = [
- re.compile(x) for x in (Token, PseudoToken, Single3, Double3)
-]
-endprogs = {"'": re.compile(Single), '"': re.compile(Double),
- "'''": single3prog, '"""': double3prog,
- "r'''": single3prog, 'r"""': double3prog,
- "u'''": single3prog, 'u"""': double3prog,
- "b'''": single3prog, 'b"""': double3prog,
- "ur'''": single3prog, 'ur"""': double3prog,
- "br'''": single3prog, 'br"""': double3prog,
- "R'''": single3prog, 'R"""': double3prog,
- "U'''": single3prog, 'U"""': double3prog,
- "B'''": single3prog, 'B"""': double3prog,
- "uR'''": single3prog, 'uR"""': double3prog,
- "Ur'''": single3prog, 'Ur"""': double3prog,
- "UR'''": single3prog, 'UR"""': double3prog,
- "bR'''": single3prog, 'bR"""': double3prog,
- "Br'''": single3prog, 'Br"""': double3prog,
- "BR'''": single3prog, 'BR"""': double3prog,
- 'r': None, 'R': None,
- 'u': None, 'U': None,
- 'b': None, 'B': None}
-
-triple_quoted = {}
-for t in ("'''", '"""',
- "r'''", 'r"""', "R'''", 'R"""',
- "u'''", 'u"""', "U'''", 'U"""',
- "b'''", 'b"""', "B'''", 'B"""',
- "ur'''", 'ur"""', "Ur'''", 'Ur"""',
- "uR'''", 'uR"""', "UR'''", 'UR"""',
- "br'''", 'br"""', "Br'''", 'Br"""',
- "bR'''", 'bR"""', "BR'''", 'BR"""',):
- triple_quoted[t] = t
-single_quoted = {}
-for t in ("'", '"',
- "r'", 'r"', "R'", 'R"',
- "u'", 'u"', "U'", 'U"',
- "b'", 'b"', "B'", 'B"',
- "ur'", 'ur"', "Ur'", 'Ur"',
- "uR'", 'uR"', "UR'", 'UR"',
- "br'", 'br"', "Br'", 'Br"',
- "bR'", 'bR"', "BR'", 'BR"', ):
- single_quoted[t] = t
-
-tabsize = 8
-
-class TokenError(Exception): pass
-
-class StopTokenizing(Exception): pass
-
-def printtoken(type, token, scell, ecell, line): # for testing
- srow, scol = scell
- erow, ecol = ecell
- print("%d,%d-%d,%d:\t%s\t%s" %
- (srow, scol, erow, ecol, tok_name[type], repr(token)))
-
-def tokenize(readline, tokeneater=printtoken):
- """
- The tokenize() function accepts two parameters: one representing the
- input stream, and one providing an output mechanism for tokenize().
-
- The first parameter, readline, must be a callable object which provides
- the same interface as the readline() method of built-in file objects.
- Each call to the function should return one line of input as a string.
-
- The second parameter, tokeneater, must also be a callable object. It is
- called once for each token, with five arguments, corresponding to the
- tuples generated by generate_tokens().
- """
- try:
- tokenize_loop(readline, tokeneater)
- except StopTokenizing:
- pass
-
-# backwards compatible interface
-def tokenize_loop(readline, tokeneater):
- for token_info in generate_tokens(readline):
- tokeneater(*token_info)
-
-class Untokenizer:
-
- def __init__(self):
- self.tokens = [] # type: List[unicode]
- self.prev_row = 1
- self.prev_col = 0
-
- def add_whitespace(self, start):
- row, col = start
- assert row <= self.prev_row
- col_offset = col - self.prev_col
- if col_offset:
- self.tokens.append(" " * col_offset)
-
- def untokenize(self, iterable):
- for t in iterable:
- if len(t) == 2:
- self.compat(t, iterable)
- break
- tok_type, token, start, end, line = t
- self.add_whitespace(start)
- self.tokens.append(token)
- self.prev_row, self.prev_col = end
- if tok_type in (NEWLINE, NL):
- self.prev_row += 1
- self.prev_col = 0
- return "".join(self.tokens)
-
- def compat(self, token, iterable):
- startline = False
- indents = []
- toks_append = self.tokens.append
- toknum, tokval = token
- if toknum in (NAME, NUMBER):
- tokval += ' '
- if toknum in (NEWLINE, NL):
- startline = True
- for tok in iterable:
- toknum, tokval = tok[:2]
-
- if toknum in (NAME, NUMBER):
- tokval += ' '
-
- if toknum == INDENT:
- indents.append(tokval)
- continue
- elif toknum == DEDENT:
- indents.pop()
- continue
- elif toknum in (NEWLINE, NL):
- startline = True
- elif startline and indents:
- toks_append(indents[-1])
- startline = False
- toks_append(tokval)
-
-def untokenize(iterable):
- """Transform tokens back into Python source code.
-
- Each element returned by the iterable must be a token sequence
- with at least two elements, a token number and token value. If
- only two tokens are passed, the resulting output is poor.
-
- Round-trip invariant for full input:
- Untokenized source will match input source exactly
-
- Round-trip invariant for limited intput:
- # Output text will tokenize the back to the input
- t1 = [tok[:2] for tok in generate_tokens(f.readline)]
- newcode = untokenize(t1)
- readline = iter(newcode.splitlines(1)).next
- t2 = [tok[:2] for tokin generate_tokens(readline)]
- assert t1 == t2
- """
- ut = Untokenizer()
- return ut.untokenize(iterable)
-
-def generate_tokens(readline):
- """
- The generate_tokens() generator requires one argment, readline, which
- must be a callable object which provides the same interface as the
- readline() method of built-in file objects. Each call to the function
- should return one line of input as a string. Alternately, readline
- can be a callable function terminating with StopIteration:
- readline = open(myfile).next # Example of alternate readline
-
- The generator produces 5-tuples with these members: the token type; the
- token string; a 2-tuple (srow, scol) of ints specifying the row and
- column where the token begins in the source; a 2-tuple (erow, ecol) of
- ints specifying the row and column where the token ends in the source;
- and the line on which the token was found. The line passed is the
- logical line; continuation lines are included.
- """
- lnum = parenlev = continued = 0
- namechars, numchars = string.ascii_letters + '_', '0123456789'
- contstr, needcont = '', 0
- contline = None
- indents = [0]
-
- while 1: # loop over lines in stream
- try:
- line = readline()
- except StopIteration:
- line = ''
- # if we are not at the end of the file make sure the
- # line ends with a newline because the parser depends
- # on that.
- if line:
- line = line.rstrip() + '\n'
- lnum = lnum + 1
- pos, max = 0, len(line)
-
- if contstr: # continued string
- if not line:
- raise TokenError("EOF in multi-line string", strstart) # type: ignore
- endmatch = endprog.match(line) # type: ignore
- if endmatch:
- pos = end = endmatch.end(0)
- yield (STRING, contstr + line[:end],
- strstart, (lnum, end), contline + line) # type: ignore
- contstr, needcont = '', 0
- contline = None
- elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
- yield (ERRORTOKEN, contstr + line,
- strstart, (lnum, len(line)), contline) # type: ignore
- contstr = ''
- contline = None
- continue
- else:
- contstr = contstr + line
- contline = contline + line
- continue
-
- elif parenlev == 0 and not continued: # new statement
- if not line: break
- column = 0
- while pos < max: # measure leading whitespace
- if line[pos] == ' ': column = column + 1
- elif line[pos] == '\t': column = (column/tabsize + 1)*tabsize
- elif line[pos] == '\f': column = 0
- else: break
- pos = pos + 1
- if pos == max: break
-
- if line[pos] in '#\r\n': # skip comments or blank lines
- if line[pos] == '#':
- comment_token = line[pos:].rstrip('\r\n')
- nl_pos = pos + len(comment_token)
- yield (COMMENT, comment_token,
- (lnum, pos), (lnum, pos + len(comment_token)), line)
- yield (NL, line[nl_pos:],
- (lnum, nl_pos), (lnum, len(line)), line)
- else:
- yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
- (lnum, pos), (lnum, len(line)), line)
- continue
-
- if column > indents[-1]: # count indents or dedents
- indents.append(column)
- yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
- while column < indents[-1]:
- if column not in indents:
- raise IndentationError(
- "unindent does not match any outer indentation level",
- ("<tokenize>", lnum, pos, line))
- indents = indents[:-1]
- yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
-
- else: # continued statement
- if not line:
- raise TokenError("EOF in multi-line statement", (lnum, 0))
- continued = 0
-
- while pos < max:
- pseudomatch = pseudoprog.match(line, pos)
- if pseudomatch: # scan for tokens
- start, end = pseudomatch.span(1)
- spos, epos, pos = (lnum, start), (lnum, end), end
- token, initial = line[start:end], line[start]
-
- if end < max:
- next_pseudomatch = pseudoprog.match(line, end)
- if next_pseudomatch:
- n_start, n_end = next_pseudomatch.span(1)
- n_token = line[n_start:n_end]
- else:
- n_token = None
- else:
- n_token = None
-
- if initial in numchars or (
- initial == '.' and token not in ('.', '...')
- ): # ordinary number
- yield (NUMBER, token, spos, epos, line)
- elif initial in '\r\n':
- newline = NEWLINE
- if parenlev > 0:
- newline = NL
- yield (newline, token, spos, epos, line)
- elif initial == '#':
- assert not token.endswith("\n")
- yield (COMMENT, token, spos, epos, line)
- elif token in triple_quoted:
- endprog = endprogs[token]
- endmatch = endprog.match(line, pos)
- if endmatch: # all on one line
- pos = endmatch.end(0)
- token = line[start:pos]
- yield (STRING, token, spos, (lnum, pos), line)
- else:
- strstart = (lnum, start) # multiple lines
- contstr = line[start:]
- contline = line
- break
- elif initial in single_quoted or \
- token[:2] in single_quoted or \
- token[:3] in single_quoted:
- if token[-1] == '\n': # continued string
- strstart = (lnum, start)
- endprog = (endprogs[initial] or endprogs[token[1]] or
- endprogs[token[2]])
- contstr, needcont = line[start:], 1
- contline = line
- break
- else: # ordinary string
- yield (STRING, token, spos, epos, line)
- elif token == 'await' and n_token:
- yield (AWAIT, token, spos, epos, line)
- elif token == 'async' and n_token in ('def', 'for', 'with'):
- yield (ASYNC, token, spos, epos, line)
- elif initial in namechars: # ordinary name
- yield (NAME, token, spos, epos, line)
- elif token in ('...',): # ordinary name
- yield (NAME, token, spos, epos, line)
- elif initial == '\\': # continued stmt
- # This yield is new; needed for better idempotency:
- yield (NL, token, spos, (lnum, pos), line)
- continued = 1
- else:
- if initial in '([{': parenlev = parenlev + 1
- elif initial in ')]}': parenlev = parenlev - 1
- yield (OP, token, spos, epos, line)
- else:
- yield (ERRORTOKEN, line[pos],
- (lnum, pos), (lnum, pos+1), line)
- pos = pos + 1
-
- for indent in indents[1:]: # pop remaining indent levels
- yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
- yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
-
-if __name__ == '__main__': # testing
- import sys
- if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
- else: tokenize(sys.stdin.readline)
diff --git a/sphinx/quickstart.py b/sphinx/quickstart.py
index a36c57528..5e403b1d8 100644
--- a/sphinx/quickstart.py
+++ b/sphinx/quickstart.py
@@ -3,708 +3,33 @@
sphinx.quickstart
~~~~~~~~~~~~~~~~~
- Quickly setup documentation source to work with Sphinx.
+ This file has moved to :py:mod:`sphinx.cmd.quickstart`.
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from __future__ import print_function
-from __future__ import absolute_import
-import re
-import os
-import sys
-import optparse
-import time
-from os import path
-from io import open
+import warnings
-# try to import readline, unix specific enhancement
-try:
- import readline
- if readline.__doc__ and 'libedit' in readline.__doc__:
- readline.parse_and_bind("bind ^I rl_complete")
- else:
- readline.parse_and_bind("tab: complete")
-except ImportError:
- pass
+from sphinx.deprecation import RemovedInSphinx20Warning
+from sphinx.cmd.quickstart import main as _main
-from six import PY2, PY3, text_type, binary_type
-from six.moves import input
-from six.moves.urllib.parse import quote as urlquote
-from docutils.utils import column_width
-from sphinx import __display_version__, package_dir
-from sphinx.util.osutil import ensuredir, make_filename
-from sphinx.util.console import ( # type: ignore
- purple, bold, red, turquoise, nocolor, color_terminal
-)
-from sphinx.util.template import SphinxRenderer
-from sphinx.util import texescape
+def main(*args, **kwargs):
+ warnings.warn(
+ '`sphinx.quickstart.main()` has moved to `sphinx.cmd.quickstart.'
+ 'main()`.',
+ RemovedInSphinx20Warning,
+ stacklevel=2,
+ )
+ _main(*args, **kwargs)
-if False:
- # For type annotation
- from typing import Any, Callable, Dict, List, Pattern # NOQA
-TERM_ENCODING = getattr(sys.stdin, 'encoding', None)
-
-DEFAULT_VALUE = {
- 'path': '.',
- 'sep': False,
- 'dot': '_',
- 'language': None,
- 'suffix': '.rst',
- 'master': 'index',
- 'epub': False,
- 'ext_autodoc': False,
- 'ext_doctest': False,
- 'ext_todo': False,
- 'makefile': True,
- 'batchfile': True,
-}
-
-EXTENSIONS = ('autodoc', 'doctest', 'intersphinx', 'todo', 'coverage',
- 'imgmath', 'mathjax', 'ifconfig', 'viewcode', 'githubpages')
-
-PROMPT_PREFIX = '> '
-
-
-# function to get input from terminal -- overridden by the test suite
-def term_input(prompt):
- # type: (unicode) -> unicode
- print(prompt, end='')
- return input('')
-
-
-class ValidationError(Exception):
- """Raised for validation errors."""
-
-
-def is_path(x):
- # type: (unicode) -> unicode
- x = path.expanduser(x)
- if path.exists(x) and not path.isdir(x):
- raise ValidationError("Please enter a valid path name.")
- return x
-
-
-def allow_empty(x):
- # type: (unicode) -> unicode
- return x
-
-
-def nonempty(x):
- # type: (unicode) -> unicode
- if not x:
- raise ValidationError("Please enter some text.")
- return x
-
-
-def choice(*l):
- # type: (unicode) -> Callable[[unicode], unicode]
- def val(x):
- # type: (unicode) -> unicode
- if x not in l:
- raise ValidationError('Please enter one of %s.' % ', '.join(l))
- return x
- return val
-
-
-def boolean(x):
- # type: (unicode) -> bool
- if x.upper() not in ('Y', 'YES', 'N', 'NO'):
- raise ValidationError("Please enter either 'y' or 'n'.")
- return x.upper() in ('Y', 'YES')
-
-
-def suffix(x):
- # type: (unicode) -> unicode
- if not (x[0:1] == '.' and len(x) > 1):
- raise ValidationError("Please enter a file suffix, "
- "e.g. '.rst' or '.txt'.")
- return x
-
-
-def ok(x):
- # type: (unicode) -> unicode
- return x
-
-
-def term_decode(text):
- # type: (unicode) -> unicode
- if isinstance(text, text_type):
- return text
-
- # for Python 2.x, try to get a Unicode string out of it
- if text.decode('ascii', 'replace').encode('ascii', 'replace') == text:
- return text
-
- if TERM_ENCODING:
- text = text.decode(TERM_ENCODING)
- else:
- print(turquoise('* Note: non-ASCII characters entered '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.'))
- try:
- text = text.decode('utf-8')
- except UnicodeDecodeError:
- text = text.decode('latin1')
- return text
-
-
-def do_prompt(d, key, text, default=None, validator=nonempty):
- # type: (Dict, unicode, unicode, unicode, Callable[[unicode], Any]) -> None
- while True:
- if default is not None:
- prompt = PROMPT_PREFIX + '%s [%s]: ' % (text, default) # type: unicode
- else:
- prompt = PROMPT_PREFIX + text + ': '
- if PY2:
- # for Python 2.x, try to get a Unicode string out of it
- if prompt.encode('ascii', 'replace').decode('ascii', 'replace') \
- != prompt:
- if TERM_ENCODING:
- prompt = prompt.encode(TERM_ENCODING)
- else:
- print(turquoise('* Note: non-ASCII default value provided '
- 'and terminal encoding unknown -- assuming '
- 'UTF-8 or Latin-1.'))
- try:
- prompt = prompt.encode('utf-8')
- except UnicodeEncodeError:
- prompt = prompt.encode('latin1')
- prompt = purple(prompt)
- x = term_input(prompt).strip()
- if default and not x:
- x = default
- x = term_decode(x)
- try:
- x = validator(x)
- except ValidationError as err:
- print(red('* ' + str(err)))
- continue
- break
- d[key] = x
-
-
-def convert_python_source(source, rex=re.compile(r"[uU]('.*?')")):
- # type: (unicode, Pattern) -> unicode
- # remove Unicode literal prefixes
- if PY3:
- return rex.sub('\\1', source)
- else:
- return source
-
-
-class QuickstartRenderer(SphinxRenderer):
- def __init__(self, templatedir):
- # type: (unicode) -> None
- self.templatedir = templatedir or ''
- super(QuickstartRenderer, self).__init__()
-
- def render(self, template_name, context):
- # type: (unicode, Dict) -> unicode
- user_template = path.join(self.templatedir, path.basename(template_name))
- if self.templatedir and path.exists(user_template):
- return self.render_from_file(user_template, context)
- else:
- return super(QuickstartRenderer, self).render(template_name, context)
-
-
-def ask_user(d):
- # type: (Dict) -> None
- """Ask the user for quickstart values missing from *d*.
-
- Values are:
-
- * path: root path
- * sep: separate source and build dirs (bool)
- * dot: replacement for dot in _templates etc.
- * project: project name
- * author: author names
- * version: version of project
- * release: release of project
- * language: document language
- * suffix: source file suffix
- * master: master document name
- * epub: use epub (bool)
- * ext_*: extensions to use (bools)
- * makefile: make Makefile
- * batchfile: make command file
- """
-
- print(bold('Welcome to the Sphinx %s quickstart utility.') % __display_version__)
- print('''
-Please enter values for the following settings (just press Enter to
-accept a default value, if one is given in brackets).''')
-
- if 'path' in d:
- print(bold('''
-Selected root path: %s''' % d['path']))
- else:
- print('''
-Enter the root path for documentation.''')
- do_prompt(d, 'path', 'Root path for the documentation', '.', is_path)
-
- while path.isfile(path.join(d['path'], 'conf.py')) or \
- path.isfile(path.join(d['path'], 'source', 'conf.py')):
- print()
- print(bold('Error: an existing conf.py has been found in the '
- 'selected root path.'))
- print('sphinx-quickstart will not overwrite existing Sphinx projects.')
- print()
- do_prompt(d, 'path', 'Please enter a new root path (or just Enter '
- 'to exit)', '', is_path)
- if not d['path']:
- sys.exit(1)
-
- if 'sep' not in d:
- print('''
-You have two options for placing the build directory for Sphinx output.
-Either, you use a directory "_build" within the root path, or you separate
-"source" and "build" directories within the root path.''')
- do_prompt(d, 'sep', 'Separate source and build directories (y/n)', 'n',
- boolean)
-
- if 'dot' not in d:
- print('''
-Inside the root directory, two more directories will be created; "_templates"
-for custom HTML templates and "_static" for custom stylesheets and other static
-files. You can enter another prefix (such as ".") to replace the underscore.''')
- do_prompt(d, 'dot', 'Name prefix for templates and static dir', '_', ok)
-
- if 'project' not in d:
- print('''
-The project name will occur in several places in the built documentation.''')
- do_prompt(d, 'project', 'Project name')
- if 'author' not in d:
- do_prompt(d, 'author', 'Author name(s)')
-
- if 'version' not in d:
- print('''
-Sphinx has the notion of a "version" and a "release" for the
-software. Each version can have multiple releases. For example, for
-Python the version is something like 2.5 or 3.0, while the release is
-something like 2.5.1 or 3.0a1. If you don't need this dual structure,
-just set both to the same value.''')
- do_prompt(d, 'version', 'Project version', '', allow_empty)
- if 'release' not in d:
- do_prompt(d, 'release', 'Project release', d['version'], allow_empty)
-
- if 'language' not in d:
- print('''
-If the documents are to be written in a language other than English,
-you can select a language here by its language code. Sphinx will then
-translate text that it generates into that language.
-
-For a list of supported codes, see
-http://sphinx-doc.org/config.html#confval-language.''')
- do_prompt(d, 'language', 'Project language', 'en')
- if d['language'] == 'en':
- d['language'] = None
-
- if 'suffix' not in d:
- print('''
-The file name suffix for source files. Commonly, this is either ".txt"
-or ".rst". Only files with this suffix are considered documents.''')
- do_prompt(d, 'suffix', 'Source file suffix', '.rst', suffix)
-
- if 'master' not in d:
- print('''
-One document is special in that it is considered the top node of the
-"contents tree", that is, it is the root of the hierarchical structure
-of the documents. Normally, this is "index", but if your "index"
-document is a custom template, you can also set this to another filename.''')
- do_prompt(d, 'master', 'Name of your master document (without suffix)',
- 'index')
-
- while path.isfile(path.join(d['path'], d['master'] + d['suffix'])) or \
- path.isfile(path.join(d['path'], 'source', d['master'] + d['suffix'])):
- print()
- print(bold('Error: the master file %s has already been found in the '
- 'selected root path.' % (d['master'] + d['suffix'])))
- print('sphinx-quickstart will not overwrite the existing file.')
- print()
- do_prompt(d, 'master', 'Please enter a new file name, or rename the '
- 'existing file and press Enter', d['master'])
-
- if 'epub' not in d:
- print('''
-Sphinx can also add configuration for epub output:''')
- do_prompt(d, 'epub', 'Do you want to use the epub builder (y/n)',
- 'n', boolean)
-
- if 'ext_autodoc' not in d:
- print('''
-Please indicate if you want to use one of the following Sphinx extensions:''')
- do_prompt(d, 'ext_autodoc', 'autodoc: automatically insert docstrings '
- 'from modules (y/n)', 'n', boolean)
- if 'ext_doctest' not in d:
- do_prompt(d, 'ext_doctest', 'doctest: automatically test code snippets '
- 'in doctest blocks (y/n)', 'n', boolean)
- if 'ext_intersphinx' not in d:
- do_prompt(d, 'ext_intersphinx', 'intersphinx: link between Sphinx '
- 'documentation of different projects (y/n)', 'n', boolean)
- if 'ext_todo' not in d:
- do_prompt(d, 'ext_todo', 'todo: write "todo" entries '
- 'that can be shown or hidden on build (y/n)', 'n', boolean)
- if 'ext_coverage' not in d:
- do_prompt(d, 'ext_coverage', 'coverage: checks for documentation '
- 'coverage (y/n)', 'n', boolean)
- if 'ext_imgmath' not in d:
- do_prompt(d, 'ext_imgmath', 'imgmath: include math, rendered '
- 'as PNG or SVG images (y/n)', 'n', boolean)
- if 'ext_mathjax' not in d:
- do_prompt(d, 'ext_mathjax', 'mathjax: include math, rendered in the '
- 'browser by MathJax (y/n)', 'n', boolean)
- if d['ext_imgmath'] and d['ext_mathjax']:
- print('''Note: imgmath and mathjax cannot be enabled at the same time.
-imgmath has been deselected.''')
- d['ext_imgmath'] = False
- if 'ext_ifconfig' not in d:
- do_prompt(d, 'ext_ifconfig', 'ifconfig: conditional inclusion of '
- 'content based on config values (y/n)', 'n', boolean)
- if 'ext_viewcode' not in d:
- do_prompt(d, 'ext_viewcode', 'viewcode: include links to the source '
- 'code of documented Python objects (y/n)', 'n', boolean)
- if 'ext_githubpages' not in d:
- do_prompt(d, 'ext_githubpages', 'githubpages: create .nojekyll file '
- 'to publish the document on GitHub pages (y/n)', 'n', boolean)
-
- if 'no_makefile' in d:
- d['makefile'] = False
- elif 'makefile' not in d:
- print('''
-A Makefile and a Windows command file can be generated for you so that you
-only have to run e.g. `make html' instead of invoking sphinx-build
-directly.''')
- do_prompt(d, 'makefile', 'Create Makefile? (y/n)', 'y', boolean)
- if 'no_batchfile' in d:
- d['batchfile'] = False
- elif 'batchfile' not in d:
- do_prompt(d, 'batchfile', 'Create Windows command file? (y/n)',
- 'y', boolean)
- print()
-
-
-def generate(d, overwrite=True, silent=False, templatedir=None):
- # type: (Dict, bool, bool, unicode) -> None
- """Generate project based on values in *d*."""
- template = QuickstartRenderer(templatedir=templatedir)
-
- texescape.init()
- indent = ' ' * 4
-
- if 'mastertoctree' not in d:
- d['mastertoctree'] = ''
- if 'mastertocmaxdepth' not in d:
- d['mastertocmaxdepth'] = 2
-
- d['PY3'] = PY3
- d['project_fn'] = make_filename(d['project'])
- d['project_url'] = urlquote(d['project'].encode('idna'))
- d['project_manpage'] = d['project_fn'].lower()
- d['now'] = time.asctime()
- d['project_underline'] = column_width(d['project']) * '='
- d.setdefault('extensions', [])
- for name in EXTENSIONS:
- if d.get('ext_' + name):
- d['extensions'].append('sphinx.ext.' + name)
- d['extensions'] = (',\n' + indent).join(repr(name) for name in d['extensions'])
- d['copyright'] = time.strftime('%Y') + ', ' + d['author']
- d['author_texescaped'] = text_type(d['author']).\
- translate(texescape.tex_escape_map)
- d['project_doc'] = d['project'] + ' Documentation'
- d['project_doc_texescaped'] = text_type(d['project'] + ' Documentation').\
- translate(texescape.tex_escape_map)
-
- # escape backslashes and single quotes in strings that are put into
- # a Python string literal
- for key in ('project', 'project_doc', 'project_doc_texescaped',
- 'author', 'author_texescaped', 'copyright',
- 'version', 'release', 'master'):
- d[key + '_str'] = d[key].replace('\\', '\\\\').replace("'", "\\'")
-
- if not path.isdir(d['path']):
- ensuredir(d['path'])
-
- srcdir = d['sep'] and path.join(d['path'], 'source') or d['path']
-
- ensuredir(srcdir)
- if d['sep']:
- builddir = path.join(d['path'], 'build')
- d['exclude_patterns'] = ''
- else:
- builddir = path.join(srcdir, d['dot'] + 'build')
- exclude_patterns = map(repr, [
- d['dot'] + 'build',
- 'Thumbs.db', '.DS_Store',
- ])
- d['exclude_patterns'] = ', '.join(exclude_patterns)
- ensuredir(builddir)
- ensuredir(path.join(srcdir, d['dot'] + 'templates'))
- ensuredir(path.join(srcdir, d['dot'] + 'static'))
-
- def write_file(fpath, content, newline=None):
- # type: (unicode, unicode, unicode) -> None
- if overwrite or not path.isfile(fpath):
- if 'quiet' not in d:
- print('Creating file %s.' % fpath)
- with open(fpath, 'wt', encoding='utf-8', newline=newline) as f:
- f.write(content)
- else:
- if 'quiet' not in d:
- print('File %s already exists, skipping.' % fpath)
-
- conf_path = os.path.join(templatedir, 'conf.py_t') if templatedir else None
- if not conf_path or not path.isfile(conf_path):
- conf_path = os.path.join(package_dir, 'templates', 'quickstart', 'conf.py_t')
- with open(conf_path) as f:
- conf_text = convert_python_source(f.read())
-
- write_file(path.join(srcdir, 'conf.py'), template.render_string(conf_text, d))
-
- masterfile = path.join(srcdir, d['master'] + d['suffix'])
- write_file(masterfile, template.render('quickstart/master_doc.rst_t', d))
-
- if d.get('make_mode') is True:
- makefile_template = 'quickstart/Makefile.new_t'
- batchfile_template = 'quickstart/make.bat.new_t'
- else:
- makefile_template = 'quickstart/Makefile_t'
- batchfile_template = 'quickstart/make.bat_t'
-
- if d['makefile'] is True:
- d['rsrcdir'] = d['sep'] and 'source' or '.'
- d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
- # use binary mode, to avoid writing \r\n on Windows
- write_file(path.join(d['path'], 'Makefile'),
- template.render(makefile_template, d), u'\n')
-
- if d['batchfile'] is True:
- d['rsrcdir'] = d['sep'] and 'source' or '.'
- d['rbuilddir'] = d['sep'] and 'build' or d['dot'] + 'build'
- write_file(path.join(d['path'], 'make.bat'),
- template.render(batchfile_template, d), u'\r\n')
-
- if silent:
- return
- print()
- print(bold('Finished: An initial directory structure has been created.'))
- print('''
-You should now populate your master file %s and create other documentation
-source files. ''' % masterfile + ((d['makefile'] or d['batchfile']) and '''\
-Use the Makefile to build the docs, like so:
- make builder
-''' or '''\
-Use the sphinx-build command to build the docs, like so:
- sphinx-build -b builder %s %s
-''' % (srcdir, builddir)) + '''\
-where "builder" is one of the supported builders, e.g. html, latex or linkcheck.
-''')
-
-
-def usage(argv, msg=None):
- # type: (List[unicode], unicode) -> None
- if msg:
- print(msg, file=sys.stderr)
- print(file=sys.stderr)
-
-
-USAGE = """\
-Sphinx v%s
-Usage: %%prog [options] [projectdir]
-""" % __display_version__
-
-EPILOG = """\
-For more information, visit <http://sphinx-doc.org/>.
-"""
-
-
-def valid_dir(d):
- # type: (Dict) -> bool
- dir = d['path']
- if not path.exists(dir):
- return True
- if not path.isdir(dir):
- return False
-
- if set(['Makefile', 'make.bat']) & set(os.listdir(dir)): # type: ignore
- return False
-
- if d['sep']:
- dir = os.path.join('source', dir)
- if not path.exists(dir):
- return True
- if not path.isdir(dir):
- return False
-
- reserved_names = [
- 'conf.py',
- d['dot'] + 'static',
- d['dot'] + 'templates',
- d['master'] + d['suffix'],
- ]
- if set(reserved_names) & set(os.listdir(dir)): # type: ignore
- return False
-
- return True
-
-
-class MyFormatter(optparse.IndentedHelpFormatter):
- def format_usage(self, usage): # type: ignore
- # type: (str) -> str
- return usage
-
- def format_help(self, formatter):
- result = []
- if self.description:
- result.append(self.format_description(formatter))
- if self.option_list:
- result.append(self.format_option_help(formatter))
- return "\n".join(result)
-
-
-def main(argv=sys.argv):
- # type: (List[str]) -> int
- if not color_terminal():
- nocolor()
-
- parser = optparse.OptionParser(USAGE, epilog=EPILOG,
- version='Sphinx v%s' % __display_version__,
- formatter=MyFormatter())
- parser.add_option('-q', '--quiet', action='store_true', dest='quiet',
- default=False,
- help='quiet mode')
-
- group = parser.add_option_group('Structure options')
- group.add_option('--sep', action='store_true', dest='sep',
- help='if specified, separate source and build dirs')
- group.add_option('--dot', metavar='DOT', dest='dot',
- help='replacement for dot in _templates etc.')
-
- group = parser.add_option_group('Project basic options')
- group.add_option('-p', '--project', metavar='PROJECT', dest='project',
- help='project name')
- group.add_option('-a', '--author', metavar='AUTHOR', dest='author',
- help='author names')
- group.add_option('-v', metavar='VERSION', dest='version',
- help='version of project')
- group.add_option('-r', '--release', metavar='RELEASE', dest='release',
- help='release of project')
- group.add_option('-l', '--language', metavar='LANGUAGE', dest='language',
- help='document language')
- group.add_option('--suffix', metavar='SUFFIX', dest='suffix',
- help='source file suffix')
- group.add_option('--master', metavar='MASTER', dest='master',
- help='master document name')
- group.add_option('--epub', action='store_true', dest='epub',
- default=False,
- help='use epub')
-
- group = parser.add_option_group('Extension options')
- for ext in EXTENSIONS:
- group.add_option('--ext-' + ext, action='store_true',
- dest='ext_' + ext, default=False,
- help='enable %s extension' % ext)
- group.add_option('--extensions', metavar='EXTENSIONS', dest='extensions',
- action='append', help='enable extensions')
-
- group = parser.add_option_group('Makefile and Batchfile creation')
- group.add_option('--makefile', action='store_true', dest='makefile',
- default=False,
- help='create makefile')
- group.add_option('--no-makefile', action='store_true', dest='no_makefile',
- default=False,
- help='not create makefile')
- group.add_option('--batchfile', action='store_true', dest='batchfile',
- default=False,
- help='create batchfile')
- group.add_option('--no-batchfile', action='store_true', dest='no_batchfile',
- default=False,
- help='not create batchfile')
- group.add_option('-M', '--no-use-make-mode', action='store_false', dest='make_mode',
- help='not use make-mode for Makefile/make.bat')
- group.add_option('-m', '--use-make-mode', action='store_true', dest='make_mode',
- default=True,
- help='use make-mode for Makefile/make.bat')
-
- group = parser.add_option_group('Project templating')
- group.add_option('-t', '--templatedir', metavar='TEMPLATEDIR', dest='templatedir',
- help='template directory for template files')
- group.add_option('-d', metavar='NAME=VALUE', action='append', dest='variables',
- help='define a template variable')
-
- # parse options
- try:
- opts, args = parser.parse_args(argv[1:])
- except SystemExit as err:
- return err.code
-
- if len(args) > 0:
- opts.ensure_value('path', args[0])
-
- d = vars(opts)
- # delete None or False value
- d = dict((k, v) for k, v in d.items() if not (v is None or v is False))
-
- try:
- if 'quiet' in d:
- if not set(['project', 'author']).issubset(d):
- print('''"quiet" is specified, but any of "project" or \
-"author" is not specified.''')
- return 1
-
- if set(['quiet', 'project', 'author']).issubset(d):
- # quiet mode with all required params satisfied, use default
- d.setdefault('version', '')
- d.setdefault('release', d['version'])
- d2 = DEFAULT_VALUE.copy()
- d2.update(dict(("ext_" + ext, False) for ext in EXTENSIONS))
- d2.update(d)
- d = d2
- if 'no_makefile' in d:
- d['makefile'] = False
- if 'no_batchfile' in d:
- d['batchfile'] = False
-
- if not valid_dir(d):
- print()
- print(bold('Error: specified path is not a directory, or sphinx'
- ' files already exist.'))
- print('sphinx-quickstart only generate into a empty directory.'
- ' Please specify a new root path.')
- return 1
- else:
- ask_user(d)
- except (KeyboardInterrupt, EOFError):
- print()
- print('[Interrupted.]')
- return 130 # 128 + SIGINT
-
- # decode values in d if value is a Python string literal
- for key, value in d.items():
- if isinstance(value, binary_type):
- d[key] = term_decode(value)
-
- # parse extensions list
- d.setdefault('extensions', [])
- for ext in d['extensions'][:]:
- if ',' in ext:
- d['extensions'].remove(ext)
- for modname in ext.split(','):
- d['extensions'].append(modname)
-
- for variable in d.get('variables', []):
- try:
- name, value = variable.split('=')
- d[name] = value
- except ValueError:
- print('Invalid template variable: %s' % variable)
-
- generate(d, templatedir=opts.templatedir)
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
+# So program can be started with "python -m sphinx.quickstart ..."
+if __name__ == "__main__":
+ warnings.warn(
+ '`sphinx.quickstart` has moved to `sphinx.cmd.quickstart`.',
+ RemovedInSphinx20Warning,
+ stacklevel=2,
+ )
+ main()
diff --git a/sphinx/registry.py b/sphinx/registry.py
index 0861575db..6ec966a6a 100644
--- a/sphinx/registry.py
+++ b/sphinx/registry.py
@@ -13,26 +13,32 @@ from __future__ import print_function
import traceback
from pkg_resources import iter_entry_points
-from six import itervalues
+from six import iteritems, itervalues, string_types
from sphinx.errors import ExtensionError, SphinxError, VersionRequirementError
from sphinx.extension import Extension
from sphinx.domains import ObjType
from sphinx.domains.std import GenericObject, Target
from sphinx.locale import __
+from sphinx.parsers import Parser as SphinxParser
from sphinx.roles import XRefRole
from sphinx.util import logging
+from sphinx.util import import_object
+from sphinx.util.console import bold # type: ignore
from sphinx.util.docutils import directive_helper
if False:
# For type annotation
- from typing import Any, Callable, Dict, Iterator, List, Type # NOQA
+ from typing import Any, Callable, Dict, Iterator, List, Type, Union # NOQA
from docutils import nodes # NOQA
+ from docutils.io import Input # NOQA
from docutils.parsers import Parser # NOQA
+ from docutils.transform import Transform # NOQA
from sphinx.application import Sphinx # NOQA
from sphinx.builders import Builder # NOQA
from sphinx.domains import Domain, Index # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.util.typing import RoleFunction # NOQA
logger = logging.getLogger(__name__)
@@ -45,13 +51,21 @@ EXTENSION_BLACKLIST = {
class SphinxComponentRegistry(object):
def __init__(self):
- self.builders = {} # type: Dict[unicode, Type[Builder]]
- self.domains = {} # type: Dict[unicode, Type[Domain]]
- self.source_parsers = {} # type: Dict[unicode, Parser]
- self.translators = {} # type: Dict[unicode, nodes.NodeVisitor]
+ self.builders = {} # type: Dict[unicode, Type[Builder]]
+ self.domains = {} # type: Dict[unicode, Type[Domain]]
+ self.domain_directives = {} # type: Dict[unicode, Dict[unicode, Any]]
+ self.domain_indices = {} # type: Dict[unicode, List[Type[Index]]]
+ self.domain_object_types = {} # type: Dict[unicode, Dict[unicode, ObjType]]
+ self.domain_roles = {} # type: Dict[unicode, Dict[unicode, Union[RoleFunction, XRefRole]]] # NOQA
+ self.post_transforms = [] # type: List[Type[Transform]]
+ self.source_parsers = {} # type: Dict[unicode, Parser]
+ self.source_inputs = {} # type: Dict[unicode, Input]
+ self.translators = {} # type: Dict[unicode, nodes.NodeVisitor]
+ self.transforms = [] # type: List[Type[Transform]]
def add_builder(self, builder):
# type: (Type[Builder]) -> None
+ logger.debug('[app] adding builder: %r', builder)
if not hasattr(builder, 'name'):
raise ExtensionError(__('Builder class %s has no "name" attribute') % builder)
if builder.name in self.builders:
@@ -83,6 +97,7 @@ class SphinxComponentRegistry(object):
def add_domain(self, domain):
# type: (Type[Domain]) -> None
+ logger.debug('[app] adding domain: %r', domain)
if domain.name in self.domains:
raise ExtensionError(__('domain %s already registered') % domain.name)
self.domains[domain.name] = domain
@@ -94,10 +109,20 @@ class SphinxComponentRegistry(object):
def create_domains(self, env):
# type: (BuildEnvironment) -> Iterator[Domain]
for DomainClass in itervalues(self.domains):
- yield DomainClass(env)
+ domain = DomainClass(env)
+
+ # transplant components added by extensions
+ domain.directives.update(self.domain_directives.get(domain.name, {}))
+ domain.roles.update(self.domain_roles.get(domain.name, {}))
+ domain.indices.extend(self.domain_indices.get(domain.name, []))
+ for name, objtype in iteritems(self.domain_object_types.get(domain.name, {})):
+ domain.add_object_type(name, objtype)
+
+ yield domain
def override_domain(self, domain):
# type: (Type[Domain]) -> None
+ logger.debug('[app] overriding domain: %r', domain)
if domain.name not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain.name)
if not issubclass(domain, self.domains[domain.name]):
@@ -108,27 +133,37 @@ class SphinxComponentRegistry(object):
def add_directive_to_domain(self, domain, name, obj,
has_content=None, argument_spec=None, **option_spec):
# type: (unicode, unicode, Any, bool, Any, Any) -> None
+ logger.debug('[app] adding directive to domain: %r',
+ (domain, name, obj, has_content, argument_spec, option_spec))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
- directive = directive_helper(obj, has_content, argument_spec, **option_spec)
- self.domains[domain].directives[name] = directive
+ directives = self.domain_directives.setdefault(domain, {})
+ directives[name] = directive_helper(obj, has_content, argument_spec, **option_spec)
def add_role_to_domain(self, domain, name, role):
- # type: (unicode, unicode, Any) -> None
+ # type: (unicode, unicode, Union[RoleFunction, XRefRole]) -> None
+ logger.debug('[app] adding role to domain: %r', (domain, name, role))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
- self.domains[domain].roles[name] = role
+ roles = self.domain_roles.setdefault(domain, {})
+ roles[name] = role
def add_index_to_domain(self, domain, index):
# type: (unicode, Type[Index]) -> None
+ logger.debug('[app] adding index to domain: %r', (domain, index))
if domain not in self.domains:
raise ExtensionError(__('domain %s not yet registered') % domain)
- self.domains[domain].indices.append(index)
+ indices = self.domain_indices.setdefault(domain, [])
+ indices.append(index)
def add_object_type(self, directivename, rolename, indextemplate='',
parse_node=None, ref_nodeclass=None, objname='',
doc_field_types=[]):
# type: (unicode, unicode, unicode, Callable, nodes.Node, unicode, List) -> None
+ logger.debug('[app] adding object type: %r',
+ (directivename, rolename, indextemplate, parse_node,
+ ref_nodeclass, objname, doc_field_types))
+
# create a subclass of GenericObject as the new directive
directive = type(directivename, # type: ignore
(GenericObject, object),
@@ -136,36 +171,89 @@ class SphinxComponentRegistry(object):
'parse_node': staticmethod(parse_node),
'doc_field_types': doc_field_types})
- stddomain = self.domains['std']
- stddomain.directives[directivename] = directive
- stddomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
- stddomain.object_types[directivename] = ObjType(objname or directivename, rolename)
+ self.add_directive_to_domain('std', directivename, directive)
+ self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))
+
+ object_types = self.domain_object_types.setdefault('std', {})
+ object_types[directivename] = ObjType(objname or directivename, rolename)
def add_crossref_type(self, directivename, rolename, indextemplate='',
ref_nodeclass=None, objname=''):
# type: (unicode, unicode, unicode, nodes.Node, unicode) -> None
+ logger.debug('[app] adding crossref type: %r',
+ (directivename, rolename, indextemplate, ref_nodeclass, objname))
+
# create a subclass of Target as the new directive
directive = type(directivename, # type: ignore
(Target, object),
{'indextemplate': indextemplate})
- stddomain = self.domains['std']
- stddomain.directives[directivename] = directive
- stddomain.roles[rolename] = XRefRole(innernodeclass=ref_nodeclass)
- stddomain.object_types[directivename] = ObjType(objname or directivename, rolename)
+ self.add_directive_to_domain('std', directivename, directive)
+ self.add_role_to_domain('std', rolename, XRefRole(innernodeclass=ref_nodeclass))
+
+ object_types = self.domain_object_types.setdefault('std', {})
+ object_types[directivename] = ObjType(objname or directivename, rolename)
def add_source_parser(self, suffix, parser):
- # type: (unicode, Parser) -> None
+ # type: (unicode, Type[Parser]) -> None
+ logger.debug('[app] adding search source_parser: %r, %r', suffix, parser)
if suffix in self.source_parsers:
raise ExtensionError(__('source_parser for %r is already registered') % suffix)
self.source_parsers[suffix] = parser
+ def get_source_parser(self, filename):
+ # type: (unicode) -> Type[Parser]
+ for suffix, parser_class in iteritems(self.source_parsers):
+ if filename.endswith(suffix):
+ break
+ else:
+ # use special parser for unknown file-extension '*' (if exists)
+ parser_class = self.source_parsers.get('*')
+
+ if parser_class is None:
+ raise SphinxError(__('Source parser for %s not registered') % filename)
+ else:
+ if isinstance(parser_class, string_types):
+ parser_class = import_object(parser_class, 'source parser') # type: ignore
+ return parser_class
+
def get_source_parsers(self):
# type: () -> Dict[unicode, Parser]
return self.source_parsers
+ def create_source_parser(self, app, filename):
+ # type: (Sphinx, unicode) -> Parser
+ parser_class = self.get_source_parser(filename)
+ parser = parser_class()
+ if isinstance(parser, SphinxParser):
+ parser.set_application(app)
+ return parser
+
+ def add_source_input(self, filetype, input_class):
+ # type: (unicode, Type[Input]) -> None
+ if filetype in self.source_inputs:
+ raise ExtensionError(__('source_input for %r is already registered') % filetype)
+ self.source_inputs[filetype] = input_class
+
+ def get_source_input(self, filename):
+ # type: (unicode) -> Type[Input]
+ parser = self.get_source_parser(filename)
+ for filetype in parser.supported:
+ if filetype in self.source_inputs:
+ input_class = self.source_inputs[filetype]
+ break
+ else:
+ # use special source_input for unknown file-type '*' (if exists)
+ input_class = self.source_inputs.get('*')
+
+ if input_class is None:
+ raise SphinxError(__('source_input for %s not registered') % filename)
+ else:
+ return input_class
+
def add_translator(self, name, translator):
# type: (unicode, Type[nodes.NodeVisitor]) -> None
+ logger.info(bold(__('Change of translator for the %s builder.') % name))
self.translators[name] = translator
def get_translator_class(self, builder):
@@ -178,6 +266,24 @@ class SphinxComponentRegistry(object):
translator_class = self.get_translator_class(builder)
return translator_class(builder, document)
+ def add_transform(self, transform):
+ # type: (Type[Transform]) -> None
+ logger.debug('[app] adding transform: %r', transform)
+ self.transforms.append(transform)
+
+ def get_transforms(self):
+ # type: () -> List[Type[Transform]]
+ return self.transforms
+
+ def add_post_transform(self, transform):
+ # type: (Type[Transform]) -> None
+ logger.debug('[app] adding post transform: %r', transform)
+ self.post_transforms.append(transform)
+
+ def get_post_transforms(self):
+ # type: () -> List[Type[Transform]]
+ return self.post_transforms
+
def load_extension(self, app, extname):
# type: (Sphinx, unicode) -> None
"""Load a Sphinx extension."""
diff --git a/sphinx/search/__init__.py b/sphinx/search/__init__.py
index d56b9e626..fc55a2a45 100644
--- a/sphinx/search/__init__.py
+++ b/sphinx/search/__init__.py
@@ -65,7 +65,7 @@ var Stemmer = function() {
}
""" # type: unicode
- _word_re = re.compile(r'\w+(?u)')
+ _word_re = re.compile(r'(?u)\w+')
def __init__(self, options):
# type: (Dict) -> None
diff --git a/sphinx/search/zh.py b/sphinx/search/zh.py
index c9ae890ed..2301e1103 100644
--- a/sphinx/search/zh.py
+++ b/sphinx/search/zh.py
@@ -233,14 +233,14 @@ class SearchChinese(SearchLanguage):
language_name = 'Chinese'
js_stemmer_code = js_porter_stemmer
stopwords = english_stopwords
- latin1_letters = re.compile(r'\w+(?u)[\u0000-\u00ff]')
+ latin1_letters = re.compile(u'(?u)\\w+[\u0000-\u00ff]')
def init(self, options):
# type: (Dict) -> None
if JIEBA:
dict_path = options.get('dict')
if dict_path and os.path.isfile(dict_path):
- jieba.set_dictionary(dict_path)
+ jieba.load_userdict(dict_path)
self.stemmer = get_stemmer()
@@ -250,7 +250,7 @@ class SearchChinese(SearchLanguage):
if JIEBA:
chinese = list(jieba.cut_for_search(input))
- latin1 = self.latin1_letters.findall(input) # type: ignore
+ latin1 = self.latin1_letters.findall(input)
return chinese + latin1
def word_filter(self, stemmed_word):
diff --git a/sphinx/templates/epub2/container.xml b/sphinx/templates/epub2/container.xml
deleted file mode 100644
index 326cf15fa..000000000
--- a/sphinx/templates/epub2/container.xml
+++ /dev/null
@@ -1,6 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<container version="1.0" xmlns="urn:oasis:names:tc:opendocument:xmlns:container">
- <rootfiles>
- <rootfile full-path="content.opf" media-type="application/oebps-package+xml"/>
- </rootfiles>
-</container>
diff --git a/sphinx/templates/epub2/content.opf_t b/sphinx/templates/epub2/content.opf_t
deleted file mode 100644
index 5169d0551..000000000
--- a/sphinx/templates/epub2/content.opf_t
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<package xmlns="http://www.idpf.org/2007/opf" version="2.0"
- unique-identifier="%(uid)s">
- <metadata xmlns:opf="http://www.idpf.org/2007/opf"
- xmlns:dc="http://purl.org/dc/elements/1.1/">
- <dc:language>{{ lang }}</dc:language>
- <dc:title>{{ title }}</dc:title>
- <dc:creator opf:role="aut">{{ author }}</dc:creator>
- <dc:publisher>{{ publisher }}</dc:publisher>
- <dc:rights>{{ copyright }}</dc:rights>
- <dc:identifier id="{{ uid }}" opf:scheme="{{ scheme }}">{{ id }}</dc:identifier>
- <dc:date>{{ date }}</dc:date>
- {%- if cover %}
- <meta name="cover" content="{{ cover }}"/>
- {%- endif %}
- </metadata>
- <manifest>
- <item id="ncx" href="toc.ncx" media-type="application/x-dtbncx+xml" />
- {%- for item in manifest_items %}
- <item id="{{ item.id }}" href="{{ item.href }}" media-type="{{ item.media_type }}" />
- {%- endfor %}
- </manifest>
- <spine toc="ncx">
- {%- for spine in spines %}
- {%- if spine.linear %}
- <itemref idref="{{ spine.idref }}" />
- {%- else %}
- <itemref idref="{{ spine.idref }}" linear="no" />
- {%- endif %}
- {%- endfor %}
- </spine>
- <guide>
- {%- for guide in guides %}
- <reference type="{{ guide.type }}" title="{{ guide.title }}" href="{{ guide.uri }}" />
- {%- endfor %}
- </guide>
-</package>
diff --git a/sphinx/templates/epub2/mimetype b/sphinx/templates/epub2/mimetype
deleted file mode 100644
index 57ef03f24..000000000
--- a/sphinx/templates/epub2/mimetype
+++ /dev/null
@@ -1 +0,0 @@
-application/epub+zip \ No newline at end of file
diff --git a/sphinx/templates/epub2/toc.ncx_t b/sphinx/templates/epub2/toc.ncx_t
deleted file mode 100644
index 9bb701908..000000000
--- a/sphinx/templates/epub2/toc.ncx_t
+++ /dev/null
@@ -1,15 +0,0 @@
-<?xml version="1.0"?>
-<ncx version="2005-1" xmlns="http://www.daisy.org/z3986/2005/ncx/">
- <head>
- <meta name="dtb:uid" content="{{ uid }}"/>
- <meta name="dtb:depth" content="{{ level }}"/>
- <meta name="dtb:totalPageCount" content="0"/>
- <meta name="dtb:maxPageNumber" content="0"/>
- </head>
- <docTitle>
- <text>{{ title }}</text>
- </docTitle>
- <navMap>
-{{ navpoints }}
- </navMap>
-</ncx>
diff --git a/sphinx/templates/quickstart/Makefile_t b/sphinx/templates/quickstart/Makefile_t
index 4639a982b..2858d9bf7 100644
--- a/sphinx/templates/quickstart/Makefile_t
+++ b/sphinx/templates/quickstart/Makefile_t
@@ -27,7 +27,6 @@ help:
@echo " applehelp to make an Apple Help Book"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
- @echo " epub3 to make an epub3"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@@ -122,12 +121,6 @@ epub:
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-.PHONY: epub3
-epub3:
- $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3
- @echo
- @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3."
-
.PHONY: latex
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
diff --git a/sphinx/templates/quickstart/conf.py_t b/sphinx/templates/quickstart/conf.py_t
index 70683f8ee..c42861c28 100644
--- a/sphinx/templates/quickstart/conf.py_t
+++ b/sphinx/templates/quickstart/conf.py_t
@@ -42,7 +42,11 @@ sys.path.insert(0, u'{{ module_path }}')
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
-extensions = [{{ extensions }}]
+extensions = [
+{%- for ext in extensions %}
+ '{{ ext }}',
+{%- endfor %}
+]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['{{ dot }}templates']
@@ -79,15 +83,12 @@ language = {{ language | repr }}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
-# This patterns also effect to html_static_path and html_extra_path
+# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [{{ exclude_patterns }}]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
-# If true, `todo` and `todoList` produce output, else they produce nothing.
-todo_include_todos = {{ ext_todo }}
-
# -- Options for HTML output ----------------------------------------------
@@ -110,14 +111,12 @@ html_static_path = ['{{ dot }}static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
-# This is required for the alabaster theme
-# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
-html_sidebars = {
- '**': [
- 'relations.html', # needs 'show_related': True theme option to display
- 'searchbox.html',
- ]
-}
+# The default sidebars (for documents that don't match any pattern) are
+# defined by theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
# -- Options for HTMLHelp output ------------------------------------------
@@ -175,8 +174,8 @@ texinfo_documents = [
author, '{{ project_fn }}', 'One line description of project.',
'Miscellaneous'),
]
+{%- if epub %}
-{% if epub %}
# -- Options for Epub output ----------------------------------------------
@@ -197,9 +196,23 @@ epub_copyright = copyright
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
-{% endif %}
+{%- endif %}
+{%- if extensions %}
+
+
+# -- Extension configuration ----------------------------------------------
+{%- endif %}
+{%- if 'sphinx.ext.intersphinx' in extensions %}
+
+# -- Options for intersphinx extension ------------------------------------
-{% if ext_intersphinx %}
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
-{% endif %}
+{%- endif %}
+{%- if 'sphinx.ext.todo' in extensions %}
+
+# -- Options for todo extension -------------------------------------------
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+{%- endif %}
diff --git a/sphinx/templates/quickstart/make.bat_t b/sphinx/templates/quickstart/make.bat_t
index 8438b5f7e..230977488 100644
--- a/sphinx/templates/quickstart/make.bat_t
+++ b/sphinx/templates/quickstart/make.bat_t
@@ -29,7 +29,6 @@ if "%1" == "help" (
echo. qthelp to make HTML files and a qthelp project
echo. devhelp to make HTML files and a Devhelp project
echo. epub to make an epub
- echo. epub3 to make an epub3
echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
echo. text to make text files
echo. man to make manual pages
@@ -153,14 +152,6 @@ if "%1" == "epub" (
goto end
)
-if "%1" == "epub3" (
- %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3
- if errorlevel 1 exit /b 1
- echo.
- echo.Build finished. The epub3 file is in %BUILDDIR%/epub3.
- goto end
-)
-
if "%1" == "latex" (
%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
if errorlevel 1 exit /b 1
diff --git a/sphinx/testing/util.py b/sphinx/testing/util.py
index d8d06d3d4..fb2e8f1f5 100644
--- a/sphinx/testing/util.py
+++ b/sphinx/testing/util.py
@@ -12,13 +12,9 @@ import os
import re
import sys
import warnings
-from functools import wraps
from xml.etree import ElementTree
from six import string_types
-from six import StringIO
-
-import pytest
from docutils import nodes
from docutils.parsers.rst import directives, roles
@@ -27,7 +23,6 @@ from sphinx import application
from sphinx.builders.latex import LaTeXBuilder
from sphinx.ext.autodoc import AutoDirective
from sphinx.pycode import ModuleAnalyzer
-from sphinx.deprecation import RemovedInSphinx17Warning
from sphinx.testing.path import path
@@ -196,160 +191,3 @@ def find_files(root, suffix=None):
def strip_escseq(text):
return re.sub('\x1b.*?m', '', text)
-
-
-# #############################################
-# DEPRECATED implementations
-
-
-def gen_with_app(*args, **kwargs):
- """
- **DEPRECATED**: use pytest.mark.parametrize instead.
-
- Decorate a test generator to pass a SphinxTestApp as the first argument to
- the test generator when it's executed.
- """
- def generator(func):
- @wraps(func)
- def deco(*args2, **kwargs2):
- status, warning = StringIO(), StringIO()
- kwargs['status'] = status
- kwargs['warning'] = warning
- app = SphinxTestApp(*args, **kwargs)
- try:
- for item in func(app, status, warning, *args2, **kwargs2):
- yield item
- finally:
- app.cleanup()
- return deco
- return generator
-
-
-def skip_if(condition, msg=None):
- """
- **DEPRECATED**: use pytest.mark.skipif instead.
-
- Decorator to skip test if condition is true.
- """
- return pytest.mark.skipif(condition, reason=(msg or 'conditional skip'))
-
-
-def skip_unless(condition, msg=None):
- """
- **DEPRECATED**: use pytest.mark.skipif instead.
-
- Decorator to skip test if condition is false.
- """
- return pytest.mark.skipif(not condition, reason=(msg or 'conditional skip'))
-
-
-def with_tempdir(func):
- """
- **DEPRECATED**: use tempdir fixture instead.
- """
- return func
-
-
-def raises(exc, func, *args, **kwds):
- """
- **DEPRECATED**: use pytest.raises instead.
-
- Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*.
- """
- with pytest.raises(exc):
- func(*args, **kwds)
-
-
-def raises_msg(exc, msg, func, *args, **kwds):
- """
- **DEPRECATED**: use pytest.raises instead.
-
- Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*,
- and check if the message contains *msg*.
- """
- with pytest.raises(exc) as excinfo:
- func(*args, **kwds)
- assert msg in str(excinfo.value)
-
-
-def assert_true(v1, msg=''):
- """
- **DEPRECATED**: use assert instead.
- """
- assert v1, msg
-
-
-def assert_equal(v1, v2, msg=''):
- """
- **DEPRECATED**: use assert instead.
- """
- assert v1 == v2, msg
-
-
-def assert_in(x, thing, msg=''):
- """
- **DEPRECATED**: use assert instead.
- """
- if x not in thing:
- assert False, msg or '%r is not in %r' % (x, thing)
-
-
-def assert_not_in(x, thing, msg=''):
- """
- **DEPRECATED**: use assert instead.
- """
- if x in thing:
- assert False, msg or '%r is in %r' % (x, thing)
-
-
-class ListOutput(object):
- """
- File-like object that collects written text in a list.
- """
- def __init__(self, name):
- self.name = name
- self.content = [] # type: List[str]
-
- def reset(self):
- del self.content[:]
-
- def write(self, text):
- self.content.append(text)
-
-
-# **DEPRECATED**: use pytest.skip instead.
-SkipTest = pytest.skip.Exception
-
-
-class _DeprecationWrapper(object):
- def __init__(self, mod, deprecated):
- self._mod = mod
- self._deprecated = deprecated
-
- def __getattr__(self, attr):
- if attr in self._deprecated:
- obj, instead = self._deprecated[attr]
- warnings.warn("tests/util.py::%s is deprecated and will be "
- "removed in Sphinx 1.7, please use %s instead."
- % (attr, instead),
- RemovedInSphinx17Warning, stacklevel=2)
- return obj
- return getattr(self._mod, attr)
-
-
-sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict(
- with_app=(pytest.mark.sphinx, 'pytest.mark.sphinx'),
- TestApp=(SphinxTestApp, 'SphinxTestApp'),
- gen_with_app=(gen_with_app, 'pytest.mark.parametrize'),
- skip_if=(skip_if, 'pytest.skipif'),
- skip_unless=(skip_unless, 'pytest.skipif'),
- with_tempdir=(with_tempdir, 'tmpdir pytest fixture'),
- raises=(raises, 'pytest.raises'),
- raises_msg=(raises_msg, 'pytest.raises'),
- assert_true=(assert_true, 'assert'),
- assert_equal=(assert_equal, 'assert'),
- assert_in=(assert_in, 'assert'),
- assert_not_in=(assert_not_in, 'assert'),
- ListOutput=(ListOutput, 'StringIO'),
- SkipTest=(SkipTest, 'pytest.skip'),
-))
diff --git a/sphinx/texinputs/footnotehyper-sphinx.sty b/sphinx/texinputs/footnotehyper-sphinx.sty
index ff23f6ebe..5995f012d 100644
--- a/sphinx/texinputs/footnotehyper-sphinx.sty
+++ b/sphinx/texinputs/footnotehyper-sphinx.sty
@@ -1,6 +1,6 @@
\NeedsTeXFormat{LaTeX2e}
\ProvidesPackage{footnotehyper-sphinx}%
- [2017/03/07 v1.6 hyperref aware footnote.sty for sphinx (JFB)]
+ [2017/10/27 v1.7 hyperref aware footnote.sty for sphinx (JFB)]
%%
%% Package: footnotehyper-sphinx
%% Version: based on footnotehyper.sty 2017/03/07 v1.0
@@ -16,6 +16,7 @@
%% 3. use of \sphinxunactivateextrasandspace from sphinx.sty,
%% 4. macro definition \sphinxfootnotemark,
%% 5. macro definition \sphinxlongtablepatch
+%% 6. replaced an \undefined by \@undefined
\DeclareOption*{\PackageWarning{footnotehyper-sphinx}{Option `\CurrentOption' is unknown}}%
\ProcessOptions\relax
\newbox\FNH@notes
@@ -197,7 +198,7 @@
}%
\AtBeginDocument{%
\let\FNH@@makefntext\@makefntext
- \ifx\@makefntextFB\undefined
+ \ifx\@makefntextFB\@undefined
\expandafter\@gobble\else\expandafter\@firstofone\fi
{\ifFBFrenchFootnotes \let\FNH@@makefntext\@makefntextFB \else
\let\FNH@@makefntext\@makefntextORI\fi}%
diff --git a/sphinx/texinputs/sphinx.sty b/sphinx/texinputs/sphinx.sty
index 5ceb05e19..890ef60f7 100644
--- a/sphinx/texinputs/sphinx.sty
+++ b/sphinx/texinputs/sphinx.sty
@@ -6,7 +6,7 @@
%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesPackage{sphinx}[2017/12/12 v1.6.6 LaTeX package (Sphinx markup)]
+\ProvidesPackage{sphinx}[2017/12/12 v1.7 LaTeX package (Sphinx markup)]
% provides \ltx@ifundefined
% (many packages load ltxcmds: graphicx does for pdftex and lualatex but
@@ -114,19 +114,25 @@
% move back vertically to compensate space inserted by next paragraph
\vskip-\baselineskip\vskip-\parskip
}%
+% use \LTcapwidth (default is 4in) to wrap caption (if line width is bigger)
\newcommand\sphinxcaption[2][\LTcapwidth]{%
\noindent\hb@xt@\linewidth{\hss
\vtop{\@tempdima\dimexpr#1\relax
% don't exceed linewidth for the caption width
\ifdim\@tempdima>\linewidth\hsize\linewidth\else\hsize\@tempdima\fi
-% longtable ignores \abovecaptionskip/\belowcaptionskip, so do the same here
- \abovecaptionskip\z@skip
- \belowcaptionskip\z@skip
+% longtable ignores \abovecaptionskip/\belowcaptionskip, so add hooks here
+% to uniformize control of caption distance to tables
+ \abovecaptionskip\sphinxabovecaptionskip
+ \belowcaptionskip\sphinxbelowcaptionskip
\caption[{#2}]%
{\strut\ignorespaces#2\ifhmode\unskip\@finalstrut\strutbox\fi}%
}\hss}%
\par\prevdepth\dp\strutbox
}%
+\def\spx@abovecaptionskip{\abovecaptionskip}
+\newcommand*\sphinxabovecaptionskip{\z@skip}
+\newcommand*\sphinxbelowcaptionskip{\z@skip}
+
\newcommand\sphinxaftercaption
{% this default definition serves with a caption *above* a table, to make sure
% its last baseline is \sphinxbelowcaptionspace above table top
@@ -177,15 +183,15 @@
% control caption around literal-block
\RequirePackage{capt-of}
\RequirePackage{needspace}
-
+\RequirePackage{remreset}% provides \@removefromreset
% to make pdf with correct encoded bookmarks in Japanese
% this should precede the hyperref package
-\ifx\kanjiskip\undefined
+\ifx\kanjiskip\@undefined
% for non-Japanese: make sure bookmarks are ok also with lualatex
\PassOptionsToPackage{pdfencoding=unicode}{hyperref}
\else
\RequirePackage{atbegshi}
- \ifx\ucs\undefined
+ \ifx\ucs\@undefined
\ifnum 42146=\euc"A4A2
\AtBeginShipoutFirst{\special{pdf:tounicode EUC-UCS2}}
\else
@@ -196,7 +202,7 @@
\fi
\fi
-\ifx\@jsc@uplatextrue\undefined\else
+\ifx\@jsc@uplatextrue\@undefined\else
\PassOptionsToPackage{setpagesize=false}{hyperref}
\fi
@@ -229,7 +235,7 @@
\SetupKeyvalOptions{prefix=spx@opt@} % use \spx@opt@ prefix
% Sphinx legacy text layout: 1in margins on all four sides
-\ifx\@jsc@uplatextrue\undefined
+\ifx\@jsc@uplatextrue\@undefined
\DeclareStringOption[1in]{hmargin}
\DeclareStringOption[1in]{vmargin}
\DeclareStringOption[.5in]{marginpar}
@@ -240,9 +246,11 @@
\DeclareStringOption[.5\dimexpr\inv@mag in\relax]{marginpar}
\fi
-\DeclareBoolOption{dontkeepoldnames} % \ifspx@opt@dontkeepoldnames = \iffalse
\DeclareStringOption[0]{maxlistdepth}% \newcommand*\spx@opt@maxlistdepth{0}
-
+\DeclareStringOption[-1]{numfigreset}
+\DeclareBoolOption[false]{nonumfigreset}
+\DeclareBoolOption[false]{mathnumfig}
+% \DeclareBoolOption[false]{usespart}% not used
% dimensions, we declare the \dimen registers here.
\newdimen\sphinxverbatimsep
\newdimen\sphinxverbatimborder
@@ -269,8 +277,11 @@
% verbatim
\DeclareBoolOption[true]{verbatimwithframe}
\DeclareBoolOption[true]{verbatimwrapslines}
-\DeclareBoolOption[false]{verbatimhintsturnover}
+\DeclareBoolOption[true]{verbatimhintsturnover}
\DeclareBoolOption[true]{inlineliteralwraps}
+\DeclareStringOption[t]{literalblockcappos}
+\DeclareStringOption[r]{verbatimcontinuedalign}
+\DeclareStringOption[r]{verbatimcontinuesalign}
% parsed literal
\DeclareBoolOption[true]{parsedliteralwraps}
% \textvisiblespace for compatibility with fontspec+XeTeX/LuaTeX
@@ -341,6 +352,9 @@
\ProcessKeyvalOptions*
% don't allow use of maxlistdepth via \sphinxsetup.
\DisableKeyvalOption{sphinx}{maxlistdepth}
+\DisableKeyvalOption{sphinx}{numfigreset}
+\DisableKeyvalOption{sphinx}{nonumfigreset}
+\DisableKeyvalOption{sphinx}{mathnumfig}
% user interface: options can be changed midway in a document!
\newcommand\sphinxsetup[1]{\setkeys{sphinx}{#1}}
@@ -425,7 +439,7 @@
%
% fix the double index and bibliography on the table of contents
% in jsclasses (Japanese standard document classes)
-\ifx\@jsc@uplatextrue\undefined\else
+\ifx\@jsc@uplatextrue\@undefined\else
\renewenvironment{sphinxtheindex}
{\cleardoublepage\phantomsection
\begin{theindex}}
@@ -438,7 +452,7 @@
\fi
% disable \@chappos in Appendix in pTeX
-\ifx\kanjiskip\undefined\else
+\ifx\kanjiskip\@undefined\else
\let\py@OldAppendix=\appendix
\renewcommand{\appendix}{
\py@OldAppendix
@@ -452,10 +466,10 @@
{\newenvironment
{sphinxthebibliography}{\begin{thebibliography}}{\end{thebibliography}}%
}
- {}% else clause of ifundefined
+ {}% else clause of \ltx@ifundefined
\ltx@ifundefined{sphinxtheindex}
{\newenvironment{sphinxtheindex}{\begin{theindex}}{\end{theindex}}}%
- {}% else clause of ifundefined
+ {}% else clause of \ltx@ifundefined
%% COLOR (general)
@@ -509,7 +523,7 @@
}
% geometry
-\ifx\kanjiskip\undefined
+\ifx\kanjiskip\@undefined
\PassOptionsToPackage{%
hmargin={\unexpanded{\spx@opt@hmargin}},%
vmargin={\unexpanded{\spx@opt@vmargin}},%
@@ -531,7 +545,7 @@
\newcommand*\sphinxtextlinesja[1]{%
\numexpr\@ne+\dimexpr\paperheight-\topskip-\tw@\dimexpr#1\relax\relax/
\baselineskip\relax}%
- \ifx\@jsc@uplatextrue\undefined\else
+ \ifx\@jsc@uplatextrue\@undefined\else
% the way we found in order for the papersize special written by
% geometry in the dvi file to be correct in case of jsbook class
\ifnum\mag=\@m\else % do nothing special if nomag class option or 10pt
@@ -548,7 +562,7 @@
}{geometry}%
\AtBeginDocument
{% update a dimension used by the jsclasses
- \ifx\@jsc@uplatextrue\undefined\else\fullwidth\textwidth\fi
+ \ifx\@jsc@uplatextrue\@undefined\else\fullwidth\textwidth\fi
% for some reason, jreport normalizes all dimensions with \@settopoint
\@ifclassloaded{jreport}
{\@settopoint\textwidth\@settopoint\textheight\@settopoint\marginparwidth}
@@ -649,6 +663,7 @@
{\abovecaptionskip\smallskipamount
\belowcaptionskip\smallskipamount}
+
%% FOOTNOTES
%
% Support large numbered footnotes in minipage
@@ -657,6 +672,111 @@
\def\thempfootnote{\arabic{mpfootnote}}
+%% NUMBERING OF FIGURES, TABLES, AND LITERAL BLOCKS
+\ltx@ifundefined{c@chapter}
+ {\newcounter{literalblock}}%
+ {\newcounter{literalblock}[chapter]%
+ \def\theliteralblock{\ifnum\c@chapter>\z@\arabic{chapter}.\fi
+ \arabic{literalblock}}%
+ }%
+\ifspx@opt@nonumfigreset
+ \ltx@ifundefined{c@chapter}{}{%
+ \@removefromreset{figure}{chapter}%
+ \@removefromreset{table}{chapter}%
+ \@removefromreset{literalblock}{chapter}%
+ \ifspx@opt@mathnumfig
+ \@removefromreset{equation}{chapter}%
+ \fi
+ }%
+ \def\thefigure{\arabic{figure}}%
+ \def\thetable {\arabic{table}}%
+ \def\theliteralblock{\arabic{literalblock}}%
+ \ifspx@opt@mathnumfig
+ \def\theequation{\arabic{equation}}%
+ \fi
+\else
+\let\spx@preAthefigure\@empty
+\let\spx@preBthefigure\@empty
+% \ifspx@opt@usespart % <-- LaTeX writer could pass such a 'usespart' boolean
+% % as sphinx.sty package option
+% If document uses \part, (triggered in Sphinx by latex_toplevel_sectioning)
+% LaTeX core per default does not reset chapter or section
+% counters at each part.
+% But if we modify this, we need to redefine \thechapter, \thesection to
+% include the part number and this will cause problems in table of contents
+% because of too wide numbering. Simplest is to do nothing.
+% \fi
+\ifnum\spx@opt@numfigreset>0
+ \ltx@ifundefined{c@chapter}
+ {}
+ {\g@addto@macro\spx@preAthefigure{\ifnum\c@chapter>\z@\arabic{chapter}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}}%
+\fi
+\ifnum\spx@opt@numfigreset>1
+ \@addtoreset{figure}{section}%
+ \@addtoreset{table}{section}%
+ \@addtoreset{literalblock}{section}%
+ \ifspx@opt@mathnumfig
+ \@addtoreset{equation}{section}%
+ \fi
+ \g@addto@macro\spx@preAthefigure{\ifnum\c@section>\z@\arabic{section}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}%
+\fi
+\ifnum\spx@opt@numfigreset>2
+ \@addtoreset{figure}{subsection}%
+ \@addtoreset{table}{subsection}%
+ \@addtoreset{literalblock}{subsection}%
+ \ifspx@opt@mathnumfig
+ \@addtoreset{equation}{subsection}%
+ \fi
+ \g@addto@macro\spx@preAthefigure{\ifnum\c@subsection>\z@\arabic{subsection}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}%
+\fi
+\ifnum\spx@opt@numfigreset>3
+ \@addtoreset{figure}{subsubsection}%
+ \@addtoreset{table}{subsubsection}%
+ \@addtoreset{literalblock}{subsubsection}%
+ \ifspx@opt@mathnumfig
+ \@addtoreset{equation}{subsubsection}%
+ \fi
+ \g@addto@macro\spx@preAthefigure{\ifnum\c@subsubsection>\z@\arabic{subsubsection}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}%
+\fi
+\ifnum\spx@opt@numfigreset>4
+ \@addtoreset{figure}{paragraph}%
+ \@addtoreset{table}{paragraph}%
+ \@addtoreset{literalblock}{paragraph}%
+ \ifspx@opt@mathnumfig
+ \@addtoreset{equation}{paragraph}%
+ \fi
+ \g@addto@macro\spx@preAthefigure{\ifnum\c@subparagraph>\z@\arabic{subparagraph}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}%
+\fi
+\ifnum\spx@opt@numfigreset>5
+ \@addtoreset{figure}{subparagraph}%
+ \@addtoreset{table}{subparagraph}%
+ \@addtoreset{literalblock}{subparagraph}%
+ \ifspx@opt@mathnumfig
+ \@addtoreset{equation}{subparagraph}%
+ \fi
+ \g@addto@macro\spx@preAthefigure{\ifnum\c@subsubparagraph>\z@\arabic{subsubparagraph}.}%
+ \g@addto@macro\spx@preBthefigure{\fi}%
+\fi
+\expandafter\g@addto@macro
+\expandafter\spx@preAthefigure\expandafter{\spx@preBthefigure}%
+\let\thefigure\spx@preAthefigure
+\let\thetable\spx@preAthefigure
+\let\theliteralblock\spx@preAthefigure
+\g@addto@macro\thefigure{\arabic{figure}}%
+\g@addto@macro\thetable{\arabic{table}}%
+\g@addto@macro\theliteralblock{\arabic{literalblock}}%
+ \ifspx@opt@mathnumfig
+ \let\theequation\spx@preAthefigure
+ \g@addto@macro\theequation{\arabic{equation}}%
+ \fi
+\fi
+
+
%% LITERAL BLOCKS
%
% Based on use of "fancyvrb.sty"'s Verbatim.
@@ -671,20 +791,7 @@
\let\OriginalVerbatim \Verbatim
\let\endOriginalVerbatim\endVerbatim
-% if the available space on page is less than \literalblockneedspace, insert pagebreak
-\newcommand{\sphinxliteralblockneedspace}{5\baselineskip}
-\newcommand{\sphinxliteralblockwithoutcaptionneedspace}{1.5\baselineskip}
-
% for captions of literal blocks
-% also define `\theH...` macros for hyperref
-\newcounter{literalblock}
-\ltx@ifundefined{c@chapter}
- {\@addtoreset{literalblock}{section}
- \def\theliteralblock {\ifnum\c@section>\z@ \thesection.\fi\arabic{literalblock}}
- \def\theHliteralblock {\theHsection.\arabic{literalblock}}}
- {\@addtoreset{literalblock}{chapter}
- \def\theliteralblock {\ifnum\c@chapter>\z@ \thechapter.\fi\arabic{literalblock}}
- \def\theHliteralblock {\theHchapter.\arabic{literalblock}}}
% at start of caption title
\newcommand*{\fnum@literalblock}{\literalblockname\nobreakspace\theliteralblock}
% this will be overwritten in document preamble by Babel translation
@@ -694,56 +801,33 @@
% analogous to \listoffigures, but for the code listings (foo = chosen title.)
\newcommand*{\ext@literalblock}{lol}
-% The title (caption) is specified from outside as macro \sphinxVerbatimTitle.
-% \sphinxVerbatimTitle is reset to empty after each use of Verbatim.
-\newcommand*\sphinxVerbatimTitle {}
-% This box to typeset the caption before framed.sty multiple passes for framing.
-\newbox\spx@Verbatim@TitleBox
-% Holder macro for labels of literal blocks. Set-up by LaTeX writer.
-\newcommand*\sphinxLiteralBlockLabel {}
-\newcommand*\sphinxSetupCaptionForVerbatim [1]
-{%
- \needspace{\sphinxliteralblockneedspace}%
-% insert a \label via \sphinxLiteralBlockLabel
-% reset to normal the color for the literal block caption
-% the caption inserts \abovecaptionskip whitespace above itself (usually 10pt)
-% there is also \belowcaptionskip but it is usually zero, hence the \smallskip
- \def\sphinxVerbatimTitle
- {\py@NormalColor
- \captionof{literalblock}{\sphinxLiteralBlockLabel #1}\smallskip }%
-}
-\newcommand*\sphinxSetupCodeBlockInFootnote {%
- \fvset{fontsize=\footnotesize}\let\caption\sphinxfigcaption
- \sphinxverbatimwithminipagetrue % reduces vertical spaces
- % we counteract float.sty's \caption which does \@normalsize
- \let\normalsize\footnotesize\let\@parboxrestore\relax
- \abovecaptionskip \smallskipamount \belowcaptionskip \z@skip}
-
\newif\ifspx@inframed % flag set if we are already in a framed environment
% if forced use of minipage encapsulation is needed (e.g. table cells)
\newif\ifsphinxverbatimwithminipage \sphinxverbatimwithminipagefalse
-\long\def\spx@colorbox #1#2#3{%
-% let the framing obey the current indentation (adapted from framed.sty's code).
+
+% Framing macro for use with framed.sty's \FrameCommand
+% - it obeys current indentation,
+% - frame is \fboxsep separated from the contents,
+% - the contents use the full available text width,
+% - #1 = color of frame, #2 = color of background,
+% - #3 = above frame, #4 = below frame, #5 = within frame,
+% - #3 and #4 must be already typeset boxes; they must issue \normalcolor
+% or similar, else, they are under scope of color #1
+\long\def\spx@fcolorbox #1#2#3#4#5{%
\hskip\@totalleftmargin
\hskip-\fboxsep\hskip-\fboxrule
- \spx@fcolorbox{VerbatimBorderColor}{VerbatimColor}{#1}{#2}{#3}%
+ % use of \color@b@x here is compatible with both xcolor.sty and color.sty
+ \color@b@x {\color{#1}\spx@CustomFBox{#3}{#4}}{\color{#2}}{#5}%
\hskip-\fboxsep\hskip-\fboxrule
\hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth
-}
-% use of \color@b@x here is compatible with both xcolor.sty and color.sty
-\long\def\spx@fcolorbox #1#2#3#4%
- {\color@b@x {\color{#1}\spx@VerbatimFBox{#3}{#4}}{\color{#2}}}%
-% Frame drawing macro
-% #1 = used by default for title above frame, may contain "continued" hint
-% #2 = for material underneath frame, used for "continues on next page" hint
-% #3 = actual contents with background color
-\long\def\spx@VerbatimFBox#1#2#3{%
- \leavevmode
+}%
+% #1 = for material above frame, such as a caption or a "continued" hint
+% #2 = for material below frame, such as a caption or "continues on next page"
+% #3 = actual contents, which will be typeset with a background color
+\long\def\spx@CustomFBox#1#2#3{%
\begingroup
\setbox\@tempboxa\hbox{{#3}}% inner braces to avoid color leaks
- \hbox
- {\lower\dimexpr\fboxrule+\dp\@tempboxa\hbox{%
- \vbox{#1% above frame
+ \vbox{#1% above frame
% draw frame border _latest_ to avoid pdf viewer issue
\kern\fboxrule
\hbox{\kern\fboxrule
@@ -756,42 +840,43 @@
\hrule\@height\fboxrule
\kern\dimexpr\ht\@tempboxa+\dp\@tempboxa\relax
\hrule\@height\fboxrule
- #2% below frame
- }%
- }%
- }%
+ #2% below frame
+ }%
\endgroup
-}
-
-% Customize framed.sty \MakeFramed to glue caption to literal block
-% and add optional hint "continued on next page"
-\def\spx@Verbatim@FrameCommand
- {\spx@colorbox\spx@Verbatim@Title{}}%
-% Macros for a frame with page breaks:
-\def\spx@Verbatim@FirstFrameCommand
- {\spx@colorbox\spx@Verbatim@Title\spx@Verbatim@Continues}%
-\def\spx@Verbatim@MidFrameCommand
- {\spx@colorbox\spx@Verbatim@Continued\spx@Verbatim@Continues}%
-\def\spx@Verbatim@LastFrameCommand
- {\spx@colorbox\spx@Verbatim@Continued{}}%
-
-\def\spx@Verbatim@Title{% hide width from framed.sty measuring
- \moveright\dimexpr\fboxrule+.5\wd\@tempboxa
- \hb@xt@\z@{\hss\unhcopy\spx@Verbatim@TitleBox\hss}%
}%
-\def\spx@Verbatim@Continued{%
- \moveright\dimexpr\fboxrule+\wd\@tempboxa-\fboxsep
- \hb@xt@\z@{\hss
- {\normalcolor\sphinxstylecodecontinued\literalblockcontinuedname}}%
+\def\spx@fcolorbox@put@c#1{% hide width from framed.sty measuring
+ \moveright\dimexpr\fboxrule+.5\wd\@tempboxa\hb@xt@\z@{\hss#1\hss}%
+}%
+\def\spx@fcolorbox@put@r#1{% right align with contents, width hidden
+ \moveright\dimexpr\fboxrule+\wd\@tempboxa-\fboxsep\hb@xt@\z@{\hss#1}%
}%
-\def\spx@Verbatim@Continues{%
- \moveright\dimexpr\fboxrule+\wd\@tempboxa-\fboxsep
- \hb@xt@\z@{\hss
- {\normalcolor\sphinxstylecodecontinues\literalblockcontinuesname}}%
+\def\spx@fcolorbox@put@l#1{% left align with contents, width hidden
+ \moveright\dimexpr\fboxrule+\fboxsep\hb@xt@\z@{#1\hss}%
}%
+%
+\def\sphinxVerbatim@Continued
+ {\csname spx@fcolorbox@put@\spx@opt@verbatimcontinuedalign\endcsname
+ {\normalcolor\sphinxstylecodecontinued\literalblockcontinuedname}}%
+\def\sphinxVerbatim@Continues
+ {\csname spx@fcolorbox@put@\spx@opt@verbatimcontinuesalign\endcsname
+ {\normalcolor\sphinxstylecodecontinues\literalblockcontinuesname}}%
+\def\sphinxVerbatim@Title
+ {\spx@fcolorbox@put@c{\unhcopy\sphinxVerbatim@TitleBox}}%
+\let\sphinxVerbatim@Before\@empty
+\let\sphinxVerbatim@After\@empty
% Defaults are redefined in document preamble according to language
\newcommand*\literalblockcontinuedname{continued from previous page}%
\newcommand*\literalblockcontinuesname{continues on next page}%
+%
+\def\spx@verbatimfcolorbox{\spx@fcolorbox{VerbatimBorderColor}{VerbatimColor}}%
+\def\sphinxVerbatim@FrameCommand
+ {\spx@verbatimfcolorbox\sphinxVerbatim@Before\sphinxVerbatim@After}%
+\def\sphinxVerbatim@FirstFrameCommand
+ {\spx@verbatimfcolorbox\sphinxVerbatim@Before\sphinxVerbatim@Continues}%
+\def\sphinxVerbatim@MidFrameCommand
+ {\spx@verbatimfcolorbox\sphinxVerbatim@Continued\sphinxVerbatim@Continues}%
+\def\sphinxVerbatim@LastFrameCommand
+ {\spx@verbatimfcolorbox\sphinxVerbatim@Continued\sphinxVerbatim@After}%
% For linebreaks inside Verbatim environment from package fancyvrb.
\newbox\sphinxcontinuationbox
@@ -849,8 +934,40 @@
{\kern\fontdimen2\font}%
}%
+% if the available space on page is less than \literalblockneedspace, insert pagebreak
+\newcommand{\sphinxliteralblockneedspace}{5\baselineskip}
+\newcommand{\sphinxliteralblockwithoutcaptionneedspace}{1.5\baselineskip}
+% The title (caption) is specified from outside as macro \sphinxVerbatimTitle.
+% \sphinxVerbatimTitle is reset to empty after each use of Verbatim.
+\newcommand*\sphinxVerbatimTitle {}
+% This box to typeset the caption before framed.sty multiple passes for framing.
+\newbox\sphinxVerbatim@TitleBox
+% This is a workaround to a "feature" of French lists, when literal block
+% follows immediately; usable generally (does only \par then), a priori...
+\newcommand*\sphinxvspacefixafterfrenchlists{%
+ \ifvmode\ifdim\lastskip<\z@ \vskip\parskip\fi\else\par\fi
+}
+% Holder macro for labels of literal blocks. Set-up by LaTeX writer.
+\newcommand*\sphinxLiteralBlockLabel {}
+\newcommand*\sphinxSetupCaptionForVerbatim [1]
+{%
+ \sphinxvspacefixafterfrenchlists
+ \needspace{\sphinxliteralblockneedspace}%
+% insert a \label via \sphinxLiteralBlockLabel
+% reset to normal the color for the literal block caption
+ \def\sphinxVerbatimTitle
+ {\py@NormalColor\sphinxcaption{\sphinxLiteralBlockLabel #1}}%
+}
+\newcommand*\sphinxSetupCodeBlockInFootnote {%
+ \fvset{fontsize=\footnotesize}\let\caption\sphinxfigcaption
+ \sphinxverbatimwithminipagetrue % reduces vertical spaces
+ % we counteract (this is in a group) the \@normalsize from \caption
+ \let\normalsize\footnotesize\let\@parboxrestore\relax
+ \def\spx@abovecaptionskip{\sphinxverbatimsmallskipamount}%
+}
% needed to create wrapper environments of fancyvrb's Verbatim
\newcommand*{\sphinxVerbatimEnvironment}{\gdef\FV@EnvironName{sphinxVerbatim}}
+\newcommand*{\sphinxverbatimsmallskipamount}{\smallskipamount}
% serves to implement line highlighting and line wrapping
\newcommand\sphinxFancyVerbFormatLine[1]{%
\expandafter\sphinx@verbatim@checkifhl\expandafter{\the\FV@CodeLineNo}%
@@ -879,17 +996,12 @@
\sbox\sphinxcontinuationbox {\spx@opt@verbatimcontinued}%
\sbox\sphinxvisiblespacebox {\spx@opt@verbatimvisiblespace}%
}%
-% Sphinx <1.5 optional argument was in fact mandatory. It is now really
-% optional and handled by original Verbatim.
\newenvironment{sphinxVerbatim}{%
- % quit horizontal mode if we are still in a paragraph
- \par
- % list starts new par, but we don't want it to be set apart vertically
- \parskip\z@skip
% first, let's check if there is a caption
\ifx\sphinxVerbatimTitle\empty
- \addvspace\z@% counteract possible previous negative skip (French lists!)
- \smallskip
+ \sphinxvspacefixafterfrenchlists
+ \parskip\z@skip
+ \vskip\sphinxverbatimsmallskipamount
% there was no caption. Check if nevertheless a label was set.
\ifx\sphinxLiteralBlockLabel\empty\else
% we require some space to be sure hyperlink target from \phantomsection
@@ -897,24 +1009,37 @@
\needspace{\sphinxliteralblockwithoutcaptionneedspace}%
\phantomsection\sphinxLiteralBlockLabel
\fi
- \let\spx@Verbatim@Title\@empty
\else
- % non-empty \sphinxVerbatimTitle has label inside it (in case there is one)
- \setbox\spx@Verbatim@TitleBox
+ \parskip\z@skip
+ \if t\spx@opt@literalblockcappos
+ \vskip\spx@abovecaptionskip
+ \def\sphinxVerbatim@Before
+ {\sphinxVerbatim@Title\nointerlineskip
+ \kern\dimexpr-\dp\strutbox+\sphinxbelowcaptionspace\relax}%
+ \else
+ \vskip\sphinxverbatimsmallskipamount
+ \def\sphinxVerbatim@After
+ {\nointerlineskip\kern\dp\strutbox\sphinxVerbatim@Title}%
+ \fi
+ \def\@captype{literalblock}%
+ \capstart
+ % \sphinxVerbatimTitle must reset color
+ \setbox\sphinxVerbatim@TitleBox
\hbox{\begin{minipage}{\linewidth}%
\sphinxVerbatimTitle
\end{minipage}}%
\fi
+ \global\let\sphinxLiteralBlockLabel\empty
+ \global\let\sphinxVerbatimTitle\empty
\fboxsep\sphinxverbatimsep \fboxrule\sphinxverbatimborder
- % setting borderwidth to zero is simplest for no-frame effect with same pagebreaks
\ifspx@opt@verbatimwithframe\else\fboxrule\z@\fi
- \let\FrameCommand \spx@Verbatim@FrameCommand
- \let\FirstFrameCommand\spx@Verbatim@FirstFrameCommand
- \let\MidFrameCommand \spx@Verbatim@MidFrameCommand
- \let\LastFrameCommand \spx@Verbatim@LastFrameCommand
+ \let\FrameCommand \sphinxVerbatim@FrameCommand
+ \let\FirstFrameCommand\sphinxVerbatim@FirstFrameCommand
+ \let\MidFrameCommand \sphinxVerbatim@MidFrameCommand
+ \let\LastFrameCommand \sphinxVerbatim@LastFrameCommand
\ifspx@opt@verbatimhintsturnover\else
- \let\spx@Verbatim@Continued\@empty
- \let\spx@Verbatim@Continues\@empty
+ \let\sphinxVerbatim@Continued\@empty
+ \let\sphinxVerbatim@Continues\@empty
\fi
\ifspx@opt@verbatimwrapslines
% fancyvrb's Verbatim puts each input line in (unbreakable) horizontal boxes.
@@ -941,13 +1066,21 @@
\def\@toodeep {\advance\@listdepth\@ne}%
% The list environment is needed to control perfectly the vertical space.
% Note: \OuterFrameSep used by framed.sty is later set to \topsep hence 0pt.
- % - if caption: vertical space above caption = (\abovecaptionskip + D) with
- % D = \baselineskip-\FrameHeightAdjust, and then \smallskip above frame.
- % - if no caption: (\smallskip + D) above frame. By default D=6pt.
- % Use trivlist rather than list to avoid possible "too deeply nested" error.
+ % - if caption: distance from last text baseline to caption baseline is
+ % A+(B-F)+\ht\strutbox, A = \abovecaptionskip (default 10pt), B =
+ % \baselineskip, F is the framed.sty \FrameHeightAdjust macro, default 6pt.
+ % Formula valid for F < 10pt.
+ % - distance of baseline of caption to top of frame is like for tables:
+ % \sphinxbelowcaptionspace (=0.5\baselineskip)
+ % - if no caption: distance of last text baseline to code frame is S+(B-F),
+ % with S = \sphinxverbatimtopskip (=\smallskip)
+ % - and distance from bottom of frame to next text baseline is
+ % \baselineskip+\parskip.
+ % The \trivlist is used to avoid possible "too deeply nested" error.
\itemsep \z@skip
\topsep \z@skip
- \partopsep \z@skip% trivlist will set \parsep to \parskip = zero (see above)
+ \partopsep \z@skip
+ % trivlist will set \parsep to \parskip = zero
% \leftmargin will be set to zero by trivlist
\rightmargin\z@
\parindent \z@% becomes \itemindent. Default zero, but perhaps overwritten.
@@ -983,10 +1116,12 @@
{% don't use a frame if in a table cell
\spx@opt@verbatimwithframefalse
\sphinxverbatimwithminipagetrue
- % counteract longtable redefinition of caption
+ % the literal block caption uses \sphinxcaption which is wrapper of \caption,
+ % but \caption must be modified because longtable redefines it to work only
+ % for the own table caption, and tabulary has multiple passes
\let\caption\sphinxfigcaption
- % reduce above caption space if in a table cell
- \abovecaptionskip\smallskipamount
+ % reduce above caption skip
+ \def\spx@abovecaptionskip{\sphinxverbatimsmallskipamount}%
\def\sphinxVerbatimEnvironment{\gdef\FV@EnvironName{sphinxVerbatimintable}}%
\begin{sphinxVerbatim}}
{\end{sphinxVerbatim}}
@@ -1149,10 +1284,9 @@
{\parskip\z@skip\noindent}%
}
{%
- \par
% counteract previous possible negative skip (French lists!):
% (we can't cancel that any earlier \vskip introduced a potential pagebreak)
- \ifdim\lastskip<\z@\vskip-\lastskip\fi
+ \sphinxvspacefixafterfrenchlists
\nobreak\vbox{\noindent\kern\@totalleftmargin
{\color{spx@notice@bordercolor}%
\rule[\dimexpr.4\baselineskip-\spx@notice@border\relax]
@@ -1252,14 +1386,6 @@
\begin{sphinx#1}{#2}}
% workaround some LaTeX "feature" of \end command
{\edef\spx@temp{\noexpand\end{sphinx\spx@noticetype}}\spx@temp}
-% use of ``notice'' is for backwards compatibility and will be removed in
-% Sphinx 1.7.
-\newenvironment{notice}
- {\sphinxdeprecationwarning {notice}{1.6}{1.7}{%
- This document was probably built with a Sphinx extension using ``notice''^^J
- environment. At Sphinx 1.7, ``notice'' environment will be removed. Please^^J
- report to extension author to use ``sphinxadmonition'' instead.^^J%
- ****}\begin{sphinxadmonition}}{\end{sphinxadmonition}}
%% PYTHON DOCS MACROS AND ENVIRONMENTS
@@ -1416,7 +1542,6 @@
%% TEXT STYLING
%
% Some custom font markup commands.
-% *** the macros without \sphinx prefix are still defined farther down ***
\protected\def\sphinxstrong#1{{\textbf{#1}}}
% to obtain straight quotes we execute \@noligs as patched by upquote, and
% \scantokens is needed in cases where it would be too late for the macro to
@@ -1452,39 +1577,6 @@
\long\protected\def\sphinxoptional#1{%
{\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}}
-\ifspx@opt@dontkeepoldnames\else
- \let\spx@alreadydefinedlist\@empty
- \typeout{** (sphinx) defining (legacy) text style macros without \string\sphinx\space prefix}
- \typeout{** if clashes with packages, do not set latex_keep_old_macro_names=True
- in conf.py}
- \@for\@tempa:=code,strong,bfcode,email,tablecontinued,titleref,%
- menuselection,accelerator,crossref,termref,optional\do
- {% first, check if command with no prefix already exists
- \ltx@ifundefined{\@tempa}{%
- % give it the meaning defined so far with \sphinx prefix
- \expandafter\let\csname\@tempa\expandafter\endcsname
- \csname sphinx\@tempa\endcsname
- % redefine the \sphinx prefixed macro to expand to non-prefixed one
- \expandafter\def\csname sphinx\@tempa\expandafter\endcsname
- \expandafter{\csname\@tempa\endcsname}%
- }{\edef\spx@alreadydefinedlist{\spx@alreadydefinedlist{\@tempa}}}%
- }%
- \ifx\spx@alreadydefinedlist\@empty\else
- \expandafter\@tfor\expandafter\@tempa\expandafter:\expandafter=\spx@alreadydefinedlist\do
- {% emit warning now
- \PackageWarning{sphinx}{not redefining already existing \@backslashchar\@tempa\space!^^J%
- Anyhow, Sphinx mark-up uses only \string\sphinx\@tempa.}%
- % and also at end of log for better visibility
- \expandafter\sphinxdeprecationwarning\expandafter{\csname\@tempa\endcsname}{1.6}{1.7}
- {\sphinxdeprecatedmacro already existed at Sphinx loading time! Not redefined!^^J
- Sphinx mark-up uses only \string\sphinx\expandafter\@gobble\sphinxdeprecatedmacro.^^J
- Note: if this warning is about macro \string\strong, it presumably results^^J
- from fontspec 2.6 having defined it prior to Sphinx. No need for alarm!}%
- }%
- \fi
- \sphinxdeprecationwarning{latex_keep_old_macro_names=True}{1.6}{1.7}{}%
-\fi
-
% additional customizable styling
% FIXME: convert this to package options ?
\protected\def\sphinxstyleindexentry {\texttt}
@@ -1495,16 +1587,7 @@
\protected\def\sphinxstyleothertitle {\textbf}
\protected\def\sphinxstylesidebarsubtitle #1{~\\\textbf{#1} \smallskip}
% \text.. commands do not allow multiple paragraphs
-\let\sphinxstylethead\empty
-\protected\def\sphinxstyletheadfamily {\ifx\sphinxstylethead\empty\sffamily\fi}
-\AtBeginDocument{\ifx\sphinxstylethead\empty\else
-\sphinxdeprecationwarning{\sphinxstylethead}{1.6}{1.7}{%
- \string\sphinxstyletheadfamily\space replaces it
- (it defaults to \string\sffamily) to allow use^^J
- with multiple paragraphs. Backwards compatibility is maintained, but please^^J
- move customization into \string\sphinxstyletheadfamily\space
- in time for 1.7.^^J
- And if you do it now, you will spare yourself this warning!}\fi}
+\protected\def\sphinxstyletheadfamily {\sffamily}
\protected\def\sphinxstyleemphasis {\emph}
\protected\def\sphinxstyleliteralemphasis#1{\emph{\sphinxcode{#1}}}
\protected\def\sphinxstylestrong {\textbf}
diff --git a/sphinx/texinputs/sphinxhowto.cls b/sphinx/texinputs/sphinxhowto.cls
index 90680fdee..11a49a205 100644
--- a/sphinx/texinputs/sphinxhowto.cls
+++ b/sphinx/texinputs/sphinxhowto.cls
@@ -25,6 +25,7 @@
% reset these counters in your preamble.
%
\setcounter{secnumdepth}{2}
+\setcounter{tocdepth}{2}% i.e. section and subsection
% Change the title page to look a bit better, and fit in with the fncychap
% ``Bjarne'' style a bit better.
diff --git a/sphinx/themes/basic/layout.html b/sphinx/themes/basic/layout.html
index cb89921fd..75c1ca568 100644
--- a/sphinx/themes/basic/layout.html
+++ b/sphinx/themes/basic/layout.html
@@ -120,7 +120,10 @@
<html xmlns="http://www.w3.org/1999/xhtml"{% if language is not none %} lang="{{ language }}"{% endif %}>
{%- endif %}
<head>
- {%- if use_meta_charset %}
+ {%- if not html5_doctype %}
+ <meta http-equiv="X-UA-Compatible" content="IE=Edge" />
+ {%- endif %}
+ {%- if use_meta_charset or html5_doctype %}
<meta charset="{{ encoding }}" />
{%- else %}
<meta http-equiv="Content-Type" content="text/html; charset={{ encoding }}" />
@@ -167,7 +170,7 @@
{%- endblock %}
{%- block extrahead %} {% endblock %}
</head>
- <body>
+ {%- block body_tag %}<body>{% endblock %}
{%- block header %}{% endblock %}
{%- block relbar1 %}{{ relbar() }}{% endblock %}
diff --git a/sphinx/themes/basic/static/websupport.js b/sphinx/themes/basic/static/websupport.js
index 79b18e389..78e14bb4a 100644
--- a/sphinx/themes/basic/static/websupport.js
+++ b/sphinx/themes/basic/static/websupport.js
@@ -301,7 +301,7 @@
li.hide();
// Determine where in the parents children list to insert this comment.
- for(i=0; i < siblings.length; i++) {
+ for(var i=0; i < siblings.length; i++) {
if (comp(comment, siblings[i]) <= 0) {
$('#cd' + siblings[i].id)
.parent()
diff --git a/sphinx/themes/basic/theme.conf b/sphinx/themes/basic/theme.conf
index 3248070bc..25495e8c6 100644
--- a/sphinx/themes/basic/theme.conf
+++ b/sphinx/themes/basic/theme.conf
@@ -2,6 +2,7 @@
inherit = none
stylesheet = basic.css
pygments_style = none
+sidebars = localtoc.html, relations.html, sourcelink.html, searchbox.html
[options]
nosidebar = false
diff --git a/sphinx/themes/bizstyle/static/css3-mediaqueries_src.js b/sphinx/themes/bizstyle/static/css3-mediaqueries_src.js
index 65b44825d..f21dd4949 100644
--- a/sphinx/themes/bizstyle/static/css3-mediaqueries_src.js
+++ b/sphinx/themes/bizstyle/static/css3-mediaqueries_src.js
@@ -432,7 +432,7 @@ var cssHelper = function () {
oss[n][oss[n].length] = r;
}
};
- for (i = 0; i < ors.length; i++) {
+ for (var i = 0; i < ors.length; i++) {
collectSelectors(ors[i]);
}
diff --git a/sphinx/theming.py b/sphinx/theming.py
index 64cd67126..33c4c76be 100644
--- a/sphinx/theming.py
+++ b/sphinx/theming.py
@@ -133,8 +133,9 @@ class Theme(object):
for option, value in iteritems(overrides):
if option not in options:
- raise ThemeError('unsupported theme option %r given' % option)
- options[option] = value
+ logger.warning('unsupported theme option %r given' % option)
+ else:
+ options[option] = value
return options
diff --git a/sphinx/transforms/i18n.py b/sphinx/transforms/i18n.py
index 12f143302..5ae33d86a 100644
--- a/sphinx/transforms/i18n.py
+++ b/sphinx/transforms/i18n.py
@@ -50,15 +50,12 @@ def publish_msgstr(app, source, source_path, source_line, config, settings):
:rtype: docutils.nodes.document
"""
from sphinx.io import SphinxI18nReader
- reader = SphinxI18nReader(
- app=app,
- parsers=app.registry.get_source_parsers(),
- parser_name='restructuredtext', # default parser
- )
+ reader = SphinxI18nReader()
reader.set_lineno_for_reporter(source_line)
+ parser = app.registry.create_source_parser(app, '')
doc = reader.read(
source=StringInput(source=source, source_path=source_path),
- parser=reader.parser,
+ parser=parser,
settings=settings,
)
try:
diff --git a/sphinx/transforms/post_transforms/__init__.py b/sphinx/transforms/post_transforms/__init__.py
index 42e7307be..b65d929e2 100644
--- a/sphinx/transforms/post_transforms/__init__.py
+++ b/sphinx/transforms/post_transforms/__init__.py
@@ -134,7 +134,8 @@ class ReferencesResolver(SphinxTransform):
if not results:
return None
if len(results) > 1:
- nice_results = ' or '.join(':%s:' % r[0] for r in results)
+ nice_results = ' or '.join(':%s:`%s`' % (name, role["reftitle"])
+ for name, role in results)
logger.warning(__('more than one target found for \'any\' cross-'
'reference %r: could be %s'), target, nice_results,
location=node)
diff --git a/sphinx/transforms/post_transforms/images.py b/sphinx/transforms/post_transforms/images.py
index bffba8516..d09f57e67 100644
--- a/sphinx/transforms/post_transforms/images.py
+++ b/sphinx/transforms/post_transforms/images.py
@@ -209,7 +209,7 @@ class ImageConverter(BaseImageConverter):
def is_available(self):
# type: () -> bool
"""Confirms the converter is available or not."""
- raise NotImplemented
+ raise NotImplementedError()
def guess_mimetypes(self, node):
# type: (nodes.Node) -> List[unicode]
@@ -248,7 +248,7 @@ class ImageConverter(BaseImageConverter):
def convert(self, _from, _to):
# type: (unicode, unicode) -> bool
"""Converts the image to expected one."""
- raise NotImplemented
+ raise NotImplementedError()
def setup(app):
diff --git a/sphinx/util/__init__.py b/sphinx/util/__init__.py
index 4201c9462..938ec71db 100644
--- a/sphinx/util/__init__.py
+++ b/sphinx/util/__init__.py
@@ -562,16 +562,6 @@ def encode_uri(uri):
return urlunsplit(split)
-def split_docinfo(text):
- # type: (unicode) -> Sequence[unicode]
- docinfo_re = re.compile('\\A((?:\\s*:\\w+:.*?\n(?:[ \\t]+.*?\n)*)+)', re.M)
- result = docinfo_re.split(text, 1) # type: ignore
- if len(result) == 1:
- return '', result[0]
- else:
- return result[1:]
-
-
def display_chunk(chunk):
# type: (Any) -> unicode
if isinstance(chunk, (list, tuple)):
diff --git a/sphinx/util/compat.py b/sphinx/util/compat.py
deleted file mode 100644
index 402e4bf9f..000000000
--- a/sphinx/util/compat.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- sphinx.util.compat
- ~~~~~~~~~~~~~~~~~~
-
- Stuff for docutils compatibility.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-from __future__ import absolute_import
-
-import sys
-import warnings
-from distutils.version import LooseVersion
-
-from docutils.parsers.rst import Directive # noqa
-from docutils import __version__ as _du_version
-
-from sphinx.deprecation import RemovedInSphinx17Warning
-
-docutils_version = tuple(LooseVersion(_du_version).version)[:2]
-
-if False:
- # For type annotation
- from typing import Any, Dict # NOQA
-
-
-class _DeprecationWrapper(object):
- def __init__(self, mod, deprecated):
- # type: (Any, Dict) -> None
- self._mod = mod
- self._deprecated = deprecated
-
- def __getattr__(self, attr):
- # type: (str) -> Any
- if attr in self._deprecated:
- warnings.warn("sphinx.util.compat.%s is deprecated and will be removed "
- "in Sphinx 1.7, please use docutils' instead." % attr,
- RemovedInSphinx17Warning)
- return self._deprecated[attr]
- return getattr(self._mod, attr)
-
-
-sys.modules[__name__] = _DeprecationWrapper(sys.modules[__name__], dict(
- docutils_version = docutils_version,
- Directive = Directive,
-))
diff --git a/sphinx/util/docutils.py b/sphinx/util/docutils.py
index bbf32da1d..b6ddd2dd8 100644
--- a/sphinx/util/docutils.py
+++ b/sphinx/util/docutils.py
@@ -18,8 +18,9 @@ from contextlib import contextmanager
import docutils
from docutils.languages import get_language
-from docutils.utils import Reporter
+from docutils.statemachine import ViewList
from docutils.parsers.rst import directives, roles, convert_directive_function
+from docutils.utils import Reporter
from sphinx.errors import ExtensionError
from sphinx.locale import __
@@ -33,6 +34,7 @@ if False:
from typing import Any, Callable, Iterator, List, Tuple # NOQA
from docutils import nodes # NOQA
from sphinx.environment import BuildEnvironment # NOQA
+ from sphinx.io import SphinxFileInput # NOQA
__version_info__ = tuple(LooseVersion(docutils.__version__).version)
@@ -167,16 +169,34 @@ class WarningStream(object):
class LoggingReporter(Reporter):
+ @classmethod
+ def from_reporter(cls, reporter):
+ # type: (Reporter) -> LoggingReporter
+ """Create an instance of LoggingReporter from other reporter object."""
+ return cls(reporter.source, reporter.report_level, reporter.halt_level,
+ reporter.debug_flag, reporter.error_handler)
+
def __init__(self, source, report_level, halt_level,
debug=False, error_handler='backslashreplace'):
# type: (unicode, int, int, bool, unicode) -> None
stream = WarningStream()
Reporter.__init__(self, source, report_level, halt_level,
stream, debug, error_handler=error_handler)
+ self.source_and_line = None # type: SphinxFileInput
+
+ def set_source(self, source):
+ # type: (SphinxFileInput) -> None
+ self.source_and_line = source
+
+ def system_message(self, *args, **kwargs):
+ # type: (Any, Any) -> Any
+ if kwargs.get('line') and isinstance(self.source_and_line, ViewList):
+ # replace source parameter if source is set
+ source, lineno = self.source_and_line.info(kwargs.get('line'))
+ kwargs['source'] = source
+ kwargs['line'] = lineno
- def set_conditions(self, category, report_level, halt_level, debug=False):
- # type: (unicode, int, int, bool) -> None
- Reporter.set_conditions(self, category, report_level, halt_level, debug=debug)
+ return Reporter.system_message(self, *args, **kwargs)
def is_html5_writer_available():
diff --git a/sphinx/util/inspect.py b/sphinx/util/inspect.py
index 704225359..62cd1a7e9 100644
--- a/sphinx/util/inspect.py
+++ b/sphinx/util/inspect.py
@@ -8,21 +8,22 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import absolute_import
import re
+import sys
+import typing
+import inspect
+from collections import OrderedDict
-from six import PY3, binary_type
+from six import PY2, PY3, StringIO, binary_type, string_types, itervalues
from six.moves import builtins
from sphinx.util import force_decode
if False:
# For type annotation
- from typing import Any, Callable, List, Tuple, Type # NOQA
-
-# this imports the standard library inspect module without resorting to
-# relatively import this module
-inspect = __import__('inspect')
+ from typing import Any, Callable, Dict, List, Tuple, Type # NOQA
memory_address_re = re.compile(r' at 0x[0-9a-f]{8,16}(?=>)', re.IGNORECASE)
@@ -113,7 +114,7 @@ else: # 2.7
func = func.func
if not inspect.isfunction(func):
raise TypeError('%r is not a Python function' % func)
- args, varargs, varkw = inspect.getargs(func.__code__)
+ args, varargs, varkw = inspect.getargs(func.__code__) # type: ignore
func_defaults = func.__defaults__
if func_defaults is None:
func_defaults = []
@@ -239,3 +240,327 @@ def is_builtin_class_method(obj, attr_name):
if not hasattr(builtins, safe_getattr(cls, '__name__', '')): # type: ignore
return False
return getattr(builtins, safe_getattr(cls, '__name__', '')) is cls # type: ignore
+
+
+class Parameter(object):
+ """Fake parameter class for python2."""
+ POSITIONAL_ONLY = 0
+ POSITIONAL_OR_KEYWORD = 1
+ VAR_POSITIONAL = 2
+ KEYWORD_ONLY = 3
+ VAR_KEYWORD = 4
+ empty = object()
+
+ def __init__(self, name, kind=POSITIONAL_OR_KEYWORD, default=empty):
+ # type: (str, int, Any) -> None
+ self.name = name
+ self.kind = kind
+ self.default = default
+ self.annotation = self.empty
+
+
+class Signature(object):
+ """The Signature object represents the call signature of a callable object and
+ its return annotation.
+ """
+
+ def __init__(self, subject, bound_method=False):
+ # type: (Callable, bool) -> None
+ # check subject is not a built-in class (ex. int, str)
+ if (isinstance(subject, type) and
+ is_builtin_class_method(subject, "__new__") and
+ is_builtin_class_method(subject, "__init__")):
+ raise TypeError("can't compute signature for built-in type {}".format(subject))
+
+ self.subject = subject
+
+ if PY3:
+ self.signature = inspect.signature(subject)
+ else:
+ self.argspec = getargspec(subject)
+
+ try:
+ self.annotations = typing.get_type_hints(subject) # type: ignore
+ except Exception:
+ self.annotations = {}
+
+ if bound_method:
+ # client gives a hint that the subject is a bound method
+
+ if PY3 and inspect.ismethod(subject):
+ # inspect.signature already considers the subject is bound method.
+ # So it is not need to skip first argument.
+ self.skip_first_argument = False
+ else:
+ self.skip_first_argument = True
+ else:
+ if PY3:
+ # inspect.signature recognizes type of method properly without any hints
+ self.skip_first_argument = False
+ else:
+ # check the subject is bound method or not
+ self.skip_first_argument = inspect.ismethod(subject) and subject.__self__ # type: ignore # NOQA
+
+ @property
+ def parameters(self):
+ # type: () -> Dict
+ if PY3:
+ return self.signature.parameters
+ else:
+ params = OrderedDict() # type: Dict
+ positionals = len(self.argspec.args) - len(self.argspec.defaults)
+ for i, arg in enumerate(self.argspec.args):
+ if i < positionals:
+ params[arg] = Parameter(arg)
+ else:
+ default = self.argspec.defaults[i - positionals]
+ params[arg] = Parameter(arg, default=default)
+ if self.argspec.varargs:
+ params[self.argspec.varargs] = Parameter(self.argspec.varargs,
+ Parameter.VAR_POSITIONAL)
+ if self.argspec.keywords:
+ params[self.argspec.keywords] = Parameter(self.argspec.keywords,
+ Parameter.VAR_KEYWORD)
+ return params
+
+ @property
+ def return_annotation(self):
+ # type: () -> Any
+ if PY3:
+ return self.signature.return_annotation
+ else:
+ return None
+
+ def format_args(self):
+ # type: () -> unicode
+ args = []
+ last_kind = None
+ for i, param in enumerate(itervalues(self.parameters)):
+ # skip first argument if subject is bound method
+ if self.skip_first_argument and i == 0:
+ continue
+
+ arg = StringIO()
+
+ # insert '*' between POSITIONAL args and KEYWORD_ONLY args::
+ # func(a, b, *, c, d):
+ if param.kind == param.KEYWORD_ONLY and last_kind in (param.POSITIONAL_OR_KEYWORD,
+ param.POSITIONAL_ONLY):
+ args.append('*')
+
+ if param.kind in (param.POSITIONAL_ONLY,
+ param.POSITIONAL_OR_KEYWORD,
+ param.KEYWORD_ONLY):
+ arg.write(param.name)
+ if param.annotation is not param.empty:
+ if isinstance(param.annotation, string_types) and \
+ param.name in self.annotations:
+ arg.write(': ')
+ arg.write(self.format_annotation(self.annotations[param.name]))
+ else:
+ arg.write(': ')
+ arg.write(self.format_annotation(param.annotation))
+ if param.default is not param.empty:
+ if param.annotation is param.empty:
+ arg.write('=')
+ arg.write(object_description(param.default)) # type: ignore
+ else:
+ arg.write(' = ')
+ arg.write(object_description(param.default)) # type: ignore
+ elif param.kind == param.VAR_POSITIONAL:
+ arg.write('*')
+ arg.write(param.name)
+ elif param.kind == param.VAR_KEYWORD:
+ arg.write('**')
+ arg.write(param.name)
+
+ args.append(arg.getvalue())
+ last_kind = param.kind
+
+ if PY2 or self.return_annotation is inspect.Parameter.empty:
+ return '(%s)' % ', '.join(args)
+ else:
+ if isinstance(self.return_annotation, string_types) and \
+ 'return' in self.annotations:
+ annotation = self.format_annotation(self.annotations['return'])
+ else:
+ annotation = self.format_annotation(self.return_annotation)
+
+ return '(%s) -> %s' % (', '.join(args), annotation)
+
+ def format_annotation(self, annotation):
+ # type: (Any) -> str
+ """Return formatted representation of a type annotation.
+
+ Show qualified names for types and additional details for types from
+ the ``typing`` module.
+
+ Displaying complex types from ``typing`` relies on its private API.
+ """
+ if isinstance(annotation, string_types):
+ return annotation # type: ignore
+ if isinstance(annotation, typing.TypeVar): # type: ignore
+ return annotation.__name__
+ if annotation == Ellipsis:
+ return '...'
+ if not isinstance(annotation, type):
+ return repr(annotation)
+
+ qualified_name = (annotation.__module__ + '.' + annotation.__qualname__ # type: ignore
+ if annotation else repr(annotation))
+
+ if annotation.__module__ == 'builtins':
+ return annotation.__qualname__ # type: ignore
+ elif isinstance(annotation, typing.GenericMeta):
+ # In Python 3.5.2+, all arguments are stored in __args__,
+ # whereas __parameters__ only contains generic parameters.
+ #
+ # Prior to Python 3.5.2, __args__ is not available, and all
+ # arguments are in __parameters__.
+ params = None
+ if hasattr(annotation, '__args__'):
+ if annotation.__args__ is None or len(annotation.__args__) <= 2: # type: ignore # NOQA
+ params = annotation.__args__ # type: ignore
+ else: # typing.Callable
+ args = ', '.join(self.format_annotation(arg) for arg
+ in annotation.__args__[:-1]) # type: ignore
+ result = self.format_annotation(annotation.__args__[-1]) # type: ignore
+ return '%s[[%s], %s]' % (qualified_name, args, result)
+ elif hasattr(annotation, '__parameters__'):
+ params = annotation.__parameters__ # type: ignore
+ if params is not None:
+ param_str = ', '.join(self.format_annotation(p) for p in params)
+ return '%s[%s]' % (qualified_name, param_str)
+ elif (hasattr(typing, 'UnionMeta') and # for py35 or below
+ isinstance(annotation, typing.UnionMeta) and # type: ignore
+ hasattr(annotation, '__union_params__')):
+ params = annotation.__union_params__
+ if params is not None:
+ param_str = ', '.join(self.format_annotation(p) for p in params)
+ return '%s[%s]' % (qualified_name, param_str)
+ elif (isinstance(annotation, typing.CallableMeta) and # type: ignore
+ getattr(annotation, '__args__', None) is not None and
+ hasattr(annotation, '__result__')):
+ # Skipped in the case of plain typing.Callable
+ args = annotation.__args__
+ if args is None:
+ return qualified_name
+ elif args is Ellipsis:
+ args_str = '...'
+ else:
+ formatted_args = (self.format_annotation(a) for a in args)
+ args_str = '[%s]' % ', '.join(formatted_args)
+ return '%s[%s, %s]' % (qualified_name,
+ args_str,
+ self.format_annotation(annotation.__result__))
+ elif (isinstance(annotation, typing.TupleMeta) and # type: ignore
+ hasattr(annotation, '__tuple_params__') and
+ hasattr(annotation, '__tuple_use_ellipsis__')):
+ params = annotation.__tuple_params__
+ if params is not None:
+ param_strings = [self.format_annotation(p) for p in params]
+ if annotation.__tuple_use_ellipsis__:
+ param_strings.append('...')
+ return '%s[%s]' % (qualified_name,
+ ', '.join(param_strings))
+
+ return qualified_name
+
+
+if sys.version_info >= (3, 5):
+ getdoc = inspect.getdoc
+else:
+ # code copied from the inspect.py module of the standard library
+ # of Python 3.5
+
+ def _findclass(func):
+ cls = sys.modules.get(func.__module__)
+ if cls is None:
+ return None
+ if hasattr(func, 'im_class'):
+ cls = func.im_class
+ else:
+ for name in func.__qualname__.split('.')[:-1]:
+ cls = getattr(cls, name)
+ if not inspect.isclass(cls):
+ return None
+ return cls
+
+ def _finddoc(obj):
+ if inspect.isclass(obj):
+ for base in obj.__mro__:
+ if base is not object:
+ try:
+ doc = base.__doc__
+ except AttributeError:
+ continue
+ if doc is not None:
+ return doc
+ return None
+
+ if inspect.ismethod(obj) and getattr(obj, '__self__', None):
+ name = obj.__func__.__name__
+ self = obj.__self__
+ if (inspect.isclass(self) and
+ getattr(getattr(self, name, None), '__func__')
+ is obj.__func__):
+ # classmethod
+ cls = self
+ else:
+ cls = self.__class__
+ elif inspect.isfunction(obj) or inspect.ismethod(obj):
+ name = obj.__name__
+ cls = _findclass(obj)
+ if cls is None or getattr(cls, name) != obj:
+ return None
+ elif inspect.isbuiltin(obj):
+ name = obj.__name__
+ self = obj.__self__
+ if (inspect.isclass(self) and
+ self.__qualname__ + '.' + name == obj.__qualname__):
+ # classmethod
+ cls = self
+ else:
+ cls = self.__class__
+ # Should be tested before isdatadescriptor().
+ elif isinstance(obj, property):
+ func = obj.fget
+ name = func.__name__
+ cls = _findclass(func)
+ if cls is None or getattr(cls, name) is not obj:
+ return None
+ elif inspect.ismethoddescriptor(obj) or inspect.isdatadescriptor(obj):
+ name = obj.__name__
+ cls = obj.__objclass__
+ if getattr(cls, name) is not obj:
+ return None
+ else:
+ return None
+
+ for base in cls.__mro__:
+ try:
+ doc = getattr(base, name).__doc__
+ except AttributeError:
+ continue
+ if doc is not None:
+ return doc
+ return None
+
+ def getdoc(object):
+ """Get the documentation string for an object.
+
+ All tabs are expanded to spaces. To clean up docstrings that are
+ indented to line up with blocks of code, any whitespace than can be
+ uniformly removed from the second line onwards is removed."""
+ try:
+ doc = object.__doc__
+ except AttributeError:
+ return None
+ if doc is None:
+ try:
+ doc = _finddoc(object)
+ except (AttributeError, TypeError):
+ return None
+ if not isinstance(doc, str):
+ return None
+ return inspect.cleandoc(doc)
diff --git a/sphinx/util/nodes.py b/sphinx/util/nodes.py
index 97e5b7f30..4ff4937b9 100644
--- a/sphinx/util/nodes.py
+++ b/sphinx/util/nodes.py
@@ -11,14 +11,12 @@
from __future__ import absolute_import
import re
-import warnings
from six import text_type
from docutils import nodes
from sphinx import addnodes
-from sphinx.deprecation import RemovedInSphinx17Warning
from sphinx.locale import pairindextypes
from sphinx.util import logging
@@ -340,30 +338,6 @@ def set_role_source_info(inliner, lineno, node):
node.source, node.line = inliner.reporter.get_source_and_line(lineno)
-def process_only_nodes(doctree, tags):
- # type: (nodes.Node, Tags) -> None
- # A comment on the comment() nodes being inserted: replacing by [] would
- # result in a "Losing ids" exception if there is a target node before
- # the only node, so we make sure docutils can transfer the id to
- # something, even if it's just a comment and will lose the id anyway...
- warnings.warn('process_only_nodes() is deprecated. '
- 'Use sphinx.environment.apply_post_transforms() instead.',
- RemovedInSphinx17Warning)
-
- for node in doctree.traverse(addnodes.only):
- try:
- ret = tags.eval_condition(node['expr'])
- except Exception as err:
- logger.warning('exception while evaluating only directive expression: %s', err,
- location=node)
- node.replace_self(node.children or nodes.comment())
- else:
- if ret:
- node.replace_self(node.children or nodes.comment())
- else:
- node.replace_self(nodes.comment())
-
-
NON_SMARTQUOTABLE_PARENT_NODES = (
nodes.FixedTextElement,
nodes.literal,
diff --git a/sphinx/util/requests.py b/sphinx/util/requests.py
index 9dee6e694..4bd4c042e 100644
--- a/sphinx/util/requests.py
+++ b/sphinx/util/requests.py
@@ -109,7 +109,7 @@ def ignore_insecure_warning(**kwargs):
def _get_tls_cacert(url, config):
# type: (unicode, Config) -> Union[str, bool]
- """Get addiotinal CA cert for a specific URL.
+ """Get additional CA cert for a specific URL.
This also returns ``False`` if verification is disabled.
And returns ``True`` if additional CA cert not found.
diff --git a/sphinx/util/rst.py b/sphinx/util/rst.py
index efc1ca76d..5860b0fd5 100644
--- a/sphinx/util/rst.py
+++ b/sphinx/util/rst.py
@@ -8,12 +8,41 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+from __future__ import absolute_import
import re
+from contextlib import contextmanager
+
+from docutils.parsers.rst import roles
+from docutils.parsers.rst.languages import en as english
+from docutils.utils import Reporter
+
+from sphinx.util import logging
+
+if False:
+ # For type annotation
+ from typing import Generator # NOQA
symbols_re = re.compile(r'([!-/:-@\[-`{-~])')
+logger = logging.getLogger(__name__)
def escape(text):
# type: (unicode) -> unicode
return symbols_re.sub(r'\\\1', text) # type: ignore
+
+
+@contextmanager
+def default_role(docname, name):
+ # type: (unicode, unicode) -> Generator
+ if name:
+ dummy_reporter = Reporter('', 4, 4)
+ role_fn, _ = roles.role(name, english, 0, dummy_reporter)
+ if role_fn:
+ roles._roles[''] = role_fn
+ else:
+ logger.warning('default role %s not found', name, location=docname)
+
+ yield
+
+ roles._roles.pop('', None) # if a document has set a local default role
diff --git a/sphinx/util/stemmer/__init__.py b/sphinx/util/stemmer/__init__.py
index 8cefee6d2..a10da7370 100644
--- a/sphinx/util/stemmer/__init__.py
+++ b/sphinx/util/stemmer/__init__.py
@@ -21,7 +21,7 @@ except ImportError:
class BaseStemmer(object):
def stem(self, word):
# type: (unicode) -> unicode
- raise NotImplemented
+ raise NotImplementedError()
class PyStemmer(BaseStemmer):
diff --git a/sphinx/versioning.py b/sphinx/versioning.py
index d911085c3..953ef4f6b 100644
--- a/sphinx/versioning.py
+++ b/sphinx/versioning.py
@@ -15,6 +15,9 @@ from itertools import product
from six import iteritems
from six.moves import range, zip_longest
+from six.moves import cPickle as pickle
+
+from sphinx.transforms import SphinxTransform
if False:
# For type annotation
@@ -148,3 +151,32 @@ def levenshtein_distance(a, b):
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row # type: ignore
return previous_row[-1]
+
+
+class UIDTransform(SphinxTransform):
+ """Add UIDs to doctree for versioning."""
+ default_priority = 100
+
+ def apply(self):
+ env = self.env
+ old_doctree = None
+ if env.versioning_compare:
+ # get old doctree
+ try:
+ filename = env.doc2path(env.docname, env.doctreedir, '.doctree')
+ with open(filename, 'rb') as f:
+ old_doctree = pickle.load(f)
+ except EnvironmentError:
+ pass
+
+ # add uids for versioning
+ if not env.versioning_compare or old_doctree is None:
+ list(add_uids(self.document, env.versioning_condition))
+ else:
+ list(merge_doctrees(old_doctree, self.document, env.versioning_condition))
+
+
+def prepare(document):
+ """Simple wrapper for UIDTransform."""
+ transform = UIDTransform(document)
+ transform.apply()
diff --git a/sphinx/writers/html.py b/sphinx/writers/html.py
index 30631f560..b3d27e31a 100644
--- a/sphinx/writers/html.py
+++ b/sphinx/writers/html.py
@@ -491,14 +491,21 @@ class HTMLTranslator(BaseTranslator):
# overwritten
def visit_literal(self, node):
# type: (nodes.Node) -> None
- self.body.append(self.starttag(node, 'code', '',
- CLASS='docutils literal'))
- self.protect_literal_text += 1
+ if 'kbd' in node['classes']:
+ self.body.append(self.starttag(node, 'kbd', '',
+ CLASS='docutils literal'))
+ else:
+ self.body.append(self.starttag(node, 'code', '',
+ CLASS='docutils literal'))
+ self.protect_literal_text += 1
def depart_literal(self, node):
# type: (nodes.Node) -> None
- self.protect_literal_text -= 1
- self.body.append('</code>')
+ if 'kbd' in node['classes']:
+ self.body.append('</kbd>')
+ else:
+ self.protect_literal_text -= 1
+ self.body.append('</code>')
def visit_productionlist(self, node):
# type: (nodes.Node) -> None
diff --git a/sphinx/writers/html5.py b/sphinx/writers/html5.py
index 533235f62..a47fee77e 100644
--- a/sphinx/writers/html5.py
+++ b/sphinx/writers/html5.py
@@ -437,14 +437,21 @@ class HTML5Translator(BaseTranslator):
# overwritten
def visit_literal(self, node):
# type: (nodes.Node) -> None
- self.body.append(self.starttag(node, 'code', '',
- CLASS='docutils literal'))
- self.protect_literal_text += 1
+ if 'kbd' in node['classes']:
+ self.body.append(self.starttag(node, 'kbd', '',
+ CLASS='docutils literal'))
+ else:
+ self.body.append(self.starttag(node, 'code', '',
+ CLASS='docutils literal'))
+ self.protect_literal_text += 1
def depart_literal(self, node):
# type: (nodes.Node) -> None
- self.protect_literal_text -= 1
- self.body.append('</code>')
+ if 'kbd' in node['classes']:
+ self.body.append('</kbd>')
+ else:
+ self.protect_literal_text -= 1
+ self.body.append('</code>')
def visit_productionlist(self, node):
# type: (nodes.Node) -> None
diff --git a/sphinx/writers/latex.py b/sphinx/writers/latex.py
index 98fec811c..9a3c0e5cd 100644
--- a/sphinx/writers/latex.py
+++ b/sphinx/writers/latex.py
@@ -25,6 +25,7 @@ from sphinx import addnodes
from sphinx import highlighting
from sphinx.errors import SphinxError
from sphinx.locale import admonitionlabels, _
+from sphinx.transforms import SphinxTransform
from sphinx.util import split_into, logging
from sphinx.util.i18n import format_date
from sphinx.util.nodes import clean_astext, traverse_parent
@@ -47,7 +48,6 @@ BEGIN_DOC = r'''
URI_SCHEMES = ('mailto:', 'http:', 'https:', 'ftp:')
-SECNUMDEPTH = 3
LATEXSECTIONNAMES = ["part", "chapter", "section", "subsection",
"subsubsection", "paragraph", "subparagraph"]
@@ -59,7 +59,7 @@ DEFAULT_SETTINGS = {
'classoptions': '',
'extraclassoptions': '',
'maxlistdepth': '',
- 'sphinxpkgoptions': 'dontkeepoldnames',
+ 'sphinxpkgoptions': '',
'sphinxsetup': '',
'passoptionstopackages': '',
'geometry': '\\usepackage{geometry}',
@@ -224,12 +224,11 @@ class ExtBabel(Babel):
return language
-class ShowUrlsTransform(object):
- expanded = False
-
- def __init__(self, document):
- # type: (nodes.Node) -> None
- self.document = document
+class ShowUrlsTransform(SphinxTransform, object):
+ def __init__(self, document, startnode=None):
+ # type: (nodes.document, nodes.Node) -> None
+ super(ShowUrlsTransform, self).__init__(document, startnode)
+ self.expanded = False
def apply(self):
# type: () -> None
@@ -504,6 +503,8 @@ def rstdim_to_latexdim(width_str):
class LaTeXTranslator(nodes.NodeVisitor):
+ secnumdepth = 2 # legacy sphinxhowto.cls uses this, whereas article.cls
+ # default is originally 3. For book/report, 2 is already LaTeX default.
ignore_missing_images = False
# sphinx specific document classes
@@ -552,8 +553,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.elements.update({
'releasename': _('Release'),
})
- if builder.config.latex_keep_old_macro_names:
- self.elements['sphinxpkgoptions'] = ''
# we assume LaTeX class provides \chapter command except in case
# of non-Japanese 'howto' case
@@ -583,6 +582,31 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
self.elements['date'] = format_date(builder.config.today_fmt or _('%b %d, %Y'), # type: ignore # NOQA
language=builder.config.language)
+
+ if builder.config.numfig:
+ self.numfig_secnum_depth = builder.config.numfig_secnum_depth
+ if self.numfig_secnum_depth > 0: # default is 1
+ # numfig_secnum_depth as passed to sphinx.sty indices same names as in
+ # LATEXSECTIONNAMES but with -1 for part, 0 for chapter, 1 for section...
+ if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
+ self.top_sectionlevel > 0:
+ self.numfig_secnum_depth += self.top_sectionlevel
+ else:
+ self.numfig_secnum_depth += self.top_sectionlevel - 1
+ # this (minus one) will serve as minimum to LaTeX's secnumdepth
+ self.numfig_secnum_depth = min(self.numfig_secnum_depth,
+ len(LATEXSECTIONNAMES) - 1)
+ # if passed key value is < 1 LaTeX will act as if 0; see sphinx.sty
+ self.elements['sphinxpkgoptions'] += \
+ (',numfigreset=%s' % self.numfig_secnum_depth)
+ else:
+ self.elements['sphinxpkgoptions'] += ',nonumfigreset'
+ try:
+ if builder.config.math_numfig:
+ self.elements['sphinxpkgoptions'] += ',mathnumfig'
+ except AttributeError:
+ pass
+
if builder.config.latex_logo:
# no need for \\noindent here, used in flushright
self.elements['logo'] = '\\sphinxincludegraphics{%s}\\par' % \
@@ -639,6 +663,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
return '\\usepackage{%s}' % (packagename,)
usepackages = (declare_package(*p) for p in builder.usepackages)
self.elements['usepackages'] += "\n".join(usepackages)
+
+ minsecnumdepth = self.secnumdepth # 2 from legacy sphinx manual/howto
if document.get('tocdepth'):
# reduce tocdepth if `part` or `chapter` is used for top_sectionlevel
# tocdepth = -1: show only parts
@@ -646,18 +672,23 @@ class LaTeXTranslator(nodes.NodeVisitor):
# tocdepth = 1: show parts, chapters and sections
# tocdepth = 2: show parts, chapters, sections and subsections
# ...
-
tocdepth = document['tocdepth'] + self.top_sectionlevel - 2
- if len(self.sectionnames) < 7 and self.top_sectionlevel > 0:
+ if len(self.sectionnames) < len(LATEXSECTIONNAMES) and \
+ self.top_sectionlevel > 0:
tocdepth += 1 # because top_sectionlevel is shifted by -1
- if tocdepth > 5: # 5 corresponds to subparagraph
+ if tocdepth > len(LATEXSECTIONNAMES) - 2: # default is 5 <-> subparagraph
logger.warning('too large :maxdepth:, ignored.')
- tocdepth = 5
+ tocdepth = len(LATEXSECTIONNAMES) - 2
self.elements['tocdepth'] = '\\setcounter{tocdepth}{%d}' % tocdepth
- if tocdepth >= SECNUMDEPTH:
- # Increase secnumdepth if tocdepth is deeper than default SECNUMDEPTH
- self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' % tocdepth
+ minsecnumdepth = max(minsecnumdepth, tocdepth)
+
+ if builder.config.numfig and (builder.config.numfig_secnum_depth > 0):
+ minsecnumdepth = max(minsecnumdepth, self.numfig_secnum_depth - 1)
+
+ if minsecnumdepth > self.secnumdepth:
+ self.elements['secnumdepth'] = '\\setcounter{secnumdepth}{%d}' %\
+ minsecnumdepth
if getattr(document.settings, 'contentsname', None):
self.elements['contentsname'] = \
@@ -896,11 +927,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
def render(self, template_name, variables):
# type: (unicode, Dict) -> unicode
- template_path = path.join(self.builder.srcdir, '_templates', template_name)
- if path.exists(template_path):
- return LaTeXRenderer().render(template_path, variables)
- else:
- return LaTeXRenderer().render(template_name, variables)
+ for template_path in self.builder.config.templates_path:
+ template = path.join(template_path, template_name)
+ if path.exists(template):
+ return LaTeXRenderer().render(template, variables)
+
+ return LaTeXRenderer().render(template_name, variables)
def visit_document(self, node):
# type: (nodes.Node) -> None
@@ -1485,8 +1517,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
if len(node) == 1 and isinstance(node[0], nodes.paragraph) and node.astext() == '':
pass
else:
- self.body.append('\\sphinxstylethead{\\sphinxstyletheadfamily ')
- context = '\\unskip}\\relax ' + context
+ self.body.append('\\sphinxstyletheadfamily ')
if self.needs_linetrimming:
self.pushbody([])
self.context.append(context)
@@ -2322,8 +2353,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
else:
hlcode += '\\end{sphinxVerbatim}'
self.body.append('\n' + hllines + '\n' + hlcode + '\n')
- if ids:
- self.body.append('\\let\\sphinxLiteralBlockLabel\\empty\n')
raise nodes.SkipNode
def depart_literal_block(self, node):
@@ -2526,8 +2555,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
# type: (nodes.Node) -> None
if node.get('literal_block'):
self.in_container_literal_block -= 1
- self.body.append('\\let\\sphinxVerbatimTitle\\empty\n')
- self.body.append('\\let\\sphinxLiteralBlockLabel\\empty\n')
def visit_decoration(self, node):
# type: (nodes.Node) -> None
diff --git a/test-reqs.txt b/test-reqs.txt
deleted file mode 100644
index 3a7bde8ea..000000000
--- a/test-reqs.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-flake8
-pytest>=3.0
-pytest-cov
-mock
-six>=1.4
-Jinja2>=2.3
-Pygments>=2.0
-docutils>=0.11
-snowballstemmer>=1.1
-babel
-sqlalchemy>=0.9
-whoosh>=2.0
-alabaster
-sphinx_rtd_theme
-sphinxcontrib-websupport
-imagesize
-requests
-html5lib
-enum34
-typing
diff --git a/tests/conftest.py b/tests/conftest.py
index 4de67c7d6..6cb239d9f 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -8,13 +8,55 @@
"""
import os
+import shutil
+import sys
+import warnings
import pytest
from sphinx.testing.path import path
pytest_plugins = 'sphinx.testing.fixtures'
+# Exclude 'roots' dirs for pytest test collector
+collect_ignore = ['roots']
+
+# Disable Python version-specific
+if sys.version_info < (3, 5):
+ collect_ignore += ['py35']
+
@pytest.fixture(scope='session')
def rootdir():
return path(os.path.dirname(__file__) or '.').abspath() / 'roots'
+
+
+def pytest_report_header(config):
+ return 'Running Sphinx test suite (with Python %s)...' % (
+ sys.version.split()[0])
+
+
+def _filter_warnings():
+ def ignore(**kwargs): warnings.filterwarnings('ignore', **kwargs)
+
+ ignore(category=DeprecationWarning, module='site') # virtualenv
+ ignore(category=PendingDeprecationWarning, module=r'_pytest\..*')
+ ignore(category=ImportWarning, module='pkgutil')
+
+
+def _initialize_test_directory(session):
+ testroot = os.path.join(str(session.config.rootdir), 'tests')
+ tempdir = os.path.abspath(os.getenv('SPHINX_TEST_TEMPDIR',
+ os.path.join(testroot, 'build')))
+ os.environ['SPHINX_TEST_TEMPDIR'] = tempdir
+
+ print('Temporary files will be placed in %s.' % tempdir)
+
+ if os.path.exists(tempdir):
+ shutil.rmtree(tempdir)
+
+ os.makedirs(tempdir)
+
+
+def pytest_sessionstart(session):
+ _filter_warnings()
+ _initialize_test_directory(session)
diff --git a/tests/roots/test-basic/index.rst b/tests/roots/test-basic/index.rst
index 8c4ca7d80..48407e643 100644
--- a/tests/roots/test-basic/index.rst
+++ b/tests/roots/test-basic/index.rst
@@ -12,6 +12,9 @@ Sphinx uses reStructuredText as its markup language, and many of its strengths
come from the power and straightforwardness of reStructuredText and its parsing
and translating suite, the Docutils.
+features
+--------
+
Among its features are the following:
* Output formats: HTML (including derivative formats such as HTML Help, Epub
diff --git a/tests/roots/test-domain-cpp/index.rst b/tests/roots/test-domain-cpp/index.rst
index 618e51037..2df5ec848 100644
--- a/tests/roots/test-domain-cpp/index.rst
+++ b/tests/roots/test-domain-cpp/index.rst
@@ -28,14 +28,20 @@ directives
An unscoped enum.
+ .. cpp:enumerator:: A
+
.. cpp:enum-class:: MyScopedEnum
A scoped enum.
+ .. cpp:enumerator:: B
+
.. cpp:enum-struct:: protected MyScopedVisibilityEnum : std::underlying_type<MySpecificEnum>::type
A scoped enum with non-default visibility, and with a specified underlying type.
+ .. cpp:enumerator:: B
+
.. cpp:function:: void paren_1(int, float)
.. cpp:function:: void paren_2(int, float)
diff --git a/tests/roots/test-domain-py/module.rst b/tests/roots/test-domain-py/module.rst
index deb54629e..509be6c3b 100644
--- a/tests/roots/test-domain-py/module.rst
+++ b/tests/roots/test-domain-py/module.rst
@@ -36,3 +36,5 @@ module
:type x: int
:param y: param y
:type y: tuple(str, float)
+ :rtype: list
+
diff --git a/tests/roots/test-ext-autodoc/target/__init__.py b/tests/roots/test-ext-autodoc/target/__init__.py
new file mode 100644
index 000000000..bd00bf183
--- /dev/null
+++ b/tests/roots/test-ext-autodoc/target/__init__.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+
+import enum
+from six import StringIO, add_metaclass
+from sphinx.ext.autodoc import add_documenter # NOQA
+
+
+__all__ = ['Class']
+
+#: documentation for the integer
+integer = 1
+
+
+def raises(exc, func, *args, **kwds):
+ """Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*."""
+ pass
+
+
+class CustomEx(Exception):
+ """My custom exception."""
+
+ def f(self):
+ """Exception method."""
+
+
+class CustomDataDescriptor(object):
+ """Descriptor class docstring."""
+
+ def __init__(self, doc):
+ self.__doc__ = doc
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ return 42
+
+ def meth(self):
+ """Function."""
+ return "The Answer"
+
+
+class CustomDataDescriptorMeta(type):
+ """Descriptor metaclass docstring."""
+
+
+@add_metaclass(CustomDataDescriptorMeta)
+class CustomDataDescriptor2(CustomDataDescriptor):
+ """Descriptor class with custom metaclass docstring."""
+
+
+def _funky_classmethod(name, b, c, d, docstring=None):
+ """Generates a classmethod for a class from a template by filling out
+ some arguments."""
+ def template(cls, a, b, c, d=4, e=5, f=6):
+ return a, b, c, d, e, f
+ from functools import partial
+ function = partial(template, b=b, c=c, d=d)
+ function.__name__ = name
+ function.__doc__ = docstring
+ return classmethod(function)
+
+
+class Base(object):
+ def inheritedmeth(self):
+ """Inherited function."""
+
+
+class Derived(Base):
+ def inheritedmeth(self):
+ # no docstring here
+ pass
+
+
+class Class(Base):
+ """Class to document."""
+
+ descr = CustomDataDescriptor("Descriptor instance docstring.")
+
+ def meth(self):
+ """Function."""
+
+ def undocmeth(self):
+ pass
+
+ def skipmeth(self):
+ """Method that should be skipped."""
+
+ def excludemeth(self):
+ """Method that should be excluded."""
+
+ # should not be documented
+ skipattr = 'foo'
+
+ #: should be documented -- süß
+ attr = 'bar'
+
+ @property
+ def prop(self):
+ """Property."""
+
+ docattr = 'baz'
+ """should likewise be documented -- süß"""
+
+ udocattr = 'quux'
+ u"""should be documented as well - süß"""
+
+ # initialized to any class imported from another module
+ mdocattr = StringIO()
+ """should be documented as well - süß"""
+
+ roger = _funky_classmethod("roger", 2, 3, 4)
+
+ moore = _funky_classmethod("moore", 9, 8, 7,
+ docstring="moore(a, e, f) -> happiness")
+
+ def __init__(self, arg):
+ self.inst_attr_inline = None #: an inline documented instance attr
+ #: a documented instance attribute
+ self.inst_attr_comment = None
+ self.inst_attr_string = None
+ """a documented instance attribute"""
+ self._private_inst_attr = None #: a private instance attribute
+
+ def __special1__(self):
+ """documented special method"""
+
+ def __special2__(self):
+ # undocumented special method
+ pass
+
+
+class CustomDict(dict):
+ """Docstring."""
+
+
+def function(foo, *args, **kwds):
+ """
+ Return spam.
+ """
+ pass
+
+
+class Outer(object):
+ """Foo"""
+
+ class Inner(object):
+ """Foo"""
+
+ def meth(self):
+ """Foo"""
+
+ # should be documented as an alias
+ factory = dict
+
+
+class DocstringSig(object):
+ def meth(self):
+ """meth(FOO, BAR=1) -> BAZ
+First line of docstring
+
+ rest of docstring
+ """
+
+ def meth2(self):
+ """First line, no signature
+ Second line followed by indentation::
+
+ indented line
+ """
+
+ @property
+ def prop1(self):
+ """DocstringSig.prop1(self)
+ First line of docstring
+ """
+ return 123
+
+ @property
+ def prop2(self):
+ """First line of docstring
+ Second line of docstring
+ """
+ return 456
+
+
+class StrRepr(str):
+ def __repr__(self):
+ return self
+
+
+class AttCls(object):
+ a1 = StrRepr('hello\nworld')
+ a2 = None
+
+
+class InstAttCls(object):
+ """Class with documented class and instance attributes."""
+
+ #: Doc comment for class attribute InstAttCls.ca1.
+ #: It can have multiple lines.
+ ca1 = 'a'
+
+ ca2 = 'b' #: Doc comment for InstAttCls.ca2. One line only.
+
+ ca3 = 'c'
+ """Docstring for class attribute InstAttCls.ca3."""
+
+ def __init__(self):
+ #: Doc comment for instance attribute InstAttCls.ia1
+ self.ia1 = 'd'
+
+ self.ia2 = 'e'
+ """Docstring for instance attribute InstAttCls.ia2."""
+
+
+class EnumCls(enum.Enum):
+ """
+ this is enum class
+ """
+
+ #: doc for val1
+ val1 = 12
+ val2 = 23 #: doc for val2
+ val3 = 34
+ """doc for val3"""
diff --git a/tests/roots/test-ext-autosummary/autosummary_importfail.py b/tests/roots/test-ext-autosummary/autosummary_importfail.py
new file mode 100644
index 000000000..9e3f9f195
--- /dev/null
+++ b/tests/roots/test-ext-autosummary/autosummary_importfail.py
@@ -0,0 +1,4 @@
+import sys
+
+# Fail module import in a catastrophic way
+sys.exit(1)
diff --git a/tests/roots/test-ext-autosummary/contents.rst b/tests/roots/test-ext-autosummary/contents.rst
index 3b43086a2..fc84927bb 100644
--- a/tests/roots/test-ext-autosummary/contents.rst
+++ b/tests/roots/test-ext-autosummary/contents.rst
@@ -1,6 +1,11 @@
+:autolink:`autosummary_dummy_module.Foo`
+
+:autolink:`autosummary_importfail`
+
.. autosummary::
:toctree: generated
autosummary_dummy_module
autosummary_dummy_module.Foo
+ autosummary_importfail
diff --git a/tests/roots/test-ext-inheritance_diagram/index.rst b/tests/roots/test-ext-inheritance_diagram/index.rst
index 777192bd7..8e25eee5b 100644
--- a/tests/roots/test-ext-inheritance_diagram/index.rst
+++ b/tests/roots/test-ext-inheritance_diagram/index.rst
@@ -6,3 +6,5 @@ test-ext-inheritance_diagram
.. inheritance-diagram:: test.Foo
:caption: Test Foo!
+
+.. inheritance-diagram:: test.Baz
diff --git a/tests/roots/test-ext-intersphinx-cppdomain/index.rst b/tests/roots/test-ext-intersphinx-cppdomain/index.rst
index 06c954b99..bf67d52d2 100644
--- a/tests/roots/test-ext-intersphinx-cppdomain/index.rst
+++ b/tests/roots/test-ext-intersphinx-cppdomain/index.rst
@@ -5,4 +5,4 @@ test-ext-intersphinx-cppdomain
:cpp:class:`Bar`
-.. cpp:function:: std::uint8_t FooBarBaz()
+.. cpp:function:: foons::bartype FooBarBaz()
diff --git a/tests/roots/test-ext-math/index.rst b/tests/roots/test-ext-math/index.rst
index 9d16824f6..4237b73ff 100644
--- a/tests/roots/test-ext-math/index.rst
+++ b/tests/roots/test-ext-math/index.rst
@@ -2,8 +2,10 @@ Test Math
=========
.. toctree::
+ :numbered: 1
math
+ page
.. math:: a^2+b^2=c^2
diff --git a/tests/roots/test-ext-math/page.rst b/tests/roots/test-ext-math/page.rst
new file mode 100644
index 000000000..ef8040910
--- /dev/null
+++ b/tests/roots/test-ext-math/page.rst
@@ -0,0 +1,9 @@
+Test multiple pages
+===================
+
+.. math::
+ :label: bar
+
+ a = b + 1
+
+Referencing equations :eq:`foo` and :eq:`bar`.
diff --git a/tests/roots/test-extensions/conf.py b/tests/roots/test-extensions/conf.py
new file mode 100644
index 000000000..9a3cbc844
--- /dev/null
+++ b/tests/roots/test-extensions/conf.py
@@ -0,0 +1,4 @@
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath('.'))
diff --git a/tests/roots/test-extensions/read_parallel.py b/tests/roots/test-extensions/read_parallel.py
new file mode 100644
index 000000000..a3e052f95
--- /dev/null
+++ b/tests/roots/test-extensions/read_parallel.py
@@ -0,0 +1,4 @@
+def setup(app):
+ return {
+ 'parallel_read_safe': True
+ }
diff --git a/tests/roots/test-extensions/read_serial.py b/tests/roots/test-extensions/read_serial.py
new file mode 100644
index 000000000..c55570a5c
--- /dev/null
+++ b/tests/roots/test-extensions/read_serial.py
@@ -0,0 +1,4 @@
+def setup(app):
+ return {
+ 'parallel_read_safe': False
+ }
diff --git a/tests/roots/test-extensions/write_parallel.py b/tests/roots/test-extensions/write_parallel.py
new file mode 100644
index 000000000..ebc48ef9b
--- /dev/null
+++ b/tests/roots/test-extensions/write_parallel.py
@@ -0,0 +1,4 @@
+def setup(app):
+ return {
+ 'parallel_write_safe': True,
+ }
diff --git a/tests/roots/test-extensions/write_serial.py b/tests/roots/test-extensions/write_serial.py
new file mode 100644
index 000000000..75494ce77
--- /dev/null
+++ b/tests/roots/test-extensions/write_serial.py
@@ -0,0 +1,4 @@
+def setup(app):
+ return {
+ 'parallel_write_safe': False
+ }
diff --git a/tests/roots/test-latex-numfig/conf.py b/tests/roots/test-latex-numfig/conf.py
new file mode 100644
index 000000000..506186b26
--- /dev/null
+++ b/tests/roots/test-latex-numfig/conf.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 -*-
+
+master_doc = 'index'
+
+extensions = ['sphinx.ext.imgmath'] # for math_numfig
+
+latex_documents = [
+ ('indexmanual', 'SphinxManual.tex', 'Test numfig manual',
+ 'Sphinx', 'manual'),
+ ('indexhowto', 'SphinxHowTo.tex', 'Test numfig howto',
+ 'Sphinx', 'howto'),
+]
diff --git a/tests/roots/test-latex-numfig/index.rst b/tests/roots/test-latex-numfig/index.rst
new file mode 100644
index 000000000..6b8b9688c
--- /dev/null
+++ b/tests/roots/test-latex-numfig/index.rst
@@ -0,0 +1,9 @@
+=================
+test-latex-numfig
+=================
+
+.. toctree::
+ :numbered:
+
+ indexmanual
+ indexhowto
diff --git a/tests/roots/test-latex-numfig/indexhowto.rst b/tests/roots/test-latex-numfig/indexhowto.rst
new file mode 100644
index 000000000..4749f1ecd
--- /dev/null
+++ b/tests/roots/test-latex-numfig/indexhowto.rst
@@ -0,0 +1,10 @@
+=======================
+test-latex-numfig-howto
+=======================
+
+This is a part
+==============
+
+This is a section
+-----------------
+
diff --git a/tests/roots/test-latex-numfig/indexmanual.rst b/tests/roots/test-latex-numfig/indexmanual.rst
new file mode 100644
index 000000000..8bab4fbfd
--- /dev/null
+++ b/tests/roots/test-latex-numfig/indexmanual.rst
@@ -0,0 +1,13 @@
+========================
+test-latex-numfig-manual
+========================
+
+First part
+==========
+
+This is chapter
+---------------
+
+This is section
+~~~~~~~~~~~~~~~
+
diff --git a/tests/roots/test-latex-table/expects/gridtable.tex b/tests/roots/test-latex-table/expects/gridtable.tex
index 6f1fc0b91..cb74bea77 100644
--- a/tests/roots/test-latex-table/expects/gridtable.tex
+++ b/tests/roots/test-latex-table/expects/gridtable.tex
@@ -4,13 +4,13 @@
\centering
\begin{tabulary}{\linewidth}[t]{|T|T|T|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header3
-\unskip}\relax \\
+\\
\hline
cell1-1
&\sphinxmultirow{2}{5}{%
diff --git a/tests/roots/test-latex-table/expects/longtable.tex b/tests/roots/test-latex-table/expects/longtable.tex
index 75b03f613..7c8699c75 100644
--- a/tests/roots/test-latex-table/expects/longtable.tex
+++ b/tests/roots/test-latex-table/expects/longtable.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|l|l|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_align.tex b/tests/roots/test-latex-table/expects/longtable_having_align.tex
index eec0dad60..cfb9f1fcc 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_align.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_align.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}[r]{|l|l|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_caption.tex b/tests/roots/test-latex-table/expects/longtable_having_caption.tex
index 3dc4c0d41..10920f82e 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_caption.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_caption.tex
@@ -3,22 +3,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|l|l|}
\caption{caption for longtable\strut}\label{\detokenize{longtable:id1}}\\*[\sphinxlongtablecapskipadjust]
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_problematic_cell.tex b/tests/roots/test-latex-table/expects/longtable_having_problematic_cell.tex
index ade76d5e8..7fe48817f 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_problematic_cell.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|*{2}{\X{1}{2}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_stub_columns_and_problematic_cell.tex b/tests/roots/test-latex-table/expects/longtable_having_stub_columns_and_problematic_cell.tex
index ac6a5208a..137752c64 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_stub_columns_and_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_stub_columns_and_problematic_cell.tex
@@ -2,26 +2,26 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|*{3}{\X{1}{3}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header3
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{3}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header3
-\unskip}\relax \\
+\\
\hline
\endhead
@@ -30,7 +30,7 @@ header3
\endfoot
\endlastfoot
-\sphinxstylethead{\sphinxstyletheadfamily \begin{itemize}
+\sphinxstyletheadfamily \begin{itemize}
\item {}
instub1-1a
@@ -38,16 +38,16 @@ instub1-1a
instub1-1b
\end{itemize}
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
instub1-2
-\unskip}\relax &
+&
notinstub1-3
\\
-\hline\sphinxstylethead{\sphinxstyletheadfamily
+\hline\sphinxstyletheadfamily
cell2-1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
cell2-2
-\unskip}\relax &
+&
cell2-3
\\
\hline
diff --git a/tests/roots/test-latex-table/expects/longtable_having_verbatim.tex b/tests/roots/test-latex-table/expects/longtable_having_verbatim.tex
index 7b4fb6964..e1628a9bd 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_verbatim.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_verbatim.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|*{2}{\X{1}{2}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_widths.tex b/tests/roots/test-latex-table/expects/longtable_having_widths.tex
index 18f8c2a86..d41a87586 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_widths.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_widths.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|\X{30}{100}|\X{70}{100}|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_having_widths_and_problematic_cell.tex b/tests/roots/test-latex-table/expects/longtable_having_widths_and_problematic_cell.tex
index ec9fade7b..b299bfeb8 100644
--- a/tests/roots/test-latex-table/expects/longtable_having_widths_and_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/longtable_having_widths_and_problematic_cell.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|\X{30}{100}|\X{70}{100}|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/longtable_with_tabularcolumn.tex b/tests/roots/test-latex-table/expects/longtable_with_tabularcolumn.tex
index 613041b9b..8777ef090 100644
--- a/tests/roots/test-latex-table/expects/longtable_with_tabularcolumn.tex
+++ b/tests/roots/test-latex-table/expects/longtable_with_tabularcolumn.tex
@@ -2,22 +2,22 @@
\begin{savenotes}\sphinxatlongtablestart\begin{longtable}{|c|c|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endfirsthead
\multicolumn{2}{c}%
{\makebox[0pt]{\sphinxtablecontinued{\tablename\ \thetable{} -- continued from previous page}}}\\
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\endhead
diff --git a/tests/roots/test-latex-table/expects/simple_table.tex b/tests/roots/test-latex-table/expects/simple_table.tex
index 58a6e31b3..9ad911588 100644
--- a/tests/roots/test-latex-table/expects/simple_table.tex
+++ b/tests/roots/test-latex-table/expects/simple_table.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabulary}{\linewidth}[t]{|T|T|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-latex-table/expects/table_having_caption.tex b/tests/roots/test-latex-table/expects/table_having_caption.tex
index e0957e2ea..13e02e62c 100644
--- a/tests/roots/test-latex-table/expects/table_having_caption.tex
+++ b/tests/roots/test-latex-table/expects/table_having_caption.tex
@@ -7,11 +7,11 @@
\sphinxaftercaption
\begin{tabulary}{\linewidth}[t]{|T|T|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-latex-table/expects/table_having_problematic_cell.tex b/tests/roots/test-latex-table/expects/table_having_problematic_cell.tex
index 0448b4e2b..561a98010 100644
--- a/tests/roots/test-latex-table/expects/table_having_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/table_having_problematic_cell.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabular}[t]{|*{2}{\X{1}{2}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline\begin{itemize}
\item {}
item1
diff --git a/tests/roots/test-latex-table/expects/table_having_stub_columns_and_problematic_cell.tex b/tests/roots/test-latex-table/expects/table_having_stub_columns_and_problematic_cell.tex
index e8818d431..6904c43c3 100644
--- a/tests/roots/test-latex-table/expects/table_having_stub_columns_and_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/table_having_stub_columns_and_problematic_cell.tex
@@ -4,14 +4,14 @@
\centering
\begin{tabular}[t]{|*{3}{\X{1}{3}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header3
-\unskip}\relax \\
-\hline\sphinxstylethead{\sphinxstyletheadfamily \begin{itemize}
+\\
+\hline\sphinxstyletheadfamily \begin{itemize}
\item {}
instub1-1a
@@ -19,16 +19,16 @@ instub1-1a
instub1-1b
\end{itemize}
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
instub1-2
-\unskip}\relax &
+&
notinstub1-3
\\
-\hline\sphinxstylethead{\sphinxstyletheadfamily
+\hline\sphinxstyletheadfamily
cell2-1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
cell2-2
-\unskip}\relax &
+&
cell2-3
\\
\hline
diff --git a/tests/roots/test-latex-table/expects/table_having_verbatim.tex b/tests/roots/test-latex-table/expects/table_having_verbatim.tex
index 4a49e4cbb..40d2f424c 100644
--- a/tests/roots/test-latex-table/expects/table_having_verbatim.tex
+++ b/tests/roots/test-latex-table/expects/table_having_verbatim.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabular}[t]{|*{2}{\X{1}{2}|}}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
\fvset{hllines={, ,}}%
\begin{sphinxVerbatimintable}[commandchars=\\\{\}]
diff --git a/tests/roots/test-latex-table/expects/table_having_widths.tex b/tests/roots/test-latex-table/expects/table_having_widths.tex
index 2314283ae..914793181 100644
--- a/tests/roots/test-latex-table/expects/table_having_widths.tex
+++ b/tests/roots/test-latex-table/expects/table_having_widths.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabular}[t]{|\X{30}{100}|\X{70}{100}|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-latex-table/expects/table_having_widths_and_problematic_cell.tex b/tests/roots/test-latex-table/expects/table_having_widths_and_problematic_cell.tex
index 3df2dc4e3..d3e2e8144 100644
--- a/tests/roots/test-latex-table/expects/table_having_widths_and_problematic_cell.tex
+++ b/tests/roots/test-latex-table/expects/table_having_widths_and_problematic_cell.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabular}[t]{|\X{30}{100}|\X{70}{100}|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline\begin{itemize}
\item {}
item1
diff --git a/tests/roots/test-latex-table/expects/tabular_having_widths.tex b/tests/roots/test-latex-table/expects/tabular_having_widths.tex
index 0e3d4f2a4..ae67fe924 100644
--- a/tests/roots/test-latex-table/expects/tabular_having_widths.tex
+++ b/tests/roots/test-latex-table/expects/tabular_having_widths.tex
@@ -4,11 +4,11 @@
\raggedright
\begin{tabular}[t]{|\X{30}{100}|\X{70}{100}|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-latex-table/expects/tabularcolumn.tex b/tests/roots/test-latex-table/expects/tabularcolumn.tex
index 9237639b5..78d31058f 100644
--- a/tests/roots/test-latex-table/expects/tabularcolumn.tex
+++ b/tests/roots/test-latex-table/expects/tabularcolumn.tex
@@ -4,11 +4,11 @@
\centering
\begin{tabulary}{\linewidth}[t]{|c|c|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-latex-table/expects/tabulary_having_widths.tex b/tests/roots/test-latex-table/expects/tabulary_having_widths.tex
index fc89e3e6e..53f1f2aaa 100644
--- a/tests/roots/test-latex-table/expects/tabulary_having_widths.tex
+++ b/tests/roots/test-latex-table/expects/tabulary_having_widths.tex
@@ -4,11 +4,11 @@
\raggedleft
\begin{tabulary}{\linewidth}[t]{|T|T|}
\hline
-\sphinxstylethead{\sphinxstyletheadfamily
+\sphinxstyletheadfamily
header1
-\unskip}\relax &\sphinxstylethead{\sphinxstyletheadfamily
+&\sphinxstyletheadfamily
header2
-\unskip}\relax \\
+\\
\hline
cell1-1
&
diff --git a/tests/roots/test-root/autodoc.txt b/tests/roots/test-root/autodoc.txt
index aa0dffba1..3c83ebf6e 100644
--- a/tests/roots/test-root/autodoc.txt
+++ b/tests/roots/test-root/autodoc.txt
@@ -5,7 +5,7 @@ Just testing a few autodoc possibilities...
.. automodule:: util
-.. automodule:: test_autodoc
+.. automodule:: autodoc_target
:members:
.. autofunction:: function
@@ -34,7 +34,7 @@ Just testing a few autodoc possibilities...
.. autoclass:: MarkupError
-.. currentmodule:: test_autodoc
+.. currentmodule:: autodoc_target
.. autoclass:: InstAttCls
:members:
diff --git a/tests/roots/test-root/autodoc_target.py b/tests/roots/test-root/autodoc_target.py
new file mode 100644
index 000000000..bd00bf183
--- /dev/null
+++ b/tests/roots/test-root/autodoc_target.py
@@ -0,0 +1,225 @@
+# -*- coding: utf-8 -*-
+
+import enum
+from six import StringIO, add_metaclass
+from sphinx.ext.autodoc import add_documenter # NOQA
+
+
+__all__ = ['Class']
+
+#: documentation for the integer
+integer = 1
+
+
+def raises(exc, func, *args, **kwds):
+ """Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*."""
+ pass
+
+
+class CustomEx(Exception):
+ """My custom exception."""
+
+ def f(self):
+ """Exception method."""
+
+
+class CustomDataDescriptor(object):
+ """Descriptor class docstring."""
+
+ def __init__(self, doc):
+ self.__doc__ = doc
+
+ def __get__(self, obj, type=None):
+ if obj is None:
+ return self
+ return 42
+
+ def meth(self):
+ """Function."""
+ return "The Answer"
+
+
+class CustomDataDescriptorMeta(type):
+ """Descriptor metaclass docstring."""
+
+
+@add_metaclass(CustomDataDescriptorMeta)
+class CustomDataDescriptor2(CustomDataDescriptor):
+ """Descriptor class with custom metaclass docstring."""
+
+
+def _funky_classmethod(name, b, c, d, docstring=None):
+ """Generates a classmethod for a class from a template by filling out
+ some arguments."""
+ def template(cls, a, b, c, d=4, e=5, f=6):
+ return a, b, c, d, e, f
+ from functools import partial
+ function = partial(template, b=b, c=c, d=d)
+ function.__name__ = name
+ function.__doc__ = docstring
+ return classmethod(function)
+
+
+class Base(object):
+ def inheritedmeth(self):
+ """Inherited function."""
+
+
+class Derived(Base):
+ def inheritedmeth(self):
+ # no docstring here
+ pass
+
+
+class Class(Base):
+ """Class to document."""
+
+ descr = CustomDataDescriptor("Descriptor instance docstring.")
+
+ def meth(self):
+ """Function."""
+
+ def undocmeth(self):
+ pass
+
+ def skipmeth(self):
+ """Method that should be skipped."""
+
+ def excludemeth(self):
+ """Method that should be excluded."""
+
+ # should not be documented
+ skipattr = 'foo'
+
+ #: should be documented -- süß
+ attr = 'bar'
+
+ @property
+ def prop(self):
+ """Property."""
+
+ docattr = 'baz'
+ """should likewise be documented -- süß"""
+
+ udocattr = 'quux'
+ u"""should be documented as well - süß"""
+
+ # initialized to any class imported from another module
+ mdocattr = StringIO()
+ """should be documented as well - süß"""
+
+ roger = _funky_classmethod("roger", 2, 3, 4)
+
+ moore = _funky_classmethod("moore", 9, 8, 7,
+ docstring="moore(a, e, f) -> happiness")
+
+ def __init__(self, arg):
+ self.inst_attr_inline = None #: an inline documented instance attr
+ #: a documented instance attribute
+ self.inst_attr_comment = None
+ self.inst_attr_string = None
+ """a documented instance attribute"""
+ self._private_inst_attr = None #: a private instance attribute
+
+ def __special1__(self):
+ """documented special method"""
+
+ def __special2__(self):
+ # undocumented special method
+ pass
+
+
+class CustomDict(dict):
+ """Docstring."""
+
+
+def function(foo, *args, **kwds):
+ """
+ Return spam.
+ """
+ pass
+
+
+class Outer(object):
+ """Foo"""
+
+ class Inner(object):
+ """Foo"""
+
+ def meth(self):
+ """Foo"""
+
+ # should be documented as an alias
+ factory = dict
+
+
+class DocstringSig(object):
+ def meth(self):
+ """meth(FOO, BAR=1) -> BAZ
+First line of docstring
+
+ rest of docstring
+ """
+
+ def meth2(self):
+ """First line, no signature
+ Second line followed by indentation::
+
+ indented line
+ """
+
+ @property
+ def prop1(self):
+ """DocstringSig.prop1(self)
+ First line of docstring
+ """
+ return 123
+
+ @property
+ def prop2(self):
+ """First line of docstring
+ Second line of docstring
+ """
+ return 456
+
+
+class StrRepr(str):
+ def __repr__(self):
+ return self
+
+
+class AttCls(object):
+ a1 = StrRepr('hello\nworld')
+ a2 = None
+
+
+class InstAttCls(object):
+ """Class with documented class and instance attributes."""
+
+ #: Doc comment for class attribute InstAttCls.ca1.
+ #: It can have multiple lines.
+ ca1 = 'a'
+
+ ca2 = 'b' #: Doc comment for InstAttCls.ca2. One line only.
+
+ ca3 = 'c'
+ """Docstring for class attribute InstAttCls.ca3."""
+
+ def __init__(self):
+ #: Doc comment for instance attribute InstAttCls.ia1
+ self.ia1 = 'd'
+
+ self.ia2 = 'e'
+ """Docstring for instance attribute InstAttCls.ia2."""
+
+
+class EnumCls(enum.Enum):
+ """
+ this is enum class
+ """
+
+ #: doc for val1
+ val1 = 12
+ val2 = 23 #: doc for val2
+ val3 = 34
+ """doc for val3"""
diff --git a/tests/roots/test-root/conf.py b/tests/roots/test-root/conf.py
index bd3cb9a9b..0753fe19c 100644
--- a/tests/roots/test-root/conf.py
+++ b/tests/roots/test-root/conf.py
@@ -29,7 +29,8 @@ numfig = True
rst_epilog = '.. |subst| replace:: global substitution'
-html_sidebars = {'**': 'customsb.html',
+html_sidebars = {'**': ['localtoc.html', 'relations.html', 'sourcelink.html',
+ 'customsb.html', 'searchbox.html'],
'contents': ['contentssb.html', 'localtoc.html',
'globaltoc.html']}
html_style = 'default.css'
diff --git a/tests/roots/test-theming/test_theme/test-theme/theme.conf b/tests/roots/test-theming/test_theme/test-theme/theme.conf
index 0d8403f0b..b7518bc9c 100644
--- a/tests/roots/test-theming/test_theme/test-theme/theme.conf
+++ b/tests/roots/test-theming/test_theme/test-theme/theme.conf
@@ -1,2 +1,3 @@
[theme]
inherit = classic
+sidebars = globaltoc.html, searchbox.html
diff --git a/tests/run.py b/tests/run.py
deleted file mode 100755
index f84926fb6..000000000
--- a/tests/run.py
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Sphinx unit test driver
- ~~~~~~~~~~~~~~~~~~~~~~~
-
- This script runs the Sphinx unit test suite.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-from __future__ import print_function
-
-import os
-import sys
-import warnings
-import traceback
-import shutil
-
-testroot = os.path.dirname(__file__) or '.'
-sys.path.insert(0, os.path.abspath(os.path.join(testroot, os.path.pardir)))
-
-# filter warnings of test dependencies
-warnings.filterwarnings('ignore', category=DeprecationWarning, module='site') # virtualenv
-warnings.filterwarnings('ignore', category=ImportWarning, module='backports')
-warnings.filterwarnings('ignore', category=ImportWarning, module='pkgutil')
-warnings.filterwarnings('ignore', category=ImportWarning, module='pytest_cov')
-warnings.filterwarnings('ignore', category=PendingDeprecationWarning, module=r'_pytest\..*')
-
-# check dependencies before testing
-print('Checking dependencies...')
-for modname in ('pytest', 'mock', 'six', 'docutils', 'jinja2', 'pygments',
- 'snowballstemmer', 'babel', 'html5lib'):
- try:
- __import__(modname)
- except ImportError as err:
- if modname == 'mock' and sys.version_info[0] == 3:
- continue
- traceback.print_exc()
- print('The %r package is needed to run the Sphinx test suite.' % modname)
- sys.exit(1)
-
-# find a temp dir for testing and clean it up now
-os.environ['SPHINX_TEST_TEMPDIR'] = \
- os.path.abspath(os.path.join(testroot, 'build')) \
- if 'SPHINX_TEST_TEMPDIR' not in os.environ \
- else os.path.abspath(os.environ['SPHINX_TEST_TEMPDIR'])
-
-tempdir = os.environ['SPHINX_TEST_TEMPDIR']
-print('Temporary files will be placed in %s.' % tempdir)
-if os.path.exists(tempdir):
- shutil.rmtree(tempdir)
-os.makedirs(tempdir)
-
-print('Running Sphinx test suite (with Python %s)...' % sys.version.split()[0])
-sys.stdout.flush()
-
-# exclude 'roots' dirs for pytest test collector
-ignore_paths = [
- os.path.relpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), sub))
- for sub in ('roots',)
-]
-args = sys.argv[1:]
-for ignore_path in ignore_paths:
- args.extend(['--ignore', ignore_path])
-
-import pytest # NOQA
-sys.exit(pytest.main(args))
diff --git a/tests/test_application.py b/tests/test_application.py
index 8535cc9dc..12b6bbe60 100644
--- a/tests/test_application.py
+++ b/tests/test_application.py
@@ -12,6 +12,7 @@ from docutils import nodes
from sphinx.application import ExtensionError
from sphinx.domains import Domain
+from sphinx.util import logging
from sphinx.testing.util import strip_escseq
import pytest
@@ -86,3 +87,38 @@ def test_add_source_parser(app, status, warning):
assert set(app.registry.get_source_parsers().keys()) == set(['*', '.md', '.test'])
assert app.registry.get_source_parsers()['.md'].__name__ == 'DummyMarkdownParser'
assert app.registry.get_source_parsers()['.test'].__name__ == 'TestSourceParser'
+
+
+@pytest.mark.sphinx(testroot='extensions')
+def test_add_is_parallel_allowed(app, status, warning):
+ logging.setup(app, status, warning)
+
+ assert app.is_parallel_allowed('read') is True
+ assert app.is_parallel_allowed('write') is True
+ assert warning.getvalue() == ''
+
+ app.setup_extension('read_parallel')
+ assert app.is_parallel_allowed('read') is True
+ assert app.is_parallel_allowed('write') is True
+ assert warning.getvalue() == ''
+ app.extensions.pop('read_parallel')
+
+ app.setup_extension('write_parallel')
+ assert app.is_parallel_allowed('read') is False
+ assert app.is_parallel_allowed('write') is True
+ assert 'the write_parallel extension does not declare' in warning.getvalue()
+ app.extensions.pop('write_parallel')
+ warning.truncate(0) # reset warnings
+
+ app.setup_extension('read_serial')
+ assert app.is_parallel_allowed('read') is False
+ assert app.is_parallel_allowed('write') is True
+ assert warning.getvalue() == ''
+ app.extensions.pop('read_serial')
+
+ app.setup_extension('write_serial')
+ assert app.is_parallel_allowed('read') is False
+ assert app.is_parallel_allowed('write') is False
+ assert 'the write_serial extension does not declare' in warning.getvalue()
+ app.extensions.pop('write_serial')
+ warning.truncate(0) # reset warnings
diff --git a/tests/test_autodoc.py b/tests/test_autodoc.py
index 462f65698..61152ba02 100644
--- a/tests/test_autodoc.py
+++ b/tests/test_autodoc.py
@@ -10,13 +10,12 @@
:license: BSD, see LICENSE for details.
"""
+import sys
from six import PY3
from sphinx.testing.util import SphinxTestApp, Struct # NOQA
import pytest
-import enum
-from six import StringIO, add_metaclass
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import AutoDirective, add_documenter, \
@@ -27,18 +26,23 @@ app = None
@pytest.fixture(scope='module', autouse=True)
def setup_module(rootdir, sphinx_test_tempdir):
- global app
- srcdir = sphinx_test_tempdir / 'autodoc-root'
- if not srcdir.exists():
- (rootdir/'test-root').copytree(srcdir)
- app = SphinxTestApp(srcdir=srcdir)
- app.builder.env.app = app
- app.builder.env.temp_data['docname'] = 'dummy'
- app.connect('autodoc-process-docstring', process_docstring)
- app.connect('autodoc-process-signature', process_signature)
- app.connect('autodoc-skip-member', skip_member)
- yield
- app.cleanup()
+ try:
+ global app
+ srcdir = sphinx_test_tempdir / 'autodoc-root'
+ if not srcdir.exists():
+ (rootdir / 'test-root').copytree(srcdir)
+ testroot = rootdir / 'test-ext-autodoc'
+ sys.path.append(testroot)
+ app = SphinxTestApp(srcdir=srcdir)
+ app.builder.env.app = app
+ app.builder.env.temp_data['docname'] = 'dummy'
+ app.connect('autodoc-process-docstring', process_docstring)
+ app.connect('autodoc-process-signature', process_signature)
+ app.connect('autodoc-skip-member', skip_member)
+ yield
+ finally:
+ app.cleanup()
+ sys.path.remove(testroot)
directive = options = None
@@ -64,6 +68,7 @@ def setup_test():
members = [],
member_order = 'alphabetic',
exclude_members = set(),
+ ignore_module_all = False,
)
directive = Struct(
@@ -235,7 +240,7 @@ def test_format_signature():
pass
assert formatsig('method', 'H.foo', H.foo1, None, None) == '(b, *c)'
assert formatsig('method', 'H.foo', H.foo1, 'a', None) == '(a)'
- assert formatsig('method', 'H.foo', H.foo2, None, None) == '(b, *c)'
+ assert formatsig('method', 'H.foo', H.foo2, None, None) == '(*c)'
assert formatsig('method', 'H.foo', H.foo3, None, None) == r"(d='\\n')"
# test exception handling (exception is caught and args is '')
@@ -420,7 +425,7 @@ def test_get_doc():
# class has __init__ method without docstring and
# __new__ method with docstring
# class docstring: depends on config value which one is taken
- class I:
+ class I: # NOQA
"""Class docstring"""
def __new__(cls):
"""New docstring"""
@@ -431,6 +436,15 @@ def test_get_doc():
directive.env.config.autoclass_content = 'both'
assert getdocl('class', I) == ['Class docstring', '', 'New docstring']
+ from target import Base, Derived
+
+ # NOTE: inspect.getdoc seems not to work with locally defined classes
+ directive.env.config.autodoc_inherit_docstrings = False
+ assert getdocl('method', Base.inheritedmeth) == ['Inherited function.']
+ assert getdocl('method', Derived.inheritedmeth) == []
+ directive.env.config.autodoc_inherit_docstrings = True
+ assert getdocl('method', Derived.inheritedmeth) == ['Inherited function.']
+
@pytest.mark.usefixtures('setup_test')
def test_docstring_processing():
@@ -501,24 +515,24 @@ def test_docstring_property_processing():
directive.env.config.autodoc_docstring_signature = False
results, docstrings = \
- genarate_docstring('attribute', 'test_autodoc.DocstringSig.prop1')
+ genarate_docstring('attribute', 'target.DocstringSig.prop1')
assert '.. py:attribute:: DocstringSig.prop1' in results
assert 'First line of docstring' in docstrings
assert 'DocstringSig.prop1(self)' in docstrings
results, docstrings = \
- genarate_docstring('attribute', 'test_autodoc.DocstringSig.prop2')
+ genarate_docstring('attribute', 'target.DocstringSig.prop2')
assert '.. py:attribute:: DocstringSig.prop2' in results
assert 'First line of docstring' in docstrings
assert 'Second line of docstring' in docstrings
directive.env.config.autodoc_docstring_signature = True
results, docstrings = \
- genarate_docstring('attribute', 'test_autodoc.DocstringSig.prop1')
+ genarate_docstring('attribute', 'target.DocstringSig.prop1')
assert '.. py:attribute:: DocstringSig.prop1' in results
assert 'First line of docstring' in docstrings
assert 'DocstringSig.prop1(self)' not in docstrings
results, docstrings = \
- genarate_docstring('attribute', 'test_autodoc.DocstringSig.prop2')
+ genarate_docstring('attribute', 'target.DocstringSig.prop2')
assert '.. py:attribute:: DocstringSig.prop2' in results
assert 'First line of docstring' in docstrings
assert 'Second line of docstring' in docstrings
@@ -549,11 +563,13 @@ def test_new_documenter():
del directive.result[:]
options.members = ['integer']
- assert_result_contains('.. py:data:: integer', 'module', 'test_autodoc')
+ assert_result_contains('.. py:data:: integer', 'module', 'target')
@pytest.mark.usefixtures('setup_test')
def test_attrgetter_using():
+ from target import Class
+
def assert_getter_works(objtype, name, obj, attrs=[], **kw):
getattr_spy = []
@@ -578,10 +594,10 @@ def test_attrgetter_using():
options.members = ALL
options.inherited_members = False
- assert_getter_works('class', 'test_autodoc.Class', Class, ['meth'])
+ assert_getter_works('class', 'target.Class', Class, ['meth'])
options.inherited_members = True
- assert_getter_works('class', 'test_autodoc.Class', Class, ['meth', 'inheritedmeth'])
+ assert_getter_works('class', 'target.Class', Class, ['meth', 'inheritedmeth'])
@pytest.mark.usefixtures('setup_test')
@@ -648,11 +664,11 @@ def test_generate():
assert_warns("failed to import function 'foobar' from module 'util'",
'function', 'util.foobar', more_content=None)
# method missing
- assert_warns("failed to import method 'Class.foobar' from module 'test_autodoc';",
- 'method', 'test_autodoc.Class.foobar', more_content=None)
+ assert_warns("failed to import method 'Class.foobar' from module 'target';",
+ 'method', 'target.Class.foobar', more_content=None)
# test auto and given content mixing
- directive.env.ref_context['py:module'] = 'test_autodoc'
+ directive.env.ref_context['py:module'] = 'target'
assert_result_contains(' Function.', 'method', 'Class.meth')
add_content = ViewList()
add_content.append('Content.', '', 0)
@@ -667,72 +683,77 @@ def test_generate():
assert len(directive.result) == 0
# assert that exceptions can be documented
- assert_works('exception', 'test_autodoc.CustomEx', all_members=True)
- assert_works('exception', 'test_autodoc.CustomEx')
+ assert_works('exception', 'target.CustomEx', all_members=True)
+ assert_works('exception', 'target.CustomEx')
# test diverse inclusion settings for members
- should = [('class', 'test_autodoc.Class')]
+ should = [('class', 'target.Class')]
assert_processes(should, 'class', 'Class')
- should.extend([('method', 'test_autodoc.Class.meth')])
+ should.extend([('method', 'target.Class.meth')])
options.members = ['meth']
options.exclude_members = set(['excludemeth'])
assert_processes(should, 'class', 'Class')
- should.extend([('attribute', 'test_autodoc.Class.prop'),
- ('attribute', 'test_autodoc.Class.descr'),
- ('attribute', 'test_autodoc.Class.attr'),
- ('attribute', 'test_autodoc.Class.docattr'),
- ('attribute', 'test_autodoc.Class.udocattr'),
- ('attribute', 'test_autodoc.Class.mdocattr'),
- ('attribute', 'test_autodoc.Class.inst_attr_comment'),
- ('attribute', 'test_autodoc.Class.inst_attr_inline'),
- ('attribute', 'test_autodoc.Class.inst_attr_string'),
- ('method', 'test_autodoc.Class.moore'),
+ should.extend([('attribute', 'target.Class.prop'),
+ ('attribute', 'target.Class.descr'),
+ ('attribute', 'target.Class.attr'),
+ ('attribute', 'target.Class.docattr'),
+ ('attribute', 'target.Class.udocattr'),
+ ('attribute', 'target.Class.mdocattr'),
+ ('attribute', 'target.Class.inst_attr_comment'),
+ ('attribute', 'target.Class.inst_attr_inline'),
+ ('attribute', 'target.Class.inst_attr_string'),
+ ('method', 'target.Class.moore'),
])
options.members = ALL
assert_processes(should, 'class', 'Class')
options.undoc_members = True
- should.extend((('attribute', 'test_autodoc.Class.skipattr'),
- ('method', 'test_autodoc.Class.undocmeth'),
- ('method', 'test_autodoc.Class.roger')))
+ should.extend((('attribute', 'target.Class.skipattr'),
+ ('method', 'target.Class.undocmeth'),
+ ('method', 'target.Class.roger')))
assert_processes(should, 'class', 'Class')
options.inherited_members = True
- should.append(('method', 'test_autodoc.Class.inheritedmeth'))
+ should.append(('method', 'target.Class.inheritedmeth'))
assert_processes(should, 'class', 'Class')
# test special members
options.special_members = ['__special1__']
- should.append(('method', 'test_autodoc.Class.__special1__'))
+ should.append(('method', 'target.Class.__special1__'))
assert_processes(should, 'class', 'Class')
options.special_members = ALL
- should.append(('method', 'test_autodoc.Class.__special2__'))
+ should.append(('method', 'target.Class.__special2__'))
assert_processes(should, 'class', 'Class')
options.special_members = False
options.members = []
# test module flags
- assert_result_contains('.. py:module:: test_autodoc',
- 'module', 'test_autodoc')
+ assert_result_contains('.. py:module:: target',
+ 'module', 'target')
options.synopsis = 'Synopsis'
- assert_result_contains(' :synopsis: Synopsis', 'module', 'test_autodoc')
+ assert_result_contains(' :synopsis: Synopsis', 'module', 'target')
options.deprecated = True
- assert_result_contains(' :deprecated:', 'module', 'test_autodoc')
+ assert_result_contains(' :deprecated:', 'module', 'target')
options.platform = 'Platform'
- assert_result_contains(' :platform: Platform', 'module', 'test_autodoc')
+ assert_result_contains(' :platform: Platform', 'module', 'target')
# test if __all__ is respected for modules
options.members = ALL
- assert_result_contains('.. py:class:: Class(arg)', 'module', 'test_autodoc')
+ assert_result_contains('.. py:class:: Class(arg)', 'module', 'target')
try:
assert_result_contains('.. py:exception:: CustomEx',
- 'module', 'test_autodoc')
+ 'module', 'target')
except AssertionError:
pass
else:
assert False, 'documented CustomEx which is not in __all__'
+ # test ignore-module-all
+ options.ignore_module_all = True
+ assert_result_contains('.. py:class:: Class(arg)', 'module', 'target')
+ assert_result_contains('.. py:exception:: CustomEx', 'module', 'target')
+
# test noindex flag
options.members = []
options.noindex = True
- assert_result_contains(' :noindex:', 'module', 'test_autodoc')
+ assert_result_contains(' :noindex:', 'module', 'target')
assert_result_contains(' :noindex:', 'class', 'Base')
# okay, now let's get serious about mixing Python and C signature stuff
@@ -740,14 +761,14 @@ def test_generate():
all_members=True)
# test inner class handling
- assert_processes([('class', 'test_autodoc.Outer'),
- ('class', 'test_autodoc.Outer.Inner'),
- ('method', 'test_autodoc.Outer.Inner.meth')],
+ assert_processes([('class', 'target.Outer'),
+ ('class', 'target.Outer.Inner'),
+ ('method', 'target.Outer.Inner.meth')],
'class', 'Outer', all_members=True)
# test descriptor docstrings
assert_result_contains(' Descriptor instance docstring.',
- 'attribute', 'test_autodoc.Class.descr')
+ 'attribute', 'target.Class.descr')
# test generation for C modules (which have no source file)
directive.env.ref_context['py:module'] = 'time'
@@ -755,7 +776,7 @@ def test_generate():
assert_processes([('function', 'time.asctime')], 'function', 'asctime')
# test autodoc_member_order == 'source'
- directive.env.ref_context['py:module'] = 'test_autodoc'
+ directive.env.ref_context['py:module'] = 'target'
options.private_members = True
if PY3:
roger_line = ' .. py:classmethod:: Class.roger(a, *, b=2, c=3, d=4, e=5, f=6)'
@@ -781,7 +802,7 @@ def test_generate():
del directive.env.ref_context['py:module']
# test attribute initialized to class instance from other module
- directive.env.temp_data['autodoc:class'] = 'test_autodoc.Class'
+ directive.env.temp_data['autodoc:class'] = 'target.Class'
assert_result_contains(u' should be documented as well - s\xfc\xdf',
'attribute', 'mdocattr')
del directive.env.temp_data['autodoc:class']
@@ -789,25 +810,25 @@ def test_generate():
# test autodoc_docstring_signature
assert_result_contains(
'.. py:method:: DocstringSig.meth(FOO, BAR=1) -> BAZ', 'method',
- 'test_autodoc.DocstringSig.meth')
+ 'target.DocstringSig.meth')
assert_result_contains(
- ' rest of docstring', 'method', 'test_autodoc.DocstringSig.meth')
+ ' rest of docstring', 'method', 'target.DocstringSig.meth')
assert_result_contains(
'.. py:method:: DocstringSig.meth2()', 'method',
- 'test_autodoc.DocstringSig.meth2')
+ 'target.DocstringSig.meth2')
assert_result_contains(
' indented line', 'method',
- 'test_autodoc.DocstringSig.meth2')
+ 'target.DocstringSig.meth2')
assert_result_contains(
'.. py:classmethod:: Class.moore(a, e, f) -> happiness', 'method',
- 'test_autodoc.Class.moore')
+ 'target.Class.moore')
# test new attribute documenter behavior
- directive.env.ref_context['py:module'] = 'test_autodoc'
+ directive.env.ref_context['py:module'] = 'target'
options.undoc_members = True
- assert_processes([('class', 'test_autodoc.AttCls'),
- ('attribute', 'test_autodoc.AttCls.a1'),
- ('attribute', 'test_autodoc.AttCls.a2'),
+ assert_processes([('class', 'target.AttCls'),
+ ('attribute', 'target.AttCls.a1'),
+ ('attribute', 'target.AttCls.a2'),
], 'class', 'AttCls')
assert_result_contains(
' :annotation: = hello world', 'attribute', 'AttCls.a1')
@@ -817,40 +838,40 @@ def test_generate():
# test explicit members with instance attributes
del directive.env.temp_data['autodoc:class']
del directive.env.temp_data['autodoc:module']
- directive.env.ref_context['py:module'] = 'test_autodoc'
+ directive.env.ref_context['py:module'] = 'target'
options.inherited_members = False
options.undoc_members = False
options.members = ALL
assert_processes([
- ('class', 'test_autodoc.InstAttCls'),
- ('attribute', 'test_autodoc.InstAttCls.ca1'),
- ('attribute', 'test_autodoc.InstAttCls.ca2'),
- ('attribute', 'test_autodoc.InstAttCls.ca3'),
- ('attribute', 'test_autodoc.InstAttCls.ia1'),
- ('attribute', 'test_autodoc.InstAttCls.ia2'),
+ ('class', 'target.InstAttCls'),
+ ('attribute', 'target.InstAttCls.ca1'),
+ ('attribute', 'target.InstAttCls.ca2'),
+ ('attribute', 'target.InstAttCls.ca3'),
+ ('attribute', 'target.InstAttCls.ia1'),
+ ('attribute', 'target.InstAttCls.ia2'),
], 'class', 'InstAttCls')
del directive.env.temp_data['autodoc:class']
del directive.env.temp_data['autodoc:module']
options.members = ['ca1', 'ia1']
assert_processes([
- ('class', 'test_autodoc.InstAttCls'),
- ('attribute', 'test_autodoc.InstAttCls.ca1'),
- ('attribute', 'test_autodoc.InstAttCls.ia1'),
+ ('class', 'target.InstAttCls'),
+ ('attribute', 'target.InstAttCls.ca1'),
+ ('attribute', 'target.InstAttCls.ia1'),
], 'class', 'InstAttCls')
del directive.env.temp_data['autodoc:class']
del directive.env.temp_data['autodoc:module']
del directive.env.ref_context['py:module']
# test members with enum attributes
- directive.env.ref_context['py:module'] = 'test_autodoc'
+ directive.env.ref_context['py:module'] = 'target'
options.inherited_members = False
options.undoc_members = False
options.members = ALL
assert_processes([
- ('class', 'test_autodoc.EnumCls'),
- ('attribute', 'test_autodoc.EnumCls.val1'),
- ('attribute', 'test_autodoc.EnumCls.val2'),
- ('attribute', 'test_autodoc.EnumCls.val3'),
+ ('class', 'target.EnumCls'),
+ ('attribute', 'target.EnumCls.val1'),
+ ('attribute', 'target.EnumCls.val2'),
+ ('attribute', 'target.EnumCls.val3'),
], 'class', 'EnumCls')
assert_result_contains(
' :annotation: = 12', 'attribute', 'EnumCls.val1')
@@ -864,11 +885,11 @@ def test_generate():
# test descriptor class documentation
options.members = ['CustomDataDescriptor', 'CustomDataDescriptor2']
assert_result_contains('.. py:class:: CustomDataDescriptor(doc)',
- 'module', 'test_autodoc')
+ 'module', 'target')
assert_result_contains(' .. py:method:: CustomDataDescriptor.meth()',
- 'module', 'test_autodoc')
+ 'module', 'target')
assert_result_contains('.. py:class:: CustomDataDescriptor2(doc)',
- 'module', 'test_autodoc')
+ 'module', 'target')
# test mocked module imports
options.members = ['TestAutodoc']
@@ -880,268 +901,3 @@ def test_generate():
options.members = ['decoratedFunction']
assert_result_contains('.. py:function:: decoratedFunction()',
'module', 'autodoc_missing_imports')
-
-
-# --- generate fodder ------------
-__all__ = ['Class']
-
-#: documentation for the integer
-integer = 1
-
-
-def raises(exc, func, *args, **kwds):
- """Raise AssertionError if ``func(*args, **kwds)`` does not raise *exc*."""
- pass
-
-
-class CustomEx(Exception):
- """My custom exception."""
-
- def f(self):
- """Exception method."""
-
-
-class CustomDataDescriptor(object):
- """Descriptor class docstring."""
-
- def __init__(self, doc):
- self.__doc__ = doc
-
- def __get__(self, obj, type=None):
- if obj is None:
- return self
- return 42
-
- def meth(self):
- """Function."""
- return "The Answer"
-
-
-class CustomDataDescriptorMeta(type):
- """Descriptor metaclass docstring."""
-
-
-@add_metaclass(CustomDataDescriptorMeta)
-class CustomDataDescriptor2(CustomDataDescriptor):
- """Descriptor class with custom metaclass docstring."""
-
-
-def _funky_classmethod(name, b, c, d, docstring=None):
- """Generates a classmethod for a class from a template by filling out
- some arguments."""
- def template(cls, a, b, c, d=4, e=5, f=6):
- return a, b, c, d, e, f
- from functools import partial
- function = partial(template, b=b, c=c, d=d)
- function.__name__ = name
- function.__doc__ = docstring
- return classmethod(function)
-
-
-class Base(object):
- def inheritedmeth(self):
- """Inherited function."""
-
-
-class Class(Base):
- """Class to document."""
-
- descr = CustomDataDescriptor("Descriptor instance docstring.")
-
- def meth(self):
- """Function."""
-
- def undocmeth(self):
- pass
-
- def skipmeth(self):
- """Method that should be skipped."""
-
- def excludemeth(self):
- """Method that should be excluded."""
-
- # should not be documented
- skipattr = 'foo'
-
- #: should be documented -- süß
- attr = 'bar'
-
- @property
- def prop(self):
- """Property."""
-
- docattr = 'baz'
- """should likewise be documented -- süß"""
-
- udocattr = 'quux'
- u"""should be documented as well - süß"""
-
- # initialized to any class imported from another module
- mdocattr = StringIO()
- """should be documented as well - süß"""
-
- roger = _funky_classmethod("roger", 2, 3, 4)
-
- moore = _funky_classmethod("moore", 9, 8, 7,
- docstring="moore(a, e, f) -> happiness")
-
- def __init__(self, arg):
- self.inst_attr_inline = None #: an inline documented instance attr
- #: a documented instance attribute
- self.inst_attr_comment = None
- self.inst_attr_string = None
- """a documented instance attribute"""
- self._private_inst_attr = None #: a private instance attribute
-
- def __special1__(self):
- """documented special method"""
-
- def __special2__(self):
- # undocumented special method
- pass
-
-
-class CustomDict(dict):
- """Docstring."""
-
-
-def function(foo, *args, **kwds):
- """
- Return spam.
- """
- pass
-
-
-class Outer(object):
- """Foo"""
-
- class Inner(object):
- """Foo"""
-
- def meth(self):
- """Foo"""
-
- # should be documented as an alias
- factory = dict
-
-
-class DocstringSig(object):
- def meth(self):
- """meth(FOO, BAR=1) -> BAZ
-First line of docstring
-
- rest of docstring
- """
-
- def meth2(self):
- """First line, no signature
- Second line followed by indentation::
-
- indented line
- """
-
- @property
- def prop1(self):
- """DocstringSig.prop1(self)
- First line of docstring
- """
- return 123
-
- @property
- def prop2(self):
- """First line of docstring
- Second line of docstring
- """
- return 456
-
-
-class StrRepr(str):
- def __repr__(self):
- return self
-
-
-class AttCls(object):
- a1 = StrRepr('hello\nworld')
- a2 = None
-
-
-class InstAttCls(object):
- """Class with documented class and instance attributes."""
-
- #: Doc comment for class attribute InstAttCls.ca1.
- #: It can have multiple lines.
- ca1 = 'a'
-
- ca2 = 'b' #: Doc comment for InstAttCls.ca2. One line only.
-
- ca3 = 'c'
- """Docstring for class attribute InstAttCls.ca3."""
-
- def __init__(self):
- #: Doc comment for instance attribute InstAttCls.ia1
- self.ia1 = 'd'
-
- self.ia2 = 'e'
- """Docstring for instance attribute InstAttCls.ia2."""
-
-
-class EnumCls(enum.Enum):
- """
- this is enum class
- """
-
- #: doc for val1
- val1 = 12
- val2 = 23 #: doc for val2
- val3 = 34
- """doc for val3"""
-
-
-def test_type_hints():
- from sphinx.ext.autodoc import formatargspec
- from sphinx.util.inspect import getargspec
-
- try:
- from typing_test_data import f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11
- except (ImportError, SyntaxError):
- pytest.skip('Cannot import Python code with function annotations')
-
- def verify_arg_spec(f, expected):
- assert formatargspec(f, *getargspec(f)) == expected
-
- # Class annotations
- verify_arg_spec(f0, '(x: int, y: numbers.Integral) -> None')
-
- # Generic types with concrete parameters
- verify_arg_spec(f1, '(x: typing.List[int]) -> typing.List[int]')
-
- # TypeVars and generic types with TypeVars
- verify_arg_spec(f2, '(x: typing.List[T],'
- ' y: typing.List[T_co],'
- ' z: T) -> typing.List[T_contra]')
-
- # Union types
- verify_arg_spec(f3, '(x: typing.Union[str, numbers.Integral]) -> None')
-
- # Quoted annotations
- verify_arg_spec(f4, '(x: str, y: str) -> None')
-
- # Keyword-only arguments
- verify_arg_spec(f5, '(x: int, *, y: str, z: str) -> None')
-
- # Keyword-only arguments with varargs
- verify_arg_spec(f6, '(x: int, *args, y: str, z: str) -> None')
-
- # Space around '=' for defaults
- verify_arg_spec(f7, '(x: int = None, y: dict = {}) -> None')
-
- # Callable types
- verify_arg_spec(f8, '(x: typing.Callable[[int, str], int]) -> None')
- verify_arg_spec(f9, '(x: typing.Callable) -> None')
-
- # Tuple types
- verify_arg_spec(f10, '(x: typing.Tuple[int, str],'
- ' y: typing.Tuple[int, ...]) -> None')
-
- # Instance annotations
- verify_arg_spec(f11, '(x: CustomAnnotation, y: 123) -> None')
diff --git a/tests/test_build.py b/tests/test_build.py
index 6533a1763..df0458aa3 100644
--- a/tests/test_build.py
+++ b/tests/test_build.py
@@ -34,8 +34,8 @@ def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
basedir = sphinx_test_tempdir / request.node.originalname
# Windows with versions prior to 3.2 (I think) doesn't support unicode on system path
# so we force a non-unicode path in that case
- if sys.platform == "win32" and \
- not (sys.version_info.major >= 3 and sys.version_info.minor >= 2):
+ if (sys.platform == "win32" and
+ not (sys.version_info.major >= 3 and sys.version_info.minor >= 2)):
return basedir / 'all'
try:
srcdir = basedir / test_name
@@ -64,8 +64,9 @@ def nonascii_srcdir(request, rootdir, sphinx_test_tempdir):
@pytest.mark.parametrize(
"buildername",
[
+ # note: no 'html' - if it's ok with dirhtml it's ok with html
'dirhtml', 'singlehtml', 'pickle', 'json', 'text', 'htmlhelp', 'qthelp',
- 'epub2', 'epub', 'applehelp', 'changes', 'xml', 'pseudoxml', 'linkcheck',
+ 'epub', 'applehelp', 'changes', 'xml', 'pseudoxml', 'linkcheck',
],
)
@mock.patch('sphinx.builders.linkcheck.requests.head',
diff --git a/tests/test_build_epub.py b/tests/test_build_epub.py
index 397547734..e5d86b0ed 100644
--- a/tests/test_build_epub.py
+++ b/tests/test_build_epub.py
@@ -245,5 +245,3 @@ def test_epub_writing_mode(app):
# vertical / writing-mode (CSS)
css = (app.outdir / '_static' / 'epub.css').text()
assert 'writing-mode: vertical-rl;' in css
-
-
diff --git a/tests/test_build_html.py b/tests/test_build_html.py
index 6fc024d35..8265c8471 100644
--- a/tests/test_build_html.py
+++ b/tests/test_build_html.py
@@ -15,7 +15,6 @@ from itertools import cycle, chain
from six import PY3
-from sphinx import __display_version__
from sphinx.util.inventory import InventoryFile
from sphinx.testing.util import remove_unicode_literals, strip_escseq
import xml.etree.cElementTree as ElementTree
@@ -182,8 +181,8 @@ def test_html_warnings(app, warning):
r'-| |-'),
],
'autodoc.html': [
- (".//dt[@id='test_autodoc.Class']", ''),
- (".//dt[@id='test_autodoc.function']/em", r'\*\*kwds'),
+ (".//dt[@id='autodoc_target.Class']", ''),
+ (".//dt[@id='autodoc_target.function']/em", r'\*\*kwds'),
(".//dd/p", r'Return spam\.'),
],
'extapi.html': [
@@ -211,7 +210,7 @@ def test_html_warnings(app, warning):
(".//li/strong", r'^command\\n$'),
(".//li/strong", r'^program\\n$'),
(".//li/em", r'^dfn\\n$'),
- (".//li/code/span[@class='pre']", r'^kbd\\n$'),
+ (".//li/kbd", r'^kbd\\n$'),
(".//li/span", u'File \N{TRIANGULAR BULLET} Close'),
(".//li/code/span[@class='pre']", '^a/$'),
(".//li/code/em/span[@class='pre']", '^varpart$'),
@@ -1226,3 +1225,21 @@ def test_html_remote_images(app, status, warning):
assert ('<img alt="https://www.python.org/static/img/python-logo.png" '
'src="https://www.python.org/static/img/python-logo.png" />' in result)
assert not (app.outdir / 'python-logo.png').exists()
+
+
+@pytest.mark.sphinx('html', testroot='basic')
+def test_html_sidebar(app, status, warning):
+ app.builder.build_all()
+ result = (app.outdir / 'index.html').text(encoding='utf8')
+ assert '<h3><a href="#">Table Of Contents</a></h3>' in result
+ assert '<h3>Related Topics</h3>' in result
+ assert '<h3>This Page</h3>' in result
+ assert '<h3>Quick search</h3>' in result
+
+ app.config.html_sidebars = {'**': []}
+ app.builder.build_all()
+ result = (app.outdir / 'index.html').text(encoding='utf8')
+ assert '<h3><a href="#">Table Of Contents</a></h3>' not in result
+ assert '<h3>Related Topics</h3>' not in result
+ assert '<h3>This Page</h3>' not in result
+ assert '<h3>Quick search</h3>' not in result
diff --git a/tests/test_build_html5.py b/tests/test_build_html5.py
index 217050ec7..4ac70be51 100644
--- a/tests/test_build_html5.py
+++ b/tests/test_build_html5.py
@@ -21,7 +21,6 @@ from html5lib import getTreeBuilder, HTMLParser
from sphinx.util.docutils import is_html5_writer_available
-from sphinx.testing.util import skip_unless
from test_build_html import flat_dict, tail_check, check_xpath
TREE_BUILDER = getTreeBuilder('etree', implementation=ElementTree)
@@ -31,7 +30,7 @@ HTML_PARSER = HTMLParser(TREE_BUILDER, namespaceHTMLElements=False)
etree_cache = {}
-@skip_unless(is_html5_writer_available())
+@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.fixture(scope='module')
def cached_etree_parse():
def parse(fname):
@@ -46,7 +45,7 @@ def cached_etree_parse():
etree_cache.clear()
-@skip_unless(is_html5_writer_available())
+@pytest.mark.skipif(not is_html5_writer_available(), reason='HTML5 writer is not available')
@pytest.mark.parametrize("fname,expect", flat_dict({
'images.html': [
(".//img[@src='_images/img.png']", ''),
@@ -91,8 +90,8 @@ def cached_etree_parse():
r'-| |-'),
],
'autodoc.html': [
- (".//dt[@id='test_autodoc.Class']", ''),
- (".//dt[@id='test_autodoc.function']/em", r'\*\*kwds'),
+ (".//dt[@id='autodoc_target.Class']", ''),
+ (".//dt[@id='autodoc_target.function']/em", r'\*\*kwds'),
(".//dd/p", r'Return spam\.'),
],
'extapi.html': [
@@ -120,7 +119,7 @@ def cached_etree_parse():
(".//li/p/strong", r'^command\\n$'),
(".//li/p/strong", r'^program\\n$'),
(".//li/p/em", r'^dfn\\n$'),
- (".//li/p/code/span[@class='pre']", r'^kbd\\n$'),
+ (".//li/p/kbd", r'^kbd\\n$'),
(".//li/p/span", u'File \N{TRIANGULAR BULLET} Close'),
(".//li/p/code/span[@class='pre']", '^a/$'),
(".//li/p/code/em/span[@class='pre']", '^varpart$'),
diff --git a/tests/test_build_latex.py b/tests/test_build_latex.py
index 0c95a0cdb..ab91d7a48 100644
--- a/tests/test_build_latex.py
+++ b/tests/test_build_latex.py
@@ -24,7 +24,7 @@ from sphinx.util.osutil import cd, ensuredir
from sphinx.util import docutils
from sphinx.writers.latex import LaTeXTranslator
-from sphinx.testing.util import SkipTest, remove_unicode_literals, strip_escseq, skip_if
+from sphinx.testing.util import remove_unicode_literals, strip_escseq
from test_build_html import ENV_WARNINGS
@@ -77,7 +77,7 @@ def compile_latex_document(app):
'SphinxTests.tex'],
stdout=PIPE, stderr=PIPE)
except OSError: # most likely the latex executable was not found
- raise SkipTest
+ raise pytest.skip.Exception
else:
stdout, stderr = p.communicate()
if p.returncode != 0:
@@ -90,7 +90,7 @@ def compile_latex_document(app):
def skip_if_requested(testfunc):
if 'SKIP_LATEX_BUILD' in os.environ:
msg = 'Skip LaTeX builds because SKIP_LATEX_BUILD is set'
- return skip_if(True, msg)(testfunc)
+ return pytest.mark.skipif(True, reason=msg)(testfunc)
else:
return testfunc
@@ -98,7 +98,7 @@ def skip_if_requested(testfunc):
def skip_if_stylefiles_notfound(testfunc):
if kpsetest(*STYLEFILES) is False:
msg = 'not running latex, the required styles do not seem to be installed'
- return skip_if(True, msg)(testfunc)
+ return pytest.mark.skipif(True, reason=msg)(testfunc)
else:
return testfunc
@@ -335,6 +335,56 @@ def test_numref_with_language_ja(app, status, warning):
'\\nameref{\\detokenize{foo:foo}}}') in result
+@pytest.mark.sphinx('latex', testroot='latex-numfig')
+def test_latex_obey_numfig_is_false(app, status, warning):
+ app.builder.build_all()
+
+ result = (app.outdir / 'SphinxManual.tex').text(encoding='utf8')
+ assert '\\usepackage{sphinx}' in result
+
+ result = (app.outdir / 'SphinxHowTo.tex').text(encoding='utf8')
+ assert '\\usepackage{sphinx}' in result
+
+
+@pytest.mark.sphinx(
+ 'latex', testroot='latex-numfig',
+ confoverrides={'numfig': True, 'numfig_secnum_depth': 0})
+def test_latex_obey_numfig_secnum_depth_is_zero(app, status, warning):
+ app.builder.build_all()
+
+ result = (app.outdir / 'SphinxManual.tex').text(encoding='utf8')
+ assert '\\usepackage[,nonumfigreset,mathnumfig]{sphinx}' in result
+
+ result = (app.outdir / 'SphinxHowTo.tex').text(encoding='utf8')
+ assert '\\usepackage[,nonumfigreset,mathnumfig]{sphinx}' in result
+
+
+@pytest.mark.sphinx(
+ 'latex', testroot='latex-numfig',
+ confoverrides={'numfig': True, 'numfig_secnum_depth': 2})
+def test_latex_obey_numfig_secnum_depth_is_two(app, status, warning):
+ app.builder.build_all()
+
+ result = (app.outdir / 'SphinxManual.tex').text(encoding='utf8')
+ assert '\\usepackage[,numfigreset=2,mathnumfig]{sphinx}' in result
+
+ result = (app.outdir / 'SphinxHowTo.tex').text(encoding='utf8')
+ assert '\\usepackage[,numfigreset=3,mathnumfig]{sphinx}' in result
+
+
+@pytest.mark.sphinx(
+ 'latex', testroot='latex-numfig',
+ confoverrides={'numfig': True, 'math_numfig': False})
+def test_latex_obey_numfig_but_math_numfig_false(app, status, warning):
+ app.builder.build_all()
+
+ result = (app.outdir / 'SphinxManual.tex').text(encoding='utf8')
+ assert '\\usepackage[,numfigreset=1]{sphinx}' in result
+
+ result = (app.outdir / 'SphinxHowTo.tex').text(encoding='utf8')
+ assert '\\usepackage[,numfigreset=2]{sphinx}' in result
+
+
@pytest.mark.sphinx('latex')
def test_latex_add_latex_package(app, status, warning):
app.add_latex_package('foo')
@@ -542,8 +592,7 @@ def test_reference_in_caption_and_codeblock_in_footnote(app, status, warning):
assert ('&\nThis is one more footnote with some code in it %\n'
'\\begin{footnote}[10]\\sphinxAtStartFootnote\n'
'Third footnote in longtable\n') in result
- assert ('\\end{sphinxVerbatim}\n\\let\\sphinxVerbatimTitle\\empty\n'
- '\\let\\sphinxLiteralBlockLabel\\empty\n%\n\\end{footnote}.\n') in result
+ assert ('\\end{sphinxVerbatim}\n%\n\\end{footnote}.\n') in result
assert '\\begin{sphinxVerbatim}[commandchars=\\\\\\{\\}]' in result
@@ -724,6 +773,7 @@ def test_toctree_maxdepth_manual(app, status, warning):
assert '\\setcounter{secnumdepth}' not in result
assert '\\chapter{Foo}' in result
+
@pytest.mark.sphinx(
'latex', testroot='toctree-maxdepth',
confoverrides={'latex_documents': [
@@ -740,6 +790,7 @@ def test_toctree_maxdepth_howto(app, status, warning):
assert '\\setcounter{secnumdepth}' not in result
assert '\\section{Foo}' in result
+
@pytest.mark.sphinx(
'latex', testroot='toctree-maxdepth',
confoverrides={'master_doc': 'foo'})
@@ -753,6 +804,7 @@ def test_toctree_not_found(app, status, warning):
assert '\\setcounter{secnumdepth}' not in result
assert '\\chapter{Foo A}' in result
+
@pytest.mark.sphinx(
'latex', testroot='toctree-maxdepth',
confoverrides={'master_doc': 'bar'})
@@ -809,8 +861,8 @@ def test_latex_toplevel_sectioning_is_part(app, status, warning):
'latex', testroot='toctree-maxdepth',
confoverrides={'latex_toplevel_sectioning': 'part',
'latex_documents': [
- ('index', 'Python.tex', 'Sphinx Tests Documentation',
- 'Georg Brandl', 'howto')
+ ('index', 'Python.tex', 'Sphinx Tests Documentation',
+ 'Georg Brandl', 'howto')
]})
def test_latex_toplevel_sectioning_is_part_with_howto(app, status, warning):
app.builder.build_all()
@@ -839,8 +891,8 @@ def test_latex_toplevel_sectioning_is_chapter(app, status, warning):
'latex', testroot='toctree-maxdepth',
confoverrides={'latex_toplevel_sectioning': 'chapter',
'latex_documents': [
- ('index', 'Python.tex', 'Sphinx Tests Documentation',
- 'Georg Brandl', 'howto')
+ ('index', 'Python.tex', 'Sphinx Tests Documentation',
+ 'Georg Brandl', 'howto')
]})
def test_latex_toplevel_sectioning_is_chapter_with_howto(app, status, warning):
app.builder.build_all()
diff --git a/tests/test_build_qthelp.py b/tests/test_build_qthelp.py
new file mode 100644
index 000000000..3e4815fbe
--- /dev/null
+++ b/tests/test_build_qthelp.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""
+ test_build_qthelp
+ ~~~~~~~~~~~~~~~~~
+
+ Test the Qt Help builder and check its output. We don't need to
+ test the HTML itself; that's already handled by
+ :file:`test_build_html.py`.
+
+    :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+
+
+@pytest.mark.sphinx('qthelp', testroot='basic')
+def test_qthelp_namespace(app, status, warning):
+ # default namespace
+ app.builder.build_all()
+ qhp = (app.outdir / 'Python.qhp').text()
+ assert '<namespace>org.sphinx.python</namespace>' in qhp
+
+ # give a namespace
+ app.config.qthelp_namespace = 'org.sphinx-doc.sphinx'
+ app.builder.build_all()
+ qhp = (app.outdir / 'Python.qhp').text()
+ assert '<namespace>org.sphinxdoc.sphinx</namespace>' in qhp
diff --git a/tests/test_build_texinfo.py b/tests/test_build_texinfo.py
index 50f42542d..114f194fe 100644
--- a/tests/test_build_texinfo.py
+++ b/tests/test_build_texinfo.py
@@ -19,7 +19,7 @@ import pytest
from sphinx.writers.texinfo import TexinfoTranslator
-from sphinx.testing.util import SkipTest, remove_unicode_literals, strip_escseq
+from sphinx.testing.util import remove_unicode_literals, strip_escseq
from test_build_html import ENV_WARNINGS
@@ -58,7 +58,7 @@ def test_texinfo(app, status, warning):
p = Popen(['makeinfo', '--no-split', 'SphinxTests.texi'],
stdout=PIPE, stderr=PIPE)
except OSError:
- raise SkipTest # most likely makeinfo was not found
+ raise pytest.skip.Exception # most likely makeinfo was not found
else:
stdout, stderr = p.communicate()
retcode = p.returncode
diff --git a/tests/test_build_text.py b/tests/test_build_text.py
index 382e62b34..9bfbe1206 100644
--- a/tests/test_build_text.py
+++ b/tests/test_build_text.py
@@ -8,12 +8,11 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+import pytest
from docutils.utils import column_width
from sphinx.writers.text import MAXWIDTH
-from sphinx.testing.util import with_app
-
def with_text_app(*args, **kw):
default_kw = {
@@ -21,7 +20,7 @@ def with_text_app(*args, **kw):
'testroot': 'build-text',
}
default_kw.update(kw)
- return with_app(*args, **default_kw)
+ return pytest.mark.sphinx(*args, **default_kw)
@with_text_app()
diff --git a/tests/test_config.py b/tests/test_config.py
index 965f46c3e..3f38c7ab8 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -240,3 +240,15 @@ def test_check_enum(app, status, warning):
def test_check_enum_failed(app, status, warning):
assert "The config value `value17` has to be a one of ('default', 'one', 'two'), " \
"but `invalid` is given." in warning.getvalue()
+
+
+@pytest.mark.sphinx(testroot='config', confoverrides={'value17': ['one', 'two']})
+def test_check_enum_for_list(app, status, warning):
+ assert "The config value `value17` has to be a one of ('default', 'one', 'two'), " \
+ not in warning.getvalue()
+
+
+@pytest.mark.sphinx(testroot='config', confoverrides={'value17': ['one', 'two', 'invalid']})
+def test_check_enum_for_list_failed(app, status, warning):
+ assert "The config value `value17` has to be a one of ('default', 'one', 'two'), " \
+ "but `['one', 'two', 'invalid']` is given." in warning.getvalue()
diff --git a/tests/test_directive_only.py b/tests/test_directive_only.py
index 79544975b..010eae384 100644
--- a/tests/test_directive_only.py
+++ b/tests/test_directive_only.py
@@ -12,7 +12,6 @@
import re
from docutils import nodes
-from sphinx.util.nodes import process_only_nodes
import pytest
diff --git a/tests/test_docutilsconf.py b/tests/test_docutilsconf.py
index d2b56d30d..91bf8fc95 100644
--- a/tests/test_docutilsconf.py
+++ b/tests/test_docutilsconf.py
@@ -14,7 +14,6 @@ import sys
import pytest
from sphinx.testing.path import path
-from sphinx.testing.util import SkipTest
def regex_count(expr, result):
@@ -73,7 +72,7 @@ def test_texinfo(app, status, warning):
@pytest.mark.sphinx('html', testroot='docutilsconf',
docutilsconf='[general]\nsource_link=true\n')
-@pytest.mark.skip(sys.platform == "win32" and \
+@pytest.mark.skip(sys.platform == "win32" and
not (sys.version_info.major >= 3 and sys.version_info.minor >= 2),
reason="Python < 3.2 on Win32 doesn't handle non-ASCII paths right")
def test_docutils_source_link_with_nonascii_file(app, status, warning):
@@ -83,7 +82,7 @@ def test_docutils_source_link_with_nonascii_file(app, status, warning):
(srcdir / (mb_name + '.txt')).write_text('')
except UnicodeEncodeError:
from sphinx.testing.path import FILESYSTEMENCODING
- raise SkipTest(
+ raise pytest.skip.Exception(
'nonascii filename not supported on this filesystem encoding: '
'%s', FILESYSTEMENCODING)
diff --git a/tests/test_domain_cpp.py b/tests/test_domain_cpp.py
index 7437b4c05..3561e76ce 100644
--- a/tests/test_domain_cpp.py
+++ b/tests/test_domain_cpp.py
@@ -16,30 +16,25 @@ import pytest
from sphinx import addnodes
from sphinx.domains.cpp import DefinitionParser, DefinitionError, NoOldIdError
-from sphinx.domains.cpp import Symbol
+from sphinx.domains.cpp import Symbol, _max_id, _id_prefix
import sphinx.domains.cpp as cppDomain
-ids = []
-
def parse(name, string):
class Config(object):
cpp_id_attributes = ["id_attr"]
cpp_paren_attributes = ["paren_attr"]
parser = DefinitionParser(string, None, Config())
+ parser.allowFallbackExpressionParsing = False
ast = parser.parse_declaration(name)
- if not parser.eof:
- print("Parsing stopped at", parser.pos)
- print(string)
- print('-' * parser.pos + '^')
- raise DefinitionError("")
+ parser.assert_end()
# The scopedness would usually have been set by CPPEnumObject
if name == "enum":
ast.scoped = None # simulate unscoped enum
return ast
-def check(name, input, idv1output=None, idv2output=None, output=None):
+def check(name, input, idDict, output=None):
# first a simple check of the AST
if output is None:
output = input
@@ -58,37 +53,43 @@ def check(name, input, idv1output=None, idv2output=None, output=None):
parentNode += signode
ast.describe_signature(signode, 'lastIsName', symbol, options={})
- if idv2output:
- idv2output = "_CPPv2" + idv2output
- try:
- idv1 = ast.get_id_v1()
- assert idv1 is not None
- except NoOldIdError:
- idv1 = None
- try:
- idv2 = ast.get_id_v2()
- assert idv2 is not None
- except NoOldIdError:
- idv2 = None
- if idv1 != idv1output or idv2 != idv2output:
+ idExpected = [None]
+ for i in range(1, _max_id + 1):
+ if i in idDict:
+ idExpected.append(idDict[i])
+ else:
+ idExpected.append(idExpected[i - 1])
+ idActual = [None]
+ for i in range(1, _max_id + 1):
+ try:
+ id = ast.get_id(version=i)
+ assert id is not None
+ idActual.append(id[len(_id_prefix[i]):])
+ except NoOldIdError:
+ idActual.append(None)
+
+ res = [True]
+ for i in range(1, _max_id + 1):
+ res.append(idExpected[i] == idActual[i])
+
+ if not all(res):
print("input: %s" % text_type(input).rjust(20))
- print(" %s %s" % ("Id v1".rjust(20), "Id v2".rjust(20)))
- print("result: %s %s" % (str(idv1).rjust(20), str(idv2).rjust(20)))
- print("expected: %s %s" % (str(idv1output).rjust(20),
- str(idv2output).rjust(20)))
+ for i in range(1, _max_id + 1):
+ if res[i]:
+ continue
+ print("Error in id version %d." % i)
+ print("result: %s" % str(idActual[i]))
+ print("expected: %s" % str(idExpected[i]))
print(rootSymbol.dump(0))
raise DefinitionError("")
- ids.append(ast.get_id_v2())
- # print ".. %s:: %s" % (name, input)
def test_fundamental_types():
# see http://en.cppreference.com/w/cpp/language/types
for t, id_v2 in cppDomain._id_fundamental_v2.items():
- if t == "decltype(auto)":
- continue
-
def makeIdV1():
+ if t == 'decltype(auto)':
+ return None
id = t.replace(" ", "-").replace("long", "l").replace("int", "i")
id = id.replace("bool", "b").replace("char", "c")
id = id.replace("wc_t", "wchar_t").replace("c16_t", "char16_t")
@@ -100,55 +101,163 @@ def test_fundamental_types():
if t == "std::nullptr_t":
id = "NSt9nullptr_tE"
return "1f%s" % id
- check("function", "void f(%s arg)" % t, makeIdV1(), makeIdV2())
+ check("function", "void f(%s arg)" % t, {1: makeIdV1(), 2: makeIdV2()})
+
+
+def test_expressions():
+ def exprCheck(expr, id):
+ ids = 'IE1CIA%s_1aE'
+ check('class', 'template<> C<a[%s]>' % expr, {2: ids % expr, 3: ids % id})
+ # primary
+ exprCheck('nullptr', 'LDnE')
+ exprCheck('true', 'L1E')
+ exprCheck('false', 'L0E')
+ ints = ['5', '0', '075', '0xF', '0XF', '0b1', '0B1']
+ unsignedSuffix = ['', 'u', 'U']
+ longSuffix = ['', 'l', 'L', 'll', 'LL']
+ for i in ints:
+ for u in unsignedSuffix:
+ for l in longSuffix:
+ expr = i + u + l
+ exprCheck(expr, 'L' + expr + 'E')
+ expr = i + l + u
+ exprCheck(expr, 'L' + expr + 'E')
+ for suffix in ['', 'f', 'F', 'l', 'L']:
+ expr = '5.0' + suffix
+ exprCheck(expr, 'L' + expr + 'E')
+ exprCheck('"abc\\"cba"', 'LA8_KcE') # string
+ # TODO: test the rest
+ exprCheck('(... + Ns)', '(... + Ns)')
+ exprCheck('(5)', 'L5E')
+ exprCheck('C', '1C')
+ # postfix
+ exprCheck('A(2)', 'cl1AL2EE')
+ exprCheck('A[2]', 'ix1AL2E')
+ exprCheck('a.b.c', 'dtdt1a1b1c')
+ exprCheck('a->b->c', 'ptpt1a1b1c')
+ exprCheck('i++', 'pp1i')
+ exprCheck('i--', 'mm1i')
+ # TODO, those with prefixes
+ # unary
+ exprCheck('++5', 'pp_L5E')
+ exprCheck('--5', 'mm_L5E')
+ exprCheck('*5', 'deL5E')
+ exprCheck('&5', 'adL5E')
+ exprCheck('+5', 'psL5E')
+ exprCheck('-5', 'ngL5E')
+ exprCheck('!5', 'ntL5E')
+ exprCheck('~5', 'coL5E')
+ exprCheck('sizeof...(a)', 'sZ1a')
+ exprCheck('sizeof(T)', 'st1T')
+ exprCheck('sizeof -42', 'szngL42E')
+ exprCheck('alignof(T)', 'at1T')
+ exprCheck('noexcept(-42)', 'nxngL42E')
+ # cast
+ exprCheck('(int)2', 'cviL2E')
+ # binary op
+ exprCheck('5 || 42', 'ooL5EL42E')
+ exprCheck('5 && 42', 'aaL5EL42E')
+ exprCheck('5 | 42', 'orL5EL42E')
+ exprCheck('5 ^ 42', 'eoL5EL42E')
+ exprCheck('5 & 42', 'anL5EL42E')
+ # ['==', '!=']
+ exprCheck('5 == 42', 'eqL5EL42E')
+ exprCheck('5 != 42', 'neL5EL42E')
+ # ['<=', '>=', '<', '>']
+ exprCheck('5 <= 42', 'leL5EL42E')
+ exprCheck('5 >= 42', 'geL5EL42E')
+ exprCheck('5 < 42', 'ltL5EL42E')
+ exprCheck('5 > 42', 'gtL5EL42E')
+ # ['<<', '>>']
+ exprCheck('5 << 42', 'lsL5EL42E')
+ exprCheck('5 >> 42', 'rsL5EL42E')
+ # ['+', '-']
+ exprCheck('5 + 42', 'plL5EL42E')
+ exprCheck('5 - 42', 'miL5EL42E')
+ # ['*', '/', '%']
+ exprCheck('5 * 42', 'mlL5EL42E')
+ exprCheck('5 / 42', 'dvL5EL42E')
+ exprCheck('5 % 42', 'rmL5EL42E')
+ # ['.*', '->*']
+ exprCheck('5 .* 42', 'dsL5EL42E')
+ exprCheck('5 ->* 42', 'pmL5EL42E')
+ # conditional
+ # TODO
+ # assignment
+ exprCheck('a = 5', 'aS1aL5E')
+ exprCheck('a *= 5', 'mL1aL5E')
+ exprCheck('a /= 5', 'dV1aL5E')
+ exprCheck('a %= 5', 'rM1aL5E')
+ exprCheck('a += 5', 'pL1aL5E')
+ exprCheck('a -= 5', 'mI1aL5E')
+ exprCheck('a >>= 5', 'rS1aL5E')
+ exprCheck('a <<= 5', 'lS1aL5E')
+ exprCheck('a &= 5', 'aN1aL5E')
+ exprCheck('a ^= 5', 'eO1aL5E')
+ exprCheck('a |= 5', 'oR1aL5E')
+
+ # Additional tests
+ # a < expression that starts with something that could be a template
+ exprCheck('A < 42', 'lt1AL42E')
+ check('function', 'template<> void f(A<B, 2> &v)',
+ {2: "IE1fR1AI1BX2EE", 3: "IE1fR1AI1BXL2EEE"})
+ exprCheck('A<1>::value', 'N1AIXL1EEE5valueE')
+ check('class', "template<int T = 42> A", {2: "I_iE1A"})
+ check('enumerator', 'A = std::numeric_limits<unsigned long>::max()', {2: "1A"})
+
+ exprCheck('operator()()', 'clclE')
+ exprCheck('operator()<int>()', 'clclIiEE')
def test_type_definitions():
- check("type", "public bool b", "b", "1b", "bool b")
- check("type", "bool A::b", "A::b", "N1A1bE")
- check("type", "bool *b", "b", "1b")
- check("type", "bool *const b", "b", "1b")
- check("type", "bool *volatile const b", "b", "1b")
- check("type", "bool *volatile const b", "b", "1b")
- check("type", "bool *volatile const *b", "b", "1b")
- check("type", "bool &b", "b", "1b")
- check("type", "bool b[]", "b", "1b")
- check("type", "std::pair<int, int> coord", "coord", "5coord")
- check("type", "long long int foo", "foo", "3foo")
+ check("type", "public bool b", {1: "b", 2: "1b"}, "bool b")
+ check("type", "bool A::b", {1: "A::b", 2: "N1A1bE"})
+ check("type", "bool *b", {1: "b", 2: "1b"})
+ check("type", "bool *const b", {1: "b", 2: "1b"})
+ check("type", "bool *volatile const b", {1: "b", 2: "1b"})
+ check("type", "bool *volatile const b", {1: "b", 2: "1b"})
+ check("type", "bool *volatile const *b", {1: "b", 2: "1b"})
+ check("type", "bool &b", {1: "b", 2: "1b"})
+ check("type", "bool b[]", {1: "b", 2: "1b"})
+ check("type", "std::pair<int, int> coord", {1: "coord", 2: "5coord"})
+ check("type", "long long int foo", {1: "foo", 2: "3foo"})
check("type", 'std::vector<std::pair<std::string, long long>> module::blah',
- "module::blah", "N6module4blahE")
- check("type", "std::function<void()> F", "F", "1F")
- check("type", "std::function<R(A1, A2)> F", "F", "1F")
- check("type", "std::function<R(A1, A2, A3)> F", "F", "1F")
- check("type", "std::function<R(A1, A2, A3, As...)> F", "F", "1F")
+ {1: "module::blah", 2: "N6module4blahE"})
+ check("type", "std::function<void()> F", {1: "F", 2: "1F"})
+ check("type", "std::function<R(A1, A2)> F", {1: "F", 2: "1F"})
+ check("type", "std::function<R(A1, A2, A3)> F", {1: "F", 2: "1F"})
+ check("type", "std::function<R(A1, A2, A3, As...)> F", {1: "F", 2: "1F"})
check("type", "MyContainer::const_iterator",
- "MyContainer::const_iterator", "N11MyContainer14const_iteratorE")
+ {1: "MyContainer::const_iterator", 2: "N11MyContainer14const_iteratorE"})
check("type",
"public MyContainer::const_iterator",
- "MyContainer::const_iterator", "N11MyContainer14const_iteratorE",
+ {1: "MyContainer::const_iterator", 2: "N11MyContainer14const_iteratorE"},
output="MyContainer::const_iterator")
# test decl specs on right
- check("type", "bool const b", "b", "1b")
+ check("type", "bool const b", {1: "b", 2: "1b"})
# test name in global scope
- check("type", "bool ::B::b", "B::b", "N1B1bE")
+ check("type", "bool ::B::b", {1: "B::b", 2: "N1B1bE"})
- check('type', 'A = B', None, '1A')
+ check('type', 'A = B', {2: '1A'})
+ check('type', 'A = decltype(b)', {2: '1A'})
# from breathe#267 (named function parameters for function pointers
check('type', 'void (*gpio_callback_t)(struct device *port, uint32_t pin)',
- 'gpio_callback_t', '15gpio_callback_t')
- check('type', 'void (*f)(std::function<void(int i)> g)', 'f', '1f')
+ {1: 'gpio_callback_t', 2: '15gpio_callback_t'})
+ check('type', 'void (*f)(std::function<void(int i)> g)', {1: 'f', 2: '1f'})
+
+ check('type', 'T = A::template B<int>::template C<double>', {2: '1T'})
+
+ check('type', 'T = Q<A::operator()>', {2: '1T'})
+ check('type', 'T = Q<A::operator()<int>>', {2: '1T'})
+ check('type', 'T = Q<A::operator bool>', {2: '1T'})
def test_concept_definitions():
check('concept', 'template<typename Param> A::B::Concept',
- None, 'I0EN1A1B7ConceptE')
+ {2: 'I0EN1A1B7ConceptE'})
check('concept', 'template<typename A, typename B, typename ...C> Foo',
- None, 'I00DpE3Foo')
- check('concept', 'template<typename Param> A::B::Concept()',
- None, 'I0EN1A1B7ConceptE')
- check('concept', 'template<typename A, typename B, typename ...C> Foo()',
- None, 'I00DpE3Foo')
+ {2: 'I00DpE3Foo'})
with pytest.raises(DefinitionError):
parse('concept', 'Foo')
with pytest.raises(DefinitionError):
@@ -157,256 +266,270 @@ def test_concept_definitions():
def test_member_definitions():
check('member', ' const std::string & name = 42',
- "name__ssCR", "4name", output='const std::string &name = 42')
- check('member', ' const std::string & name', "name__ssCR", "4name",
+ {1: "name__ssCR", 2: "4name"}, output='const std::string &name = 42')
+ check('member', ' const std::string & name', {1: "name__ssCR", 2: "4name"},
output='const std::string &name')
check('member', ' const std::string & name [ n ]',
- "name__ssCRA", "4name", output='const std::string &name[n]')
+ {1: "name__ssCRA", 2: "4name"}, output='const std::string &name[n]')
check('member', 'const std::vector< unsigned int, long> &name',
- "name__std::vector:unsigned-i.l:CR",
- "4name", output='const std::vector<unsigned int, long> &name')
- check('member', 'module::myclass foo[n]', "foo__module::myclassA", "3foo")
- check('member', 'int *const p', 'p__iPC', '1p')
- check('member', 'extern int myInt', 'myInt__i', '5myInt')
- check('member', 'thread_local int myInt', 'myInt__i', '5myInt')
- check('member', 'extern thread_local int myInt', 'myInt__i', '5myInt')
- check('member', 'thread_local extern int myInt', 'myInt__i', '5myInt',
+ {1: "name__std::vector:unsigned-i.l:CR", 2: "4name"},
+ output='const std::vector<unsigned int, long> &name')
+ check('member', 'module::myclass foo[n]', {1: "foo__module::myclassA", 2: "3foo"})
+ check('member', 'int *const p', {1: 'p__iPC', 2: '1p'})
+ check('member', 'extern int myInt', {1: 'myInt__i', 2: '5myInt'})
+ check('member', 'thread_local int myInt', {1: 'myInt__i', 2: '5myInt'})
+ check('member', 'extern thread_local int myInt', {1: 'myInt__i', 2: '5myInt'})
+ check('member', 'thread_local extern int myInt', {1: 'myInt__i', 2: '5myInt'},
'extern thread_local int myInt')
def test_function_definitions():
- check('function', 'operator bool() const', "castto-b-operatorC", "NKcvbEv")
+ check('function', 'operator bool() const', {1: "castto-b-operatorC", 2: "NKcvbEv"})
check('function', 'A::operator bool() const',
- "A::castto-b-operatorC", "NK1AcvbEv")
+ {1: "A::castto-b-operatorC", 2: "NK1AcvbEv"})
check('function', 'A::operator bool() volatile const &',
- "A::castto-b-operatorVCR", "NVKR1AcvbEv")
+ {1: "A::castto-b-operatorVCR", 2: "NVKR1AcvbEv"})
check('function', 'A::operator bool() volatile const &&',
- "A::castto-b-operatorVCO", "NVKO1AcvbEv")
+ {1: "A::castto-b-operatorVCO", 2: "NVKO1AcvbEv"})
check('function', 'bool namespaced::theclass::method(arg1, arg2)',
- "namespaced::theclass::method__arg1.arg2",
- "N10namespaced8theclass6methodE4arg14arg2")
+ {1: "namespaced::theclass::method__arg1.arg2",
+ 2: "N10namespaced8theclass6methodE4arg14arg2"})
x = 'std::vector<std::pair<std::string, int>> &module::test(register int ' \
'foo, bar, std::string baz = "foobar, blah, bleh") const = 0'
- check('function', x, "module::test__i.bar.ssC",
- "NK6module4testEi3barNSt6stringE")
+ check('function', x, {1: "module::test__i.bar.ssC",
+ 2: "NK6module4testEi3barNSt6stringE"})
check('function', 'void f(std::pair<A, B>)',
- "f__std::pair:A.B:", "1fNSt4pairI1A1BEE")
+ {1: "f__std::pair:A.B:", 2: "1fNSt4pairI1A1BEE"})
check('function', 'explicit module::myclass::foo::foo()',
- "module::myclass::foo::foo", "N6module7myclass3foo3fooEv")
+ {1: "module::myclass::foo::foo", 2: "N6module7myclass3foo3fooEv"})
check('function', 'module::myclass::foo::~foo()',
- "module::myclass::foo::~foo", "N6module7myclass3fooD0Ev")
+ {1: "module::myclass::foo::~foo", 2: "N6module7myclass3fooD0Ev"})
check('function', 'int printf(const char *fmt, ...)',
- "printf__cCP.z", "6printfPKcz")
+ {1: "printf__cCP.z", 2: "6printfPKcz"})
check('function', 'int foo(const unsigned int j)',
- "foo__unsigned-iC", "3fooKj")
+ {1: "foo__unsigned-iC", 2: "3fooKj"})
check('function', 'int foo(const int *const ptr)',
- "foo__iCPC", "3fooPCKi")
+ {1: "foo__iCPC", 2: "3fooPCKi"})
check('function', 'module::myclass::operator std::vector<std::string>()',
- "module::myclass::castto-std::vector:ss:-operator",
- "N6module7myclasscvNSt6vectorINSt6stringEEEEv")
+ {1: "module::myclass::castto-std::vector:ss:-operator",
+ 2: "N6module7myclasscvNSt6vectorINSt6stringEEEEv"})
check('function',
'void operator()(const boost::array<VertexID, 2> &v) const',
- "call-operator__boost::array:VertexID.2:CRC",
- "NKclERKN5boost5arrayI8VertexIDX2EEE")
+ {1: "call-operator__boost::array:VertexID.2:CRC",
+ 2: "NKclERKN5boost5arrayI8VertexIDX2EEE",
+ 3: "NKclERKN5boost5arrayI8VertexIDXL2EEEE"})
check('function',
'void operator()(const boost::array<VertexID, 2, "foo, bar"> &v) const',
- 'call-operator__boost::array:VertexID.2."foo,--bar":CRC',
- 'NKclERKN5boost5arrayI8VertexIDX2EX"foo, bar"EEE')
+ {1: 'call-operator__boost::array:VertexID.2."foo,--bar":CRC',
+ 2: 'NKclERKN5boost5arrayI8VertexIDX2EX"foo, bar"EEE',
+ 3: 'NKclERKN5boost5arrayI8VertexIDXL2EEXLA9_KcEEEE'})
check('function', 'MyClass::MyClass(MyClass::MyClass&&)',
- "MyClass::MyClass__MyClass::MyClassRR",
- "N7MyClass7MyClassERRN7MyClass7MyClassE")
- check('function', 'constexpr int get_value()', "get_valueCE", "9get_valuev")
+ {1: "MyClass::MyClass__MyClass::MyClassRR",
+ 2: "N7MyClass7MyClassERRN7MyClass7MyClassE"})
+ check('function', 'constexpr int get_value()', {1: "get_valueCE", 2: "9get_valuev"})
check('function', 'static constexpr int get_value()',
- "get_valueCE", "9get_valuev")
+ {1: "get_valueCE", 2: "9get_valuev"})
check('function', 'int get_value() const noexcept',
- "get_valueC", "NK9get_valueEv")
+ {1: "get_valueC", 2: "NK9get_valueEv"})
check('function', 'int get_value() const noexcept = delete',
- "get_valueC", "NK9get_valueEv")
+ {1: "get_valueC", 2: "NK9get_valueEv"})
check('function', 'int get_value() volatile const',
- "get_valueVC", "NVK9get_valueEv")
+ {1: "get_valueVC", 2: "NVK9get_valueEv"})
check('function', 'MyClass::MyClass(MyClass::MyClass&&) = default',
- "MyClass::MyClass__MyClass::MyClassRR",
- "N7MyClass7MyClassERRN7MyClass7MyClassE")
+ {1: "MyClass::MyClass__MyClass::MyClassRR",
+ 2: "N7MyClass7MyClassERRN7MyClass7MyClassE"})
check('function', 'virtual MyClass::a_virtual_function() const override',
- "MyClass::a_virtual_functionC", "NK7MyClass18a_virtual_functionEv")
- check('function', 'A B() override', "B", "1Bv")
- check('function', 'A B() final', "B", "1Bv")
- check('function', 'A B() final override', "B", "1Bv")
- check('function', 'A B() override final', "B", "1Bv",
+ {1: "MyClass::a_virtual_functionC", 2: "NK7MyClass18a_virtual_functionEv"})
+ check('function', 'A B() override', {1: "B", 2: "1Bv"})
+ check('function', 'A B() final', {1: "B", 2: "1Bv"})
+ check('function', 'A B() final override', {1: "B", 2: "1Bv"})
+ check('function', 'A B() override final', {1: "B", 2: "1Bv"},
output='A B() final override')
check('function', 'MyClass::a_member_function() volatile',
- "MyClass::a_member_functionV", "NV7MyClass17a_member_functionEv")
+ {1: "MyClass::a_member_functionV", 2: "NV7MyClass17a_member_functionEv"})
check('function', 'MyClass::a_member_function() volatile const',
- "MyClass::a_member_functionVC", "NVK7MyClass17a_member_functionEv")
+ {1: "MyClass::a_member_functionVC", 2: "NVK7MyClass17a_member_functionEv"})
check('function', 'MyClass::a_member_function() &&',
- "MyClass::a_member_functionO", "NO7MyClass17a_member_functionEv")
+ {1: "MyClass::a_member_functionO", 2: "NO7MyClass17a_member_functionEv"})
check('function', 'MyClass::a_member_function() &',
- "MyClass::a_member_functionR", "NR7MyClass17a_member_functionEv")
+ {1: "MyClass::a_member_functionR", 2: "NR7MyClass17a_member_functionEv"})
check('function', 'MyClass::a_member_function() const &',
- "MyClass::a_member_functionCR", "NKR7MyClass17a_member_functionEv")
+ {1: "MyClass::a_member_functionCR", 2: "NKR7MyClass17a_member_functionEv"})
check('function', 'int main(int argc, char *argv[])',
- "main__i.cPA", "4mainiA_Pc")
+ {1: "main__i.cPA", 2: "4mainiA_Pc"})
check('function', 'MyClass &MyClass::operator++()',
- "MyClass::inc-operator", "N7MyClassppEv")
+ {1: "MyClass::inc-operator", 2: "N7MyClassppEv"})
check('function', 'MyClass::pointer MyClass::operator->()',
- "MyClass::pointer-operator", "N7MyClassptEv")
+ {1: "MyClass::pointer-operator", 2: "N7MyClassptEv"})
x = 'std::vector<std::pair<std::string, int>> &module::test(register int ' \
'foo, bar[n], std::string baz = "foobar, blah, bleh") const = 0'
- check('function', x, "module::test__i.barA.ssC",
- "NK6module4testEiAn_3barNSt6stringE")
+ check('function', x, {1: "module::test__i.barA.ssC",
+ 2: "NK6module4testEiAn_3barNSt6stringE",
+ 3: "NK6module4testEiA1n_3barNSt6stringE"})
check('function',
'int foo(Foo f = Foo(double(), std::make_pair(int(2), double(3.4))))',
- "foo__Foo", "3foo3Foo")
- check('function', 'int foo(A a = x(a))', "foo__A", "3foo1A")
+ {1: "foo__Foo", 2: "3foo3Foo"})
+ check('function', 'int foo(A a = x(a))', {1: "foo__A", 2: "3foo1A"})
with pytest.raises(DefinitionError):
parse('function', 'int foo(B b=x(a)')
with pytest.raises(DefinitionError):
parse('function', 'int foo)C c=x(a))')
with pytest.raises(DefinitionError):
parse('function', 'int foo(D d=x(a')
- check('function', 'int foo(const A&... a)', "foo__ACRDp", "3fooDpRK1A")
- check('function', 'virtual void f()', "f", "1fv")
+ check('function', 'int foo(const A&... a)', {1: "foo__ACRDp", 2: "3fooDpRK1A"})
+ check('function', 'virtual void f()', {1: "f", 2: "1fv"})
# test for ::nestedName, from issue 1738
check("function", "result(int val, ::std::error_category const &cat)",
- "result__i.std::error_categoryCR", "6resultiRNSt14error_categoryE")
- check("function", "int *f()", "f", "1fv")
+ {1: "result__i.std::error_categoryCR", 2: "6resultiRNSt14error_categoryE"})
+ check("function", "int *f()", {1: "f", 2: "1fv"})
# tests derived from issue #1753 (skip to keep sanity)
- check("function", "f(int (&array)[10])", None, "1fRA10_i")
- check("function", "void f(int (&array)[10])", None, "1fRA10_i")
- check("function", "void f(float *q(double))", None, "1fFPfdE")
- check("function", "void f(float *(*q)(double))", None, "1fPFPfdE")
- check("function", "void f(float (*q)(double))", None, "1fPFfdE")
- check("function", "int (*f(double d))(float)", "f__double", "1fd")
- check("function", "int (*f(bool b))[5]", "f__b", "1fb")
+ check("function", "f(int (&array)[10])", {2: "1fRA10_i", 3: "1fRAL10E_i"})
+ check("function", "void f(int (&array)[10])", {2: "1fRA10_i", 3: "1fRAL10E_i"})
+ check("function", "void f(float *q(double))", {2: "1fFPfdE"})
+ check("function", "void f(float *(*q)(double))", {2: "1fPFPfdE"})
+ check("function", "void f(float (*q)(double))", {2: "1fPFfdE"})
+ check("function", "int (*f(double d))(float)", {1: "f__double", 2: "1fd"})
+ check("function", "int (*f(bool b))[5]", {1: "f__b", 2: "1fb"})
check("function", "int (*A::f(double d) const)(float)",
- "A::f__doubleC", "NK1A1fEd")
+ {1: "A::f__doubleC", 2: "NK1A1fEd"})
check("function", "void f(std::shared_ptr<int(double)> ptr)",
- None, "1fNSt10shared_ptrIFidEEE")
- check("function", "void f(int *const p)", "f__iPC", "1fPCi")
- check("function", "void f(int *volatile const p)", "f__iPVC", "1fPVCi")
+ {2: "1fNSt10shared_ptrIFidEEE"})
+ check("function", "void f(int *const p)", {1: "f__iPC", 2: "1fPCi"})
+ check("function", "void f(int *volatile const p)", {1: "f__iPVC", 2: "1fPVCi"})
+
+ check('function', 'extern int f()', {1: 'f', 2: '1fv'})
- check('function', 'extern int f()', 'f', '1fv')
+ check('function', 'decltype(auto) f()', {1: 'f', 2: "1fv"})
# TODO: make tests for functions in a template, e.g., Test<int&&()>
# such that the id generation for function type types is correct.
check('function', 'friend std::ostream &f(std::ostream&, int)',
- 'f__osR.i', '1fRNSt7ostreamEi')
+ {1: 'f__osR.i', 2: '1fRNSt7ostreamEi'})
# from breathe#223
- check('function', 'void f(struct E e)', 'f__E', '1f1E')
- check('function', 'void f(class E e)', 'f__E', '1f1E')
- check('function', 'void f(typename E e)', 'f__E', '1f1E')
- check('function', 'void f(enum E e)', 'f__E', '1f1E')
- check('function', 'void f(union E e)', 'f__E', '1f1E')
+ check('function', 'void f(struct E e)', {1: 'f__E', 2: '1f1E'})
+ check('function', 'void f(class E e)', {1: 'f__E', 2: '1f1E'})
+ check('function', 'void f(typename E e)', {1: 'f__E', 2: '1f1E'})
+ check('function', 'void f(enum E e)', {1: 'f__E', 2: '1f1E'})
+ check('function', 'void f(union E e)', {1: 'f__E', 2: '1f1E'})
# pointer to member (function)
- check('function', 'void f(int C::*)', None, '1fM1Ci')
- check('function', 'void f(int C::* p)', None, '1fM1Ci')
- check('function', 'void f(int ::C::* p)', None, '1fM1Ci')
- check('function', 'void f(int C::* const)', None, '1fKM1Ci')
- check('function', 'void f(int C::* const&)', None, '1fRKM1Ci')
- check('function', 'void f(int C::* volatile)', None, '1fVM1Ci')
- check('function', 'void f(int C::* const volatile)', None, '1fVKM1Ci',
+ check('function', 'void f(int C::*)', {2: '1fM1Ci'})
+ check('function', 'void f(int C::* p)', {2: '1fM1Ci'})
+ check('function', 'void f(int ::C::* p)', {2: '1fM1Ci'})
+ check('function', 'void f(int C::* const)', {2: '1fKM1Ci'})
+ check('function', 'void f(int C::* const&)', {2: '1fRKM1Ci'})
+ check('function', 'void f(int C::* volatile)', {2: '1fVM1Ci'})
+ check('function', 'void f(int C::* const volatile)', {2: '1fVKM1Ci'},
output='void f(int C::* volatile const)')
- check('function', 'void f(int C::* volatile const)', None, '1fVKM1Ci')
- check('function', 'void f(int (C::*)(float, double))', None, '1fM1CFifdE')
- check('function', 'void f(int (C::* p)(float, double))', None, '1fM1CFifdE')
- check('function', 'void f(int (::C::* p)(float, double))', None, '1fM1CFifdE')
- check('function', 'void f(void (C::*)() const &)', None, '1fM1CKRFvvE')
- check('function', 'int C::* f(int, double)', None, '1fid')
- check('function', 'void f(int C::* *)', None, '1fPM1Ci')
+ check('function', 'void f(int C::* volatile const)', {2: '1fVKM1Ci'})
+ check('function', 'void f(int (C::*)(float, double))', {2: '1fM1CFifdE'})
+ check('function', 'void f(int (C::* p)(float, double))', {2: '1fM1CFifdE'})
+ check('function', 'void f(int (::C::* p)(float, double))', {2: '1fM1CFifdE'})
+ check('function', 'void f(void (C::*)() const &)', {2: '1fM1CKRFvvE'})
+ check('function', 'int C::* f(int, double)', {2: '1fid'})
+ check('function', 'void f(int C::* *)', {2: '1fPM1Ci'})
def test_operators():
check('function', 'void operator new [ ] ()',
- "new-array-operator", "nav", output='void operator new[]()')
+ {1: "new-array-operator", 2: "nav"}, output='void operator new[]()')
check('function', 'void operator delete ()',
- "delete-operator", "dlv", output='void operator delete()')
+ {1: "delete-operator", 2: "dlv"}, output='void operator delete()')
check('function', 'operator bool() const',
- "castto-b-operatorC", "NKcvbEv", output='operator bool() const')
+ {1: "castto-b-operatorC", 2: "NKcvbEv"}, output='operator bool() const')
check('function', 'void operator * ()',
- "mul-operator", "mlv", output='void operator*()')
+ {1: "mul-operator", 2: "mlv"}, output='void operator*()')
check('function', 'void operator - ()',
- "sub-operator", "miv", output='void operator-()')
+ {1: "sub-operator", 2: "miv"}, output='void operator-()')
check('function', 'void operator + ()',
- "add-operator", "plv", output='void operator+()')
+ {1: "add-operator", 2: "plv"}, output='void operator+()')
check('function', 'void operator = ()',
- "assign-operator", "aSv", output='void operator=()')
+ {1: "assign-operator", 2: "aSv"}, output='void operator=()')
check('function', 'void operator / ()',
- "div-operator", "dvv", output='void operator/()')
+ {1: "div-operator", 2: "dvv"}, output='void operator/()')
check('function', 'void operator % ()',
- "mod-operator", "rmv", output='void operator%()')
+ {1: "mod-operator", 2: "rmv"}, output='void operator%()')
check('function', 'void operator ! ()',
- "not-operator", "ntv", output='void operator!()')
+ {1: "not-operator", 2: "ntv"}, output='void operator!()')
check('function', 'void operator "" _udl()',
- None, 'li4_udlv', output='void operator""_udl()')
+ {2: 'li4_udlv'}, output='void operator""_udl()')
def test_class_definitions():
- check('class', 'public A', "A", "1A", output='A')
- check('class', 'private A', "A", "1A")
- check('class', 'A final', 'A', '1A')
+ check('class', 'public A', {1: "A", 2: "1A"}, output='A')
+ check('class', 'private A', {1: "A", 2: "1A"})
+ check('class', 'A final', {1: 'A', 2: '1A'})
# test bases
- check('class', 'A', "A", "1A")
- check('class', 'A::B::C', "A::B::C", "N1A1B1CE")
- check('class', 'A : B', "A", "1A")
- check('class', 'A : private B', "A", "1A", output='A : B')
- check('class', 'A : public B', "A", "1A")
- check('class', 'A : B, C', "A", "1A")
- check('class', 'A : B, protected C, D', "A", "1A")
- check('class', 'A : virtual private B', 'A', '1A', output='A : virtual B')
- check('class', 'A : B, virtual C', 'A', '1A')
- check('class', 'A : public virtual B', 'A', '1A')
- check('class', 'A : B, C...', 'A', '1A')
- check('class', 'A : B..., C', 'A', '1A')
+ check('class', 'A', {1: "A", 2: "1A"})
+ check('class', 'A::B::C', {1: "A::B::C", 2: "N1A1B1CE"})
+ check('class', 'A : B', {1: "A", 2: "1A"})
+ check('class', 'A : private B', {1: "A", 2: "1A"}, output='A : B')
+ check('class', 'A : public B', {1: "A", 2: "1A"})
+ check('class', 'A : B, C', {1: "A", 2: "1A"})
+ check('class', 'A : B, protected C, D', {1: "A", 2: "1A"})
+ check('class', 'A : virtual private B', {1: 'A', 2: '1A'}, output='A : virtual B')
+ check('class', 'A : B, virtual C', {1: 'A', 2: '1A'})
+ check('class', 'A : public virtual B', {1: 'A', 2: '1A'})
+ check('class', 'A : B, C...', {1: 'A', 2: '1A'})
+ check('class', 'A : B..., C', {1: 'A', 2: '1A'})
+
+ # from #4094
+ check('class', 'template<class, class = std::void_t<>> has_var', {2: 'I00E7has_var'})
+ check('class', 'template<class T> has_var<T, std::void_t<decltype(&T::var)>>',
+ {2: 'I0E7has_varI1TNSt6void_tIDTadN1T3varEEEEE'})
def test_enum_definitions():
- check('enum', 'A', None, "1A")
- check('enum', 'A : std::underlying_type<B>::type', None, "1A")
- check('enum', 'A : unsigned int', None, "1A")
- check('enum', 'public A', None, "1A", output='A')
- check('enum', 'private A', None, "1A")
+ check('enum', 'A', {2: "1A"})
+ check('enum', 'A : std::underlying_type<B>::type', {2: "1A"})
+ check('enum', 'A : unsigned int', {2: "1A"})
+ check('enum', 'public A', {2: "1A"}, output='A')
+ check('enum', 'private A', {2: "1A"})
- check('enumerator', 'A', None, "1A")
- check('enumerator', 'A = std::numeric_limits<unsigned long>::max()',
- None, "1A")
+ check('enumerator', 'A', {2: "1A"})
+ check('enumerator', 'A = std::numeric_limits<unsigned long>::max()', {2: "1A"})
def test_templates():
- check('class', "A<T>", None, "IE1AI1TE", output="template<> A<T>")
+ check('class', "A<T>", {2: "IE1AI1TE"}, output="template<> A<T>")
# first just check which objects support templating
- check('class', "template<> A", None, "IE1A")
- check('function', "template<> void A()", None, "IE1Av")
- check('member', "template<> A a", None, "IE1a")
- check('type', "template<> a = A", None, "IE1a")
+ check('class', "template<> A", {2: "IE1A"})
+ check('function', "template<> void A()", {2: "IE1Av"})
+ check('member', "template<> A a", {2: "IE1a"})
+ check('type', "template<> a = A", {2: "IE1a"})
with pytest.raises(DefinitionError):
parse('enum', "template<> A")
with pytest.raises(DefinitionError):
parse('enumerator', "template<> A")
# then all the real tests
- check('class', "template<typename T1, typename T2> A", None, "I00E1A")
- check('type', "template<> a", None, "IE1a")
-
- check('class', "template<typename T> A", None, "I0E1A")
- check('class', "template<class T> A", None, "I0E1A")
- check('class', "template<typename ...T> A", None, "IDpE1A")
- check('class', "template<typename...> A", None, "IDpE1A")
- check('class', "template<typename = Test> A", None, "I0E1A")
- check('class', "template<typename T = Test> A", None, "I0E1A")
-
- check('class', "template<template<typename> typename T> A",
- None, "II0E0E1A")
- check('class', "template<int> A", None, "I_iE1A")
- check('class', "template<int T> A", None, "I_iE1A")
- check('class', "template<int... T> A", None, "I_DpiE1A")
- check('class', "template<int T = 42> A", None, "I_iE1A")
- check('class', "template<int = 42> A", None, "I_iE1A")
+ check('class', "template<typename T1, typename T2> A", {2: "I00E1A"})
+ check('type', "template<> a", {2: "IE1a"})
+
+ check('class', "template<typename T> A", {2: "I0E1A"})
+ check('class', "template<class T> A", {2: "I0E1A"})
+ check('class', "template<typename ...T> A", {2: "IDpE1A"})
+ check('class', "template<typename...> A", {2: "IDpE1A"})
+ check('class', "template<typename = Test> A", {2: "I0E1A"})
+ check('class', "template<typename T = Test> A", {2: "I0E1A"})
+
+ check('class', "template<template<typename> typename T> A", {2: "II0E0E1A"})
+ check('class', "template<template<typename> typename> A", {2: "II0E0E1A"})
+ check('class', "template<template<typename> typename ...T> A", {2: "II0EDpE1A"})
+ check('class', "template<template<typename> typename...> A", {2: "II0EDpE1A"})
+
+ check('class', "template<int> A", {2: "I_iE1A"})
+ check('class', "template<int T> A", {2: "I_iE1A"})
+ check('class', "template<int... T> A", {2: "I_DpiE1A"})
+ check('class', "template<int T = 42> A", {2: "I_iE1A"})
+ check('class', "template<int = 42> A", {2: "I_iE1A"})
+
+ check('class', "template<> A<NS::B<>>", {2: "IE1AIN2NS1BIEEE"})
# from #2058
check('function',
@@ -414,8 +537,8 @@ def test_templates():
"inline std::basic_ostream<Char, Traits> &operator<<("
"std::basic_ostream<Char, Traits> &os, "
"const c_string_view_base<const Char, Traits> &str)",
- None, "I00ElsRNSt13basic_ostreamI4Char6TraitsEE"
- "RK18c_string_view_baseIK4Char6TraitsE")
+ {2: "I00ElsRNSt13basic_ostreamI4Char6TraitsEE"
+ "RK18c_string_view_baseIK4Char6TraitsE"})
# template introductions
with pytest.raises(DefinitionError):
@@ -423,65 +546,75 @@ def test_templates():
with pytest.raises(DefinitionError):
parse('enumerator', 'abc::ns::foo{id_0, id_1, id_2} A')
check('class', 'abc::ns::foo{id_0, id_1, id_2} xyz::bar',
- None, 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE')
+ {2: 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE'})
check('class', 'abc::ns::foo{id_0, id_1, ...id_2} xyz::bar',
- None, 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE')
+ {2: 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE'})
check('class', 'abc::ns::foo{id_0, id_1, id_2} xyz::bar<id_0, id_1, id_2>',
- None, 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barI4id_04id_14id_2EE')
+ {2: 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barI4id_04id_14id_2EE'})
check('class', 'abc::ns::foo{id_0, id_1, ...id_2} xyz::bar<id_0, id_1, id_2...>',
- None, 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barI4id_04id_1Dp4id_2EE')
+ {2: 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barI4id_04id_1Dp4id_2EE'})
- check('class', 'template<> Concept{U} A<int>::B',
- None, 'IEI0EX7ConceptI1UEEN1AIiE1BE')
+ check('class', 'template<> Concept{U} A<int>::B', {2: 'IEI0EX7ConceptI1UEEN1AIiE1BE'})
check('type', 'abc::ns::foo{id_0, id_1, id_2} xyz::bar = ghi::qux',
- None, 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE')
+ {2: 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE'})
check('type', 'abc::ns::foo{id_0, id_1, ...id_2} xyz::bar = ghi::qux',
- None, 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE')
+ {2: 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE'})
check('function', 'abc::ns::foo{id_0, id_1, id_2} void xyz::bar()',
- None, 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barEv')
+ {2: 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barEv'})
check('function', 'abc::ns::foo{id_0, id_1, ...id_2} void xyz::bar()',
- None, 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barEv')
+ {2: 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barEv'})
check('member', 'abc::ns::foo{id_0, id_1, id_2} ghi::qux xyz::bar',
- None, 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE')
+ {2: 'I000EXN3abc2ns3fooEI4id_04id_14id_2EEN3xyz3barE'})
check('member', 'abc::ns::foo{id_0, id_1, ...id_2} ghi::qux xyz::bar',
- None, 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE')
- check('concept', 'Iterator{T, U} Another',
- None, 'I00EX8IteratorI1T1UEE7Another')
+ {2: 'I00DpEXN3abc2ns3fooEI4id_04id_1sp4id_2EEN3xyz3barE'})
+ check('concept', 'Iterator{T, U} Another', {2: 'I00EX8IteratorI1T1UEE7Another'})
check('concept', 'template<typename ...Pack> Numerics = (... && Numeric<Pack>)',
- None, 'IDpE8Numerics')
+ {2: 'IDpE8Numerics'})
+
+ # explicit specializations of members
+ check('member', 'template<> int A<int>::a', {2: 'IEN1AIiE1aE'})
+ check('member', 'template int A<int>::a', {2: 'IEN1AIiE1aE'},
+ output='template<> int A<int>::a') # same as above
+ check('member', 'template<> template<> int A<int>::B<int>::b', {2: 'IEIEN1AIiE1BIiE1bE'})
+ check('member', 'template int A<int>::B<int>::b', {2: 'IEIEN1AIiE1BIiE1bE'},
+ output='template<> template<> int A<int>::B<int>::b') # same as above
+
+ # defaulted constrained type parameters
+ check('type', 'template<C T = int&> A', {2: 'I_1CE1A'})
def test_template_args():
# from breathe#218
check('function',
"template<typename F> "
- "void allow(F *f, typename func<F, B, G!=1>::type tt)",
- None, "I0E5allowP1FN4funcI1F1BXG!=1EE4typeE")
+ "void allow(F *f, typename func<F, B, G != 1>::type tt)",
+ {2: "I0E5allowP1FN4funcI1F1BXG != 1EE4typeE",
+ 3: "I0E5allowP1FN4funcI1F1BXne1GL1EEE4typeE"})
# from #3542
check('type', "template<typename T> "
"enable_if_not_array_t = std::enable_if_t<!is_array<T>::value, int>",
- None, "I0E21enable_if_not_array_t")
+ {2: "I0E21enable_if_not_array_t"})
def test_attributes():
# style: C++
- check('member', '[[]] int f', 'f__i', '1f')
- check('member', '[ [ ] ] int f', 'f__i', '1f',
+ check('member', '[[]] int f', {1: 'f__i', 2: '1f'})
+ check('member', '[ [ ] ] int f', {1: 'f__i', 2: '1f'},
# this will fail when the proper grammar is implemented
output='[[ ]] int f')
- check('member', '[[a]] int f', 'f__i', '1f')
+ check('member', '[[a]] int f', {1: 'f__i', 2: '1f'})
# style: GNU
- check('member', '__attribute__(()) int f', 'f__i', '1f')
- check('member', '__attribute__((a)) int f', 'f__i', '1f')
- check('member', '__attribute__((a, b)) int f', 'f__i', '1f')
+ check('member', '__attribute__(()) int f', {1: 'f__i', 2: '1f'})
+ check('member', '__attribute__((a)) int f', {1: 'f__i', 2: '1f'})
+ check('member', '__attribute__((a, b)) int f', {1: 'f__i', 2: '1f'})
# style: user-defined id
- check('member', 'id_attr int f', 'f__i', '1f')
+ check('member', 'id_attr int f', {1: 'f__i', 2: '1f'})
# style: user-defined paren
- check('member', 'paren_attr() int f', 'f__i', '1f')
- check('member', 'paren_attr(a) int f', 'f__i', '1f')
- check('member', 'paren_attr("") int f', 'f__i', '1f')
- check('member', 'paren_attr(()[{}][]{}) int f', 'f__i', '1f')
+ check('member', 'paren_attr() int f', {1: 'f__i', 2: '1f'})
+ check('member', 'paren_attr(a) int f', {1: 'f__i', 2: '1f'})
+ check('member', 'paren_attr("") int f', {1: 'f__i', 2: '1f'})
+ check('member', 'paren_attr(()[{}][]{}) int f', {1: 'f__i', 2: '1f'})
with pytest.raises(DefinitionError):
parse('member', 'paren_attr(() int f')
with pytest.raises(DefinitionError):
@@ -497,7 +630,7 @@ def test_attributes():
# position: decl specs
check('function', 'static inline __attribute__(()) void f()',
- 'f', '1fv',
+ {1: 'f', 2: '1fv'},
output='__attribute__(()) static inline void f()')
diff --git a/tests/test_domain_py.py b/tests/test_domain_py.py
index b917540d8..0c1d28dd9 100644
--- a/tests/test_domain_py.py
+++ b/tests/test_domain_py.py
@@ -109,11 +109,12 @@ def test_domain_py_xrefs(app, status, warning):
'ModTopLevel', 'class')
assert_refnode(refnodes[6], 'module_b.submodule', 'ModTopLevel',
'ModNoModule', 'class')
- assert_refnode(refnodes[7], False, False, 'int', 'obj')
- assert_refnode(refnodes[8], False, False, 'tuple', 'obj')
- assert_refnode(refnodes[9], False, False, 'str', 'obj')
- assert_refnode(refnodes[10], False, False, 'float', 'obj')
- assert len(refnodes) == 11
+ assert_refnode(refnodes[7], False, False, 'int', 'class')
+ assert_refnode(refnodes[8], False, False, 'tuple', 'class')
+ assert_refnode(refnodes[9], False, False, 'str', 'class')
+ assert_refnode(refnodes[10], False, False, 'float', 'class')
+ assert_refnode(refnodes[11], False, False, 'list', 'class')
+ assert len(refnodes) == 12
doctree = app.env.get_doctree('module_option')
refnodes = list(doctree.traverse(addnodes.pending_xref))
diff --git a/tests/test_environment.py b/tests/test_environment.py
index adc1e306b..6f9ffec08 100644
--- a/tests/test_environment.py
+++ b/tests/test_environment.py
@@ -22,7 +22,7 @@ def setup_module(rootdir, sphinx_test_tempdir):
global app, env
srcdir = sphinx_test_tempdir / 'root-envtest'
if not srcdir.exists():
- (rootdir/'test-root').copytree(srcdir)
+ (rootdir / 'test-root').copytree(srcdir)
app = SphinxTestApp(srcdir=srcdir)
env = app.env
yield
diff --git a/tests/test_apidoc.py b/tests/test_ext_apidoc.py
index fd0049c8f..7492fdea3 100644
--- a/tests/test_apidoc.py
+++ b/tests/test_ext_apidoc.py
@@ -15,7 +15,7 @@ from collections import namedtuple
import pytest
-from sphinx.apidoc import main as apidoc_main
+from sphinx.ext.apidoc import main as apidoc_main
from sphinx.testing.util import remove_unicode_literals
@@ -25,7 +25,7 @@ def apidoc(rootdir, tempdir, apidoc_params):
_, kwargs = apidoc_params
coderoot = rootdir / kwargs.get('coderoot', 'test-root')
outdir = tempdir / 'out'
- args = ['sphinx-apidoc', '-o', outdir, '-F', coderoot] + kwargs.get('options', [])
+ args = ['-o', outdir, '-F', coderoot] + kwargs.get('options', [])
apidoc_main(args)
return namedtuple('apidoc', 'coderoot,outdir')(coderoot, outdir)
@@ -60,7 +60,7 @@ def test_simple(make_app, apidoc):
@pytest.mark.apidoc(
- coderoot='test-apidoc-pep420',
+ coderoot='test-apidoc-pep420/a',
options=["--implicit-namespaces"],
)
def test_pep_0420_enabled(make_app, apidoc):
@@ -97,7 +97,7 @@ def test_pep_0420_enabled(make_app, apidoc):
assert "a.b.x namespace\n" in txt
-@pytest.mark.apidoc(coderoot='test-apidoc-pep420')
+@pytest.mark.apidoc(coderoot='test-apidoc-pep420/a')
def test_pep_0420_disabled(make_app, apidoc):
outdir = apidoc.outdir
assert (outdir / 'conf.py').isfile()
@@ -152,10 +152,10 @@ def test_trailing_underscore(make_app, apidoc):
@pytest.mark.apidoc(
coderoot='test-root',
options=[
- '--doc-project', u'プロジェクト名'.encode('utf-8'),
- '--doc-author', u'著者名'.encode('utf-8'),
- '--doc-version', u'バージョン'.encode('utf-8'),
- '--doc-release', u'リリース'.encode('utf-8'),
+ '--doc-project', u'プロジェクト名',
+ '--doc-author', u'著者名',
+ '--doc-version', u'バージョン',
+ '--doc-release', u'リリース',
],
)
def test_multibyte_parameters(make_app, apidoc):
diff --git a/tests/test_ext_autosummary.py b/tests/test_ext_autosummary.py
index a4cd9e03b..0aea99df6 100644
--- a/tests/test_ext_autosummary.py
+++ b/tests/test_ext_autosummary.py
@@ -11,7 +11,7 @@
from six import iteritems, StringIO
-from sphinx.ext.autosummary import mangle_signature
+from sphinx.ext.autosummary import mangle_signature, import_by_name
from sphinx.testing.util import etree_parse
@@ -145,3 +145,27 @@ def test_autosummary_generate(app, status, warning):
' ~Foo.__init__\n'
' ~Foo.bar\n'
' \n' in Foo)
+
+
+def test_import_by_name():
+ import sphinx
+ import sphinx.ext.autosummary
+
+ prefixed_name, obj, parent, modname = import_by_name('sphinx')
+ assert prefixed_name == 'sphinx'
+ assert obj is sphinx
+ assert parent is None
+ assert modname == 'sphinx'
+
+ prefixed_name, obj, parent, modname = import_by_name('sphinx.ext.autosummary.__name__')
+ assert prefixed_name == 'sphinx.ext.autosummary.__name__'
+ assert obj is sphinx.ext.autosummary.__name__
+ assert parent is sphinx.ext.autosummary
+ assert modname == 'sphinx.ext.autosummary'
+
+ prefixed_name, obj, parent, modname = \
+ import_by_name('sphinx.ext.autosummary.Autosummary.get_items')
+ assert prefixed_name == 'sphinx.ext.autosummary.Autosummary.get_items'
+ assert obj == sphinx.ext.autosummary.Autosummary.get_items
+ assert parent is sphinx.ext.autosummary.Autosummary
+ assert modname == 'sphinx.ext.autosummary'
diff --git a/tests/test_ext_coverage.py b/tests/test_ext_coverage.py
index d1b4b55c4..a8f222a00 100644
--- a/tests/test_ext_coverage.py
+++ b/tests/test_ext_coverage.py
@@ -21,9 +21,9 @@ def test_build(app, status, warning):
py_undoc = (app.outdir / 'python.txt').text()
assert py_undoc.startswith('Undocumented Python objects\n'
'===========================\n')
- assert 'test_autodoc\n------------\n' in py_undoc
+ assert 'autodoc_target\n--------------\n' in py_undoc
assert ' * Class -- missing methods:\n' in py_undoc
- assert ' * process_docstring\n' in py_undoc
+ assert ' * raises\n' in py_undoc
assert ' * function\n' not in py_undoc # these two are documented
assert ' * Class\n' not in py_undoc # in autodoc.txt
@@ -40,9 +40,9 @@ def test_build(app, status, warning):
# the key is the full path to the header file, which isn't testable
assert list(undoc_c.values())[0] == set([('function', 'Py_SphinxTest')])
- assert 'test_autodoc' in undoc_py
- assert 'funcs' in undoc_py['test_autodoc']
- assert 'process_docstring' in undoc_py['test_autodoc']['funcs']
- assert 'classes' in undoc_py['test_autodoc']
- assert 'Class' in undoc_py['test_autodoc']['classes']
- assert 'undocmeth' in undoc_py['test_autodoc']['classes']['Class']
+ assert 'autodoc_target' in undoc_py
+ assert 'funcs' in undoc_py['autodoc_target']
+ assert 'raises' in undoc_py['autodoc_target']['funcs']
+ assert 'classes' in undoc_py['autodoc_target']
+ assert 'Class' in undoc_py['autodoc_target']['classes']
+ assert 'undocmeth' in undoc_py['autodoc_target']['classes']['Class']
diff --git a/tests/test_ext_graphviz.py b/tests/test_ext_graphviz.py
index e59b697be..762add6f0 100644
--- a/tests/test_ext_graphviz.py
+++ b/tests/test_ext_graphviz.py
@@ -40,6 +40,7 @@ def test_graphviz_png_html(app, status, warning):
r'}\" />\n</div>')
assert re.search(html, content, re.S)
+
@pytest.mark.sphinx('html', testroot='ext-graphviz',
confoverrides={'graphviz_output_format': 'svg'})
@pytest.mark.usefixtures('if_graphviz_found')
@@ -80,6 +81,7 @@ def test_graphviz_svg_html(app, status, warning):
r'</div>')
assert re.search(html, content, re.S)
+
@pytest.mark.sphinx('latex', testroot='ext-graphviz')
@pytest.mark.usefixtures('if_graphviz_found')
def test_graphviz_latex(app, status, warning):
diff --git a/tests/test_ext_inheritance_diagram.py b/tests/test_ext_inheritance_diagram.py
index 1b168e622..deb04ce15 100644
--- a/tests/test_ext_inheritance_diagram.py
+++ b/tests/test_ext_inheritance_diagram.py
@@ -11,9 +11,11 @@
import re
import sys
-from sphinx.ext.inheritance_diagram import InheritanceException, import_classes
+
import pytest
+from sphinx.ext.inheritance_diagram import InheritanceException, import_classes
+
@pytest.mark.sphinx('html', testroot='ext-inheritance_diagram')
@pytest.mark.usefixtures('if_graphviz_found')
@@ -43,6 +45,30 @@ def test_inheritance_diagram_latex(app, status, warning):
assert re.search(pattern, content, re.M)
+@pytest.mark.sphinx('html', testroot='ext-inheritance_diagram',
+ srcdir='ext-inheritance_diagram-alias')
+@pytest.mark.usefixtures('if_graphviz_found')
+def test_inheritance_diagram_latex_alias(app, status, warning):
+ app.config.inheritance_alias = {'test.Foo': 'alias.Foo'}
+ app.builder.build_all()
+
+ doc = app.env.get_and_resolve_doctree('index', app)
+ aliased_graph = doc.children[0].children[3]['graph'].class_info
+ assert len(aliased_graph) == 3
+ assert ('test.Baz', 'test.Baz', ['test.Bar'], None) in aliased_graph
+ assert ('test.Bar', 'test.Bar', ['alias.Foo'], None) in aliased_graph
+ assert ('alias.Foo', 'alias.Foo', [], None) in aliased_graph
+
+ content = (app.outdir / 'index.html').text()
+
+ pattern = ('<div class="figure" id="id1">\n'
+ '<img src="_images/inheritance-\\w+.png" alt="Inheritance diagram of test.Foo" '
+ 'class="inheritance"/>\n<p class="caption"><span class="caption-text">'
+ 'Test Foo!</span><a class="headerlink" href="#id1" '
+ 'title="Permalink to this image">\xb6</a></p>')
+ assert re.search(pattern, content, re.M)
+
+
def test_import_classes(rootdir):
from sphinx.application import Sphinx, TemplateBridge
from sphinx.util.i18n import CatalogInfo
diff --git a/tests/test_ext_intersphinx.py b/tests/test_ext_intersphinx.py
index 371f296ea..1e5ffc5f1 100644
--- a/tests/test_ext_intersphinx.py
+++ b/tests/test_ext_intersphinx.py
@@ -233,11 +233,11 @@ def test_missing_reference_cppdomain(tempdir, app, status, warning):
' title="(in foo v2.0)"><code class="xref cpp cpp-class docutils literal">'
'<span class="pre">Bar</span></code></a>' in html)
assert ('<a class="reference external"'
- ' href="https://docs.python.org/index.html#std"'
- ' title="(in foo v2.0)">std</a>' in html)
+ ' href="https://docs.python.org/index.html#foons"'
+ ' title="(in foo v2.0)">foons</a>' in html)
assert ('<a class="reference external"'
- ' href="https://docs.python.org/index.html#std_uint8_t"'
- ' title="(in foo v2.0)">uint8_t</a>' in html)
+ ' href="https://docs.python.org/index.html#foons_bartype"'
+ ' title="(in foo v2.0)">bartype</a>' in html)
def test_missing_reference_jsdomain(tempdir, app, status, warning):
diff --git a/tests/test_ext_math.py b/tests/test_ext_math.py
index 2b8a68f9c..5bf4ebb15 100644
--- a/tests/test_ext_math.py
+++ b/tests/test_ext_math.py
@@ -9,10 +9,23 @@
:license: BSD, see LICENSE for details.
"""
+import os
import re
+import subprocess
import pytest
-from sphinx.testing.util import SkipTest
+
+
+def has_binary(binary):
+ try:
+ subprocess.check_output([binary])
+ except OSError as e:
+ if e.errno == os.errno.ENOENT:
+ # handle file not found error.
+ return False
+ else:
+ return True
+ return True
@pytest.mark.sphinx(
@@ -35,14 +48,16 @@ def test_jsmath(app, status, warning):
assert '<div class="math">\na + 1 &lt; b</div>' in content
+@pytest.mark.skipif(not has_binary('dvipng'),
+ reason='Requires dvipng" binary')
@pytest.mark.sphinx('html', testroot='ext-math-simple',
confoverrides = {'extensions': ['sphinx.ext.imgmath']})
def test_imgmath_png(app, status, warning):
app.builder.build_all()
if "LaTeX command 'latex' cannot be run" in warning.getvalue():
- raise SkipTest('LaTeX command "latex" is not available')
+ raise pytest.skip.Exception('LaTeX command "latex" is not available')
if "dvipng command 'dvipng' cannot be run" in warning.getvalue():
- raise SkipTest('dvipng command "dvipng" is not available')
+ raise pytest.skip.Exception('dvipng command "dvipng" is not available')
content = (app.outdir / 'index.html').text()
html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.png"'
@@ -50,15 +65,17 @@ def test_imgmath_png(app, status, warning):
assert re.search(html, content, re.S)
+@pytest.mark.skipif(not has_binary('dvisvgm'),
+ reason='Requires dvisvgm" binary')
@pytest.mark.sphinx('html', testroot='ext-math-simple',
confoverrides={'extensions': ['sphinx.ext.imgmath'],
'imgmath_image_format': 'svg'})
def test_imgmath_svg(app, status, warning):
app.builder.build_all()
if "LaTeX command 'latex' cannot be run" in warning.getvalue():
- raise SkipTest('LaTeX command "latex" is not available')
+ raise pytest.skip.Exception('LaTeX command "latex" is not available')
if "dvisvgm command 'dvisvgm' cannot be run" in warning.getvalue():
- raise SkipTest('dvisvgm command "dvisvgm" is not available')
+ raise pytest.skip.Exception('dvisvgm command "dvisvgm" is not available')
content = (app.outdir / 'index.html').text()
html = (r'<div class="math">\s*<p>\s*<img src="_images/math/\w+.svg"'
@@ -117,3 +134,75 @@ def test_math_number_all_latex(app, status, warning):
macro = r'Referencing equation \\eqref{equation:math:foo}.'
assert re.search(macro, content, re.S)
+
+
+@pytest.mark.sphinx('html', testroot='ext-math',
+ confoverrides={'extensions': ['sphinx.ext.mathjax'],
+ 'math_eqref_format': 'Eq.{number}'})
+def test_math_eqref_format_html(app, status, warning):
+ app.builder.build_all()
+
+ content = (app.outdir / 'math.html').text()
+ html = ('<p>Referencing equation <a class="reference internal" '
+ 'href="#equation-foo">Eq.1</a>.</p>')
+ assert html in content
+
+
+@pytest.mark.sphinx('latex', testroot='ext-math',
+ confoverrides={'extensions': ['sphinx.ext.mathjax'],
+ 'math_eqref_format': 'Eq.{number}'})
+def test_math_eqref_format_latex(app, status, warning):
+ app.builder.build_all()
+
+ content = (app.outdir / 'test.tex').text()
+ macro = r'Referencing equation Eq.\\ref{equation:math:foo}.'
+ assert re.search(macro, content, re.S)
+
+
+@pytest.mark.sphinx('html', testroot='ext-math',
+ confoverrides={'extensions': ['sphinx.ext.mathjax'],
+ 'numfig': True,
+ 'math_numfig': True})
+def test_mathjax_numfig_html(app, status, warning):
+ app.builder.build_all()
+
+ content = (app.outdir / 'math.html').text()
+ html = ('<div class="math" id="equation-math:0">\n'
+ '<span class="eqno">(1.2)')
+ assert html in content
+ html = ('<p>Referencing equation <a class="reference internal" '
+ 'href="#equation-foo">(1.1)</a>.</p>')
+ assert html in content
+
+
+@pytest.mark.sphinx('html', testroot='ext-math',
+ confoverrides={'extensions': ['sphinx.ext.jsmath'],
+ 'jsmath_path': 'dummy.js',
+ 'numfig': True,
+ 'math_numfig': True})
+def test_jsmath_numfig_html(app, status, warning):
+ app.builder.build_all()
+
+ content = (app.outdir / 'math.html').text()
+ html = '<span class="eqno">(1.2)<a class="headerlink" href="#equation-math:0"'
+ assert html in content
+ html = ('<p>Referencing equation <a class="reference internal" '
+ 'href="#equation-foo">(1.1)</a>.</p>')
+ assert html in content
+
+
+@pytest.mark.sphinx('html', testroot='ext-math',
+ confoverrides={'extensions': ['sphinx.ext.imgmath'],
+ 'numfig': True,
+ 'numfig_secnum_depth': 0,
+ 'math_numfig': True})
+def test_imgmath_numfig_html(app, status, warning):
+ app.builder.build_all()
+
+ content = (app.outdir / 'page.html').text()
+ html = '<span class="eqno">(3)<a class="headerlink" href="#equation-bar"'
+ assert html in content
+ html = ('<p>Referencing equations <a class="reference internal" '
+ 'href="math.html#equation-foo">(1)</a> and '
+ '<a class="reference internal" href="#equation-bar">(3)</a>.</p>')
+ assert html in content
diff --git a/tests/test_ext_todo.py b/tests/test_ext_todo.py
index 99eb7b801..0260b821d 100644
--- a/tests/test_ext_todo.py
+++ b/tests/test_ext_todo.py
@@ -85,6 +85,7 @@ def test_todo_not_included(app, status, warning):
assert len(todos) == 2
assert set(todo[1].astext() for todo in todos) == set(['todo in foo', 'todo in bar'])
+
@pytest.mark.sphinx('latex', testroot='ext-todo', freshenv=True,
confoverrides={'todo_include_todos': True})
def test_todo_valid_link(app, status, warning):
@@ -109,8 +110,7 @@ def test_todo_valid_link(app, status, warning):
target = m[0]
# Look for the targets of this link.
- labels = [m for m in re.findall(r'\\label\{([^}]*)}', content)
- if m == target]
+ labels = [m for m in re.findall(r'\\label\{([^}]*)}', content) if m == target]
# If everything is correct we should have exactly one target.
assert len(labels) == 1
diff --git a/tests/test_intl.py b/tests/test_intl.py
index 920def588..cb13b00f3 100644
--- a/tests/test_intl.py
+++ b/tests/test_intl.py
@@ -222,6 +222,7 @@ def test_text_inconsistency_warnings(app, warning):
u'.*/refs_inconsistency.txt:\\d+: WARNING: citation not found: ref3')
assert_re_search(expected_citation_warning_expr, warnings)
+
@sphinx_intl
@pytest.mark.sphinx('text')
@pytest.mark.test_params(shared_result='test_intl_basic')
diff --git a/tests/test_io.py b/tests/test_io.py
new file mode 100644
index 000000000..ecd4a1009
--- /dev/null
+++ b/tests/test_io.py
@@ -0,0 +1,118 @@
+# -*- coding: utf-8 -*-
+"""
+ test_sphinx_io
+ ~~~~~~~~~~~~~~
+
+ Tests io modules.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+from six import StringIO
+
+from sphinx.io import SphinxRSTFileInput
+
+
+@pytest.mark.sphinx(testroot='basic')
+def test_SphinxRSTFileInput(app):
+ app.env.temp_data['docname'] = 'index'
+
+ # normal case
+ text = ('hello Sphinx world\n'
+ 'Sphinx is a document generator')
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == ['hello Sphinx world',
+ 'Sphinx is a document generator']
+ assert result.info(0) == ('dummy.rst', 0)
+ assert result.info(1) == ('dummy.rst', 1)
+ assert result.info(2) == ('dummy.rst', None) # out of range
+
+ # having rst_prolog ends without CR
+ app.env.config.rst_prolog = 'this is rst_prolog\nhello reST!'
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == ['this is rst_prolog',
+ 'hello reST!',
+ '',
+ 'hello Sphinx world',
+ 'Sphinx is a document generator']
+ assert result.info(0) == ('<rst_prolog>', 0)
+ assert result.info(1) == ('<rst_prolog>', 1)
+ assert result.info(2) == ('<generated>', 0)
+ assert result.info(3) == ('dummy.rst', 0)
+ assert result.info(4) == ('dummy.rst', 1)
+
+ # having rst_prolog ends with CR
+ app.env.config.rst_prolog = 'this is rst_prolog\nhello reST!\n'
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == ['this is rst_prolog',
+ 'hello reST!',
+ '',
+ 'hello Sphinx world',
+ 'Sphinx is a document generator']
+
+ # having docinfo and rst_prolog
+ docinfo_text = (':title: test of SphinxFileInput\n'
+ ':author: Sphinx team\n'
+ '\n'
+ 'hello Sphinx world\n'
+ 'Sphinx is a document generator\n')
+ app.env.config.rst_prolog = 'this is rst_prolog\nhello reST!'
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(docinfo_text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == [':title: test of SphinxFileInput',
+ ':author: Sphinx team',
+ '',
+ 'this is rst_prolog',
+ 'hello reST!',
+ '',
+ '',
+ 'hello Sphinx world',
+ 'Sphinx is a document generator']
+ assert result.info(0) == ('dummy.rst', 0)
+ assert result.info(1) == ('dummy.rst', 1)
+ assert result.info(2) == ('<generated>', 0)
+ assert result.info(3) == ('<rst_prolog>', 0)
+ assert result.info(4) == ('<rst_prolog>', 1)
+ assert result.info(5) == ('<generated>', 0)
+ assert result.info(6) == ('dummy.rst', 2)
+ assert result.info(7) == ('dummy.rst', 3)
+ assert result.info(8) == ('dummy.rst', 4)
+ assert result.info(9) == ('dummy.rst', None) # out of range
+
+ # having rst_epilog
+ app.env.config.rst_prolog = None
+ app.env.config.rst_epilog = 'this is rst_epilog\ngood-bye reST!'
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == ['hello Sphinx world',
+ 'Sphinx is a document generator',
+ '',
+ 'this is rst_epilog',
+ 'good-bye reST!']
+ assert result.info(0) == ('dummy.rst', 0)
+ assert result.info(1) == ('dummy.rst', 1)
+ assert result.info(2) == ('<generated>', 0)
+ assert result.info(3) == ('<rst_epilog>', 0)
+ assert result.info(4) == ('<rst_epilog>', 1)
+ assert result.info(5) == ('<rst_epilog>', None) # out of range
+
+ # expandtabs / convert whitespaces
+ app.env.config.rst_prolog = None
+ app.env.config.rst_epilog = None
+ text = ('\thello Sphinx world\n'
+ '\v\fSphinx is a document generator')
+ source = SphinxRSTFileInput(app, app.env, source=StringIO(text),
+ source_path='dummy.rst', encoding='utf-8')
+ result = source.read()
+ assert result.data == [' hello Sphinx world',
+ ' Sphinx is a document generator']
diff --git a/tests/test_pycode.py b/tests/test_pycode.py
index 710e11341..400c47dc5 100644
--- a/tests/test_pycode.py
+++ b/tests/test_pycode.py
@@ -9,8 +9,42 @@
:license: BSD, see LICENSE for details.
"""
+import os
+from six import PY2
+
+import sphinx
from sphinx.pycode import ModuleAnalyzer
+SPHINX_MODULE_PATH = os.path.splitext(sphinx.__file__)[0] + '.py'
+
+
+def test_ModuleAnalyzer_for_string():
+ analyzer = ModuleAnalyzer.for_string('print("Hello world")', 'module_name')
+ assert analyzer.modname == 'module_name'
+ assert analyzer.srcname == '<string>'
+ if PY2:
+ assert analyzer.encoding == 'ascii'
+ else:
+ assert analyzer.encoding is None
+
+
+def test_ModuleAnalyzer_for_file():
+ analyzer = ModuleAnalyzer.for_string(SPHINX_MODULE_PATH, 'sphinx')
+ assert analyzer.modname == 'sphinx'
+ assert analyzer.srcname == '<string>'
+ if PY2:
+ assert analyzer.encoding == 'ascii'
+ else:
+ assert analyzer.encoding is None
+
+
+def test_ModuleAnalyzer_for_module():
+ analyzer = ModuleAnalyzer.for_module('sphinx')
+ assert analyzer.modname == 'sphinx'
+ assert analyzer.srcname in (SPHINX_MODULE_PATH,
+ os.path.abspath(SPHINX_MODULE_PATH))
+ assert analyzer.encoding == 'utf-8'
+
def test_ModuleAnalyzer_find_tags():
code = ('class Foo(object):\n' # line: 1
@@ -30,20 +64,30 @@ def test_ModuleAnalyzer_find_tags():
' """function baz"""\n'
' pass\n'
'\n'
- '@decorator\n'
+ '@decorator1\n'
+ '@decorator2\n'
'def quux():\n'
- ' pass\n')
+ ' pass\n' # line: 21
+ '\n'
+ 'class Corge(object):\n'
+ ' @decorator1\n'
+ ' @decorator2\n'
+ ' def grault(self):\n'
+ ' pass\n')
analyzer = ModuleAnalyzer.for_string(code, 'module')
tags = analyzer.find_tags()
assert set(tags.keys()) == {'Foo', 'Foo.__init__', 'Foo.bar',
- 'Foo.Baz', 'Foo.Baz.__init__', 'qux', 'quux'}
- assert tags['Foo'] == ('class', 1, 13) # type, start, end
- assert tags['Foo.__init__'] == ('def', 3, 5)
- assert tags['Foo.bar'] == ('def', 6, 9)
- assert tags['Foo.Baz'] == ('class', 10, 13)
- assert tags['Foo.Baz.__init__'] == ('def', 11, 13)
- assert tags['qux'] == ('def', 14, 17)
- assert tags['quux'] == ('def', 18, 21) # decorator
+ 'Foo.Baz', 'Foo.Baz.__init__', 'qux', 'quux',
+ 'Corge', 'Corge.grault'}
+ assert tags['Foo'] == ('class', 1, 12) # type, start, end
+ assert tags['Foo.__init__'] == ('def', 3, 4)
+ assert tags['Foo.bar'] == ('def', 6, 8)
+ assert tags['Foo.Baz'] == ('class', 10, 12)
+ assert tags['Foo.Baz.__init__'] == ('def', 11, 12)
+ assert tags['qux'] == ('def', 14, 16)
+ assert tags['quux'] == ('def', 18, 21)
+ assert tags['Corge'] == ('class', 23, 27)
+ assert tags['Corge.grault'] == ('def', 24, 27)
def test_ModuleAnalyzer_find_attr_docs():
@@ -72,13 +116,17 @@ def test_ModuleAnalyzer_find_attr_docs():
'\n'
'def baz():\n'
' """function baz"""\n'
- ' pass\n')
+ ' pass\n'
+ '\n'
+ 'class Qux: attr1 = 1; attr2 = 2')
analyzer = ModuleAnalyzer.for_string(code, 'module')
docs = analyzer.find_attr_docs()
assert set(docs) == {('Foo', 'attr1'),
('Foo', 'attr3'),
('Foo', 'attr4'),
('Foo', 'attr5'),
+ ('Foo', 'attr6'),
+ ('Foo', 'attr7'),
('Foo', 'attr8'),
('Foo', 'attr9')}
assert docs[('Foo', 'attr1')] == ['comment before attr1', '']
@@ -86,5 +134,23 @@ def test_ModuleAnalyzer_find_attr_docs():
assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
assert docs[('Foo', 'attr4')] == ['long attribute comment', '']
assert docs[('Foo', 'attr5')] == ['attribute comment for attr5', '']
+ assert docs[('Foo', 'attr6')] == ['this comment is ignored', '']
+ assert docs[('Foo', 'attr7')] == ['this comment is ignored', '']
assert docs[('Foo', 'attr8')] == ['attribute comment for attr8', '']
assert docs[('Foo', 'attr9')] == ['string after attr9', '']
+ assert analyzer.tagorder == {'Foo': 0,
+ 'Foo.__init__': 8,
+ 'Foo.attr1': 1,
+ 'Foo.attr2': 2,
+ 'Foo.attr3': 3,
+ 'Foo.attr4': 4,
+ 'Foo.attr5': 5,
+ 'Foo.attr6': 6,
+ 'Foo.attr7': 7,
+ 'Foo.attr8': 10,
+ 'Foo.attr9': 12,
+ 'Foo.bar': 13,
+ 'baz': 14,
+ 'Qux': 15,
+ 'Qux.attr1': 16,
+ 'Qux.attr2': 17}
diff --git a/tests/test_pycode_parser.py b/tests/test_pycode_parser.py
new file mode 100644
index 000000000..b9327999b
--- /dev/null
+++ b/tests/test_pycode_parser.py
@@ -0,0 +1,301 @@
+# -*- coding: utf-8 -*-
+"""
+ test_pycode_parser
+ ~~~~~~~~~~~~~~~~~~
+
+ Test pycode.parser.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import pytest
+from six import PY2
+
+from sphinx.pycode.parser import Parser
+
+
+def test_comment_picker_basic():
+ source = ('a = 1 + 1 #: assignment\n'
+ 'b = 1 +\\\n 1 #: assignment including a CR\n'
+ 'c = (1 +\n 1) #: tuple \n'
+ 'd = {1, \n 1} #: set\n'
+ 'e = [1, \n 1] #: list #: additional comment\n'
+ 'f = "abc"\n'
+ '#: string; comment on next line (ignored)\n'
+ 'g = 1.0\n'
+ '"""float; string on next line"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'assignment',
+ ('', 'b'): 'assignment including a CR',
+ ('', 'c'): 'tuple ',
+ ('', 'd'): ' set',
+ ('', 'e'): 'list #: additional comment',
+ ('', 'g'): 'float; string on next line'}
+
+
+def test_comment_picker_location():
+ # multiple "before" comments
+ source = ('#: comment before assignment1\n'
+ '#:\n'
+ '#: comment before assignment2\n'
+ 'a = 1 + 1\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): ('comment before assignment1\n'
+ '\n'
+ 'comment before assignment2')}
+
+ # before and after comments
+ source = ('#: comment before assignment\n'
+ 'a = 1 + 1 #: comment after assignment\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'comment after assignment'}
+
+ # after comment and next line string
+ source = ('a = 1 + 1\n #: comment after assignment\n'
+ '"""string on next line"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'string on next line'}
+
+ # before comment and next line string
+ source = ('#: comment before assignment\n'
+ 'a = 1 + 1\n'
+ '"""string on next line"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'string on next line'}
+
+ # before comment, after comment and next line string
+ source = ('#: comment before assignment\n'
+ 'a = 1 + 1 #: comment after assignment\n'
+ '"""string on next line"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'string on next line'}
+
+ # inside __init__ method
+ source = ('class Foo(object):\n'
+ ' def __init__(self):\n'
+ ' #: comment before assignment\n'
+ ' self.attr1 = None\n'
+ ' self.attr2 = None #: comment after assignment\n'
+ '\n'
+ ' #: comment for attr3(1)\n'
+ ' self.attr3 = None #: comment for attr3(2)\n'
+ ' """comment for attr3(3)"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('Foo', 'attr1'): 'comment before assignment',
+ ('Foo', 'attr2'): 'comment after assignment',
+ ('Foo', 'attr3'): 'comment for attr3(3)'}
+
+
+def test_complex_assignment():
+ source = ('a = 1 + 1; b = a #: compound statement\n'
+ 'c, d = (1, 1) #: unpack assignment\n'
+ 'e = True #: first assignment\n'
+ 'e = False #: second assignment\n'
+ 'f = g = None #: multiple assignment at once\n'
+ '(theta, phi) = (0, 0.5) #: unpack assignment via tuple\n'
+ '[x, y] = (5, 6) #: unpack assignment via list\n'
+ )
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'b'): 'compound statement',
+ ('', 'c'): 'unpack assignment',
+ ('', 'd'): 'unpack assignment',
+ ('', 'e'): 'second assignment',
+ ('', 'f'): 'multiple assignment at once',
+ ('', 'g'): 'multiple assignment at once',
+ ('', 'theta'): 'unpack assignment via tuple',
+ ('', 'phi'): 'unpack assignment via tuple',
+ ('', 'x'): 'unpack assignment via list',
+ ('', 'y'): 'unpack assignment via list',
+ }
+ assert parser.definitions == {}
+
+
+@pytest.mark.skipif(PY2, reason='tests for py3 syntax')
+def test_complex_assignment_py3():
+ source = ('a, *b, c = (1, 2, 3, 4) #: unpack assignment\n'
+ 'd, *self.attr = (5, 6, 7) #: unpack assignment2\n'
+ 'e, *f[0] = (8, 9, 0) #: unpack assignment3\n'
+ )
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'a'): 'unpack assignment',
+ ('', 'b'): 'unpack assignment',
+ ('', 'c'): 'unpack assignment',
+ ('', 'd'): 'unpack assignment2',
+ ('', 'e'): 'unpack assignment3',
+ }
+ assert parser.definitions == {}
+
+
+def test_obj_assignment():
+ source = ('obj = SomeObject() #: some object\n'
+ 'obj.attr = 1 #: attr1\n'
+ 'obj.attr.attr = 1 #: attr2\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'obj'): 'some object'}
+ assert parser.definitions == {}
+
+
+def test_container_assignment():
+ source = ('l = [] #: list\n'
+ 'l[1] = True #: list assignment\n'
+ 'l[0:0] = [] #: list assignment\n'
+ 'l[_from:_to] = [] #: list assignment\n'
+ 'd = {} #: dict\n'
+ 'd["doc"] = 1 #: dict assignment\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('', 'l'): 'list',
+ ('', 'd'): 'dict'}
+ assert parser.definitions == {}
+
+
+def test_function():
+ source = ('def some_function():\n'
+ ' """docstring"""\n'
+ ' a = 1 + 1 #: comment1\n'
+ '\n'
+ ' b = a #: comment2\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {}
+ assert parser.definitions == {'some_function': ('def', 1, 5)}
+ assert parser.deforders == {'some_function': 0}
+
+
+def test_nested_function():
+ source = ('def some_function():\n'
+ ' a = 1 + 1 #: comment1\n'
+ '\n'
+ ' def inner_function():\n'
+ ' b = 1 + 1 #: comment2\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {}
+ assert parser.definitions == {'some_function': ('def', 1, 5)}
+ assert parser.deforders == {'some_function': 0}
+
+
+def test_class():
+ source = ('class Foo(object):\n'
+ ' attr1 = None #: comment1\n'
+ ' attr2 = None #: comment2\n'
+ '\n'
+ ' def __init__(self):\n'
+ ' self.a = 1 + 1 #: comment3\n'
+ ' self.attr2 = 1 + 1 #: overrided\n'
+ ' b = 1 + 1 #: comment5\n'
+ '\n'
+ ' def some_method(self):\n'
+ ' c = 1 + 1 #: comment6\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('Foo', 'attr1'): 'comment1',
+ ('Foo', 'a'): 'comment3',
+ ('Foo', 'attr2'): 'overrided'}
+ assert parser.definitions == {'Foo': ('class', 1, 11),
+ 'Foo.__init__': ('def', 5, 8),
+ 'Foo.some_method': ('def', 10, 11)}
+ assert parser.deforders == {'Foo': 0,
+ 'Foo.attr1': 1,
+ 'Foo.__init__': 3,
+ 'Foo.a': 4,
+ 'Foo.attr2': 5,
+ 'Foo.some_method': 6}
+
+
+def test_class_uses_non_self():
+ source = ('class Foo(object):\n'
+ ' def __init__(this):\n'
+ ' this.a = 1 + 1 #: comment\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('Foo', 'a'): 'comment'}
+ assert parser.definitions == {'Foo': ('class', 1, 3),
+ 'Foo.__init__': ('def', 2, 3)}
+ assert parser.deforders == {'Foo': 0,
+ 'Foo.__init__': 1,
+ 'Foo.a': 2}
+
+
+def test_nested_class():
+ source = ('class Foo(object):\n'
+ ' attr1 = None #: comment1\n'
+ '\n'
+ ' class Bar(object):\n'
+ ' attr2 = None #: comment2\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('Foo', 'attr1'): 'comment1',
+ ('Foo.Bar', 'attr2'): 'comment2'}
+ assert parser.definitions == {'Foo': ('class', 1, 5),
+ 'Foo.Bar': ('class', 4, 5)}
+ assert parser.deforders == {'Foo': 0,
+ 'Foo.attr1': 1,
+ 'Foo.Bar': 2,
+ 'Foo.Bar.attr2': 3}
+
+
+def test_class_comment():
+ source = ('import logging\n'
+ 'logger = logging.getLogger(__name__)\n'
+ '\n'
+ 'class Foo(object):\n'
+ ' """Bar"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {}
+ assert parser.definitions == {'Foo': ('class', 4, 5)}
+
+
+def test_comment_picker_multiline_string():
+ source = ('class Foo(object):\n'
+ ' a = None\n'
+ ' """multiline\n'
+ ' docstring\n'
+ ' """\n'
+ ' b = None\n'
+ ' """\n'
+ ' docstring\n'
+ ' starts with::\n'
+ '\n'
+ ' empty line"""\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.comments == {('Foo', 'a'): 'multiline\ndocstring',
+ ('Foo', 'b'): 'docstring\nstarts with::\n\n empty line'}
+
+
+def test_decorators():
+ source = ('@deco\n'
+ 'def func1(): pass\n'
+ '\n'
+ '@deco(param1, param2)\n'
+ 'def func2(): pass\n'
+ '\n'
+ '@deco1\n'
+ '@deco2\n'
+ 'def func3(): pass\n'
+ '\n'
+ '@deco\n'
+ 'class Foo():\n'
+ ' @deco1\n'
+ ' @deco2\n'
+ ' def method(self): pass\n')
+ parser = Parser(source)
+ parser.parse()
+ assert parser.definitions == {'func1': ('def', 1, 2),
+ 'func2': ('def', 4, 5),
+ 'func3': ('def', 7, 9),
+ 'Foo': ('class', 11, 15),
+ 'Foo.method': ('def', 13, 15)}
diff --git a/tests/test_quickstart.py b/tests/test_quickstart.py
index a4f12a551..b1b0fc535 100644
--- a/tests/test_quickstart.py
+++ b/tests/test_quickstart.py
@@ -16,10 +16,8 @@ from six import PY2, text_type, StringIO
from six.moves import input
import pytest
-from sphinx.testing.util import SkipTest
-
from sphinx import application
-from sphinx import quickstart as qs
+from sphinx.cmd import quickstart as qs
from sphinx.util.console import nocolor, coloron
from sphinx.util.pycompat import execfile_
@@ -63,27 +61,7 @@ def teardown_module():
coloron()
-def test_quickstart_inputstrip():
- d = {}
- answers = {
- 'Q1': 'Y',
- 'Q2': ' Yes ',
- 'Q3': 'N',
- 'Q4': 'N ',
- }
- qs.term_input = mock_input(answers)
- qs.do_prompt(d, 'k1', 'Q1')
- assert d['k1'] == 'Y'
- qs.do_prompt(d, 'k2', 'Q2')
- assert d['k2'] == 'Yes'
- qs.do_prompt(d, 'k3', 'Q3')
- assert d['k3'] == 'N'
- qs.do_prompt(d, 'k4', 'Q4')
- assert d['k4'] == 'N'
-
-
def test_do_prompt():
- d = {}
answers = {
'Q2': 'v2',
'Q3': 'v3',
@@ -92,39 +70,43 @@ def test_do_prompt():
'Q6': 'foo',
}
qs.term_input = mock_input(answers)
- try:
- qs.do_prompt(d, 'k1', 'Q1')
- except AssertionError:
- assert 'k1' not in d
- else:
- assert False, 'AssertionError not raised'
- qs.do_prompt(d, 'k1', 'Q1', default='v1')
- assert d['k1'] == 'v1'
- qs.do_prompt(d, 'k3', 'Q3', default='v3_default')
- assert d['k3'] == 'v3'
- qs.do_prompt(d, 'k2', 'Q2')
- assert d['k2'] == 'v2'
- qs.do_prompt(d, 'k4', 'Q4', validator=qs.boolean)
- assert d['k4'] is True
- qs.do_prompt(d, 'k5', 'Q5', validator=qs.boolean)
- assert d['k5'] is False
+
+ assert qs.do_prompt('Q1', default='v1') == 'v1'
+ assert qs.do_prompt('Q3', default='v3_default') == 'v3'
+ assert qs.do_prompt('Q2') == 'v2'
+ assert qs.do_prompt('Q4', validator=qs.boolean) is True
+ assert qs.do_prompt('Q5', validator=qs.boolean) is False
with pytest.raises(AssertionError):
- qs.do_prompt(d, 'k6', 'Q6', validator=qs.boolean)
+ qs.do_prompt('Q6', validator=qs.boolean)
+
+
+def test_do_prompt_inputstrip():
+ answers = {
+ 'Q1': 'Y',
+ 'Q2': ' Yes ',
+ 'Q3': 'N',
+ 'Q4': 'N ',
+ }
+ qs.term_input = mock_input(answers)
+
+ assert qs.do_prompt('Q1') == 'Y'
+ assert qs.do_prompt('Q2') == 'Yes'
+ assert qs.do_prompt('Q3') == 'N'
+ assert qs.do_prompt('Q4') == 'N'
def test_do_prompt_with_nonascii():
- d = {}
answers = {
'Q1': u'\u30c9\u30a4\u30c4',
}
qs.term_input = mock_input(answers)
try:
- qs.do_prompt(d, 'k1', 'Q1', default=u'\u65e5\u672c')
+ result = qs.do_prompt('Q1', default=u'\u65e5\u672c')
except UnicodeEncodeError:
- raise SkipTest(
+ raise pytest.skip.Exception(
'non-ASCII console input not supported on this encoding: %s',
qs.TERM_ENCODING)
- assert d['k1'] == u'\u30c9\u30a4\u30c4'
+ assert result == u'\u30c9\u30a4\u30c4'
def test_quickstart_defaults(tempdir):
@@ -151,7 +133,6 @@ def test_quickstart_defaults(tempdir):
assert ns['copyright'] == '%s, Georg Brandl' % time.strftime('%Y')
assert ns['version'] == '0.1'
assert ns['release'] == '0.1'
- assert ns['todo_include_todos'] is False
assert ns['html_static_path'] == ['_static']
assert ns['latex_documents'] == [
('index', 'SphinxTest.tex', 'Sphinx Test Documentation',
@@ -298,8 +279,7 @@ def test_default_filename(tempdir):
def test_extensions(tempdir):
- qs.main(['sphinx-quickstart', '-q',
- '-p', 'project_name', '-a', 'author',
+ qs.main(['-q', '-p', 'project_name', '-a', 'author',
'--extensions', 'foo,bar,baz', tempdir])
conffile = tempdir / 'conf.py'
diff --git a/tests/test_theming.py b/tests/test_theming.py
index 48a4e1865..dfe583918 100644
--- a/tests/test_theming.py
+++ b/tests/test_theming.py
@@ -49,8 +49,10 @@ def test_theme_api(app, status, warning):
theme.get_config('theme', 'foobar')
# options API
- with pytest.raises(ThemeError):
- theme.get_options({'nonexisting': 'foo'})
+
+ options = theme.get_options({'nonexisting': 'foo'})
+ assert 'nonexisting' not in options.keys()
+
options = theme.get_options(cfg.html_theme_options)
assert options['testopt'] == 'foo'
assert options['nosidebar'] == 'false'
@@ -110,3 +112,15 @@ def test_staticfiles(app, status, warning):
result = (app.outdir / 'index.html').text()
assert '<meta name="testopt" content="optdefault" />' in result
+
+
+@pytest.mark.sphinx(testroot='theming')
+def test_theme_sidebars(app, status, warning):
+ app.build()
+
+ # test-theme specifies globaltoc and searchbox as default sidebars
+ result = (app.outdir / 'index.html').text(encoding='utf8')
+ assert '<h3><a href="#">Table Of Contents</a></h3>' in result
+ assert '<h3>Related Topics</h3>' not in result
+ assert '<h3>This Page</h3>' not in result
+ assert '<h3>Quick search</h3>' in result
diff --git a/tests/test_util.py b/tests/test_util.py
index d55de7f5c..189e221b2 100644
--- a/tests/test_util.py
+++ b/tests/test_util.py
@@ -14,8 +14,7 @@ from mock import patch
from sphinx.util import logging
from sphinx.util import (
- display_chunk, encode_uri, parselinenos, split_docinfo, status_iterator,
- xmlname_checker
+ display_chunk, encode_uri, parselinenos, status_iterator, xmlname_checker
)
from sphinx.testing.util import strip_escseq
@@ -36,28 +35,6 @@ def test_encode_uri():
assert expected, encode_uri(uri)
-def test_splitdocinfo():
- source = "Hello world.\n"
- docinfo, content = split_docinfo(source)
- assert docinfo == ''
- assert content == 'Hello world.\n'
-
- source = ":orphan:\n\nHello world.\n"
- docinfo, content = split_docinfo(source)
- assert docinfo == ':orphan:\n'
- assert content == '\nHello world.\n'
-
- source = ":author: Georg Brandl\n:title: Manual of Sphinx\n\nHello world.\n"
- docinfo, content = split_docinfo(source)
- assert docinfo == ':author: Georg Brandl\n:title: Manual of Sphinx\n'
- assert content == '\nHello world.\n'
-
- source = ":multiline: one\n\ttwo\n\tthree\n\nHello world.\n"
- docinfo, content = split_docinfo(source)
- assert docinfo == ":multiline: one\n\ttwo\n\tthree\n"
- assert content == '\nHello world.\n'
-
-
def test_display_chunk():
assert display_chunk('hello') == 'hello'
assert display_chunk(['hello']) == 'hello'
@@ -118,7 +95,6 @@ def test_parselinenos():
parselinenos('3-1', 10)
-
def test_xmlname_check():
checker = xmlname_checker()
assert checker.match('id-pub')
diff --git a/tests/test_util_docstrings.py b/tests/test_util_docstrings.py
new file mode 100644
index 000000000..1bdda1021
--- /dev/null
+++ b/tests/test_util_docstrings.py
@@ -0,0 +1,65 @@
+# -*- coding: utf-8 -*-
+"""
+ test_util_docstrings
+ ~~~~~~~~~~~~~~~~~~~~
+
+ Test sphinx.util.docstrings.
+
+ :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from sphinx.util.docstrings import prepare_docstring, prepare_commentdoc
+
+
+def test_prepare_docstring():
+ docstring = """multiline docstring
+
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit,
+ sed do eiusmod tempor incididunt ut labore et dolore magna
+ aliqua::
+
+ Ut enim ad minim veniam, quis nostrud exercitation
+ ullamco laboris nisi ut aliquip ex ea commodo consequat.
+ """
+
+ assert (prepare_docstring(docstring) ==
+ ["multiline docstring",
+ "",
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit,",
+ "sed do eiusmod tempor incididunt ut labore et dolore magna",
+ "aliqua::",
+ "",
+ " Ut enim ad minim veniam, quis nostrud exercitation",
+ " ullamco laboris nisi ut aliquip ex ea commodo consequat.",
+ ""])
+ assert (prepare_docstring(docstring, 5) ==
+ ["multiline docstring",
+ "",
+ "Lorem ipsum dolor sit amet, consectetur adipiscing elit,",
+ "sed do eiusmod tempor incididunt ut labore et dolore magna",
+ "aliqua::",
+ "",
+ "Ut enim ad minim veniam, quis nostrud exercitation",
+ " ullamco laboris nisi ut aliquip ex ea commodo consequat.",
+ ""])
+
+ docstring = """
+
+ multiline docstring with leading empty lines
+ """
+ assert (prepare_docstring(docstring) ==
+ ["multiline docstring with leading empty lines",
+ ""])
+
+ docstring = "single line docstring"
+ assert (prepare_docstring(docstring) ==
+ ["single line docstring",
+ ""])
+
+
+def test_prepare_commentdoc():
+ assert prepare_commentdoc("hello world") == []
+ assert prepare_commentdoc("#: hello world") == ["hello world", ""]
+ assert prepare_commentdoc("#:  hello world") == [" hello world", ""]
+ assert prepare_commentdoc("#: hello\n#: world\n") == ["hello", "world", ""]
diff --git a/tests/test_util_images.py b/tests/test_util_images.py
index a9c023b09..624690831 100644
--- a/tests/test_util_images.py
+++ b/tests/test_util_images.py
@@ -44,22 +44,22 @@ def test_guess_mimetype(testroot):
assert guess_mimetype('IMG.PNG') == 'image/png'
# guess by content
- assert guess_mimetype(content=(testroot/GIF_FILENAME).bytes()) == 'image/gif'
- assert guess_mimetype(content=(testroot/PNG_FILENAME).bytes()) == 'image/png'
- assert guess_mimetype(content=(testroot/PDF_FILENAME).bytes()) is None
- assert guess_mimetype(content=(testroot/TXT_FILENAME).bytes()) is None
- assert guess_mimetype(content=(testroot/TXT_FILENAME).bytes(),
+ assert guess_mimetype(content=(testroot / GIF_FILENAME).bytes()) == 'image/gif'
+ assert guess_mimetype(content=(testroot / PNG_FILENAME).bytes()) == 'image/png'
+ assert guess_mimetype(content=(testroot / PDF_FILENAME).bytes()) is None
+ assert guess_mimetype(content=(testroot / TXT_FILENAME).bytes()) is None
+ assert guess_mimetype(content=(testroot / TXT_FILENAME).bytes(),
default='text/plain') == 'text/plain'
# the priority of params: filename > content > default
assert guess_mimetype('img.png',
- content=(testroot/GIF_FILENAME).bytes(),
+ content=(testroot / GIF_FILENAME).bytes(),
default='text/plain') == 'image/png'
assert guess_mimetype('no_extension',
- content=(testroot/GIF_FILENAME).bytes(),
+ content=(testroot / GIF_FILENAME).bytes(),
default='text/plain') == 'image/gif'
assert guess_mimetype('no_extension',
- content=(testroot/TXT_FILENAME).bytes(),
+ content=(testroot / TXT_FILENAME).bytes(),
default='text/plain') == 'text/plain'
diff --git a/tests/test_util_inspect.py b/tests/test_util_inspect.py
index a463f4f6a..f0188cafa 100644
--- a/tests/test_util_inspect.py
+++ b/tests/test_util_inspect.py
@@ -8,8 +8,6 @@
:copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-from unittest import TestCase
-
import sys
from six import PY3
import functools
@@ -19,134 +17,316 @@ import pytest
from sphinx.util import inspect
-class TestGetArgSpec(TestCase):
- def test_getargspec_builtin_type(self):
- with pytest.raises(TypeError):
- inspect.getargspec(int)
+def test_getargspec():
+ def func(a, b, c=1, d=2, *e, **f):
+ pass
+
+ spec = inspect.getargspec(func)
+ assert spec.args == ['a', 'b', 'c', 'd']
+ assert spec.varargs == 'e'
+ if PY3:
+ assert spec.varkw == 'f'
+ assert spec.defaults == (1, 2)
+ assert spec.kwonlyargs == []
+ assert spec.kwonlydefaults is None
+ assert spec.annotations == {}
+ else:
+ assert spec.keywords == 'f'
+ assert spec.defaults == [1, 2]
+
+
+def test_getargspec_partial():
+ def func1(a, b, c=1, d=2, *e, **f):
+ pass
+
+ partial = functools.partial(func1, 10, c=11)
+ spec = inspect.getargspec(partial)
+ if PY3:
+ assert spec.args == ['b']
+ assert spec.varargs is None
+ assert spec.varkw == 'f'
+ assert spec.defaults is None
+ assert spec.kwonlyargs == ['c', 'd']
+ assert spec.kwonlydefaults == {'c': 11, 'd': 2}
+ assert spec.annotations == {}
+ else:
+ assert spec.args == ['b', 'd']
+ assert spec.varargs == 'e'
+ assert spec.keywords == 'f'
+ assert spec.defaults == [2]
+
+
+def test_getargspec_partial2():
+ def fun(a, b, c=1, d=2):
+ pass
+ p = functools.partial(fun, 10, c=11)
+
+ if PY3:
+ # Python 3's partial is rather cleverer than Python 2's, and we
+ # have to jump through some hoops to define an equivalent function
+ # in a way that won't confuse Python 2's parser:
+ ns = {}
+ exec(dedent("""
+ def f_expected(b, *, c=11, d=2):
+ pass
+ """), ns)
+ f_expected = ns["f_expected"]
+ else:
+ def f_expected(b, d=2):
+ pass
+ expected = inspect.getargspec(f_expected)
+
+ assert expected == inspect.getargspec(p)
+
+
+def test_getargspec_builtin_type():
+ with pytest.raises(TypeError):
+ inspect.getargspec(int)
+
+
+def test_getargspec_bound_methods():
+ def f_expected_unbound(self, arg1, **kwargs):
+ pass
+ expected_unbound = inspect.getargspec(f_expected_unbound)
+
+ def f_expected_bound(arg1, **kwargs):
+ pass
+ expected_bound = inspect.getargspec(f_expected_bound)
+
+ class Foo:
+ def method(self, arg1, **kwargs):
+ pass
+
+ bound_method = Foo().method
+
+ @functools.wraps(bound_method)
+ def wrapped_bound_method(*args, **kwargs):
+ pass
+
+ assert expected_unbound == inspect.getargspec(Foo.method)
+ if PY3 and sys.version_info >= (3, 4, 4):
+ # On py2, the inspect functions don't properly handle bound
+ # methods (they include a spurious 'self' argument)
+ assert expected_bound == inspect.getargspec(bound_method)
+ # On py2, the inspect functions can't properly handle wrapped
+ # functions (no __wrapped__ support)
+ assert expected_bound == inspect.getargspec(wrapped_bound_method)
+
+
+def test_Signature():
+ # literals
+ with pytest.raises(TypeError):
+ inspect.Signature(1)
+
+ with pytest.raises(TypeError):
+ inspect.Signature('')
+
+ # builtin classes
+ with pytest.raises(TypeError):
+ inspect.Signature(int)
- def test_getargspec_partial(self):
- def fun(a, b, c=1, d=2):
+ with pytest.raises(TypeError):
+ inspect.Signature(str)
+
+ # normal function
+ def func(a, b, c=1, d=2, *e, **f):
+ pass
+
+ sig = inspect.Signature(func).format_args()
+ assert sig == '(a, b, c=1, d=2, *e, **f)'
+
+
+def test_Signature_partial():
+ def fun(a, b, c=1, d=2):
+ pass
+ p = functools.partial(fun, 10, c=11)
+
+ sig = inspect.Signature(p).format_args()
+ if sys.version_info < (3,):
+ assert sig == '(b, d=2)'
+ else:
+ assert sig == '(b, *, c=11, d=2)'
+
+
+def test_Signature_methods():
+ class Foo:
+ def meth1(self, arg1, **kwargs):
pass
- p = functools.partial(fun, 10, c=11)
-
- if PY3:
- # Python 3's partial is rather cleverer than Python 2's, and we
- # have to jump through some hoops to define an equivalent function
- # in a way that won't confuse Python 2's parser:
- ns = {}
- exec(dedent("""
- def f_expected(b, *, c=11, d=2):
- pass
- """), ns)
- f_expected = ns["f_expected"]
- else:
- def f_expected(b, d=2):
- pass
- expected = inspect.getargspec(f_expected)
-
- assert expected == inspect.getargspec(p)
-
- def test_getargspec_bound_methods(self):
- def f_expected_unbound(self, arg1, **kwargs):
+
+ @classmethod
+ def meth2(cls, arg1, *args, **kwargs):
pass
- expected_unbound = inspect.getargspec(f_expected_unbound)
- def f_expected_bound(arg1, **kwargs):
+ @staticmethod
+ def meth3(arg1, *args, **kwargs):
pass
- expected_bound = inspect.getargspec(f_expected_bound)
- class Foo:
- def method(self, arg1, **kwargs):
- pass
+ @functools.wraps(Foo().meth1)
+ def wrapped_bound_method(*args, **kwargs):
+ pass
- bound_method = Foo().method
+ # unbound method
+ sig = inspect.Signature(Foo.meth1).format_args()
+ assert sig == '(self, arg1, **kwargs)'
- @functools.wraps(bound_method)
- def wrapped_bound_method(*args, **kwargs):
- pass
+ sig = inspect.Signature(Foo.meth1, bound_method=True).format_args()
+ assert sig == '(arg1, **kwargs)'
+
+ # bound method
+ sig = inspect.Signature(Foo().meth1).format_args()
+ assert sig == '(arg1, **kwargs)'
+
+ # class method
+ sig = inspect.Signature(Foo.meth2).format_args()
+ assert sig == '(arg1, *args, **kwargs)'
+
+ sig = inspect.Signature(Foo().meth2).format_args()
+ assert sig == '(arg1, *args, **kwargs)'
+
+ # static method
+ sig = inspect.Signature(Foo.meth3).format_args()
+ assert sig == '(arg1, *args, **kwargs)'
+
+ sig = inspect.Signature(Foo().meth3).format_args()
+ assert sig == '(arg1, *args, **kwargs)'
+
+ # wrapped bound method
+ sig = inspect.Signature(wrapped_bound_method).format_args()
+ if sys.version_info < (3,):
+ assert sig == '(*args, **kwargs)'
+ elif sys.version_info < (3, 4, 4):
+ assert sig == '(self, arg1, **kwargs)'
+ else:
+ assert sig == '(arg1, **kwargs)'
+
+
+@pytest.mark.skipif(sys.version_info < (3, 5),
+ reason='type annotation test is available on py35 or above')
+def test_Signature_annotations():
+ from typing_test_data import f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11
+
+ # Class annotations
+ sig = inspect.Signature(f0).format_args()
+ assert sig == '(x: int, y: numbers.Integral) -> None'
+
+ # Generic types with concrete parameters
+ sig = inspect.Signature(f1).format_args()
+ assert sig == '(x: typing.List[int]) -> typing.List[int]'
+
+ # TypeVars and generic types with TypeVars
+ sig = inspect.Signature(f2).format_args()
+ assert sig == '(x: typing.List[T], y: typing.List[T_co], z: T) -> typing.List[T_contra]'
+
+ # Union types
+ sig = inspect.Signature(f3).format_args()
+ assert sig == '(x: typing.Union[str, numbers.Integral]) -> None'
+
+ # Quoted annotations
+ sig = inspect.Signature(f4).format_args()
+ assert sig == '(x: str, y: str) -> None'
+
+ # Keyword-only arguments
+ sig = inspect.Signature(f5).format_args()
+ assert sig == '(x: int, *, y: str, z: str) -> None'
+
+ # Keyword-only arguments with varargs
+ sig = inspect.Signature(f6).format_args()
+ assert sig == '(x: int, *args, y: str, z: str) -> None'
+
+ # Space around '=' for defaults
+ sig = inspect.Signature(f7).format_args()
+ assert sig == '(x: int = None, y: dict = {}) -> None'
+
+ # Callable types
+ sig = inspect.Signature(f8).format_args()
+ assert sig == '(x: typing.Callable[[int, str], int]) -> None'
+
+ sig = inspect.Signature(f9).format_args()
+ assert sig == '(x: typing.Callable) -> None'
+
+ # Tuple types
+ sig = inspect.Signature(f10).format_args()
+ assert sig == '(x: typing.Tuple[int, str], y: typing.Tuple[int, ...]) -> None'
+
+ # Instance annotations
+ sig = inspect.Signature(f11).format_args()
+ assert sig == '(x: CustomAnnotation, y: 123) -> None'
+
+
+def test_safe_getattr_with_default():
+ class Foo(object):
+ def __getattr__(self, item):
+ raise Exception
+
+ obj = Foo()
+
+ result = inspect.safe_getattr(obj, 'bar', 'baz')
+
+ assert result == 'baz'
+
+
+def test_safe_getattr_with_exception():
+ class Foo(object):
+ def __getattr__(self, item):
+ raise Exception
+
+ obj = Foo()
+
+ try:
+ inspect.safe_getattr(obj, 'bar')
+ except AttributeError as exc:
+ assert exc.args[0] == 'bar'
+ else:
+ pytest.fail('AttributeError not raised')
+
+
+def test_safe_getattr_with_property_exception():
+ class Foo(object):
+ @property
+ def bar(self):
+ raise Exception
+
+ obj = Foo()
+
+ try:
+ inspect.safe_getattr(obj, 'bar')
+ except AttributeError as exc:
+ assert exc.args[0] == 'bar'
+ else:
+ pytest.fail('AttributeError not raised')
+
+
+def test_safe_getattr_with___dict___override():
+ class Foo(object):
+ @property
+ def __dict__(self):
+ raise Exception
+
+ obj = Foo()
+
+ try:
+ inspect.safe_getattr(obj, 'bar')
+ except AttributeError as exc:
+ assert exc.args[0] == 'bar'
+ else:
+ pytest.fail('AttributeError not raised')
+
+
+def test_dictionary_sorting():
+ dictionary = {"c": 3, "a": 1, "d": 2, "b": 4}
+ description = inspect.object_description(dictionary)
+ assert description == "{'a': 1, 'b': 4, 'c': 3, 'd': 2}"
+
+
+def test_dict_customtype():
+ class CustomType(object):
+ def __init__(self, value):
+ self._value = value
+
+ def __repr__(self):
+ return "<CustomType(%r)>" % self._value
- assert expected_unbound == inspect.getargspec(Foo.method)
- if PY3 and sys.version_info >= (3, 4, 4):
- # On py2, the inspect functions don't properly handle bound
- # methods (they include a spurious 'self' argument)
- assert expected_bound == inspect.getargspec(bound_method)
- # On py2, the inspect functions can't properly handle wrapped
- # functions (no __wrapped__ support)
- assert expected_bound == inspect.getargspec(wrapped_bound_method)
-
-
-class TestSafeGetAttr(TestCase):
- def test_safe_getattr_with_default(self):
- class Foo(object):
- def __getattr__(self, item):
- raise Exception
-
- obj = Foo()
-
- result = inspect.safe_getattr(obj, 'bar', 'baz')
-
- assert result == 'baz'
-
- def test_safe_getattr_with_exception(self):
- class Foo(object):
- def __getattr__(self, item):
- raise Exception
-
- obj = Foo()
-
- try:
- inspect.safe_getattr(obj, 'bar')
- except AttributeError as exc:
- self.assertEqual(exc.args[0], 'bar')
- else:
- self.fail('AttributeError not raised')
-
- def test_safe_getattr_with_property_exception(self):
- class Foo(object):
- @property
- def bar(self):
- raise Exception
-
- obj = Foo()
-
- try:
- inspect.safe_getattr(obj, 'bar')
- except AttributeError as exc:
- self.assertEqual(exc.args[0], 'bar')
- else:
- self.fail('AttributeError not raised')
-
- def test_safe_getattr_with___dict___override(self):
- class Foo(object):
- @property
- def __dict__(self):
- raise Exception
-
- obj = Foo()
-
- try:
- inspect.safe_getattr(obj, 'bar')
- except AttributeError as exc:
- self.assertEqual(exc.args[0], 'bar')
- else:
- self.fail('AttributeError not raised')
-
-
-class TestObjectDescription(TestCase):
- def test_dictionary_sorting(self):
- dictionary = {"c": 3, "a": 1, "d": 2, "b": 4}
- description = inspect.object_description(dictionary)
- assert description == "{'a': 1, 'b': 4, 'c': 3, 'd': 2}"
-
- def test_dict_customtype(self):
- class CustomType(object):
- def __init__(self, value):
- self._value = value
-
- def __repr__(self):
- return "<CustomType(%r)>" % self._value
-
- dictionary = {CustomType(2): 2, CustomType(1): 1}
- description = inspect.object_description(dictionary)
- # Type is unsortable, just check that it does not crash
- assert "<CustomType(2)>: 2" in description
+ dictionary = {CustomType(2): 2, CustomType(1): 1}
+ description = inspect.object_description(dictionary)
+ # Type is unsortable, just check that it does not crash
+ assert "<CustomType(2)>: 2" in description
diff --git a/tests/test_util_inventory.py b/tests/test_util_inventory.py
index 1a5be431b..3829de9ef 100644
--- a/tests/test_util_inventory.py
+++ b/tests/test_util_inventory.py
@@ -38,6 +38,8 @@ std cpp:type 1 index.html#std -
std::uint8_t cpp:type 1 index.html#std_uint8_t -
foo::Bar cpp:class 1 index.html#cpp_foo_bar -
foo::Bar::baz cpp:function 1 index.html#cpp_foo_bar_baz -
+foons cpp:type 1 index.html#foons -
+foons::bartype cpp:type 1 index.html#foons_bartype -
a term std:term -1 glossary.html#term-a-term -
ls.-l std:cmdoption 1 index.html#cmdoption-ls-l -
docname std:doc -1 docname.html -
diff --git a/tests/test_versioning.py b/tests/test_versioning.py
index 7956b6710..e17d250e5 100644
--- a/tests/test_versioning.py
+++ b/tests/test_versioning.py
@@ -28,7 +28,7 @@ def setup_module(rootdir, sphinx_test_tempdir):
global app, original, original_uids
srcdir = sphinx_test_tempdir / 'test-versioning'
if not srcdir.exists():
- (rootdir/'test-versioning').copytree(srcdir)
+ (rootdir / 'test-versioning').copytree(srcdir)
app = SphinxTestApp(srcdir=srcdir)
app.builder.env.app = app
app.connect('doctree-resolved', on_doctree_resolved)
diff --git a/tox.ini b/tox.ini
index 530e5b941..ae6b2a4b3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,64 +1,65 @@
[tox]
-envlist=flake8,py27,py34,py35,py36,pypy,du13,du12,du11
+minversion = 2.0
+envlist = docs,flake8,mypy,coverage,py{27,34,35,36,py},du{11,12,13,14}
+skipsdist = True
[testenv]
-deps=
- six
- pytest
- html5lib
- mock
- enum34
- typing
+passenv =
+ https_proxy http_proxy no_proxy PERL PERL5LIB
+description =
+ py{27,34,35,36,py}: Run unit tests against {envname}.
+ du{11,12,13,14}: Run unit tests with the given version of docutils.
+
+# TODO(stephenfin) Replace this with the 'extras' config option when tox 2.4 is
+# widely available, likely some time after the Ubuntu 18.04 release
+#
+# https://tox.readthedocs.io/en/latest/config.html#confval-extras=MULTI-LINE-LIST
+deps =
+ .[test,websupport]
+ du11: docutils==0.11
+ du12: docutils==0.12
+ du13: docutils==0.13.1
+ du14: docutils==0.14
setenv =
+ PYTHONWARNINGS = all,ignore::ImportWarning:pkgutil
SPHINX_TEST_TEMPDIR = {envdir}/testbuild
- PYTHONDONTWRITEBYTECODE = true
commands=
- {envpython} -Wall tests/run.py --ignore tests/py35 {posargs}
- sphinx-build -q -W -b html -d {envtmpdir}/doctrees doc {envtmpdir}/html
-
-[testenv:pypy]
-deps=
- simplejson
- {[testenv]deps}
-
-[testenv:du11]
-deps=
- docutils==0.11
- {[testenv]deps}
-
-[testenv:du12]
-deps=
- docutils==0.12
- {[testenv]deps}
-
-[testenv:du13]
-deps=
- docutils==0.13.1
- {[testenv]deps}
+ pytest -Wall --durations 25 {posargs}
[testenv:flake8]
-deps=flake8
-commands=flake8
+description =
+ Run style checks.
+commands =
+ flake8
-[testenv:py27]
-deps=
+[testenv:pylint]
+description =
+ Run source code analyzer.
+deps =
+ pylint
{[testenv]deps}
+commands =
+ pylint --rcfile utils/pylintrc sphinx
-[testenv:py35]
-deps=
- mypy
- typed_ast
- {[testenv]deps}
-commands=
- {envpython} -Wall tests/run.py {posargs}
- sphinx-build -q -W -b html -d {envtmpdir}/doctrees doc {envtmpdir}/html
+[testenv:coverage]
+description =
+ Run code coverage checks.
+setenv =
+ PYTEST_ADDOPTS = --cov sphinx --cov-config {toxinidir}/setup.cfg
+commands =
+ {[testenv]commands}
+ coverage report
[testenv:mypy]
-deps=
+description =
+ Run type checks.
+deps =
mypy
commands=
mypy sphinx/
[testenv:docs]
-commands=
- python setup.py build_sphinx
+description =
+ Build documentation.
+commands =
+ python setup.py build_sphinx {posargs}
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/utils/__init__.py
diff --git a/utils/check_sources.py b/utils/check_sources.py
deleted file mode 100755
index 30f1fb93c..000000000
--- a/utils/check_sources.py
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-"""
- Checker for file headers
- ~~~~~~~~~~~~~~~~~~~~~~~~
-
- Make sure each Python file has a correct file header
- including copyright and license information.
-
- :copyright: Copyright 2007-2018 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-"""
-from __future__ import print_function
-
-import os
-import re
-import sys
-from optparse import OptionParser
-from os.path import join, splitext, abspath
-
-
-checkers = {}
-
-
-def checker(*suffixes, **kwds):
- only_pkg = kwds.pop('only_pkg', False)
-
- def deco(func):
- for suffix in suffixes:
- checkers.setdefault(suffix, []).append(func)
- func.only_pkg = only_pkg
- return func
- return deco
-
-
-# this one is a byte regex since it is applied before decoding
-coding_re = re.compile(br'coding[:=]\s*([-\w.]+)')
-
-uni_coding_re = re.compile(r'^#.*coding[:=]\s*([-\w.]+).*')
-name_mail_re = r'[\w ]+(<.*?>)?'
-copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
- r'by %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-license_re = re.compile(r" :license: (.*?).\n")
-copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
- (name_mail_re, name_mail_re))
-not_ix_re = re.compile(r'\bnot\s+\S+?\s+i[sn]\s\S+')
-is_const_re = re.compile(r'if.*?==\s+(None|False|True)\b')
-noqa_re = re.compile(r'#\s+NOQA\s*$', re.I)
-
-misspellings = ["developement", "adress", # ALLOW-MISSPELLING
- "verificate", "informations"] # ALLOW-MISSPELLING
-
-
-def decode_source(fn, lines):
- encoding = 'ascii' if fn.endswith('.py') else 'utf-8'
- decoded_lines = []
- for lno, line in enumerate(lines):
- if lno < 2:
- co = coding_re.search(line)
- if co:
- encoding = co.group(1).decode()
- try:
- decoded_lines.append(line.decode(encoding))
- except UnicodeDecodeError as err:
- raise UnicodeError("%s:%d: not decodable: %s\n Line: %r" %
- (fn, lno + 1, err, line))
- except LookupError as err:
- raise LookupError("unknown encoding: %s" % encoding)
- return decoded_lines
-
-
-@checker('.py')
-def check_syntax(fn, lines):
- lines = [uni_coding_re.sub('', line) for line in lines]
- try:
- compile(''.join(lines), fn, "exec")
- except SyntaxError as err:
- yield 0, "not compilable: %s" % err
-
-
-@checker('.py')
-def check_style(fn, lines):
- for lno, line in enumerate(lines):
- if noqa_re.search(line):
- continue
- if len(line.rstrip('\n')) > 95:
- yield lno + 1, "line too long"
- if line.strip().startswith('#'):
- continue
- # m = not_ix_re.search(line)
- # if m:
- # yield lno+1, '"' + m.group() + '"'
- if is_const_re.search(line):
- yield lno + 1, 'using == None/True/False'
-
-
-@checker('.py', only_pkg=True)
-def check_fileheader(fn, lines):
- # line number correction
- c = 1
- if lines[0:1] == ['#!/usr/bin/env python\n']:
- lines = lines[1:]
- c = 2
-
- llist = []
- docopen = False
- for lno, l in enumerate(lines):
- llist.append(l)
- if lno == 0:
- if l != '# -*- coding: utf-8 -*-\n':
- yield 1, "missing coding declaration"
- elif lno == 1:
- if l != '"""\n' and l != 'r"""\n':
- yield 2, 'missing docstring begin (""")'
- else:
- docopen = True
- elif docopen:
- if l == '"""\n':
- # end of docstring
- if lno <= 4:
- yield lno + c, "missing module name in docstring"
- break
-
- if l != '\n' and l[:4] != ' ' and docopen:
- yield lno + c, "missing correct docstring indentation"
-
- if lno == 2:
- # if not in package, don't check the module name
- modname = fn[:-3].replace('/', '.').replace('.__init__', '')
- while modname:
- if l.lower()[4:-1] == modname:
- break
- modname = '.'.join(modname.split('.')[1:])
- else:
- yield 3, "wrong module name in docstring heading"
- modnamelen = len(l.strip())
- elif lno == 3:
- if l.strip() != modnamelen * '~':
- yield 4, "wrong module name underline, should be ~~~...~"
-
- else:
- yield 0, "missing end and/or start of docstring..."
-
- # check for copyright and license fields
- license = llist[-2:-1]
- if not license or not license_re.match(license[0]):
- yield 0, "no correct license info"
-
- ci = -3
- copyright = llist[ci:ci + 1]
- while copyright and copyright_2_re.match(copyright[0]):
- ci -= 1
- copyright = llist[ci:ci + 1]
- if not copyright or not copyright_re.match(copyright[0]):
- yield 0, "no correct copyright info"
-
-
-@checker('.py', '.html', '.rst')
-def check_whitespace_and_spelling(fn, lines):
- for lno, line in enumerate(lines):
- if '\t' in line:
- yield lno + 1, "OMG TABS!!!1 "
- if line[:-1].rstrip(' \t') != line[:-1]:
- yield lno + 1, "trailing whitespace"
- for word in misspellings:
- if word in line and 'ALLOW-MISSPELLING' not in line:
- yield lno + 1, '"%s" used' % word
-
-
-bad_tags = ['<u>', '<s>', '<strike>', '<center>', '<font']
-
-
-@checker('.html')
-def check_xhtml(fn, lines):
- for lno, line in enumerate(lines):
- for bad_tag in bad_tags:
- if bad_tag in line:
- yield lno + 1, "used " + bad_tag
-
-
-def main(argv):
- parser = OptionParser(usage='Usage: %prog [-v] [-i ignorepath]* [path]')
- parser.add_option('-v', '--verbose', dest='verbose', default=False,
- action='store_true')
- parser.add_option('-i', '--ignore-path', dest='ignored_paths',
- default=[], action='append')
- options, args = parser.parse_args(argv[1:])
-
- if len(args) == 0:
- path = '.'
- elif len(args) == 1:
- path = args[0]
- else:
- print(args)
- parser.error('No more then one path supported')
-
- verbose = options.verbose
- ignored_paths = set(abspath(p) for p in options.ignored_paths)
-
- num = 0
-
- for root, dirs, files in os.walk(path):
- for vcs_dir in ['.svn', '.hg', '.git']:
- if vcs_dir in dirs:
- dirs.remove(vcs_dir)
- if abspath(root) in ignored_paths:
- del dirs[:]
- continue
- in_check_pkg = root.startswith('./sphinx')
- for fn in files:
-
- fn = join(root, fn)
- if fn[:2] == './':
- fn = fn[2:]
-
- if abspath(fn) in ignored_paths:
- continue
-
- ext = splitext(fn)[1]
- checkerlist = checkers.get(ext, None)
- if not checkerlist:
- continue
-
- if verbose:
- print("Checking %s..." % fn)
-
- try:
- with open(fn, 'rb') as f:
- lines = list(f)
- except (IOError, OSError) as err:
- print("%s: cannot open: %s" % (fn, err))
- num += 1
- continue
-
- try:
- lines = decode_source(fn, lines)
- except Exception as err:
- print(err)
- num += 1
- continue
-
- for checker in checkerlist:
- if not in_check_pkg and checker.only_pkg:
- continue
- for lno, msg in checker(fn, lines):
- print("%s:%d: %s" % (fn, lno, msg))
- num += 1
- if verbose:
- print()
- if num == 0:
- print("No errors found.")
- else:
- print("%d error%s found." % (num, num > 1 and "s" or ""))
- return int(num > 0)
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/utils/checks.py b/utils/checks.py
new file mode 100644
index 000000000..03104d78a
--- /dev/null
+++ b/utils/checks.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+"""
+ utils.checks
+ ~~~~~~~~~~~~
+
+ Custom, Sphinx-only flake8 plugins.
+
+ :copyright: Copyright 2007-2017 by the Sphinx team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import re
+import sphinx
+
+name_mail_re = r'[\w ]+(<.*?>)?'
+copyright_re = re.compile(r'^ :copyright: Copyright 200\d(-20\d\d)? '
+ r'by %s(, %s)*[,.]$' % (name_mail_re, name_mail_re))
+copyright_2_re = re.compile(r'^ %s(, %s)*[,.]$' %
+ (name_mail_re, name_mail_re))
+license_re = re.compile(r' :license: (.*?).\n')
+
+
+def flake8ext(_func):
+ """Decorate flake8_asserts functions"""
+ _func.name = _func.__name__
+ _func.version = sphinx.__version__
+ _func.code = _func.__name__.upper()
+
+ return _func
+
+
+@flake8ext
+def sphinx_has_header(physical_line, filename, lines, line_number):
+ """Check for correct headers.
+
+ Make sure each Python file has a correct file header including
+ copyright and license information.
+
+ X101 invalid header found
+ """
+ # we have a state machine of sorts so we need to start on line 1. Also,
+ # there's no point checking really short files
+ if line_number != 1 or len(lines) < 10:
+ return
+
+ # this file uses a funky license but unfortunately it's not possible to
+ # ignore specific errors on a file-level basis yet [1]. Simply skip it.
+ #
+ # [1] https://gitlab.com/pycqa/flake8/issues/347
+ if os.path.samefile(filename, './sphinx/util/smartypants.py'):
+ return
+
+ # if the top-level package or not inside the package, ignore
+ mod_name = os.path.splitext(filename)[0].strip('./\\').replace(
+ '/', '.').replace('.__init__', '')
+ if mod_name == 'sphinx' or not mod_name.startswith('sphinx.'):
+ return
+
+ # line number correction
+ offset = 1
+ if lines[0:1] == ['#!/usr/bin/env python\n']:
+ lines = lines[1:]
+ offset = 2
+
+ llist = []
+ doc_open = False
+
+ for lno, line in enumerate(lines):
+ llist.append(line)
+ if lno == 0:
+ if line != '# -*- coding: utf-8 -*-\n':
+ return 0, 'X101 missing coding declaration'
+ elif lno == 1:
+ if line != '"""\n' and line != 'r"""\n':
+ return 0, 'X101 missing docstring begin (""")'
+ else:
+ doc_open = True
+ elif doc_open:
+ if line == '"""\n':
+ # end of docstring
+ if lno <= 4:
+ return 0, 'X101 missing module name in docstring'
+ break
+
+ if line != '\n' and line[:4] != ' ' and doc_open:
+ return 0, 'X101 missing correct docstring indentation'
+
+ if lno == 2:
+ mod_name_len = len(line.strip())
+ if line.strip() != mod_name:
+ return 4, 'X101 wrong module name in docstring heading'
+ elif lno == 3:
+ if line.strip() != mod_name_len * '~':
+ return (4, 'X101 wrong module name underline, should be '
+ '~~~...~')
+ else:
+ return 0, 'X101 missing end and/or start of docstring...'
+
+ # check for copyright and license fields
+ license = llist[-2:-1]
+ if not license or not license_re.match(license[0]):
+ return 0, 'X101 no correct license info'
+
+ offset = -3
+ copyright = llist[offset:offset + 1]
+ while copyright and copyright_2_re.match(copyright[0]):
+ offset -= 1
+ copyright = llist[offset:offset + 1]
+ if not copyright or not copyright_re.match(copyright[0]):
+ return 0, 'X101 no correct copyright info'
diff --git a/utils/jssplitter_generator.py b/utils/jssplitter_generator.py
index 073b7c7ae..b7273a5c1 100644
--- a/utils/jssplitter_generator.py
+++ b/utils/jssplitter_generator.py
@@ -5,9 +5,9 @@ import subprocess
import sys
import six
-# find char codes they are matched with Python's \\w(?u)
+# find char codes they are matched with Python's (?u)\\w
-match = re.compile(r'\w(?u)')
+match = re.compile(r'(?u)\w')
begin = -1
ranges = []
diff --git a/utils/reindent.py b/utils/reindent.py
deleted file mode 100755
index b79657636..000000000
--- a/utils/reindent.py
+++ /dev/null
@@ -1,320 +0,0 @@
-#! /usr/bin/env python
-
-# Released to the public domain, by Tim Peters, 03 October 2000.
-
-"""reindent [-d][-r][-v] [ path ... ]
-
--d (--dryrun) Dry run. Analyze, but don't make any changes to, files.
--r (--recurse) Recurse. Search for all .py files in subdirectories too.
--n (--nobackup) No backup. Does not make a ".bak" file before reindenting.
--v (--verbose) Verbose. Print informative msgs; else no output.
--h (--help) Help. Print this usage information and exit.
-
-Change Python (.py) files to use 4-space indents and no hard tab characters.
-Also trim excess spaces and tabs from ends of lines, and remove empty lines
-at the end of files. Also ensure the last line ends with a newline.
-
-If no paths are given on the command line, reindent operates as a filter,
-reading a single source file from standard input and writing the transformed
-source to standard output. In this case, the -d, -r and -v flags are
-ignored.
-
-You can pass one or more file and/or directory paths. When a directory
-path, all .py files within the directory will be examined, and, if the -r
-option is given, likewise recursively for subdirectories.
-
-If output is not to standard output, reindent overwrites files in place,
-renaming the originals with a .bak extension. If it finds nothing to
-change, the file is left alone. If reindent does change a file, the changed
-file is a fixed-point for future runs (i.e., running reindent on the
-resulting .py file won't change it again).
-
-The hard part of reindenting is figuring out what to do with comment
-lines. So long as the input files get a clean bill of health from
-tabnanny.py, reindent should do a good job.
-
-The backup file is a copy of the one that is being reindented. The ".bak"
-file is generated with shutil.copy(), but some corner cases regarding
-user/group and permissions could leave the backup file more readable that
-you'd prefer. You can always use the --nobackup option to prevent this.
-"""
-from __future__ import print_function
-
-import os
-import sys
-import shutil
-import tokenize
-from six.ranges import range
-
-__version__ = "1"
-
-if sys.version_info >= (3, 0):
- def tokens(readline, tokeneater):
- for token in tokenize.tokenize(readline):
- yield tokeneater(*token)
-else:
- tokens = tokenize.tokenize
-
-verbose = 0
-recurse = 0
-dryrun = 0
-makebackup = True
-
-
-def usage(msg=None):
- if msg is not None:
- print(msg, file=sys.stderr)
- print(__doc__, file=sys.stderr)
-
-
-def errprint(*args):
- sep = ""
- for arg in args:
- sys.stderr.write(sep + str(arg))
- sep = " "
- sys.stderr.write("\n")
-
-
-def main():
- import getopt
- global verbose, recurse, dryrun, makebackup
- try:
- opts, args = getopt.getopt(sys.argv[1:], "drnvh",
- ["dryrun", "recurse", "nobackup", "verbose", "help"])
- except getopt.error as msg:
- usage(msg)
- return
- for o, a in opts:
- if o in ('-d', '--dryrun'):
- dryrun += 1
- elif o in ('-r', '--recurse'):
- recurse += 1
- elif o in ('-n', '--nobackup'):
- makebackup = False
- elif o in ('-v', '--verbose'):
- verbose += 1
- elif o in ('-h', '--help'):
- usage()
- return
- if not args:
- r = Reindenter(sys.stdin)
- r.run()
- r.write(sys.stdout)
- return
- for arg in args:
- check(arg)
-
-
-def check(file):
- if os.path.isdir(file) and not os.path.islink(file):
- if verbose:
- print("listing directory", file)
- names = os.listdir(file)
- for name in names:
- fullname = os.path.join(file, name)
- if ((recurse and os.path.isdir(fullname) and
- not os.path.islink(fullname) and
- not os.path.split(fullname)[1].startswith(".")) or
- name.lower().endswith(".py")):
- check(fullname)
- return
-
- if verbose:
- print("checking", file, "...", end=' ')
- try:
- f = open(file)
- except IOError as msg:
- errprint("%s: I/O Error: %s" % (file, str(msg)))
- return
-
- with f:
- r = Reindenter(f)
- if r.run():
- if verbose:
- print("changed.")
- if dryrun:
- print("But this is a dry run, so leaving it alone.")
- if not dryrun:
- bak = file + ".bak"
- if makebackup:
- shutil.copyfile(file, bak)
- if verbose:
- print("backed up", file, "to", bak)
- with open(file, "w") as f:
- r.write(f)
- if verbose:
- print("wrote new", file)
- return True
- else:
- if verbose:
- print("unchanged.")
- return False
-
-
-def _rstrip(line, JUNK='\n \t'):
- """Return line stripped of trailing spaces, tabs, newlines.
-
- Note that line.rstrip() instead also strips sundry control characters,
- but at least one known Emacs user expects to keep junk like that, not
- mentioning Barry by name or anything <wink>.
- """
-
- i = len(line)
- while i > 0 and line[i - 1] in JUNK:
- i -= 1
- return line[:i]
-
-
-class Reindenter:
- def __init__(self, f):
- self.find_stmt = 1 # next token begins a fresh stmt?
- self.level = 0 # current indent level
-
- # Raw file lines.
- self.raw = f.readlines()
-
- # File lines, rstripped & tab-expanded. Dummy at start is so
- # that we can use tokenize's 1-based line numbering easily.
- # Note that a line is all-blank iff it's "\n".
- self.lines = [_rstrip(line).expandtabs() + "\n"
- for line in self.raw]
- self.lines.insert(0, None)
- self.index = 1 # index into self.lines of next line
-
- # List of (lineno, indentlevel) pairs, one for each stmt and
- # comment line. indentlevel is -1 for comment lines, as a
- # signal that tokenize doesn't know what to do about them;
- # indeed, they're our headache!
- self.stats = []
-
- def run(self):
- tokens(self.getline, self.tokeneater)
- # Remove trailing empty lines.
- lines = self.lines
- while lines and lines[-1] == "\n":
- lines.pop()
- # Sentinel.
- stats = self.stats
- stats.append((len(lines), 0))
- # Map count of leading spaces to # we want.
- have2want = {}
- # Program after transformation.
- after = self.after = []
- # Copy over initial empty lines -- there's nothing to do until
- # we see a line with *something* on it.
- i = stats[0][0]
- after.extend(lines[1:i])
- for i in range(len(stats) - 1):
- thisstmt, thislevel = stats[i]
- nextstmt = stats[i + 1][0]
- have = getlspace(lines[thisstmt])
- want = thislevel * 4
- if want < 0:
- # A comment line.
- if have:
- # An indented comment line. If we saw the same
- # indentation before, reuse what it most recently
- # mapped to.
- want = have2want.get(have, -1)
- if want < 0:
- # Then it probably belongs to the next real stmt.
- for j in range(i + 1, len(stats) - 1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- if have == getlspace(lines[jline]):
- want = jlevel * 4
- break
- if want < 0: # Maybe it's a hanging
- # comment like this one,
- # in which case we should shift it like its base
- # line got shifted.
- for j in range(i - 1, -1, -1):
- jline, jlevel = stats[j]
- if jlevel >= 0:
- want = (have + getlspace(after[jline - 1]) -
- getlspace(lines[jline]))
- break
- if want < 0:
- # Still no luck -- leave it alone.
- want = have
- else:
- want = 0
- assert want >= 0
- have2want[have] = want
- diff = want - have
- if diff == 0 or have == 0:
- after.extend(lines[thisstmt:nextstmt])
- else:
- for line in lines[thisstmt:nextstmt]:
- if diff > 0:
- if line == "\n":
- after.append(line)
- else:
- after.append(" " * diff + line)
- else:
- remove = min(getlspace(line), -diff)
- after.append(line[remove:])
- return self.raw != self.after
-
- def write(self, f):
- f.writelines(self.after)
-
- # Line-getter for tokenize.
- def getline(self):
- if self.index >= len(self.lines):
- line = ""
- else:
- line = self.lines[self.index]
- self.index += 1
- return line
-
- # Line-eater for tokenize.
- def tokeneater(self, type, token, position, end, line,
- INDENT=tokenize.INDENT,
- DEDENT=tokenize.DEDENT,
- NEWLINE=tokenize.NEWLINE,
- COMMENT=tokenize.COMMENT,
- NL=tokenize.NL):
-
- if type == NEWLINE:
- # A program statement, or ENDMARKER, will eventually follow,
- # after some (possibly empty) run of tokens of the form
- # (NL | COMMENT)* (INDENT | DEDENT+)?
- self.find_stmt = 1
-
- elif type == INDENT:
- self.find_stmt = 1
- self.level += 1
-
- elif type == DEDENT:
- self.find_stmt = 1
- self.level -= 1
-
- elif type == COMMENT:
- if self.find_stmt:
- self.stats.append((position[0], -1))
- # but we're still looking for a new stmt, so leave
- # find_stmt alone
-
- elif type == NL:
- pass
-
- elif self.find_stmt:
- # This is the first "real token" following a NEWLINE, so it
- # must be the first token of the next program statement, or an
- # ENDMARKER.
- self.find_stmt = 0
- if line: # not endmarker
- self.stats.append((position[0], self.level))
-
-
-# Count number of leading blanks.
-def getlspace(line):
- i, n = 0, len(line)
- while i < n and line[i] == " ":
- i += 1
- return i
-
-
-if __name__ == '__main__':
- main()
diff --git a/utils/release-checklist b/utils/release-checklist
index 751be382b..f18c20710 100644
--- a/utils/release-checklist
+++ b/utils/release-checklist
@@ -20,7 +20,6 @@ Release checklist
* Check diff by `git diff`
* `git commit -am 'Bump to x.y.z final'`
* `make clean`
-* `python setup.py compile_grammar`
* `python setup.py release bdist_wheel sdist upload --identity=[your key]`
* open https://pypi.python.org/pypi/Sphinx and check there are no obvious errors
* `git tag x.y.z` with version number