summaryrefslogtreecommitdiff
path: root/doc
diff options
context:
space:
mode:
Diffstat (limited to 'doc')
-rw-r--r--doc/DISTUTILS.rst.txt65
-rw-r--r--doc/HOWTO_BUILD_DOCS.rst.txt37
-rw-r--r--doc/HOWTO_DOCUMENT.rst.txt32
-rw-r--r--doc/HOWTO_RELEASE.rst.txt5
-rw-r--r--doc/Makefile29
-rw-r--r--doc/TESTS.rst.txt10
-rwxr-xr-xdoc/cdoc/numpyfilter.py10
-rwxr-xr-xdoc/cython/run_test.py2
-rwxr-xr-xdoc/cython/setup.py2
-rw-r--r--doc/example.py14
-rw-r--r--doc/neps/missing-data.rst2
-rw-r--r--doc/newdtype_example/example.py6
-rw-r--r--doc/newdtype_example/floatint/__init__.py2
-rw-r--r--doc/newdtype_example/setup.py2
-rw-r--r--doc/numpybook/comparison/ctypes/filter.py2
-rw-r--r--doc/numpybook/comparison/ctypes/interface.py2
-rw-r--r--doc/numpybook/comparison/pyrex/setup.py2
-rw-r--r--doc/numpybook/comparison/timing.py6
-rw-r--r--doc/numpybook/comparison/weave/filter.py2
-rw-r--r--doc/numpybook/comparison/weave/inline.py6
-rw-r--r--doc/numpybook/runcode.py20
-rwxr-xr-xdoc/postprocess.py2
-rwxr-xr-xdoc/pyrex/run_test.py2
-rw-r--r--doc/pyrex/setup.py2
-rw-r--r--doc/release/1.3.0-notes.rst5
-rw-r--r--doc/release/1.4.0-notes.rst3
-rw-r--r--doc/release/1.5.0-notes.rst25
-rw-r--r--doc/release/1.6.0-notes.rst31
-rw-r--r--doc/release/1.6.1-notes.rst8
-rw-r--r--doc/release/1.6.2-notes.rst76
-rw-r--r--doc/release/1.7.0-notes.rst33
-rw-r--r--doc/release/1.7.1-notes.rst25
-rw-r--r--doc/release/1.8.0-notes.rst217
m---------doc/scipy-sphinx-theme0
-rw-r--r--doc/source/_static/scipy.css189
-rw-r--r--doc/source/_templates/indexcontent.html2
-rw-r--r--doc/source/_templates/indexsidebar.html1
-rw-r--r--doc/source/_templates/layout.html5
-rw-r--r--doc/source/about.rst4
-rw-r--r--doc/source/bugs.rst20
-rw-r--r--doc/source/conf.py85
-rw-r--r--doc/source/reference/arrays.classes.rst2
-rw-r--r--doc/source/reference/arrays.indexing.rst4
-rw-r--r--doc/source/reference/arrays.ndarray.rst44
-rw-r--r--doc/source/reference/c-api.array.rst44
-rw-r--r--doc/source/reference/c-api.iterator.rst11
-rw-r--r--doc/source/reference/c-api.types-and-structures.rst10
-rw-r--r--doc/source/reference/c-api.ufunc.rst10
-rw-r--r--doc/source/reference/routines.linalg.rst18
-rw-r--r--doc/source/reference/routines.math.rst2
-rw-r--r--doc/source/reference/routines.polynomials.classes.rst34
-rw-r--r--doc/source/reference/ufuncs.rst10
-rw-r--r--doc/source/release.rst13
-rw-r--r--doc/source/scipyshiny_small.pngbin18991 -> 0 bytes
-rw-r--r--doc/source/user/c-info.ufunc-tutorial.rst165
m---------doc/sphinxext0
-rw-r--r--doc/sphinxext/LICENSE.txt94
-rw-r--r--doc/sphinxext/MANIFEST.in2
-rw-r--r--doc/sphinxext/README.txt45
-rw-r--r--doc/sphinxext/numpydoc/__init__.py1
-rw-r--r--doc/sphinxext/numpydoc/comment_eater.py169
-rw-r--r--doc/sphinxext/numpydoc/compiler_unparse.py865
-rw-r--r--doc/sphinxext/numpydoc/docscrape.py525
-rw-r--r--doc/sphinxext/numpydoc/docscrape_sphinx.py239
-rw-r--r--doc/sphinxext/numpydoc/linkcode.py83
-rw-r--r--doc/sphinxext/numpydoc/numpydoc.py178
-rw-r--r--doc/sphinxext/numpydoc/phantom_import.py167
-rw-r--r--doc/sphinxext/numpydoc/plot_directive.py642
-rw-r--r--doc/sphinxext/numpydoc/tests/test_docscrape.py749
-rw-r--r--doc/sphinxext/numpydoc/tests/test_linkcode.py3
-rw-r--r--doc/sphinxext/numpydoc/tests/test_phantom_import.py3
-rw-r--r--doc/sphinxext/numpydoc/tests/test_plot_directive.py3
-rw-r--r--doc/sphinxext/numpydoc/tests/test_traitsdoc.py3
-rw-r--r--doc/sphinxext/numpydoc/traitsdoc.py141
-rw-r--r--doc/sphinxext/setup.py31
-rwxr-xr-xdoc/summarize.py28
-rwxr-xr-xdoc/swig/test/setup.py2
-rwxr-xr-xdoc/swig/test/testArray.py8
-rwxr-xr-xdoc/swig/test/testFarray.py8
-rw-r--r--doc/swig/test/testFortran.py12
-rwxr-xr-xdoc/swig/test/testMatrix.py62
-rwxr-xr-xdoc/swig/test/testTensor.py62
-rwxr-xr-xdoc/swig/test/testVector.py70
83 files changed, 927 insertions, 4628 deletions
diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt
index a2ac0b986..363112ea9 100644
--- a/doc/DISTUTILS.rst.txt
+++ b/doc/DISTUTILS.rst.txt
@@ -10,7 +10,7 @@ SciPy structure
Currently SciPy project consists of two packages:
-- NumPy (previously called SciPy core) --- it provides packages like:
+- NumPy --- it provides packages like:
+ numpy.distutils - extension to Python distutils
+ numpy.f2py - a tool to bind Fortran/C codes to Python
@@ -38,7 +38,6 @@ A SciPy package contains, in addition to its sources, the following
files and directories:
+ ``setup.py`` --- building script
- + ``info.py`` --- contains documentation and import flags
+ ``__init__.py`` --- package initializer
+ ``tests/`` --- directory of unittests
@@ -398,64 +397,32 @@ Useful functions in ``numpy.distutils.misc_util``
+ ``find_executable(exe, path=None)``
+ ``exec_command( command, execute_in='', use_shell=None, use_tee=None, **env )``
-The ``info.py`` file
-''''''''''''''''''''
-
-SciPy package import hooks assume that each package contains a
-``info.py`` file. This file contains overall documentation about the package
-and variables defining the order of package imports, dependency
-relations between packages, etc.
-
-On import, the following information will be looked for in ``info.py``:
-
-__doc__
- The documentation string of the package.
-
-__doc_title__
- The title of the package. If not defined then the first non-empty
- line of ``__doc__`` will be used.
-
-__all__
- List of symbols that package exports. Optional.
-
-global_symbols
- List of names that should be imported to numpy name space. To import
- all symbols to ``numpy`` namespace, define ``global_symbols=['*']``.
-
-depends
- List of names that the package depends on. Prefix ``numpy.``
- will be automatically added to package names. For example,
- use ``testing`` to indicate dependence on ``numpy.testing``
- package. Default value is ``[]``.
-
-postpone_import
- Boolean variable indicating that importing the package should be
- postponed until the first attempt of its usage. Default value is ``False``.
- Depreciated.
-
The ``__init__.py`` file
''''''''''''''''''''''''
-To speed up the import time and minimize memory usage, numpy
-uses ``ppimport`` hooks to transparently postpone importing large modules,
-which might not be used during the SciPy session. In order to
-have access to the documentation of all SciPy packages, including
-postponed packages, the docstring from ``info.py`` is imported
-into ``__init__.py``.
+The header of a typical SciPy ``__init__.py`` is::
-The header of a typical ``__init__.py`` is::
+ """
+ Package docstring, typically with a brief description and function listing.
+ """
+
+ # py3k related imports
+ from __future__ import division, print_function, absolute_import
- #
- # Package ... - ...
- #
-
- from info import __doc__
+ # import functions into module namespace
+ from .subpackage import *
...
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
+Note that NumPy submodules still use a file named ``info.py`` in which the
+module docstring and ``__all__`` dict are defined. These files will be removed
+at some point.
+
Extra features in NumPy Distutils
'''''''''''''''''''''''''''''''''
diff --git a/doc/HOWTO_BUILD_DOCS.rst.txt b/doc/HOWTO_BUILD_DOCS.rst.txt
index 3b677263b..b43996899 100644
--- a/doc/HOWTO_BUILD_DOCS.rst.txt
+++ b/doc/HOWTO_BUILD_DOCS.rst.txt
@@ -18,6 +18,12 @@ in several different formats.
Instructions
------------
+If you obtained Numpy via git, get also the git submodules that contain
+additional parts required for building the documentation::
+
+ git submodule init
+ git submodule update
+
Since large parts of the main documentation are stored in
docstrings, you will need to first build Numpy, and install it so
that the correct version is imported by
@@ -62,35 +68,8 @@ Numpy's documentation uses several custom extensions to Sphinx. These
are shipped in the ``sphinxext/`` directory, and are automatically
enabled when building Numpy's documentation.
-However, if you want to make use of these extensions in third-party
-projects, they are available on PyPi_ as the numpydoc_ package, and
-can be installed with::
-
- easy_install numpydoc
-
-In addition, you will need to add::
-
- extensions = ['numpydoc']
-
-to ``conf.py`` in your Sphinx documentation.
-
-The following extensions are available:
-
- - ``numpydoc``: support for Numpy docstring format in Sphinx.
-
- - ``numpydoc.only_directives``: (DEPRECATED)
-
- - ``numpydoc.plot_directives``: Adaptation of Matplotlib's ``plot::``
- directive. Note that this implementation may still undergo severe
- changes or be eventually deprecated.
-
- - ``numpydoc.autosummary``: (DEPRECATED) An ``autosummary::`` directive.
- Available in Sphinx 0.6.2 and (to-be) 1.0 as ``sphinx.ext.autosummary``,
- and it the Sphinx 1.0 version is recommended over that included in
- Numpydoc.
-
- - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
-
+If you want to make use of these extensions in third-party
+projects, they are available on PyPi_ as the numpydoc_ package.
.. _PyPi: http://python.org/pypi
.. _numpydoc: http://python.org/pypi/numpydoc
diff --git a/doc/HOWTO_DOCUMENT.rst.txt b/doc/HOWTO_DOCUMENT.rst.txt
index 278797c9a..13e9b2607 100644
--- a/doc/HOWTO_DOCUMENT.rst.txt
+++ b/doc/HOWTO_DOCUMENT.rst.txt
@@ -225,14 +225,30 @@ The sections of the docstring are:
5. **Returns**
- Explanation of the returned values and their types, of the same
- format as **parameters**.
+ Explanation of the returned values and their types. Similar to the
+ **parameters** section, except the name of each return value is optional.
+ The type of each return value is always required::
+
+ Returns
+ -------
+ int
+ Description of anonymous integer return value.
+
+ If both the name and type are specified, the **returns** section takes the
+ same form as the **parameters** section::
+
+ Returns
+ -------
+ err_code : int
+ Non-zero value indicates error code, or zero on success.
+ err_msg : str or None
+ Human readable error message, or None on success.
6. **Other parameters**
An optional section used to describe infrequently used parameters.
It should only be used if a function has a large number of keyword
- prameters, to prevent cluttering the **parameters** section.
+ parameters, to prevent cluttering the **parameters** section.
7. **Raises**
@@ -544,20 +560,18 @@ Other points to keep in mind
----------------------------
* Equations : as discussed in the **Notes** section above, LaTeX formatting
should be kept to a minimum. Often it's possible to show equations as
- Python code or pseudo-code instead, which is much better readable in a
+ Python code or pseudo-code instead, which is much more readable in a
terminal. For inline display use double backticks (like ``y = np.sin(x)``).
For display with blank lines above and below, use a double colon and indent
the code, like::
end of previous sentence::
- y = np.sin(x)
+ y = np.sin(x)
* Notes and Warnings : If there are points in the docstring that deserve
special emphasis, the reST directives for a note or warning can be used
- in the vicinity of the context of the warning (inside a section). Syntax:
-
- ::
+ in the vicinity of the context of the warning (inside a section). Syntax::
.. warning:: Warning text.
@@ -601,7 +615,7 @@ Conclusion
`An example <http://github.com/numpy/numpy/blob/master/doc/example.py>`_ of the
format shown here is available. Refer to `How to Build API/Reference
Documentation
-<http://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.txt>`_
+<http://github.com/numpy/numpy/blob/master/doc/HOWTO_BUILD_DOCS.rst.txt>`_
on how to use Sphinx_ to build the manual.
This document itself was written in ReStructuredText, and may be converted to
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index d6746f4d6..f44d17aa9 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -188,7 +188,10 @@ Release process
Check the buildbot
------------------
-The buildbot is located at `<http://buildbot.scipy.org/>`_.
+For Numpy, check the buildbot located at `<http://buildbot.scipy.org/>`_.
+Also check that the benchmarks (which are not run as part of the test suite)
+work. Note that at the moment this applies only to Scipy; Numpy doesn't have
+a benchmark suite. So, run ``scipy.bench()``.
Agree on a release schedule
---------------------------
diff --git a/doc/Makefile b/doc/Makefile
index 52dd1ef06..d8c1ab918 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,7 +1,7 @@
# Makefile for Sphinx documentation
#
-PYVER =
+PYVER = 2.7
PYTHON = python$(PYVER)
# You can set these variables from the command line.
@@ -24,13 +24,13 @@ ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " html to make standalone HTML files"
+ @echo " html-scipyorg to make standalone HTML files with scipy.org theming"
@echo " pickle to make pickle files (usable by e.g. sphinx-web)"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " changes to make an overview over all changed/added/deprecated items"
@echo " linkcheck to check all external links for integrity"
@echo " dist PYVER=... to make a distribution-ready tree"
- @echo " upload USER=... to upload results to docs.scipy.org"
@echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs"
clean:
@@ -56,34 +56,21 @@ gitwash-update:
# - Different versions of easy_install install to different directories (!)
#
+
INSTALL_DIR = $(CURDIR)/build/inst-dist/
INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages
DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" SPHINXOPTS="$(SPHINXOPTS)"
-UPLOAD_TARGET = $(USER)@docs.scipy.org:/home/docserver/www-root/doc/numpy/
-
-upload:
- @test -e build/dist || { echo "make dist is required first"; exit 1; }
- @test output-is-fine -nt build/dist || { \
- echo "Review the output in build/dist, and do 'touch output-is-fine' before uploading."; exit 1; }
- rsync -r -z --delete-after -p \
- $(if $(shell test -f build/dist/numpy-ref.pdf && echo "y"),, \
- --exclude '**-ref.pdf' --exclude '**-user.pdf') \
- $(if $(shell test -f build/dist/numpy-chm.zip && echo "y"),, \
- --exclude '**-chm.zip') \
- build/dist/ $(UPLOAD_TARGET)
-
dist:
make $(DIST_VARS) real-dist
-real-dist: dist-build html
+real-dist: dist-build html html-scipyorg
test -d build/latex || make latex
make -C build/latex all-pdf
-test -d build/htmlhelp || make htmlhelp-build
-rm -rf build/dist
- cp -r build/html build/dist
- perl -pi -e 's#^\s*(<li><a href=".*?">NumPy.*?Manual.*?&raquo;</li>)#<li><a href="/">Numpy and Scipy Documentation</a> &raquo;</li>#;' build/dist/*.html build/dist/*/*.html build/dist/*/*/*.html
+ cp -r build/html-scipyorg build/dist
cd build/html && zip -9r ../dist/numpy-html.zip .
cp build/latex/numpy-*.pdf build/dist
-zip build/dist/numpy-chm.zip build/htmlhelp/numpy.chm
@@ -114,6 +101,12 @@ html: generate
@echo
@echo "Build finished. The HTML pages are in build/html."
+html-scipyorg:
+ mkdir -p build/html build/doctrees
+ $(SPHINXBUILD) -t scipyorg -b html $(ALLSPHINXOPTS) build/html-scipyorg $(FILES)
+ @echo
+ @echo "Build finished. The HTML pages are in build/html."
+
pickle: generate
mkdir -p build/pickle build/doctrees
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle $(FILES)
diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt
index 92f236f5a..2b66b5caa 100644
--- a/doc/TESTS.rst.txt
+++ b/doc/TESTS.rst.txt
@@ -68,7 +68,7 @@ you'd like to become part of SciPy, please write the tests as you
develop the package. Also since much of SciPy is legacy code that was
originally written without unit tests, there are still several modules
that don't have tests yet. Please feel free to choose one of these
-modules to develop test for either after or even as you read through
+modules and develop tests for it as you read through
this introduction.
Writing your own tests
@@ -199,9 +199,9 @@ with test generators::
for i in range(0,4,2):
yield check_even, i, i*3
-Note that 'check_even' is not itself a test (no 'test' in the name),
-but 'test_evens' is a generator that returns a series of tests, using
-'check_even', across a range of inputs.
+Note that ``check_even`` is not itself a test (no 'test' in the name),
+but ``test_evens`` is a generator that returns a series of tests, using
+``check_even``, across a range of inputs.
A problem with generator tests can be that if a test is failing, it's
hard to see for which parameters. To avoid this problem, ensure that:
@@ -220,7 +220,7 @@ hard to see for which parameters. To avoid this problem, ensure that:
Doctests
--------
-Doctests are a convenient way of documenting the behavior a function
+Doctests are a convenient way of documenting the behavior of a function
and allowing that behavior to be tested at the same time. The output
of an interactive Python session can be included in the docstring of a
function, and the test framework can run the example and compare the
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
index 4a93141dd..32c6dffcb 100755
--- a/doc/cdoc/numpyfilter.py
+++ b/doc/cdoc/numpyfilter.py
@@ -6,14 +6,18 @@ Interpret C comments as ReStructuredText, and replace them by the HTML output.
Also, add Doxygen /** and /**< syntax automatically where appropriate.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import sys
import re
import os
import textwrap
import optparse
-import cPickle as pickle
+
+if sys.version_info[0] >= 3:
+ import pickle
+else:
+ import cPickle as pickle
CACHE_FILE = 'build/rst-cache.pck'
@@ -43,7 +47,7 @@ def filter_comment(text):
if text.startswith('UFUNC_API'):
text = text[9:].strip()
- html = render_html(text)
+ html = render_html(text)
return html
def process_match(m, cache=None):
diff --git a/doc/cython/run_test.py b/doc/cython/run_test.py
index 64918ed9c..5ffd7e80f 100755
--- a/doc/cython/run_test.py
+++ b/doc/cython/run_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
from numpyx import test
test()
diff --git a/doc/cython/setup.py b/doc/cython/setup.py
index 0b063fef9..fe122d4db 100755
--- a/doc/cython/setup.py
+++ b/doc/cython/setup.py
@@ -5,7 +5,7 @@ Note: Cython is the successor project to Pyrex. For more information, see
http://cython.org.
"""
-from __future__ import division
+from __future__ import division, print_function
from distutils.core import setup
from distutils.extension import Extension
diff --git a/doc/example.py b/doc/example.py
index 5c9d3d27e..03be59d18 100644
--- a/doc/example.py
+++ b/doc/example.py
@@ -8,7 +8,7 @@ extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceeded by a blank line.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import os # standard library imports first
@@ -57,14 +57,12 @@ def foo(var1, var2, long_var_name='hi') :
Returns
-------
+ type
+ Explanation of anonymous return value of type ``type``.
describe : type
- Explanation
- output : type
- Explanation
- tuple : type
- Explanation
- items : type
- even more explaining
+ Explanation of return value named `describe`.
+ out : type
+ Explanation of `out`.
Other Parameters
----------------
diff --git a/doc/neps/missing-data.rst b/doc/neps/missing-data.rst
index 6f124890b..338a8da96 100644
--- a/doc/neps/missing-data.rst
+++ b/doc/neps/missing-data.rst
@@ -320,7 +320,7 @@ A manual loop through a masked array like::
>>> a[3] = np.NA
>>> a
array([ 0., 1., 2., NA, 4.], maskna=True)
- >>> for i in xrange(len(a)):
+ >>> for i in range(len(a)):
... a[i] = np.log(a[i])
...
__main__:2: RuntimeWarning: divide by zero encountered in log
diff --git a/doc/newdtype_example/example.py b/doc/newdtype_example/example.py
index b9ccc465b..6e9bf4334 100644
--- a/doc/newdtype_example/example.py
+++ b/doc/newdtype_example/example.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import floatint.floatint as ff
import numpy as np
@@ -12,7 +12,7 @@ g = np.array([1,2,3,4,5,6,7,8]).view(ff.floatint_type)
# Now, the elements will be the scalar type associated
# with the ndarray.
-print g[0]
-print type(g[1])
+print(g[0])
+print(type(g[1]))
# Now, you need to register ufuncs and more arrfuncs to do useful things...
diff --git a/doc/newdtype_example/floatint/__init__.py b/doc/newdtype_example/floatint/__init__.py
index 5c6aac4d6..ebede2753 100644
--- a/doc/newdtype_example/floatint/__init__.py
+++ b/doc/newdtype_example/floatint/__init__.py
@@ -1,3 +1,3 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
diff --git a/doc/newdtype_example/setup.py b/doc/newdtype_example/setup.py
index 2d9ed6c99..494343b28 100644
--- a/doc/newdtype_example/setup.py
+++ b/doc/newdtype_example/setup.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, print_function
from numpy.distutils.core import setup
diff --git a/doc/numpybook/comparison/ctypes/filter.py b/doc/numpybook/comparison/ctypes/filter.py
index ebc274f31..13d768027 100644
--- a/doc/numpybook/comparison/ctypes/filter.py
+++ b/doc/numpybook/comparison/ctypes/filter.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
__all__ = ['filter2d']
diff --git a/doc/numpybook/comparison/ctypes/interface.py b/doc/numpybook/comparison/ctypes/interface.py
index ab1b14731..34e34ca71 100644
--- a/doc/numpybook/comparison/ctypes/interface.py
+++ b/doc/numpybook/comparison/ctypes/interface.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
__all__ = ['add', 'filter2d']
diff --git a/doc/numpybook/comparison/pyrex/setup.py b/doc/numpybook/comparison/pyrex/setup.py
index f0c764955..3fb69a705 100644
--- a/doc/numpybook/comparison/pyrex/setup.py
+++ b/doc/numpybook/comparison/pyrex/setup.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-from __future__ import division
+from __future__ import division, print_function
from distutils.core import setup
from distutils.extension import Extension
diff --git a/doc/numpybook/comparison/timing.py b/doc/numpybook/comparison/timing.py
index 18004855e..0d47c7eba 100644
--- a/doc/numpybook/comparison/timing.py
+++ b/doc/numpybook/comparison/timing.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import timeit
@@ -54,9 +54,9 @@ path = sys.path
for kind in ['f2py']:#['ctypes', 'pyrex', 'weave', 'f2py']:
res[kind] = []
sys.path = ['/Users/oliphant/numpybook/%s' % (kind,)] + path
- print sys.path
+ print(sys.path)
for n in N:
- print "%s - %d" % (kind, n)
+ print("%s - %d" % (kind, n))
t = timeit.Timer(eval('%s_run'%kind), eval('%s_pre %% (%d,%d)'%(kind,n,n)))
mytime = min(t.repeat(3,100))
res[kind].append(mytime)
diff --git a/doc/numpybook/comparison/weave/filter.py b/doc/numpybook/comparison/weave/filter.py
index 519a54c5b..bbdc9ea85 100644
--- a/doc/numpybook/comparison/weave/filter.py
+++ b/doc/numpybook/comparison/weave/filter.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
from scipy import weave, zeros_like
diff --git a/doc/numpybook/comparison/weave/inline.py b/doc/numpybook/comparison/weave/inline.py
index 1f98b7a62..3906553ed 100644
--- a/doc/numpybook/comparison/weave/inline.py
+++ b/doc/numpybook/comparison/weave/inline.py
@@ -1,4 +1,4 @@
-from __future__ import division
+from __future__ import division, absolute_import, print_function
from scipy import weave
from numpy import rand, zeros_like
@@ -40,8 +40,8 @@ def arr(a):
return b
a = [None]*10
-print example1(a)
-print a
+print(example1(a))
+print(a)
a = rand(512,512)
b = arr(a)
diff --git a/doc/numpybook/runcode.py b/doc/numpybook/runcode.py
index 92784d34e..456846cad 100644
--- a/doc/numpybook/runcode.py
+++ b/doc/numpybook/runcode.py
@@ -14,20 +14,20 @@ Options:
-n name of code section (default MyCode)
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import sys
import optparse
-import cStringIO
+import io
import re
import os
newre = re.compile(r"\\begin_inset Note.*PYNEW\s+\\end_inset", re.DOTALL)
def getoutput(tstr, dic):
- print "\n\nRunning..."
- print tstr,
- tempstr = cStringIO.StringIO()
+ print("\n\nRunning...")
+ print(tstr, end=' ')
+ tempstr = io.StringIO()
sys.stdout = tempstr
code = compile(tstr, '<input>', 'exec')
try:
@@ -44,8 +44,8 @@ def getoutput(tstr, dic):
else:
res = tempstr.getvalue() + '\n' + repr(res)
if res != '':
- print "\nOutput is"
- print res,
+ print("\nOutput is")
+ print(res, end=' ')
return res
# now find the code in the code segment
@@ -82,7 +82,7 @@ def getnewcodestr(substr, dic):
def runpycode(lyxstr, name='MyCode'):
schobj = re.compile(r"\\layout %s\s+>>> " % name)
- outstr = cStringIO.StringIO()
+ outstr = io.StringIO()
num = 0
indx = []
for it in schobj.finditer(lyxstr):
@@ -90,7 +90,7 @@ def runpycode(lyxstr, name='MyCode'):
num += 1
if num == 0:
- print "Nothing found for %s" % name
+ print("Nothing found for %s" % name)
return lyxstr
start = 0
@@ -141,7 +141,7 @@ def main(args):
fid = file(args[0])
str = fid.read()
fid.close()
- print "Processing %s" % options.name
+ print("Processing %s" % options.name)
newstr = runpycode(str, options.name)
fid = file(args[0],'w')
fid.write(newstr)
diff --git a/doc/postprocess.py b/doc/postprocess.py
index 512070193..f0ca22cd5 100755
--- a/doc/postprocess.py
+++ b/doc/postprocess.py
@@ -6,7 +6,7 @@ Post-processes HTML and Latex files output by Sphinx.
MODE is either 'html' or 'tex'.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import re, optparse
diff --git a/doc/pyrex/run_test.py b/doc/pyrex/run_test.py
index 64918ed9c..5ffd7e80f 100755
--- a/doc/pyrex/run_test.py
+++ b/doc/pyrex/run_test.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
from numpyx import test
test()
diff --git a/doc/pyrex/setup.py b/doc/pyrex/setup.py
index 648bad9f4..361ccb183 100644
--- a/doc/pyrex/setup.py
+++ b/doc/pyrex/setup.py
@@ -12,7 +12,7 @@ http://www.scipy.org/Cookbook/Pyrex_and_NumPy
http://www.scipy.org/Cookbook/ArrayStruct_and_Pyrex
"""
-from __future__ import division
+from __future__ import division, print_function
from distutils.core import setup
from distutils.extension import Extension
diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst
index fc7edddfe..73743bbcf 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/release/1.3.0-notes.rst
@@ -1,6 +1,5 @@
-=========================
NumPy 1.3.0 Release Notes
-=========================
+*************************
This minor includes numerous bug fixes, official python 2.6 support, and
several new features such as generalized ufuncs.
@@ -180,7 +179,7 @@ The following functions have been added to the multiarray C API:
* PyArray_GetEndianness: to get runtime endianness
Ufunc API
-~~~~~~~~~~~~~~
+~~~~~~~~~
The following functions have been added to the ufunc API:
diff --git a/doc/release/1.4.0-notes.rst b/doc/release/1.4.0-notes.rst
index 5429f8e76..9e3819229 100644
--- a/doc/release/1.4.0-notes.rst
+++ b/doc/release/1.4.0-notes.rst
@@ -1,6 +1,5 @@
-=========================
NumPy 1.4.0 Release Notes
-=========================
+*************************
This minor includes numerous bug fixes, as well as a few new features. It
is backward compatible with 1.3.0 release.
diff --git a/doc/release/1.5.0-notes.rst b/doc/release/1.5.0-notes.rst
index 94cf89ff7..e9e36f0de 100644
--- a/doc/release/1.5.0-notes.rst
+++ b/doc/release/1.5.0-notes.rst
@@ -1,18 +1,17 @@
-=========================
NumPy 1.5.0 Release Notes
-=========================
+*************************
Highlights
==========
Python 3 compatibility
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~
This is the first NumPy release which is compatible with Python 3. Support for
Python 3 and Python 2 is done from a single code base. Extensive notes on
changes can be found at
-`<http://projects.scipy.org/numpy/browser/trunk/doc/Py3K.txt>`_.
+`<http://projects.scipy.org/numpy/browser/trunk/doc/Py3K.txt>`_.
Note that the Numpy testing framework relies on nose, which does not have a
Python 3 compatible release yet. A working Python 3 branch of nose can be found
@@ -21,7 +20,7 @@ at `<http://bitbucket.org/jpellerin/nose3/>`_ however.
Porting of SciPy to Python 3 is expected to be completed soon.
:pep:`3118` compatibility
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
The new buffer protocol described by PEP 3118 is fully supported in this
version of Numpy. On Python versions >= 2.6 Numpy arrays expose the buffer
@@ -33,7 +32,7 @@ New features
============
Warning on casting complex to real
-----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Numpy now emits a `numpy.ComplexWarning` when a complex number is cast
into a real number. For example:
@@ -50,7 +49,7 @@ turned off in the standard way:
>>> warnings.simplefilter("ignore", np.ComplexWarning)
Dot method for ndarrays
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
Ndarrays now have the dot product also as a method, which allows writing
chains of matrix products as
@@ -62,7 +61,7 @@ instead of the longer alternative
>>> np.dot(a, np.dot(b, c))
linalg.slogdet function
------------------------
+~~~~~~~~~~~~~~~~~~~~~~~
The slogdet function returns the sign and logarithm of the determinant
of a matrix. Because the determinant may involve the product of many
@@ -70,7 +69,7 @@ small/large values, the result is often more accurate than that obtained
by simple multiplication.
new header
-----------
+~~~~~~~~~~
The new header file ndarraytypes.h contains the symbols from
ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and
@@ -85,7 +84,7 @@ Changes
=======
polynomial.polynomial
----------------------
+~~~~~~~~~~~~~~~~~~~~~
* The polyint and polyder functions now check that the specified number
integrations or derivations is a non-negative integer. The number 0 is
@@ -101,7 +100,7 @@ polynomial.polynomial
* The polymulx function was added.
polynomial.chebyshev
---------------------
+~~~~~~~~~~~~~~~~~~~~
* The chebint and chebder functions now check that the specified number
integrations or derivations is a non-negative integer. The number 0 is
@@ -119,13 +118,13 @@ polynomial.chebyshev
histogram
----------
+~~~~~~~~~
After a two years transition period, the old behavior of the histogram function
has been phased out, and the "new" keyword has been removed.
correlate
----------
+~~~~~~~~~
The old behavior of correlate was deprecated in 1.4.0, the new behavior (the
usual definition for cross-correlation) is now the default.
diff --git a/doc/release/1.6.0-notes.rst b/doc/release/1.6.0-notes.rst
index c5f53a0eb..e2c71e35c 100644
--- a/doc/release/1.6.0-notes.rst
+++ b/doc/release/1.6.0-notes.rst
@@ -1,6 +1,5 @@
-=========================
NumPy 1.6.0 Release Notes
-=========================
+*************************
This release includes several new features as well as numerous bug fixes and
improved documentation. It is backward compatible with the 1.5.0 release, and
@@ -21,7 +20,7 @@ New features
============
New 16-bit floating point type
-------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This release adds support for the IEEE 754-2008 binary16 format, available as
the data type ``numpy.half``. Within Python, the type behaves similarly to
@@ -30,7 +29,7 @@ half-float API.
New iterator
-------------
+~~~~~~~~~~~~
A new iterator has been added, replacing the functionality of the
existing iterator and multi-iterator with a single object and API.
@@ -43,7 +42,7 @@ iterator.
Legendre, Laguerre, Hermite, HermiteE polynomials in ``numpy.polynomial``
--------------------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Extend the number of polynomials available in the polynomial package. In
addition, a new ``window`` attribute has been added to the classes in
@@ -54,7 +53,7 @@ of values without playing unnatural tricks with the domain.
Fortran assumed shape array and size function support in ``numpy.f2py``
------------------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
F2py now supports wrapping Fortran 90 routines that use assumed shape
arrays. Before such routines could be called from Python but the
@@ -68,7 +67,7 @@ that use two argument ``size`` function in dimension specifications.
Other new functions
--------------------
+~~~~~~~~~~~~~~~~~~~
``numpy.ravel_multi_index`` : Converts a multi-index tuple into
an array of flat indices, applying boundary modes to the indices.
@@ -91,14 +90,14 @@ Changes
=======
``default error handling``
---------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~
The default error handling has been change from ``print`` to ``warn`` for
all except for ``underflow``, which remains as ``ignore``.
``numpy.distutils``
--------------------
+~~~~~~~~~~~~~~~~~~~
Several new compilers are supported for building Numpy: the Portland Group
Fortran compiler on OS X, the PathScale compiler suite and the 64-bit Intel C
@@ -106,7 +105,7 @@ compiler on Linux.
``numpy.testing``
------------------
+~~~~~~~~~~~~~~~~~
The testing framework gained ``numpy.testing.assert_allclose``, which provides
a more convenient way to compare floating point arrays than
@@ -114,7 +113,7 @@ a more convenient way to compare floating point arrays than
``C API``
----------
+~~~~~~~~~
In addition to the APIs for the new iterator and half data type, a number
of other additions have been made to the C API. The type promotion
@@ -138,7 +137,7 @@ Removed features
================
``numpy.fft``
--------------
+~~~~~~~~~~~~~
The functions `refft`, `refft2`, `refftn`, `irefft`, `irefft2`, `irefftn`,
which were aliases for the same functions without the 'e' in the name, were
@@ -146,21 +145,21 @@ removed.
``numpy.memmap``
-----------------
+~~~~~~~~~~~~~~~~
The `sync()` and `close()` methods of memmap were removed. Use `flush()` and
"del memmap" instead.
``numpy.lib``
--------------
+~~~~~~~~~~~~~
The deprecated functions ``numpy.unique1d``, ``numpy.setmember1d``,
``numpy.intersect1d_nu`` and ``numpy.lib.ufunclike.log2`` were removed.
``numpy.ma``
-------------
+~~~~~~~~~~~~
Several deprecated items were removed from the ``numpy.ma`` module::
@@ -171,7 +170,7 @@ Several deprecated items were removed from the ``numpy.ma`` module::
``numpy.distutils``
--------------------
+~~~~~~~~~~~~~~~~~~~
The ``numpy.get_numpy_include`` function was removed, use ``numpy.get_include``
instead.
diff --git a/doc/release/1.6.1-notes.rst b/doc/release/1.6.1-notes.rst
index 5f59cb743..2f1aa6b04 100644
--- a/doc/release/1.6.1-notes.rst
+++ b/doc/release/1.6.1-notes.rst
@@ -1,12 +1,11 @@
-=========================
NumPy 1.6.1 Release Notes
-=========================
+*************************
This is a bugfix only release in the 1.6.x series.
-Issues fixed
-------------
+Issues Fixed
+============
#1834 einsum fails for specific shapes
#1837 einsum throws nan or freezes python for specific array shapes
@@ -19,4 +18,3 @@ Issues fixed
#1874 f2py: fix --include_paths bug
#1749 Fix ctypes.load_library()
#1895/1896 iter: writeonly operands weren't always being buffered correctly
-
diff --git a/doc/release/1.6.2-notes.rst b/doc/release/1.6.2-notes.rst
index 7b62e6c93..1222fc39c 100644
--- a/doc/release/1.6.2-notes.rst
+++ b/doc/release/1.6.2-notes.rst
@@ -1,14 +1,15 @@
-=========================
NumPy 1.6.2 Release Notes
-=========================
+*************************
This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy
1.7.0 release, this release contains far more fixes than a regular NumPy bugfix
release. It also includes a number of documentation and build improvements.
+Issues fixed
+============
-``numpy.core`` issues fixed
----------------------------
+``numpy.core``
+~~~~~~~~~~~~~~
#2063 make unique() return consistent index
#1138 allow creating arrays from empty buffers or empty slices
@@ -29,8 +30,8 @@ release. It also includes a number of documentation and build improvements.
#2073 fix float16 __array_interface__ bug
-``numpy.lib`` issues fixed
---------------------------
+``numpy.lib``
+~~~~~~~~~~~~~
#2048 break reference cycle in NpzFile
#1573 savetxt() now handles complex arrays
@@ -42,33 +43,8 @@ release. It also includes a number of documentation and build improvements.
#1918 use Py_TYPE to access ob_type, so it works also on Py3
-``numpy.f2py`` changes
-----------------------
-
-ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
-BLD: Improve reporting of fcompiler value
-BUG: Fix f2py test_kind.py test
-
-
-``numpy.poly`` changes
-----------------------
-
-ENH: Add some tests for polynomial printing
-ENH: Add companion matrix functions
-DOC: Rearrange the polynomial documents
-BUG: Fix up links to classes
-DOC: Add version added to some of the polynomial package modules
-DOC: Document xxxfit functions in the polynomial package modules
-BUG: The polynomial convenience classes let different types interact
-DOC: Document the use of the polynomial convenience classes
-DOC: Improve numpy reference documentation of polynomial classes
-ENH: Improve the computation of polynomials from roots
-STY: Code cleanup in polynomial [*]fromroots functions
-DOC: Remove references to cast and NA, which were added in 1.7
-
-
-``numpy.distutils`` issues fixed
--------------------------------
+``numpy.distutils``
+~~~~~~~~~~~~~~~~~~~
#1261 change compile flag on AIX from -O5 to -O3
#1377 update HP compiler flags
@@ -78,13 +54,39 @@ BLD: raise a clearer warning in case of building without cleaning up first
BLD: follow build_ext coding convention in build_clib
BLD: fix up detection of Intel CPU on OS X in system_info.py
BLD: add support for the new X11 directory structure on Ubuntu & co.
-BLD: add ufsparse to the libraries search path.
-BLD: add 'pgfortran' as a valid compiler in the Portland Group
+BLD: add ufsparse to the libraries search path.
+BLD: add 'pgfortran' as a valid compiler in the Portland Group
BLD: update version match regexp for IBM AIX Fortran compilers.
-``numpy.random`` issues fixed
------------------------------
+``numpy.random``
+~~~~~~~~~~~~~~~~
BUG: Use npy_intp instead of long in mtrand
+Changes
+=======
+
+``numpy.f2py``
+~~~~~~~~~~~~~~
+
+ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
+BLD: Improve reporting of fcompiler value
+BUG: Fix f2py test_kind.py test
+
+
+``numpy.poly``
+~~~~~~~~~~~~~~
+
+ENH: Add some tests for polynomial printing
+ENH: Add companion matrix functions
+DOC: Rearrange the polynomial documents
+BUG: Fix up links to classes
+DOC: Add version added to some of the polynomial package modules
+DOC: Document xxxfit functions in the polynomial package modules
+BUG: The polynomial convenience classes let different types interact
+DOC: Document the use of the polynomial convenience classes
+DOC: Improve numpy reference documentation of polynomial classes
+ENH: Improve the computation of polynomials from roots
+STY: Code cleanup in polynomial [*]fromroots functions
+DOC: Remove references to cast and NA, which were added in 1.7
diff --git a/doc/release/1.7.0-notes.rst b/doc/release/1.7.0-notes.rst
index 26b098b16..754e282b0 100644
--- a/doc/release/1.7.0-notes.rst
+++ b/doc/release/1.7.0-notes.rst
@@ -1,6 +1,5 @@
-=========================
NumPy 1.7.0 Release Notes
-=========================
+*************************
This release includes several new features as well as numerous bug fixes and
refactorings. It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last
@@ -67,7 +66,7 @@ New features
============
Reduction UFuncs Generalize axis= Parameter
--------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Any ufunc.reduce function call, as well as other reductions like sum, prod,
any, all, max and min support the ability to choose a subset of the axes to
@@ -76,7 +75,7 @@ axis=# to pick a single axis. Now, one can also say axis=(#,#) to pick a
list of axes for reduction.
Reduction UFuncs New keepdims= Parameter
-----------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There is a new keepdims= parameter, which if set to True, doesn't throw
away the reduction axes but instead sets them to have size one. When this
@@ -84,7 +83,7 @@ option is set, the reduction result will broadcast correctly to the
original operand which was reduced.
Datetime support
-----------------
+~~~~~~~~~~~~~~~~
.. note:: The datetime API is *experimental* in 1.7.0, and may undergo changes
in future versions of NumPy.
@@ -105,26 +104,26 @@ The notes in `doc/source/reference/arrays.datetime.rst <https://github.com/numpy
consulted for more details.
Custom formatter for printing arrays
-------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
See the new ``formatter`` parameter of the ``numpy.set_printoptions``
function.
New function numpy.random.choice
----------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A generic sampling function has been added which will generate samples from
a given array-like. The samples can be with or without replacement, and
with uniform or given non-uniform probabilities.
New function isclose
---------------------
+~~~~~~~~~~~~~~~~~~~~
Returns a boolean array where two arrays are element-wise equal within a
tolerance. Both relative and absolute tolerance can be specified.
Preliminary multi-dimensional support in the polynomial package
----------------------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Axis keywords have been added to the integration and differentiation
functions and a tensor keyword was added to the evaluation functions.
@@ -135,7 +134,7 @@ pseudo-Vandermonde matrices that can be used for fitting.
Ability to pad rank-n arrays
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A pad module containing functions for padding n-dimensional arrays has been
added. The various private padding functions are exposed as options to a
@@ -149,18 +148,18 @@ Current modes are ``constant``, ``edge``, ``linear_ramp``, ``maximum``,
New argument to searchsorted
-----------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The function searchsorted now accepts a 'sorter' argument that is a
permutation array that sorts the array to search.
Build system
-------------
+~~~~~~~~~~~~
Added experimental support for the AArch64 architecture.
C API
------
+~~~~~
New function ``PyArray_RequireWriteable`` provides a consistent interface
for checking array writeability -- any C code which works with arrays whose
@@ -173,7 +172,7 @@ Changes
=======
General
--------
+~~~~~~~
The function np.concatenate tries to match the layout of its input arrays.
Previously, the layout did not follow any particular reason, and depended
@@ -214,7 +213,7 @@ and so the collapsing process only continues so long as it encounters other
``b`` is the last entry in that list which is a ``matrix`` object.
Casting Rules
--------------
+~~~~~~~~~~~~~
Casting rules have undergone some changes in corner cases, due to the
NA-related work. In particular for combinations of scalar+scalar:
@@ -256,7 +255,7 @@ Deprecations
============
General
--------
+~~~~~~~
Specifying a custom string formatter with a `_format` array attribute is
deprecated. The new `formatter` keyword in ``numpy.set_printoptions`` or
@@ -269,7 +268,7 @@ Versions of numpy < 1.7.0 ignored axis argument value for 1D arrays. We
allow this for now, but in due course we will raise an error.
C-API
------
+~~~~~
Direct access to the fields of PyArrayObject* has been deprecated. Direct
access has been recommended against for many releases. Expect similar
diff --git a/doc/release/1.7.1-notes.rst b/doc/release/1.7.1-notes.rst
new file mode 100644
index 000000000..15a756cb1
--- /dev/null
+++ b/doc/release/1.7.1-notes.rst
@@ -0,0 +1,25 @@
+NumPy 1.7.1 Release Notes
+*************************
+
+This is a bugfix only release in the 1.7.x series.
+
+
+Issues fixed
+============
+
+gh-2973 Fix `1` is printed during numpy.test()
+gh-2983 BUG: gh-2969: Backport memory leak fix 80b3a34.
+gh-3007 Backport gh-3006
+gh-2984 Backport fix complex polynomial fit
+gh-2982 BUG: Make nansum work with booleans.
+gh-2985 Backport large sort fixes
+gh-3039 Backport object take
+gh-3105 Backport nditer fix op axes initialization
+gh-3108 BUG: npy-pkg-config ini files were missing after Bento build.
+gh-3124 BUG: PyArray_LexSort allocates too much temporary memory.
+gh-3131 BUG: Exported f2py_size symbol prevents linking multiple f2py
+modules.
+gh-3117 Backport gh-2992
+gh-3135 DOC: Add mention of PyArray_SetBaseObject stealing a reference
+gh-3134 DOC: Fix typo in fft docs (the indexing variable is 'm', not 'n').
+gh-3136 Backport #3128
diff --git a/doc/release/1.8.0-notes.rst b/doc/release/1.8.0-notes.rst
index f41c8e716..33d3d1b0e 100644
--- a/doc/release/1.8.0-notes.rst
+++ b/doc/release/1.8.0-notes.rst
@@ -1,13 +1,17 @@
-=========================
NumPy 1.8.0 Release Notes
-=========================
+*************************
-This release supports Python 2.6 -2.7 and 3.1 - 3.3.
+This release supports Python 2.6 - 2.7 and 3.2 - 3.3.
Highlights
==========
+Python 2 and Python 3 are supported by the same code base. The
+2to3 fixer is no longer run.
+
+
+
Dropped Support
===============
@@ -37,33 +41,228 @@ compiler, then it's possible you will encounter problems. If so, please
file a bug and as a temporary workaround you can re-enable the old build
system by exporting the shell variable NPY_SEPARATE_COMPILATION=0.
-New features
+For the AdvancedNew iterator the ``oa_ndim`` flag should now be -1 to indicate
+that no ``op_axes`` and ``itershape`` are passed in. The ``oa_ndim == 0``
+case, now indicates a 0-D iteration and ``op_axes`` being NULL and the old
+usage is deprecated. This does not affect the ``NpyIter_New`` or
+``NpyIter_MultiNew`` functions.
+
+NPY_RELAXED_STRIDES_CHECKING
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+There is a new compile time environment variable
+``NPY_RELAXED_STRIDES_CHECKING``. If this variable is set to 1, then
+numpy will consider more arrays to be C- or F-contiguous -- for
+example, it becomes possible to have a column vector which is
+considered both C- and F-contiguous simultaneously. The new definition
+is more accurate, allows for faster code that makes fewer unnecessary
+copies, and simplifies numpy's code internally. However, it may also
+break third-party libraries that make too-strong assumptions about the
+stride values of C- and F-contiguous arrays. (It is also currently
+known that this breaks Cython code using memoryviews, which will be
+fixed in Cython.) THIS WILL BECOME THE DEFAULT IN A FUTURE RELEASE, SO
+PLEASE TEST YOUR CODE NOW AGAINST NUMPY BUILT WITH::
+
+ NPY_RELAXED_STRIDES_CHECKING=1 python setup.py install
+
+You can check whether NPY_RELAXED_STRIDES_CHECKING is in effect by
+running::
+
+ np.ones((10, 1), order="C").flags.f_contiguous
+
+This will be ``True`` if relaxed strides checking is enabled, and
+``False`` otherwise. The typical problem we've seen so far is C code
+that works with C-contiguous arrays, and assumes that the itemsize can
+be accessed by looking at the last element in the ``PyArray_STRIDES(arr)``
+array. When relaxed strides are in effect, this is not true (and in
+fact, it never was true in some corner cases). Instead, use
+``PyArray_ITEMSIZE(arr)``.
+
+For more information check the "Internal memory layout of an ndarray"
+section in the documentation.
+
+IO compatibility with large files
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Large NPZ files >2GB can be loaded on 64-bit systems.
+
+Binary operations with non-arrays as second argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Binary operations of the form ``<array-or-subclass> * <non-array-subclass>``
+where ``<non-array-subclass>`` declares an ``__array_priority__`` higher than
+that of ``<array-or-subclass>`` will now unconditionally return
+*NotImplemented*, giving ``<non-array-subclass>`` a chance to handle the
+operation. Previously, `NotImplemented` would only be returned if
+``<non-array-subclass>`` actually implemented the reversed operation, and after
+a (potentially expensive) array conversion of ``<non-array-subclass>`` had been
+attempted. (`bug <https://github.com/numpy/numpy/issues/3375>`_, `pull request
+<https://github.com/numpy/numpy/pull/3501>`_)
+
+
+New Features
============
+New constant
+~~~~~~~~~~~~
Euler's constant is now exposed in numpy as euler_gamma.
+New modes for qr
+~~~~~~~~~~~~~~~~
+New modes 'complete', 'reduced', and 'raw' have been added to the qr
+factorization and the old 'full' and 'economic' modes are deprecated.
+The 'reduced' mode replaces the old 'full' mode and is the default as was
+the 'full' mode, so backward compatibility can be maintained by not
+specifying the mode.
+
+The 'complete' mode returns a full dimensional factorization, which can be
+useful for obtaining a basis for the orthogonal complement of the range
+space. The 'raw' mode returns arrays that contain the Householder
+reflectors and scaling factors that can be used in the future to apply q
+without needing to convert to a matrix. The 'economic' mode is simply
+deprecated, there isn't much use for it and it isn't any more efficient
+than the 'raw' mode.
+
+New `invert` argument to `in1d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The function `in1d` now accepts an `invert` argument which, when `True`,
+causes the returned array to be inverted.
+
+Advanced indexing using `np.newaxis`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+It is now possible to use `np.newaxis`/`None` together with index
+arrays instead of only in simple indices. This means that
+``array[np.newaxis, [0, 1]]`` will now work as expected.
+
+New functions `full` and `full_like`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+New convenience functions to create arrays filled with a specific value;
+complementary to the existing `zeros` and `zeros_like` functions.
+
+C-API
+~~~~~
+
+New ufuncs can now be registered with built in input types and a custom
+output type. Before this change, NumPy wouldn't be able to find the right
+ufunc loop function when the ufunc was called from Python, because the ufunc
+loop signature matching logic wasn't looking at the output operand type.
+Now the correct ufunc loop is found, as long as the user provides an output
+argument with the correct output type.
+runtests.py
+~~~~~~~~~~~
+
+A simple test runner script ``runtests.py`` was added. It also builds Numpy via
+``setup.py build`` and can be used to run tests easily during development.
+
+
+Improvements
+============
+
+IO performance improvements
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Performance in reading large files was improved by chunking (see also IO compatibility).
+
+Performance improvements to `pad`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The `pad` function has a new implementation, greatly improving performance for
+all inputs except `mode=<function>` (retained for backwards compatibility).
+Scaling with dimensionality is dramatically improved for rank >= 4.
+
+Performance improvements to `isnan`, `isinf`, `isfinite` and `byteswap`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+`isnan`, `isinf`, `isfinite` and `byteswap` have been improved to take
+advantage of compiler builtins to avoid expensive calls to libc.
+This improves performance of these operations by about a factor of two on gnu
+libc systems.
+
+Performance improvements via SSE2 vectorization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Several functions have been optimized to make use of SSE2 CPU SIMD instructions.
+
+ * Float32 and float64:
+
+ * base math (`add`, `subtract`, `divide`, `multiply`)
+ * `sqrt`
+ * `minimum/maximum`
+ * `absolute`
+
+ * Bool:
+
+ * `logical_or`
+ * `logical_and`
+ * `logical_not`
+
+This improves performance of these operations up to 4x/2x for float32/float64
+and up to 10x for bool depending on the location of the data in the CPU caches.
+The performance gain is greatest for in-place operations.
+
+In order to use the improved functions the SSE2 instruction set must be enabled
+at compile time. It is enabled by default on x86_64 systems. On x86_32 with a
+capable CPU it must be enabled by passing the appropriate flag to the CFLAGS
+build variable (-msse2 with gcc).
Changes
=======
General
--------
+~~~~~~~
The function np.take now allows 0-d arrays as indices.
The separate compilation mode is now enabled by default.
+Several changes to np.insert and np.delete:
+* Previously, negative indices and indices that pointed past the end of
+ the array were simply ignored. Now, this will raise a Future or Deprecation
+ Warning. In the future they will be treated like normal indexing treats
+ them -- negative indices will wrap around, and out-of-bound indices will
+ generate an error.
+* Previously, boolean indices were treated as if they were integers (always
+ referring to either the 0th or 1st item in the array). In the future, they
+ will be treated as masks. In this release, they raise a FutureWarning
+ warning of this coming change.
+* In Numpy 1.7, np.insert already allowed the syntax
+  `np.insert(arr, 3, [1,2,3])` to insert multiple items at a single position.
+  In Numpy 1.8, this is also possible for `np.insert(arr, [3], [1, 2, 3])`.
+
+Padded regions from np.pad are now correctly rounded, not truncated.
+
+C-API
+~~~~~
Deprecations
============
+The 'full' and 'economic' modes of qr factorization are deprecated.
+
General
--------
+~~~~~~~
-Use of non-integer indices has been deprecated. Previously float indices
-were truncated to integers without warning.
+The use of non-integer values for indices and most integer arguments has been
+deprecated. Previously float indices and function arguments such as axes or
+shapes were truncated to integers without warning. For example
+`arr.reshape(3., -1)` or `arr[0.]` will trigger a deprecation warning in
+NumPy 1.8, and in some future version of NumPy they will raise an error.
C-API
------
+~~~~~
+
+New Features
+============
+
+When creating a ufunc, the default ufunc operand flags can be overridden
+via the new op_flags attribute of the ufunc object. For example, to set
+the operand flag for the first input to read/write:
+
+PyObject \*ufunc = PyUFunc_FromFuncAndData(...);
+ufunc->op_flags[0] = NPY_ITER_READWRITE;
+
+This allows a ufunc to perform an operation in place. Also, global nditer flags
+can be overridden via the new iter_flags attribute of the ufunc object.
+For example, to set the reduce flag for a ufunc:
+ufunc->iter_flags = NPY_ITER_REDUCE_OK;
diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme
new file mode 160000
+Subproject 65c59fe6a516e23db50ecb8ca160b9f9de12dc1
diff --git a/doc/source/_static/scipy.css b/doc/source/_static/scipy.css
deleted file mode 100644
index 89984b67d..000000000
--- a/doc/source/_static/scipy.css
+++ /dev/null
@@ -1,189 +0,0 @@
-@import "default.css";
-
-/**
- * Spacing fixes
- */
-
-div.body p, div.body dd, div.body li {
- line-height: 125%;
-}
-
-ul.simple {
- margin-top: 0;
- margin-bottom: 0;
- padding-top: 0;
- padding-bottom: 0;
-}
-
-/* spacing around blockquoted fields in parameters/attributes/returns */
-td.field-body > blockquote {
- margin-top: 0.1em;
- margin-bottom: 0.5em;
-}
-
-/* spacing around example code */
-div.highlight > pre {
- padding: 2px 5px 2px 5px;
-}
-
-/* spacing in see also definition lists */
-dl.last > dd {
- margin-top: 1px;
- margin-bottom: 5px;
- margin-left: 30px;
-}
-
-/* hide overflowing content in the sidebar */
-div.sphinxsidebarwrapper p.topless {
- overflow: hidden;
-}
-
-/**
- * Hide dummy toctrees
- */
-
-ul {
- padding-top: 0;
- padding-bottom: 0;
- margin-top: 0;
- margin-bottom: 0;
-}
-ul li {
- padding-top: 0;
- padding-bottom: 0;
- margin-top: 0;
- margin-bottom: 0;
-}
-ul li a.reference {
- padding-top: 0;
- padding-bottom: 0;
- margin-top: 0;
- margin-bottom: 0;
-}
-
-/**
- * Make high-level subsections easier to distinguish from top-level ones
- */
-div.body h3 {
- background-color: transparent;
-}
-
-div.body h4 {
- border: none;
- background-color: transparent;
-}
-
-/**
- * Scipy colors
- */
-
-body {
- background-color: rgb(100,135,220);
-}
-
-div.document {
- background-color: rgb(230,230,230);
-}
-
-div.sphinxsidebar {
- background-color: rgb(230,230,230);
-}
-
-div.related {
- background-color: rgb(100,135,220);
-}
-
-div.sphinxsidebar h3 {
- color: rgb(0,102,204);
-}
-
-div.sphinxsidebar h3 a {
- color: rgb(0,102,204);
-}
-
-div.sphinxsidebar h4 {
- color: rgb(0,82,194);
-}
-
-div.sphinxsidebar p {
- color: black;
-}
-
-div.sphinxsidebar a {
- color: #355f7c;
-}
-
-div.sphinxsidebar ul.want-points {
- list-style: disc;
-}
-
-.field-list th {
- color: rgb(0,102,204);
- white-space: nowrap;
-}
-
-/**
- * Extra admonitions
- */
-
-div.tip {
- background-color: #ffffe4;
- border: 1px solid #ee6;
-}
-
-div.plot-output {
- clear-after: both;
-}
-
-div.plot-output .figure {
- float: left;
- text-align: center;
- margin-bottom: 0;
- padding-bottom: 0;
-}
-
-div.plot-output .caption {
- margin-top: 2;
- padding-top: 0;
-}
-
-div.plot-output p.admonition-title {
- display: none;
-}
-
-div.plot-output:after {
- content: "";
- display: block;
- height: 0;
- clear: both;
-}
-
-
-/*
-div.admonition-example {
- background-color: #e4ffe4;
- border: 1px solid #ccc;
-}*/
-
-
-/**
- * Styling for field lists
- */
-
-table.field-list th {
- border-left: 1px solid #aaa !important;
- padding-left: 5px;
-}
-
-table.field-list {
- border-collapse: separate;
- border-spacing: 10px;
-}
-
-/**
- * Styling for footnotes
- */
-
-table.footnote td, table.footnote th {
- border: none;
-}
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index b497c93dd..55229a86d 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -15,8 +15,6 @@
<p><strong>Indices and tables:</strong></p>
<table class="contentstable" align="center"><tr>
<td width="50%">
- <p class="biglink"><a class="biglink" href="{{ pathto("modindex") }}">Module Index</a><br/>
- <span class="linkdescr">quick access to all modules</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("genindex") }}">General Index</a><br/>
<span class="linkdescr">all functions, classes, terms</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("glossary") }}">Glossary</a><br/>
diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html
index 409743a03..9edb003af 100644
--- a/doc/source/_templates/indexsidebar.html
+++ b/doc/source/_templates/indexsidebar.html
@@ -1,5 +1,4 @@
<h3>Resources</h3>
<ul>
<li><a href="http://scipy.org/">Scipy.org website</a></li>
- <li>&nbsp;</li>
</ul>
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
index 27798878e..77da54a00 100644
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,6 +1,9 @@
{% extends "!layout.html" %}
+
{% block rootrellink %}
-<li><a href="{{ pathto('index') }}">{{ shorttitle }}</a>{{ reldelim1 }}</li>
+ {% if pagename != 'index' %}
+ <li class="active"><a href="{{ pathto('index') }}">{{ shorttitle|e }}</a></li>
+ {% endif %}
{% endblock %}
{% block sidebarsearch %}
diff --git a/doc/source/about.rst b/doc/source/about.rst
index bcfbe5323..9485b5142 100644
--- a/doc/source/about.rst
+++ b/doc/source/about.rst
@@ -36,7 +36,9 @@ Our main means of communication are:
- `Mailing lists <http://scipy.org/Mailing_Lists>`__
-- `Numpy Trac <http://projects.scipy.org/numpy>`__ (bug "tickets" go here)
+- `Numpy Issues <https://github.com/numpy/numpy/issues>`__ (bug reports go here)
+
+- `Old Numpy Trac <http://projects.scipy.org/numpy>`__ (no longer used)
More information about the development of Numpy can be found at
http://scipy.org/Developer_Zone
diff --git a/doc/source/bugs.rst b/doc/source/bugs.rst
index cd2c5d3e8..b55be4329 100644
--- a/doc/source/bugs.rst
+++ b/doc/source/bugs.rst
@@ -3,21 +3,17 @@ Reporting bugs
**************
File bug reports or feature requests, and make contributions
-(e.g. code patches), by submitting a "ticket" on the Trac pages:
+(e.g. code patches), by opening a "new issue" on GitHub:
-- Numpy Trac: http://scipy.org/scipy/numpy
+- Numpy Issues: http://github.com/numpy/numpy/issues
-Because of spam abuse, you must create an account on our Trac in order
-to submit a ticket, then click on the "New Ticket" tab that only
-appears when you have logged in. Please give as much information as
-you can in the ticket. It is extremely useful if you can supply a
-small self-contained code snippet that reproduces the problem. Also
-specify the component, the version you are referring to and the
-milestone.
+Please give as much information as you can in the ticket. It is extremely
+useful if you can supply a small self-contained code snippet that reproduces
+the problem. Also specify the component, the version you are referring to and
+the milestone.
-Report bugs to the appropriate Trac instance (there is one for NumPy
-and a different one for SciPy). There are also read-only mailing lists
-for tracking the status of your bug ticket.
+Report bugs to the appropriate GitHub project (there is one for NumPy
+and a different one for SciPy).
More information can be found on the http://scipy.org/Developer_Zone
website.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index f22505f97..233f2e409 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import sys, os, re
@@ -30,9 +30,6 @@ templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
-# The master toctree document.
-#master_doc = 'index'
-
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
@@ -83,62 +80,50 @@ pygments_style = 'sphinx'
# HTML output
# -----------------------------------------------------------------------------
-# The style sheet to use for HTML and HTML Help pages. A file of that name
-# must exist either in Sphinx' static/ path, or in one of the custom paths
-# given in html_static_path.
-html_style = 'scipy.css'
-
-# The name for this set of Sphinx documents. If None, it defaults to
-# "<project> v<release> documentation".
-html_title = "%s v%s Manual (DRAFT)" % (project, version)
+themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
+if not os.path.isdir(themedir):
+ raise RuntimeError("Get the scipy-sphinx-theme first, "
+ "via git submodule init && git submodule update")
+
+html_theme = 'scipy'
+html_theme_path = [themedir]
+
+if 'scipyorg' in tags:
+ # Build for the scipy.org website
+ html_theme_options = {
+ "edit_link": True,
+ "sidebar": "right",
+ "scipy_org_logo": True,
+ "rootlinks": [("http://scipy.org/", "Scipy.org"),
+ ("http://docs.scipy.org/", "Docs")]
+ }
+else:
+ # Default build
+ html_theme_options = {
+ "edit_link": False,
+ "sidebar": "left",
+ "scipy_org_logo": False,
+ "rootlinks": []
+ }
+ html_sidebars = {'index': 'indexsidebar.html'}
-# The name of an image file (within the static path) to place at the top of
-# the sidebar.
-html_logo = 'scipyshiny_small.png'
+html_additional_pages = {
+ 'index': 'indexcontent.html',
+}
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
+html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-html_sidebars = {
- 'index': 'indexsidebar.html'
-}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-html_additional_pages = {
- 'index': 'indexcontent.html',
-}
-
-# If false, no module index is generated.
html_use_modindex = True
+html_copy_source = False
+html_domain_indices = False
+html_file_suffix = '.html'
-# If true, the reST sources are included in the HTML build as _sources/<name>.
-#html_copy_source = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it. The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# If nonempty, this is the file name suffix for HTML files (e.g. ".html").
-#html_file_suffix = '.html'
-
-# Output file base name for HTML help builder.
htmlhelp_basename = 'numpy'
-# Pngmath should try to align formulas properly
pngmath_use_preview = True
+pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index b35e36545..5cdadd40e 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -340,7 +340,7 @@ The default iterator of an ndarray object is the default Python
iterator of a sequence type. Thus, when the array object itself is
used as an iterator. The default behavior is equivalent to::
- for i in xrange(arr.shape[0]):
+ for i in range(arr.shape[0]):
val = arr[i]
This default iterator selects a sub-array of dimension :math:`N-1`
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index f8966f5c1..e759b6ff8 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -170,8 +170,8 @@ concepts to remember include:
.. data:: newaxis
- The :const:`newaxis` object can be used in the basic slicing syntax
- discussed above. :const:`None` can also be used instead of
+ The :const:`newaxis` object can be used in all slicing operations
+ as discussed above. :const:`None` can also be used instead of
:const:`newaxis`.
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 535ce8faa..5a528cbf6 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -115,7 +115,7 @@ array. Here, :math:`s_k` are integers which specify the :obj:`strides
<ndarray.strides>` of the array. The :term:`column-major` order (used,
for example, in the Fortran language and in *Matlab*) and
:term:`row-major` order (used in C) schemes are just specific kinds of
-strided scheme, and correspond to the strides:
+strided scheme, and correspond to memory that can be *addressed* by the strides:
.. math::
@@ -124,12 +124,51 @@ strided scheme, and correspond to the strides:
.. index:: single-segment, contiguous, non-contiguous
-where :math:`d_j` = `self.itemsize * self.shape[j]`.
+where :math:`d_j` `= self.itemsize * self.shape[j]`.
Both the C and Fortran orders are :term:`contiguous`, *i.e.,*
:term:`single-segment`, memory layouts, in which every part of the
memory block can be accessed by some combination of the indices.
+While a C-style and Fortran-style contiguous array, which has the corresponding
+flags set, can be addressed with the above strides, the actual strides may be
+different. This can happen in two cases:
+ 1. If ``self.shape[k] == 1`` then for any legal index ``index[k] == 0``.
+ This means that in the formula for the offset
+ :math:`n_k = 0` and thus :math:`s_k n_k = 0` and the value of
+ :math:`s_k` `= self.strides[k]` is arbitrary.
+ 2. If an array has no elements (``self.size == 0``) there is no legal index
+ and the strides are never used. Any array with no elements may be
+ considered C-style and Fortran-style contiguous.
+
+Point 1. means that ``self`` and ``self.squeeze()`` always have the same
+contiguity and :term:`aligned` flags value. This also means that even a high
+dimensional array could be C-style and Fortran-style contiguous at the same
+time.
+
+.. index:: aligned
+
+An array is considered aligned if the memory offsets for all elements and the
+base offset itself are multiples of `self.itemsize`.
+
+.. note::
+
+ Points (1) and (2) are not yet applied by default. Beginning with
+ Numpy 1.8.0, they are applied consistently only if the environment
+ variable ``NPY_RELAXED_STRIDES_CHECKING=1`` was defined when NumPy
+ was built. Eventually this will become the default.
+
+ You can check whether this option was enabled when your NumPy was
+ built by looking at the value of ``np.ones((10,1),
+ order='C').flags.f_contiguous``. If this is ``True``, then your
+ NumPy has relaxed strides checking enabled.
+
+.. warning::
+
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
+ Fortran-style contiguous arrays.
+
Data in new :class:`ndarrays <ndarray>` is in the :term:`row-major`
(C) order, unless otherwise specified, but, for example, :ref:`basic
array slicing <arrays.indexing>` often produces :term:`views <view>`
@@ -144,7 +183,6 @@ in a different scheme.
irregularly strided array is passed in to such algorithms, a copy
is automatically made.
-
.. _arrays.ndarray.attributes:
Array attributes
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 85cf3c317..cef400fad 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -165,13 +165,13 @@ with misaligned data.
at least *aobj* ->nd in size). You may want to typecast the
returned pointer to the data type of the ndarray.
-.. cfunction:: void* PyArray_GETPTR1(PyObject* obj, <npy_intp> i)
+.. cfunction:: void* PyArray_GETPTR1(PyArrayObject* obj, npy_intp i)
-.. cfunction:: void* PyArray_GETPTR2(PyObject* obj, <npy_intp> i, <npy_intp> j)
+.. cfunction:: void* PyArray_GETPTR2(PyArrayObject* obj, npy_intp i, npy_intp j)
-.. cfunction:: void* PyArray_GETPTR3(PyObject* obj, <npy_intp> i, <npy_intp> j, <npy_intp> k)
+.. cfunction:: void* PyArray_GETPTR3(PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k)
-.. cfunction:: void* PyArray_GETPTR4(PyObject* obj, <npy_intp> i, <npy_intp> j, <npy_intp> k, <npy_intp> l)
+.. cfunction:: void* PyArray_GETPTR4(PyArrayObject* obj, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
Quick, inline access to the element at the given coordinates in
the ndarray, *obj*, which must have respectively 1, 2, 3, or 4
@@ -1062,7 +1062,7 @@ Converting data types
*arr* is an array scalar (has 0 dimensions), it finds the data type
of smallest size to which the value may be converted
without overflow or truncation to an integer.
-
+
This function will not demote complex to float or anything to
boolean, but will demote a signed integer to an unsigned integer
when the scalar value is positive.
@@ -1088,7 +1088,7 @@ Converting data types
Categories are determined by first checking which of boolean,
integer (int/uint), or floating point (float/complex) the maximum
kind of all the arrays and the scalars are.
-
+
If there are only scalars or the maximum category of the scalars
is higher than the maximum category of the arrays,
the data types are combined with :cfunc:`PyArray_PromoteTypes`
@@ -1310,11 +1310,21 @@ of the constant names is deprecated in 1.7.
The data area is in Fortran-style contiguous order (first index varies
the fastest).
-Notice that contiguous 1-d arrays are always both Fortran contiguous
-and C contiguous. Both of these flags can be checked and are convenience
-flags only as whether or not an array is :cdata:`NPY_ARRAY_C_CONTIGUOUS`
-or :cdata:`NPY_ARRAY_F_CONTIGUOUS` can be determined by the ``strides``,
-``dimensions``, and ``itemsize`` attributes.
+.. note::
+
+ Arrays can be both C-style and Fortran-style contiguous simultaneously.
+ This is clear for 1-dimensional arrays, but can also be true for higher
+ dimensional arrays.
+
+ Even for contiguous arrays a stride for a given dimension
+ ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
+ or the array has no elements.
+ It does *not* generally hold that ``self.strides[-1] == self.itemsize``
+ for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
+ Fortran-style contiguous arrays. The correct way to access the
+ ``itemsize`` of an array from the C API is ``PyArray_ITEMSIZE(arr)``.
+
+ .. seealso:: :ref:`Internal memory layout of an ndarray <arrays.ndarray>`
.. cvar:: NPY_ARRAY_OWNDATA
@@ -1322,7 +1332,7 @@ or :cdata:`NPY_ARRAY_F_CONTIGUOUS` can be determined by the ``strides``,
.. cvar:: NPY_ARRAY_ALIGNED
- The data area is aligned appropriately (for all strides).
+ The data area and all array elements are aligned appropriately.
.. cvar:: NPY_ARRAY_WRITEABLE
@@ -1432,14 +1442,20 @@ For all of these macros *arr* must be an instance of a (subclass of)
:cdata:`NPY_ARRAY_OWNDATA`, :cdata:`NPY_ARRAY_ALIGNED`,
:cdata:`NPY_ARRAY_WRITEABLE`, :cdata:`NPY_ARRAY_UPDATEIFCOPY`.
-.. cfunction:: PyArray_ISCONTIGUOUS(arr)
+.. cfunction:: PyArray_IS_C_CONTIGUOUS(arr)
Evaluates true if *arr* is C-style contiguous.
-.. cfunction:: PyArray_ISFORTRAN(arr)
+.. cfunction:: PyArray_IS_F_CONTIGUOUS(arr)
Evaluates true if *arr* is Fortran-style contiguous.
+.. cfunction:: PyArray_ISFORTRAN(arr)
+
+ Evaluates true if *arr* is Fortran-style contiguous and *not*
+ C-style contiguous. :cfunc:`PyArray_IS_F_CONTIGUOUS`
+ is the correct way to test for Fortran-style contiguity.
+
.. cfunction:: PyArray_ISWRITEABLE(arr)
Evaluates true if the data area of *arr* can be written to
diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api.iterator.rst
index 7e2900bcc..1e3565bc1 100644
--- a/doc/source/reference/c-api.iterator.rst
+++ b/doc/source/reference/c-api.iterator.rst
@@ -634,12 +634,12 @@ Construction and Destruction
Extends :cfunc:`NpyIter_MultiNew` with several advanced options providing
more control over broadcasting and buffering.
- If 0/NULL values are passed to ``oa_ndim``, ``op_axes``, ``itershape``,
+ If -1/NULL values are passed to ``oa_ndim``, ``op_axes``, ``itershape``,
and ``buffersize``, it is equivalent to :cfunc:`NpyIter_MultiNew`.
- The parameter ``oa_ndim``, when non-zero, specifies the number of
+ The parameter ``oa_ndim``, when not zero or -1, specifies the number of
dimensions that will be iterated with customized broadcasting.
- If it is provided, ``op_axes`` and/or ``itershape`` must also be provided.
+ If it is provided, ``op_axes`` must and ``itershape`` can also be provided.
The ``op_axes`` parameter let you control in detail how the
axes of the operand arrays get matched together and iterated.
In ``op_axes``, you must provide an array of ``nop`` pointers
@@ -649,6 +649,11 @@ Construction and Destruction
-1 which means ``newaxis``. Within each ``op_axes[j]`` array, axes
may not be repeated. The following example is how normal broadcasting
applies to a 3-D array, a 2-D array, a 1-D array and a scalar.
+
+ **Note**: Before NumPy 1.8 ``oa_ndim == 0`` was used for signalling
+ that ``op_axes`` and ``itershape`` are unused. This is deprecated and
+ should be replaced with -1. Better backward compatibility may be
+ achieved by using :cfunc:`NpyIter_MultiNew` for this case.
.. code-block:: c
diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst
index 07c7e07be..79a888912 100644
--- a/doc/source/reference/c-api.types-and-structures.rst
+++ b/doc/source/reference/c-api.types-and-structures.rst
@@ -652,6 +652,8 @@ PyUFunc_Type
void *ptr;
PyObject *obj;
PyObject *userloops;
+ npy_uint32 *op_flags;
+ npy_uint32 *iter_flags;
} PyUFuncObject;
.. cmacro:: PyUFuncObject.PyObject_HEAD
@@ -755,6 +757,14 @@ PyUFunc_Type
numbers are always larger than :cdata:`NPY_USERDEF`.
+ .. cmember:: npy_uint32 PyUFuncObject.op_flags
+
+ Override the default operand flags for each ufunc operand.
+
+ .. cmember:: npy_uint32 PyUFuncObject.iter_flags
+
+ Override the default nditer flags for the ufunc.
+
PyArrayIter_Type
----------------
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst
index 45268b261..d4de28188 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api.ufunc.rst
@@ -140,6 +140,16 @@ Functions
in as *arg_types* which must be a pointer to memory at least as
large as ufunc->nargs.
+.. cfunction:: int PyUFunc_RegisterLoopForDescr(PyUFuncObject* ufunc,
+ PyArray_Descr* userdtype, PyUFuncGenericFunction function,
+ PyArray_Descr** arg_dtypes, void* data)
+
+ This function behaves like PyUFunc_RegisterLoopForType above, except
+ that it allows the user to register a 1-d loop using PyArray_Descr
+ objects instead of dtype type num values. This allows a 1-d loop to be
+ registered for structured array data-dtypes and custom data-types
+ instead of scalar data-types.
+
.. cfunction:: int PyUFunc_ReplaceLoopBySignature(PyUFuncObject* ufunc,
PyUFuncGenericFunction newfunc, int* signature,
PyUFuncGenericFunction* oldfunc)
diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst
index 173a6ad53..00620347a 100644
--- a/doc/source/reference/routines.linalg.rst
+++ b/doc/source/reference/routines.linalg.rst
@@ -67,3 +67,21 @@ Exceptions
:toctree: generated/
linalg.LinAlgError
+
+Linear algebra on several matrices at once
+------------------------------------------
+
+Several of the linear algebra routines listed above are able to
+compute results for several matrices at once, if they are stacked into
+the same array.
+
+This is indicated in the documentation via input parameter
+specifications such as ``a : (..., M, M) array_like``. This means that
+if for instance given an input array ``a.shape == (N, M, M)``, it is
+interpreted as a "stack" of N matrices, each of size M-by-M. Similar
+specification applies to return values, for instance the determinant
+has ``det : (...)`` and will in this case return an array of shape
+``det(a).shape == (N,)``. This generalizes to linear algebra
+operations on higher-dimensional arrays: the last 1 or 2 dimensions of
+a multidimensional array are interpreted as vectors or matrices, as
+appropriate for each operation.
diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst
index 7ce77c24d..0e7a60b76 100644
--- a/doc/source/reference/routines.math.rst
+++ b/doc/source/reference/routines.math.rst
@@ -143,6 +143,8 @@ Miscellaneous
sign
maximum
minimum
+ fmax
+ fmin
nan_to_num
real_if_close
diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst
index 9294728c8..14729f08b 100644
--- a/doc/source/reference/routines.polynomials.classes.rst
+++ b/doc/source/reference/routines.polynomials.classes.rst
@@ -30,10 +30,10 @@ and more generally
.. math:: p(x) = \sum_{i=0}^n c_i T_i(x)
-where in this case the :math:`T_n` are the Chebyshev functions of degree
-`n`, but could just as easily be the basis functions of any of the other
-classes. The convention for all the classes is that the coefficient
-c[i] goes with the basis function of degree i.
+where in this case the :math:`T_n` are the Chebyshev functions of
+degree :math:`n`, but could just as easily be the basis functions of
+any of the other classes. The convention for all the classes is that
+the coefficient :math:`c[i]` goes with the basis function of degree i.
All of the classes have the same methods, and especially they implement the
Python numeric operators +, -, \*, //, %, divmod, \*\*, ==,
@@ -47,7 +47,7 @@ Basics
First we need a polynomial class and a polynomial instance to play with.
The classes can be imported directly from the polynomial package or from
the module of the relevant type. Here we import from the package and use
-the conventional Polynomial class because of its familiarity.::
+the conventional Polynomial class because of its familiarity::
>>> from numpy.polynomial import Polynomial as P
>>> p = P([1,2,3])
@@ -93,7 +93,7 @@ Powers::
Division:
-Floor_division, '//', is the division operator for the polynomial classes,
+Floor division, '//', is the division operator for the polynomial classes,
polynomials are treated like integers in this regard. For Python versions <
3.x the '/' operator maps to '//', as it does for Python, for later
versions the '/' will only work for division by scalars. At some point it
@@ -182,7 +182,7 @@ and window casting::
>>> p(T([0, 1]))
Chebyshev([ 2.5, 2. , 1.5], [-1., 1.], [-1., 1.])
-Which gives the polynomial 'p' in Chebyshev form. This works because
+Which gives the polynomial `p` in Chebyshev form. This works because
:math:`T_1(x) = x` and substituting :math:`x` for :math:`x` doesn't change
the original polynomial. However, all the multiplications and divisions
will be done using Chebyshev series, hence the type of the result.
@@ -199,7 +199,7 @@ Polynomial instances can be integrated and differentiated.::
>>> p.integ(2)
Polynomial([ 0., 0., 1., 1.], [-1., 1.], [-1., 1.])
-The first example integrates 'p' once, the second example integrates it
+The first example integrates `p` once, the second example integrates it
twice. By default, the lower bound of the integration and the integration
constant are 0, but both can be specified.::
@@ -227,7 +227,7 @@ Constructing polynomials by specifying coefficients is just one way of
obtaining a polynomial instance, they may also be created by specifying
their roots, by conversion from other polynomial types, and by least
squares fits. Fitting is discussed in its own section, the other methods
-are demonstrated below.::
+are demonstrated below::
>>> from numpy.polynomial import Polynomial as P
>>> from numpy.polynomial import Chebyshev as T
@@ -244,9 +244,9 @@ The convert method can also convert domain and window::
>>> p.convert(kind=P, domain=[0, 1])
Polynomial([-1.875, 2.875, -1.125, 0.125], [ 0., 1.], [-1., 1.])
-In numpy versions >= 1.7.0 the 'basis' and 'cast' class methods are also
+In numpy versions >= 1.7.0 the `basis` and `cast` class methods are also
available. The cast method works like the convert method while the basis
-method returns the basis polynomial of given degree.::
+method returns the basis polynomial of given degree::
>>> P.basis(3)
Polynomial([ 0., 0., 0., 1.], [-1., 1.], [-1., 1.])
@@ -276,8 +276,8 @@ polynomials up to degree 5 are plotted below.
<matplotlib.legend.Legend object at 0x3b3ee10>
>>> plt.show()
-In the range -1 <= x <= 1 they are nice, equiripple functions lying between +/- 1.
-The same plots over the range -2 <= x <= 2 look very different:
+In the range -1 <= `x` <= 1 they are nice, equiripple functions lying between +/- 1.
+The same plots over the range -2 <= `x` <= 2 look very different:
.. plot::
@@ -291,17 +291,17 @@ The same plots over the range -2 <= x <= 2 look very different:
>>> plt.show()
As can be seen, the "good" parts have shrunk to insignificance. In using
-Chebyshev polynomials for fitting we want to use the region where x is
-between -1 and 1 and that is what the 'window' specifies. However, it is
+Chebyshev polynomials for fitting we want to use the region where `x` is
+between -1 and 1 and that is what the `window` specifies. However, it is
unlikely that the data to be fit has all its data points in that interval,
-so we use 'domain' to specify the interval where the data points lie. When
+so we use `domain` to specify the interval where the data points lie. When
the fit is done, the domain is first mapped to the window by a linear
transformation and the usual least squares fit is done using the mapped
data points. The window and domain of the fit are part of the returned series
and are automatically used when computing values, derivatives, and such. If
they aren't specified in the call the fitting routine will use the default
window and the smallest domain that holds all the data points. This is
-illustrated below for a fit to a noisy sin curve.
+illustrated below for a fit to a noisy sine curve.
.. plot::
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index afcb1302b..2154bca37 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -321,9 +321,9 @@ advanced usage and will not typically be used.
Specifies the calculation iteration order/memory layout of the output array.
Defaults to 'K'. 'C' means the output should be C-contiguous, 'F' means
- F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous, C-contiguous
- otherwise, and 'K' means to match the element ordering of the inputs
- as closely as possible.
+ F-contiguous, 'A' means F-contiguous if the inputs are F-contiguous and
+ not also C-contiguous, C-contiguous otherwise, and 'K' means to match
+ the element ordering of the inputs as closely as possible.
*dtype*
@@ -604,6 +604,10 @@ Comparison functions
``a > b`` and uses it to return either `a` or `b` (as a whole). A similar
difference exists between ``minimum(a, b)`` and ``min(a, b)``.
+.. autosummary::
+
+ fmax
+ fmin
Floating functions
------------------
diff --git a/doc/source/release.rst b/doc/source/release.rst
index bf9e95a14..944607aa8 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,9 +2,12 @@
Release Notes
*************
-.. include:: ../release/1.3.0-notes.rst
-.. include:: ../release/1.4.0-notes.rst
-.. include:: ../release/1.5.0-notes.rst
-.. include:: ../release/1.6.0-notes.rst
-.. include:: ../release/1.6.1-notes.rst
+.. include:: ../release/1.8.0-notes.rst
+.. include:: ../release/1.7.1-notes.rst
+.. include:: ../release/1.7.0-notes.rst
.. include:: ../release/1.6.2-notes.rst
+.. include:: ../release/1.6.1-notes.rst
+.. include:: ../release/1.6.0-notes.rst
+.. include:: ../release/1.5.0-notes.rst
+.. include:: ../release/1.4.0-notes.rst
+.. include:: ../release/1.3.0-notes.rst
diff --git a/doc/source/scipyshiny_small.png b/doc/source/scipyshiny_small.png
deleted file mode 100644
index 7ef81a9e8..000000000
--- a/doc/source/scipyshiny_small.png
+++ /dev/null
Binary files differ
diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst
index ef2a0f9ee..d3b1deb26 100644
--- a/doc/source/user/c-info.ufunc-tutorial.rst
+++ b/doc/source/user/c-info.ufunc-tutorial.rst
@@ -884,6 +884,171 @@ as well as all other properties of a ufunc.
}
#endif
+
+.. _`sec:Numpy-struct-dtype`:
+
+Example Numpy ufunc with structured array dtype arguments
+=========================================================
+
+This example shows how to create a ufunc for a structured array dtype.
+For the example we show a trivial ufunc for adding two arrays with dtype
+'u8,u8,u8'. The process is a bit different from the other examples since
+a call to PyUFunc_FromFuncAndData doesn't fully register ufuncs for
+custom dtypes and structured array dtypes. We need to also call
+PyUFunc_RegisterLoopForDescr to finish setting up the ufunc.
+
+We only give the C code as the setup.py file is exactly the same as
+the setup.py file in `Example Numpy ufunc for one dtype`_, except that
+the line
+
+ .. code-block:: python
+
+ config.add_extension('npufunc', ['single_type_logit.c'])
+
+is replaced with
+
+ .. code-block:: python
+
+ config.add_extension('npufunc', ['add_triplet.c'])
+
+The C file is given below.
+
+ .. code-block:: c
+
+ #include "Python.h"
+ #include "math.h"
+ #include "numpy/ndarraytypes.h"
+ #include "numpy/ufuncobject.h"
+ #include "numpy/npy_3kcompat.h"
+
+
+ /*
+ * add_triplet.c
+ * This is the C code for creating your own
+ * Numpy ufunc for a structured array dtype.
+ *
+ * Details explaining the Python-C API can be found under
+ * 'Extending and Embedding' and 'Python/C API' at
+ * docs.python.org .
+ */
+
+ static PyMethodDef StructUfuncTestMethods[] = {
+ {NULL, NULL, 0, NULL}
+ };
+
+ /* The loop definition must precede the PyMODINIT_FUNC. */
+
+ static void add_uint64_triplet(char **args, npy_intp *dimensions,
+ npy_intp* steps, void* data)
+ {
+ npy_intp i;
+ npy_intp is1=steps[0];
+ npy_intp is2=steps[1];
+ npy_intp os=steps[2];
+ npy_intp n=dimensions[0];
+ uint64_t *x, *y, *z;
+
+ char *i1=args[0];
+ char *i2=args[1];
+ char *op=args[2];
+
+ for (i = 0; i < n; i++) {
+
+ x = (uint64_t*)i1;
+ y = (uint64_t*)i2;
+ z = (uint64_t*)op;
+
+ z[0] = x[0] + y[0];
+ z[1] = x[1] + y[1];
+ z[2] = x[2] + y[2];
+
+ i1 += is1;
+ i2 += is2;
+ op += os;
+ }
+ }
+
+ /* This a pointer to the above function */
+ PyUFuncGenericFunction funcs[1] = {&add_uint64_triplet};
+
+ /* These are the input and return dtypes of add_uint64_triplet. */
+ static char types[3] = {NPY_UINT64, NPY_UINT64, NPY_UINT64};
+
+ static void *data[1] = {NULL};
+
+ #if defined(NPY_PY3K)
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "struct_ufunc_test",
+ NULL,
+ -1,
+ StructUfuncTestMethods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+ };
+ #endif
+
+ #if defined(NPY_PY3K)
+ PyMODINIT_FUNC PyInit_struct_ufunc_test(void)
+ #else
+ PyMODINIT_FUNC initstruct_ufunc_test(void)
+ #endif
+ {
+ PyObject *m, *add_triplet, *d;
+ PyObject *dtype_dict;
+ PyArray_Descr *dtype;
+ PyArray_Descr *dtypes[3];
+
+ #if defined(NPY_PY3K)
+ m = PyModule_Create(&moduledef);
+ #else
+ m = Py_InitModule("struct_ufunc_test", StructUfuncTestMethods);
+ #endif
+
+ if (m == NULL) {
+ #if defined(NPY_PY3K)
+ return NULL;
+ #else
+ return;
+ #endif
+ }
+
+ import_array();
+ import_umath();
+
+ /* Create a new ufunc object */
+ add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1,
+ PyUFunc_None, "add_triplet",
+ "add_triplet_docstring", 0);
+
+ dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]",
+ "f0", "u8", "f1", "u8", "f2", "u8");
+ PyArray_DescrConverter(dtype_dict, &dtype);
+ Py_DECREF(dtype_dict);
+
+ dtypes[0] = dtype;
+ dtypes[1] = dtype;
+ dtypes[2] = dtype;
+
+ /* Register ufunc for structured dtype */
+ PyUFunc_RegisterLoopForDescr(add_triplet,
+ dtype,
+ &add_uint64_triplet,
+ dtypes,
+ NULL);
+
+ d = PyModule_GetDict(m);
+
+ PyDict_SetItemString(d, "add_triplet", add_triplet);
+ Py_DECREF(add_triplet);
+ #if defined(NPY_PY3K)
+ return m;
+ #endif
+ }
+
+
.. _`sec:PyUFunc-spec`:
PyUFunc_FromFuncAndData Specification
diff --git a/doc/sphinxext b/doc/sphinxext
new file mode 160000
+Subproject 447dd0b59c2fe91ca9643701036d3d04919ddc7
diff --git a/doc/sphinxext/LICENSE.txt b/doc/sphinxext/LICENSE.txt
deleted file mode 100644
index b15c699dc..000000000
--- a/doc/sphinxext/LICENSE.txt
+++ /dev/null
@@ -1,94 +0,0 @@
--------------------------------------------------------------------------------
- The files
- - numpydoc.py
- - docscrape.py
- - docscrape_sphinx.py
- - phantom_import.py
- have the following license:
-
-Copyright (C) 2008 Stefan van der Walt <stefan@mentat.za.net>, Pauli Virtanen <pav@iki.fi>
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- 2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
-INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
-STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
-IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
--------------------------------------------------------------------------------
- The files
- - compiler_unparse.py
- - comment_eater.py
- - traitsdoc.py
- have the following license:
-
-This software is OSI Certified Open Source Software.
-OSI Certified is a certification mark of the Open Source Initiative.
-
-Copyright (c) 2006, Enthought, Inc.
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
- * Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
- * Neither the name of Enthought, Inc. nor the names of its contributors may
- be used to endorse or promote products derived from this software without
- specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
--------------------------------------------------------------------------------
- The file
- - plot_directive.py
- originates from Matplotlib (http://matplotlib.sf.net/) which has
- the following license:
-
-Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved.
-
-1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, JDH hereby grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, analyze, test, perform and/or display publicly, prepare derivative works, distribute, and otherwise use matplotlib 0.98.3 alone or in any derivative version, provided, however, that JDH’s License Agreement and JDH’s notice of copyright, i.e., “Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved” are retained in matplotlib 0.98.3 alone or in any derivative version prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on or incorporates matplotlib 0.98.3 or any part thereof, and wants to make the derivative work available to others as provided herein, then Licensee hereby agrees to include in any such work a brief summary of the changes made to matplotlib 0.98.3.
-
-4. JDH is making matplotlib 0.98.3 available to Licensee on an “AS IS” basis. JDH MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, JDH MAKES NO AND DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB 0.98.3 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. JDH SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB 0.98.3 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING MATPLOTLIB 0.98.3, OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any relationship of agency, partnership, or joint venture between JDH and Licensee. This License Agreement does not grant permission to use JDH trademarks or trade name in a trademark sense to endorse or promote products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement.
-
diff --git a/doc/sphinxext/MANIFEST.in b/doc/sphinxext/MANIFEST.in
deleted file mode 100644
index 5176d485b..000000000
--- a/doc/sphinxext/MANIFEST.in
+++ /dev/null
@@ -1,2 +0,0 @@
-recursive-include numpydoc/tests *.py
-include *.txt
diff --git a/doc/sphinxext/README.txt b/doc/sphinxext/README.txt
deleted file mode 100644
index 6ba63e6d8..000000000
--- a/doc/sphinxext/README.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-=====================================
-numpydoc -- Numpy's Sphinx extensions
-=====================================
-
-Numpy's documentation uses several custom extensions to Sphinx. These
-are shipped in this ``numpydoc`` package, in case you want to make use
-of them in third-party projects.
-
-The following extensions are available:
-
- - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add
- the code description directives ``np:function``, ``np-c:function``, etc.
- that support the Numpy docstring syntax.
-
- - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes.
-
- - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::``
- directive. Note that this implementation may still undergo severe
- changes or eventually be deprecated.
-
-
-numpydoc
-========
-
-Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings
-following the Numpy/Scipy format to a form palatable to Sphinx.
-
-Options
--------
-
-The following options can be set in conf.py:
-
-- numpydoc_use_plots: bool
-
- Whether to produce ``plot::`` directives for Examples sections that
- contain ``import matplotlib``.
-
-- numpydoc_show_class_members: bool
-
- Whether to show all members of a class in the Methods and Attributes
- sections automatically.
-
-- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead)
-
- Whether to insert an edit link after docstrings.
diff --git a/doc/sphinxext/numpydoc/__init__.py b/doc/sphinxext/numpydoc/__init__.py
deleted file mode 100644
index 68dbbb00a..000000000
--- a/doc/sphinxext/numpydoc/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .numpydoc import setup
diff --git a/doc/sphinxext/numpydoc/comment_eater.py b/doc/sphinxext/numpydoc/comment_eater.py
deleted file mode 100644
index 0e205072e..000000000
--- a/doc/sphinxext/numpydoc/comment_eater.py
+++ /dev/null
@@ -1,169 +0,0 @@
-from __future__ import division
-
-import sys
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-import compiler
-import inspect
-import textwrap
-import tokenize
-
-from .compiler_unparse import unparse
-
-
-class Comment(object):
- """ A comment block.
- """
- is_comment = True
- def __init__(self, start_lineno, end_lineno, text):
- # int : The first line number in the block. 1-indexed.
- self.start_lineno = start_lineno
- # int : The last line number. Inclusive!
- self.end_lineno = end_lineno
- # str : The text block including '#' character but not any leading spaces.
- self.text = text
-
- def add(self, string, start, end, line):
- """ Add a new comment line.
- """
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
- self.text += string
-
- def __repr__(self):
- return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno, self.text)
-
-
-class NonComment(object):
- """ A non-comment block of code.
- """
- is_comment = False
- def __init__(self, start_lineno, end_lineno):
- self.start_lineno = start_lineno
- self.end_lineno = end_lineno
-
- def add(self, string, start, end, line):
- """ Add lines to the block.
- """
- if string.strip():
- # Only add if not entirely whitespace.
- self.start_lineno = min(self.start_lineno, start[0])
- self.end_lineno = max(self.end_lineno, end[0])
-
- def __repr__(self):
- return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno,
- self.end_lineno)
-
-
-class CommentBlocker(object):
- """ Pull out contiguous comment blocks.
- """
- def __init__(self):
- # Start with a dummy.
- self.current_block = NonComment(0, 0)
-
- # All of the blocks seen so far.
- self.blocks = []
-
- # The index mapping lines of code to their associated comment blocks.
- self.index = {}
-
- def process_file(self, file):
- """ Process a file object.
- """
- if sys.version_info[0] >= 3:
- nxt = file.__next__
- else:
- nxt = file.next
- for token in tokenize.generate_tokens(nxt):
- self.process_token(*token)
- self.make_index()
-
- def process_token(self, kind, string, start, end, line):
- """ Process a single token.
- """
- if self.current_block.is_comment:
- if kind == tokenize.COMMENT:
- self.current_block.add(string, start, end, line)
- else:
- self.new_noncomment(start[0], end[0])
- else:
- if kind == tokenize.COMMENT:
- self.new_comment(string, start, end, line)
- else:
- self.current_block.add(string, start, end, line)
-
- def new_noncomment(self, start_lineno, end_lineno):
- """ We are transitioning from a noncomment to a comment.
- """
- block = NonComment(start_lineno, end_lineno)
- self.blocks.append(block)
- self.current_block = block
-
- def new_comment(self, string, start, end, line):
- """ Possibly add a new comment.
-
- Only adds a new comment if this comment is the only thing on the line.
- Otherwise, it extends the noncomment block.
- """
- prefix = line[:start[1]]
- if prefix.strip():
- # Oops! Trailing comment, not a comment block.
- self.current_block.add(string, start, end, line)
- else:
- # A comment block.
- block = Comment(start[0], end[0], string)
- self.blocks.append(block)
- self.current_block = block
-
- def make_index(self):
- """ Make the index mapping lines of actual code to their associated
- prefix comments.
- """
- for prev, block in zip(self.blocks[:-1], self.blocks[1:]):
- if not block.is_comment:
- self.index[block.start_lineno] = prev
-
- def search_for_comment(self, lineno, default=None):
- """ Find the comment block just before the given line number.
-
- Returns None (or the specified default) if there is no such block.
- """
- if not self.index:
- self.make_index()
- block = self.index.get(lineno, None)
- text = getattr(block, 'text', default)
- return text
-
-
-def strip_comment_marker(text):
- """ Strip # markers at the front of a block of comment text.
- """
- lines = []
- for line in text.splitlines():
- lines.append(line.lstrip('#'))
- text = textwrap.dedent('\n'.join(lines))
- return text
-
-
-def get_class_traits(klass):
- """ Yield all of the documentation for trait definitions on a class object.
- """
- # FIXME: gracefully handle errors here or in the caller?
- source = inspect.getsource(klass)
- cb = CommentBlocker()
- cb.process_file(StringIO(source))
- mod_ast = compiler.parse(source)
- class_ast = mod_ast.node.nodes[0]
- for node in class_ast.code.nodes:
- # FIXME: handle other kinds of assignments?
- if isinstance(node, compiler.ast.Assign):
- name = node.nodes[0].name
- rhs = unparse(node.expr).strip()
- doc = strip_comment_marker(cb.search_for_comment(node.lineno, default=''))
- yield name, rhs, doc
-
diff --git a/doc/sphinxext/numpydoc/compiler_unparse.py b/doc/sphinxext/numpydoc/compiler_unparse.py
deleted file mode 100644
index 56da748d1..000000000
--- a/doc/sphinxext/numpydoc/compiler_unparse.py
+++ /dev/null
@@ -1,865 +0,0 @@
-""" Turn compiler.ast structures back into executable python code.
-
- The unparse method takes a compiler.ast tree and transforms it back into
- valid python code. It is incomplete and currently only works for
- import statements, function calls, function definitions, assignments, and
- basic expressions.
-
- Inspired by python-2.5-svn/Demo/parser/unparse.py
-
- fixme: We may want to move to using _ast trees because the compiler for
- them is about 6 times faster than compiler.compile.
-"""
-from __future__ import division
-
-import sys
-from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-def unparse(ast, single_line_functions=False):
- s = StringIO()
- UnparseCompilerAst(ast, s, single_line_functions)
- return s.getvalue().lstrip()
-
-op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2,
- 'compiler.ast.Add':1, 'compiler.ast.Sub':1 }
-
-class UnparseCompilerAst:
- """ Methods in this class recursively traverse an AST and
- output source code for the abstract syntax; original formatting
- is disregarged.
- """
-
- #########################################################################
- # object interface.
- #########################################################################
-
- def __init__(self, tree, file = sys.stdout, single_line_functions=False):
- """ Unparser(tree, file=sys.stdout) -> None.
-
- Print the source for tree to file.
- """
- self.f = file
- self._single_func = single_line_functions
- self._do_indent = True
- self._indent = 0
- self._dispatch(tree)
- self._write("\n")
- self.f.flush()
-
- #########################################################################
- # Unparser private interface.
- #########################################################################
-
- ### format, output, and dispatch methods ################################
-
- def _fill(self, text = ""):
- "Indent a piece of text, according to the current indentation level"
- if self._do_indent:
- self._write("\n"+" "*self._indent + text)
- else:
- self._write(text)
-
- def _write(self, text):
- "Append a piece of text to the current line."
- self.f.write(text)
-
- def _enter(self):
- "Print ':', and increase the indentation."
- self._write(": ")
- self._indent += 1
-
- def _leave(self):
- "Decrease the indentation level."
- self._indent -= 1
-
- def _dispatch(self, tree):
- "_dispatcher function, _dispatching tree type T to method _T."
- if isinstance(tree, list):
- for t in tree:
- self._dispatch(t)
- return
- meth = getattr(self, "_"+tree.__class__.__name__)
- if tree.__class__.__name__ == 'NoneType' and not self._do_indent:
- return
- meth(tree)
-
-
- #########################################################################
- # compiler.ast unparsing methods.
- #
- # There should be one method per concrete grammar type. They are
- # organized in alphabetical order.
- #########################################################################
-
- def _Add(self, t):
- self.__binary_op(t, '+')
-
- def _And(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") and (")
- self._write(")")
-
- def _AssAttr(self, t):
- """ Handle assigning an attribute of an object
- """
- self._dispatch(t.expr)
- self._write('.'+t.attrname)
-
- def _Assign(self, t):
- """ Expression Assignment such as "a = 1".
-
- This only handles assignment in expressions. Keyword assignment
- is handled separately.
- """
- self._fill()
- for target in t.nodes:
- self._dispatch(target)
- self._write(" = ")
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write('; ')
-
- def _AssName(self, t):
- """ Name on left hand side of expression.
-
- Treat just like a name on the right side of an expression.
- """
- self._Name(t)
-
- def _AssTuple(self, t):
- """ Tuple on left hand side of an expression.
- """
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- def _AugAssign(self, t):
- """ +=,-=,*=,/=,**=, etc. operations
- """
-
- self._fill()
- self._dispatch(t.node)
- self._write(' '+t.op+' ')
- self._dispatch(t.expr)
- if not self._do_indent:
- self._write(';')
-
- def _Bitand(self, t):
- """ Bit and operation.
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" & ")
-
- def _Bitor(self, t):
- """ Bit or operation
- """
-
- for i, node in enumerate(t.nodes):
- self._write("(")
- self._dispatch(node)
- self._write(")")
- if i != len(t.nodes)-1:
- self._write(" | ")
-
- def _CallFunc(self, t):
- """ Function call.
- """
- self._dispatch(t.node)
- self._write("(")
- comma = False
- for e in t.args:
- if comma: self._write(", ")
- else: comma = True
- self._dispatch(e)
- if t.star_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("*")
- self._dispatch(t.star_args)
- if t.dstar_args:
- if comma: self._write(", ")
- else: comma = True
- self._write("**")
- self._dispatch(t.dstar_args)
- self._write(")")
-
- def _Compare(self, t):
- self._dispatch(t.expr)
- for op, expr in t.ops:
- self._write(" " + op + " ")
- self._dispatch(expr)
-
- def _Const(self, t):
- """ A constant value such as an integer value, 3, or a string, "hello".
- """
- self._dispatch(t.value)
-
- def _Decorators(self, t):
- """ Handle function decorators (eg. @has_units)
- """
- for node in t.nodes:
- self._dispatch(node)
-
- def _Dict(self, t):
- self._write("{")
- for i, (k, v) in enumerate(t.items):
- self._dispatch(k)
- self._write(": ")
- self._dispatch(v)
- if i < len(t.items)-1:
- self._write(", ")
- self._write("}")
-
- def _Discard(self, t):
- """ Node for when return value is ignored such as in "foo(a)".
- """
- self._fill()
- self._dispatch(t.expr)
-
- def _Div(self, t):
- self.__binary_op(t, '/')
-
- def _Ellipsis(self, t):
- self._write("...")
-
- def _From(self, t):
- """ Handle "from xyz import foo, bar as baz".
- """
- # fixme: Are From and ImportFrom handled differently?
- self._fill("from ")
- self._write(t.modname)
- self._write(" import ")
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Function(self, t):
- """ Handle function definitions
- """
- if t.decorators is not None:
- self._fill("@")
- self._dispatch(t.decorators)
- self._fill("def "+t.name + "(")
- defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults)
- for i, arg in enumerate(zip(t.argnames, defaults)):
- self._write(arg[0])
- if arg[1] is not None:
- self._write('=')
- self._dispatch(arg[1])
- if i < len(t.argnames)-1:
- self._write(', ')
- self._write(")")
- if self._single_func:
- self._do_indent = False
- self._enter()
- self._dispatch(t.code)
- self._leave()
- self._do_indent = True
-
- def _Getattr(self, t):
- """ Handle getting an attribute of an object
- """
- if isinstance(t.expr, (Div, Mul, Sub, Add)):
- self._write('(')
- self._dispatch(t.expr)
- self._write(')')
- else:
- self._dispatch(t.expr)
-
- self._write('.'+t.attrname)
-
- def _If(self, t):
- self._fill()
-
- for i, (compare,code) in enumerate(t.tests):
- if i == 0:
- self._write("if ")
- else:
- self._write("elif ")
- self._dispatch(compare)
- self._enter()
- self._fill()
- self._dispatch(code)
- self._leave()
- self._write("\n")
-
- if t.else_ is not None:
- self._write("else")
- self._enter()
- self._fill()
- self._dispatch(t.else_)
- self._leave()
- self._write("\n")
-
- def _IfExp(self, t):
- self._dispatch(t.then)
- self._write(" if ")
- self._dispatch(t.test)
-
- if t.else_ is not None:
- self._write(" else (")
- self._dispatch(t.else_)
- self._write(")")
-
- def _Import(self, t):
- """ Handle "import xyz.foo".
- """
- self._fill("import ")
-
- for i, (name,asname) in enumerate(t.names):
- if i != 0:
- self._write(", ")
- self._write(name)
- if asname is not None:
- self._write(" as "+asname)
-
- def _Keyword(self, t):
- """ Keyword value assignment within function calls and definitions.
- """
- self._write(t.name)
- self._write("=")
- self._dispatch(t.expr)
-
- def _List(self, t):
- self._write("[")
- for i,node in enumerate(t.nodes):
- self._dispatch(node)
- if i < len(t.nodes)-1:
- self._write(", ")
- self._write("]")
-
- def _Module(self, t):
- if t.doc is not None:
- self._dispatch(t.doc)
- self._dispatch(t.node)
-
- def _Mul(self, t):
- self.__binary_op(t, '*')
-
- def _Name(self, t):
- self._write(t.name)
-
- def _NoneType(self, t):
- self._write("None")
-
- def _Not(self, t):
- self._write('not (')
- self._dispatch(t.expr)
- self._write(')')
-
- def _Or(self, t):
- self._write(" (")
- for i, node in enumerate(t.nodes):
- self._dispatch(node)
- if i != len(t.nodes)-1:
- self._write(") or (")
- self._write(")")
-
- def _Pass(self, t):
- self._write("pass\n")
-
- def _Printnl(self, t):
- self._fill("print ")
- if t.dest:
- self._write(">> ")
- self._dispatch(t.dest)
- self._write(", ")
- comma = False
- for node in t.nodes:
- if comma: self._write(', ')
- else: comma = True
- self._dispatch(node)
-
- def _Power(self, t):
- self.__binary_op(t, '**')
-
- def _Return(self, t):
- self._fill("return ")
- if t.value:
- if isinstance(t.value, Tuple):
- text = ', '.join([ name.name for name in t.value.asList() ])
- self._write(text)
- else:
- self._dispatch(t.value)
- if not self._do_indent:
- self._write('; ')
-
- def _Slice(self, t):
- self._dispatch(t.expr)
- self._write("[")
- if t.lower:
- self._dispatch(t.lower)
- self._write(":")
- if t.upper:
- self._dispatch(t.upper)
- #if t.step:
- # self._write(":")
- # self._dispatch(t.step)
- self._write("]")
-
- def _Sliceobj(self, t):
- for i, node in enumerate(t.nodes):
- if i != 0:
- self._write(":")
- if not (isinstance(node, Const) and node.value is None):
- self._dispatch(node)
-
- def _Stmt(self, tree):
- for node in tree.nodes:
- self._dispatch(node)
-
- def _Sub(self, t):
- self.__binary_op(t, '-')
-
- def _Subscript(self, t):
- self._dispatch(t.expr)
- self._write("[")
- for i, value in enumerate(t.subs):
- if i != 0:
- self._write(",")
- self._dispatch(value)
- self._write("]")
-
- def _TryExcept(self, t):
- self._fill("try")
- self._enter()
- self._dispatch(t.body)
- self._leave()
-
- for handler in t.handlers:
- self._fill('except ')
- self._dispatch(handler[0])
- if handler[1] is not None:
- self._write(', ')
- self._dispatch(handler[1])
- self._enter()
- self._dispatch(handler[2])
- self._leave()
-
- if t.else_:
- self._fill("else")
- self._enter()
- self._dispatch(t.else_)
- self._leave()
-
- def _Tuple(self, t):
-
- if not t.nodes:
- # Empty tuple.
- self._write("()")
- else:
- self._write("(")
-
- # _write each elements, separated by a comma.
- for element in t.nodes[:-1]:
- self._dispatch(element)
- self._write(", ")
-
- # Handle the last one without writing comma
- last_element = t.nodes[-1]
- self._dispatch(last_element)
-
- self._write(")")
-
- def _UnaryAdd(self, t):
- self._write("+")
- self._dispatch(t.expr)
-
- def _UnarySub(self, t):
- self._write("-")
- self._dispatch(t.expr)
-
- def _With(self, t):
- self._fill('with ')
- self._dispatch(t.expr)
- if t.vars:
- self._write(' as ')
- self._dispatch(t.vars.name)
- self._enter()
- self._dispatch(t.body)
- self._leave()
- self._write('\n')
-
- def _int(self, t):
- self._write(repr(t))
-
- def __binary_op(self, t, symbol):
- # Check if parenthesis are needed on left side and then dispatch
- has_paren = False
- left_class = str(t.left.__class__)
- if (left_class in op_precedence.keys() and
- op_precedence[left_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.left)
- if has_paren:
- self._write(')')
- # Write the appropriate symbol for operator
- self._write(symbol)
- # Check if parenthesis are needed on the right side and then dispatch
- has_paren = False
- right_class = str(t.right.__class__)
- if (right_class in op_precedence.keys() and
- op_precedence[right_class] < op_precedence[str(t.__class__)]):
- has_paren = True
- if has_paren:
- self._write('(')
- self._dispatch(t.right)
- if has_paren:
- self._write(')')
-
- def _float(self, t):
- # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001'
- # We prefer str here.
- self._write(str(t))
-
- def _str(self, t):
- self._write(repr(t))
-
- def _tuple(self, t):
- self._write(str(t))
-
- #########################################################################
- # These are the methods from the _ast modules unparse.
- #
- # As our needs to handle more advanced code increase, we may want to
- # modify some of the methods below so that they work for compiler.ast.
- #########################################################################
-
-# # stmt
-# def _Expr(self, tree):
-# self._fill()
-# self._dispatch(tree.value)
-#
-# def _Import(self, t):
-# self._fill("import ")
-# first = True
-# for a in t.names:
-# if first:
-# first = False
-# else:
-# self._write(", ")
-# self._write(a.name)
-# if a.asname:
-# self._write(" as "+a.asname)
-#
-## def _ImportFrom(self, t):
-## self._fill("from ")
-## self._write(t.module)
-## self._write(" import ")
-## for i, a in enumerate(t.names):
-## if i == 0:
-## self._write(", ")
-## self._write(a.name)
-## if a.asname:
-## self._write(" as "+a.asname)
-## # XXX(jpe) what is level for?
-##
-#
-# def _Break(self, t):
-# self._fill("break")
-#
-# def _Continue(self, t):
-# self._fill("continue")
-#
-# def _Delete(self, t):
-# self._fill("del ")
-# self._dispatch(t.targets)
-#
-# def _Assert(self, t):
-# self._fill("assert ")
-# self._dispatch(t.test)
-# if t.msg:
-# self._write(", ")
-# self._dispatch(t.msg)
-#
-# def _Exec(self, t):
-# self._fill("exec ")
-# self._dispatch(t.body)
-# if t.globals:
-# self._write(" in ")
-# self._dispatch(t.globals)
-# if t.locals:
-# self._write(", ")
-# self._dispatch(t.locals)
-#
-# def _Print(self, t):
-# self._fill("print ")
-# do_comma = False
-# if t.dest:
-# self._write(">>")
-# self._dispatch(t.dest)
-# do_comma = True
-# for e in t.values:
-# if do_comma:self._write(", ")
-# else:do_comma=True
-# self._dispatch(e)
-# if not t.nl:
-# self._write(",")
-#
-# def _Global(self, t):
-# self._fill("global")
-# for i, n in enumerate(t.names):
-# if i != 0:
-# self._write(",")
-# self._write(" " + n)
-#
-# def _Yield(self, t):
-# self._fill("yield")
-# if t.value:
-# self._write(" (")
-# self._dispatch(t.value)
-# self._write(")")
-#
-# def _Raise(self, t):
-# self._fill('raise ')
-# if t.type:
-# self._dispatch(t.type)
-# if t.inst:
-# self._write(", ")
-# self._dispatch(t.inst)
-# if t.tback:
-# self._write(", ")
-# self._dispatch(t.tback)
-#
-#
-# def _TryFinally(self, t):
-# self._fill("try")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# self._fill("finally")
-# self._enter()
-# self._dispatch(t.finalbody)
-# self._leave()
-#
-# def _excepthandler(self, t):
-# self._fill("except ")
-# if t.type:
-# self._dispatch(t.type)
-# if t.name:
-# self._write(", ")
-# self._dispatch(t.name)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _ClassDef(self, t):
-# self._write("\n")
-# self._fill("class "+t.name)
-# if t.bases:
-# self._write("(")
-# for a in t.bases:
-# self._dispatch(a)
-# self._write(", ")
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _FunctionDef(self, t):
-# self._write("\n")
-# for deco in t.decorators:
-# self._fill("@")
-# self._dispatch(deco)
-# self._fill("def "+t.name + "(")
-# self._dispatch(t.args)
-# self._write(")")
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-#
-# def _For(self, t):
-# self._fill("for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# def _While(self, t):
-# self._fill("while ")
-# self._dispatch(t.test)
-# self._enter()
-# self._dispatch(t.body)
-# self._leave()
-# if t.orelse:
-# self._fill("else")
-# self._enter()
-# self._dispatch(t.orelse)
-# self._leave
-#
-# # expr
-# def _Str(self, tree):
-# self._write(repr(tree.s))
-##
-# def _Repr(self, t):
-# self._write("`")
-# self._dispatch(t.value)
-# self._write("`")
-#
-# def _Num(self, t):
-# self._write(repr(t.n))
-#
-# def _ListComp(self, t):
-# self._write("[")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write("]")
-#
-# def _GeneratorExp(self, t):
-# self._write("(")
-# self._dispatch(t.elt)
-# for gen in t.generators:
-# self._dispatch(gen)
-# self._write(")")
-#
-# def _comprehension(self, t):
-# self._write(" for ")
-# self._dispatch(t.target)
-# self._write(" in ")
-# self._dispatch(t.iter)
-# for if_clause in t.ifs:
-# self._write(" if ")
-# self._dispatch(if_clause)
-#
-# def _IfExp(self, t):
-# self._dispatch(t.body)
-# self._write(" if ")
-# self._dispatch(t.test)
-# if t.orelse:
-# self._write(" else ")
-# self._dispatch(t.orelse)
-#
-# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"}
-# def _UnaryOp(self, t):
-# self._write(self.unop[t.op.__class__.__name__])
-# self._write("(")
-# self._dispatch(t.operand)
-# self._write(")")
-#
-# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%",
-# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&",
-# "FloorDiv":"//", "Pow": "**"}
-# def _BinOp(self, t):
-# self._write("(")
-# self._dispatch(t.left)
-# self._write(")" + self.binop[t.op.__class__.__name__] + "(")
-# self._dispatch(t.right)
-# self._write(")")
-#
-# boolops = {_ast.And: 'and', _ast.Or: 'or'}
-# def _BoolOp(self, t):
-# self._write("(")
-# self._dispatch(t.values[0])
-# for v in t.values[1:]:
-# self._write(" %s " % self.boolops[t.op.__class__])
-# self._dispatch(v)
-# self._write(")")
-#
-# def _Attribute(self,t):
-# self._dispatch(t.value)
-# self._write(".")
-# self._write(t.attr)
-#
-## def _Call(self, t):
-## self._dispatch(t.func)
-## self._write("(")
-## comma = False
-## for e in t.args:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## for e in t.keywords:
-## if comma: self._write(", ")
-## else: comma = True
-## self._dispatch(e)
-## if t.starargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("*")
-## self._dispatch(t.starargs)
-## if t.kwargs:
-## if comma: self._write(", ")
-## else: comma = True
-## self._write("**")
-## self._dispatch(t.kwargs)
-## self._write(")")
-#
-# # slice
-# def _Index(self, t):
-# self._dispatch(t.value)
-#
-# def _ExtSlice(self, t):
-# for i, d in enumerate(t.dims):
-# if i != 0:
-# self._write(': ')
-# self._dispatch(d)
-#
-# # others
-# def _arguments(self, t):
-# first = True
-# nonDef = len(t.args)-len(t.defaults)
-# for a in t.args[0:nonDef]:
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a)
-# for a,d in zip(t.args[nonDef:], t.defaults):
-# if first:first = False
-# else: self._write(", ")
-# self._dispatch(a),
-# self._write("=")
-# self._dispatch(d)
-# if t.vararg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("*"+t.vararg)
-# if t.kwarg:
-# if first:first = False
-# else: self._write(", ")
-# self._write("**"+t.kwarg)
-#
-## def _keyword(self, t):
-## self._write(t.arg)
-## self._write("=")
-## self._dispatch(t.value)
-#
-# def _Lambda(self, t):
-# self._write("lambda ")
-# self._dispatch(t.args)
-# self._write(": ")
-# self._dispatch(t.body)
-
-
-
diff --git a/doc/sphinxext/numpydoc/docscrape.py b/doc/sphinxext/numpydoc/docscrape.py
deleted file mode 100644
index 0fe259447..000000000
--- a/doc/sphinxext/numpydoc/docscrape.py
+++ /dev/null
@@ -1,525 +0,0 @@
-"""Extract reference documentation from the NumPy source tree.
-
-"""
-from __future__ import division
-
-import sys
-import inspect
-import textwrap
-import re
-import pydoc
-from warnings import warn
-import collections
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-class Reader(object):
- """A line-based string reader.
-
- """
- def __init__(self, data):
- """
- Parameters
- ----------
- data : str
- String with lines separated by '\n'.
-
- """
- if isinstance(data,list):
- self._str = data
- else:
- self._str = data.split('\n') # store string as list of lines
-
- self.reset()
-
- def __getitem__(self, n):
- return self._str[n]
-
- def reset(self):
- self._l = 0 # current line nr
-
- def read(self):
- if not self.eof():
- out = self[self._l]
- self._l += 1
- return out
- else:
- return ''
-
- def seek_next_non_empty_line(self):
- for l in self[self._l:]:
- if l.strip():
- break
- else:
- self._l += 1
-
- def eof(self):
- return self._l >= len(self._str)
-
- def read_to_condition(self, condition_func):
- start = self._l
- for line in self[start:]:
- if condition_func(line):
- return self[start:self._l]
- self._l += 1
- if self.eof():
- return self[start:self._l+1]
- return []
-
- def read_to_next_empty_line(self):
- self.seek_next_non_empty_line()
- def is_empty(line):
- return not line.strip()
- return self.read_to_condition(is_empty)
-
- def read_to_next_unindented_line(self):
- def is_unindented(line):
- return (line.strip() and (len(line.lstrip()) == len(line)))
- return self.read_to_condition(is_unindented)
-
- def peek(self,n=0):
- if self._l + n < len(self._str):
- return self[self._l + n]
- else:
- return ''
-
- def is_empty(self):
- return not ''.join(self._str).strip()
-
-
-class NumpyDocString(object):
- def __init__(self, docstring, config={}):
- docstring = textwrap.dedent(docstring).split('\n')
-
- self._doc = Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': [''],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Attributes': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'Warnings': [],
- 'References': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def __getitem__(self,key):
- return self._parsed_data[key]
-
- def __setitem__(self,key,val):
- if key not in self._parsed_data:
- warn("Unknown section %s" % key)
- else:
- self._parsed_data[key] = val
-
- def _is_at_section(self):
- self._doc.seek_next_non_empty_line()
-
- if self._doc.eof():
- return False
-
- l1 = self._doc.peek().strip() # e.g. Parameters
-
- if l1.startswith('.. index::'):
- return True
-
- l2 = self._doc.peek(1).strip() # ---------- or ==========
- return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1))
-
- def _strip(self,doc):
- i = 0
- j = 0
- for i,line in enumerate(doc):
- if line.strip(): break
-
- for j,line in enumerate(doc[::-1]):
- if line.strip(): break
-
- return doc[i:len(doc)-j]
-
- def _read_to_next_section(self):
- section = self._doc.read_to_next_empty_line()
-
- while not self._is_at_section() and not self._doc.eof():
- if not self._doc.peek(-1).strip(): # previous line was empty
- section += ['']
-
- section += self._doc.read_to_next_empty_line()
-
- return section
-
- def _read_sections(self):
- while not self._doc.eof():
- data = self._read_to_next_section()
- name = data[0].strip()
-
- if name.startswith('..'): # index section
- yield name, data[1:]
- elif len(data) < 2:
- yield StopIteration
- else:
- yield name, self._strip(data[2:])
-
- def _parse_param_list(self,content):
- r = Reader(content)
- params = []
- while not r.eof():
- header = r.read().strip()
- if ' : ' in header:
- arg_name, arg_type = header.split(' : ')[:2]
- else:
- arg_name, arg_type = header, ''
-
- desc = r.read_to_next_unindented_line()
- desc = dedent_lines(desc)
-
- params.append((arg_name,arg_type,desc))
-
- return params
-
-
- _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
- r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)
- def _parse_see_also(self, content):
- """
- func_name : Descriptive text
- continued text
- another_func_name : Descriptive text
- func_name1, func_name2, :meth:`func_name`, func_name3
-
- """
- items = []
-
- def parse_item_name(text):
- """Match ':role:`name`' or 'name'"""
- m = self._name_rgx.match(text)
- if m:
- g = m.groups()
- if g[1] is None:
- return g[3], None
- else:
- return g[2], g[1]
- raise ValueError("%s is not a item name" % text)
-
- def push_item(name, rest):
- if not name:
- return
- name, role = parse_item_name(name)
- items.append((name, list(rest), role))
- del rest[:]
-
- current_func = None
- rest = []
-
- for line in content:
- if not line.strip(): continue
-
- m = self._name_rgx.match(line)
- if m and line[m.end():].strip().startswith(':'):
- push_item(current_func, rest)
- current_func, line = line[:m.end()], line[m.end():]
- rest = [line.split(':', 1)[1].strip()]
- if not rest[0]:
- rest = []
- elif not line.startswith(' '):
- push_item(current_func, rest)
- current_func = None
- if ',' in line:
- for func in line.split(','):
- if func.strip():
- push_item(func, [])
- elif line.strip():
- current_func = line
- elif current_func is not None:
- rest.append(line.strip())
- push_item(current_func, rest)
- return items
-
- def _parse_index(self, section, content):
- """
- .. index: default
- :refguide: something, else, and more
-
- """
- def strip_each_in(lst):
- return [s.strip() for s in lst]
-
- out = {}
- section = section.split('::')
- if len(section) > 1:
- out['default'] = strip_each_in(section[1].split(','))[0]
- for line in content:
- line = line.split(':')
- if len(line) > 2:
- out[line[1]] = strip_each_in(line[2].split(','))
- return out
-
- def _parse_summary(self):
- """Grab signature (if given) and summary"""
- if self._is_at_section():
- return
-
- # If several signatures present, take the last one
- while True:
- summary = self._doc.read_to_next_empty_line()
- summary_str = " ".join([s.strip() for s in summary]).strip()
- if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
- self['Signature'] = summary_str
- if not self._is_at_section():
- continue
- break
-
- if summary is not None:
- self['Summary'] = summary
-
- if not self._is_at_section():
- self['Extended Summary'] = self._read_to_next_section()
-
- def _parse(self):
- self._doc.reset()
- self._parse_summary()
-
- for (section,content) in self._read_sections():
- if not section.startswith('..'):
- section = ' '.join([s.capitalize() for s in section.split(' ')])
- if section in ('Parameters', 'Returns', 'Raises', 'Warns',
- 'Other Parameters', 'Attributes', 'Methods'):
- self[section] = self._parse_param_list(content)
- elif section.startswith('.. index::'):
- self['index'] = self._parse_index(section, content)
- elif section == 'See Also':
- self['See Also'] = self._parse_see_also(content)
- else:
- self[section] = content
-
- # string conversion routines
-
- def _str_header(self, name, symbol='-'):
- return [name, len(name)*symbol]
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- if self['Signature']:
- return [self['Signature'].replace('*','\*')] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- if self['Summary']:
- return self['Summary'] + ['']
- else:
- return []
-
- def _str_extended_summary(self):
- if self['Extended Summary']:
- return self['Extended Summary'] + ['']
- else:
- return []
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- for param,param_type,desc in self[name]:
- out += ['%s : %s' % (param, param_type)]
- out += self._str_indent(desc)
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += self[name]
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- if not self['See Also']: return []
- out = []
- out += self._str_header("See Also")
- last_had_desc = True
- for func, desc, role in self['See Also']:
- if role:
- link = ':%s:`%s`' % (role, func)
- elif func_role:
- link = ':%s:`%s`' % (func_role, func)
- else:
- link = "`%s`_" % func
- if desc or last_had_desc:
- out += ['']
- out += [link]
- else:
- out[-1] += ", %s" % link
- if desc:
- out += self._str_indent([' '.join(desc)])
- last_had_desc = True
- else:
- last_had_desc = False
- out += ['']
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.items():
- if section == 'default':
- continue
- out += [' :%s: %s' % (section, ', '.join(references))]
- return out
-
- def __str__(self, func_role=''):
- out = []
- out += self._str_signature()
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_section('Warnings')
- out += self._str_see_also(func_role)
- for s in ('Notes','References','Examples'):
- out += self._str_section(s)
- for param_list in ('Attributes', 'Methods'):
- out += self._str_param_list(param_list)
- out += self._str_index()
- return '\n'.join(out)
-
-
-def indent(str,indent=4):
- indent_str = ' '*indent
- if str is None:
- return indent_str
- lines = str.split('\n')
- return '\n'.join(indent_str + l for l in lines)
-
-def dedent_lines(lines):
- """Deindent a list of lines maximally"""
- return textwrap.dedent("\n".join(lines)).split("\n")
-
-def header(text, style='-'):
- return text + '\n' + style*len(text) + '\n'
-
-
-class FunctionDoc(NumpyDocString):
- def __init__(self, func, role='func', doc=None, config={}):
- self._f = func
- self._role = role # e.g. "func" or "meth"
-
- if doc is None:
- if func is None:
- raise ValueError("No function or docstring given")
- doc = inspect.getdoc(func) or ''
- NumpyDocString.__init__(self, doc)
-
- if not self['Signature'] and func is not None:
- func, func_name = self.get_func()
- try:
- # try to read signature
- argspec = inspect.getargspec(func)
- argspec = inspect.formatargspec(*argspec)
- argspec = argspec.replace('*','\*')
- signature = '%s%s' % (func_name, argspec)
- except TypeError as e:
- signature = '%s()' % func_name
- self['Signature'] = signature
-
- def get_func(self):
- func_name = getattr(self._f, '__name__', self.__class__.__name__)
- if inspect.isclass(self._f):
- func = getattr(self._f, '__call__', self._f.__init__)
- else:
- func = self._f
- return func, func_name
-
- def __str__(self):
- out = ''
-
- func, func_name = self.get_func()
- signature = self['Signature'].replace('*', '\*')
-
- roles = {'func': 'function',
- 'meth': 'method'}
-
- if self._role:
- if self._role not in roles:
- print("Warning: invalid role %s" % self._role)
- out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''),
- func_name)
-
- out += super(FunctionDoc, self).__str__(func_role=self._role)
- return out
-
-
-class ClassDoc(NumpyDocString):
-
- extra_public_methods = ['__call__']
-
- def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc,
- config={}):
- if not inspect.isclass(cls) and cls is not None:
- raise ValueError("Expected a class or None, but got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
-
- if doc is None:
- if cls is None:
- raise ValueError("No class or documentation string given")
- doc = pydoc.getdoc(cls)
-
- NumpyDocString.__init__(self, doc)
-
- if config.get('show_class_members', True):
- def splitlines_x(s):
- if not s:
- return []
- else:
- return s.splitlines()
-
- for field, items in [('Methods', self.methods),
- ('Attributes', self.properties)]:
- if not self[field]:
- self[field] = [
- (name, '',
- splitlines_x(pydoc.getdoc(getattr(self._cls, name))))
- for name in sorted(items)]
-
- @property
- def methods(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if ((not name.startswith('_')
- or name in self.extra_public_methods)
- and isinstance(func, collections.Callable))]
-
- @property
- def properties(self):
- if self._cls is None:
- return []
- return [name for name,func in inspect.getmembers(self._cls)
- if not name.startswith('_') and
- (func is None or isinstance(func, property) or
- inspect.isgetsetdescriptor(func))]
diff --git a/doc/sphinxext/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py
deleted file mode 100644
index 14d766d84..000000000
--- a/doc/sphinxext/numpydoc/docscrape_sphinx.py
+++ /dev/null
@@ -1,239 +0,0 @@
-from __future__ import division
-
-import re, inspect, textwrap, pydoc
-import sphinx
-import collections
-from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
-
-class SphinxDocString(NumpyDocString):
- def __init__(self, docstring, config={}):
- self.use_plots = config.get('use_plots', False)
- NumpyDocString.__init__(self, docstring, config=config)
-
- # string conversion routines
- def _str_header(self, name, symbol='`'):
- return ['.. rubric:: ' + name, '']
-
- def _str_field_list(self, name):
- return [':' + name + ':']
-
- def _str_indent(self, doc, indent=4):
- out = []
- for line in doc:
- out += [' '*indent + line]
- return out
-
- def _str_signature(self):
- return ['']
- if self['Signature']:
- return ['``%s``' % self['Signature']] + ['']
- else:
- return ['']
-
- def _str_summary(self):
- return self['Summary'] + ['']
-
- def _str_extended_summary(self):
- return self['Extended Summary'] + ['']
-
- def _str_param_list(self, name):
- out = []
- if self[name]:
- out += self._str_field_list(name)
- out += ['']
- for param,param_type,desc in self[name]:
- out += self._str_indent(['**%s** : %s' % (param.strip(),
- param_type)])
- out += ['']
- out += self._str_indent(desc,8)
- out += ['']
- return out
-
- @property
- def _obj(self):
- if hasattr(self, '_cls'):
- return self._cls
- elif hasattr(self, '_f'):
- return self._f
- return None
-
- def _str_member_list(self, name):
- """
- Generate a member listing, autosummary:: table where possible,
- and a table where not.
-
- """
- out = []
- if self[name]:
- out += ['.. rubric:: %s' % name, '']
- prefix = getattr(self, '_name', '')
-
- if prefix:
- prefix = '~%s.' % prefix
-
- autosum = []
- others = []
- for param, param_type, desc in self[name]:
- param = param.strip()
-
- # Check if the referenced member can have a docstring or not
- param_obj = getattr(self._obj, param, None)
- if not (callable(param_obj)
- or isinstance(param_obj, property)
- or inspect.isgetsetdescriptor(param_obj)):
- param_obj = None
-
- if param_obj and (pydoc.getdoc(param_obj) or not desc):
- # Referenced object has a docstring
- autosum += [" %s%s" % (prefix, param)]
- else:
- others.append((param, param_type, desc))
-
- if autosum:
- out += ['.. autosummary::', ' :toctree:', '']
- out += autosum
-
- if others:
- maxlen_0 = max(3, max([len(x[0]) for x in others]))
- hdr = u"="*maxlen_0 + u" " + u"="*10
- fmt = u'%%%ds %%s ' % (maxlen_0,)
- out += ['', hdr]
- for param, param_type, desc in others:
- desc = u" ".join(x.strip() for x in desc).strip()
- if param_type:
- desc = "(%s) %s" % (param_type, desc)
- out += [fmt % (param.strip(), desc)]
- out += [hdr]
- out += ['']
- return out
-
- def _str_section(self, name):
- out = []
- if self[name]:
- out += self._str_header(name)
- out += ['']
- content = textwrap.dedent("\n".join(self[name])).split("\n")
- out += content
- out += ['']
- return out
-
- def _str_see_also(self, func_role):
- out = []
- if self['See Also']:
- see_also = super(SphinxDocString, self)._str_see_also(func_role)
- out = ['.. seealso::', '']
- out += self._str_indent(see_also[2:])
- return out
-
- def _str_warnings(self):
- out = []
- if self['Warnings']:
- out = ['.. warning::', '']
- out += self._str_indent(self['Warnings'])
- return out
-
- def _str_index(self):
- idx = self['index']
- out = []
- if len(idx) == 0:
- return out
-
- out += ['.. index:: %s' % idx.get('default','')]
- for section, references in idx.items():
- if section == 'default':
- continue
- elif section == 'refguide':
- out += [' single: %s' % (', '.join(references))]
- else:
- out += [' %s: %s' % (section, ','.join(references))]
- return out
-
- def _str_references(self):
- out = []
- if self['References']:
- out += self._str_header('References')
- if isinstance(self['References'], str):
- self['References'] = [self['References']]
- out.extend(self['References'])
- out += ['']
- # Latex collects all references to a separate bibliography,
- # so we need to insert links to it
- if sphinx.__version__ >= "0.6":
- out += ['.. only:: latex','']
- else:
- out += ['.. latexonly::','']
- items = []
- for line in self['References']:
- m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
- if m:
- items.append(m.group(1))
- out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
- return out
-
- def _str_examples(self):
- examples_str = "\n".join(self['Examples'])
-
- if (self.use_plots and 'import matplotlib' in examples_str
- and 'plot::' not in examples_str):
- out = []
- out += self._str_header('Examples')
- out += ['.. plot::', '']
- out += self._str_indent(self['Examples'])
- out += ['']
- return out
- else:
- return self._str_section('Examples')
-
- def __str__(self, indent=0, func_role="obj"):
- out = []
- out += self._str_signature()
- out += self._str_index() + ['']
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Returns', 'Other Parameters',
- 'Raises', 'Warns'):
- out += self._str_param_list(param_list)
- out += self._str_warnings()
- out += self._str_see_also(func_role)
- out += self._str_section('Notes')
- out += self._str_references()
- out += self._str_examples()
- for param_list in ('Attributes', 'Methods'):
- out += self._str_member_list(param_list)
- out = self._str_indent(out,indent)
- return '\n'.join(out)
-
-class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
- def __init__(self, obj, doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
- FunctionDoc.__init__(self, obj, doc=doc, config=config)
-
-class SphinxClassDoc(SphinxDocString, ClassDoc):
- def __init__(self, obj, doc=None, func_doc=None, config={}):
- self.use_plots = config.get('use_plots', False)
- ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
-
-class SphinxObjDoc(SphinxDocString):
- def __init__(self, obj, doc=None, config={}):
- self._f = obj
- SphinxDocString.__init__(self, doc, config=config)
-
-def get_doc_object(obj, what=None, doc=None, config={}):
- if what is None:
- if inspect.isclass(obj):
- what = 'class'
- elif inspect.ismodule(obj):
- what = 'module'
- elif isinstance(obj, collections.Callable):
- what = 'function'
- else:
- what = 'object'
- if what == 'class':
- return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
- config=config)
- elif what in ('function', 'method'):
- return SphinxFunctionDoc(obj, doc=doc, config=config)
- else:
- if doc is None:
- doc = pydoc.getdoc(obj)
- return SphinxObjDoc(obj, doc, config=config)
diff --git a/doc/sphinxext/numpydoc/linkcode.py b/doc/sphinxext/numpydoc/linkcode.py
deleted file mode 100644
index 58361e8c3..000000000
--- a/doc/sphinxext/numpydoc/linkcode.py
+++ /dev/null
@@ -1,83 +0,0 @@
-# -*- coding: utf-8 -*-
-"""
- linkcode
- ~~~~~~~~
-
- Add external links to module code in Python object descriptions.
-
- :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
- :license: BSD, see LICENSE for details.
-
-"""
-from __future__ import division
-
-import warnings
-import collections
-
-warnings.warn("This extension has been accepted to Sphinx upstream. "
- "Use the version from there (Sphinx >= 1.2) "
- "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode",
- FutureWarning, stacklevel=1)
-
-
-from docutils import nodes
-
-from sphinx import addnodes
-from sphinx.locale import _
-from sphinx.errors import SphinxError
-
-class LinkcodeError(SphinxError):
- category = "linkcode error"
-
-def doctree_read(app, doctree):
- env = app.builder.env
-
- resolve_target = getattr(env.config, 'linkcode_resolve', None)
- if not isinstance(env.config.linkcode_resolve, collections.Callable):
- raise LinkcodeError(
- "Function `linkcode_resolve` is not given in conf.py")
-
- domain_keys = dict(
- py=['module', 'fullname'],
- c=['names'],
- cpp=['names'],
- js=['object', 'fullname'],
- )
-
- for objnode in doctree.traverse(addnodes.desc):
- domain = objnode.get('domain')
- uris = set()
- for signode in objnode:
- if not isinstance(signode, addnodes.desc_signature):
- continue
-
- # Convert signode to a specified format
- info = {}
- for key in domain_keys.get(domain, []):
- value = signode.get(key)
- if not value:
- value = ''
- info[key] = value
- if not info:
- continue
-
- # Call user code to resolve the link
- uri = resolve_target(domain, info)
- if not uri:
- # no source
- continue
-
- if uri in uris or not uri:
- # only one link per name, please
- continue
- uris.add(uri)
-
- onlynode = addnodes.only(expr='html')
- onlynode += nodes.reference('', '', internal=False, refuri=uri)
- onlynode[0] += nodes.inline('', _('[source]'),
- classes=['viewcode-link'])
- signode += onlynode
-
-def setup(app):
- app.connect('doctree-read', doctree_read)
- app.add_config_value('linkcode_resolve', None, '')
diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py
deleted file mode 100644
index 0243d23c4..000000000
--- a/doc/sphinxext/numpydoc/numpydoc.py
+++ /dev/null
@@ -1,178 +0,0 @@
-"""
-========
-numpydoc
-========
-
-Sphinx extension that handles docstrings in the Numpy standard format. [1]
-
-It will:
-
-- Convert Parameters etc. sections to field lists.
-- Convert See Also section to a See also entry.
-- Renumber references.
-- Extract the signature from the docstring, if it can't be determined otherwise.
-
-.. [1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt
-
-"""
-from __future__ import division
-
-import sphinx
-import collections
-
-if sphinx.__version__ < '1.0.1':
- raise RuntimeError("Sphinx 1.0.1 or newer is required")
-
-import os, sys, re, pydoc
-from .docscrape_sphinx import get_doc_object, SphinxDocString
-from sphinx.util.compat import Directive
-import inspect
-
-def mangle_docstrings(app, what, name, obj, options, lines,
- reference_offset=[0]):
-
- cfg = dict(use_plots=app.config.numpydoc_use_plots,
- show_class_members=app.config.numpydoc_show_class_members)
-
- if what == 'module':
- # Strip top title
- title_re = re.compile(u'^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*',
- re.I|re.S)
- lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
- else:
- doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
- if sys.version_info[0] >= 3:
- doc = str(doc)
- else:
- doc = str(doc).decode('utf-8')
- lines[:] = doc.split(u"\n")
-
- if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
- obj.__name__:
- if hasattr(obj, '__module__'):
- v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
- else:
- v = dict(full_name=obj.__name__)
- lines += [u'', u'.. htmlonly::', u'']
- lines += [u' %s' % x for x in
- (app.config.numpydoc_edit_link % v).split("\n")]
-
- # replace reference numbers so that there are no duplicates
- references = []
- for line in lines:
- line = line.strip()
- m = re.match(u'^.. \\[([a-z0-9_.-])\\]', line, re.I)
- if m:
- references.append(m.group(1))
-
- # start renaming from the longest string, to avoid overwriting parts
- references.sort(key=lambda x: -len(x))
- if references:
- for i, line in enumerate(lines):
- for r in references:
- if re.match(u'^\\d+$', r):
- new_r = u"R%d" % (reference_offset[0] + int(r))
- else:
- new_r = u"%s%d" % (r, reference_offset[0])
- lines[i] = lines[i].replace(u'[%s]_' % r,
- u'[%s]_' % new_r)
- lines[i] = lines[i].replace(u'.. [%s]' % r,
- u'.. [%s]' % new_r)
-
- reference_offset[0] += len(references)
-
-def mangle_signature(app, what, name, obj, options, sig, retann):
- # Do not try to inspect classes that don't define `__init__`
- if (inspect.isclass(obj) and
- (not hasattr(obj, '__init__') or
- 'initializes x; see ' in pydoc.getdoc(obj.__init__))):
- return '', ''
-
- if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return
- if not hasattr(obj, '__doc__'): return
-
- doc = SphinxDocString(pydoc.getdoc(obj))
- if doc['Signature']:
- sig = re.sub(u"^[^(]*", u"", doc['Signature'])
- return sig, u''
-
-def setup(app, get_doc_object_=get_doc_object):
- if not hasattr(app, 'add_config_value'):
- return # probably called by nose, better bail out
-
- global get_doc_object
- get_doc_object = get_doc_object_
-
- app.connect('autodoc-process-docstring', mangle_docstrings)
- app.connect('autodoc-process-signature', mangle_signature)
- app.add_config_value('numpydoc_edit_link', None, False)
- app.add_config_value('numpydoc_use_plots', None, False)
- app.add_config_value('numpydoc_show_class_members', True, True)
-
- # Extra mangling domains
- app.add_domain(NumpyPythonDomain)
- app.add_domain(NumpyCDomain)
-
-#------------------------------------------------------------------------------
-# Docstring-mangling domains
-#------------------------------------------------------------------------------
-
-from docutils.statemachine import ViewList
-from sphinx.domains.c import CDomain
-from sphinx.domains.python import PythonDomain
-
-class ManglingDomainBase(object):
- directive_mangling_map = {}
-
- def __init__(self, *a, **kw):
- super(ManglingDomainBase, self).__init__(*a, **kw)
- self.wrap_mangling_directives()
-
- def wrap_mangling_directives(self):
- for name, objtype in list(self.directive_mangling_map.items()):
- self.directives[name] = wrap_mangling_directive(
- self.directives[name], objtype)
-
-class NumpyPythonDomain(ManglingDomainBase, PythonDomain):
- name = 'np'
- directive_mangling_map = {
- 'function': 'function',
- 'class': 'class',
- 'exception': 'class',
- 'method': 'function',
- 'classmethod': 'function',
- 'staticmethod': 'function',
- 'attribute': 'attribute',
- }
-
-class NumpyCDomain(ManglingDomainBase, CDomain):
- name = 'np-c'
- directive_mangling_map = {
- 'function': 'function',
- 'member': 'attribute',
- 'macro': 'function',
- 'type': 'class',
- 'var': 'object',
- }
-
-def wrap_mangling_directive(base_directive, objtype):
- class directive(base_directive):
- def run(self):
- env = self.state.document.settings.env
-
- name = None
- if self.arguments:
- m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
- name = m.group(2).strip()
-
- if not name:
- name = self.arguments[0]
-
- lines = list(self.content)
- mangle_docstrings(env.app, objtype, name, None, None, lines)
- self.content = ViewList(lines, self.content.parent)
-
- return base_directive.run(self)
-
- return directive
-
diff --git a/doc/sphinxext/numpydoc/phantom_import.py b/doc/sphinxext/numpydoc/phantom_import.py
deleted file mode 100644
index 6308f492d..000000000
--- a/doc/sphinxext/numpydoc/phantom_import.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-==============
-phantom_import
-==============
-
-Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
-extensions to use docstrings loaded from an XML file.
-
-This extension loads an XML file in the Pydocweb format [1] and
-creates a dummy module that contains the specified docstrings. This
-can be used to get the current docstrings from a Pydocweb instance
-without needing to rebuild the documented module.
-
-.. [1] http://code.google.com/p/pydocweb
-
-"""
-from __future__ import division
-
-import imp, sys, compiler, types, os, inspect, re
-
-def setup(app):
- app.connect('builder-inited', initialize)
- app.add_config_value('phantom_import_file', None, True)
-
-def initialize(app):
- fn = app.config.phantom_import_file
- if (fn and os.path.isfile(fn)):
- print("[numpydoc] Phantom importing modules from", fn, "...")
- import_phantom_module(fn)
-
-#------------------------------------------------------------------------------
-# Creating 'phantom' modules from an XML description
-#------------------------------------------------------------------------------
-def import_phantom_module(xml_file):
- """
- Insert a fake Python module to sys.modules, based on a XML file.
-
- The XML file is expected to conform to Pydocweb DTD. The fake
- module will contain dummy objects, which guarantee the following:
-
- - Docstrings are correct.
- - Class inheritance relationships are correct (if present in XML).
- - Function argspec is *NOT* correct (even if present in XML).
- Instead, the function signature is prepended to the function docstring.
- - Class attributes are *NOT* correct; instead, they are dummy objects.
-
- Parameters
- ----------
- xml_file : str
- Name of an XML file to read
-
- """
- import lxml.etree as etree
-
- object_cache = {}
-
- tree = etree.parse(xml_file)
- root = tree.getroot()
-
- # Sort items so that
- # - Base classes come before classes inherited from them
- # - Modules come before their contents
- all_nodes = dict([(n.attrib['id'], n) for n in root])
-
- def _get_bases(node, recurse=False):
- bases = [x.attrib['ref'] for x in node.findall('base')]
- if recurse:
- j = 0
- while True:
- try:
- b = bases[j]
- except IndexError: break
- if b in all_nodes:
- bases.extend(_get_bases(all_nodes[b]))
- j += 1
- return bases
-
- type_index = ['module', 'class', 'callable', 'object']
-
- def base_cmp(a, b):
- x = cmp(type_index.index(a.tag), type_index.index(b.tag))
- if x != 0: return x
-
- if a.tag == 'class' and b.tag == 'class':
- a_bases = _get_bases(a, recurse=True)
- b_bases = _get_bases(b, recurse=True)
- x = cmp(len(a_bases), len(b_bases))
- if x != 0: return x
- if a.attrib['id'] in b_bases: return -1
- if b.attrib['id'] in a_bases: return 1
-
- return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
-
- nodes = root.getchildren()
- nodes.sort(base_cmp)
-
- # Create phantom items
- for node in nodes:
- name = node.attrib['id']
- doc = (node.text or '').decode('string-escape') + "\n"
- if doc == "\n": doc = ""
-
- # create parent, if missing
- parent = name
- while True:
- parent = '.'.join(parent.split('.')[:-1])
- if not parent: break
- if parent in object_cache: break
- obj = imp.new_module(parent)
- object_cache[parent] = obj
- sys.modules[parent] = obj
-
- # create object
- if node.tag == 'module':
- obj = imp.new_module(name)
- obj.__doc__ = doc
- sys.modules[name] = obj
- elif node.tag == 'class':
- bases = [object_cache[b] for b in _get_bases(node)
- if b in object_cache]
- bases.append(object)
- init = lambda self: None
- init.__doc__ = doc
- obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
- obj.__name__ = name.split('.')[-1]
- elif node.tag == 'callable':
- funcname = node.attrib['id'].split('.')[-1]
- argspec = node.attrib.get('argspec')
- if argspec:
- argspec = re.sub('^[^(]*', '', argspec)
- doc = "%s%s\n\n%s" % (funcname, argspec, doc)
- obj = lambda: 0
- obj.__argspec_is_invalid_ = True
- if sys.version_info[0] >= 3:
- obj.__name__ = funcname
- else:
- obj.func_name = funcname
- obj.__name__ = name
- obj.__doc__ = doc
- if inspect.isclass(object_cache[parent]):
- obj.__objclass__ = object_cache[parent]
- else:
- class Dummy(object): pass
- obj = Dummy()
- obj.__name__ = name
- obj.__doc__ = doc
- if inspect.isclass(object_cache[parent]):
- obj.__get__ = lambda: None
- object_cache[name] = obj
-
- if parent:
- if inspect.ismodule(object_cache[parent]):
- obj.__module__ = parent
- setattr(object_cache[parent], name.split('.')[-1], obj)
-
- # Populate items
- for node in root:
- obj = object_cache.get(node.attrib['id'])
- if obj is None: continue
- for ref in node.findall('ref'):
- if node.tag == 'class':
- if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
- setattr(obj, ref.attrib['name'],
- object_cache.get(ref.attrib['ref']))
- else:
- setattr(obj, ref.attrib['name'],
- object_cache.get(ref.attrib['ref']))
diff --git a/doc/sphinxext/numpydoc/plot_directive.py b/doc/sphinxext/numpydoc/plot_directive.py
deleted file mode 100644
index 7547642a2..000000000
--- a/doc/sphinxext/numpydoc/plot_directive.py
+++ /dev/null
@@ -1,642 +0,0 @@
-"""
-A special directive for generating a matplotlib plot.
-
-.. warning::
-
- This is a hacked version of plot_directive.py from Matplotlib.
- It's very much subject to change!
-
-
-Usage
------
-
-Can be used like this::
-
- .. plot:: examples/example.py
-
- .. plot::
-
- import matplotlib.pyplot as plt
- plt.plot([1,2,3], [4,5,6])
-
- .. plot::
-
- A plotting example:
-
- >>> import matplotlib.pyplot as plt
- >>> plt.plot([1,2,3], [4,5,6])
-
-The content is interpreted as doctest formatted if it has a line starting
-with ``>>>``.
-
-The ``plot`` directive supports the options
-
- format : {'python', 'doctest'}
- Specify the format of the input
-
- include-source : bool
- Whether to display the source code. Default can be changed in conf.py
-
-and the ``image`` directive options ``alt``, ``height``, ``width``,
-``scale``, ``align``, ``class``.
-
-Configuration options
----------------------
-
-The plot directive has the following configuration options:
-
- plot_include_source
- Default value for the include-source option
-
- plot_pre_code
- Code that should be executed before each plot.
-
- plot_basedir
- Base directory, to which plot:: file names are relative to.
- (If None or empty, file names are relative to the directoly where
- the file containing the directive is.)
-
- plot_formats
- File formats to generate. List of tuples or strings::
-
- [(suffix, dpi), suffix, ...]
-
- that determine the file format and the DPI. For entries whose
- DPI was omitted, sensible defaults are chosen.
-
- plot_html_show_formats
- Whether to show links to the files in HTML.
-
-TODO
-----
-
-* Refactor Latex output; now it's plain images, but it would be nice
- to make them appear side-by-side, or in floats.
-
-"""
-from __future__ import division
-
-import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback
-import sphinx
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from cStringIO import StringIO
-
-import warnings
-warnings.warn("A plot_directive module is also available under "
- "matplotlib.sphinxext; expect this numpydoc.plot_directive "
- "module to be deprecated after relevant features have been "
- "integrated there.",
- FutureWarning, stacklevel=2)
-
-
-#------------------------------------------------------------------------------
-# Registration hook
-#------------------------------------------------------------------------------
-
-def setup(app):
- setup.app = app
- setup.config = app.config
- setup.confdir = app.confdir
-
- app.add_config_value('plot_pre_code', '', True)
- app.add_config_value('plot_include_source', False, True)
- app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
- app.add_config_value('plot_basedir', None, True)
- app.add_config_value('plot_html_show_formats', True, True)
-
- app.add_directive('plot', plot_directive, True, (0, 1, False),
- **plot_directive_options)
-
-#------------------------------------------------------------------------------
-# plot:: directive
-#------------------------------------------------------------------------------
-from docutils.parsers.rst import directives
-from docutils import nodes
-
-def plot_directive(name, arguments, options, content, lineno,
- content_offset, block_text, state, state_machine):
- return run(arguments, content, options, state_machine, state, lineno)
-plot_directive.__doc__ = __doc__
-
-def _option_boolean(arg):
- if not arg or not arg.strip():
- # no argument given, assume used as a flag
- return True
- elif arg.strip().lower() in ('no', '0', 'false'):
- return False
- elif arg.strip().lower() in ('yes', '1', 'true'):
- return True
- else:
- raise ValueError('"%s" unknown boolean' % arg)
-
-def _option_format(arg):
- return directives.choice(arg, ('python', 'lisp'))
-
-def _option_align(arg):
- return directives.choice(arg, ("top", "middle", "bottom", "left", "center",
- "right"))
-
-plot_directive_options = {'alt': directives.unchanged,
- 'height': directives.length_or_unitless,
- 'width': directives.length_or_percentage_or_unitless,
- 'scale': directives.nonnegative_int,
- 'align': _option_align,
- 'class': directives.class_option,
- 'include-source': _option_boolean,
- 'format': _option_format,
- }
-
-#------------------------------------------------------------------------------
-# Generating output
-#------------------------------------------------------------------------------
-
-from docutils import nodes, utils
-
-try:
- # Sphinx depends on either Jinja or Jinja2
- import jinja2
- def format_template(template, **kw):
- return jinja2.Template(template).render(**kw)
-except ImportError:
- import jinja
- def format_template(template, **kw):
- return jinja.from_string(template, **kw)
-
-TEMPLATE = """
-{{ source_code }}
-
-{{ only_html }}
-
- {% if source_link or (html_show_formats and not multi_image) %}
- (
- {%- if source_link -%}
- `Source code <{{ source_link }}>`__
- {%- endif -%}
- {%- if html_show_formats and not multi_image -%}
- {%- for img in images -%}
- {%- for fmt in img.formats -%}
- {%- if source_link or not loop.first -%}, {% endif -%}
- `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
- {%- endfor -%}
- {%- endfor -%}
- {%- endif -%}
- )
- {% endif %}
-
- {% for img in images %}
- .. figure:: {{ build_dir }}/{{ img.basename }}.png
- {%- for option in options %}
- {{ option }}
- {% endfor %}
-
- {% if html_show_formats and multi_image -%}
- (
- {%- for fmt in img.formats -%}
- {%- if not loop.first -%}, {% endif -%}
- `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
- {%- endfor -%}
- )
- {%- endif -%}
- {% endfor %}
-
-{{ only_latex }}
-
- {% for img in images %}
- .. image:: {{ build_dir }}/{{ img.basename }}.pdf
- {% endfor %}
-
-"""
-
-class ImageFile(object):
- def __init__(self, basename, dirname):
- self.basename = basename
- self.dirname = dirname
- self.formats = []
-
- def filename(self, format):
- return os.path.join(self.dirname, "%s.%s" % (self.basename, format))
-
- def filenames(self):
- return [self.filename(fmt) for fmt in self.formats]
-
-def run(arguments, content, options, state_machine, state, lineno):
- if arguments and content:
- raise RuntimeError("plot:: directive can't have both args and content")
-
- document = state_machine.document
- config = document.settings.env.config
-
- options.setdefault('include-source', config.plot_include_source)
-
- # determine input
- rst_file = document.attributes['source']
- rst_dir = os.path.dirname(rst_file)
-
- if arguments:
- if not config.plot_basedir:
- source_file_name = os.path.join(rst_dir,
- directives.uri(arguments[0]))
- else:
- source_file_name = os.path.join(setup.confdir, config.plot_basedir,
- directives.uri(arguments[0]))
- code = open(source_file_name, 'r').read()
- output_base = os.path.basename(source_file_name)
- else:
- source_file_name = rst_file
- code = textwrap.dedent("\n".join(map(str, content)))
- counter = document.attributes.get('_plot_counter', 0) + 1
- document.attributes['_plot_counter'] = counter
- base, ext = os.path.splitext(os.path.basename(source_file_name))
- output_base = '%s-%d.py' % (base, counter)
-
- base, source_ext = os.path.splitext(output_base)
- if source_ext in ('.py', '.rst', '.txt'):
- output_base = base
- else:
- source_ext = ''
-
- # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
- output_base = output_base.replace('.', '-')
-
- # is it in doctest format?
- is_doctest = contains_doctest(code)
- if 'format' in options:
- if options['format'] == 'python':
- is_doctest = False
- else:
- is_doctest = True
-
- # determine output directory name fragment
- source_rel_name = relpath(source_file_name, setup.confdir)
- source_rel_dir = os.path.dirname(source_rel_name)
- while source_rel_dir.startswith(os.path.sep):
- source_rel_dir = source_rel_dir[1:]
-
- # build_dir: where to place output files (temporarily)
- build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
- 'plot_directive',
- source_rel_dir)
- if not os.path.exists(build_dir):
- os.makedirs(build_dir)
-
- # output_dir: final location in the builder's directory
- dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
- source_rel_dir))
-
- # how to link to files from the RST file
- dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
- source_rel_dir).replace(os.path.sep, '/')
- build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
- source_link = dest_dir_link + '/' + output_base + source_ext
-
- # make figures
- try:
- results = makefig(code, source_file_name, build_dir, output_base,
- config)
- errors = []
- except PlotError as err:
- reporter = state.memo.reporter
- sm = reporter.system_message(
- 2, "Exception occurred in plotting %s: %s" % (output_base, err),
- line=lineno)
- results = [(code, [])]
- errors = [sm]
-
- # generate output restructuredtext
- total_lines = []
- for j, (code_piece, images) in enumerate(results):
- if options['include-source']:
- if is_doctest:
- lines = ['']
- lines += [row.rstrip() for row in code_piece.split('\n')]
- else:
- lines = ['.. code-block:: python', '']
- lines += [' %s' % row.rstrip()
- for row in code_piece.split('\n')]
- source_code = "\n".join(lines)
- else:
- source_code = ""
-
- opts = [':%s: %s' % (key, val) for key, val in list(options.items())
- if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]
-
- only_html = ".. only:: html"
- only_latex = ".. only:: latex"
-
- if j == 0:
- src_link = source_link
- else:
- src_link = None
-
- result = format_template(
- TEMPLATE,
- dest_dir=dest_dir_link,
- build_dir=build_dir_link,
- source_link=src_link,
- multi_image=len(images) > 1,
- only_html=only_html,
- only_latex=only_latex,
- options=opts,
- images=images,
- source_code=source_code,
- html_show_formats=config.plot_html_show_formats)
-
- total_lines.extend(result.split("\n"))
- total_lines.extend("\n")
-
- if total_lines:
- state_machine.insert_input(total_lines, source=source_file_name)
-
- # copy image files to builder's output directory
- if not os.path.exists(dest_dir):
- os.makedirs(dest_dir)
-
- for code_piece, images in results:
- for img in images:
- for fn in img.filenames():
- shutil.copyfile(fn, os.path.join(dest_dir,
- os.path.basename(fn)))
-
- # copy script (if necessary)
- if source_file_name == rst_file:
- target_name = os.path.join(dest_dir, output_base + source_ext)
- f = open(target_name, 'w')
- f.write(unescape_doctest(code))
- f.close()
-
- return errors
-
-
-#------------------------------------------------------------------------------
-# Run code and capture figures
-#------------------------------------------------------------------------------
-
-import matplotlib
-matplotlib.use('Agg')
-import matplotlib.pyplot as plt
-import matplotlib.image as image
-from matplotlib import _pylab_helpers
-
-import exceptions
-
-def contains_doctest(text):
- try:
- # check if it's valid Python as-is
- compile(text, '<string>', 'exec')
- return False
- except SyntaxError:
- pass
- r = re.compile(r'^\s*>>>', re.M)
- m = r.search(text)
- return bool(m)
-
-def unescape_doctest(text):
- """
- Extract code from a piece of text, which contains either Python code
- or doctests.
-
- """
- if not contains_doctest(text):
- return text
-
- code = ""
- for line in text.split("\n"):
- m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line)
- if m:
- code += m.group(2) + "\n"
- elif line.strip():
- code += "# " + line.strip() + "\n"
- else:
- code += "\n"
- return code
-
-def split_code_at_show(text):
- """
- Split code at plt.show()
-
- """
-
- parts = []
- is_doctest = contains_doctest(text)
-
- part = []
- for line in text.split("\n"):
- if (not is_doctest and line.strip() == 'plt.show()') or \
- (is_doctest and line.strip() == '>>> plt.show()'):
- part.append(line)
- parts.append("\n".join(part))
- part = []
- else:
- part.append(line)
- if "\n".join(part).strip():
- parts.append("\n".join(part))
- return parts
-
-class PlotError(RuntimeError):
- pass
-
-def run_code(code, code_path, ns=None):
- # Change the working directory to the directory of the example, so
- # it can get at its data files, if any.
- pwd = os.getcwd()
- old_sys_path = list(sys.path)
- if code_path is not None:
- dirname = os.path.abspath(os.path.dirname(code_path))
- os.chdir(dirname)
- sys.path.insert(0, dirname)
-
- # Redirect stdout
- stdout = sys.stdout
- sys.stdout = StringIO()
-
- # Reset sys.argv
- old_sys_argv = sys.argv
- sys.argv = [code_path]
-
- try:
- try:
- code = unescape_doctest(code)
- if ns is None:
- ns = {}
- if not ns:
- exec(setup.config.plot_pre_code, ns)
- exec(code, ns)
- except (Exception, SystemExit) as err:
- raise PlotError(traceback.format_exc())
- finally:
- os.chdir(pwd)
- sys.argv = old_sys_argv
- sys.path[:] = old_sys_path
- sys.stdout = stdout
- return ns
-
-
-#------------------------------------------------------------------------------
-# Generating figures
-#------------------------------------------------------------------------------
-
-def out_of_date(original, derived):
- """
- Returns True if derivative is out-of-date wrt original,
- both of which are full file paths.
- """
- return (not os.path.exists(derived)
- or os.stat(derived).st_mtime < os.stat(original).st_mtime)
-
-
-def makefig(code, code_path, output_dir, output_base, config):
- """
- Run a pyplot script *code* and save the images under *output_dir*
- with file names derived from *output_base*
-
- """
-
- # -- Parse format list
- default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50}
- formats = []
- for fmt in config.plot_formats:
- if isinstance(fmt, str):
- formats.append((fmt, default_dpi.get(fmt, 80)))
- elif type(fmt) in (tuple, list) and len(fmt)==2:
- formats.append((str(fmt[0]), int(fmt[1])))
- else:
- raise PlotError('invalid image format "%r" in plot_formats' % fmt)
-
- # -- Try to determine if all images already exist
-
- code_pieces = split_code_at_show(code)
-
- # Look for single-figure output files first
- all_exists = True
- img = ImageFile(output_base, output_dir)
- for format, dpi in formats:
- if out_of_date(code_path, img.filename(format)):
- all_exists = False
- break
- img.formats.append(format)
-
- if all_exists:
- return [(code, [img])]
-
- # Then look for multi-figure output files
- results = []
- all_exists = True
- for i, code_piece in enumerate(code_pieces):
- images = []
- for j in range(1000):
- img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir)
- for format, dpi in formats:
- if out_of_date(code_path, img.filename(format)):
- all_exists = False
- break
- img.formats.append(format)
-
- # assume that if we have one, we have them all
- if not all_exists:
- all_exists = (j > 0)
- break
- images.append(img)
- if not all_exists:
- break
- results.append((code_piece, images))
-
- if all_exists:
- return results
-
- # -- We didn't find the files, so build them
-
- results = []
- ns = {}
-
- for i, code_piece in enumerate(code_pieces):
- # Clear between runs
- plt.close('all')
-
- # Run code
- run_code(code_piece, code_path, ns)
-
- # Collect images
- images = []
- fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
- for j, figman in enumerate(fig_managers):
- if len(fig_managers) == 1 and len(code_pieces) == 1:
- img = ImageFile(output_base, output_dir)
- else:
- img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
- output_dir)
- images.append(img)
- for format, dpi in formats:
- try:
- figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
- except exceptions.BaseException as err:
- raise PlotError(traceback.format_exc())
- img.formats.append(format)
-
- # Results
- results.append((code_piece, images))
-
- return results
-
-
-#------------------------------------------------------------------------------
-# Relative pathnames
-#------------------------------------------------------------------------------
-
-try:
- from os.path import relpath
-except ImportError:
- # Copied from Python 2.7
- if 'posix' in sys.builtin_module_names:
- def relpath(path, start=os.path.curdir):
- """Return a relative version of a path"""
- from os.path import sep, curdir, join, abspath, commonprefix, \
- pardir
-
- if not path:
- raise ValueError("no path specified")
-
- start_list = abspath(start).split(sep)
- path_list = abspath(path).split(sep)
-
- # Work out how much of the filepath is shared by start and path.
- i = len(commonprefix([start_list, path_list]))
-
- rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
- if not rel_list:
- return curdir
- return join(*rel_list)
- elif 'nt' in sys.builtin_module_names:
- def relpath(path, start=os.path.curdir):
- """Return a relative version of a path"""
- from os.path import sep, curdir, join, abspath, commonprefix, \
- pardir, splitunc
-
- if not path:
- raise ValueError("no path specified")
- start_list = abspath(start).split(sep)
- path_list = abspath(path).split(sep)
- if start_list[0].lower() != path_list[0].lower():
- unc_path, rest = splitunc(path)
- unc_start, rest = splitunc(start)
- if bool(unc_path) ^ bool(unc_start):
- raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
- % (path, start))
- else:
- raise ValueError("path is on drive %s, start on drive %s"
- % (path_list[0], start_list[0]))
- # Work out how much of the filepath is shared by start and path.
- for i in range(min(len(start_list), len(path_list))):
- if start_list[i].lower() != path_list[i].lower():
- break
- else:
- i += 1
-
- rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
- if not rel_list:
- return curdir
- return join(*rel_list)
- else:
- raise RuntimeError("Unsupported platform (no relpath available!)")
diff --git a/doc/sphinxext/numpydoc/tests/test_docscrape.py b/doc/sphinxext/numpydoc/tests/test_docscrape.py
deleted file mode 100644
index 07ac68003..000000000
--- a/doc/sphinxext/numpydoc/tests/test_docscrape.py
+++ /dev/null
@@ -1,749 +0,0 @@
-# -*- encoding:utf-8 -*-
-from __future__ import division
-
-import sys, textwrap
-
-from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
-from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
-from nose.tools import *
-
-doc_txt = '''\
- numpy.multivariate_normal(mean, cov, shape=None, spam=None)
-
- Draw values from a multivariate normal distribution with specified
- mean and covariance.
-
- The multivariate normal or Gaussian distribution is a generalisation
- of the one-dimensional normal distribution to higher dimensions.
-
- Parameters
- ----------
- mean : (N,) ndarray
- Mean of the N-dimensional distribution.
-
- .. math::
-
- (1+2+3)/3
-
- cov : (N,N) ndarray
- Covariance matrix of the distribution.
- shape : tuple of ints
- Given a shape of, for example, (m,n,k), m*n*k samples are
- generated, and packed in an m-by-n-by-k arrangement. Because
- each sample is N-dimensional, the output shape is (m,n,k,N).
-
- Returns
- -------
- out : ndarray
- The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
- (m,n,...,N).
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
- Other Parameters
- ----------------
- spam : parrot
- A parrot off its mortal coil.
-
- Raises
- ------
- RuntimeError
- Some error
-
- Warns
- -----
- RuntimeWarning
- Some warning
-
- Warnings
- --------
- Certain warnings apply.
-
- Notes
- -----
-
- Instead of specifying the full covariance matrix, popular
- approximations include:
-
- - Spherical covariance (`cov` is a multiple of the identity matrix)
- - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
- This geometrical property can be seen in two dimensions by plotting
- generated data-points:
-
- >>> mean = [0,0]
- >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
- >>> x,y = multivariate_normal(mean,cov,5000).T
- >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
- Note that the covariance matrix must be symmetric and non-negative
- definite.
-
- References
- ----------
- .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
- Processes," 3rd ed., McGraw-Hill Companies, 1991
- .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
- 2nd ed., Wiley, 2001.
-
- See Also
- --------
- some, other, funcs
- otherfunc : relationship
-
- Examples
- --------
- >>> mean = (1,2)
- >>> cov = [[1,0],[1,0]]
- >>> x = multivariate_normal(mean,cov,(3,3))
- >>> print x.shape
- (3, 3, 2)
-
- The following is probably true, given that 0.6 is roughly twice the
- standard deviation:
-
- >>> print list( (x[0,0,:] - mean) < 0.6 )
- [True, True]
-
- .. index:: random
- :refguide: random;distributions, random;gauss
-
- '''
-doc = NumpyDocString(doc_txt)
-
-
-def test_signature():
- assert doc['Signature'].startswith('numpy.multivariate_normal(')
- assert doc['Signature'].endswith('spam=None)')
-
-def test_summary():
- assert doc['Summary'][0].startswith('Draw values')
- assert doc['Summary'][-1].endswith('covariance.')
-
-def test_extended_summary():
- assert doc['Extended Summary'][0].startswith('The multivariate normal')
-
-def test_parameters():
- assert_equal(len(doc['Parameters']), 3)
- assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
-
- arg, arg_type, desc = doc['Parameters'][1]
- assert_equal(arg_type, '(N,N) ndarray')
- assert desc[0].startswith('Covariance matrix')
- assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
-
-def test_other_parameters():
- assert_equal(len(doc['Other Parameters']), 1)
- assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
- arg, arg_type, desc = doc['Other Parameters'][0]
- assert_equal(arg_type, 'parrot')
- assert desc[0].startswith('A parrot off its mortal coil')
-
-def test_returns():
- assert_equal(len(doc['Returns']), 1)
- arg, arg_type, desc = doc['Returns'][0]
- assert_equal(arg, 'out')
- assert_equal(arg_type, 'ndarray')
- assert desc[0].startswith('The drawn samples')
- assert desc[-1].endswith('distribution.')
-
-def test_notes():
- assert doc['Notes'][0].startswith('Instead')
- assert doc['Notes'][-1].endswith('definite.')
- assert_equal(len(doc['Notes']), 17)
-
-def test_references():
- assert doc['References'][0].startswith('..')
- assert doc['References'][-1].endswith('2001.')
-
-def test_examples():
- assert doc['Examples'][0].startswith('>>>')
- assert doc['Examples'][-1].endswith('True]')
-
-def test_index():
- assert_equal(doc['index']['default'], 'random')
- assert_equal(len(doc['index']), 2)
- assert_equal(len(doc['index']['refguide']), 2)
-
-def non_blank_line_by_line_compare(a,b):
- a = textwrap.dedent(a)
- b = textwrap.dedent(b)
- a = [l for l in a.split('\n') if l.strip()]
- b = [l for l in b.split('\n') if l.strip()]
- for n,line in enumerate(a):
- if not line == b[n]:
- raise AssertionError("Lines %s of a and b differ: "
- "\n>>> %s\n<<< %s\n" %
- (n,line,b[n]))
-def test_str():
- non_blank_line_by_line_compare(str(doc),
-"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
-
-Draw values from a multivariate normal distribution with specified
-mean and covariance.
-
-The multivariate normal or Gaussian distribution is a generalisation
-of the one-dimensional normal distribution to higher dimensions.
-
-Parameters
-----------
-mean : (N,) ndarray
- Mean of the N-dimensional distribution.
-
- .. math::
-
- (1+2+3)/3
-
-cov : (N,N) ndarray
- Covariance matrix of the distribution.
-shape : tuple of ints
- Given a shape of, for example, (m,n,k), m*n*k samples are
- generated, and packed in an m-by-n-by-k arrangement. Because
- each sample is N-dimensional, the output shape is (m,n,k,N).
-
-Returns
--------
-out : ndarray
- The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
- (m,n,...,N).
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
-Other Parameters
-----------------
-spam : parrot
- A parrot off its mortal coil.
-
-Raises
-------
-RuntimeError :
- Some error
-
-Warns
------
-RuntimeWarning :
- Some warning
-
-Warnings
---------
-Certain warnings apply.
-
-See Also
---------
-`some`_, `other`_, `funcs`_
-
-`otherfunc`_
- relationship
-
-Notes
------
-Instead of specifying the full covariance matrix, popular
-approximations include:
-
- - Spherical covariance (`cov` is a multiple of the identity matrix)
- - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
-This geometrical property can be seen in two dimensions by plotting
-generated data-points:
-
->>> mean = [0,0]
->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
->>> x,y = multivariate_normal(mean,cov,5000).T
->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
-Note that the covariance matrix must be symmetric and non-negative
-definite.
-
-References
-----------
-.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
- Processes," 3rd ed., McGraw-Hill Companies, 1991
-.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
- 2nd ed., Wiley, 2001.
-
-Examples
---------
->>> mean = (1,2)
->>> cov = [[1,0],[1,0]]
->>> x = multivariate_normal(mean,cov,(3,3))
->>> print x.shape
-(3, 3, 2)
-
-The following is probably true, given that 0.6 is roughly twice the
-standard deviation:
-
->>> print list( (x[0,0,:] - mean) < 0.6 )
-[True, True]
-
-.. index:: random
- :refguide: random;distributions, random;gauss""")
-
-
-def test_sphinx_str():
- sphinx_doc = SphinxDocString(doc_txt)
- non_blank_line_by_line_compare(str(sphinx_doc),
-"""
-.. index:: random
- single: random;distributions, random;gauss
-
-Draw values from a multivariate normal distribution with specified
-mean and covariance.
-
-The multivariate normal or Gaussian distribution is a generalisation
-of the one-dimensional normal distribution to higher dimensions.
-
-:Parameters:
-
- **mean** : (N,) ndarray
-
- Mean of the N-dimensional distribution.
-
- .. math::
-
- (1+2+3)/3
-
- **cov** : (N,N) ndarray
-
- Covariance matrix of the distribution.
-
- **shape** : tuple of ints
-
- Given a shape of, for example, (m,n,k), m*n*k samples are
- generated, and packed in an m-by-n-by-k arrangement. Because
- each sample is N-dimensional, the output shape is (m,n,k,N).
-
-:Returns:
-
- **out** : ndarray
-
- The drawn samples, arranged according to `shape`. If the
- shape given is (m,n,...), then the shape of `out` is is
- (m,n,...,N).
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
-:Other Parameters:
-
- **spam** : parrot
-
- A parrot off its mortal coil.
-
-:Raises:
-
- **RuntimeError** :
-
- Some error
-
-:Warns:
-
- **RuntimeWarning** :
-
- Some warning
-
-.. warning::
-
- Certain warnings apply.
-
-.. seealso::
-
- :obj:`some`, :obj:`other`, :obj:`funcs`
-
- :obj:`otherfunc`
- relationship
-
-.. rubric:: Notes
-
-Instead of specifying the full covariance matrix, popular
-approximations include:
-
- - Spherical covariance (`cov` is a multiple of the identity matrix)
- - Diagonal covariance (`cov` has non-negative elements only on the diagonal)
-
-This geometrical property can be seen in two dimensions by plotting
-generated data-points:
-
->>> mean = [0,0]
->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
-
->>> x,y = multivariate_normal(mean,cov,5000).T
->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
-
-Note that the covariance matrix must be symmetric and non-negative
-definite.
-
-.. rubric:: References
-
-.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
- Processes," 3rd ed., McGraw-Hill Companies, 1991
-.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
- 2nd ed., Wiley, 2001.
-
-.. only:: latex
-
- [1]_, [2]_
-
-.. rubric:: Examples
-
->>> mean = (1,2)
->>> cov = [[1,0],[1,0]]
->>> x = multivariate_normal(mean,cov,(3,3))
->>> print x.shape
-(3, 3, 2)
-
-The following is probably true, given that 0.6 is roughly twice the
-standard deviation:
-
->>> print list( (x[0,0,:] - mean) < 0.6 )
-[True, True]
-""")
-
-
-doc2 = NumpyDocString("""
- Returns array of indices of the maximum values of along the given axis.
-
- Parameters
- ----------
- a : {array_like}
- Array to look in.
- axis : {None, integer}
- If None, the index is into the flattened array, otherwise along
- the specified axis""")
-
-def test_parameters_without_extended_description():
- assert_equal(len(doc2['Parameters']), 2)
-
-doc3 = NumpyDocString("""
- my_signature(*params, **kwds)
-
- Return this and that.
- """)
-
-def test_escape_stars():
- signature = str(doc3).split('\n')[0]
- assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
-
-doc4 = NumpyDocString(
- """a.conj()
-
- Return an array with all complex-valued elements conjugated.""")
-
-def test_empty_extended_summary():
- assert_equal(doc4['Extended Summary'], [])
-
-doc5 = NumpyDocString(
- """
- a.something()
-
- Raises
- ------
- LinAlgException
- If array is singular.
-
- Warns
- -----
- SomeWarning
- If needed
- """)
-
-def test_raises():
- assert_equal(len(doc5['Raises']), 1)
- name,_,desc = doc5['Raises'][0]
- assert_equal(name,'LinAlgException')
- assert_equal(desc,['If array is singular.'])
-
-def test_warns():
- assert_equal(len(doc5['Warns']), 1)
- name,_,desc = doc5['Warns'][0]
- assert_equal(name,'SomeWarning')
- assert_equal(desc,['If needed'])
-
-def test_see_also():
- doc6 = NumpyDocString(
- """
- z(x,theta)
-
- See Also
- --------
- func_a, func_b, func_c
- func_d : some equivalent func
- foo.func_e : some other func over
- multiple lines
- func_f, func_g, :meth:`func_h`, func_j,
- func_k
- :obj:`baz.obj_q`
- :class:`class_j`: fubar
- foobar
- """)
-
- assert len(doc6['See Also']) == 12
- for func, desc, role in doc6['See Also']:
- if func in ('func_a', 'func_b', 'func_c', 'func_f',
- 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
- assert(not desc)
- else:
- assert(desc)
-
- if func == 'func_h':
- assert role == 'meth'
- elif func == 'baz.obj_q':
- assert role == 'obj'
- elif func == 'class_j':
- assert role == 'class'
- else:
- assert role is None
-
- if func == 'func_d':
- assert desc == ['some equivalent func']
- elif func == 'foo.func_e':
- assert desc == ['some other func over', 'multiple lines']
- elif func == 'class_j':
- assert desc == ['fubar', 'foobar']
-
-def test_see_also_print():
- class Dummy(object):
- """
- See Also
- --------
- func_a, func_b
- func_c : some relationship
- goes here
- func_d
- """
- pass
-
- obj = Dummy()
- s = str(FunctionDoc(obj, role='func'))
- assert(':func:`func_a`, :func:`func_b`' in s)
- assert(' some relationship' in s)
- assert(':func:`func_d`' in s)
-
-doc7 = NumpyDocString("""
-
- Doc starts on second line.
-
- """)
-
-def test_empty_first_line():
- assert doc7['Summary'][0].startswith('Doc starts')
-
-
-def test_no_summary():
- str(SphinxDocString("""
- Parameters
- ----------"""))
-
-
-def test_unicode():
- doc = SphinxDocString("""
- öäöäöäöäöåååå
-
- öäöäöäööäååå
-
- Parameters
- ----------
- ååå : äää
- ööö
-
- Returns
- -------
- ååå : ööö
- äää
-
- """)
- assert isinstance(doc['Summary'][0], str)
- if sys.version_info[0] >= 3:
- assert doc['Summary'][0] == u'öäöäöäöäöåååå'
- else:
- assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
-
-def test_plot_examples():
- cfg = dict(use_plots=True)
-
- doc = SphinxDocString("""
- Examples
- --------
- >>> import matplotlib.pyplot as plt
- >>> plt.plot([1,2,3],[4,5,6])
- >>> plt.show()
- """, config=cfg)
- assert 'plot::' in str(doc), str(doc)
-
- doc = SphinxDocString("""
- Examples
- --------
- .. plot::
-
- import matplotlib.pyplot as plt
- plt.plot([1,2,3],[4,5,6])
- plt.show()
- """, config=cfg)
- assert str(doc).count('plot::') == 1, str(doc)
-
-def test_class_members():
-
- class Dummy(object):
- """
- Dummy class.
-
- """
- def spam(self, a, b):
- """Spam\n\nSpam spam."""
- pass
- def ham(self, c, d):
- """Cheese\n\nNo cheese."""
- pass
- @property
- def spammity(self):
- """Spammity index"""
- return 0.95
-
- class Ignorable(object):
- """local class, to be ignored"""
- pass
-
- for cls in (ClassDoc, SphinxClassDoc):
- doc = cls(Dummy, config=dict(show_class_members=False))
- assert 'Methods' not in str(doc), (cls, str(doc))
- assert 'spam' not in str(doc), (cls, str(doc))
- assert 'ham' not in str(doc), (cls, str(doc))
- assert 'spammity' not in str(doc), (cls, str(doc))
- assert 'Spammity index' not in str(doc), (cls, str(doc))
-
- doc = cls(Dummy, config=dict(show_class_members=True))
- assert 'Methods' in str(doc), (cls, str(doc))
- assert 'spam' in str(doc), (cls, str(doc))
- assert 'ham' in str(doc), (cls, str(doc))
- assert 'spammity' in str(doc), (cls, str(doc))
-
- if cls is SphinxClassDoc:
- assert '.. autosummary::' in str(doc), str(doc)
- else:
- assert 'Spammity index' in str(doc), str(doc)
-
-def test_duplicate_signature():
- # Duplicate function signatures occur e.g. in ufuncs, when the
- # automatic mechanism adds one, and a more detailed comes from the
- # docstring itself.
-
- doc = NumpyDocString(
- """
- z(x1, x2)
-
- z(a, theta)
- """)
-
- assert doc['Signature'].strip() == 'z(a, theta)'
-
-
-class_doc_txt = """
- Foo
-
- Parameters
- ----------
- f : callable ``f(t, y, *f_args)``
- Aaa.
- jac : callable ``jac(t, y, *jac_args)``
- Bbb.
-
- Attributes
- ----------
- t : float
- Current time.
- y : ndarray
- Current variable values.
-
- Methods
- -------
- a
- b
- c
-
- Examples
- --------
- For usage examples, see `ode`.
-"""
-
-def test_class_members_doc():
- doc = ClassDoc(None, class_doc_txt)
- non_blank_line_by_line_compare(str(doc),
- """
- Foo
-
- Parameters
- ----------
- f : callable ``f(t, y, *f_args)``
- Aaa.
- jac : callable ``jac(t, y, *jac_args)``
- Bbb.
-
- Examples
- --------
- For usage examples, see `ode`.
-
- Attributes
- ----------
- t : float
- Current time.
- y : ndarray
- Current variable values.
-
- Methods
- -------
- a :
-
- b :
-
- c :
-
- .. index::
-
- """)
-
-def test_class_members_doc_sphinx():
- doc = SphinxClassDoc(None, class_doc_txt)
- non_blank_line_by_line_compare(str(doc),
- """
- Foo
-
- :Parameters:
-
- **f** : callable ``f(t, y, *f_args)``
-
- Aaa.
-
- **jac** : callable ``jac(t, y, *jac_args)``
-
- Bbb.
-
- .. rubric:: Examples
-
- For usage examples, see `ode`.
-
- .. rubric:: Attributes
-
- === ==========
- t (float) Current time.
- y (ndarray) Current variable values.
- === ==========
-
- .. rubric:: Methods
-
- === ==========
- a
- b
- c
- === ==========
-
- """)
-
-if __name__ == "__main__":
- import nose
- nose.run()
-
diff --git a/doc/sphinxext/numpydoc/tests/test_linkcode.py b/doc/sphinxext/numpydoc/tests/test_linkcode.py
deleted file mode 100644
index 45de6b70d..000000000
--- a/doc/sphinxext/numpydoc/tests/test_linkcode.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import numpydoc.linkcode
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/tests/test_phantom_import.py b/doc/sphinxext/numpydoc/tests/test_phantom_import.py
deleted file mode 100644
index b9ee76fc6..000000000
--- a/doc/sphinxext/numpydoc/tests/test_phantom_import.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import numpydoc.phantom_import
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/tests/test_plot_directive.py b/doc/sphinxext/numpydoc/tests/test_plot_directive.py
deleted file mode 100644
index 3496d2c29..000000000
--- a/doc/sphinxext/numpydoc/tests/test_plot_directive.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import numpydoc.plot_directive
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/tests/test_traitsdoc.py b/doc/sphinxext/numpydoc/tests/test_traitsdoc.py
deleted file mode 100644
index 99b1600fb..000000000
--- a/doc/sphinxext/numpydoc/tests/test_traitsdoc.py
+++ /dev/null
@@ -1,3 +0,0 @@
-import numpydoc.traitsdoc
-
-# No tests at the moment...
diff --git a/doc/sphinxext/numpydoc/traitsdoc.py b/doc/sphinxext/numpydoc/traitsdoc.py
deleted file mode 100644
index 784c4fc2f..000000000
--- a/doc/sphinxext/numpydoc/traitsdoc.py
+++ /dev/null
@@ -1,141 +0,0 @@
-"""
-=========
-traitsdoc
-=========
-
-Sphinx extension that handles docstrings in the Numpy standard format, [1]
-and support Traits [2].
-
-This extension can be used as a replacement for ``numpydoc`` when support
-for Traits is required.
-
-.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
-.. [2] http://code.enthought.com/projects/traits/
-
-"""
-from __future__ import division
-
-import inspect
-import os
-import pydoc
-
-from . import docscrape
-from . import docscrape_sphinx
-from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString
-
-from . import numpydoc
-
-from . import comment_eater
-
-class SphinxTraitsDoc(SphinxClassDoc):
- def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc):
- if not inspect.isclass(cls):
- raise ValueError("Initialise using a class. Got %r" % cls)
- self._cls = cls
-
- if modulename and not modulename.endswith('.'):
- modulename += '.'
- self._mod = modulename
- self._name = cls.__name__
- self._func_doc = func_doc
-
- docstring = pydoc.getdoc(cls)
- docstring = docstring.split('\n')
-
- # De-indent paragraph
- try:
- indent = min(len(s) - len(s.lstrip()) for s in docstring
- if s.strip())
- except ValueError:
- indent = 0
-
- for n,line in enumerate(docstring):
- docstring[n] = docstring[n][indent:]
-
- self._doc = docscrape.Reader(docstring)
- self._parsed_data = {
- 'Signature': '',
- 'Summary': '',
- 'Description': [],
- 'Extended Summary': [],
- 'Parameters': [],
- 'Returns': [],
- 'Raises': [],
- 'Warns': [],
- 'Other Parameters': [],
- 'Traits': [],
- 'Methods': [],
- 'See Also': [],
- 'Notes': [],
- 'References': '',
- 'Example': '',
- 'Examples': '',
- 'index': {}
- }
-
- self._parse()
-
- def _str_summary(self):
- return self['Summary'] + ['']
-
- def _str_extended_summary(self):
- return self['Description'] + self['Extended Summary'] + ['']
-
- def __str__(self, indent=0, func_role="func"):
- out = []
- out += self._str_signature()
- out += self._str_index() + ['']
- out += self._str_summary()
- out += self._str_extended_summary()
- for param_list in ('Parameters', 'Traits', 'Methods',
- 'Returns','Raises'):
- out += self._str_param_list(param_list)
- out += self._str_see_also("obj")
- out += self._str_section('Notes')
- out += self._str_references()
- out += self._str_section('Example')
- out += self._str_section('Examples')
- out = self._str_indent(out,indent)
- return '\n'.join(out)
-
-def looks_like_issubclass(obj, classname):
- """ Return True if the object has a class or superclass with the given class
- name.
-
- Ignores old-style classes.
- """
- t = obj
- if t.__name__ == classname:
- return True
- for klass in t.__mro__:
- if klass.__name__ == classname:
- return True
- return False
-
-def get_doc_object(obj, what=None, config=None):
- if what is None:
- if inspect.isclass(obj):
- what = 'class'
- elif inspect.ismodule(obj):
- what = 'module'
- elif isinstance(obj, collections.Callable):
- what = 'function'
- else:
- what = 'object'
- if what == 'class':
- doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config)
- if looks_like_issubclass(obj, 'HasTraits'):
- for name, trait, comment in comment_eater.get_class_traits(obj):
- # Exclude private traits.
- if not name.startswith('_'):
- doc['Traits'].append((name, trait, comment.splitlines()))
- return doc
- elif what in ('function', 'method'):
- return SphinxFunctionDoc(obj, '', config=config)
- else:
- return SphinxDocString(pydoc.getdoc(obj), config=config)
-
-def setup(app):
- # init numpydoc
- numpydoc.setup(app, get_doc_object)
-
diff --git a/doc/sphinxext/setup.py b/doc/sphinxext/setup.py
deleted file mode 100644
index a6593aaa6..000000000
--- a/doc/sphinxext/setup.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from __future__ import division
-
-import setuptools
-from distutils.core import setup
-
-import sys
-if sys.version_info[0] >= 3 and sys.version_info[1] < 3 or \
- sys.version_info[0] <= 2 and sys.version_info[1] < 6:
- raise RuntimeError("Python version 2.6, 2.7 or >= 3.3 required.")
-
-version = "0.4.dev"
-
-setup(
- name="numpydoc",
- packages=["numpydoc"],
- version=version,
- description="Sphinx extension to support docstrings in Numpy format",
- # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers
- classifiers=["Development Status :: 3 - Alpha",
- "Environment :: Plugins",
- "License :: OSI Approved :: BSD License",
- "Topic :: Documentation"],
- keywords="sphinx numpy",
- author="Pauli Virtanen and others",
- author_email="pav@iki.fi",
- url="http://github.com/numpy/numpy/tree/master/doc/sphinxext",
- license="BSD",
- requires=["sphinx (>= 1.0.1)"],
- package_data={'numpydoc': ['tests/test_*.py']},
- test_suite = 'nose.collector',
-)
diff --git a/doc/summarize.py b/doc/summarize.py
index d953bd642..833436cee 100755
--- a/doc/summarize.py
+++ b/doc/summarize.py
@@ -5,7 +5,7 @@ summarize.py
Show a summary about which Numpy functions are documented and which are not.
"""
-from __future__ import division
+from __future__ import division, absolute_import, print_function
import os, glob, re, sys, inspect, optparse
import collections
@@ -74,26 +74,26 @@ def main():
# report
in_sections = {}
- for name, locations in documented.iteritems():
+ for name, locations in documented.items():
for (filename, section, keyword, toctree) in locations:
in_sections.setdefault((filename, section, keyword), []).append(name)
- print "Documented"
- print "==========\n"
+ print("Documented")
+ print("==========\n")
last_filename = None
for (filename, section, keyword), names in sorted(in_sections.items()):
if filename != last_filename:
- print "--- %s\n" % filename
+ print("--- %s\n" % filename)
last_filename = filename
- print " ** ", section
- print format_in_columns(sorted(names), options.cols)
- print "\n"
+ print(" ** ", section)
+ print(format_in_columns(sorted(names), options.cols))
+ print("\n")
- print ""
- print "Undocumented"
- print "============\n"
- print format_in_columns(sorted(undocumented.keys()), options.cols)
+ print("")
+ print("Undocumented")
+ print("============\n")
+ print(format_in_columns(sorted(undocumented.keys()), options.cols))
def check_numpy():
documented = get_documented(glob.glob(SOURCE_DIR + '/*.rst'))
@@ -149,8 +149,8 @@ def format_in_columns(lst, max_columns):
Format a list containing strings to a string containing the items
in columns.
"""
- lst = map(str, lst)
- col_len = max(map(len, lst)) + 2
+ lst = [str(_m) for _m in lst]
+ col_len = max([len(_m) for _m in lst]) + 2
ncols = 80//col_len
if ncols > max_columns:
ncols = max_columns
diff --git a/doc/swig/test/setup.py b/doc/swig/test/setup.py
index 6395ffe46..c54b42ed4 100755
--- a/doc/swig/test/setup.py
+++ b/doc/swig/test/setup.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, print_function
# System imports
from distutils.core import *
diff --git a/doc/swig/test/testArray.py b/doc/swig/test/testArray.py
index 65e69e191..278a75f7a 100755
--- a/doc/swig/test/testArray.py
+++ b/doc/swig/test/testArray.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -277,8 +277,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite(Array2TestCase))
# Execute the test suite
- print "Testing Classes of Module Array"
- print "NumPy version", np.__version__
- print
+ print("Testing Classes of Module Array")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
diff --git a/doc/swig/test/testFarray.py b/doc/swig/test/testFarray.py
index 184fd2564..3905e26bd 100755
--- a/doc/swig/test/testFarray.py
+++ b/doc/swig/test/testFarray.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -152,8 +152,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite(FarrayTestCase))
# Execute the test suite
- print "Testing Classes of Module Farray"
- print "NumPy version", np.__version__
- print
+ print("Testing Classes of Module Farray")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
diff --git a/doc/swig/test/testFortran.py b/doc/swig/test/testFortran.py
index 2765b0653..2175ad1bf 100644
--- a/doc/swig/test/testFortran.py
+++ b/doc/swig/test/testFortran.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -37,7 +37,7 @@ class FortranTestCase(unittest.TestCase):
# Test (type* IN_FARRAY2, int DIM1, int DIM2) typemap
def testSecondElementFortran(self):
"Test Fortran matrix initialized from reshaped NumPy fortranarray"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
self.typeCode)
@@ -45,7 +45,7 @@ class FortranTestCase(unittest.TestCase):
def testSecondElementObject(self):
"Test Fortran matrix initialized from nested list fortranarray"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray([[0,1,2],[3,4,5],[6,7,8]], self.typeCode)
self.assertEquals(second(matrix), 3)
@@ -166,8 +166,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
- print "Testing 2D Functions of Module Matrix"
- print "NumPy version", np.__version__
- print
+ print("Testing 2D Functions of Module Matrix")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
diff --git a/doc/swig/test/testMatrix.py b/doc/swig/test/testMatrix.py
index 68259de3e..d1721941e 100755
--- a/doc/swig/test/testMatrix.py
+++ b/doc/swig/test/testMatrix.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -27,7 +27,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDet(self):
"Test det function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8,7],[6,9]]
self.assertEquals(det(matrix), 30)
@@ -35,7 +35,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetBadList(self):
"Test det function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8,7], ["e", "pi"]]
self.assertRaises(BadListError, det, matrix)
@@ -43,7 +43,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetWrongDim(self):
"Test det function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [8,7]
self.assertRaises(TypeError, det, matrix)
@@ -51,7 +51,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetWrongSize(self):
"Test det function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8,7,6], [5,4,3], [2,1,0]]
self.assertRaises(TypeError, det, matrix)
@@ -59,14 +59,14 @@ class MatrixTestCase(unittest.TestCase):
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetNonContainer(self):
"Test det function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
self.assertRaises(TypeError, det, None)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMax(self):
"Test max function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
matrix = [[6,5,4],[3,2,1]]
self.assertEquals(max(matrix), 6)
@@ -74,7 +74,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxBadList(self):
"Test max function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
matrix = [[6,"five",4], ["three", 2, "one"]]
self.assertRaises(BadListError, max, matrix)
@@ -82,21 +82,21 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, 1, 2, 3])
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMin(self):
"Test min function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
matrix = [[9,8],[7,6],[5,4]]
self.assertEquals(min(matrix), 4)
@@ -104,7 +104,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinBadList(self):
"Test min function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
matrix = [["nine","eight"], ["seven","six"]]
self.assertRaises(BadListError, min, matrix)
@@ -112,21 +112,21 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [1,3,5,7,9])
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinNonContainer(self):
"Test min function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, False)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScale(self):
"Test scale function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],self.typeCode)
scale(matrix,4)
@@ -135,7 +135,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([1,2,2,1],self.typeCode)
self.assertRaises(TypeError, scale, matrix)
@@ -143,7 +143,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1,2],[2,1]],self.typeCode)
self.assertRaises(TypeError, scale, matrix)
@@ -151,7 +151,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1,2,3],[2,1,2],[3,2,1]],'c')
self.assertRaises(TypeError, scale, matrix)
@@ -159,7 +159,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = [[1,2,3],[2,1,2],[3,2,1]]
self.assertRaises(TypeError, scale, matrix)
@@ -167,7 +167,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloor(self):
"Test floor function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([[6,7],[8,9]],self.typeCode)
floor(matrix,7)
@@ -176,7 +176,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorWrongDim(self):
"Test floor function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([6,7,8,9],self.typeCode)
self.assertRaises(TypeError, floor, matrix)
@@ -184,7 +184,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = np.array([[6,7], [8,9]],'c')
self.assertRaises(TypeError, floor, matrix)
@@ -192,7 +192,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY2, int DIM1, int DIM2) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Matrix.__dict__[self.typeStr + "Floor"]
matrix = [[6,7], [8,9]]
self.assertRaises(TypeError, floor, matrix)
@@ -200,7 +200,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeil(self):
"Test ceil function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([[1,2],[3,4]],self.typeCode)
ceil(matrix,3)
@@ -209,7 +209,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([1,2,3,4],self.typeCode)
self.assertRaises(TypeError, ceil, matrix)
@@ -217,7 +217,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilWrongType(self):
"Test ceil function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = np.array([[1,2], [3,4]],'c')
self.assertRaises(TypeError, ceil, matrix)
@@ -225,7 +225,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, type* INPLACE_ARRAY2) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Matrix.__dict__[self.typeStr + "Ceil"]
matrix = [[1,2], [3,4]]
self.assertRaises(TypeError, ceil, matrix)
@@ -233,7 +233,7 @@ class MatrixTestCase(unittest.TestCase):
# Test (type ARGOUT_ARRAY2[ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Matrix.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[1,2,3],[4,5,6],[7,8,9]])
self.assertEquals((lower == [[1,0,0],[4,5,0],[7,8,9]]).all(), True)
@@ -355,8 +355,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
- print "Testing 2D Functions of Module Matrix"
- print "NumPy version", np.__version__
- print
+ print("Testing 2D Functions of Module Matrix")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
diff --git a/doc/swig/test/testTensor.py b/doc/swig/test/testTensor.py
index 41b037811..b6dd2e98a 100755
--- a/doc/swig/test/testTensor.py
+++ b/doc/swig/test/testTensor.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -29,7 +29,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0,1], [2,3]],
[[3,2], [1,0]]]
@@ -41,7 +41,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0,"one"],[2,3]],
[[3,"two"],[1,0]]]
@@ -50,7 +50,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[0,1,2,3],
[3,2,1,0]]
@@ -59,7 +59,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
tensor = [[[0,1,0], [2,3,2]],
[[3,2,3], [1,0,1]]]
@@ -68,14 +68,14 @@ class TensorTestCase(unittest.TestCase):
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
norm = Tensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1,2], [3,4]],
[[5,6], [7,8]]]
@@ -84,7 +84,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1,"two"], [3,4]],
[[5,"six"], [7,8]]]
@@ -93,21 +93,21 @@ class TensorTestCase(unittest.TestCase):
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Tensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, -1, 2, -3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[[9,8], [7,6]],
[[5,4], [3,2]]]
@@ -116,7 +116,7 @@ class TensorTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[["nine",8], [7,6]],
[["five",4], [3,2]]]
@@ -125,21 +125,21 @@ class TensorTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Tensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1,3],[5,7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]],
[[0,1,0], [1,0,1], [0,1,0]],
@@ -152,7 +152,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1,0,1], [0,1,0], [1,0,1]],
[[0,1,0], [1,0,1], [0,1,0]],
@@ -162,7 +162,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[1,0,1], [0,1,0], [1,0,1],
[0,1,0], [1,0,1], [0,1,0]],self.typeCode)
@@ -171,7 +171,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
tensor = np.array([[[1,0], [0,1], [1,0]],
[[0,1], [1,0], [0,1]],
@@ -181,14 +181,14 @@ class TensorTestCase(unittest.TestCase):
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
scale = Tensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1,2], [3,4]],
[[5,6], [7,8]]],self.typeCode)
@@ -199,7 +199,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[[1,2], [3,4]],
[[5,6], [7,8]]],'c')
@@ -208,7 +208,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
tensor = np.array([[1,2], [3,4], [5,6], [7,8]],self.typeCode)
self.assertRaises(TypeError, floor, tensor)
@@ -216,14 +216,14 @@ class TensorTestCase(unittest.TestCase):
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
floor = Tensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeil(self):
"Test ceil function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9,8], [7,6]],
[[5,4], [3,2]]],self.typeCode)
@@ -234,7 +234,7 @@ class TensorTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongType(self):
"Test ceil function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[[9,8], [7,6]],
[[5,4], [3,2]]],'c')
@@ -243,7 +243,7 @@ class TensorTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilWrongDim(self):
"Test ceil function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = np.array([[9,8], [7,6], [5,4], [3,2]], self.typeCode)
self.assertRaises(TypeError, ceil, tensor)
@@ -251,7 +251,7 @@ class TensorTestCase(unittest.TestCase):
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
def testCeilNonArray(self):
"Test ceil function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ceil = Tensor.__dict__[self.typeStr + "Ceil"]
tensor = [[[9,8], [7,6]],
[[5,4], [3,2]]]
@@ -260,7 +260,7 @@ class TensorTestCase(unittest.TestCase):
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Tensor.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[[1,1], [1,1]],
[[1,1], [1,1]]])
@@ -395,8 +395,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
- print "Testing 3D Functions of Module Tensor"
- print "NumPy version", np.__version__
- print
+ print("Testing 3D Functions of Module Tensor")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
diff --git a/doc/swig/test/testVector.py b/doc/swig/test/testVector.py
index 2ad00b856..d644e464c 100755
--- a/doc/swig/test/testVector.py
+++ b/doc/swig/test/testVector.py
@@ -1,5 +1,5 @@
#! /usr/bin/env python
-from __future__ import division
+from __future__ import division, absolute_import, print_function
# System imports
from distutils.util import get_platform
@@ -27,98 +27,98 @@ class VectorTestCase(unittest.TestCase):
# Test the (type IN_ARRAY1[ANY]) typemap
def testLength(self):
"Test length function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertEquals(length([5, 12, 0]), 13)
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthBadList(self):
"Test length function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(BadListError, length, [5, "twelve", 0])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongSize(self):
"Test length function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [5, 12])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthWrongDim(self):
"Test length function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, [[1,2], [3,4]])
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthNonContainer(self):
"Test length function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
self.assertRaises(TypeError, length, None)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProd(self):
"Test prod function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertEquals(prod([1,2,3,4]), 24)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdBadList(self):
"Test prod function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(BadListError, prod, [[1,"two"], ["e","pi"]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdWrongDim(self):
"Test prod function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, [[1,2], [8,9]])
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdNonContainer(self):
"Test prod function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
self.assertRaises(TypeError, prod, None)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSum(self):
"Test sum function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertEquals(sum([5,6,7,8]), 26)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumBadList(self):
"Test sum function with bad list"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(BadListError, sum, [3,4, 5, "pi"])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumWrongDim(self):
"Test sum function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, [[3,4], [5,6]])
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumNonContainer(self):
"Test sum function with non-container"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
self.assertRaises(TypeError, sum, True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverse(self):
"Test reverse function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1,2,4],self.typeCode)
reverse(vector)
@@ -127,7 +127,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongDim(self):
"Test reverse function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([[1,2], [3,4]],self.typeCode)
self.assertRaises(TypeError, reverse, vector)
@@ -135,7 +135,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongSize(self):
"Test reverse function with wrong size"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([9,8,7,6,5,4],self.typeCode)
self.assertRaises(TypeError, reverse, vector)
@@ -143,7 +143,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongType(self):
"Test reverse function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1,2,4],'c')
self.assertRaises(TypeError, reverse, vector)
@@ -151,14 +151,14 @@ class VectorTestCase(unittest.TestCase):
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseNonArray(self):
"Test reverse function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
reverse = Vector.__dict__[self.typeStr + "Reverse"]
self.assertRaises(TypeError, reverse, [2,4,6])
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnes(self):
"Test ones function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros(5,self.typeCode)
ones(vector)
@@ -167,7 +167,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongDim(self):
"Test ones function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5,5),self.typeCode)
self.assertRaises(TypeError, ones, vector)
@@ -175,7 +175,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesWrongType(self):
"Test ones function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
vector = np.zeros((5,5),'c')
self.assertRaises(TypeError, ones, vector)
@@ -183,14 +183,14 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* INPLACE_ARRAY1, int DIM1) typemap
def testOnesNonArray(self):
"Test ones function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
ones = Vector.__dict__[self.typeStr + "Ones"]
self.assertRaises(TypeError, ones, [2,4,6,8])
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZeros(self):
"Test zeros function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(5,self.typeCode)
zeros(vector)
@@ -199,7 +199,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongDim(self):
"Test zeros function with wrong dimensions"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones((5,5),self.typeCode)
self.assertRaises(TypeError, zeros, vector)
@@ -207,7 +207,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosWrongType(self):
"Test zeros function with wrong type"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
vector = np.ones(6,'c')
self.assertRaises(TypeError, zeros, vector)
@@ -215,14 +215,14 @@ class VectorTestCase(unittest.TestCase):
# Test the (int DIM1, type* INPLACE_ARRAY1) typemap
def testZerosNonArray(self):
"Test zeros function with non-array"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
zeros = Vector.__dict__[self.typeStr + "Zeros"]
self.assertRaises(TypeError, zeros, [1,3,5,7,9])
# Test the (type ARGOUT_ARRAY1[ANY]) typemap
def testEOSplit(self):
"Test eoSplit function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
even, odd = eoSplit([1,2,3])
self.assertEquals((even == [1,0,3]).all(), True)
@@ -231,7 +231,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwos(self):
"Test twos function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
vector = twos(5)
self.assertEquals((vector == [2,2,2,2,2]).all(), True)
@@ -239,14 +239,14 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwosNonInt(self):
"Test twos function with non-integer dimension"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
self.assertRaises(TypeError, twos, 5.0)
# Test the (int DIM1, type* ARGOUT_ARRAY1) typemap
def testThrees(self):
"Test threes function"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
vector = threes(6)
self.assertEquals((vector == [3,3,3,3,3,3]).all(), True)
@@ -254,7 +254,7 @@ class VectorTestCase(unittest.TestCase):
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testThreesNonInt(self):
"Test threes function with non-integer dimension"
- print >>sys.stderr, self.typeStr, "... ",
+ print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
self.assertRaises(TypeError, threes, "threes")
@@ -374,8 +374,8 @@ if __name__ == "__main__":
suite.addTest(unittest.makeSuite( doubleTestCase))
# Execute the test suite
- print "Testing 1D Functions of Module Vector"
- print "NumPy version", np.__version__
- print
+ print("Testing 1D Functions of Module Vector")
+ print("NumPy version", np.__version__)
+ print()
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(len(result.errors) + len(result.failures))