summaryrefslogtreecommitdiff
path: root/doc/source
diff options
context:
space:
mode:
Diffstat (limited to 'doc/source')
-rw-r--r--doc/source/conf.py20
-rw-r--r--doc/source/dev/alignment.rst113
-rw-r--r--doc/source/dev/development_advanced_debugging.rst8
-rw-r--r--doc/source/dev/development_environment.rst2
-rw-r--r--doc/source/dev/examples/.doxyfile2
-rw-r--r--doc/source/dev/examples/doxy_class.hpp21
-rw-r--r--doc/source/dev/examples/doxy_func.h11
-rw-r--r--doc/source/dev/examples/doxy_rst.h15
-rw-r--r--doc/source/dev/howto-docs.rst219
-rw-r--r--doc/source/dev/howto_build_docs.rst21
-rw-r--r--doc/source/dev/internals.code-explanations.rst646
-rw-r--r--doc/source/dev/internals.rst175
-rw-r--r--doc/source/dev/underthehood.rst10
-rw-r--r--doc/source/doxyfile340
-rw-r--r--doc/source/f2py/advanced.rst52
-rw-r--r--doc/source/f2py/code/allocarr.f90 (renamed from doc/source/f2py/allocarr.f90)0
-rw-r--r--doc/source/f2py/code/array.f (renamed from doc/source/f2py/array.f)0
-rw-r--r--doc/source/f2py/code/calculate.f (renamed from doc/source/f2py/calculate.f)0
-rw-r--r--doc/source/f2py/code/callback.f (renamed from doc/source/f2py/callback.f)0
-rw-r--r--doc/source/f2py/code/callback2.pyf (renamed from doc/source/f2py/callback2.pyf)0
-rw-r--r--doc/source/f2py/code/common.f (renamed from doc/source/f2py/common.f)0
-rw-r--r--doc/source/f2py/code/extcallback.f (renamed from doc/source/f2py/extcallback.f)0
-rw-r--r--doc/source/f2py/code/fib1.f (renamed from doc/source/f2py/fib1.f)0
-rw-r--r--doc/source/f2py/code/fib1.pyf (renamed from doc/source/f2py/fib1.pyf)0
-rw-r--r--doc/source/f2py/code/fib2.pyf (renamed from doc/source/f2py/fib2.pyf)0
-rw-r--r--doc/source/f2py/code/fib3.f (renamed from doc/source/f2py/fib3.f)0
-rw-r--r--doc/source/f2py/code/ftype.f (renamed from doc/source/f2py/ftype.f)0
-rw-r--r--doc/source/f2py/code/moddata.f90 (renamed from doc/source/f2py/moddata.f90)0
-rw-r--r--doc/source/f2py/code/results/allocarr_session.dat (renamed from doc/source/f2py/allocarr_session.dat)0
-rw-r--r--doc/source/f2py/code/results/array_session.dat (renamed from doc/source/f2py/array_session.dat)0
-rw-r--r--doc/source/f2py/code/results/calculate_session.dat (renamed from doc/source/f2py/calculate_session.dat)0
-rw-r--r--doc/source/f2py/code/results/callback_session.dat (renamed from doc/source/f2py/callback_session.dat)0
-rw-r--r--doc/source/f2py/code/results/common_session.dat (renamed from doc/source/f2py/common_session.dat)0
-rw-r--r--doc/source/f2py/code/results/compile_session.dat (renamed from doc/source/f2py/compile_session.dat)0
-rw-r--r--doc/source/f2py/code/results/extcallback_session.dat (renamed from doc/source/f2py/extcallback_session.dat)0
-rw-r--r--doc/source/f2py/code/results/ftype_session.dat (renamed from doc/source/f2py/ftype_session.dat)0
-rw-r--r--doc/source/f2py/code/results/moddata_session.dat (renamed from doc/source/f2py/moddata_session.dat)0
-rw-r--r--doc/source/f2py/code/results/run_main_session.dat (renamed from doc/source/f2py/run_main_session.dat)0
-rw-r--r--doc/source/f2py/code/results/scalar_session.dat (renamed from doc/source/f2py/scalar_session.dat)0
-rw-r--r--doc/source/f2py/code/results/spam_session.dat (renamed from doc/source/f2py/spam_session.dat)0
-rw-r--r--doc/source/f2py/code/results/string_session.dat (renamed from doc/source/f2py/string_session.dat)0
-rw-r--r--doc/source/f2py/code/results/var_session.dat (renamed from doc/source/f2py/var_session.dat)0
-rw-r--r--doc/source/f2py/code/scalar.f (renamed from doc/source/f2py/scalar.f)0
-rw-r--r--doc/source/f2py/code/setup_example.py (renamed from doc/source/f2py/setup_example.py)0
-rw-r--r--doc/source/f2py/code/spam.pyf (renamed from doc/source/f2py/spam.pyf)0
-rw-r--r--doc/source/f2py/code/string.f (renamed from doc/source/f2py/string.f)0
-rw-r--r--doc/source/f2py/code/var.pyf (renamed from doc/source/f2py/var.pyf)0
-rw-r--r--doc/source/f2py/distutils.rst45
-rw-r--r--doc/source/f2py/f2py.getting-started.rst206
-rw-r--r--doc/source/f2py/index.rst16
-rw-r--r--doc/source/f2py/python-usage.rst266
-rw-r--r--doc/source/f2py/signature-file.rst601
-rw-r--r--doc/source/f2py/usage.rst114
-rw-r--r--doc/source/index.rst2
-rw-r--r--doc/source/reference/alignment.rst101
-rw-r--r--doc/source/reference/arrays.datetime.rst18
-rw-r--r--doc/source/reference/arrays.dtypes.rst17
-rw-r--r--doc/source/reference/arrays.ndarray.rst8
-rw-r--r--doc/source/reference/arrays.scalars.rst13
-rw-r--r--doc/source/reference/c-api/array.rst116
-rw-r--r--doc/source/reference/c-api/data_memory.rst158
-rw-r--r--doc/source/reference/c-api/index.rst1
-rw-r--r--doc/source/reference/c-api/iterator.rst2
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst77
-rw-r--r--doc/source/reference/global_state.rst10
-rw-r--r--doc/source/reference/index.rst1
-rw-r--r--doc/source/reference/internals.code-explanations.rst615
-rw-r--r--doc/source/reference/internals.rst164
-rw-r--r--doc/source/reference/random/bit_generators/index.rst2
-rw-r--r--doc/source/reference/random/index.rst2
-rw-r--r--doc/source/reference/random/performance.rst2
-rw-r--r--doc/source/reference/routines.ma.rst11
-rw-r--r--doc/source/reference/routines.math.rst21
-rw-r--r--doc/source/reference/routines.polynomials.rst4
-rw-r--r--doc/source/reference/routines.statistics.rst6
-rw-r--r--doc/source/reference/simd/simd-optimizations.rst16
-rw-r--r--doc/source/reference/ufuncs.rst2
-rw-r--r--doc/source/release.rst3
-rw-r--r--doc/source/release/1.14.0-notes.rst2
-rw-r--r--doc/source/release/1.15.0-notes.rst4
-rw-r--r--doc/source/release/1.16.0-notes.rst4
-rw-r--r--doc/source/release/1.19.0-notes.rst2
-rw-r--r--doc/source/release/1.20.0-notes.rst2
-rw-r--r--doc/source/release/1.21.0-notes.rst2
-rw-r--r--doc/source/release/1.21.3-notes.rst44
-rw-r--r--doc/source/release/1.8.0-notes.rst6
-rw-r--r--doc/source/release/1.9.0-notes.rst2
-rw-r--r--doc/source/user/absolute_beginners.rst14
-rw-r--r--doc/source/user/basics.broadcasting.rst1
-rw-r--r--doc/source/user/basics.copies.rst152
-rw-r--r--doc/source/user/basics.creation.rst26
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst2
-rw-r--r--doc/source/user/basics.rec.rst2
-rw-r--r--doc/source/user/basics.rst1
-rw-r--r--doc/source/user/building.rst3
-rw-r--r--doc/source/user/c-info.beyond-basics.rst5
-rw-r--r--doc/source/user/c-info.how-to-extend.rst2
-rw-r--r--doc/source/user/c-info.python-as-glue.rst89
-rw-r--r--doc/source/user/c-info.ufunc-tutorial.rst21
-rw-r--r--doc/source/user/how-to-how-to.rst2
-rw-r--r--doc/source/user/misc.rst2
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst6
-rw-r--r--doc/source/user/quickstart.rst9
-rw-r--r--doc/source/user/whatisnumpy.rst2
104 files changed, 2979 insertions, 1703 deletions
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 41b5cee25..a7a885c34 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -1,7 +1,7 @@
-# -*- coding: utf-8 -*-
import os
import re
import sys
+import importlib
# Minimum version, enforced by sphinx
needs_sphinx = '3.2.0'
@@ -86,6 +86,16 @@ extensions = [
'sphinx.ext.mathjax',
]
+skippable_extensions = [
+ ('breathe', 'skip generating C/C++ API from comment blocks.'),
+]
+for ext, warn in skippable_extensions:
+ ext_exist = importlib.util.find_spec(ext) is not None
+ if ext_exist:
+ extensions.append(ext)
+ else:
+ print(f"Unable to find Sphinx extension '{ext}', {warn}.")
+
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -477,3 +487,11 @@ class NumPyLexer(CLexer):
inherit,
],
}
+
+
+# -----------------------------------------------------------------------------
+# Breathe & Doxygen
+# -----------------------------------------------------------------------------
+breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml"))
+breathe_default_project = "numpy"
+breathe_default_members = ("members", "undoc-members", "protected-members")
diff --git a/doc/source/dev/alignment.rst b/doc/source/dev/alignment.rst
new file mode 100644
index 000000000..bb1198ebf
--- /dev/null
+++ b/doc/source/dev/alignment.rst
@@ -0,0 +1,113 @@
+.. currentmodule:: numpy
+
+.. _alignment:
+
+****************
+Memory Alignment
+****************
+
+NumPy alignment goals
+=====================
+
+There are three use-cases related to memory alignment in NumPy (as of 1.14):
+
+ 1. Creating :term:`structured datatypes <structured data type>` with
+ :term:`fields <field>` aligned like in a C-struct.
+ 2. Speeding up copy operations by using :class:`uint` assignment instead of
+ ``memcpy``.
+ 3. Guaranteeing safe aligned access for ufuncs/setitem/casting code.
+
+NumPy uses two different forms of alignment to achieve these goals:
+"True alignment" and "Uint alignment".
+
+"True" alignment refers to the architecture-dependent alignment of an
+equivalent C-type in C. For example, in x64 systems :attr:`float64` is
+equivalent to ``double`` in C. On most systems, this has either an alignment of
+4 or 8 bytes (and this can be controlled in GCC by the option
+``malign-double``). A variable is aligned in memory if its memory offset is a
+multiple of its alignment. On some systems (eg. sparc) memory alignment is
+required; on others, it gives a speedup.
+
+"Uint" alignment depends on the size of a datatype. It is defined to be the
+"True alignment" of the uint used by NumPy's copy-code to copy the datatype, or
+undefined/unaligned if there is no equivalent uint. Currently, NumPy uses
+``uint8``, ``uint16``, ``uint32``, ``uint64``, and ``uint64`` to copy data of
+size 1, 2, 4, 8, 16 bytes respectively, and all other sized datatypes cannot
+be uint-aligned.
+
+For example, on a (typical Linux x64 GCC) system, the NumPy :attr:`complex64`
+datatype is implemented as ``struct { float real, imag; }``. This has "true"
+alignment of 4 and "uint" alignment of 8 (equal to the true alignment of
+``uint64``).
+
+Some cases where uint and true alignment are different (default GCC Linux):
+ ====== ========= ======== ========
+ arch type true-aln uint-aln
+ ====== ========= ======== ========
+ x86_64 complex64 4 8
+ x86_64 float128 16 8
+ x86 float96 4 \-
+ ====== ========= ======== ========
+
+
+Variables in NumPy which control and describe alignment
+=======================================================
+
+There are 4 relevant uses of the word ``align`` used in NumPy:
+
+ * The :attr:`dtype.alignment` attribute (``descr->alignment`` in C). This is
+ meant to reflect the "true alignment" of the type. It has arch-dependent
+ default values for all datatypes, except for the structured types created
+ with ``align=True`` as described below.
+ * The ``ALIGNED`` flag of an ndarray, computed in ``IsAligned`` and checked
+ by :c:func:`PyArray_ISALIGNED`. This is computed from
+ :attr:`dtype.alignment`.
+ It is set to ``True`` if every item in the array is at a memory location
+ consistent with :attr:`dtype.alignment`, which is the case if the
+ ``data ptr`` and all strides of the array are multiples of that alignment.
+ * The ``align`` keyword of the dtype constructor, which only affects
+ :ref:`structured_arrays`. If the structure's field offsets are not manually
+ provided, NumPy determines offsets automatically. In that case,
+ ``align=True`` pads the structure so that each field is "true" aligned in
+ memory and sets :attr:`dtype.alignment` to be the largest of the field
+ "true" alignments. This is like what C-structs usually do. Otherwise if
+ offsets or itemsize were manually provided ``align=True`` simply checks that
+ all the fields are "true" aligned and that the total itemsize is a multiple
+ of the largest field alignment. In either case :attr:`dtype.isalignedstruct`
+ is also set to True.
+ * ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in
+ an analogous way to how ``IsAligned`` checks for true alignment.
+
+Consequences of alignment
+=========================
+
+Here is how the variables above are used:
+
+ 1. Creating aligned structs: To know how to offset a field when
+ ``align=True``, NumPy looks up ``field.dtype.alignment``. This includes
+ fields that are nested structured arrays.
+ 2. Ufuncs: If the ``ALIGNED`` flag of an array is False, ufuncs will
+ buffer/cast the array before evaluation. This is needed since ufunc inner
+ loops access raw elements directly, which might fail on some archs if the
+ elements are not true-aligned.
+ 3. Getitem/setitem/copyswap function: Similar to ufuncs, these functions
+ generally have two code paths. If ``ALIGNED`` is False they will
+ use a code path that buffers the arguments so they are true-aligned.
+ 4. Strided copy code: Here, "uint alignment" is used instead. If the itemsize
+ of an array is equal to 1, 2, 4, 8 or 16 bytes and the array is uint
+    aligned then instead NumPy will do ``*((uintN*)dst) = *((uintN*)src)`` for
+ appropriate N. Otherwise, NumPy copies by doing ``memcpy(dst, src, N)``.
+ 5. Nditer code: Since this often calls the strided copy code, it must
+ check for "uint alignment".
+ 6. Cast code: This checks for "true" alignment, as it does
+ ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does
+ ``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)``
+ where dstval/srcval are aligned.
+
+Note that the strided-copy and strided-cast code are deeply intertwined and so
+any arrays being processed by them must be both uint and true aligned, even
+though the copy-code only needs uint alignment and the cast code only true
+alignment. If there is ever a big rewrite of this code it would be good to
+allow them to use different alignments.
+
+
diff --git a/doc/source/dev/development_advanced_debugging.rst b/doc/source/dev/development_advanced_debugging.rst
index fa4014fdb..18a7f6ae9 100644
--- a/doc/source/dev/development_advanced_debugging.rst
+++ b/doc/source/dev/development_advanced_debugging.rst
@@ -3,8 +3,8 @@ Advanced debugging tools
========================
If you reached here, you want to dive into, or use, more advanced tooling.
-This is usually not necessary for first time contributers and most
-day-to-day developement.
+This is usually not necessary for first time contributors and most
+day-to-day development.
These are used more rarely, for example close to a new NumPy release,
or when a large or particular complex change was made.
@@ -25,7 +25,7 @@ narrow down.
We do not expect any of these tools to be run by most contributors.
However, you can ensure that we can track down such issues more easily:
-* Tests should cover all code paths, incluing error paths.
+* Tests should cover all code paths, including error paths.
* Try to write short and simple tests. If you have a very complicated test
consider creating an additional simpler test as well.
This can be helpful, because often it is only easy to find which test
@@ -112,7 +112,7 @@ where ``PYTHONMALLOC=malloc`` is necessary to avoid false positives from python
itself.
Depending on the system and valgrind version, you may see more false positives.
``valgrind`` supports "suppressions" to ignore some of these, and Python does
-have a supression file (and even a compile time option) which may help if you
+have a suppression file (and even a compile time option) which may help if you
find it necessary.
Valgrind helps:
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index 665198c69..37cf6f7af 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -122,7 +122,7 @@ source tree is to use::
NumPy uses a series of tests to probe the compiler and libc libraries for
-funtions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
+functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
using ``HAVE_XXX`` definitions. These tests are run during the ``build_src``
phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and
``generate_numpyconfig_h`` functions. Since the output of these calls includes
diff --git a/doc/source/dev/examples/.doxyfile b/doc/source/dev/examples/.doxyfile
new file mode 100644
index 000000000..966c1b636
--- /dev/null
+++ b/doc/source/dev/examples/.doxyfile
@@ -0,0 +1,2 @@
+INPUT += @CUR_DIR
+INCLUDE_PATH += @CUR_DIR
diff --git a/doc/source/dev/examples/doxy_class.hpp b/doc/source/dev/examples/doxy_class.hpp
new file mode 100644
index 000000000..ceba63487
--- /dev/null
+++ b/doc/source/dev/examples/doxy_class.hpp
@@ -0,0 +1,21 @@
+/**
+ * Template to represent limbo numbers.
+ *
+ * Specializations for integer types that are part of nowhere.
+ * It doesn't support any real types.
+ *
+ * @param Tp Type of the integer. Required to be an integer type.
+ * @param N Number of elements.
+*/
+template<typename Tp, std::size_t N>
+class DoxyLimbo {
+ public:
+ /// Default constructor. Initialize nothing.
+ DoxyLimbo();
+ /// Set Default behavior for copy the limbo.
+ DoxyLimbo(const DoxyLimbo<Tp, N> &l);
+ /// Returns the raw data for the limbo.
+ const Tp *data();
+ protected:
+ Tp p_data[N]; ///< Example for inline comment.
+};
diff --git a/doc/source/dev/examples/doxy_func.h b/doc/source/dev/examples/doxy_func.h
new file mode 100644
index 000000000..792a9d1b7
--- /dev/null
+++ b/doc/source/dev/examples/doxy_func.h
@@ -0,0 +1,11 @@
+/**
+ * This is a simple brief.
+ *
+ * And the details go here.
+ * Multi lines are welcome.
+ *
+ * @param num leave a comment for parameter num.
+ * @param str leave a comment for the second parameter.
+ * @return leave a comment for the returned value.
+ */
+int doxy_javadoc_example(int num, const char *str);
diff --git a/doc/source/dev/examples/doxy_rst.h b/doc/source/dev/examples/doxy_rst.h
new file mode 100644
index 000000000..6ab4a0775
--- /dev/null
+++ b/doc/source/dev/examples/doxy_rst.h
@@ -0,0 +1,15 @@
+/**
+ * A comment block contains reST markup.
+ * @rst
+ * .. note::
+ *
+ * Thanks to Breathe_, we were able to bring it to Doxygen_
+ *
+ * Some code example::
+ *
+ * int example(int x) {
+ * return x * 2;
+ * }
+ * @endrst
+ */
+void doxy_reST_example(void);
diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst
index 3156d3452..93fec509c 100644
--- a/doc/source/dev/howto-docs.rst
+++ b/doc/source/dev/howto-docs.rst
@@ -59,8 +59,8 @@ Obvious **wording** mistakes (like leaving out a "not") fall into the typo
category, but other rewordings -- even for grammar -- require a judgment call,
which raises the bar. Test the waters by first presenting the fix as an issue.
-Some functions/objects like numpy.ndarray.transpose, numpy.array etc. defined in
-C-extension modules have their docstrings defined seperately in `_add_newdocs.py
+Some functions/objects like numpy.ndarray.transpose, numpy.array etc. defined in
+C-extension modules have their docstrings defined separately in `_add_newdocs.py
<https://github.com/numpy/numpy/blob/main/numpy/core/_add_newdocs.py>`__
**********************
@@ -72,7 +72,7 @@ Your frustrations using our documents are our best guide to what needs fixing.
If you write a missing doc you join the front line of open source, but it's
a meaningful contribution just to let us know what's missing. If you want to
compose a doc, run your thoughts by the `mailing list
-<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for futher
+<https://mail.python.org/mailman/listinfo/numpy-discussion>`__ for further
ideas and feedback. If you want to alert us to a gap,
`open an issue <https://github.com/numpy/numpy/issues>`__. See
`this issue <https://github.com/numpy/numpy/issues/15760>`__ for an example.
@@ -215,6 +215,219 @@ Note that for documentation within NumPy, it is not necessary to do
Please use the ``numpydoc`` :ref:`formatting standard <numpydoc:format>` as
shown in their :ref:`example <numpydoc:example>`.
+.. _doc_c_code:
+
+Documenting C/C++ Code
+======================
+
+NumPy uses Doxygen_ to parse specially-formatted C/C++ comment blocks. This generates
+XML files, which are converted by Breathe_ into RST, which is used by Sphinx.
+
+**It takes three steps to complete the documentation process**:
+
+1. Writing the comment blocks
+-----------------------------
+
+Although there is still no set commenting style to follow, the Javadoc
+style is preferable to the others due to the similarities with the
+currently existing non-indexed comment blocks.
+
+.. note::
+ Please see `"Documenting the code" <https://www.doxygen.nl/manual/docblocks.html>`__.
+
+**This is what Javadoc style looks like**:
+
+.. literalinclude:: examples/doxy_func.h
+
+**And here is how it is rendered**:
+
+.. doxygenfunction:: doxy_javadoc_example
+
+**For line comment, you can use a triple forward slash. For example**:
+
+.. literalinclude:: examples/doxy_class.hpp
+
+**And here is how it is rendered**:
+
+.. doxygenclass:: DoxyLimbo
+
+Common Doxygen Tags:
+++++++++++++++++++++
+
+.. note::
+ For more tags/commands, please take a look at https://www.doxygen.nl/manual/commands.html
+
+``@brief``
+
+Starts a paragraph that serves as a brief description. By default the first sentence
+of the documentation block is automatically treated as a brief description, since
+option `JAVADOC_AUTOBRIEF <https://www.doxygen.nl/manual/config.html#cfg_javadoc_autobrief>`__
+is enabled within doxygen configurations.
+
+``@details``
+
+Just like ``@brief`` starts a brief description, ``@details`` starts the detailed description.
+You can also start a new paragraph (blank line) then the ``@details`` command is not needed.
+
+``@param``
+
+Starts a parameter description for a function parameter with name <parameter-name>,
+followed by a description of the parameter. The existence of the parameter is checked
+and a warning is given if the documentation of this (or any other) parameter is missing
+or not present in the function declaration or definition.
+
+``@return``
+
+Starts a return value description for a function.
+Multiple adjacent ``@return`` commands will be joined into a single paragraph.
+The ``@return`` description ends when a blank line or some other sectioning command is encountered.
+
+``@code/@endcode``
+
+Starts/Ends a block of code. A code block is treated differently from ordinary text.
+It is interpreted as source code.
+
+``@rst/@endrst``
+
+Starts/Ends a block of reST markup.
+
+Example
+~~~~~~~
+**Take a look at the following example**:
+
+.. literalinclude:: examples/doxy_rst.h
+
+**And here is how it is rendered**:
+
+.. doxygenfunction:: doxy_reST_example
+
+2. Feeding Doxygen
+------------------
+
+Not all header files are collected automatically. You have to add the desired
+C/C++ header paths within the sub-config files of Doxygen.
+
+Sub-config files have the unique name ``.doxyfile``, which you can usually find near
+directories that contain documented headers. You need to create a new config file if
+there's not one located in a path close (2-depth) to the headers you want to add.
+
+Sub-config files can accept any of Doxygen_ `configuration options <https://www.doxygen.nl/manual/config.html>`__,
+but do not override or re-initialize any configuration option,
+rather only use the concatenation operator "+=". For example::
+
+  # to specify certain headers
+ INPUT += @CUR_DIR/header1.h \
+ @CUR_DIR/header2.h
+ # to add all headers in certain path
+ INPUT += @CUR_DIR/to/headers
+ # to define certain macros
+ PREDEFINED += C_MACRO(X)=X
+ # to enable certain branches
+ PREDEFINED += NPY_HAVE_FEATURE \
+ NPY_HAVE_FEATURE2
+
+.. note::
+   @CUR_DIR is a template constant that returns the current
+ dir path of the sub-config file.
+
+3. Inclusion directives
+-----------------------
+
+Breathe_ provides a wide range of custom directives to allow
+converting the documents generated by Doxygen_ into reST files.
+
+.. note::
+ For more information, please check out "`Directives & Config Variables <https://breathe.readthedocs.io/en/latest/directives.html>`__"
+
+Common directives:
+++++++++++++++++++
+
+``doxygenfunction``
+
+This directive generates the appropriate output for a single function.
+The function name is required to be unique in the project.
+
+.. code::
+
+ .. doxygenfunction:: <function name>
+ :outline:
+ :no-link:
+
+Check out the `example <https://breathe.readthedocs.io/en/latest/function.html#function-example>`__
+to see it in action.
+
+
+``doxygenclass``
+
+This directive generates the appropriate output for a single class.
+It takes the standard project, path, outline and no-link options and
+additionally the members, protected-members, private-members, undoc-members,
+membergroups and members-only options:
+
+.. code::
+
+ .. doxygenclass:: <class name>
+ :members: [...]
+ :protected-members:
+ :private-members:
+ :undoc-members:
+ :membergroups: ...
+ :members-only:
+ :outline:
+ :no-link:
+
+Check out the `doxygenclass documentation <https://breathe.readthedocs.io/en/latest/class.html#class-example>`__
+for more details and to see it in action.
+
+``doxygennamespace``
+
+This directive generates the appropriate output for the contents of a namespace.
+It takes the standard project, path, outline and no-link options and additionally the content-only,
+members, protected-members, private-members and undoc-members options.
+To reference a nested namespace, the full namespaced path must be provided,
+e.g. foo::bar for the bar namespace inside the foo namespace.
+
+.. code::
+
+ .. doxygennamespace:: <namespace>
+ :content-only:
+ :outline:
+ :members:
+ :protected-members:
+ :private-members:
+ :undoc-members:
+ :no-link:
+
+Check out the `doxygennamespace documentation <https://breathe.readthedocs.io/en/latest/namespace.html#namespace-example>`__
+for more details and to see it in action.
+
+``doxygengroup``
+
+This directive generates the appropriate output for the contents of a doxygen group.
+A doxygen group can be declared with specific doxygen markup in the source comments
+as covered in the doxygen `grouping documentation <https://www.doxygen.nl/manual/grouping.html>`__.
+
+It takes the standard project, path, outline and no-link options and additionally the
+content-only, members, protected-members, private-members and undoc-members options.
+
+.. code::
+
+ .. doxygengroup:: <group name>
+ :content-only:
+ :outline:
+ :members:
+ :protected-members:
+ :private-members:
+ :undoc-members:
+ :no-link:
+ :inner:
+
+Check out the `doxygengroup documentation <https://breathe.readthedocs.io/en/latest/group.html#group-example>`__
+for more details and to see it in action.
+
+.. _`Doxygen`: https://www.doxygen.nl/index.html
+.. _`Breathe`: https://breathe.readthedocs.io/en/latest/
+
*********************
Documentation reading
diff --git a/doc/source/dev/howto_build_docs.rst b/doc/source/dev/howto_build_docs.rst
index 884cf7935..b175926da 100644
--- a/doc/source/dev/howto_build_docs.rst
+++ b/doc/source/dev/howto_build_docs.rst
@@ -58,18 +58,28 @@ new virtual environment is recommended.
Dependencies
^^^^^^^^^^^^
-All of the necessary dependencies for building the NumPy docs can be installed
-with::
+All of the necessary dependencies for building the NumPy docs except for
+Doxygen_ can be installed with::
pip install -r doc_requirements.txt
-We currently use Sphinx_ for generating the API and reference
-documentation for NumPy. In addition, building the documentation requires
-the Sphinx extension `plot_directive`, which is shipped with
+We currently use Sphinx_ along with Doxygen_ for generating the API and
+reference documentation for NumPy. In addition, building the documentation
+requires the Sphinx extension `plot_directive`, which is shipped with
:doc:`Matplotlib <matplotlib:index>`. We also use numpydoc_ to render docstrings in
the generated API documentation. :doc:`SciPy <scipy:index>`
is installed since some parts of the documentation require SciPy functions.
+For installing Doxygen_, please check the official
+`download <https://www.doxygen.nl/download.html#srcbin>`_ and
+`installation <https://www.doxygen.nl/manual/install.html>`_ pages, or if you
+are using Linux then you can install it through your distribution package manager.
+
+.. note::
+
+   Try to install a newer version of Doxygen_ (> 1.8.10), otherwise you may get some
+ warnings during the build.
+
Submodules
^^^^^^^^^^
@@ -80,6 +90,7 @@ additional parts required for building the documentation::
.. _Sphinx: http://www.sphinx-doc.org/
.. _numpydoc: https://numpydoc.readthedocs.io/en/latest/index.html
+.. _Doxygen: https://www.doxygen.nl/index.html
Instructions
------------
diff --git a/doc/source/dev/internals.code-explanations.rst b/doc/source/dev/internals.code-explanations.rst
new file mode 100644
index 000000000..b6edd61b1
--- /dev/null
+++ b/doc/source/dev/internals.code-explanations.rst
@@ -0,0 +1,646 @@
+.. currentmodule:: numpy
+
+.. _c-code-explanations:
+
+*************************
+NumPy C code explanations
+*************************
+
+ Fanaticism consists of redoubling your efforts when you have forgotten
+ your aim.
+ --- *George Santayana*
+
+ An authority is a person who can tell you more about something than
+ you really care to know.
+ --- *Unknown*
+
+This page attempts to explain the logic behind some of the new
+pieces of code. The purpose behind these explanations is to enable
+somebody to be able to understand the ideas behind the implementation
+somewhat more easily than just staring at the code. Perhaps in this
+way, the algorithms can be improved on, borrowed from, and/or
+optimized by more people.
+
+
+Memory model
+============
+
+.. index::
+ pair: ndarray; memory model
+
+One fundamental aspect of the :class:`ndarray` is that an array is seen as a
+"chunk" of memory starting at some location. The interpretation of
+this memory depends on the :term:`stride` information. For each dimension in
+an :math:`N`-dimensional array, an integer (:term:`stride`) dictates how many
+bytes must be skipped to get to the next element in that dimension.
+Unless you have a single-segment array, this :term:`stride` information must
+be consulted when traversing through an array. It is not difficult to
+write code that accepts strides, you just have to use ``char*``
+pointers because strides are in units of bytes. Keep in mind also that
+strides do not have to be unit-multiples of the element size. Also,
+remember that if the number of dimensions of the array is 0 (sometimes
+called a ``rank-0`` array), then the :term:`strides <stride>` and
+:term:`dimensions <dimension>` variables are ``NULL``.
+
+Besides the structural information contained in the strides and
+dimensions members of the :c:type:`PyArrayObject`, the flags contain
+important information about how the data may be accessed. In particular,
+the :c:data:`NPY_ARRAY_ALIGNED` flag is set when the memory is on a
+suitable boundary according to the datatype array. Even if you have
+a :term:`contiguous` chunk of memory, you cannot just assume it is safe to
+dereference a datatype-specific pointer to an element. Only if the
+:c:data:`NPY_ARRAY_ALIGNED` flag is set, this is a safe operation. On
+some platforms it will work but on others, like Solaris, it will cause
+a bus error. The :c:data:`NPY_ARRAY_WRITEABLE` should also be ensured
+if you plan on writing to the memory area of the array. It is also
+possible to obtain a pointer to an unwritable memory area. Sometimes,
+writing to the memory area when the :c:data:`NPY_ARRAY_WRITEABLE` flag is not
+set will just be rude. Other times it can cause program crashes (*e.g.*
+a data-area that is a read-only memory-mapped file).
+
+
+Data-type encapsulation
+=======================
+
+.. seealso:: :ref:`arrays.dtypes`
+
+.. index::
+ single: dtype
+
+The :ref:`datatype <arrays.dtypes>` is an important abstraction of the
+:class:`ndarray`. Operations
+will look to the datatype to provide the key functionality that is
+needed to operate on the array. This functionality is provided in the
+list of function pointers pointed to by the ``f`` member of the
+:c:type:`PyArray_Descr` structure. In this way, the number of datatypes can be
+extended simply by providing a :c:type:`PyArray_Descr` structure with suitable
+function pointers in the ``f`` member. For built-in types, there are some
+optimizations that bypass this mechanism, but the point of the datatype
+abstraction is to allow new datatypes to be added.
+
+One of the built-in datatypes, the :class:`void` datatype allows for
+arbitrary :term:`structured types <structured data type>` containing 1 or more
+fields as elements of the array. A :term:`field` is simply another datatype
+object along with an offset into the current structured type. In order to
+support arbitrarily nested fields, several recursive implementations of
+datatype access are implemented for the void type. A common idiom is to cycle
+through the elements of the dictionary and perform a specific operation based on
+the datatype object stored at the given offset. These offsets can be
+arbitrary numbers. Therefore, the possibility of encountering misaligned
+data must be recognized and taken into account if necessary.
+
+
+N-D Iterators
+=============
+
+.. seealso:: :ref:`arrays.nditer`
+
+.. index::
+ single: array iterator
+
+A very common operation in much of NumPy code is the need to iterate
+over all the elements of a general, strided, N-dimensional array. This
+operation of a general-purpose N-dimensional loop is abstracted in the
+notion of an iterator object. To write an N-dimensional loop, you only
+have to create an iterator object from an ndarray, work with the
+:c:member:`dataptr <PyArrayIterObject.dataptr>` member of the iterator object
+structure and call the macro :c:func:`PyArray_ITER_NEXT` on the iterator
+object to move to the next element. The ``next`` element is always in
+C-contiguous order. The macro works by first special-casing the C-contiguous,
+1-D, and 2-D cases which work very simply.
+
+For the general case, the iteration works by keeping track of a list
+of coordinate counters in the iterator object. At each iteration, the
+last coordinate counter is increased (starting from 0). If this
+counter is smaller than one less than the size of the array in that
+dimension (a pre-computed and stored value), then the counter is
+increased and the :c:member:`dataptr <PyArrayIterObject.dataptr>` member is
+increased by the strides in that
+dimension and the macro ends. If the end of a dimension is reached,
+the counter for the last dimension is reset to zero and the
+:c:member:`dataptr <PyArrayIterObject.dataptr>` is
+moved back to the beginning of that dimension by subtracting the
+strides value times one less than the number of elements in that
+dimension (this is also pre-computed and stored in the
+:c:member:`backstrides <PyArrayIterObject.backstrides>`
+member of the iterator object). In this case, the macro does not end,
+but a local dimension counter is decremented so that the next-to-last
+dimension replaces the role that the last dimension played and the
+previously-described tests are executed again on the next-to-last
+dimension. In this way, the :c:member:`dataptr <PyArrayIterObject.dataptr>`
+is adjusted appropriately for arbitrary striding.
+
+The :c:member:`coordinates <PyArrayIterObject.coordinates>` member of the
+:c:type:`PyArrayIterObject` structure maintains
+the current N-d counter unless the underlying array is C-contiguous in
+which case the coordinate counting is bypassed. The
+:c:member:`index <PyArrayIterObject.index>` member of
+the :c:type:`PyArrayIterObject` keeps track of the current flat index of the
+iterator. It is updated by the :c:func:`PyArray_ITER_NEXT` macro.
+
+
+Broadcasting
+============
+
+.. seealso:: :ref:`basics.broadcasting`
+
+.. index::
+ single: broadcasting
+
+In Numeric, the ancestor of NumPy, broadcasting was implemented in several
+lines of code buried deep in ``ufuncobject.c``. In NumPy, the notion of
+broadcasting has been abstracted so that it can be performed in multiple places.
+Broadcasting is handled by the function :c:func:`PyArray_Broadcast`. This
+function requires a :c:type:`PyArrayMultiIterObject` (or something that is a
+binary equivalent) to be passed in. The :c:type:`PyArrayMultiIterObject` keeps
+track of the broadcast number of dimensions and size in each
+dimension along with the total size of the broadcast result. It also
+keeps track of the number of arrays being broadcast and a pointer to
+an iterator for each of the arrays being broadcast.
+
+The :c:func:`PyArray_Broadcast` function takes the iterators that have already
+been defined and uses them to determine the broadcast shape in each
+dimension (to create the iterators at the same time that broadcasting
+occurs then use the :c:func:`PyArray_MultiIterNew` function).
+Then, the iterators are
+adjusted so that each iterator thinks it is iterating over an array
+with the broadcast size. This is done by adjusting the iterators
+number of dimensions, and the :term:`shape` in each dimension. This works
+because the iterator strides are also adjusted. Broadcasting only
+adjusts (or adds) length-1 dimensions. For these dimensions, the
+strides variable is simply set to 0 so that the data-pointer for the
+iterator over that array doesn't move as the broadcasting operation
+operates over the extended dimension.
+
+Broadcasting was always implemented in Numeric using 0-valued strides
+for the extended dimensions. It is done in exactly the same way in
+NumPy. The big difference is that now the array of strides is kept
+track of in a :c:type:`PyArrayIterObject`, the iterators involved in a
+broadcast result are kept track of in a :c:type:`PyArrayMultiIterObject`,
+and the :c:func:`PyArray_Broadcast` call implements the
+:ref:`general-broadcasting-rules`.
+
+
+Array Scalars
+=============
+
+.. seealso:: :ref:`arrays.scalars`
+
+.. index::
+ single: array scalars
+
+The array scalars offer a hierarchy of Python types that allow a one-to-one
+correspondence between the datatype stored in an array and the
+Python-type that is returned when an element is extracted from the
+array. An exception to this rule was made with object arrays. Object
+arrays are heterogeneous collections of arbitrary Python objects. When
+you select an item from an object array, you get back the original
+Python object (and not an object array scalar which does exist but is
+rarely used for practical purposes).
+
+The array scalars also offer the same methods and attributes as arrays
+with the intent that the same code can be used to support arbitrary
+dimensions (including 0-dimensions). The array scalars are read-only
+(immutable) with the exception of the void scalar which can also be
+written to so that structured array field setting works more naturally
+(``a[0]['f1'] = value``).
+
+
+Indexing
+========
+
+.. seealso:: :ref:`basics.indexing`, :ref:`arrays.indexing`
+
+.. index::
+ single: indexing
+
+All Python indexing operations ``arr[index]`` are organized by first preparing
+the index and finding the index type. The supported index types are:
+
+* integer
+* :const:`newaxis`
+* :term:`python:slice`
+* :py:data:`Ellipsis`
+* integer arrays/array-likes (advanced)
+* boolean (single boolean array); if there is more than one boolean array as
+ the index or the shape does not match exactly, the boolean array will be
+ converted to an integer array instead.
+* 0-d boolean (and also integer); 0-d boolean arrays are a special
+ case that has to be handled in the advanced indexing code. They signal
+ that a 0-d boolean array had to be interpreted as an integer array.
+
+There is also the scalar array special case, which signals that an integer
+array was interpreted as an integer index. This is important because an
+integer array index forces a copy but is ignored if a scalar is returned
+(full integer index). The prepared index is guaranteed to be valid with the
+exception of
+out of bound values and broadcasting errors for advanced indexing. This
+includes that an :py:data:`Ellipsis` is added for incomplete indices for
+example when a two-dimensional array is indexed with a single integer.
+
+The next step depends on the type of index which was found. If all
+dimensions are indexed with an integer a scalar is returned or set. A
+single boolean indexing array will call specialized boolean functions.
+Indices containing an :py:data:`Ellipsis` or :term:`python:slice` but no
+advanced indexing will always create a view into the old array by calculating
+the new strides and memory offset. This view can then either be returned or,
+for assignments, filled using ``PyArray_CopyObject``. Note that
+``PyArray_CopyObject`` may also be called on temporary arrays in other branches
+to support complicated assignments when the array is of object :class:`dtype`.
+
+Advanced indexing
+-----------------
+
+By far the most complex case is advanced indexing, which may or may not be
+combined with typical view-based indexing. Here integer indices are
+interpreted as view-based. Before trying to understand this, you may want
+to make yourself familiar with its subtleties. The advanced indexing code
+has three different branches and one special case:
+
+* There is one indexing array and it, as well as the assignment array, can
+ be iterated trivially. For example, they may be contiguous. Also, the
+ indexing array must be of :class:`intp` type and the value array in
+ assignments should be of the correct type. This is purely a fast path.
+* There are only integer array indices so that no subarray exists.
+* View-based and advanced indexing is mixed. In this case, the view-based
+ indexing defines a collection of subarrays that are combined by the
+ advanced indexing. For example, ``arr[[1, 2, 3], :]`` is created by
+ vertically stacking the subarrays ``arr[1, :]``, ``arr[2, :]``, and
+ ``arr[3, :]``.
+* There is a subarray but it has exactly one element. This case can be handled
+ as if there is no subarray but needs some care during setup.
+
+Deciding what case applies, checking broadcasting, and determining the kind
+of transposition needed are all done in :c:func:`PyArray_MapIterNew`. After
+setting up, there are two cases. If there is no subarray or it only has one
+element, no subarray iteration is necessary and an iterator is prepared
+which iterates all indexing arrays *as well as* the result or value array.
+If there is a subarray, there are three iterators prepared. One for the
+indexing arrays, one for the result or value array (minus its subarray),
+and one for the subarrays of the original and the result/assignment array.
+The first two iterators give (or allow calculation) of the pointers into
+the start of the subarray, which then allows restarting the subarray
+iteration.
+
+When advanced indices are next to each other transposing may be necessary.
+All necessary transposing is handled by :c:func:`PyArray_MapIterSwapAxes` and
+has to be handled by the caller unless :c:func:`PyArray_MapIterNew` is asked to
+allocate the result.
+
+After preparation, getting and setting are relatively straightforward,
+although the different modes of iteration need to be considered. Unless
+there is only a single indexing array during item getting, the validity of
+the indices is checked beforehand. Otherwise, it is handled in the inner
+loop itself for optimization.
+
+.. _ufuncs-internals:
+
+Universal functions
+===================
+
+.. seealso:: :ref:`ufuncs`, :ref:`ufuncs-basics`
+
+.. index::
+ single: ufunc
+
+Universal functions are callable objects that take :math:`N` inputs
+and produce :math:`M` outputs by wrapping basic 1-D loops that work
+element-by-element into full easy-to-use functions that seamlessly
+implement :ref:`broadcasting <basics.broadcasting>`,
+:ref:`type-checking <ufuncs.casting>`,
+:ref:`buffered coercion <use-of-internal-buffers>`, and
+:ref:`output-argument handling <ufuncs-output-type>`. New universal functions
+are normally created in C, although there is a mechanism for creating ufuncs
+from Python functions (:func:`frompyfunc`). The user must supply a 1-D loop that
+implements the basic function taking the input scalar values and
+placing the resulting scalars into the appropriate output slots as
+explained in implementation.
+
+
+Setup
+-----
+
+Every :class:`ufunc` calculation involves some overhead related to setting up
+the calculation. The practical significance of this overhead is that
+even though the actual calculation of the ufunc is very fast, you will
+be able to write array and type-specific code that will work faster
+for small arrays than the ufunc. In particular, using ufuncs to
+perform many calculations on 0-D arrays will be slower than other
+Python-based solutions (the silently-imported ``scalarmath`` module exists
+precisely to give array scalars the look-and-feel of ufunc based
+calculations with significantly reduced overhead).
+
+When a :class:`ufunc` is called, many things must be done. The information
+collected from these setup operations is stored in a loop object. This
+loop object is a C-structure (that could become a Python object but is
+not initialized as such because it is only used internally). This loop
+object has the layout needed to be used with :c:func:`PyArray_Broadcast`
+so that the broadcasting can be handled in the same way as it is handled in
+other sections of code.
+
+The first thing done is to look up in the thread-specific global
+dictionary the current values for the buffer-size, the error mask, and
+the associated error object. The state of the error mask controls what
+happens when an error condition is found. It should be noted that
+checking of the hardware error flags is only performed after each 1-D
+loop is executed. This means that if the input and output arrays are
+contiguous and of the correct type so that a single 1-D loop is
+performed, then the flags may not be checked until all elements of the
+array have been calculated. Looking up these values in a thread-specific
+dictionary takes time which is easily ignored for all but
+very small arrays.
+
+After checking the thread-specific global variables, the inputs are
+evaluated to determine how the ufunc should proceed and the input and
+output arrays are constructed if necessary. Any inputs which are not
+arrays are converted to arrays (using context if necessary). Which of
+the inputs are scalars (and therefore converted to 0-D arrays) is
+noted.
+
+Next, an appropriate 1-D loop is selected from the 1-D loops available
+to the :class:`ufunc` based on the input array types. This 1-D loop is selected
+by trying to match the signature of the datatypes of the inputs
+against the available signatures. The signatures corresponding to
+built-in types are stored in the :attr:`ufunc.types` member of the ufunc
+structure. The signatures corresponding to user-defined types are stored in a
+linked list of function information with the head element stored as a
+``CObject`` in the ``userloops`` dictionary keyed by the datatype number
+(the first user-defined type in the argument list is used as the key).
+The signatures are searched until a signature is found to which the
+input arrays can all be cast safely (ignoring any scalar arguments
+which are not allowed to determine the type of the result). The
+implication of this search procedure is that "lesser types" should be
+placed below "larger types" when the signatures are stored. If no 1-D
+loop is found, then an error is reported. Otherwise, the ``argument_list``
+is updated with the stored signature --- in case casting is necessary
+and to fix the output types assumed by the 1-D loop.
+
+If the ufunc has 2 inputs and 1 output and the second input is an
+``Object`` array then a special-case check is performed so that
+``NotImplemented`` is returned if the second input is not an ndarray, has
+the :obj:`~numpy.class.__array_priority__` attribute, and has an ``__r{op}__``
+special method. In this way, Python is signaled to give the other object a
+chance to complete the operation instead of using generic object-array
+calculations. This allows (for example) sparse matrices to override
+the multiplication operator 1-D loop.
+
+For input arrays that are smaller than the specified buffer size,
+copies are made of all non-contiguous, misaligned, or out-of-byteorder
+arrays to ensure that for small arrays, a single loop is
+used. Then, array iterators are created for all the input arrays and
+the resulting collection of iterators is broadcast to a single shape.
+
+The output arguments (if any) are then processed and any missing
+return arrays are constructed. If any provided output array doesn't
+have the correct type (or is misaligned) and is smaller than the
+buffer size, then a new output array is constructed with the special
+:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set. At the end of the function,
+:c:func:`PyArray_ResolveWritebackIfCopy` is called so that
+its contents will be copied back into the output array.
+Iterators for the output arguments are then processed.
+
+Finally, the decision is made about how to execute the looping
+mechanism to ensure that all elements of the input arrays are combined
+to produce the output arrays of the correct type. The options for loop
+execution are one-loop (for :term:`contiguous`, aligned, and correct data
+type), strided-loop (for non-contiguous but still aligned and correct
+data type), and a buffered loop (for misaligned or incorrect data
+type situations). Depending on which execution method is called for,
+the loop is then set up and computed.
+
+
+Function call
+-------------
+
+This section describes how the basic universal function computation loop is
+set up and executed for each of the three different kinds of execution. If
+:c:data:`NPY_ALLOW_THREADS` is defined during compilation, then as long as
+no object arrays are involved, the Python Global Interpreter Lock (GIL) is
+released prior to calling the loops. It is re-acquired if necessary to
+handle error conditions. The hardware error flags are checked only after
+the 1-D loop is completed.
+
+
+One loop
+^^^^^^^^
+
+This is the simplest case of all. The ufunc is executed by calling the
+underlying 1-D loop exactly once. This is possible only when we have
+aligned data of the correct type (including byteorder) for both input
+and output and all arrays have uniform strides (either :term:`contiguous`,
+0-D, or 1-D). In this case, the 1-D computational loop is called once
+to compute the calculation for the entire array. Note that the
+hardware error flags are only checked after the entire calculation is
+complete.
+
+
+Strided loop
+^^^^^^^^^^^^
+
+When the input and output arrays are aligned and of the correct type,
+but the striding is not uniform (non-contiguous and 2-D or larger),
+then a second looping structure is employed for the calculation. This
+approach converts all of the iterators for the input and output
+arguments to iterate over all but the largest dimension. The inner
+loop is then handled by the underlying 1-D computational loop. The
+outer loop is a standard iterator loop on the converted iterators. The
+hardware error flags are checked after each 1-D loop is completed.
+
+
+Buffered loop
+^^^^^^^^^^^^^
+
+This is the code that handles the situation whenever the input and/or
+output arrays are either misaligned or of the wrong datatype
+(including being byteswapped) from what the underlying 1-D loop
+expects. The arrays are also assumed to be non-contiguous. The code
+works very much like the strided-loop except for the inner 1-D loop is
+modified so that pre-processing is performed on the inputs and post-processing
+is performed on the outputs in ``bufsize`` chunks (where
+``bufsize`` is a user-settable parameter). The underlying 1-D
+computational loop is called on data that is copied over (if it needs
+to be). The setup code and the loop code is considerably more
+complicated in this case because it has to handle:
+
+- memory allocation of the temporary buffers
+
+- deciding whether or not to use buffers on the input and output data
+ (misaligned and/or wrong datatype)
+
+- copying and possibly casting data for any inputs or outputs for which
+ buffers are necessary.
+
+- special-casing ``Object`` arrays so that reference counts are properly
+ handled when copies and/or casts are necessary.
+
+- breaking up the inner 1-D loop into ``bufsize`` chunks (with a possible
+ remainder).
+
+Again, the hardware error flags are checked at the end of each 1-D
+loop.
+
+
+Final output manipulation
+-------------------------
+
+Ufuncs allow other array-like classes to be passed seamlessly through
+the interface in that inputs of a particular class will induce the
+outputs to be of that same class. The mechanism by which this works is
+the following. If any of the inputs are not ndarrays and define the
+:obj:`~numpy.class.__array_wrap__` method, then the class with the largest
+:obj:`~numpy.class.__array_priority__` attribute determines the type of all the
+outputs (with the exception of any output arrays passed in). The
+:obj:`~numpy.class.__array_wrap__` method of the input array will be called
+with the ndarray being returned from the ufunc as its input. There are two
+calling styles of the :obj:`~numpy.class.__array_wrap__` function supported.
+The first takes the ndarray as the first argument and a tuple of "context" as
+the second argument. The context is (ufunc, arguments, output argument
+number). This is the first call tried. If a ``TypeError`` occurs, then the
+function is called with just the ndarray as the first argument.
+
+
+Methods
+-------
+
+There are three methods of ufuncs that require calculation similar to
+the general-purpose ufuncs. These are :meth:`ufunc.reduce`,
+:meth:`ufunc.accumulate`, and :meth:`ufunc.reduceat`. Each of these
+methods requires a setup command followed by a
+loop. There are four loop styles possible for the methods
+corresponding to no-elements, one-element, strided-loop, and buffered-loop.
+These are the same basic loop styles as implemented for the
+general-purpose function call except for the no-element and one-element
+cases which are special-cases occurring when the input array
+objects have 0 and 1 elements respectively.
+
+
+Setup
+^^^^^
+
+The setup function for all three methods is ``construct_reduce``.
+This function creates a reducing loop object and fills it with the
+parameters needed to complete the loop. All of the methods only work
+on ufuncs that take 2 inputs and return 1 output. Therefore, the
+underlying 1-D loop is selected assuming a signature of ``[otype,
+otype, otype]`` where ``otype`` is the requested reduction
+datatype. The buffer size and error handling are then retrieved from
+(per-thread) global storage. For small arrays that are misaligned or
+have incorrect datatype, a copy is made so that the un-buffered
+section of code is used. Then, the looping strategy is selected. If
+there is 1 element or 0 elements in the array, then a simple looping
+method is selected. If the array is not misaligned and has the
+correct datatype, then strided looping is selected. Otherwise,
+buffered looping must be performed. Looping parameters are then
+established, and the return array is constructed. The output array is
+of a different :term:`shape` depending on whether the method is
+:meth:`reduce <ufunc.reduce>`, :meth:`accumulate <ufunc.accumulate>`, or
+:meth:`reduceat <ufunc.reduceat>`. If an output array is already provided, then
+its shape is checked. If the output array is not C-contiguous,
+aligned, and of the correct data type, then a temporary copy is made
+with the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set. In this way, the methods
+will be able to work with a well-behaved output array but the result will be
+copied back into the true output array when
+:c:func:`PyArray_ResolveWritebackIfCopy` is called at function completion.
+Finally, iterators are set up to loop over the correct :term:`axis`
+(depending on the value of axis provided to the method) and the setup
+routine returns to the actual computation routine.
+
+
+:meth:`Reduce <ufunc.reduce>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+ triple: ufunc; methods; reduce
+
+All of the ufunc methods use the same underlying 1-D computational
+loops with input and output arguments adjusted so that the appropriate
+reduction takes place. For example, the key to the functioning of
+:meth:`reduce <ufunc.reduce>` is that the 1-D loop is called with the output
+and the second input pointing to the same position in memory and both having
+a step-size of 0. The first input is pointing to the input array with a
+step-size given by the appropriate stride for the selected axis. In this
+way, the operation performed is
+
+.. math::
+ :nowrap:
+
+ \begin{align*}
+ o & = & i[0] \\
+ o & = & i[k]\textrm{<op>}o\quad k=1\ldots N
+ \end{align*}
+
+where :math:`N+1` is the number of elements in the input, :math:`i`,
+:math:`o` is the output, and :math:`i[k]` is the
+:math:`k^{\textrm{th}}` element of :math:`i` along the selected axis.
+This basic operation is repeated for arrays with greater than 1
+dimension so that the reduction takes place for every 1-D sub-array
+along the selected axis. An iterator with the selected dimension
+removed handles this looping.
+
+For buffered loops, care must be taken to copy and cast data before
+the loop function is called because the underlying loop expects
+aligned data of the correct datatype (including byteorder). The
+buffered loop must handle this copying and casting prior to calling
+the loop function on chunks no greater than the user-specified
+``bufsize``.
+
+
+:meth:`Accumulate <ufunc.accumulate>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+ triple: ufunc; methods; accumulate
+
+The :meth:`accumulate <ufunc.accumulate>` method is very similar to
+the :meth:`reduce <ufunc.reduce>` method in that
+the output and the second input both point to the output. The
+difference is that the second input points to memory one stride behind
+the current output pointer. Thus, the operation performed is
+
+.. math::
+ :nowrap:
+
+ \begin{align*}
+ o[0] & = & i[0] \\
+ o[k] & = & i[k]\textrm{<op>}o[k-1]\quad k=1\ldots N.
+ \end{align*}
+
+The output has the same shape as the input and each 1-D loop operates
+over :math:`N` elements when the shape in the selected axis is :math:`N+1`.
+Again, buffered loops take care to copy and cast the data before
+calling the underlying 1-D computational loop.
+
+
+:meth:`Reduceat <ufunc.reduceat>`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. index::
+ triple: ufunc; methods; reduceat
+ single: ufunc
+
+The :meth:`reduceat <ufunc.reduceat>` function is a generalization of both the
+:meth:`reduce <ufunc.reduce>` and :meth:`accumulate <ufunc.accumulate>`
+functions. It implements a :meth:`reduce <ufunc.reduce>` over ranges of
+the input array specified by indices. The extra indices argument is checked to
+be sure that every input is not too large for the input array along
+the selected dimension before the loop calculations take place. The
+loop implementation is handled using code that is very similar to the
+:meth:`reduce <ufunc.reduce>` code repeated as many times as there are elements
+in the indices input. In particular: the first input pointer passed to the
+underlying 1-D computational loop points to the input array at the
+correct location indicated by the index array. In addition, the output
+pointer and the second input pointer passed to the underlying 1-D loop
+point to the same position in memory. The size of the 1-D
+computational loop is fixed to be the difference between the current
+index and the next index (when the current index is the last index,
+then the next index is assumed to be the length of the array along the
+selected dimension). In this way, the 1-D loop will implement a
+:meth:`reduce <ufunc.reduce>` over the specified indices.
+
+Misaligned or a loop datatype that does not match the input and/or
+output datatype is handled using buffered code wherein data is
+copied to a temporary buffer and cast to the correct datatype if
+necessary prior to calling the underlying 1-D function. The temporary
+buffers are created in (element) sizes no bigger than the user
+settable buffer-size value. Thus, the loop must be flexible enough to
+call the underlying 1-D computational loop enough times to complete
+the total calculation in chunks no bigger than the buffer-size.
diff --git a/doc/source/dev/internals.rst b/doc/source/dev/internals.rst
new file mode 100644
index 000000000..14e5f3141
--- /dev/null
+++ b/doc/source/dev/internals.rst
@@ -0,0 +1,175 @@
+.. currentmodule:: numpy
+
+.. _numpy-internals:
+
+*************************************
+Internal organization of NumPy arrays
+*************************************
+
+Understanding a bit about how NumPy arrays are handled under the covers
+helps in understanding NumPy better. This section will not go into great detail.
+Those wishing to understand the full details are requested to refer to Travis
+Oliphant's book `Guide to NumPy <http://web.mit.edu/dvp/Public/numpybook.pdf>`_.
+
+NumPy arrays consist of two major components: the raw array data (from now on,
+referred to as the data buffer), and the information about the raw array data.
+The data buffer is typically what people think of as arrays in C or Fortran,
+a :term:`contiguous` (and fixed) block of memory containing fixed-sized data
+items. NumPy also contains a significant set of data that describes how to
+interpret the data in the data buffer. This extra information contains (among
+other things):
+
+ 1) The basic data element's size in bytes.
+ 2) The start of the data within the data buffer (an offset relative to the
+ beginning of the data buffer).
+ 3) The number of :term:`dimensions <dimension>` and the size of each dimension.
+ 4) The separation between elements for each dimension (the :term:`stride`).
+ This does not have to be a multiple of the element size.
+ 5) The byte order of the data (which may not be the native byte order).
+ 6) Whether the buffer is read-only.
+ 7) Information (via the :class:`dtype` object) about the interpretation of the
+ basic data element. The basic data element may be as simple as an int or a
+ float, or it may be a compound object (e.g.,
+ :term:`struct-like <structured data type>`), a fixed character field,
+ or Python object pointers.
+ 8) Whether the array is to be interpreted as :term:`C-order <C order>`
+ or :term:`Fortran-order <Fortran order>`.
+
+This arrangement allows for the very flexible use of arrays. One thing that it
+allows is simple changes to the metadata to change the interpretation of the
+array buffer. Changing the byteorder of the array is a simple change involving
+no rearrangement of the data. The :term:`shape` of the array can be changed very
+easily without changing anything in the data buffer or any data copying at all.
+
+Among other things that are made possible is one can create a new array metadata
+object that uses the same data buffer
+to create a new :term:`view` of that data buffer that has a different
+interpretation of the buffer (e.g., different shape, offset, byte order,
+strides, etc) but shares the same data bytes. Many operations in NumPy do just
+this such as :term:`slicing <python:slice>`. Other operations, such as
+transpose, don't move data elements around in the array, but rather change the
+information about the shape and strides so that the indexing of the array
+changes, but the data in the array doesn't move.
+
+Typically these new versions of the array metadata, sharing the same data buffer, are
+new views into the data buffer. There is a different :class:`ndarray` object,
+but it uses the same data buffer. This is why it is necessary to force copies
+through the use of the :func:`copy` method if one really wants to make a new
+and independent copy of the data buffer.
+
+New views into arrays mean the object reference counts for the data buffer
+increase. Simply doing away with the original array object will not remove the
+data buffer if other views of it still exist.
+
+Multidimensional array indexing order issues
+============================================
+
+.. seealso:: :ref:`basics.indexing`
+
+What is the right way to index
+multi-dimensional arrays? Before you jump to conclusions about the one and
+true way to index multi-dimensional arrays, it pays to understand why this is
+a confusing issue. This section will try to explain in detail how NumPy
+indexing works and why we adopt the convention we do for images, and when it
+may be appropriate to adopt other conventions.
+
+The first thing to understand is
+that there are two conflicting conventions for indexing 2-dimensional arrays.
+Matrix notation uses the first index to indicate which row is being selected and
+the second index to indicate which column is selected. This is opposite the
+geometrically-oriented convention for images where people generally think the
+first index represents x position (i.e., column) and the second represents y
+position (i.e., row). This alone is the source of much confusion;
+matrix-oriented users and image-oriented users expect two different things with
+regard to indexing.
+
+The second issue to understand is how indices correspond
+to the order in which the array is stored in memory. In Fortran, the first index
+is the most rapidly varying index when moving through the elements of a
+two-dimensional array as it is stored in memory. If you adopt the matrix
+convention for indexing, then this means the matrix is stored one column at a
+time (since the first index moves to the next row as it changes). Thus Fortran
+is considered a Column-major language. C has just the opposite convention. In
+C, the last index changes most rapidly as one moves through the array as
+stored in memory. Thus C is a Row-major language. The matrix is stored by
+rows. Note that in both cases it presumes that the matrix convention for
+indexing is being used, i.e., for both Fortran and C, the first index is the
+row. Note this convention implies that the indexing convention is invariant
+and that the data order changes to keep that so.
+
+But that's not the only way
+to look at it. Suppose one has large two-dimensional arrays (images or
+matrices) stored in data files. Suppose the data are stored by rows rather than
+by columns. If we are to preserve our index convention (whether matrix or
+image) that means that depending on the language we use, we may be forced to
+reorder the data if it is read into memory to preserve our indexing
+convention. For example, if we read row-ordered data into memory without
+reordering, it will match the matrix indexing convention for C, but not for
+Fortran. Conversely, it will match the image indexing convention for Fortran,
+but not for C. For C, if one is using data stored in row order, and one wants
+to preserve the image index convention, the data must be reordered when
+reading into memory.
+
+In the end, what you do for Fortran or C depends on
+which is more important, not reordering data or preserving the indexing
+convention. For large images, reordering data is potentially expensive, and
+often the indexing convention is inverted to avoid that.
+
+The situation with
+NumPy makes this issue yet more complicated. The internal machinery of NumPy
+arrays is flexible enough to accept any ordering of indices. One can simply
+reorder indices by manipulating the internal :term:`stride` information for
+arrays without reordering the data at all. NumPy will know how to map the new
+index order to the data without moving the data.
+
+So if this is true, why not choose
+the index order that matches what you most expect? In particular, why not define
+row-ordered images to use the image convention? (This is sometimes referred
+to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
+order options for array ordering in NumPy.) The drawback of doing this is
+potential performance penalties. It's common to access the data sequentially,
+either implicitly in array operations or explicitly by looping over rows of an
+image. When that is done, then the data will be accessed in non-optimal order.
+As the first index is incremented, what is actually happening is that elements
+spaced far apart in memory are being sequentially accessed, with usually poor
+memory access speeds. For example, for a two-dimensional image ``im`` defined so
+that ``im[0, 10]`` represents the value at ``x = 0``, ``y = 10``. To be
+consistent with usual Python behavior then ``im[0]`` would represent a column
+at ``x = 0``. Yet that data would be spread over the whole array since the data
+are stored in row order. Despite the flexibility of NumPy's indexing, it can't
+really paper over the fact basic operations are rendered inefficient because of
+data order or that getting contiguous subarrays is still awkward (e.g.,
+``im[:, 0]`` for the first row, vs ``im[0]``). Thus one can't use an idiom such
+as ``for row in im``; ``for col in im`` does work, but doesn't yield contiguous
+column data.
+
+As it turns out, NumPy is
+smart enough when dealing with :ref:`ufuncs <ufuncs-internals>` to determine
+which index is the most rapidly varying one in memory and uses that for the
+innermost loop. Thus for ufuncs, there is no large intrinsic advantage to
+either approach in most cases. On the other hand, use of :attr:`ndarray.flat`
+with a FORTRAN ordered array will lead to non-optimal memory access as adjacent
+elements in the flattened array (iterator, actually) are not contiguous in
+memory.
+
+Indeed, the fact is that Python
+indexing on lists and other sequences naturally leads to an outside-to-inside
+ordering (the first index gets the largest grouping, the next largest,
+and the last gets the smallest element). Since image data are normally stored
+in rows, this corresponds to the position within rows being the last item
+indexed.
+
+If you do want to use Fortran ordering, realize that
+there are two approaches to consider: 1) accept that the first index is just not
+the most rapidly changing in memory and have all your I/O routines reorder
+your data when going from memory to disk or vice versa, or 2) use NumPy's
+mechanism for mapping the first index to the most rapidly varying data. We
+recommend the former if possible. The disadvantage of the latter is that many
+of NumPy's functions will yield arrays without Fortran ordering unless you are
+careful to use the ``order`` keyword. Doing this would be highly inconvenient.
+
+Otherwise, we recommend simply learning to reverse the usual order of indices
+when accessing elements of an array. Granted, it goes against the grain, but
+it is more in line with Python semantics and the natural order of the data.
+
+
diff --git a/doc/source/dev/underthehood.rst b/doc/source/dev/underthehood.rst
index 4dae48689..c0f37fd5b 100644
--- a/doc/source/dev/underthehood.rst
+++ b/doc/source/dev/underthehood.rst
@@ -4,4 +4,12 @@
Under-the-hood Documentation for developers
===========================================
-To be completed.
+These documents are intended as a low-level look into NumPy; focused
+towards developers.
+
+.. toctree::
+ :maxdepth: 1
+
+ internals
+ internals.code-explanations
+ alignment
diff --git a/doc/source/doxyfile b/doc/source/doxyfile
new file mode 100644
index 000000000..ea45b9578
--- /dev/null
+++ b/doc/source/doxyfile
@@ -0,0 +1,340 @@
+# Doxyfile 1.8.18
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+DOXYFILE_ENCODING = UTF-8
+PROJECT_NAME = NumPy
+PROJECT_NUMBER =
+PROJECT_BRIEF = "NumPy is the fundamental package for scientific computing in Python"
+PROJECT_LOGO =
+OUTPUT_DIRECTORY = @ROOT_DIR/doc/build/doxygen
+CREATE_SUBDIRS = NO
+ALLOW_UNICODE_NAMES = NO
+OUTPUT_LANGUAGE = English
+OUTPUT_TEXT_DIRECTION = None
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH = @ROOT_DIR
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = YES
+JAVADOC_BANNER = NO
+QT_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 4
+ALIASES =
+ALIASES += "rst=\verbatim embed:rst:leading-asterisk"
+ALIASES += "endrst=\endverbatim"
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+OPTIMIZE_FOR_FORTRAN = NO
+OPTIMIZE_OUTPUT_VHDL = NO
+OPTIMIZE_OUTPUT_SLICE = NO
+EXTENSION_MAPPING =
+MARKDOWN_SUPPORT = YES
+TOC_INCLUDE_HEADINGS = 5
+AUTOLINK_SUPPORT = YES
+BUILTIN_STL_SUPPORT = NO
+CPP_CLI_SUPPORT = NO
+SIP_SUPPORT = NO
+IDL_PROPERTY_SUPPORT = YES
+DISTRIBUTE_GROUP_DOC = NO
+GROUP_NESTED_COMPOUNDS = NO
+SUBGROUPING = YES
+INLINE_GROUPED_CLASSES = NO
+INLINE_SIMPLE_STRUCTS = NO
+TYPEDEF_HIDES_STRUCT = NO
+LOOKUP_CACHE_SIZE = 0
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL = NO
+EXTRACT_PRIVATE = NO
+EXTRACT_PRIV_VIRTUAL = NO
+EXTRACT_PACKAGE = NO
+EXTRACT_STATIC = NO
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+EXTRACT_ANON_NSPACES = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = YES
+HIDE_SCOPE_NAMES = NO
+HIDE_COMPOUND_REFERENCE= NO
+SHOW_INCLUDE_FILES = YES
+SHOW_GROUPED_MEMB_INC = NO
+FORCE_LOCAL_INCLUDES = NO
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_MEMBERS_CTORS_1ST = NO
+SORT_GROUP_NAMES = NO
+SORT_BY_SCOPE_NAME = NO
+STRICT_PROTO_MATCHING = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = YES
+SHOW_FILES = YES
+SHOW_NAMESPACES = YES
+FILE_VERSION_FILTER =
+LAYOUT_FILE =
+CITE_BIB_FILES =
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET = no
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_AS_ERROR = NO
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT =
+INPUT_ENCODING = UTF-8
+FILE_PATTERNS          = *.h *.hpp
+RECURSIVE = YES
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXCLUDE_SYMBOLS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS =
+EXAMPLE_RECURSIVE = NO
+IMAGE_PATH =
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+FILTER_SOURCE_PATTERNS =
+USE_MDFILE_AS_MAINPAGE =
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER = NO
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = NO
+REFERENCES_RELATION = NO
+REFERENCES_LINK_SOURCE = YES
+SOURCE_TOOLTIPS = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = YES
+CLANG_ASSISTED_PARSING = NO
+CLANG_OPTIONS =
+CLANG_DATABASE_PATH =
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX = YES
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML = NO
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER =
+HTML_FOOTER =
+HTML_STYLESHEET =
+HTML_EXTRA_STYLESHEET =
+HTML_EXTRA_FILES =
+HTML_COLORSTYLE_HUE = 220
+HTML_COLORSTYLE_SAT = 100
+HTML_COLORSTYLE_GAMMA = 80
+HTML_TIMESTAMP = NO
+HTML_DYNAMIC_MENUS = YES
+HTML_DYNAMIC_SECTIONS = NO
+HTML_INDEX_NUM_ENTRIES = 100
+GENERATE_DOCSET = NO
+DOCSET_FEEDNAME = "Doxygen generated docs"
+DOCSET_BUNDLE_ID = org.doxygen.Project
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+DOCSET_PUBLISHER_NAME = Publisher
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+CHM_INDEX_ENCODING =
+BINARY_TOC = NO
+TOC_EXPAND = NO
+GENERATE_QHP = NO
+QCH_FILE =
+QHP_NAMESPACE = org.doxygen.Project
+QHP_VIRTUAL_FOLDER = doc
+QHP_CUST_FILTER_NAME =
+QHP_CUST_FILTER_ATTRS =
+QHP_SECT_FILTER_ATTRS =
+QHG_LOCATION =
+GENERATE_ECLIPSEHELP = NO
+ECLIPSE_DOC_ID = org.doxygen.Project
+DISABLE_INDEX = NO
+GENERATE_TREEVIEW = NO
+ENUM_VALUES_PER_LINE = 4
+TREEVIEW_WIDTH = 250
+EXT_LINKS_IN_WINDOW = NO
+HTML_FORMULA_FORMAT = png
+FORMULA_FONTSIZE = 10
+FORMULA_TRANSPARENT = YES
+FORMULA_MACROFILE =
+USE_MATHJAX = NO
+MATHJAX_FORMAT = HTML-CSS
+MATHJAX_RELPATH = https://cdn.jsdelivr.net/npm/mathjax@@2
+MATHJAX_EXTENSIONS =
+MATHJAX_CODEFILE =
+SEARCHENGINE = YES
+SERVER_BASED_SEARCH = NO
+EXTERNAL_SEARCH = NO
+SEARCHENGINE_URL =
+SEARCHDATA_FILE = searchdata.xml
+EXTERNAL_SEARCH_ID =
+EXTRA_SEARCH_MAPPINGS =
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX = NO
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME =
+MAKEINDEX_CMD_NAME = makeindex
+LATEX_MAKEINDEX_CMD = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4
+EXTRA_PACKAGES =
+LATEX_HEADER =
+LATEX_FOOTER =
+LATEX_EXTRA_STYLESHEET =
+LATEX_EXTRA_FILES =
+PDF_HYPERLINKS = YES
+USE_PDFLATEX = YES
+LATEX_BATCHMODE = NO
+LATEX_HIDE_INDICES = NO
+LATEX_SOURCE_CODE = NO
+LATEX_BIB_STYLE = plain
+LATEX_TIMESTAMP = NO
+LATEX_EMOJI_DIRECTORY =
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+RTF_SOURCE_CODE = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_SUBDIR =
+MAN_LINKS = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML = YES
+XML_OUTPUT = xml
+XML_PROGRAMLISTING = YES
+XML_NS_MEMB_FILE_SCOPE = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+GENERATE_DOCBOOK = NO
+DOCBOOK_OUTPUT = docbook
+DOCBOOK_PROGRAMLISTING = NO
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = YES
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+EXTERNAL_PAGES = YES
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS = YES
+DIA_PATH =
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+DOT_NUM_THREADS = 0
+DOT_FONTNAME = Helvetica
+DOT_FONTSIZE = 10
+DOT_FONTPATH =
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+UML_LOOK = NO
+UML_LIMIT_NUM_FIELDS = 10
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = YES
+INCLUDED_BY_GRAPH = YES
+CALL_GRAPH = NO
+CALLER_GRAPH = NO
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = png
+INTERACTIVE_SVG = NO
+DOT_PATH =
+DOTFILE_DIRS =
+MSCFILE_DIRS =
+DIAFILE_DIRS =
+PLANTUML_JAR_PATH =
+PLANTUML_CFG_FILE =
+PLANTUML_INCLUDE_PATH =
+DOT_GRAPH_MAX_NODES = 50
+MAX_DOT_GRAPH_DEPTH = 0
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
diff --git a/doc/source/f2py/advanced.rst b/doc/source/f2py/advanced.rst
index 1b4625dde..c8efbaadb 100644
--- a/doc/source/f2py/advanced.rst
+++ b/doc/source/f2py/advanced.rst
@@ -1,48 +1,46 @@
-======================
-Advanced F2PY usages
-======================
+========================
+Advanced F2PY use cases
+========================
-Adding self-written functions to F2PY generated modules
-=======================================================
+Adding user-defined functions to F2PY generated modules
+=========================================================
-Self-written Python C/API functions can be defined inside
+User-defined Python C/API functions can be defined inside
signature files using ``usercode`` and ``pymethoddef`` statements
(they must be used inside the ``python module`` block). For
example, the following signature file ``spam.pyf``
-.. include:: spam.pyf
+.. include:: ./code/spam.pyf
:literal:
wraps the C library function ``system()``::
f2py -c spam.pyf
-In Python:
+In Python this can then be used as:
-.. include:: spam_session.dat
- :literal:
-
-Modifying the dictionary of a F2PY generated module
-===================================================
+.. literalinclude:: ./code/results/spam_session.dat
+ :language: python
-The following example illustrates how to add user-defined
-variables to a F2PY generated extension module. Given the following
-signature file
+Adding user-defined variables
+==============================
-.. include:: var.pyf
- :literal:
+The following example illustrates how to add user-defined variables to a F2PY
+generated extension module by modifying the dictionary of a F2PY generated
+module. Consider the following signature file (compiled with ``f2py -c var.pyf``):
-compile it as ``f2py -c var.pyf``.
+.. literalinclude:: ./code/var.pyf
+ :language: fortran
Notice that the second ``usercode`` statement must be defined inside
-an ``interface`` block and where the module dictionary is available through
-the variable ``d`` (see ``f2py var.pyf``-generated ``varmodule.c`` for
+an ``interface`` block and the module dictionary is available through
+the variable ``d`` (see ``varmodule.c`` generated by ``f2py var.pyf`` for
additional details).
-In Python:
+Usage in Python:
-.. include:: var_session.dat
- :literal:
+.. literalinclude:: ./code/results/var_session.dat
+ :language: python
Dealing with KIND specifiers
@@ -70,7 +68,7 @@ Use the ``--f2cmap`` command-line option to pass the file name to F2PY.
By default, F2PY assumes file name is ``.f2py_f2cmap`` in the current
working directory.
-Or more generally, the f2cmap file must contain a dictionary
+More generally, the f2cmap file must contain a dictionary
with items::
<Fortran typespec> : {<selector_expr>:<C type>}
@@ -79,7 +77,7 @@ that defines mapping between Fortran type::
<Fortran typespec>([kind=]<selector_expr>)
-and the corresponding <C type>. <C type> can be one of the following::
+and the corresponding <C type>. The <C type> can be one of the following::
char
signed_char
@@ -94,4 +92,4 @@ and the corresponding <C type>. <C type> can be one of the following::
complex_long_double
string
-For more information, see F2Py source code ``numpy/f2py/capi_maps.py``.
+For more information, see the F2Py source code ``numpy/f2py/capi_maps.py``.
diff --git a/doc/source/f2py/allocarr.f90 b/doc/source/f2py/code/allocarr.f90
index e0d6c2ec8..e0d6c2ec8 100644
--- a/doc/source/f2py/allocarr.f90
+++ b/doc/source/f2py/code/allocarr.f90
diff --git a/doc/source/f2py/array.f b/doc/source/f2py/code/array.f
index ef20c9c20..ef20c9c20 100644
--- a/doc/source/f2py/array.f
+++ b/doc/source/f2py/code/array.f
diff --git a/doc/source/f2py/calculate.f b/doc/source/f2py/code/calculate.f
index 4ff570d28..4ff570d28 100644
--- a/doc/source/f2py/calculate.f
+++ b/doc/source/f2py/code/calculate.f
diff --git a/doc/source/f2py/callback.f b/doc/source/f2py/code/callback.f
index d5cfc7574..d5cfc7574 100644
--- a/doc/source/f2py/callback.f
+++ b/doc/source/f2py/code/callback.f
diff --git a/doc/source/f2py/callback2.pyf b/doc/source/f2py/code/callback2.pyf
index 3d77eed24..3d77eed24 100644
--- a/doc/source/f2py/callback2.pyf
+++ b/doc/source/f2py/code/callback2.pyf
diff --git a/doc/source/f2py/common.f b/doc/source/f2py/code/common.f
index b098ab20c..b098ab20c 100644
--- a/doc/source/f2py/common.f
+++ b/doc/source/f2py/code/common.f
diff --git a/doc/source/f2py/extcallback.f b/doc/source/f2py/code/extcallback.f
index 9a800628e..9a800628e 100644
--- a/doc/source/f2py/extcallback.f
+++ b/doc/source/f2py/code/extcallback.f
diff --git a/doc/source/f2py/fib1.f b/doc/source/f2py/code/fib1.f
index cfbb1eea0..cfbb1eea0 100644
--- a/doc/source/f2py/fib1.f
+++ b/doc/source/f2py/code/fib1.f
diff --git a/doc/source/f2py/fib1.pyf b/doc/source/f2py/code/fib1.pyf
index 3d6cc0a54..3d6cc0a54 100644
--- a/doc/source/f2py/fib1.pyf
+++ b/doc/source/f2py/code/fib1.pyf
diff --git a/doc/source/f2py/fib2.pyf b/doc/source/f2py/code/fib2.pyf
index 4a5ae29f1..4a5ae29f1 100644
--- a/doc/source/f2py/fib2.pyf
+++ b/doc/source/f2py/code/fib2.pyf
diff --git a/doc/source/f2py/fib3.f b/doc/source/f2py/code/fib3.f
index 08b050cd2..08b050cd2 100644
--- a/doc/source/f2py/fib3.f
+++ b/doc/source/f2py/code/fib3.f
diff --git a/doc/source/f2py/ftype.f b/doc/source/f2py/code/ftype.f
index cabbb9e2d..cabbb9e2d 100644
--- a/doc/source/f2py/ftype.f
+++ b/doc/source/f2py/code/ftype.f
diff --git a/doc/source/f2py/moddata.f90 b/doc/source/f2py/code/moddata.f90
index 0e98f0467..0e98f0467 100644
--- a/doc/source/f2py/moddata.f90
+++ b/doc/source/f2py/code/moddata.f90
diff --git a/doc/source/f2py/allocarr_session.dat b/doc/source/f2py/code/results/allocarr_session.dat
index ba168c22a..ba168c22a 100644
--- a/doc/source/f2py/allocarr_session.dat
+++ b/doc/source/f2py/code/results/allocarr_session.dat
diff --git a/doc/source/f2py/array_session.dat b/doc/source/f2py/code/results/array_session.dat
index 714c03651..714c03651 100644
--- a/doc/source/f2py/array_session.dat
+++ b/doc/source/f2py/code/results/array_session.dat
diff --git a/doc/source/f2py/calculate_session.dat b/doc/source/f2py/code/results/calculate_session.dat
index c4c380700..c4c380700 100644
--- a/doc/source/f2py/calculate_session.dat
+++ b/doc/source/f2py/code/results/calculate_session.dat
diff --git a/doc/source/f2py/callback_session.dat b/doc/source/f2py/code/results/callback_session.dat
index 460c9ce28..460c9ce28 100644
--- a/doc/source/f2py/callback_session.dat
+++ b/doc/source/f2py/code/results/callback_session.dat
diff --git a/doc/source/f2py/common_session.dat b/doc/source/f2py/code/results/common_session.dat
index 2595bfbd5..2595bfbd5 100644
--- a/doc/source/f2py/common_session.dat
+++ b/doc/source/f2py/code/results/common_session.dat
diff --git a/doc/source/f2py/compile_session.dat b/doc/source/f2py/code/results/compile_session.dat
index 5c42742be..5c42742be 100644
--- a/doc/source/f2py/compile_session.dat
+++ b/doc/source/f2py/code/results/compile_session.dat
diff --git a/doc/source/f2py/extcallback_session.dat b/doc/source/f2py/code/results/extcallback_session.dat
index 5b97ab7cf..5b97ab7cf 100644
--- a/doc/source/f2py/extcallback_session.dat
+++ b/doc/source/f2py/code/results/extcallback_session.dat
diff --git a/doc/source/f2py/ftype_session.dat b/doc/source/f2py/code/results/ftype_session.dat
index e39cc128d..e39cc128d 100644
--- a/doc/source/f2py/ftype_session.dat
+++ b/doc/source/f2py/code/results/ftype_session.dat
diff --git a/doc/source/f2py/moddata_session.dat b/doc/source/f2py/code/results/moddata_session.dat
index 824bd86fc..824bd86fc 100644
--- a/doc/source/f2py/moddata_session.dat
+++ b/doc/source/f2py/code/results/moddata_session.dat
diff --git a/doc/source/f2py/run_main_session.dat b/doc/source/f2py/code/results/run_main_session.dat
index be6cacd22..be6cacd22 100644
--- a/doc/source/f2py/run_main_session.dat
+++ b/doc/source/f2py/code/results/run_main_session.dat
diff --git a/doc/source/f2py/scalar_session.dat b/doc/source/f2py/code/results/scalar_session.dat
index 3bb45ed68..3bb45ed68 100644
--- a/doc/source/f2py/scalar_session.dat
+++ b/doc/source/f2py/code/results/scalar_session.dat
diff --git a/doc/source/f2py/spam_session.dat b/doc/source/f2py/code/results/spam_session.dat
index bd5832d88..bd5832d88 100644
--- a/doc/source/f2py/spam_session.dat
+++ b/doc/source/f2py/code/results/spam_session.dat
diff --git a/doc/source/f2py/string_session.dat b/doc/source/f2py/code/results/string_session.dat
index e8f7854d9..e8f7854d9 100644
--- a/doc/source/f2py/string_session.dat
+++ b/doc/source/f2py/code/results/string_session.dat
diff --git a/doc/source/f2py/var_session.dat b/doc/source/f2py/code/results/var_session.dat
index fb0f798bf..fb0f798bf 100644
--- a/doc/source/f2py/var_session.dat
+++ b/doc/source/f2py/code/results/var_session.dat
diff --git a/doc/source/f2py/scalar.f b/doc/source/f2py/code/scalar.f
index c22f639ed..c22f639ed 100644
--- a/doc/source/f2py/scalar.f
+++ b/doc/source/f2py/code/scalar.f
diff --git a/doc/source/f2py/setup_example.py b/doc/source/f2py/code/setup_example.py
index 479acc004..479acc004 100644
--- a/doc/source/f2py/setup_example.py
+++ b/doc/source/f2py/code/setup_example.py
diff --git a/doc/source/f2py/spam.pyf b/doc/source/f2py/code/spam.pyf
index 21ea18b77..21ea18b77 100644
--- a/doc/source/f2py/spam.pyf
+++ b/doc/source/f2py/code/spam.pyf
diff --git a/doc/source/f2py/string.f b/doc/source/f2py/code/string.f
index 9246f02e7..9246f02e7 100644
--- a/doc/source/f2py/string.f
+++ b/doc/source/f2py/code/string.f
diff --git a/doc/source/f2py/var.pyf b/doc/source/f2py/code/var.pyf
index 8275ff3af..8275ff3af 100644
--- a/doc/source/f2py/var.pyf
+++ b/doc/source/f2py/code/var.pyf
diff --git a/doc/source/f2py/distutils.rst b/doc/source/f2py/distutils.rst
index 4cf30045e..575dacdff 100644
--- a/doc/source/f2py/distutils.rst
+++ b/doc/source/f2py/distutils.rst
@@ -4,16 +4,17 @@ Using via `numpy.distutils`
.. currentmodule:: numpy.distutils.core
-:mod:`numpy.distutils` is part of NumPy extending standard Python ``distutils``
-to deal with Fortran sources and F2PY signature files, e.g. compile Fortran
-sources, call F2PY to construct extension modules, etc.
+:mod:`numpy.distutils` is part of NumPy, and extends the standard Python
+``distutils`` module to deal with Fortran sources and F2PY signature files, e.g.
+compile Fortran sources, call F2PY to construct extension modules, etc.
.. topic:: Example
- Consider the following `setup file`__:
+ Consider the following `setup file`__ for the ``fib`` examples in the previous
+ section:
- .. include:: setup_example.py
- :literal:
+ .. literalinclude:: ./code/setup_example.py
+ :language: python
Running
@@ -26,30 +27,32 @@ sources, call F2PY to construct extension modules, etc.
__ setup_example.py
+Extensions to ``distutils``
+===========================
+
:mod:`numpy.distutils` extends ``distutils`` with the following features:
* :class:`Extension` class argument ``sources`` may contain Fortran source
files. In addition, the list ``sources`` may contain at most one
- F2PY signature file, and then the name of an Extension module must
- match with the ``<modulename>`` used in signature file. It is
+ F2PY signature file, and in this case, the name of an Extension module must
+ match with the ``<modulename>`` used in signature file. It is
assumed that an F2PY signature file contains exactly one ``python
module`` block.
- If ``sources`` does not contain a signature files, then F2PY is used
- to scan Fortran source files for routine signatures to construct the
- wrappers to Fortran codes.
+  If ``sources`` does not contain a signature file, then F2PY is used to scan
+ Fortran source files to construct wrappers to the Fortran codes.
- Additional options to F2PY process can be given using :class:`Extension`
- class argument ``f2py_options``.
+ Additional options to the F2PY executable can be given using the
+ :class:`Extension` class argument ``f2py_options``.
* The following new ``distutils`` commands are defined:
``build_src``
to construct Fortran wrapper extension modules, among many other things.
``config_fc``
- to change Fortran compiler options
+ to change Fortran compiler options.
- as well as ``build_ext`` and ``build_clib`` commands are enhanced
+ Additionally, the ``build_ext`` and ``build_clib`` commands are also enhanced
to support Fortran sources.
Run
@@ -60,15 +63,15 @@ sources, call F2PY to construct extension modules, etc.
to see available options for these commands.
-* When building Python packages containing Fortran sources, then one
- can choose different Fortran compilers by using ``build_ext``
+* When building Python packages containing Fortran sources, one
+ can choose different Fortran compilers by using the ``build_ext``
command option ``--fcompiler=<Vendor>``. Here ``<Vendor>`` can be one of the
- following names::
+ following names (on ``linux`` systems)::
- absoft sun mips intel intelv intele intelev nag compaq compaqv gnu vast pg hpux
+ absoft compaq fujitsu g95 gnu gnu95 intel intele intelem lahey nag nagfor nv pathf95 pg vast
- See ``numpy_distutils/fcompiler.py`` for up-to-date list of
- supported compilers or run
+ See ``numpy_distutils/fcompiler.py`` for an up-to-date list of
+ supported compilers for different platforms, or run
::
diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst
index 27ddbb005..1709aad61 100644
--- a/doc/source/f2py/f2py.getting-started.rst
+++ b/doc/source/f2py/f2py.getting-started.rst
@@ -6,52 +6,55 @@ Wrapping Fortran or C functions to Python using F2PY consists of the
following steps:
* Creating the so-called signature file that contains descriptions of
- wrappers to Fortran or C functions, also called as signatures of the
- functions. In the case of Fortran routines, F2PY can create initial
+ wrappers to Fortran or C functions, also called the signatures of the
+ functions. For Fortran routines, F2PY can create an initial
signature file by scanning Fortran source codes and
- catching all relevant information needed to create wrapper
+ tracking all relevant information needed to create wrapper
functions.
-* Optionally, F2PY created signature files can be edited to optimize
- wrappers functions, make them "smarter" and more "Pythonic".
+ * Optionally, F2PY created signature files can be edited to optimize
+ wrapper functions, to make them "smarter" and more "Pythonic".
* F2PY reads a signature file and writes a Python C/API module containing
Fortran/C/Python bindings.
-
* F2PY compiles all sources and builds an extension module containing
- the wrappers. In building extension modules, F2PY uses
- ``numpy_distutils`` that supports a number of Fortran 77/90/95
- compilers, including Gnu, Intel,
- Sun Fortre, SGI MIPSpro, Absoft, NAG, Compaq etc. compilers.
+ the wrappers.
+
+ * In building the extension modules, F2PY uses ``numpy_distutils`` which
+ supports a number of Fortran 77/90/95 compilers, including Gnu, Intel, Sun
+ Fortran, SGI MIPSpro, Absoft, NAG, Compaq etc.
+
+Depending on the situation, these steps can be carried out in a single composite
+command or step-by-step, in which case some steps can be omitted or combined
+with others.
-Depending on a particular situation, these steps can be carried out
-either by just in one command or step-by-step, some steps can be
-omitted or combined with others.
+Below, we describe three typical approaches of using F2PY. These can be read in
+order of increasing effort, but also cater to different access levels depending
+on whether the Fortran code can be freely modified.
-Below I'll describe three typical approaches of using F2PY.
The following example Fortran 77 code will be used for
-illustration, save it as fib1.f:
+illustration, save it as ``fib1.f``:
-.. include:: fib1.f
- :literal:
+.. literalinclude:: ./code/fib1.f
+ :language: fortran
The quick way
==============
-The quickest way to wrap the Fortran subroutine ``FIB`` to Python is
-to run
+The quickest way to wrap the Fortran subroutine ``FIB`` for use in Python is to
+run
::
python -m numpy.f2py -c fib1.f -m fib1
-This command builds (see ``-c`` flag, execute ``python -m numpy.f2py`` without
-arguments to see the explanation of command line options) an extension
-module ``fib1.so`` (see ``-m`` flag) to the current directory. Now, in
-Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
+This command compiles and wraps ``fib1.f`` (``-c``) to create the extension
+module ``fib1.so`` (``-m``) in the current directory. A list of command line
+options can be seen by executing ``python -m numpy.f2py``. Now, in Python the
+Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
- >>> import numpy
+ >>> import numpy as np
>>> import fib1
>>> print(fib1.fib.__doc__)
fib(a,[n])
@@ -67,21 +70,21 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
n : input int, optional
Default: len(a)
- >>> a = numpy.zeros(8, 'd')
+ >>> a = np.zeros(8, 'd')
>>> fib1.fib(a)
>>> print(a)
[ 0. 1. 1. 2. 3. 5. 8. 13.]
.. note::
- * Note that F2PY found that the second argument ``n`` is the
+ * Note that F2PY recognized that the second argument ``n`` is the
dimension of the first array argument ``a``. Since by default all
arguments are input-only arguments, F2PY concludes that ``n`` can
be optional with the default value ``len(a)``.
* One can use different values for optional ``n``::
- >>> a1 = numpy.zeros(8, 'd')
+ >>> a1 = np.zeros(8, 'd')
>>> fib1.fib(a1, 6)
>>> print(a1)
[ 0. 1. 1. 2. 3. 5. 0. 0.]
@@ -96,98 +99,94 @@ Python the Fortran subroutine ``FIB`` is accessible via ``fib1.fib``::
>>>
F2PY implements basic compatibility checks between related
- arguments in order to avoid any unexpected crashes.
+ arguments in order to avoid unexpected crashes.
- * When a NumPy array, that is Fortran contiguous and has a dtype
- corresponding to presumed Fortran type, is used as an input array
+ * When a NumPy array, that is Fortran contiguous and has a ``dtype``
+ corresponding to a presumed Fortran type, is used as an input array
argument, then its C pointer is directly passed to Fortran.
- Otherwise F2PY makes a contiguous copy (with a proper dtype) of
- the input array and passes C pointer of the copy to Fortran
+ Otherwise F2PY makes a contiguous copy (with the proper ``dtype``) of
+ the input array and passes a C pointer of the copy to the Fortran
subroutine. As a result, any possible changes to the (copy of)
input array have no effect to the original argument, as
demonstrated below::
- >>> a = numpy.ones(8, 'i')
+ >>> a = np.ones(8, 'i')
>>> fib1.fib(a)
>>> print(a)
[1 1 1 1 1 1 1 1]
- Clearly, this is not an expected behaviour. The fact that the
- above example worked with ``dtype=float`` is considered
- accidental.
+ Clearly, this is unexpected, as Fortran typically passes by reference. That
+ the above example worked with ``dtype=float`` is considered accidental.
- F2PY provides ``intent(inplace)`` attribute that would modify
+ F2PY provides an ``intent(inplace)`` attribute that modifies
the attributes of an input array so that any changes made by
- Fortran routine will be effective also in input argument. For example,
- if one specifies ``intent(inplace) a`` (see below, how), then
- the example above would read::
+ Fortran routine will be reflected in the input argument. For example,
+ if one specifies the ``intent(inplace) a`` directive (see subsequent
+ sections on how), then the example above would read::
- >>> a = numpy.ones(8, 'i')
+ >>> a = np.ones(8, 'i')
>>> fib1.fib(a)
>>> print(a)
[ 0. 1. 1. 2. 3. 5. 8. 13.]
- However, the recommended way to get changes made by Fortran
- subroutine back to Python is to use ``intent(out)`` attribute. It
- is more efficient and a cleaner solution.
-
- * The usage of ``fib1.fib`` in Python is very similar to using
- ``FIB`` in Fortran. However, using *in situ* output arguments in
- Python indicates a poor style as there is no safety mechanism
- in Python with respect to wrong argument types. When using Fortran
- or C, compilers naturally discover any type mismatches during
- compile time but in Python the types must be checked in
- runtime. So, using *in situ* output arguments in Python may cause
- difficult to find bugs, not to mention that the codes will be less
- readable when all required type checks are implemented.
-
- Though the demonstrated way of wrapping Fortran routines to Python
- is very straightforward, it has several drawbacks (see the comments
- above). These drawbacks are due to the fact that there is no way
- that F2PY can determine what is the actual intention of one or the
- other argument, is it input or output argument, or both, or
- something else. So, F2PY conservatively assumes that all arguments
- are input arguments by default.
-
- However, there are ways (see below) how to "teach" F2PY about the
- true intentions (among other things) of function arguments; and then
- F2PY is able to generate more Pythonic (more explicit, easier to
- use, and less error prone) wrappers to Fortran functions.
+ However, the recommended way to have changes made by Fortran subroutine
+ propagate to Python is to use the ``intent(out)`` attribute. That approach is
+ more efficient and also cleaner.
+
+ * The usage of ``fib1.fib`` in Python is very similar to using ``FIB`` in
+ Fortran. However, using *in situ* output arguments in Python is poor style,
+ as there are no safety mechanisms in Python to protect against wrong
+ argument types. When using Fortran or C, compilers discover any type
+ mismatches during the compilation process, but in Python the types must be
+ checked at runtime. Consequently, using *in situ* output arguments in Python
+ may lead to difficult to find bugs, not to mention the fact that the
+ codes will be less readable when all required type checks are implemented.
+
+ Though the approach to wrapping Fortran routines for Python discussed so far is
+ very straightforward, it has several drawbacks (see the comments above).
+ The drawbacks are due to the fact that there is no way for F2PY to determine
+ the actual intention of the arguments; that is, there is ambiguity in
+ distinguishing between input and output arguments. Consequently, F2PY assumes
+ that all arguments are input arguments by default.
+
+ However, there are ways (see below) to remove this ambiguity by "teaching"
+ F2PY about the true intentions of function arguments, and F2PY is then able to
+ generate more explicit, easier to use, and less error prone wrappers for
+ Fortran functions.
The smart way
==============
-Let's apply the steps of wrapping Fortran functions to Python one by
+Let us apply the steps for wrapping Fortran functions to Python one by
one.
-* First, we create a signature file from ``fib1.f`` by running
+* First, we create a signature file from ``fib1.f`` by running:
::
python -m numpy.f2py fib1.f -m fib2 -h fib1.pyf
- The signature file is saved to ``fib1.pyf`` (see ``-h`` flag) and
- its contents is shown below.
+ The signature file is saved to ``fib1.pyf`` (see the ``-h`` flag) and
+ its contents are shown below.
- .. include:: fib1.pyf
- :literal:
+ .. literalinclude:: ./code/fib1.pyf
+ :language: fortran
-* Next, we'll teach F2PY that the argument ``n`` is an input argument
- (use ``intent(in)`` attribute) and that the result, i.e. the
- contents of ``a`` after calling Fortran function ``FIB``, should be
- returned to Python (use ``intent(out)`` attribute). In addition, an
- array ``a`` should be created dynamically using the size given by
- the input argument ``n`` (use ``depend(n)`` attribute to indicate
- dependence relation).
+* Next, we'll teach F2PY that the argument ``n`` is an input argument (using the
+ ``intent(in)`` attribute) and that the result, i.e., the contents of ``a``
+ after calling the Fortran function ``FIB``, should be returned to Python (using
+ the ``intent(out)`` attribute). In addition, an array ``a`` should be created
+ dynamically using the size determined by the input argument ``n`` (using the
+ ``depend(n)`` attribute to indicate this dependence relation).
- The content of a modified version of ``fib1.pyf`` (saved as
+ The contents of a suitably modified version of ``fib1.pyf`` (saved as
``fib2.pyf``) is as follows:
- .. include:: fib2.pyf
- :literal:
+ .. literalinclude:: ./code/fib2.pyf
+ :language: fortran
-* And finally, we build the extension module by running
+* Finally, we build the extension module with ``numpy.distutils`` by running:
::
@@ -214,16 +213,14 @@ In Python::
.. note::
- * Clearly, the signature of ``fib2.fib`` now corresponds to the
- intention of Fortran subroutine ``FIB`` more closely: given the
- number ``n``, ``fib2.fib`` returns the first ``n`` Fibonacci numbers
- as a NumPy array. Also, the new Python signature ``fib2.fib``
- rules out any surprises that we experienced with ``fib1.fib``.
+ * The signature of ``fib2.fib`` now more closely corresponds to the
+ intention of Fortran subroutine ``FIB``: given the number ``n``,
+ ``fib2.fib`` returns the first ``n`` Fibonacci numbers as a NumPy array.
+ The new Python signature ``fib2.fib`` also rules out the unexpected behaviour in ``fib1.fib``.
- * Note that by default using single ``intent(out)`` also implies
+ * Note that by default, using a single ``intent(out)`` also implies
``intent(hide)``. Arguments that have the ``intent(hide)`` attribute
- specified will not be listed in the argument list of a wrapper
- function.
+ specified will not be listed in the argument list of a wrapper function.
The quick and smart way
========================
@@ -233,26 +230,25 @@ suitable for wrapping (e.g. third party) Fortran codes for which
modifications to their source codes are not desirable nor even
possible.
-However, if editing Fortran codes is acceptable, then the generation
-of an intermediate signature file can be skipped in most
-cases. Namely, F2PY specific attributes can be inserted directly to
-Fortran source codes using the so-called F2PY directive. A F2PY
-directive defines special comment lines (starting with ``Cf2py``, for
-example) which are ignored by Fortran compilers but F2PY interprets
-them as normal lines.
+However, if editing Fortran codes is acceptable, then the generation of an
+intermediate signature file can be skipped in most cases. F2PY specific
+attributes can be inserted directly into Fortran source codes using F2PY
+directives. A F2PY directive consists of special comment lines (starting with
+``Cf2py`` or ``!f2py``, for example) which are ignored by Fortran compilers but
+interpreted by F2PY as normal lines.
-Here is shown a modified version of the previous Fortran code, save it
-as ``fib3.f``:
+Consider a modified version of the previous Fortran code with F2PY directives,
+saved as ``fib3.f``:
-.. include:: fib3.f
- :literal:
+.. literalinclude:: ./code/fib3.f
+ :language: fortran
Building the extension module can be now carried out in one command::
python -m numpy.f2py -c -m fib3 fib3.f
-Notice that the resulting wrapper to ``FIB`` is as "smart" as in
-previous case::
+Notice that the resulting wrapper to ``FIB`` is as "smart" (unambiguous) as in
+the previous case::
>>> import fib3
>>> print(fib3.fib.__doc__)
diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index 492139651..c774a0df6 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -1,8 +1,10 @@
-#####################################
-F2PY Users Guide and Reference Manual
-#####################################
+.. _f2py:
-The purpose of the ``F2PY`` --*Fortran to Python interface generator*--
+=====================================
+F2PY user guide and reference manual
+=====================================
+
+The purpose of the ``F2PY`` --*Fortran to Python interface generator*-- utility
is to provide a connection between Python and Fortran
languages. F2PY is a part of NumPy_ (``numpy.f2py``) and also available as a
standalone command line tool ``f2py`` when ``numpy`` is installed that
@@ -19,11 +21,11 @@ from Python.
.. toctree::
:maxdepth: 2
- f2py.getting-started
- signature-file
- python-usage
usage
+ f2py.getting-started
distutils
+ python-usage
+ signature-file
advanced
.. _Python: https://www.python.org/
diff --git a/doc/source/f2py/python-usage.rst b/doc/source/f2py/python-usage.rst
index 65c0cec64..ef8ccd7dd 100644
--- a/doc/source/f2py/python-usage.rst
+++ b/doc/source/f2py/python-usage.rst
@@ -4,74 +4,76 @@ Using F2PY bindings in Python
All wrappers for Fortran/C routines, common blocks, or for Fortran
90 module data generated by F2PY are exposed to Python as ``fortran``
-type objects. Routine wrappers are callable ``fortran`` type objects
+type objects. Routine wrappers are callable ``fortran`` type objects
while wrappers to Fortran data have attributes referring to data
objects.
-All ``fortran`` type objects have attribute ``_cpointer`` that contains
-CObject referring to the C pointer of the corresponding Fortran/C
-function or variable in C level. Such CObjects can be used as a
-callback argument of F2PY generated functions to bypass Python C/API
-layer of calling Python functions from Fortran or C when the
-computational part of such functions is implemented in C or Fortran
-and wrapped with F2PY (or any other tool capable of providing CObject
-of a function).
+All ``fortran`` type objects have an attribute ``_cpointer`` that contains a
+``CObject`` referring to the C pointer of the corresponding Fortran/C function
+or variable at the C level. Such ``CObjects`` can be used as a callback argument
+for F2PY generated functions to bypass the Python C/API layer for calling Python
+functions from Fortran or C when the computational aspects of such functions are
+implemented in C or Fortran and wrapped with F2PY (or any other tool capable of
+providing the ``CObject`` of a function).
-Consider a Fortran 77 file ``ftype.f``:
+Consider a Fortran 77 file ``ftype.f``:
- .. include:: ftype.f
- :literal:
+ .. literalinclude:: ./code/ftype.f
+ :language: fortran
-and build a wrapper using ``f2py -c ftype.f -m ftype``.
+and a wrapper built using ``f2py -c ftype.f -m ftype``.
In Python:
- .. include:: ftype_session.dat
- :literal:
+ .. literalinclude:: ./code/results/ftype_session.dat
+ :language: python
Scalar arguments
=================
-In general, a scalar argument of a F2PY generated wrapper function can
+In general, a scalar argument for a F2PY generated wrapper function can
be an ordinary Python scalar (integer, float, complex number) as well as
an arbitrary sequence object (list, tuple, array, string) of
scalars. In the latter case, the first element of the sequence object
is passed to Fortran routine as a scalar argument.
-Note that when type-casting is required and there is possible loss of
-information (e.g. when type-casting float to integer or complex to
-float), F2PY does not raise any exception. In complex to real
-type-casting only the real part of a complex number is used.
+.. note::
+
+ * When type-casting is required and there is possible loss of information via
+ narrowing e.g. when type-casting float to integer or complex to float, F2PY
+ *does not* raise an exception.
-``intent(inout)`` scalar arguments are assumed to be array objects in
-order to have *in situ* changes be effective. It is recommended to use
-arrays with proper type but also other types work.
+ * For complex to real type-casting only the real part of a complex number is used.
+
+ * ``intent(inout)`` scalar arguments are assumed to be array objects in
+ order to have *in situ* changes be effective. It is recommended to use
+ arrays with proper type but also other types work.
Consider the following Fortran 77 code:
- .. include:: scalar.f
- :literal:
+ .. literalinclude:: ./code/scalar.f
+ :language: fortran
and wrap it using ``f2py -c -m scalar scalar.f``.
In Python:
- .. include:: scalar_session.dat
- :literal:
+ .. literalinclude:: ./code/results/scalar_session.dat
+ :language: python
String arguments
=================
-F2PY generated wrapper functions accept (almost) any Python object as
-a string argument, ``str`` is applied for non-string objects.
+F2PY generated wrapper functions accept almost any Python object as
+a string argument, since ``str`` is applied for non-string objects.
Exceptions are NumPy arrays that must have type code ``'c'`` or
``'1'`` when used as string arguments.
-A string can have arbitrary length when using it as a string argument
-to F2PY generated wrapper function. If the length is greater than
-expected, the string is truncated. If the length is smaller than
+A string can have an arbitrary length when used as a string argument
+for an F2PY generated wrapper function. If the length is greater than
+expected, the string is truncated silently. If the length is smaller than
expected, additional memory is allocated and filled with ``\0``.
Because Python strings are immutable, an ``intent(inout)`` argument
@@ -79,43 +81,43 @@ expects an array version of a string in order to have *in situ* changes be effec
Consider the following Fortran 77 code:
- .. include:: string.f
- :literal:
+ .. literalinclude:: ./code/string.f
+ :language: fortran
and wrap it using ``f2py -c -m mystring string.f``.
Python session:
- .. include:: string_session.dat
- :literal:
+ .. literalinclude:: ./code/results/string_session.dat
+ :language: python
Array arguments
================
-In general, array arguments of F2PY generated wrapper functions accept
-arbitrary sequences that can be transformed to NumPy array objects.
-An exception is ``intent(inout)`` array arguments that always must be
-proper-contiguous and have proper type, otherwise an exception is
-raised. Another exception is ``intent(inplace)`` array arguments that
-attributes will be changed *in situ* if the argument has different type
-than expected (see ``intent(inplace)`` attribute for more
-information).
-
-In general, if a NumPy array is proper-contiguous and has a proper
-type then it is directly passed to wrapped Fortran/C function.
-Otherwise, an element-wise copy of an input array is made and the
-copy, being proper-contiguous and with proper type, is used as an
-array argument.
+In general, array arguments for F2PY generated wrapper functions accept
+arbitrary sequences that can be transformed to NumPy array objects. There are
+two notable exceptions:
+
+* ``intent(inout)`` array arguments must always be proper-contiguous (defined below) and have a
+ compatible ``dtype``, otherwise an exception is raised.
+* ``intent(inplace)`` array arguments will be changed *in situ* if the argument
+ has a different type than expected (see the ``intent(inplace)`` attribute for
+ more information).
+
+In general, if a NumPy array is proper-contiguous and has a proper type then it
+is directly passed to the wrapped Fortran/C function. Otherwise, an element-wise
+copy of the input array is made and the copy, being proper-contiguous and with
+proper type, is used as the array argument.
There are two types of proper-contiguous NumPy arrays:
-* Fortran-contiguous arrays when data is stored column-wise,
- i.e. indexing of data as stored in memory starts from the lowest
+* Fortran-contiguous arrays refer to data that is stored columnwise,
+ i.e. the indexing of data as stored in memory starts from the lowest
dimension;
-* C-contiguous or simply contiguous arrays when data is stored
- row-wise, i.e. indexing of data as stored in memory starts from the
- highest dimension.
+* C-contiguous, or simply contiguous arrays, refer to data that is stored
+ rowwise, i.e. the indexing of data as stored in memory starts from the highest
+ dimension.
For one-dimensional arrays these notions coincide.
@@ -132,30 +134,29 @@ To test whether an array is C-contiguous, use the ``.flags.c_contiguous``
attribute of NumPy arrays. To test for Fortran contiguity, use the
``.flags.f_contiguous`` attribute.
-Usually there is no need to worry about how the arrays are stored in
-memory and whether the wrapped functions, being either Fortran or C
-functions, assume one or another storage order. F2PY automatically
-ensures that wrapped functions get arguments with proper storage
-order; the corresponding algorithm is designed to make copies of
-arrays only when absolutely necessary. However, when dealing with very
-large multidimensional input arrays with sizes close to the size of
-the physical memory in your computer, then a care must be taken to use
-always proper-contiguous and proper type arguments.
+Usually there is no need to worry about how the arrays are stored in memory and
+whether the wrapped functions, being either Fortran or C functions, assume one
+or another storage order. F2PY automatically ensures that wrapped functions get
+arguments with the proper storage order; the underlying algorithm is designed to
+make copies of arrays only when absolutely necessary. However, when dealing with
+very large multidimensional input arrays with sizes close to the size of the
+physical memory in your computer, then care must be taken to ensure the usage of
+proper-contiguous and proper type arguments.
To transform input arrays to column major storage order before passing
them to Fortran routines, use the function ``numpy.asfortranarray(<array>)``.
Consider the following Fortran 77 code:
- .. include:: array.f
- :literal:
+ .. literalinclude:: ./code/array.f
+ :language: fortran
and wrap it using ``f2py -c -m arr array.f -DF2PY_REPORT_ON_ARRAY_COPY=1``.
In Python:
- .. include:: array_session.dat
- :literal:
+ .. literalinclude:: ./code/results/array_session.dat
+ :language: python
.. _Call-back arguments:
@@ -166,31 +167,32 @@ F2PY supports calling Python functions from Fortran or C codes.
Consider the following Fortran 77 code:
- .. include:: callback.f
- :literal:
+ .. literalinclude:: ./code/callback.f
+ :language: fortran
and wrap it using ``f2py -c -m callback callback.f``.
In Python:
- .. include:: callback_session.dat
- :literal:
+ .. literalinclude:: ./code/results/callback_session.dat
+ :language: python
In the above example F2PY was able to guess accurately the signature
-of a call-back function. However, sometimes F2PY cannot establish the
-signature as one would wish and then the signature of a call-back
-function must be modified in the signature file manually. Namely,
-signature files may contain special modules (the names of such modules
-contain a substring ``__user__``) that collect various signatures of
-call-back functions. Callback arguments in routine signatures have
-attribute ``external`` (see also ``intent(callback)`` attribute). To
-relate a callback argument and its signature in ``__user__`` module
-block, use ``use`` statement as illustrated below. The same signature
-of a callback argument can be referred in different routine
+of the call-back function. However, sometimes F2PY cannot establish the
+appropriate signature; in these cases the signature of the call-back
+function must be explicitly defined in the signature file.
+
+To facilitate this, signature files may contain special modules (the names of
+these modules contain the special ``__user__`` sub-string) that define the
+various signatures for call-back functions. Callback arguments in routine
+signatures have the ``external`` attribute (see also the ``intent(callback)``
+attribute). To relate a callback argument with its signature in a ``__user__``
+module block, a ``use`` statement can be utilized as illustrated below. The same
+signature for a callback argument can be referred to in different routine
signatures.
-We use the same Fortran 77 code as in previous example but now
-we'll pretend that F2PY was not able to guess the signatures of
+We use the same Fortran 77 code as in the previous example but now
+we will pretend that F2PY was not able to guess the signatures of
call-back arguments correctly. First, we create an initial signature
file ``callback2.pyf`` using F2PY::
@@ -198,40 +200,40 @@ file ``callback2.pyf`` using F2PY::
Then modify it as follows
- .. include:: callback2.pyf
+ .. include:: ./code/callback2.pyf
:literal:
-Finally, build the extension module using ``f2py -c callback2.pyf callback.f``.
+Finally, we build the extension module using ``f2py -c callback2.pyf callback.f``.
-An example Python session would be identical to the previous example
-except that argument names would differ.
+An example Python session for this snippet would be identical to the previous
+example except that the argument names would differ.
Sometimes a Fortran package may require that users provide routines
that the package will use. F2PY can construct an interface to such
-routines so that Python functions could be called from Fortran.
+routines so that Python functions can be called from Fortran.
-Consider the following Fortran 77 subroutine that takes an array
+Consider the following Fortran 77 subroutine that takes an array as its input
and applies a function ``func`` to its elements.
- .. include:: calculate.f
- :literal:
+ .. literalinclude:: ./code/calculate.f
+ :language: fortran
-It is expected that function ``func`` has been defined
-externally. In order to use a Python function as ``func``, it must
-have an attribute ``intent(callback)`` (it must be specified before
-the ``external`` statement).
+The Fortran code expects that the function ``func`` has been defined externally.
+In order to use a Python function for ``func``, it must have an attribute
+``intent(callback)`` and it must be specified before the ``external`` statement.
Finally, build an extension module using ``f2py -c -m foo calculate.f``
In Python:
- .. include:: calculate_session.dat
- :literal:
+ .. literalinclude:: ./code/results/calculate_session.dat
+ :language: python
-The function is included as an argument to the python function call to
-the Fortran subroutine even though it was *not* in the Fortran subroutine argument
-list. The "external" refers to the C function generated by f2py, not the python
-function itself. The python function must be supplied to the C function.
+The function is included as an argument to the python function call to the
+Fortran subroutine even though it was *not* in the Fortran subroutine argument
+list. The "external" keyword refers to the C function generated by f2py, not the
+python function itself. The python function is essentially being supplied to the
+C function.
The callback function may also be explicitly set in the module.
Then it is not necessary to pass the function in the argument list to
@@ -240,24 +242,24 @@ the python callback function is itself called by another Fortran function.
Consider the following Fortran 77 subroutine:
- .. include:: extcallback.f
- :literal:
+ .. literalinclude:: ./code/extcallback.f
+ :language: fortran
and wrap it using ``f2py -c -m pfromf extcallback.f``.
In Python:
- .. include:: extcallback_session.dat
- :literal:
+ .. literalinclude:: ./code/results/extcallback_session.dat
+ :language: python
Resolving arguments to call-back functions
-------------------------------------------
+===========================================
-F2PY generated interface is very flexible with respect to call-back
+F2PY generated interfaces are very flexible with respect to call-back
arguments. For each call-back argument an additional optional
argument ``<name>_extra_args`` is introduced by F2PY. This argument
can be used to pass extra arguments to user provided call-back
-arguments.
+functions.
If a F2PY generated wrapper function expects the following call-back
argument::
@@ -281,7 +283,7 @@ is provided by a user, and in addition,
fun_extra_args = (e_1,...,e_p)
is used, then the following rules are applied when a Fortran or C
-function calls the call-back argument ``gun``:
+function evaluates the call-back argument ``gun``:
* If ``p == 0`` then ``gun(a_1, ..., a_q)`` is called, here
``q = min(m, n)``.
@@ -292,8 +294,8 @@ function calls the call-back argument ``gun``:
* If ``n + p`` is less than the number of required arguments to ``gun``
then an exception is raised.
-The function ``gun`` may return any number of objects as a tuple. Then
-following rules are applied:
+The function ``gun`` may return any number of objects as a tuple; then
+the following rules are applied:
* If ``k < l``, then ``y_{k + 1}, ..., y_l`` are ignored.
* If ``k > l``, then only ``x_1, ..., x_l`` are set.
@@ -303,62 +305,62 @@ Common blocks
==============
F2PY generates wrappers to ``common`` blocks defined in a routine
-signature block. Common blocks are visible by all Fortran codes linked
-with the current extension module, but not to other extension modules
-(this restriction is due to how Python imports shared libraries). In
+signature block. Common blocks are visible to all Fortran codes linked
+to the current extension module, but not to other extension modules
+(this restriction is due to the way Python imports shared libraries). In
Python, the F2PY wrappers to ``common`` blocks are ``fortran`` type
-objects that have (dynamic) attributes related to data members of
-common blocks. When accessed, these attributes return as NumPy array
-objects (multidimensional arrays are Fortran-contiguous) that
+objects that have (dynamic) attributes related to the data members of
+the common blocks. When accessed, these attributes return as NumPy array
+objects (multidimensional arrays are Fortran-contiguous) which
directly link to data members in common blocks. Data members can be
changed by direct assignment or by in-place changes to the
corresponding array objects.
Consider the following Fortran 77 code:
- .. include:: common.f
- :literal:
+ .. literalinclude:: ./code/common.f
+ :language: fortran
and wrap it using ``f2py -c -m common common.f``.
In Python:
- .. include:: common_session.dat
- :literal:
+ .. literalinclude:: ./code/results/common_session.dat
+ :language: python
Fortran 90 module data
=======================
-The F2PY interface to Fortran 90 module data is similar to Fortran 77
+The F2PY interface to Fortran 90 module data is similar to the handling of Fortran 77
common blocks.
Consider the following Fortran 90 code:
- .. include:: moddata.f90
- :literal:
+ .. literalinclude:: ./code/moddata.f90
+ :language: fortran
and wrap it using ``f2py -c -m moddata moddata.f90``.
In Python:
- .. include:: moddata_session.dat
- :literal:
+ .. literalinclude:: ./code/results/moddata_session.dat
+ :language: python
Allocatable arrays
--------------------
+===================
F2PY has basic support for Fortran 90 module allocatable arrays.
Consider the following Fortran 90 code:
- .. include:: allocarr.f90
- :literal:
+ .. literalinclude:: ./code/allocarr.f90
+ :language: fortran
and wrap it using ``f2py -c -m allocarr allocarr.f90``.
In Python:
- .. include:: allocarr_session.dat
- :literal:
+ .. literalinclude:: ./code/results/allocarr_session.dat
+ :language: python
diff --git a/doc/source/f2py/signature-file.rst b/doc/source/f2py/signature-file.rst
index 3a163ee23..b80b31509 100644
--- a/doc/source/f2py/signature-file.rst
+++ b/doc/source/f2py/signature-file.rst
@@ -2,23 +2,22 @@
Signature file
==================
-The syntax specification for signature files (.pyf files) is borrowed
-from the Fortran 90/95 language specification. Almost all Fortran
-90/95 standard constructs are understood, both in free and fixed
-format (recall that Fortran 77 is a subset of Fortran 90/95). F2PY
-introduces also some extensions to Fortran 90/95 language
-specification that help designing Fortran to Python interface, make it
-more "Pythonic".
-
-Signature files may contain arbitrary Fortran code (so that Fortran
-codes can be considered as signature files). F2PY silently ignores
+The syntax specification for signature files (.pyf files) is modeled on the
+Fortran 90/95 language specification. Almost all Fortran 90/95 standard
+constructs are understood, both in free and fixed format (recall that Fortran 77
+is a subset of Fortran 90/95). F2PY introduces some extensions to the Fortran
+90/95 language specification that help in the design of the Fortran to Python
+interface, making it more "Pythonic".
+
+Signature files may contain arbitrary Fortran code so that any Fortran 90/95
+codes can be treated as signature files. F2PY silently ignores
Fortran constructs that are irrelevant for creating the interface.
-However, this includes also syntax errors. So, be careful not making
-ones ;-).
+However, this also means that syntax errors are not caught by F2PY and will only
+be caught when the library is built.
-In general, the contents of signature files is case-sensitive. When
-scanning Fortran codes and writing a signature file, F2PY lowers all
-cases automatically except in multiline blocks or when ``--no-lower``
+In general, the contents of the signature files are case-sensitive. When
+scanning Fortran codes to generate a signature file, F2PY lowers all
+cases automatically except in multi-line blocks or when the ``--no-lower``
option is used.
The syntax of signature files is presented below.
@@ -27,13 +26,15 @@ Python module block
=====================
A signature file may contain one (recommended) or more ``python
-module`` blocks. ``python module`` block describes the contents of
+module`` blocks. The ``python module`` block describes the contents of
a Python/C extension module ``<modulename>module.c`` that F2PY
generates.
-Exception: if ``<modulename>`` contains a substring ``__user__``, then
-the corresponding ``python module`` block describes the signatures of
-so-called call-back functions (see :ref:`Call-back arguments`).
+.. warning::
+
+ Exception: if ``<modulename>`` contains a substring ``__user__``, then the
+ corresponding ``python module`` block describes the signatures of call-back
+ functions (see :ref:`Call-back arguments`).
A ``python module`` block has the following structure::
@@ -56,9 +57,9 @@ A ``python module`` block has the following structure::
]...
end [python module [<modulename>]]
-Here brackets ``[]`` indicate an optional part, dots ``...`` indicate
-one or more of a previous part. So, ``[]...`` reads zero or more of a
-previous part.
+Here brackets ``[]`` indicate an optional section, dots ``...`` indicate one or
+more of a previous section. So, ``[]...`` is to be read as zero or more of a
+previous section.
Fortran/C routine signatures
@@ -93,7 +94,7 @@ The signature of a Fortran block data has the following structure::
end [ block data [<block data name>] ]
Type declarations
------------------
+=================
The definition of the ``<argument/variable type declaration>`` part
is
@@ -123,33 +124,36 @@ where
and
-+ ``<attrspec>`` is a comma separated list of attributes_;
+* ``<attrspec>`` is a comma separated list of attributes_;
-+ ``<arrayspec>`` is a comma separated list of dimension bounds;
+* ``<arrayspec>`` is a comma separated list of dimension bounds;
-+ ``<init_expr>`` is a `C expression`__.
+* ``<init_expr>`` is a `C expression`__;
-+ ``<intlen>`` may be negative integer for ``integer`` type
+* ``<intlen>`` may be a negative integer for ``integer`` type
specifications. In such cases ``integer*<negintlen>`` represents
- unsigned C integers.
+ unsigned C integers;
__ `C expressions`_
If an argument has no ``<argument type declaration>``, its type is
determined by applying ``implicit`` rules to its name.
-
Statements
-----------
+==========
+
+Attribute statements
+^^^^^^^^^^^^^^^^^^^^^
-Attribute statements:
- The ``<argument/variable attribute statement>`` is
+* The ``<argument/variable attribute statement>`` is
``<argument/variable type declaration>`` without ``<typespec>``.
- In addition, in an attribute statement one cannot use other
+* In addition, in an attribute statement one cannot use other
attributes, also ``<entitydecl>`` can be only a list of names.
-Use statements:
- The definition of the ``<use statement>`` part is
+Use statements
+^^^^^^^^^^^^^^^
+
+* The definition of the ``<use statement>`` part is
::
@@ -161,12 +165,14 @@ Use statements:
<rename_list> := <local_name> => <use_name> [ , <rename_list> ]
- Currently F2PY uses ``use`` statement only for linking call-back
+* Currently F2PY uses ``use`` statement only for linking call-back
modules and ``external`` arguments (call-back functions), see
:ref:`Call-back arguments`.
-Common block statements:
- The definition of the ``<common block statement>`` part is
+Common block statements
+^^^^^^^^^^^^^^^^^^^^^^^
+
+* The definition of the ``<common block statement>`` part is
::
@@ -178,18 +184,19 @@ Common block statements:
<shortentitydecl> := <name> [ ( <arrayspec> ) ] [ , <shortentitydecl> ]
- If a ``python module`` block contains two or more ``common`` blocks
+* If a ``python module`` block contains two or more ``common`` blocks
with the same name, the variables from the additional declarations
are appended. The types of variables in ``<shortentitydecl>`` are
defined using ``<argument type declarations>``. Note that the
corresponding ``<argument type declarations>`` may contain array
- specifications; then you don't need to specify these in
- ``<shortentitydecl>``.
+ specifications; then these need not be specified in ``<shortentitydecl>``.
-Other statements:
- The ``<other statement>`` part refers to any other Fortran language
+Other statements
+^^^^^^^^^^^^^^^^^
+
+* The ``<other statement>`` part refers to any other Fortran language
constructs that are not described above. F2PY ignores most of them
- except
+ except the following:
+ ``call`` statements and function calls of ``external`` arguments
(`more details`__?);
@@ -223,7 +230,7 @@ Other statements:
Implicit rules are used to determine the type specification of
a variable (from the first-letter of its name) if the variable
is not defined using ``<variable type declaration>``. Default
- implicit rule is given by
+ implicit rules are given by:
::
@@ -234,153 +241,170 @@ Other statements:
entry <entry name> [([<arguments>])]
- F2PY generates wrappers to all entry names using the signature
+ F2PY generates wrappers for all entry names using the signature
of the routine block.
- Tip: ``entry`` statement can be used to describe the signature
- of an arbitrary routine allowing F2PY to generate a number of
- wrappers from only one routine block signature. There are few
- restrictions while doing this: ``fortranname`` cannot be used,
- ``callstatement`` and ``callprotoargument`` can be used only if
- they are valid for all entry routines, etc.
+ .. note::
+
+ The ``entry`` statement can be used to describe the signature of an
+ arbitrary subroutine or function allowing F2PY to generate a number of
+ wrappers from only one routine block signature. There are few
+ restrictions while doing this: ``fortranname`` cannot be used,
+ ``callstatement`` and ``callprotoargument`` can be used only if they are
+ valid for all entry routines, etc.
+
+F2PY statements
+^^^^^^^^^^^^^^^^
In addition, F2PY introduces the following statements:
- + ``threadsafe``
- Use ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block
- around the call to Fortran/C function.
-
- + ``callstatement <C-expr|multi-line block>``
- Replace F2PY generated call statement to Fortran/C function with
- ``<C-expr|multi-line block>``. The wrapped Fortran/C function
- is available as ``(*f2py_func)``. To raise an exception, set
- ``f2py_success = 0`` in ``<C-expr|multi-line block>``.
-
- + ``callprotoargument <C-typespecs>``
- When ``callstatement`` statement is used then F2PY may not
- generate proper prototypes for Fortran/C functions (because
- ``<C-expr>`` may contain any function calls and F2PY has no way
- to determine what should be the proper prototype). With this
- statement you can explicitly specify the arguments of the
- corresponding prototype::
-
- extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>);
-
- + ``fortranname [<actual Fortran/C routine name>]``
- You can use arbitrary ``<routine name>`` for a given Fortran/C
- function. Then you have to specify
- ``<actual Fortran/C routine name>`` with this statement.
-
- If ``fortranname`` statement is used without
- ``<actual Fortran/C routine name>`` then a dummy wrapper is
- generated.
-
- + ``usercode <multi-line block>``
- When used inside ``python module`` block, then given C code
- will be inserted to generated C/API source just before
- wrapper function definitions. Here you can define arbitrary
- C functions to be used in initialization of optional arguments,
- for example. If ``usercode`` is used twice inside ``python
- module`` block then the second multiline block is inserted
- after the definition of external routines.
-
- When used inside ``<routine signature>``, then given C code will
- be inserted to the corresponding wrapper function just after
- declaring variables but before any C statements. So, ``usercode``
- follow-up can contain both declarations and C statements.
-
- When used inside the first ``interface`` block, then given C
- code will be inserted at the end of the initialization
- function of the extension module. Here you can modify extension
- modules dictionary. For example, for defining additional
- variables etc.
-
- + ``pymethoddef <multiline block>``
- Multiline block will be inserted to the definition of
- module methods ``PyMethodDef``-array. It must be a
- comma-separated list of C arrays (see `Extending and Embedding`__
- Python documentation for details).
- ``pymethoddef`` statement can be used only inside
- ``python module`` block.
+``threadsafe``
+ Uses a ``Py_BEGIN_ALLOW_THREADS .. Py_END_ALLOW_THREADS`` block
+ around the call to Fortran/C function.
+
+``callstatement <C-expr|multi-line block>``
+ Replaces the F2PY generated call statement to Fortran/C function with
+ ``<C-expr|multi-line block>``. The wrapped Fortran/C function is available
+ as ``(*f2py_func)``.
+
+ To raise an exception, set ``f2py_success = 0`` in ``<C-expr|multi-line
+ block>``.
+
+``callprotoargument <C-typespecs>``
+ When the ``callstatement`` statement is used then F2PY may not
+ generate proper prototypes for Fortran/C functions (because
+ ``<C-expr>`` may contain any function calls and F2PY has no way
+ to determine what should be the proper prototype).
+
+ With this statement you can explicitly specify the arguments of the
+ corresponding prototype::
+
+ extern <return type> FUNC_F(<routine name>,<ROUTINE NAME>)(<callprotoargument>);
+
+``fortranname [<actual Fortran/C routine name>]``
+ F2PY allows for the use of an arbitrary ``<routine name>`` for a given
+ Fortran/C function. Then this statement is used for the ``<actual
+ Fortran/C routine name>``.
+
+ If ``fortranname`` statement is used without
+ ``<actual Fortran/C routine name>`` then a dummy wrapper is
+ generated.
+
+``usercode <multi-line block>``
+ When this is used inside a ``python module`` block, the given C code will
+ be inserted to generated C/API source just before wrapper function
+ definitions.
+
+ Here you can define arbitrary C functions to be used for the
+ initialization of optional arguments.
+
+ For example, if ``usercode`` is used twice inside ``python module`` block
+ then the second multi-line block is inserted after the definition of
+ the external routines.
+
+ When used inside ``<routine signature>``, then the given C code will be
+ inserted into the corresponding wrapper function just after the
+ declaration of variables but before any C statements. So, the
+ ``usercode`` follow-up can contain both declarations and C statements.
+
+ When used inside the first ``interface`` block, then the given C code will
+ be inserted at the end of the initialization function of the extension
+ module. This is how the extension modules dictionary can be modified and
+ has many use-cases; for example, to define additional variables.
+
+``pymethoddef <multiline block>``
+  This is a multi-line block which will be inserted into the definition of the
+  module's ``PyMethodDef`` array of methods. It must be a comma-separated list of
+ C arrays (see `Extending and Embedding`__ Python documentation for
+ details). ``pymethoddef`` statement can be used only inside ``python
+ module`` block.
__ https://docs.python.org/extending/index.html
Attributes
-------------
+============
The following attributes are used by F2PY:
``optional``
The corresponding argument is moved to the end of ``<optional
arguments>`` list. A default value for an optional argument can be
- specified ``<init_expr>``, see ``entitydecl`` definition. Note that
- the default value must be given as a valid C expression.
+ specified via ``<init_expr>``, see the ``entitydecl`` definition.
+
- Note that whenever ``<init_expr>`` is used, ``optional`` attribute
- is set automatically by F2PY.
+ .. note::
- For an optional array argument, all its dimensions must be bounded.
+ * The default value must be given as a valid C expression.
+ * Whenever ``<init_expr>`` is used, ``optional`` attribute
+ is set automatically by F2PY.
+ * For an optional array argument, all its dimensions must be bounded.
``required``
- The corresponding argument is considered as a required one. This is
- default. You need to specify ``required`` only if there is a need to
- disable automatic ``optional`` setting when ``<init_expr>`` is used.
+  The corresponding argument with this attribute is considered mandatory. This is
+ the default. ``required`` should only be specified if there is a need to
+ disable the automatic ``optional`` setting when ``<init_expr>`` is used.
- If Python ``None`` object is used as a required argument, the
+ If a Python ``None`` object is used as a required argument, the
argument is treated as optional. That is, in the case of array
- argument, the memory is allocated. And if ``<init_expr>`` is given,
- the corresponding initialization is carried out.
+ argument, the memory is allocated. If ``<init_expr>`` is given, then the
+ corresponding initialization is carried out.
``dimension(<arrayspec>)``
- The corresponding variable is considered as an array with given
- dimensions in ``<arrayspec>``.
+ The corresponding variable is considered as an array with dimensions given in
+ ``<arrayspec>``.
``intent(<intentspec>)``
This specifies the "intention" of the corresponding
argument. ``<intentspec>`` is a comma separated list of the
following keys:
- + ``in``
- The argument is considered as an input-only argument. It means
- that the value of the argument is passed to Fortran/C function and
- that function is expected not to change the value of an argument.
-
- + ``inout``
- The argument is considered as an input/output or *in situ*
- output argument. ``intent(inout)`` arguments can be only
- "contiguous" NumPy arrays with proper type and size. Here
- "contiguous" can be either in Fortran or C sense. The latter one
- coincides with the contiguous concept used in NumPy and is
- effective only if ``intent(c)`` is used. Fortran contiguity
- is assumed by default.
-
- Using ``intent(inout)`` is generally not recommended, use
- ``intent(in,out)`` instead. See also ``intent(inplace)`` attribute.
-
- + ``inplace``
- The argument is considered as an input/output or *in situ*
- output argument. ``intent(inplace)`` arguments must be
- NumPy arrays with proper size. If the type of an array is
- not "proper" or the array is non-contiguous then the array
- will be changed in-place to fix the type and make it contiguous.
-
- Using ``intent(inplace)`` is generally not recommended either.
- For example, when slices have been taken from an
- ``intent(inplace)`` argument then after in-place changes,
- slices data pointers may point to unallocated memory area.
-
- + ``out``
- The argument is considered as a return variable. It is appended
- to the ``<returned variables>`` list. Using ``intent(out)``
- sets ``intent(hide)`` automatically, unless also
- ``intent(in)`` or ``intent(inout)`` were used.
-
- By default, returned multidimensional arrays are
- Fortran-contiguous. If ``intent(c)`` is used, then returned
- multidimensional arrays are C-contiguous.
-
- + ``hide``
- The argument is removed from the list of required or optional
+ * ``in``
+ The corresponding argument is considered to be input-only. This means that the value of
+ the argument is passed to a Fortran/C function and that the function is
+ expected to not change the value of this argument.
+
+ * ``inout``
+ The corresponding argument is marked for input/output or as an *in situ* output
+ argument. ``intent(inout)`` arguments can be only "contiguous" NumPy
+ arrays with proper type and size. Here "contiguous" can be either in the
+ Fortran or C sense. The latter coincides with the default contiguous
+ concept used in NumPy and is effective only if ``intent(c)`` is used. F2PY
+ assumes Fortran contiguous arguments by default.
+
+ .. note::
+
+ Using ``intent(inout)`` is generally not recommended, use ``intent(in,out)`` instead.
+
+ See also the ``intent(inplace)`` attribute.
+
+ * ``inplace``
+ The corresponding argument is considered to be an input/output or *in situ* output
+ argument. ``intent(inplace)`` arguments must be NumPy arrays of a proper
+ size. If the type of an array is not "proper" or the array is
+ non-contiguous then the array will be modified in-place to fix the type and
+ make it contiguous.
+
+ .. note::
+
+ Using ``intent(inplace)`` is generally not recommended either.
+
+ For example, when slices have been taken from an ``intent(inplace)`` argument
+ then after in-place changes, the data pointers for the slices may point to
+ an unallocated memory area.
+
+
+ * ``out``
+ The corresponding argument is considered to be a return variable. It is appended to the
+ ``<returned variables>`` list. Using ``intent(out)`` sets ``intent(hide)``
+ automatically, unless ``intent(in)`` or ``intent(inout)`` are specified
+ as well.
+
+ By default, returned multidimensional arrays are Fortran-contiguous. If
+ ``intent(c)`` attribute is used, then the returned multidimensional arrays
+ are C-contiguous.
+
+ * ``hide``
+ The corresponding argument is removed from the list of required or optional
arguments. Typically ``intent(hide)`` is used with ``intent(out)``
or when ``<init_expr>`` completely determines the value of the
argument like in the following example::
@@ -388,18 +412,17 @@ The following attributes are used by F2PY:
integer intent(hide),depend(a) :: n = len(a)
real intent(in),dimension(n) :: a
- + ``c``
- The argument is treated as a C scalar or C array argument. In
- the case of a scalar argument, its value is passed to C function
- as a C scalar argument (recall that Fortran scalar arguments are
- actually C pointer arguments). In the case of an array
- argument, the wrapper function is assumed to treat
+ * ``c``
+ The corresponding argument is treated as a C scalar or C array argument. For the case
+ of a scalar argument, its value is passed to a C function as a C scalar
+ argument (recall that Fortran scalar arguments are actually C pointer
+ arguments). For array arguments, the wrapper function is assumed to treat
multidimensional arrays as C-contiguous arrays.
There is no need to use ``intent(c)`` for one-dimensional
- arrays, no matter if the wrapped function is either a Fortran or
- a C function. This is because the concepts of Fortran- and
- C contiguity overlap in one-dimensional cases.
+ arrays, irrespective of whether the wrapped function is in Fortran or C.
+ This is because the concepts of Fortran- and C contiguity overlap in
+ one-dimensional cases.
If ``intent(c)`` is used as a statement but without an entity
declaration list, then F2PY adds the ``intent(c)`` attribute to all
@@ -409,110 +432,121 @@ The following attributes are used by F2PY:
attribute for ``<routine name>`` in order to disable Fortran
specific ``F_FUNC(..,..)`` macros.
- + ``cache``
- The argument is treated as a junk of memory. No Fortran nor C
- contiguity checks are carried out. Using ``intent(cache)``
- makes sense only for array arguments, also in connection with
- ``intent(hide)`` or ``optional`` attributes.
-
- + ``copy``
- Ensure that the original contents of ``intent(in)`` argument is
- preserved. Typically used in connection with ``intent(in,out)``
- attribute. F2PY creates an optional argument
- ``overwrite_<argument name>`` with the default value ``0``.
-
- + ``overwrite``
- The original contents of the ``intent(in)`` argument may be
- altered by the Fortran/C function. F2PY creates an optional
- argument ``overwrite_<argument name>`` with the default value
- ``1``.
-
- + ``out=<new name>``
- Replace the return name with ``<new name>`` in the ``__doc__``
- string of a wrapper function.
-
- + ``callback``
- Construct an external function suitable for calling Python function
+ * ``cache``
+ The corresponding argument is treated as junk memory. No Fortran nor C contiguity
+ checks are carried out. Using ``intent(cache)`` makes sense only for array
+ arguments, also in conjunction with ``intent(hide)`` or ``optional``
+ attributes.
+
+ * ``copy``
+ Ensures that the original contents of ``intent(in)`` argument is
+ preserved. Typically used with the ``intent(in,out)`` attribute. F2PY
+ creates an optional argument ``overwrite_<argument name>`` with the
+ default value ``0``.
+
+ * ``overwrite``
+ This indicates that the original contents of the ``intent(in)`` argument
+ may be altered by the Fortran/C function. F2PY creates an optional
+ argument ``overwrite_<argument name>`` with the default value ``1``.
+
+ * ``out=<new name>``
+ Replaces the returned name with ``<new name>`` in the ``__doc__`` string
+ of the wrapper function.
+
+ * ``callback``
+ Constructs an external function suitable for calling Python functions
from Fortran. ``intent(callback)`` must be specified before the
- corresponding ``external`` statement. If 'argument' is not in
- argument list then it will be added to Python wrapper but only
- initializing external function.
-
- Use ``intent(callback)`` in situations where a Fortran/C code
- assumes that a user implements a function with given prototype
- and links it to an executable. Don't use ``intent(callback)``
- if function appears in the argument list of a Fortran routine.
-
- With ``intent(hide)`` or ``optional`` attributes specified and
- using a wrapper function without specifying the callback argument
- in argument list then call-back function is looked in the
- namespace of F2PY generated extension module where it can be
- set as a module attribute by a user.
-
- + ``aux``
- Define auxiliary C variable in F2PY generated wrapper function.
- Useful to save parameter values so that they can be accessed
- in initialization expression of other variables. Note that
- ``intent(aux)`` silently implies ``intent(c)``.
+ corresponding ``external`` statement. If the 'argument' is not in
+    the argument list then it will be added to the Python wrapper but only
+ by initializing an external function.
+
+ .. note::
+
+ Use ``intent(callback)`` in situations where the Fortran/C code assumes
+ that the user implemented a function with a given prototype and linked
+ it to an executable. Don't use ``intent(callback)`` if the function
+ appears in the argument list of a Fortran routine.
+
+ With ``intent(hide)`` or ``optional`` attributes specified and using a
+ wrapper function without specifying the callback argument in the argument
+    list, the call-back function is assumed to be found in the namespace
+ of the F2PY generated extension module where it can be set as a module
+ attribute by a user.
+
+ * ``aux``
+ Defines an auxiliary C variable in the F2PY generated wrapper function.
+ Useful to save parameter values so that they can be accessed in
+ initialization expressions for other variables.
+
+ .. note::
+
+ ``intent(aux)`` silently implies ``intent(c)``.
The following rules apply:
- + If no ``intent(in | inout | out | hide)`` is specified,
+ * If none of ``intent(in | inout | out | hide)`` are specified,
``intent(in)`` is assumed.
- + ``intent(in,inout)`` is ``intent(in)``.
- + ``intent(in,hide)`` or ``intent(inout,hide)`` is
- ``intent(hide)``.
- + ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or
- ``intent(inout)`` is specified.
- + If ``intent(copy)`` or ``intent(overwrite)`` is used, then an
- additional optional argument is introduced with a name
- ``overwrite_<argument name>`` and a default value 0 or 1, respectively.
- + ``intent(inout,inplace)`` is ``intent(inplace)``.
- + ``intent(in,inplace)`` is ``intent(inplace)``.
- + ``intent(hide)`` disables ``optional`` and ``required``.
+
+ * ``intent(in,inout)`` is ``intent(in)``;
+
+ * ``intent(in,hide)`` or ``intent(inout,hide)`` is ``intent(hide)``;
+
+ * ``intent(out)`` is ``intent(out,hide)`` unless ``intent(in)`` or
+ ``intent(inout)`` is specified.
+
+ * If ``intent(copy)`` or ``intent(overwrite)`` is used, then an additional
+ optional argument is introduced with a name ``overwrite_<argument name>``
+ and a default value 0 or 1, respectively.
+
+ * ``intent(inout,inplace)`` is ``intent(inplace)``;
+
+ * ``intent(in,inplace)`` is ``intent(inplace)``;
+
+ * ``intent(hide)`` disables ``optional`` and ``required``.
``check([<C-booleanexpr>])``
- Perform consistency check of arguments by evaluating
- ``<C-booleanexpr>``; if ``<C-booleanexpr>`` returns 0, an exception
- is raised.
+ Performs a consistency check on the arguments by evaluating
+ ``<C-booleanexpr>``; if ``<C-booleanexpr>`` returns 0, an exception is raised.
+
+ .. note::
- If ``check(..)`` is not used then F2PY generates few standard checks
- (e.g. in a case of an array argument, check for the proper shape
- and size) automatically. Use ``check()`` to disable checks generated
- by F2PY.
+ If ``check(..)`` is not used then F2PY automatically generates a few
+ standard checks (e.g. in a case of an array argument, it checks for the
+ proper shape and size). Use ``check()`` to disable checks
+ generated by F2PY.
``depend([<names>])``
This declares that the corresponding argument depends on the values
- of variables in the list ``<names>``. For example, ``<init_expr>``
+ of variables in the ``<names>`` list. For example, ``<init_expr>``
may use the values of other arguments. Using information given by
``depend(..)`` attributes, F2PY ensures that arguments are
- initialized in a proper order. If ``depend(..)`` attribute is not
+ initialized in a proper order. If the ``depend(..)`` attribute is not
used then F2PY determines dependence relations automatically. Use
- ``depend()`` to disable dependence relations generated by F2PY.
+ ``depend()`` to disable the dependence relations generated by F2PY.
When you edit dependence relations that were initially generated by
F2PY, be careful not to break the dependence relations of other
- relevant variables. Another thing to watch out is cyclic
+ relevant variables. Another thing to watch out for is cyclic
dependencies. F2PY is able to detect cyclic dependencies
when constructing wrappers and it complains if any are found.
``allocatable``
- The corresponding variable is Fortran 90 allocatable array defined
- as Fortran 90 module data.
+ The corresponding variable is a Fortran 90 allocatable array defined as
+ Fortran 90 module data.
.. _external:
``external``
The corresponding argument is a function provided by user. The
- signature of this so-called call-back function can be defined
+ signature of this call-back function can be defined
- in ``__user__`` module block,
- or by demonstrative (or real, if the signature file is a real Fortran
code) call in the ``<other statements>`` block.
- For example, F2PY generates from
+ For example, F2PY generates from:
- ::
+ .. code-block:: fortran
external cb_sub, cb_fun
integer n
@@ -520,7 +554,9 @@ The following attributes are used by F2PY:
call cb_sub(a,n)
r = cb_fun(4)
- the following call-back signatures::
+ the following call-back signatures:
+
+ .. code-block:: fortran
subroutine cb_sub(a,n)
real dimension(n) :: a
@@ -531,7 +567,9 @@ The following attributes are used by F2PY:
real :: r
end function cb_fun
- The corresponding user-provided Python function are then::
 + The corresponding user-provided Python functions are then:
+
+ .. code-block:: python
def cb_sub(a,[n]):
...
@@ -540,49 +578,50 @@ The following attributes are used by F2PY:
...
return r
- See also ``intent(callback)`` attribute.
+ See also the ``intent(callback)`` attribute.
``parameter``
- The corresponding variable is a parameter and it must have a fixed
- value. F2PY replaces all parameter occurrences by their
- corresponding values.
+ This indicates that the corresponding variable is a parameter and it must have
+ a fixed value. F2PY replaces all parameter occurrences by their corresponding
+ values.
Extensions
============
F2PY directives
------------------
+^^^^^^^^^^^^^^^^
-The so-called F2PY directives allow using F2PY signature file
-constructs also in Fortran 77/90 source codes. With this feature you
-can skip (almost) completely intermediate signature file generations
-and apply F2PY directly to Fortran source codes.
+The F2PY directives allow using F2PY signature file constructs in
+Fortran 77/90 source codes. With this feature one can (almost) completely skip
+the intermediate signature file generation and apply F2PY directly to Fortran
+source codes.
-F2PY directive has the following form::
+F2PY directives have the following form::
<comment char>f2py ...
where allowed comment characters for fixed and free format Fortran
codes are ``cC*!#`` and ``!``, respectively. Everything that follows
``<comment char>f2py`` is ignored by a compiler but read by F2PY as a
-normal Fortran, non-comment line:
+normal non-comment Fortran line:
+.. note::
When F2PY finds a line with F2PY directive, the directive is first
replaced by 5 spaces and then the line is reread.
For fixed format Fortran codes, ``<comment char>`` must be at the
first column of a file, of course. For free format Fortran codes,
-F2PY directives can appear anywhere in a file.
+the F2PY directives can appear anywhere in a file.
C expressions
---------------
+^^^^^^^^^^^^^^
C expressions are used in the following parts of signature files:
-* ``<init_expr>`` of variable initialization;
+* ``<init_expr>`` for variable initialization;
* ``<C-booleanexpr>`` of the ``check`` attribute;
-* ``<arrayspec> of the ``dimension`` attribute;
-* ``callstatement`` statement, here also a C multiline block can be used.
+* ``<arrayspec>`` of the ``dimension`` attribute;
+* ``callstatement`` statement, here also a C multi-line block can be used.
A C expression may contain:
@@ -592,15 +631,19 @@ A C expression may contain:
according to given dependence relations;
* the following CPP macros:
- ``rank(<name>)``
+ * ``rank(<name>)``
Returns the rank of an array ``<name>``.
- ``shape(<name>,<n>)``
+
+ * ``shape(<name>,<n>)``
Returns the ``<n>``-th dimension of an array ``<name>``.
- ``len(<name>)``
+
+ * ``len(<name>)``
Returns the length of an array ``<name>``.
- ``size(<name>)``
+
+ * ``size(<name>)``
Returns the size of an array ``<name>``.
- ``slen(<name>)``
+
+ * ``slen(<name>)``
Returns the length of a string ``<name>``.
For initializing an array ``<array name>``, F2PY generates a loop over
@@ -615,7 +658,7 @@ from ``0`` to ``shape(<array name>,<i>)-1``.
For example, a function ``myrange(n)`` generated from the following
signature
-::
+.. code-block::
subroutine myrange(a,n)
fortranname ! myrange is a dummy wrapper
@@ -630,23 +673,23 @@ is equivalent to ``numpy.arange(n,dtype=float)``.
F2PY may lower cases also in C expressions when scanning Fortran codes
(see ``--[no]-lower`` option).
-Multiline blocks
-------------------
+Multi-line blocks
+^^^^^^^^^^^^^^^^^^
-A multiline block starts with ``'''`` (triple single-quotes) and ends
-with ``'''`` in some *strictly* subsequent line. Multiline blocks can
-be used only within .pyf files. The contents of a multiline block can
+A multi-line block starts with ``'''`` (triple single-quotes) and ends
+with ``'''`` in some *strictly* subsequent line. Multi-line blocks can
+be used only within .pyf files. The contents of a multi-line block can
be arbitrary (except that it cannot contain ``'''``) and no
transformations (e.g. lowering cases) are applied to it.
-Currently, multiline blocks can be used in the following constructs:
+Currently, multi-line blocks can be used in the following constructs:
-+ as a C expression of the ``callstatement`` statement;
+* as a C expression of the ``callstatement`` statement;
-+ as a C type specification of the ``callprotoargument`` statement;
+* as a C type specification of the ``callprotoargument`` statement;
-+ as a C code block of the ``usercode`` statement;
+* as a C code block of the ``usercode`` statement;
-+ as a list of C arrays of the ``pymethoddef`` statement;
+* as a list of C arrays of the ``pymethoddef`` statement;
-+ as documentation string.
+* as a documentation string.
diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst
index 6c3b4b6ef..596148799 100644
--- a/doc/source/f2py/usage.rst
+++ b/doc/source/f2py/usage.rst
@@ -3,9 +3,9 @@ Using F2PY
===========
F2PY can be used either as a command line tool ``f2py`` or as a Python
-module ``numpy.f2py``. While we try to install the command line tool as part
+module ``numpy.f2py``. While we try to provide the command line tool as part
of the numpy setup, some platforms like Windows make it difficult to
-reliably put the executable on the ``PATH``. We will refer to ``f2py``
+reliably put the executables on the ``PATH``. We will refer to ``f2py``
in this document but you may have to run it as a module::
python -m numpy.f2py
@@ -21,32 +21,40 @@ Command ``f2py``
When used as a command line tool, ``f2py`` has three major modes,
distinguished by the usage of ``-c`` and ``-h`` switches:
+Signature file generation
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+
1. To scan Fortran sources and generate a signature file, use
- ::
+ .. code-block:: sh
f2py -h <filename.pyf> <options> <fortran files> \
[[ only: <fortran functions> : ] \
[ skip: <fortran functions> : ]]... \
[<fortran files> ...]
- Note that a Fortran source file can contain many routines, and not
- necessarily all routines are needed to be used from Python. So, you
- can either specify which routines should be wrapped (in ``only: .. :``
- part) or which routines F2PY should ignored (in ``skip: .. :`` part).
+ .. note::
+
+ A Fortran source file can contain many routines, and it is often
 + not necessary to allow all routines to be usable from Python. In such cases,
 + either specify which routines should be wrapped (in the ``only: .. :`` part)
 + or which routines F2PY should ignore (in the ``skip: .. :`` part).
If ``<filename.pyf>`` is specified as ``stdout`` then signatures
- are send to standard output instead of a file.
+ are written to standard output instead of a file.
- Among other options (see below), the following options can be used
+ Among other options (see below), the following can be used
in this mode:
``--overwrite-signature``
- Overwrite existing signature file.
+ Overwrites an existing signature file.
+
+Extension module construction
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2. To construct an extension module, use
- ::
+ .. code-block:: sh
f2py -m <modulename> <options> <fortran files> \
[[ only: <fortran functions> : ] \
@@ -61,17 +69,19 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
in this mode:
``--debug-capi``
- Add debugging hooks to the extension module. When using this
- extension module, various information about the wrapper is printed
- to standard output, for example, the values of variables, the
- steps taken, etc.
+ Adds debugging hooks to the extension module. When using this extension
+ module, various diagnostic information about the wrapper is written to
+ the standard output, for example, the values of variables, the steps taken,
+ etc.
``-include'<includefile>'``
Add a CPP ``#include`` statement to the extension module source.
- ``<includefile>`` should be given in one of the following forms::
+ ``<includefile>`` should be given in one of the following forms
- "filename.ext"
- <filename.ext>
+ .. code-block:: cpp
+
+ "filename.ext"
+ <filename.ext>
The include statement is inserted just before the wrapper
functions. This feature enables using arbitrary C functions
@@ -91,16 +101,19 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
List system resources found by ``numpy_distutils/system_info.py``.
For example, try ``f2py --help-link lapack_opt``.
+Building a module
+^^^^^^^^^^^^^^^^^
+
3. To build an extension module, use
- ::
+ .. code-block:: sh
f2py -c <options> <fortran files> \
[[ only: <fortran functions> : ] \
[ skip: <fortran functions> : ]]... \
[ <fortran/c source files> ] [ <.o, .a, .so files> ]
- If ``<fortran files>`` contains a signature file, then a source for
+ If ``<fortran files>`` contains a signature file, then the source for
an extension module is constructed, all Fortran and C sources are
compiled, and finally all object and library files are linked to the
extension module ``<modulename>.so`` which is saved into the current
@@ -108,26 +121,25 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
If ``<fortran files>`` does not contain a signature file, then an
extension module is constructed by scanning all Fortran source codes
- for routine signatures.
+ for routine signatures, before proceeding to build the extension module.
- Among other options (see below) and options described in previous
- mode, the following options can be used in this mode:
+ Among other options (see below) and options described for previous
+ modes, the following options can be used in this mode:
``--help-fcompiler``
- List available Fortran compilers.
- ``--help-compiler`` [depreciated]
- List available Fortran compilers.
+ List the available Fortran compilers.
 + ``--help-compiler`` **[deprecated]**
+ List the available Fortran compilers.
``--fcompiler=<Vendor>``
- Specify Fortran compiler type by vendor.
+ Specify a Fortran compiler type by vendor.
``--f77exec=<path>``
- Specify the path to F77 compiler
- ``--fcompiler-exec=<path>`` [depreciated]
- Specify the path to F77 compiler
 + Specify the path to an F77 compiler
 + ``--fcompiler-exec=<path>`` **[deprecated]**
 + Specify the path to an F77 compiler
``--f90exec=<path>``
- Specify the path to F90 compiler
- ``--f90compiler-exec=<path>`` [depreciated]
- Specify the path to F90 compiler
-
 + Specify the path to an F90 compiler
 + ``--f90compiler-exec=<path>`` **[deprecated]**
 + Specify the path to an F90 compiler
``--f77flags=<string>``
Specify F77 compiler flags
``--f90flags=<string>``
@@ -137,12 +149,11 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
``--arch=<string>``
Specify architecture specific optimization flags
``--noopt``
- Compile without optimization
+ Compile without optimization flags
``--noarch``
- Compile without arch-dependent optimization
+ Compile without arch-dependent optimization flags
``--debug``
Compile with debugging information
-
``-l<libname>``
Use the library ``<libname>`` when linking.
``-D<macro>[=<defn=1>]``
@@ -155,34 +166,35 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
``-L<dir>``
Add directory ``<dir>`` to the list of directories to be searched
for ``-l``.
-
``link-<resource>``
- Link extension module with <resource> as defined by
+ Link the extension module with <resource> as defined by
``numpy_distutils/system_info.py``. E.g. to link with optimized
LAPACK libraries (vecLib on MacOSX, ATLAS elsewhere), use
``--link-lapack_opt``. See also ``--help-link`` switch.
.. note:: The ``f2py -c`` option must be applied either to an existing ``.pyf`` file (plus the source/object/library files) or one must specify the ``-m <modulename>`` option (plus the sources/object/library files). Use one of the following options:
- ::
+ .. code-block:: sh
f2py -c -m fib1 fib1.f
- or
+ or
- ::
+ .. code-block:: sh
f2py -m fib1 fib1.f -h fib1.pyf
f2py -c fib1.pyf fib1.f
- For more information, see `Building C and C++ Extensions`__ Python documentation for details.
 + For more information, see the `Building C and C++ Extensions`__ Python documentation.
- __ https://docs.python.org/3/extending/building.html
+ __ https://docs.python.org/3/extending/building.html
When building an extension module, a combination of the following
- macros may be required for non-gcc Fortran compilers::
-
+ macros may be required for non-gcc Fortran compilers:
+
+ .. code-block:: sh
+
-DPREPEND_FORTRAN
-DNO_APPEND_FORTRAN
-DUPPERCASE_FORTRAN
@@ -197,11 +209,13 @@ distinguished by the usage of ``-c`` and ``-h`` switches:
of an array argument is larger than ``<int>``, a message about
     the copying is sent to ``stderr``.
-Other options:
+Other options
+^^^^^^^^^^^^^
``-m <modulename>``
- Name of an extension module. Default is ``untitled``. Don't use this option
- if a signature file (\*.pyf) is used.
+ Name of an extension module. Default is ``untitled``.
+
+ .. warning:: Don't use this option if a signature file (\*.pyf) is used.
``--[no-]lower``
Do [not] lower the cases in ``<fortran files>``. By default,
``--lower`` is assumed with ``-h`` switch, and ``--no-lower``
@@ -214,7 +228,7 @@ Other options:
``--verbose``
Run with extra verbosity.
``-v``
- Print f2py version ID and exit.
+ Print the F2PY version and exit.
Execute ``f2py`` without any options to get an up-to-date list of
available options.
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 21dec00fe..aac820a6f 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -1,7 +1,7 @@
.. _numpy_docs_mainpage:
###################
-NumPy Documentation
+NumPy documentation
###################
.. toctree::
diff --git a/doc/source/reference/alignment.rst b/doc/source/reference/alignment.rst
index 5e4315b38..70ded916a 100644
--- a/doc/source/reference/alignment.rst
+++ b/doc/source/reference/alignment.rst
@@ -1,104 +1,13 @@
-.. _alignment:
+:orphan:
+****************
Memory Alignment
-================
+****************
-Numpy Alignment Goals
----------------------
+.. This document has been moved to ../dev/alignment.rst.
-There are three use-cases related to memory alignment in numpy (as of 1.14):
+This document has been moved to :ref:`alignment`.
- 1. Creating structured datatypes with fields aligned like in a C-struct.
- 2. Speeding up copy operations by using uint assignment in instead of memcpy
- 3. Guaranteeing safe aligned access for ufuncs/setitem/casting code
-Numpy uses two different forms of alignment to achieve these goals:
-"True alignment" and "Uint alignment".
-
-"True" alignment refers to the architecture-dependent alignment of an
-equivalent C-type in C. For example, in x64 systems ``numpy.float64`` is
-equivalent to ``double`` in C. On most systems this has either an alignment of
-4 or 8 bytes (and this can be controlled in gcc by the option
-``malign-double``). A variable is aligned in memory if its memory offset is a
-multiple of its alignment. On some systems (eg sparc) memory alignment is
-required, on others it gives a speedup.
-
-"Uint" alignment depends on the size of a datatype. It is defined to be the
-"True alignment" of the uint used by numpy's copy-code to copy the datatype, or
-undefined/unaligned if there is no equivalent uint. Currently numpy uses uint8,
-uint16, uint32, uint64 and uint64 to copy data of size 1,2,4,8,16 bytes
-respectively, and all other sized datatypes cannot be uint-aligned.
-
-For example, on a (typical linux x64 gcc) system, the numpy ``complex64``
-datatype is implemented as ``struct { float real, imag; }``. This has "true"
-alignment of 4 and "uint" alignment of 8 (equal to the true alignment of
-``uint64``).
-
-Some cases where uint and true alignment are different (default gcc linux):
- arch type true-aln uint-aln
- ---- ---- -------- --------
- x86_64 complex64 4 8
- x86_64 float128 16 8
- x86 float96 4 -
-
-
-Variables in Numpy which control and describe alignment
--------------------------------------------------------
-
-There are 4 relevant uses of the word ``align`` used in numpy:
-
- * The ``dtype.alignment`` attribute (``descr->alignment`` in C). This is meant
- to reflect the "true alignment" of the type. It has arch-dependent default
- values for all datatypes, with the exception of structured types created
- with ``align=True`` as described below.
- * The ``ALIGNED`` flag of an ndarray, computed in ``IsAligned`` and checked
- by ``PyArray_ISALIGNED``. This is computed from ``dtype.alignment``.
- It is set to ``True`` if every item in the array is at a memory location
- consistent with ``dtype.alignment``, which is the case if the data ptr and
- all strides of the array are multiples of that alignment.
- * The ``align`` keyword of the dtype constructor, which only affects structured
- arrays. If the structure's field offsets are not manually provided numpy
- determines offsets automatically. In that case, ``align=True`` pads the
- structure so that each field is "true" aligned in memory and sets
- ``dtype.alignment`` to be the largest of the field "true" alignments. This
- is like what C-structs usually do. Otherwise if offsets or itemsize were
- manually provided ``align=True`` simply checks that all the fields are
- "true" aligned and that the total itemsize is a multiple of the largest
- field alignment. In either case ``dtype.isalignedstruct`` is also set to
- True.
- * ``IsUintAligned`` is used to determine if an ndarray is "uint aligned" in
- an analogous way to how ``IsAligned`` checks for true-alignment.
-
-Consequences of alignment
--------------------------
-
-Here is how the variables above are used:
-
- 1. Creating aligned structs: In order to know how to offset a field when
- ``align=True``, numpy looks up ``field.dtype.alignment``. This includes
- fields which are nested structured arrays.
- 2. Ufuncs: If the ``ALIGNED`` flag of an array is False, ufuncs will
- buffer/cast the array before evaluation. This is needed since ufunc inner
- loops access raw elements directly, which might fail on some archs if the
- elements are not true-aligned.
- 3. Getitem/setitem/copyswap function: Similar to ufuncs, these functions
- generally have two code paths. If ``ALIGNED`` is False they will
- use a code path that buffers the arguments so they are true-aligned.
- 4. Strided copy code: Here, "uint alignment" is used instead. If the itemsize
- of an array is equal to 1, 2, 4, 8 or 16 bytes and the array is uint
- aligned then instead numpy will do ``*(uintN*)dst) = *(uintN*)src)`` for
- appropriate N. Otherwise numpy copies by doing ``memcpy(dst, src, N)``.
- 5. Nditer code: Since this often calls the strided copy code, it must
- check for "uint alignment".
- 6. Cast code: This checks for "true" alignment, as it does
- ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does
- ``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)``
- where dstval/srcval are aligned.
-
-Note that the strided-copy and strided-cast code are deeply intertwined and so
-any arrays being processed by them must be both uint and true aligned, even
-though the copy-code only needs uint alignment and the cast code only true
-alignment. If there is ever a big rewrite of this code it would be good to
-allow them to use different alignments.
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index e3b8d270d..63c93821b 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -25,7 +25,7 @@ form of the string, and can be either a :ref:`date unit <arrays.dtypes.dateunits
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units. The datetime64 data type
+some additional SI-prefix seconds-based units. The datetime64 data type
also accepts the string "NAT", in any combination of lowercase/uppercase
letters, for a "Not A Time" value.
@@ -74,6 +74,18 @@ datetime type with generic units.
array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'],
dtype='datetime64[ms]')
+An array of datetimes can be constructed from integers representing
+POSIX timestamps with the given unit.
+
+.. admonition:: Example
+
+ >>> np.array([0, 1577836800], dtype='datetime64[s]')
+ array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'],
+ dtype='datetime64[s]')
+
+ >>> np.array([0, 1577836800000]).astype('datetime64[ms]')
+ array(['1970-01-01T00:00:00.000', '2020-01-01T00:00:00.000'],
+ dtype='datetime64[ms]')
The datetime type works with many common NumPy functions, for
example :func:`arange` can be used to generate ranges of dates.
@@ -120,9 +132,9 @@ Datetime and Timedelta Arithmetic
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64. The arguments for timedelta64 are a number,
+to complement datetime64. The arguments for timedelta64 are a number,
to represent the number of units, and a date/time unit, such as
-(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
.. admonition:: Example
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index b5ffa1a8b..8606bc8f1 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -562,3 +562,20 @@ The following methods implement the pickle protocol:
dtype.__reduce__
dtype.__setstate__
+
+Utility method for typing:
+
+.. autosummary::
+ :toctree: generated/
+
+ dtype.__class_getitem__
+
+Comparison operations:
+
+.. autosummary::
+ :toctree: generated/
+
+ dtype.__ge__
+ dtype.__gt__
+ dtype.__le__
+ dtype.__lt__
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index f2204752d..0f703b475 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -249,7 +249,6 @@ Other attributes
ndarray.real
ndarray.imag
ndarray.flat
- ndarray.ctypes
.. _arrays.ndarray.array-interface:
@@ -621,3 +620,10 @@ String representations:
ndarray.__str__
ndarray.__repr__
+
+Utility method for typing:
+
+.. autosummary::
+ :toctree: generated/
+
+ ndarray.__class_getitem__
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index abef66692..c691e802f 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -196,10 +196,10 @@ Inexact types
``f16`` prints as ``0.1`` because it is as close to that value as possible,
whereas the other types do not as they have more precision and therefore have
closer values.
-
+
Conversely, floating-point scalars of different precisions which approximate
the same decimal value may compare unequal despite printing identically:
-
+
>>> f16 = np.float16("0.1")
>>> f32 = np.float32("0.1")
>>> f64 = np.float64("0.1")
@@ -399,7 +399,7 @@ are also provided.
complex256
Alias for `numpy.clongdouble`, named after its size in bits.
- The existance of these aliases depends on the platform.
+ The existence of these aliases depends on the platform.
Other aliases
~~~~~~~~~~~~~
@@ -498,6 +498,13 @@ The exceptions to the above rules are given below:
generic.__setstate__
generic.setflags
+Utility method for typing:
+
+.. autosummary::
+ :toctree: generated/
+
+ number.__class_getitem__
+
Defining new types
==================
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 26a8f643d..bb4405825 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -325,8 +325,7 @@ From scratch
should be increased after the pointer is passed in, and the base member
of the returned ndarray should point to the Python object that owns
the data. This will ensure that the provided memory is not
- freed while the returned array is in existence. To free memory as soon
- as the ndarray is deallocated, set the OWNDATA flag on the returned ndarray.
+ freed while the returned array is in existence.
.. c:function:: PyObject* PyArray_SimpleNewFromDescr( \
int nd, npy_int const* dims, PyArray_Descr* descr)
@@ -519,34 +518,40 @@ From other objects
:c:data:`NPY_ARRAY_CARRAY`
- .. c:macro:: NPY_ARRAY_IN_ARRAY
+..
+ dedented to allow internal linking, pending a refactoring
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+.. c:macro:: NPY_ARRAY_IN_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
.. c:macro:: NPY_ARRAY_IN_FARRAY
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_OUT_ARRAY
+.. c:macro:: NPY_OUT_ARRAY
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED`
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_ARRAY_OUT_ARRAY
+.. c:macro:: NPY_ARRAY_OUT_ARRAY
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
- :c:data:`NPY_ARRAY_WRITEABLE`
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
+ :c:data:`NPY_ARRAY_WRITEABLE`
.. c:macro:: NPY_ARRAY_OUT_FARRAY
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
:c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_ARRAY_INOUT_ARRAY
+..
+ dedented to allow internal linking, pending a refactoring
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`
+.. c:macro:: NPY_ARRAY_INOUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`
.. c:macro:: NPY_ARRAY_INOUT_FARRAY
@@ -584,6 +589,9 @@ From other objects
did not have the _ARRAY_ macro namespace in them. That form
of the constant names is deprecated in 1.7.
+..
+ dedented to allow internal linking, pending a refactoring
+
.. c:macro:: NPY_ARRAY_NOTSWAPPED
Make sure the returned array has a data-type descriptor that is in
@@ -595,9 +603,13 @@ From other objects
not in machine byte- order), then a new data-type descriptor is
created and used with its byte-order field set to native.
-.. c:macro:: NPY_ARRAY_BEHAVED_NS
+ .. c:macro:: NPY_ARRAY_BEHAVED_NS
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_NOTSWAPPED`
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_NOTSWAPPED`
+
+..
+ dedented to allow internal linking, pending a refactoring
.. c:macro:: NPY_ARRAY_ELEMENTSTRIDES
@@ -723,6 +735,13 @@ From other objects
broadcastable to the shape of ``dest``. The data areas of dest
and src must not overlap.
+.. c:function:: int PyArray_CopyObject(PyArrayObject* dest, PyObject* src)
+
+ Assign an object ``src`` to a NumPy array ``dest`` according to
+ array-coercion rules. This is basically identical to
+ :c:func:`PyArray_FromAny`, but assigns directly to the output array.
+ Returns 0 on success and -1 on failures.
+
.. c:function:: int PyArray_MoveInto(PyArrayObject* dest, PyArrayObject* src)
Move data from the source array, ``src``, into the destination
@@ -1303,7 +1322,7 @@ User-defined data types
data-type object, *descr*, of the given *scalar* kind. Use
*scalar* = :c:data:`NPY_NOSCALAR` to register that an array of data-type
*descr* can be cast safely to a data-type whose type_number is
- *totype*.
+ *totype*. The return value is 0 on success or -1 on failure.
.. c:function:: int PyArray_TypeNumFromName( \
char const *str)
@@ -1443,7 +1462,9 @@ of the constant names is deprecated in 1.7.
.. c:macro:: NPY_ARRAY_OWNDATA
- The data area is owned by this array.
+ The data area is owned by this array. Should never be set manually, instead
+ create a ``PyObject`` wrapping the data and set the array's base to that
+ object. For an example, see the test in ``test_mem_policy``.
.. c:macro:: NPY_ARRAY_ALIGNED
@@ -2707,6 +2728,45 @@ cost of a slight overhead.
neighborhood. Calling this function after every point of the
neighborhood has been visited is undefined.
+Array mapping
+-------------
+
+Array mapping is the machinery behind advanced indexing.
+
+.. c:function:: PyObject* PyArray_MapIterArray(PyArrayObject *a, \
+ PyObject *index)
+
+ Use advanced indexing to iterate an array.
+
+.. c:function:: void PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, \
+ PyArrayObject **ret, int getmap)
+
+ Swap the axes to or from their inserted form. ``MapIter`` always puts the
+ advanced (array) indices first in the iteration. But if they are
+ consecutive, it will insert/transpose them back before returning.
+ This is stored as ``mit->consec != 0`` (the place where they are inserted).
+ For assignments, the opposite happens: the values to be assigned are
+ transposed (``getmap=1`` instead of ``getmap=0``). ``getmap=0`` and
+ ``getmap=1`` undo the other operation.
+
+.. c:function:: void PyArray_MapIterNext(PyArrayMapIterObject *mit)
+
+ This function needs to update the state of the map iterator
+ and point ``mit->dataptr`` to the memory-location of the next object.
+
+ Note that this function never handles an extra operand but provides
+ compatibility for an old (exposed) API.
+
+.. c:function:: PyObject* PyArray_MapIterArrayCopyIfOverlap(PyArrayObject *a, \
+ PyObject *index, int copy_if_overlap, PyArrayObject *extra_op)
+
+ Similar to :c:func:`PyArray_MapIterArray` but with an additional
+ ``copy_if_overlap`` argument. If ``copy_if_overlap != 0``, checks if ``a``
+ has memory overlap with any of the arrays in ``index`` and with
+ ``extra_op``, and make copies as appropriate to avoid problems if the
+ input is modified during the iteration. ``iter->array`` may contain a
+ copied array (UPDATEIFCOPY/WRITEBACKIFCOPY set).
+
Array Scalars
-------------
@@ -2719,13 +2779,19 @@ Array Scalars
whenever 0-dimensional arrays could be returned to Python.
.. c:function:: PyObject* PyArray_Scalar( \
- void* data, PyArray_Descr* dtype, PyObject* itemsize)
-
- Return an array scalar object of the given enumerated *typenum*
- and *itemsize* by **copying** from memory pointed to by *data*
- . If *swap* is nonzero then this function will byteswap the data
- if appropriate to the data-type because array scalars are always
- in correct machine-byte order.
+ void* data, PyArray_Descr* dtype, PyObject* base)
+
+ Return an array scalar object of the given *dtype* by **copying**
+ from memory pointed to by *data*. *base* is expected to be the
+ array object that is the owner of the data. *base* is required
+ if `dtype` is a ``void`` scalar, or if the ``NPY_USE_GETITEM``
+ flag is set and it is known that the ``getitem`` method uses
+ the ``arr`` argument without checking if it is ``NULL``. Otherwise
+ `base` may be ``NULL``.
+
+ If the data is not in native byte order (as indicated by
+ ``dtype->byteorder``) then this function will byteswap the data,
+ because array scalars are always in correct machine-byte order.
.. c:function:: PyObject* PyArray_ToScalar(void* data, PyArrayObject* arr)
diff --git a/doc/source/reference/c-api/data_memory.rst b/doc/source/reference/c-api/data_memory.rst
new file mode 100644
index 000000000..11a37adc4
--- /dev/null
+++ b/doc/source/reference/c-api/data_memory.rst
@@ -0,0 +1,158 @@
+.. _data_memory:
+
+Memory management in NumPy
+==========================
+
+The `numpy.ndarray` is a python class. It requires additional memory allocations
+to hold `numpy.ndarray.strides`, `numpy.ndarray.shape` and
+`numpy.ndarray.data` attributes. These attributes are specially allocated
+after creating the python object in `__new__`. The ``strides`` and
+``shape`` are stored in a piece of memory allocated internally.
+
+The ``data`` allocation used to store the actual array values (which could be
+pointers in the case of ``object`` arrays) can be very large, so NumPy has
+provided interfaces to manage its allocation and release. This document details
+how those interfaces work.
+
+Historical overview
+-------------------
+
+Since version 1.7.0, NumPy has exposed a set of ``PyDataMem_*`` functions
+(:c:func:`PyDataMem_NEW`, :c:func:`PyDataMem_FREE`, :c:func:`PyDataMem_RENEW`)
+which are backed by `malloc`, `free`, `realloc` respectively. In that version
+NumPy also exposed the `PyDataMem_EventHook` function described below, which
+wraps the OS-level calls.
+
+Since those early days, Python also improved its memory management
+capabilities, and began providing
+various :ref:`management policies <memoryoverview>` beginning in version
+3.4. These routines are divided into a set of domains, each domain has a
+:c:type:`PyMemAllocatorEx` structure of routines for memory management. Python also
+added a `tracemalloc` module to trace calls to the various routines. These
+tracking hooks were added to the NumPy ``PyDataMem_*`` routines.
+
+NumPy added a small cache of allocated memory in its internal
+``npy_alloc_cache``, ``npy_alloc_cache_zero``, and ``npy_free_cache``
+functions. These wrap ``alloc``, ``alloc-and-memset(0)`` and ``free``
+respectively, but when ``npy_free_cache`` is called, it adds the pointer to a
+short list of available blocks marked by size. These blocks can be re-used by
+subsequent calls to ``npy_alloc*``, avoiding memory thrashing.
+
+Configurable memory routines in NumPy (NEP 49)
+----------------------------------------------
+
+Users may wish to override the internal data memory routines with ones of their
+own. Since NumPy does not use the Python domain strategy to manage data memory,
+it provides an alternative set of C-APIs to change memory routines. There are
+no Python domain-wide strategies for large chunks of object data, so those are
+less suited to NumPy's needs. Users who wish to change the NumPy data memory
+management routines can use :c:func:`PyDataMem_SetHandler`, which uses a
+:c:type:`PyDataMem_Handler` structure to hold pointers to functions used to
+manage the data memory. The calls are still wrapped by internal routines to
+call :c:func:`PyTraceMalloc_Track`, :c:func:`PyTraceMalloc_Untrack`, and will
+use the :c:func:`PyDataMem_EventHookFunc` mechanism. Since the functions may
+change during the lifetime of the process, each ``ndarray`` carries with it the
+functions used at the time of its instantiation, and these will be used to
+reallocate or free the data memory of the instance.
+
+.. c:type:: PyDataMem_Handler
+
+ A struct to hold function pointers used to manipulate memory
+
+ .. code-block:: c
+
+ typedef struct {
+ char name[128]; /* multiple of 64 to keep the struct aligned */
+ PyDataMemAllocator allocator;
+ } PyDataMem_Handler;
+
+ where the allocator structure is
+
+ .. code-block:: c
+
+ /* The declaration of free differs from PyMemAllocatorEx */
+ typedef struct {
+ void *ctx;
+ void* (*malloc) (void *ctx, size_t size);
+ void* (*calloc) (void *ctx, size_t nelem, size_t elsize);
+ void* (*realloc) (void *ctx, void *ptr, size_t new_size);
+ void (*free) (void *ctx, void *ptr, size_t size);
+ } PyDataMemAllocator;
+
+.. c:function:: PyObject * PyDataMem_SetHandler(PyObject *handler)
+
+ Set a new allocation policy. If the input value is ``NULL``, will reset the
+ policy to the default. Return the previous policy, or
+ return ``NULL`` if an error has occurred. We wrap the user-provided functions
+ so they will still call the python and numpy memory management callback
+ hooks.
+
+.. c:function:: PyObject * PyDataMem_GetHandler()
+
+ Return the current policy that will be used to allocate data for the
+ next ``PyArrayObject``. On failure, return ``NULL``.
+
+For an example of setting up and using the PyDataMem_Handler, see the test in
+:file:`numpy/core/tests/test_mem_policy.py`.
+
+.. c:function:: void PyDataMem_EventHookFunc(void *inp, void *outp, size_t size, void *user_data);
+
+ This function will be called during data memory manipulation
+
+.. c:function:: PyDataMem_EventHookFunc * PyDataMem_SetEventHook(PyDataMem_EventHookFunc *newhook, void *user_data, void **old_data)
+
+ Sets the allocation event hook for numpy array data.
+
+ Returns a pointer to the previous hook or ``NULL``. If old_data is
+ non-``NULL``, the previous user_data pointer will be copied to it.
+
+ If not ``NULL``, hook will be called at the end of each ``PyDataMem_NEW/FREE/RENEW``:
+
+ .. code-block:: c
+
+ result = PyDataMem_NEW(size) -> (*hook)(NULL, result, size, user_data)
+ PyDataMem_FREE(ptr) -> (*hook)(ptr, NULL, 0, user_data)
+ result = PyDataMem_RENEW(ptr, size) -> (*hook)(ptr, result, size, user_data)
+
+ When the hook is called, the GIL will be held by the calling
+ thread. The hook should be written to be reentrant, if it performs
+ operations that might cause new allocation events (such as the
+  creation/destruction of numpy objects, or creating/destroying Python
+  objects, which might cause a gc).
+
+What happens when deallocating if there is no policy set
+--------------------------------------------------------
+
+A rare but useful technique is to allocate a buffer outside NumPy, use
+:c:func:`PyArray_NewFromDescr` to wrap the buffer in a ``ndarray``, then switch
+the ``OWNDATA`` flag to true. When the ``ndarray`` is released, the
+appropriate function from the ``ndarray``'s ``PyDataMem_Handler`` should be
+called to free the buffer. But the ``PyDataMem_Handler`` field was never set,
+it will be ``NULL``. For backward compatibility, NumPy will call ``free()`` to
+release the buffer. If ``NUMPY_WARN_IF_NO_MEM_POLICY`` is set to ``1``, a
+warning will be emitted. The current default is not to emit a warning, this may
+change in a future version of NumPy.
+
+A better technique would be to use a ``PyCapsule`` as a base object:
+
+.. code-block:: c
+
+   /* define a PyCapsule_Destructor, using the correct deallocator for buf */
+   void free_wrap(PyObject *capsule){
+       void * obj = PyCapsule_GetPointer(capsule, PyCapsule_GetName(capsule));
+       free(obj);
+   }
+
+ /* then inside the function that creates arr from buff */
+ ...
+ arr = PyArray_NewFromDescr(... buf, ...);
+ if (arr == NULL) {
+ return NULL;
+ }
+ capsule = PyCapsule_New(buf, "my_wrapped_buffer",
+ (PyCapsule_Destructor)&free_wrap);
+ if (PyArray_SetBaseObject(arr, capsule) == -1) {
+ Py_DECREF(arr);
+ return NULL;
+ }
+ ...
diff --git a/doc/source/reference/c-api/index.rst b/doc/source/reference/c-api/index.rst
index bb1ed154e..6288ff33b 100644
--- a/doc/source/reference/c-api/index.rst
+++ b/doc/source/reference/c-api/index.rst
@@ -49,3 +49,4 @@ code.
generalized-ufuncs
coremath
deprecations
+ data_memory
diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst
index 2208cdd2f..83644d8b2 100644
--- a/doc/source/reference/c-api/iterator.rst
+++ b/doc/source/reference/c-api/iterator.rst
@@ -1230,7 +1230,7 @@ Functions For Iteration
.. c:function:: npy_intp* NpyIter_GetIndexPtr(NpyIter* iter)
This gives back a pointer to the index being tracked, or NULL
- if no index is being tracked. It is only useable if one of
+ if no index is being tracked. It is only usable if one of
the flags :c:data:`NPY_ITER_C_INDEX` or :c:data:`NPY_ITER_F_INDEX`
were specified during construction.
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 39a17cc72..605a4ae71 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -94,7 +94,7 @@ PyArray_Type and PyArrayObject
PyArray_Descr *descr;
int flags;
PyObject *weakreflist;
- /* version dependend private members */
+ /* version dependent private members */
} PyArrayObject;
.. c:macro:: PyObject_HEAD
@@ -178,7 +178,7 @@ PyArray_Type and PyArrayObject
.. note::
- Further members are considered private and version dependend. If the size
+ Further members are considered private and version dependent. If the size
of the struct is important for your code, special care must be taken.
A possible use-case when this is relevant is subclassing in C.
If your code relies on ``sizeof(PyArrayObject)`` to be constant,
@@ -286,48 +286,54 @@ PyArrayDescr_Type and PyArray_Descr
array like behavior. Each bit in this member is a flag which are named
as:
- .. c:macro:: NPY_ITEM_REFCOUNT
+..
+ dedented to allow internal linking, pending a refactoring
- Indicates that items of this data-type must be reference
- counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+.. c:macro:: NPY_ITEM_REFCOUNT
+
+ Indicates that items of this data-type must be reference
+ counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
.. c:macro:: NPY_ITEM_HASOBJECT
Same as :c:data:`NPY_ITEM_REFCOUNT`.
- .. c:macro:: NPY_LIST_PICKLE
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: NPY_LIST_PICKLE
- Indicates arrays of this data-type must be converted to a list
- before pickling.
+ Indicates arrays of this data-type must be converted to a list
+ before pickling.
- .. c:macro:: NPY_ITEM_IS_POINTER
+.. c:macro:: NPY_ITEM_IS_POINTER
- Indicates the item is a pointer to some other data-type
+ Indicates the item is a pointer to some other data-type
- .. c:macro:: NPY_NEEDS_INIT
+.. c:macro:: NPY_NEEDS_INIT
- Indicates memory for this data-type must be initialized (set
- to 0) on creation.
+ Indicates memory for this data-type must be initialized (set
+ to 0) on creation.
- .. c:macro:: NPY_NEEDS_PYAPI
+.. c:macro:: NPY_NEEDS_PYAPI
- Indicates this data-type requires the Python C-API during
- access (so don't give up the GIL if array access is going to
- be needed).
+ Indicates this data-type requires the Python C-API during
+ access (so don't give up the GIL if array access is going to
+ be needed).
- .. c:macro:: NPY_USE_GETITEM
+.. c:macro:: NPY_USE_GETITEM
- On array access use the ``f->getitem`` function pointer
- instead of the standard conversion to an array scalar. Must
- use if you don't define an array scalar to go along with
- the data-type.
+ On array access use the ``f->getitem`` function pointer
+ instead of the standard conversion to an array scalar. Must
+ use if you don't define an array scalar to go along with
+ the data-type.
- .. c:macro:: NPY_USE_SETITEM
+.. c:macro:: NPY_USE_SETITEM
- When creating a 0-d array from an array scalar use
- ``f->setitem`` instead of the standard copy from an array
- scalar. Must use if you don't define an array scalar to go
- along with the data-type.
+ When creating a 0-d array from an array scalar use
+ ``f->setitem`` instead of the standard copy from an array
+ scalar. Must use if you don't define an array scalar to go
+ along with the data-type.
.. c:macro:: NPY_FROM_FIELDS
@@ -961,8 +967,8 @@ PyUFunc_Type and PyUFuncObject
.. deprecated:: 1.22
Some fallback support for this slot exists, but will be removed
- eventually. A univiersal function which relied on this will have
- eventually have to be ported.
+ eventually. A universal function that relied on this will
+ have to be ported eventually.
See ref:`NEP 41 <NEP41>` and ref:`NEP 43 <NEP43>`
.. c:member:: void *reserved2
@@ -989,14 +995,17 @@ PyUFunc_Type and PyUFuncObject
For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
- .. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE
- if the dim name ends in ``?``
+ if the dim name ends in ``?``
- .. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED
+.. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED
- if the dim size will be determined from the operands
- and not from a :ref:`frozen <frozen>` signature
+ if the dim size will be determined from the operands
+ and not from a :ref:`frozen <frozen>` signature
.. c:member:: PyObject *identity_value
diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst
index f18481235..20874ceaa 100644
--- a/doc/source/reference/global_state.rst
+++ b/doc/source/reference/global_state.rst
@@ -84,3 +84,13 @@ contiguous in memory.
Most users will have no reason to change these; for details
see the :ref:`memory layout <memory-layout>` documentation.
+
+Warn if no memory allocation policy when deallocating data
+----------------------------------------------------------
+
+Some users might pass ownership of the data pointer to the ``ndarray`` by
+setting the ``OWNDATA`` flag. If they do this without setting (manually) a
+memory allocation policy, the default will be to call ``free``. If
+``NUMPY_WARN_IF_NO_MEM_POLICY`` is set to ``"1"``, a ``RuntimeWarning`` will
+be emitted. A better alternative is to use a ``PyCapsule`` with a deallocator
+and set the ``ndarray.base``.
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index f12d923df..a18211cca 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -26,7 +26,6 @@ For learning how to use NumPy, see the :ref:`complete documentation <numpy_docs_
distutils
distutils_guide
c-api/index
- internals
simd/simd-optimizations
swig
diff --git a/doc/source/reference/internals.code-explanations.rst b/doc/source/reference/internals.code-explanations.rst
index e8e428f2e..d34314610 100644
--- a/doc/source/reference/internals.code-explanations.rst
+++ b/doc/source/reference/internals.code-explanations.rst
@@ -1,618 +1,9 @@
-.. currentmodule:: numpy
+:orphan:
*************************
NumPy C Code Explanations
*************************
- Fanaticism consists of redoubling your efforts when you have forgotten
- your aim.
- --- *George Santayana*
+.. This document has been moved to ../dev/internals.code-explanations.rst.
- An authority is a person who can tell you more about something than
- you really care to know.
- --- *Unknown*
-
-This Chapter attempts to explain the logic behind some of the new
-pieces of code. The purpose behind these explanations is to enable
-somebody to be able to understand the ideas behind the implementation
-somewhat more easily than just staring at the code. Perhaps in this
-way, the algorithms can be improved on, borrowed from, and/or
-optimized by more people.
-
-
-Memory model
-============
-
-.. index::
- pair: ndarray; memory model
-
-One fundamental aspect of the ndarray is that an array is seen as a
-"chunk" of memory starting at some location. The interpretation of
-this memory depends on the stride information. For each dimension in
-an :math:`N` -dimensional array, an integer (stride) dictates how many
-bytes must be skipped to get to the next element in that dimension.
-Unless you have a single-segment array, this stride information must
-be consulted when traversing through an array. It is not difficult to
-write code that accepts strides, you just have to use (char \*)
-pointers because strides are in units of bytes. Keep in mind also that
-strides do not have to be unit-multiples of the element size. Also,
-remember that if the number of dimensions of the array is 0 (sometimes
-called a rank-0 array), then the strides and dimensions variables are
-NULL.
-
-Besides the structural information contained in the strides and
-dimensions members of the :c:type:`PyArrayObject`, the flags contain
-important information about how the data may be accessed. In particular,
-the :c:data:`NPY_ARRAY_ALIGNED` flag is set when the memory is on a
-suitable boundary according to the data-type array. Even if you have
-a contiguous chunk of memory, you cannot just assume it is safe to
-dereference a data- type-specific pointer to an element. Only if the
-:c:data:`NPY_ARRAY_ALIGNED` flag is set is this a safe operation (on
-some platforms it will work but on others, like Solaris, it will cause
-a bus error). The :c:data:`NPY_ARRAY_WRITEABLE` should also be ensured
-if you plan on writing to the memory area of the array. It is also
-possible to obtain a pointer to an unwritable memory area. Sometimes,
-writing to the memory area when the :c:data:`NPY_ARRAY_WRITEABLE` flag is not
-set will just be rude. Other times it can cause program crashes ( *e.g.*
-a data-area that is a read-only memory-mapped file).
-
-
-Data-type encapsulation
-=======================
-
-.. index::
- single: dtype
-
-The data-type is an important abstraction of the ndarray. Operations
-will look to the data-type to provide the key functionality that is
-needed to operate on the array. This functionality is provided in the
-list of function pointers pointed to by the 'f' member of the
-:c:type:`PyArray_Descr` structure. In this way, the number of data-types can be
-extended simply by providing a :c:type:`PyArray_Descr` structure with suitable
-function pointers in the 'f' member. For built-in types there are some
-optimizations that by-pass this mechanism, but the point of the data-
-type abstraction is to allow new data-types to be added.
-
-One of the built-in data-types, the void data-type allows for
-arbitrary structured types containing 1 or more fields as elements of the
-array. A field is simply another data-type object along with an offset
-into the current structured type. In order to support arbitrarily nested
-fields, several recursive implementations of data-type access are
-implemented for the void type. A common idiom is to cycle through the
-elements of the dictionary and perform a specific operation based on
-the data-type object stored at the given offset. These offsets can be
-arbitrary numbers. Therefore, the possibility of encountering mis-
-aligned data must be recognized and taken into account if necessary.
-
-
-N-D Iterators
-=============
-
-.. index::
- single: array iterator
-
-A very common operation in much of NumPy code is the need to iterate
-over all the elements of a general, strided, N-dimensional array. This
-operation of a general-purpose N-dimensional loop is abstracted in the
-notion of an iterator object. To write an N-dimensional loop, you only
-have to create an iterator object from an ndarray, work with the
-dataptr member of the iterator object structure and call the macro
-:c:func:`PyArray_ITER_NEXT` (it) on the iterator object to move to the next
-element. The "next" element is always in C-contiguous order. The macro
-works by first special casing the C-contiguous, 1-D, and 2-D cases
-which work very simply.
-
-For the general case, the iteration works by keeping track of a list
-of coordinate counters in the iterator object. At each iteration, the
-last coordinate counter is increased (starting from 0). If this
-counter is smaller than one less than the size of the array in that
-dimension (a pre-computed and stored value), then the counter is
-increased and the dataptr member is increased by the strides in that
-dimension and the macro ends. If the end of a dimension is reached,
-the counter for the last dimension is reset to zero and the dataptr is
-moved back to the beginning of that dimension by subtracting the
-strides value times one less than the number of elements in that
-dimension (this is also pre-computed and stored in the backstrides
-member of the iterator object). In this case, the macro does not end,
-but a local dimension counter is decremented so that the next-to-last
-dimension replaces the role that the last dimension played and the
-previously-described tests are executed again on the next-to-last
-dimension. In this way, the dataptr is adjusted appropriately for
-arbitrary striding.
-
-The coordinates member of the :c:type:`PyArrayIterObject` structure maintains
-the current N-d counter unless the underlying array is C-contiguous in
-which case the coordinate counting is by-passed. The index member of
-the :c:type:`PyArrayIterObject` keeps track of the current flat index of the
-iterator. It is updated by the :c:func:`PyArray_ITER_NEXT` macro.
-
-
-Broadcasting
-============
-
-.. index::
- single: broadcasting
-
-In Numeric, the ancestor of Numpy, broadcasting was implemented in several
-lines of code buried deep in ufuncobject.c. In NumPy, the notion of broadcasting
-has been abstracted so that it can be performed in multiple places.
-Broadcasting is handled by the function :c:func:`PyArray_Broadcast`. This
-function requires a :c:type:`PyArrayMultiIterObject` (or something that is a
-binary equivalent) to be passed in. The :c:type:`PyArrayMultiIterObject` keeps
-track of the broadcast number of dimensions and size in each
-dimension along with the total size of the broadcast result. It also
-keeps track of the number of arrays being broadcast and a pointer to
-an iterator for each of the arrays being broadcast.
-
-The :c:func:`PyArray_Broadcast` function takes the iterators that have already
-been defined and uses them to determine the broadcast shape in each
-dimension (to create the iterators at the same time that broadcasting
-occurs then use the :c:func:`PyArray_MultiIterNew` function).
-Then, the iterators are
-adjusted so that each iterator thinks it is iterating over an array
-with the broadcast size. This is done by adjusting the iterators
-number of dimensions, and the shape in each dimension. This works
-because the iterator strides are also adjusted. Broadcasting only
-adjusts (or adds) length-1 dimensions. For these dimensions, the
-strides variable is simply set to 0 so that the data-pointer for the
-iterator over that array doesn't move as the broadcasting operation
-operates over the extended dimension.
-
-Broadcasting was always implemented in Numeric using 0-valued strides
-for the extended dimensions. It is done in exactly the same way in
-NumPy. The big difference is that now the array of strides is kept
-track of in a :c:type:`PyArrayIterObject`, the iterators involved in a
-broadcast result are kept track of in a :c:type:`PyArrayMultiIterObject`,
-and the :c:func:`PyArray_Broadcast` call implements the broad-casting rules.
-
-
-Array Scalars
-=============
-
-.. index::
- single: array scalars
-
-The array scalars offer a hierarchy of Python types that allow a one-
-to-one correspondence between the data-type stored in an array and the
-Python-type that is returned when an element is extracted from the
-array. An exception to this rule was made with object arrays. Object
-arrays are heterogeneous collections of arbitrary Python objects. When
-you select an item from an object array, you get back the original
-Python object (and not an object array scalar which does exist but is
-rarely used for practical purposes).
-
-The array scalars also offer the same methods and attributes as arrays
-with the intent that the same code can be used to support arbitrary
-dimensions (including 0-dimensions). The array scalars are read-only
-(immutable) with the exception of the void scalar which can also be
-written to so that structured array field setting works more naturally
-(a[0]['f1'] = ``value`` ).
-
-
-Indexing
-========
-
-.. index::
- single: indexing
-
-All python indexing operations ``arr[index]`` are organized by first preparing
-the index and finding the index type. The supported index types are:
-
-* integer
-* newaxis
-* slice
-* ellipsis
-* integer arrays/array-likes (fancy)
-* boolean (single boolean array); if there is more than one boolean array as
- index or the shape does not match exactly, the boolean array will be
- converted to an integer array instead.
-* 0-d boolean (and also integer); 0-d boolean arrays are a special
- case which has to be handled in the advanced indexing code. They signal
- that a 0-d boolean array had to be interpreted as an integer array.
-
-As well as the scalar array special case signaling that an integer array
-was interpreted as an integer index, which is important because an integer
-array index forces a copy but is ignored if a scalar is returned (full integer
-index). The prepared index is guaranteed to be valid with the exception of
-out of bound values and broadcasting errors for advanced indexing. This
-includes that an ellipsis is added for incomplete indices for example when
-a two dimensional array is indexed with a single integer.
-
-The next step depends on the type of index which was found. If all
-dimensions are indexed with an integer a scalar is returned or set. A
-single boolean indexing array will call specialized boolean functions.
-Indices containing an ellipsis or slice but no advanced indexing will
-always create a view into the old array by calculating the new strides and
-memory offset. This view can then either be returned or, for assignments,
-filled using :c:func:`PyArray_CopyObject`. Note that `PyArray_CopyObject`
-may also be called on temporary arrays in other branches to support
-complicated assignments when the array is of object dtype.
-
-Advanced indexing
------------------
-
-By far the most complex case is advanced indexing, which may or may not be
-combined with typical view based indexing. Here integer indices are
-interpreted as view based. Before trying to understand this, you may want
-to make yourself familiar with its subtleties. The advanced indexing code
-has three different branches and one special case:
-
-* There is one indexing array and it, as well as the assignment array, can
- be iterated trivially. For example they may be contiguous. Also the
- indexing array must be of `intp` type and the value array in assignments
- should be of the correct type. This is purely a fast path.
-* There are only integer array indices so that no subarray exists.
-* View based and advanced indexing is mixed. In this case the view based
- indexing defines a collection of subarrays that are combined by the
- advanced indexing. For example, ``arr[[1, 2, 3], :]`` is created by
- vertically stacking the subarrays ``arr[1, :]``, ``arr[2,:]``, and
- ``arr[3, :]``.
-* There is a subarray but it has exactly one element. This case can be handled
- as if there is no subarray, but needs some care during setup.
-
-Deciding what case applies, checking broadcasting, and determining the kind
-of transposition needed are all done in `PyArray_MapIterNew`. After setting
-up, there are two cases. If there is no subarray or it only has one
-element, no subarray iteration is necessary and an iterator is prepared
-which iterates all indexing arrays *as well as* the result or value array.
-If there is a subarray, there are three iterators prepared. One for the
-indexing arrays, one for the result or value array (minus its subarray),
-and one for the subarrays of the original and the result/assignment array.
-The first two iterators give (or allow calculation) of the pointers into
-the start of the subarray, which then allows to restart the subarray
-iteration.
-
-When advanced indices are next to each other transposing may be necessary.
-All necessary transposing is handled by :c:func:`PyArray_MapIterSwapAxes` and
-has to be handled by the caller unless `PyArray_MapIterNew` is asked to
-allocate the result.
-
-After preparation, getting and setting is relatively straight forward,
-although the different modes of iteration need to be considered. Unless
-there is only a single indexing array during item getting, the validity of
-the indices is checked beforehand. Otherwise it is handled in the inner
-loop itself for optimization.
-
-
-Universal Functions
-===================
-
-.. index::
- single: ufunc
-
-Universal functions are callable objects that take :math:`N` inputs
-and produce :math:`M` outputs by wrapping basic 1-D loops that work
-element-by-element into full easy-to use functions that seamlessly
-implement broadcasting, type-checking and buffered coercion, and
-output-argument handling. New universal functions are normally created
-in C, although there is a mechanism for creating ufuncs from Python
-functions (:func:`frompyfunc`). The user must supply a 1-D loop that
-implements the basic function taking the input scalar values and
-placing the resulting scalars into the appropriate output slots as
-explained in implementation.
-
-
-Setup
------
-
-Every ufunc calculation involves some overhead related to setting up
-the calculation. The practical significance of this overhead is that
-even though the actual calculation of the ufunc is very fast, you will
-be able to write array and type-specific code that will work faster
-for small arrays than the ufunc. In particular, using ufuncs to
-perform many calculations on 0-D arrays will be slower than other
-Python-based solutions (the silently-imported scalarmath module exists
-precisely to give array scalars the look-and-feel of ufunc based
-calculations with significantly reduced overhead).
-
-When a ufunc is called, many things must be done. The information
-collected from these setup operations is stored in a loop-object. This
-loop object is a C-structure (that could become a Python object but is
-not initialized as such because it is only used internally). This loop
-object has the layout needed to be used with PyArray_Broadcast so that
-the broadcasting can be handled in the same way as it is handled in
-other sections of code.
-
-The first thing done is to look-up in the thread-specific global
-dictionary the current values for the buffer-size, the error mask, and
-the associated error object. The state of the error mask controls what
-happens when an error condition is found. It should be noted that
-checking of the hardware error flags is only performed after each 1-D
-loop is executed. This means that if the input and output arrays are
-contiguous and of the correct type so that a single 1-D loop is
-performed, then the flags may not be checked until all elements of the
-array have been calculated. Looking up these values in a thread-
-specific dictionary takes time which is easily ignored for all but
-very small arrays.
-
-After checking, the thread-specific global variables, the inputs are
-evaluated to determine how the ufunc should proceed and the input and
-output arrays are constructed if necessary. Any inputs which are not
-arrays are converted to arrays (using context if necessary). Which of
-the inputs are scalars (and therefore converted to 0-D arrays) is
-noted.
-
-Next, an appropriate 1-D loop is selected from the 1-D loops available
-to the ufunc based on the input array types. This 1-D loop is selected
-by trying to match the signature of the data-types of the inputs
-against the available signatures. The signatures corresponding to
-built-in types are stored in the types member of the ufunc structure.
-The signatures corresponding to user-defined types are stored in a
-linked-list of function-information with the head element stored as a
-``CObject`` in the userloops dictionary keyed by the data-type number
-(the first user-defined type in the argument list is used as the key).
-The signatures are searched until a signature is found to which the
-input arrays can all be cast safely (ignoring any scalar arguments
-which are not allowed to determine the type of the result). The
-implication of this search procedure is that "lesser types" should be
-placed below "larger types" when the signatures are stored. If no 1-D
-loop is found, then an error is reported. Otherwise, the argument_list
-is updated with the stored signature --- in case casting is necessary
-and to fix the output types assumed by the 1-D loop.
-
-If the ufunc has 2 inputs and 1 output and the second input is an
-Object array then a special-case check is performed so that
-NotImplemented is returned if the second input is not an ndarray, has
-the __array_priority\__ attribute, and has an __r{op}\__ special
-method. In this way, Python is signaled to give the other object a
-chance to complete the operation instead of using generic object-array
-calculations. This allows (for example) sparse matrices to override
-the multiplication operator 1-D loop.
-
-For input arrays that are smaller than the specified buffer size,
-copies are made of all non-contiguous, mis-aligned, or out-of-
-byteorder arrays to ensure that for small arrays, a single loop is
-used. Then, array iterators are created for all the input arrays and
-the resulting collection of iterators is broadcast to a single shape.
-
-The output arguments (if any) are then processed and any missing
-return arrays are constructed. If any provided output array doesn't
-have the correct type (or is mis-aligned) and is smaller than the
-buffer size, then a new output array is constructed with the special
-:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set. At the end of the function,
-:c:func:`PyArray_ResolveWritebackIfCopy` is called so that
-its contents will be copied back into the output array.
-Iterators for the output arguments are then processed.
-
-Finally, the decision is made about how to execute the looping
-mechanism to ensure that all elements of the input arrays are combined
-to produce the output arrays of the correct type. The options for loop
-execution are one-loop (for contiguous, aligned, and correct data
-type), strided-loop (for non-contiguous but still aligned and correct
-data type), and a buffered loop (for mis-aligned or incorrect data
-type situations). Depending on which execution method is called for,
-the loop is then setup and computed.
-
-
-Function call
--------------
-
-This section describes how the basic universal function computation loop is
-setup and executed for each of the three different kinds of execution. If
-:c:data:`NPY_ALLOW_THREADS` is defined during compilation, then as long as
-no object arrays are involved, the Python Global Interpreter Lock (GIL) is
-released prior to calling the loops. It is re-acquired if necessary to
-handle error conditions. The hardware error flags are checked only after
-the 1-D loop is completed.
-
-
-One Loop
-^^^^^^^^
-
-This is the simplest case of all. The ufunc is executed by calling the
-underlying 1-D loop exactly once. This is possible only when we have
-aligned data of the correct type (including byte-order) for both input
-and output and all arrays have uniform strides (either contiguous,
-0-D, or 1-D). In this case, the 1-D computational loop is called once
-to compute the calculation for the entire array. Note that the
-hardware error flags are only checked after the entire calculation is
-complete.
-
-
-Strided Loop
-^^^^^^^^^^^^
-
-When the input and output arrays are aligned and of the correct type,
-but the striding is not uniform (non-contiguous and 2-D or larger),
-then a second looping structure is employed for the calculation. This
-approach converts all of the iterators for the input and output
-arguments to iterate over all but the largest dimension. The inner
-loop is then handled by the underlying 1-D computational loop. The
-outer loop is a standard iterator loop on the converted iterators. The
-hardware error flags are checked after each 1-D loop is completed.
-
-
-Buffered Loop
-^^^^^^^^^^^^^
-
-This is the code that handles the situation whenever the input and/or
-output arrays are either misaligned or of the wrong data-type
-(including being byte-swapped) from what the underlying 1-D loop
-expects. The arrays are also assumed to be non-contiguous. The code
-works very much like the strided-loop except for the inner 1-D loop is
-modified so that pre-processing is performed on the inputs and post-
-processing is performed on the outputs in bufsize chunks (where
-bufsize is a user-settable parameter). The underlying 1-D
-computational loop is called on data that is copied over (if it needs
-to be). The setup code and the loop code is considerably more
-complicated in this case because it has to handle:
-
-- memory allocation of the temporary buffers
-
-- deciding whether or not to use buffers on the input and output data
- (mis-aligned and/or wrong data-type)
-
-- copying and possibly casting data for any inputs or outputs for which
- buffers are necessary.
-
-- special-casing Object arrays so that reference counts are properly
- handled when copies and/or casts are necessary.
-
-- breaking up the inner 1-D loop into bufsize chunks (with a possible
- remainder).
-
-Again, the hardware error flags are checked at the end of each 1-D
-loop.
-
-
-Final output manipulation
--------------------------
-
-Ufuncs allow other array-like classes to be passed seamlessly through
-the interface in that inputs of a particular class will induce the
-outputs to be of that same class. The mechanism by which this works is
-the following. If any of the inputs are not ndarrays and define the
-:obj:`~numpy.class.__array_wrap__` method, then the class with the largest
-:obj:`~numpy.class.__array_priority__` attribute determines the type of all the
-outputs (with the exception of any output arrays passed in). The
-:obj:`~numpy.class.__array_wrap__` method of the input array will be called with the
-ndarray being returned from the ufunc as its input. There are two
-calling styles of the :obj:`~numpy.class.__array_wrap__` function supported. The first
-takes the ndarray as the first argument and a tuple of "context" as
-the second argument. The context is (ufunc, arguments, output argument
-number). This is the first call tried. If a TypeError occurs, then the
-function is called with just the ndarray as the first argument.
-
-
-Methods
--------
-
-There are three methods of ufuncs that require calculation similar to
-the general-purpose ufuncs. These are reduce, accumulate, and
-reduceat. Each of these methods requires a setup command followed by a
-loop. There are four loop styles possible for the methods
-corresponding to no-elements, one-element, strided-loop, and buffered-
-loop. These are the same basic loop styles as implemented for the
-general purpose function call except for the no-element and one-
-element cases which are special-cases occurring when the input array
-objects have 0 and 1 elements respectively.
-
-
-Setup
-^^^^^
-
-The setup function for all three methods is ``construct_reduce``.
-This function creates a reducing loop object and fills it with
-parameters needed to complete the loop. All of the methods only work
-on ufuncs that take 2-inputs and return 1 output. Therefore, the
-underlying 1-D loop is selected assuming a signature of [ ``otype``,
-``otype``, ``otype`` ] where ``otype`` is the requested reduction
-data-type. The buffer size and error handling is then retrieved from
-(per-thread) global storage. For small arrays that are mis-aligned or
-have incorrect data-type, a copy is made so that the un-buffered
-section of code is used. Then, the looping strategy is selected. If
-there is 1 element or 0 elements in the array, then a simple looping
-method is selected. If the array is not mis-aligned and has the
-correct data-type, then strided looping is selected. Otherwise,
-buffered looping must be performed. Looping parameters are then
-established, and the return array is constructed. The output array is
-of a different shape depending on whether the method is reduce,
-accumulate, or reduceat. If an output array is already provided, then
-its shape is checked. If the output array is not C-contiguous,
-aligned, and of the correct data type, then a temporary copy is made
-with the WRITEBACKIFCOPY flag set. In this way, the methods will be able
-to work with a well-behaved output array but the result will be copied
-back into the true output array when :c:func:`PyArray_ResolveWritebackIfCopy`
-is called at function completion.
-Finally, iterators are set up to loop over the correct axis
-(depending on the value of axis provided to the method) and the setup
-routine returns to the actual computation routine.
-
-
-Reduce
-^^^^^^
-
-.. index::
- triple: ufunc; methods; reduce
-
-All of the ufunc methods use the same underlying 1-D computational
-loops with input and output arguments adjusted so that the appropriate
-reduction takes place. For example, the key to the functioning of
-reduce is that the 1-D loop is called with the output and the second
-input pointing to the same position in memory and both having a step-
-size of 0. The first input is pointing to the input array with a step-
-size given by the appropriate stride for the selected axis. In this
-way, the operation performed is
-
-.. math::
- :nowrap:
-
- \begin{align*}
- o & = & i[0] \\
- o & = & i[k]\textrm{<op>}o\quad k=1\ldots N
- \end{align*}
-
-where :math:`N+1` is the number of elements in the input, :math:`i`,
-:math:`o` is the output, and :math:`i[k]` is the
-:math:`k^{\textrm{th}}` element of :math:`i` along the selected axis.
-This basic operation is repeated for arrays with greater than 1
-dimension so that the reduction takes place for every 1-D sub-array
-along the selected axis. An iterator with the selected dimension
-removed handles this looping.
-
-For buffered loops, care must be taken to copy and cast data before
-the loop function is called because the underlying loop expects
-aligned data of the correct data-type (including byte-order). The
-buffered loop must handle this copying and casting prior to calling
-the loop function on chunks no greater than the user-specified
-bufsize.
-
-
-Accumulate
-^^^^^^^^^^
-
-.. index::
- triple: ufunc; methods; accumulate
-
-The accumulate function is very similar to the reduce function in that
-the output and the second input both point to the output. The
-difference is that the second input points to memory one stride behind
-the current output pointer. Thus, the operation performed is
-
-.. math::
- :nowrap:
-
- \begin{align*}
- o[0] & = & i[0] \\
- o[k] & = & i[k]\textrm{<op>}o[k-1]\quad k=1\ldots N.
- \end{align*}
-
-The output has the same shape as the input and each 1-D loop operates
-over :math:`N` elements when the shape in the selected axis is :math:`N+1`.
-Again, buffered loops take care to copy and cast the data before
-calling the underlying 1-D computational loop.
-
-
-Reduceat
-^^^^^^^^
-
-.. index::
- triple: ufunc; methods; reduceat
- single: ufunc
-
-The reduceat function is a generalization of both the reduce and
-accumulate functions. It implements a reduce over ranges of the input
-array specified by indices. The extra indices argument is checked to
-be sure that every input is not too large for the input array along
-the selected dimension before the loop calculations take place. The
-loop implementation is handled using code that is very similar to the
-reduce code repeated as many times as there are elements in the
-indices input. In particular: the first input pointer passed to the
-underlying 1-D computational loop points to the input array at the
-correct location indicated by the index array. In addition, the output
-pointer and the second input pointer passed to the underlying 1-D loop
-point to the same position in memory. The size of the 1-D
-computational loop is fixed to be the difference between the current
-index and the next index (when the current index is the last index,
-then the next index is assumed to be the length of the array along the
-selected dimension). In this way, the 1-D loop will implement a reduce
-over the specified indices.
-
-Mis-aligned or a loop data-type that does not match the input and/or
-output data-type is handled using buffered code where-in data is
-copied to a temporary buffer and cast to the correct data-type if
-necessary prior to calling the underlying 1-D function. The temporary
-buffers are created in (element) sizes no bigger than the user
-settable buffer-size value. Thus, the loop must be flexible enough to
-call the underlying 1-D computational loop enough times to complete
-the total calculation in chunks no bigger than the buffer-size.
+This document has been moved to :ref:`c-code-explanations`. \ No newline at end of file
diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst
index ed8042c08..7a5e6374c 100644
--- a/doc/source/reference/internals.rst
+++ b/doc/source/reference/internals.rst
@@ -1,168 +1,10 @@
-.. _numpy-internals:
+:orphan:
***************
NumPy internals
***************
-.. toctree::
-
- internals.code-explanations
- alignment
-
-Internal organization of numpy arrays
-=====================================
-
-It helps to understand a bit about how numpy arrays are handled under the covers. This section will not go into great detail. Those wishing to understand the full details are referred to Travis Oliphant's book "Guide to NumPy".
-
-NumPy arrays consist of two major components, the raw array data (from now on,
-referred to as the data buffer), and the information about the raw array data.
-The data buffer is typically what people think of as arrays in C or Fortran,
-a contiguous (and fixed) block of memory containing fixed sized data items.
-NumPy also contains a significant set of data that describes how to interpret
-the data in the data buffer. This extra information contains (among other things):
-
- 1) The basic data element's size in bytes
- 2) The start of the data within the data buffer (an offset relative to the
- beginning of the data buffer).
- 3) The number of dimensions and the size of each dimension
- 4) The separation between elements for each dimension (the 'stride'). This
- does not have to be a multiple of the element size
- 5) The byte order of the data (which may not be the native byte order)
- 6) Whether the buffer is read-only
- 7) Information (via the dtype object) about the interpretation of the basic
- data element. The basic data element may be as simple as a int or a float,
- or it may be a compound object (e.g., struct-like), a fixed character field,
- or Python object pointers.
- 8) Whether the array is to be interpreted as C-order or Fortran-order.
-
-This arrangement allows for very flexible use of arrays. One thing that it allows
-is simple changes of the metadata to change the interpretation of the array buffer.
-Changing the byteorder of the array is a simple change involving no rearrangement
-of the data. The shape of the array can be changed very easily without changing
-anything in the data buffer or any data copying at all.
-
-Among other things that are made possible is one can create a new array metadata
-object that uses the same data buffer
-to create a new view of that data buffer that has a different interpretation
-of the buffer (e.g., different shape, offset, byte order, strides, etc) but
-shares the same data bytes. Many operations in numpy do just this such as
-slices. Other operations, such as transpose, don't move data elements
-around in the array, but rather change the information about the shape and strides so that the indexing of the array changes, but the data in the buffer doesn't move.
-
-Typically these new versions of the array metadata, sharing the same data buffer, are
-new 'views' into the data buffer. There is a different ndarray object, but it
-uses the same data buffer. This is why it is necessary to force copies through
-use of the .copy() method if one really wants to make a new and independent
-copy of the data buffer.
-
-New views into arrays mean the object reference counts for the data buffer
-increase. Simply doing away with the original array object will not remove the
-data buffer if other views of it still exist.
-
-Multidimensional Array Indexing Order Issues
-============================================
-
-What is the right way to index
-multi-dimensional arrays? Before you jump to conclusions about the one and
-true way to index multi-dimensional arrays, it pays to understand why this is
-a confusing issue. This section will try to explain in detail how numpy
-indexing works and why we adopt the convention we do for images, and when it
-may be appropriate to adopt other conventions.
-
-The first thing to understand is
-that there are two conflicting conventions for indexing 2-dimensional arrays.
-Matrix notation uses the first index to indicate which row is being selected and
-the second index to indicate which column is selected. This is opposite the
-geometrically oriented-convention for images where people generally think the
-first index represents x position (i.e., column) and the second represents y
-position (i.e., row). This alone is the source of much confusion;
-matrix-oriented users and image-oriented users expect two different things with
-regard to indexing.
-
-The second issue to understand is how indices correspond
-to the order the array is stored in memory. In Fortran the first index is the
-most rapidly varying index when moving through the elements of a two
-dimensional array as it is stored in memory. If you adopt the matrix
-convention for indexing, then this means the matrix is stored one column at a
-time (since the first index moves to the next row as it changes). Thus Fortran
-is considered a Column-major language. C has just the opposite convention. In
-C, the last index changes most rapidly as one moves through the array as
-stored in memory. Thus C is a Row-major language. The matrix is stored by
-rows. Note that in both cases it presumes that the matrix convention for
-indexing is being used, i.e., for both Fortran and C, the first index is the
-row. Note this convention implies that the indexing convention is invariant
-and that the data order changes to keep that so.
-
-But that's not the only way
-to look at it. Suppose one has large two-dimensional arrays (images or
-matrices) stored in data files. Suppose the data are stored by rows rather than
-by columns. If we are to preserve our index convention (whether matrix or
-image) that means that depending on the language we use, we may be forced to
-reorder the data if it is read into memory to preserve our indexing
-convention. For example if we read row-ordered data into memory without
-reordering, it will match the matrix indexing convention for C, but not for
-Fortran. Conversely, it will match the image indexing convention for Fortran,
-but not for C. For C, if one is using data stored in row order, and one wants
-to preserve the image index convention, the data must be reordered when
-reading into memory.
-
-In the end, which you do for Fortran or C depends on
-which is more important, not reordering data or preserving the indexing
-convention. For large images, reordering data is potentially expensive, and
-often the indexing convention is inverted to avoid that.
-
-The situation with
-numpy makes this issue yet more complicated. The internal machinery of numpy
-arrays is flexible enough to accept any ordering of indices. One can simply
-reorder indices by manipulating the internal stride information for arrays
-without reordering the data at all. NumPy will know how to map the new index
-order to the data without moving the data.
-
-So if this is true, why not choose
-the index order that matches what you most expect? In particular, why not define
-row-ordered images to use the image convention? (This is sometimes referred
-to as the Fortran convention vs the C convention, thus the 'C' and 'FORTRAN'
-order options for array ordering in numpy.) The drawback of doing this is
-potential performance penalties. It's common to access the data sequentially,
-either implicitly in array operations or explicitly by looping over rows of an
-image. When that is done, then the data will be accessed in non-optimal order.
-As the first index is incremented, what is actually happening is that elements
-spaced far apart in memory are being sequentially accessed, with usually poor
-memory access speeds. For example, for a two dimensional image 'im' defined so
-that im[0, 10] represents the value at x=0, y=10. To be consistent with usual
-Python behavior then im[0] would represent a column at x=0. Yet that data
-would be spread over the whole array since the data are stored in row order.
-Despite the flexibility of numpy's indexing, it can't really paper over the fact
-that basic operations are rendered inefficient because of data order or that
-getting contiguous subarrays is still awkward (e.g., im[:,0] for the first row,
-vs im[0]). Thus one can't use an idiom such as ``for row in im``; ``for col in
-im`` does work, but doesn't yield contiguous column data.
-
-As it turns out, numpy is
-smart enough when dealing with ufuncs to determine which index is the most
-rapidly varying one in memory and uses that for the innermost loop. Thus for
-ufuncs there is no large intrinsic advantage to either approach in most cases.
-On the other hand, use of .flat with a FORTRAN-ordered array will lead to
-non-optimal memory access as adjacent elements in the flattened array (iterator,
-actually) are not contiguous in memory.
-
-Indeed, the fact is that Python
-indexing on lists and other sequences naturally leads to an outside-to inside
-ordering (the first index gets the largest grouping, the next the next largest,
-and the last gets the smallest element). Since image data are normally stored
-by rows, this corresponds to position within rows being the last item indexed.
-
-If you do want to use Fortran ordering realize that
-there are two approaches to consider: 1) accept that the first index is just not
-the most rapidly changing in memory and have all your I/O routines reorder
-your data when going from memory to disk or vice versa, or 2) use numpy's
-mechanism for mapping the first index to the most rapidly varying data. We
-recommend the former if possible. The disadvantage of the latter is that many
-of numpy's functions will yield arrays without Fortran ordering unless you are
-careful to use the 'order' keyword. Doing this would be highly inconvenient.
-
-Otherwise we recommend simply learning to reverse the usual order of indices
-when accessing elements of an array. Granted, it goes against the grain, but
-it is more in line with Python semantics and the natural order of the data.
+.. This document has been moved to ../dev/internals.rst.
+This document has been moved to :ref:`numpy-internals`.
diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst
index c5c349806..211f0d60e 100644
--- a/doc/source/reference/random/bit_generators/index.rst
+++ b/doc/source/reference/random/bit_generators/index.rst
@@ -4,7 +4,7 @@ Bit Generators
--------------
The random values produced by :class:`~Generator`
-orignate in a BitGenerator. The BitGenerators do not directly provide
+originate in a BitGenerator. The BitGenerators do not directly provide
random numbers and only contains methods used for seeding, getting or
setting the state, jumping or advancing the state, and for accessing
low-level wrappers for consumption by code that can efficiently
diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst
index 96cd47017..aaabc9b39 100644
--- a/doc/source/reference/random/index.rst
+++ b/doc/source/reference/random/index.rst
@@ -55,7 +55,7 @@ properties than the legacy `MT19937` used in `RandomState`.
more_vals = random.standard_normal(10)
`Generator` can be used as a replacement for `RandomState`. Both class
-instances hold a internal `BitGenerator` instance to provide the bit
+instances hold an internal `BitGenerator` instance to provide the bit
stream, it is accessible as ``gen.bit_generator``. Some long-overdue API
cleanup means that legacy and compatibility methods have been removed from
`Generator`
diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst
index 85855be59..cb9b94113 100644
--- a/doc/source/reference/random/performance.rst
+++ b/doc/source/reference/random/performance.rst
@@ -13,7 +13,7 @@ full-featured, and fast on most platforms, but somewhat slow when compiled for
parallelism would indicate using `PCG64DXSM`.
`Philox` is fairly slow, but its statistical properties have
-very high quality, and it is easy to get assuredly-independent stream by using
+very high quality, and it is easy to get an assuredly-independent stream by using
unique keys. If that is the style you wish to use for parallel streams, or you
are porting from another system that uses that style, then
`Philox` is your choice.
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index d961cbf02..5404c43d8 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -44,7 +44,9 @@ Ones and zeros
ma.masked_all
ma.masked_all_like
ma.ones
+ ma.ones_like
ma.zeros
+ ma.zeros_like
_____
@@ -287,11 +289,11 @@ Filling a masked array
_____
-Masked arrays arithmetics
-=========================
+Masked arrays arithmetic
+========================
-Arithmetics
-~~~~~~~~~~~
+Arithmetic
+~~~~~~~~~~
.. autosummary::
:toctree: generated/
@@ -331,6 +333,7 @@ Minimum/maximum
ma.max
ma.min
ma.ptp
+ ma.diff
ma.MaskedArray.argmax
ma.MaskedArray.argmin
diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst
index 3c2f96830..2a09b8d20 100644
--- a/doc/source/reference/routines.math.rst
+++ b/doc/source/reference/routines.math.rst
@@ -143,6 +143,21 @@ Handling complex numbers
conj
conjugate
+Extrema Finding
+---------------
+.. autosummary::
+ :toctree: generated/
+
+ maximum
+ fmax
+ amax
+ nanmax
+
+ minimum
+ fmin
+ amin
+ nanmin
+
Miscellaneous
-------------
@@ -160,11 +175,7 @@ Miscellaneous
fabs
sign
heaviside
- maximum
- minimum
- fmax
- fmin
-
+
nan_to_num
real_if_close
diff --git a/doc/source/reference/routines.polynomials.rst b/doc/source/reference/routines.polynomials.rst
index ecfb012f0..4aea963c0 100644
--- a/doc/source/reference/routines.polynomials.rst
+++ b/doc/source/reference/routines.polynomials.rst
@@ -22,7 +22,7 @@ Therefore :mod:`numpy.polynomial` is recommended for new coding.
the polynomial functions prefixed with *poly* accessible from the `numpy`
namespace (e.g. `numpy.polyadd`, `numpy.polyval`, `numpy.polyfit`, etc.).
- The term *polynomial package* refers to the new API definied in
+ The term *polynomial package* refers to the new API defined in
`numpy.polynomial`, which includes the convenience classes for the
different kinds of polynomials (`numpy.polynomial.Polynomial`,
`numpy.polynomial.Chebyshev`, etc.).
@@ -110,7 +110,7 @@ See the documentation for the
`convenience classes <routines.polynomials.classes>`_ for further details on
the ``domain`` and ``window`` attributes.
-Another major difference bewteen the legacy polynomial module and the
+Another major difference between the legacy polynomial module and the
polynomial package is polynomial fitting. In the old module, fitting was
done via the `~numpy.polyfit` function. In the polynomial package, the
`~numpy.polynomial.polynomial.Polynomial.fit` class method is preferred. For
diff --git a/doc/source/reference/routines.statistics.rst b/doc/source/reference/routines.statistics.rst
index c675b6090..cd93e6025 100644
--- a/doc/source/reference/routines.statistics.rst
+++ b/doc/source/reference/routines.statistics.rst
@@ -9,11 +9,7 @@ Order statistics
.. autosummary::
:toctree: generated/
-
- amin
- amax
- nanmin
- nanmax
+
ptp
percentile
nanpercentile
diff --git a/doc/source/reference/simd/simd-optimizations.rst b/doc/source/reference/simd/simd-optimizations.rst
index 956824321..9de6d1734 100644
--- a/doc/source/reference/simd/simd-optimizations.rst
+++ b/doc/source/reference/simd/simd-optimizations.rst
@@ -14,7 +14,7 @@ written only once. There are three layers:
written using the maximum set of intrinsics possible.
- At *compile* time, a distutils command is used to define the minimum and
maximum features to support, based on user choice and compiler support. The
- appropriate macros are overlayed with the platform / architecture intrinsics,
+ appropriate macros are overlaid with the platform / architecture intrinsics,
and the three loops are compiled.
- At *runtime import*, the CPU is probed for the set of supported intrinsic
features. A mechanism is used to grab the pointer to the most appropriate
@@ -89,7 +89,7 @@ NOTES
~~~~~~~~~~~~~
- CPU features and other options are case-insensitive.
-- The order of the requsted optimizations doesn't matter.
+- The order of the requested optimizations doesn't matter.
- Either commas or spaces can be used as a separator, e.g. ``--cpu-dispatch``\ =
"avx2 avx512f" or ``--cpu-dispatch``\ = "avx2, avx512f" both work, but the
@@ -113,7 +113,7 @@ NOTES
compiler native flag ``-march=native`` or ``-xHost`` or ``QxHost`` is
enabled through environment variable ``CFLAGS``
-- The validation process for the requsted optimizations when it comes to
+- The validation process for the requested optimizations when it comes to
``--cpu-baseline`` isn't strict. For example, if the user requested
``AVX2`` but the compiler doesn't support it then we just skip it and return
the maximum optimization that the compiler can handle depending on the
@@ -379,15 +379,15 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
#include "numpy/utils.h" // NPY_CAT, NPY_TOSTR
#ifndef NPY__CPU_TARGET_CURRENT
- // wrapping the dispatch-able source only happens to the addtional optimizations
- // but if the keyword 'baseline' provided within the configuration statments,
+ // wrapping the dispatch-able source only happens to the additional optimizations
+ // but if the keyword 'baseline' provided within the configuration statements,
// the infrastructure will add extra compiling for the dispatch-able source by
// passing it as-is to the compiler without any changes.
#define CURRENT_TARGET(X) X
#define NPY__CPU_TARGET_CURRENT baseline // for printing only
#else
// since we reach to this point, that's mean we're dealing with
- // the addtional optimizations, so it could be SSE42 or AVX512F
+ // the additional optimizations, so it could be SSE42 or AVX512F
#define CURRENT_TARGET(X) NPY_CAT(NPY_CAT(X, _), NPY__CPU_TARGET_CURRENT)
#endif
// Macro 'CURRENT_TARGET' adding the current target as suffux to the exported symbols,
@@ -418,7 +418,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
#undef NPY__CPU_DISPATCH_BASELINE_CALL
#undef NPY__CPU_DISPATCH_CALL
// nothing strange here, just a normal preprocessor callback
- // enabled only if 'baseline' spesfied withiin the configration statments
+ // enabled only if 'baseline' specified within the configuration statements
#define NPY__CPU_DISPATCH_BASELINE_CALL(CB, ...) \
NPY__CPU_DISPATCH_EXPAND_(CB(__VA_ARGS__))
// 'NPY__CPU_DISPATCH_CALL' is an abstract macro is used for dispatching
@@ -427,7 +427,7 @@ through ``--cpu-dispatch``, but it can also represent other options such as:
// @param CHK, Expected a macro that can be used to detect CPU features
// in runtime, which takes a CPU feature name without string quotes and
// returns the testing result in a shape of boolean value.
- // NumPy already has macro called "NPY_CPU_HAVE", which fit this requirment.
+ // NumPy already has macro called "NPY_CPU_HAVE", which fits this requirement.
//
// @param CB, a callback macro that expected to be called multiple times depending
// on the required optimizations, the callback should receive the following arguments:
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index b832dad04..6ace5b233 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -185,7 +185,7 @@ attribute of the ufunc. (This list may be missing DTypes not defined
by NumPy.)
The ``signature`` only specifies the DType class/type. For example, it
-can specifiy that the operation should be ``datetime64`` or ``float64``
+can specify that the operation should be ``datetime64`` or ``float64``
operation. It does not specify the ``datetime64`` time-unit or the
``float64`` byte-order.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index e9057a531..aa490b5f5 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -1,11 +1,12 @@
*************
-Release Notes
+Release notes
*************
.. toctree::
:maxdepth: 3
1.22.0 <release/1.22.0-notes>
+ 1.21.3 <release/1.21.3-notes>
1.21.2 <release/1.21.2-notes>
1.21.1 <release/1.21.1-notes>
1.21.0 <release/1.21.0-notes>
diff --git a/doc/source/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst
index 8ee876fd3..346b5af99 100644
--- a/doc/source/release/1.14.0-notes.rst
+++ b/doc/source/release/1.14.0-notes.rst
@@ -332,7 +332,7 @@ eliminating their use internally and two new C-API functions,
* ``PyArray_SetWritebackIfCopyBase``
* ``PyArray_ResolveWritebackIfCopy``,
-have been added together with a complimentary flag,
+have been added together with a complementary flag,
``NPY_ARRAY_WRITEBACKIFCOPY``. Using the new functionality also requires that
some flags be changed when new arrays are created, to wit:
``NPY_ARRAY_INOUT_ARRAY`` should be replaced by ``NPY_ARRAY_INOUT_ARRAY2`` and
diff --git a/doc/source/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst
index 7235ca915..2d9d068e5 100644
--- a/doc/source/release/1.15.0-notes.rst
+++ b/doc/source/release/1.15.0-notes.rst
@@ -326,8 +326,8 @@ passed explicitly, and are not yet computed automatically.
No longer does an IQR of 0 result in ``n_bins=1``, rather the number of bins
chosen is related to the data size in this situation.
-The edges retuned by `histogram`` and ``histogramdd`` now match the data float type
------------------------------------------------------------------------------------
+The edges returned by ``histogram`` and ``histogramdd`` now match the data float type
+-------------------------------------------------------------------------------------
When passed ``np.float16``, ``np.float32``, or ``np.longdouble`` data, the
returned edges are now of the same dtype. Previously, ``histogram`` would only
return the same type if explicit bins were given, and ``histogram`` would
diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst
index 17d24160a..122f20eba 100644
--- a/doc/source/release/1.16.0-notes.rst
+++ b/doc/source/release/1.16.0-notes.rst
@@ -119,7 +119,7 @@ NaT comparisons
Consistent with the behavior of NaN, all comparisons other than inequality
checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
return ``False``, and inequality checks with NaT now always return ``True``.
-This includes comparisons beteween NaT values. For compatibility with the
+This includes comparisons between NaT values. For compatibility with the
old behavior, use ``np.isnat`` to explicitly check for NaT or convert
datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
comparisons.
@@ -365,7 +365,7 @@ Alpine Linux (and other musl c library distros) support
We now default to use `fenv.h` for floating point status error reporting.
Previously we had a broken default that sometimes would not report underflow,
overflow, and invalid floating point operations. Now we can support non-glibc
-distrubutions like Alpine Linux as long as they ship `fenv.h`.
+distributions like Alpine Linux as long as they ship `fenv.h`.
Speedup ``np.block`` for large arrays
-------------------------------------
diff --git a/doc/source/release/1.19.0-notes.rst b/doc/source/release/1.19.0-notes.rst
index 8f5c2c0ce..410890697 100644
--- a/doc/source/release/1.19.0-notes.rst
+++ b/doc/source/release/1.19.0-notes.rst
@@ -402,7 +402,7 @@ Ability to disable madvise hugepages
------------------------------------
On Linux NumPy has previously added support for madavise hugepages which can
improve performance for very large arrays. Unfortunately, on older Kernel
-versions this led to peformance regressions, thus by default the support has
+versions this led to performance regressions, thus by default the support has
been disabled on kernels before version 4.6. To override the default, you can
use the environment variable::
diff --git a/doc/source/release/1.20.0-notes.rst b/doc/source/release/1.20.0-notes.rst
index b8b7a0c79..494e4f19e 100644
--- a/doc/source/release/1.20.0-notes.rst
+++ b/doc/source/release/1.20.0-notes.rst
@@ -842,7 +842,7 @@ The compiler command selection for Fortran Portland Group Compiler is changed
in `numpy.distutils.fcompiler`. This only affects the linking command. This
forces the use of the executable provided by the command line option (if
provided) instead of the pgfortran executable. If no executable is provided to
-the command line option it defaults to the pgf90 executable, wich is an alias
+the command line option it defaults to the pgf90 executable, which is an alias
for pgfortran according to the PGI documentation.
(`gh-16730 <https://github.com/numpy/numpy/pull/16730>`__)
diff --git a/doc/source/release/1.21.0-notes.rst b/doc/source/release/1.21.0-notes.rst
index 270cc32de..88a4503de 100644
--- a/doc/source/release/1.21.0-notes.rst
+++ b/doc/source/release/1.21.0-notes.rst
@@ -522,7 +522,7 @@ either of these distributions are produced.
Placeholder annotations have been improved
------------------------------------------
All placeholder annotations, that were previously annotated as ``typing.Any``,
-have been improved. Where appropiate they have been replaced with explicit
+have been improved. Where appropriate they have been replaced with explicit
function definitions, classes or other miscellaneous objects.
(`gh-18934 <https://github.com/numpy/numpy/pull/18934>`__)
diff --git a/doc/source/release/1.21.3-notes.rst b/doc/source/release/1.21.3-notes.rst
new file mode 100644
index 000000000..4058452ef
--- /dev/null
+++ b/doc/source/release/1.21.3-notes.rst
@@ -0,0 +1,44 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.21.3 Release Notes
+==========================
+
+NumPy 1.21.3 is a maintenance release that fixes a few bugs discovered after
+1.21.2. It also provides 64 bit Python 3.10.0 wheels. Note a few oddities about
+Python 3.10:
+
+* There are no 32 bit wheels for Windows, Mac, or Linux.
+* The Mac Intel builds are only available in universal2 wheels.
+
+The Python versions supported in this release are 3.7-3.10. If you want to
+compile your own version using gcc-11, you will need to use gcc-11.2+ to avoid
+problems.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Meurer
+* Bas van Beek
+* Charles Harris
+* Developer-Ecosystem-Engineering +
+* Kevin Sheppard
+* Sebastian Berg
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#19745 <https://github.com/numpy/numpy/pull/19745>`__: ENH: Add dtype-support to 3 ``generic``/``ndarray`` methods
+* `#19955 <https://github.com/numpy/numpy/pull/19955>`__: BUG: Resolve Divide by Zero on Apple silicon + test failures...
+* `#19958 <https://github.com/numpy/numpy/pull/19958>`__: MAINT: Mark type-check-only ufunc subclasses as ufunc aliases...
+* `#19994 <https://github.com/numpy/numpy/pull/19994>`__: BUG: np.tan(np.inf) test failure
+* `#20080 <https://github.com/numpy/numpy/pull/20080>`__: BUG: Correct incorrect advance in PCG with emulated int128
+* `#20081 <https://github.com/numpy/numpy/pull/20081>`__: BUG: Fix NaT handling in the PyArray_CompareFunc for datetime...
+* `#20082 <https://github.com/numpy/numpy/pull/20082>`__: DOC: Ensure that we add documentation also as to the dict for...
+* `#20106 <https://github.com/numpy/numpy/pull/20106>`__: BUG: core: result_type(0, np.timedelta64(4)) would seg. fault.
diff --git a/doc/source/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst
index 80c39f8bc..65a471b92 100644
--- a/doc/source/release/1.8.0-notes.rst
+++ b/doc/source/release/1.8.0-notes.rst
@@ -33,7 +33,7 @@ Future Changes
The Datetime64 type remains experimental in this release. In 1.9 there will
-probably be some changes to make it more useable.
+probably be some changes to make it more usable.
The diagonal method currently returns a new array and raises a
FutureWarning. In 1.9 it will return a readonly view.
@@ -315,8 +315,8 @@ If used with the `overwrite_input` option the array will now only be partially
sorted instead of fully sorted.
-Overrideable operand flags in ufunc C-API
------------------------------------------
+Overridable operand flags in ufunc C-API
+----------------------------------------
When creating a ufunc, the default ufunc operand flags can be overridden
via the new op_flags attribute of the ufunc object. For example, to set
the operand flag for the first input to read/write:
diff --git a/doc/source/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst
index 7ea29e354..a19a05cb7 100644
--- a/doc/source/release/1.9.0-notes.rst
+++ b/doc/source/release/1.9.0-notes.rst
@@ -389,7 +389,7 @@ uses a per-state lock instead of the GIL.
MaskedArray support for more complicated base classes
-----------------------------------------------------
Built-in assumptions that the baseclass behaved like a plain array are being
-removed. In particalur, ``repr`` and ``str`` should now work more reliably.
+removed. In particular, ``repr`` and ``str`` should now work more reliably.
C-API
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index bb570f622..27e9e1f63 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -391,7 +391,7 @@ this array to an array with three rows and two columns::
With ``np.reshape``, you can specify a few optional parameters::
- >>> numpy.reshape(a, newshape=(1, 6), order='C')
+ >>> np.reshape(a, newshape=(1, 6), order='C')
array([[0, 1, 2, 3, 4, 5]])
``a`` is the array to be reshaped.
@@ -613,7 +613,7 @@ How to create an array from existing data
-----
-You can easily use create a new array from a section of an existing array.
+You can easily create a new array from a section of an existing array.
Let's say you have this array:
@@ -899,12 +899,18 @@ You can aggregate matrices the same way you aggregated vectors::
.. image:: images/np_matrix_aggregation.png
You can aggregate all the values in a matrix and you can aggregate them across
-columns or rows using the ``axis`` parameter::
+columns or rows using the ``axis`` parameter. To illustrate this point, let's
+look at a slightly modified dataset::
+ >>> data = np.array([[1, 2], [5, 3], [4, 6]])
+ >>> data
+ array([[1, 2],
+ [5, 3],
+ [4, 6]])
>>> data.max(axis=0)
array([5, 6])
>>> data.max(axis=1)
- array([2, 4, 6])
+ array([2, 5, 6])
.. image:: images/np_matrix_aggregation_row.png
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 5a252122f..ca299085a 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -170,6 +170,7 @@ An example of broadcasting when a 1-d array is added to a 2-d array::
[ 31., 32., 33.]])
>>> b = array([1.0, 2.0, 3.0, 4.0])
>>> a + b
+ Traceback (most recent call last):
ValueError: operands could not be broadcast together with shapes (4,3) (4,)
As shown in :ref:`broadcasting.figure-2`, ``b`` is added to each row of ``a``.
diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
new file mode 100644
index 000000000..583a59b95
--- /dev/null
+++ b/doc/source/user/basics.copies.rst
@@ -0,0 +1,152 @@
+.. _basics.copies-and-views:
+
+****************
+Copies and views
+****************
+
+When operating on NumPy arrays, it is possible to access the internal data
+buffer directly using a :ref:`view <view>` without copying data around. This
+ensures good performance but can also cause unwanted problems if the user is
+not aware of how this works. Hence, it is important to know the difference
+between these two terms and to know which operations return copies and
+which return views.
+
+The NumPy array is a data structure consisting of two parts:
+the :term:`contiguous` data buffer with the actual data elements and the
+metadata that contains information about the data buffer. The metadata
+includes data type, strides, and other important information that helps
+manipulate the :class:`.ndarray` easily. See the :ref:`numpy-internals`
+section for a detailed look.
+
+.. _view:
+
+View
+====
+
+It is possible to access the array differently by just changing certain
+metadata like :term:`stride` and :term:`dtype` without changing the
+data buffer. This creates a new way of looking at the data and these new
+arrays are called views. The data buffer remains the same, so any changes made
+to a view reflects in the original copy. A view can be forced through the
+:meth:`.ndarray.view` method.
+
+Copy
+====
+
+When a new array is created by duplicating the data buffer as well as the
+metadata, it is called a copy. Changes made to the copy
+do not reflect on the original array. Making a copy is slower and
+memory-consuming but sometimes necessary. A copy can be forced by using
+:meth:`.ndarray.copy`.
+
+Indexing operations
+===================
+
+.. seealso:: :ref:`basics.indexing`
+
+Views are created when elements can be addressed with offsets and strides
+in the original array. Hence, basic indexing always creates views.
+For example::
+
+ >>> x = np.arange(10)
+ >>> x
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> y = x[1:3] # creates a view
+ >>> y
+ array([1, 2])
+ >>> x[1:3] = [10, 11]
+ >>> x
+ array([ 0, 10, 11, 3, 4, 5, 6, 7, 8, 9])
+ >>> y
+ array([10, 11])
+
+Here, ``y`` gets changed when ``x`` is changed because it is a view.
+
+:ref:`advanced-indexing`, on the other hand, always creates copies.
+For example::
+
+ >>> x = np.arange(9).reshape(3, 3)
+ >>> x
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> y = x[[1, 2]]
+ >>> y
+ array([[3, 4, 5],
+ [6, 7, 8]])
+ >>> y.base is None
+ True
+
+Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>`
+attribute. We can also confirm this by assigning new values to ``x[[1, 2]]``
+which in turn will not affect ``y`` at all::
+
+ >>> x[[1, 2]] = [[10, 11, 12], [13, 14, 15]]
+ >>> x
+ array([[ 0, 1, 2],
+ [10, 11, 12],
+ [13, 14, 15]])
+ >>> y
+ array([[3, 4, 5],
+ [6, 7, 8]])
+
+It must be noted here that during the assignment of ``x[[1, 2]]`` no view
+or copy is created as the assignment happens in-place.
+
+
+Other operations
+================
+
+The :func:`numpy.reshape` function creates a view where possible or a copy
+otherwise. In most cases, the strides can be modified to reshape the
+array with a view. However, in some cases where the array becomes
+non-contiguous (perhaps after a :meth:`.ndarray.transpose` operation),
+the reshaping cannot be done by modifying strides and requires a copy.
+In these cases, we can raise an error by assigning the new shape to the
+shape attribute of the array. For example::
+
+ >>> x = np.ones((2, 3))
+ >>> y = x.T # makes the array non-contiguous
+ >>> y
+ array([[1., 1.],
+ [1., 1.],
+ [1., 1.]])
+ >>> z = y.view()
+ >>> z.shape = 6
+ Traceback (most recent call last):
+ ...
+ AttributeError: Incompatible shape for in-place modification. Use
+ `.reshape()` to make a copy with the desired shape.
+
+Taking the example of another operation, :func:`.ravel` returns a contiguous
+flattened view of the array wherever possible. On the other hand,
+:meth:`.ndarray.flatten` always returns a flattened copy of the array.
+However, to guarantee a view in most cases, ``x.reshape(-1)`` may be preferable.
+
+How to tell if the array is a view or a copy
+============================================
+
+The :attr:`base <.ndarray.base>` attribute of the ndarray makes it easy
+to tell if an array is a view or a copy. The base attribute of a view returns
+the original array while it returns ``None`` for a copy.
+
+ >>> x = np.arange(9)
+ >>> x
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8])
+ >>> y = x.reshape(3, 3)
+ >>> y
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> y.base # .reshape() creates a view
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8])
+ >>> z = y[[2, 1]]
+ >>> z
+ array([[6, 7, 8],
+ [3, 4, 5]])
+ >>> z.base is None # advanced indexing creates a copy
+ True
+
+Note that the ``base`` attribute should not be used to determine
+if an ndarray object is *new*; only if it is a view or a copy
+of another ndarray. \ No newline at end of file
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index a68def887..84ff1c30e 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -37,8 +37,7 @@ respectively. Lists and tuples can define ndarray creation:
>>> a1D = np.array([1, 2, 3, 4])
>>> a2D = np.array([[1, 2], [3, 4]])
- >>> a3D = np.array([[[1, 2], [3, 4]],
- [[5, 6], [7, 8]]])
+ >>> a3D = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
When you use :func:`numpy.array` to define a new array, you should
consider the :doc:`dtype <basics.types>` of the elements in the array,
@@ -116,7 +115,7 @@ examples are shown::
Note: best practice for :func:`numpy.arange` is to use integer start, end, and
step values. There are some subtleties regarding ``dtype``. In the second
example, the ``dtype`` is defined. In the third example, the array is
-``dtype=float`` to accomodate the step size of ``0.1``. Due to roundoff error,
+``dtype=float`` to accommodate the step size of ``0.1``. Due to roundoff error,
the ``stop`` value is sometimes included.
:func:`numpy.linspace` will create arrays with a specified number of elements, and
@@ -173,11 +172,11 @@ list or tuple,
routine is helpful in generating linear least squares models, as such::
>>> np.vander(np.linspace(0, 2, 5), 2)
- array([[0. , 0. , 1. ],
- [0.25, 0.5 , 1. ],
- [1. , 1. , 1. ],
- [2.25, 1.5 , 1. ],
- [4. , 2. , 1. ]])
+ array([[0. , 1. ],
+ [0.5, 1. ],
+ [1. , 1. ],
+ [1.5, 1. ],
+ [2. , 1. ]])
>>> np.vander([1, 2, 3, 4], 2)
array([[1, 1],
[2, 1],
@@ -208,7 +207,7 @@ specified shape. The default dtype is ``float64``::
array([[[0., 0.],
[0., 0.],
[0., 0.]],
-
+ <BLANKLINE>
[[0., 0.],
[0., 0.],
[0., 0.]]])
@@ -223,7 +222,7 @@ specified shape. The default dtype is ``float64``::
array([[[1., 1.],
[1., 1.],
[1., 1.]],
-
+ <BLANKLINE>
[[1., 1.],
[1., 1.],
[1., 1.]]])
@@ -275,7 +274,7 @@ following example::
>>> b = a[:2]
>>> b += 1
>>> print('a =', a, '; b =', b)
- a = [2 3 3 4 5 6]; b = [2 3]
+ a = [2 3 3 4 5 6] ; b = [2 3]
In this example, you did not create a new array. You created a variable,
``b`` that viewed the first 2 elements of ``a``. When you added 1 to ``b`` you
@@ -286,7 +285,7 @@ would get the same result by adding 1 to ``a[:2]``. If you want to create a
>>> b = a[:2].copy()
>>> b += 1
>>> print('a = ', a, 'b = ', b)
- a = [1 2 3 4 5 6] b = [2 3]
+ a = [1 2 3 4] b = [2 3]
For more information and examples look at :ref:`Copies and Views
<quickstart.copies-and-views>`.
@@ -299,8 +298,7 @@ arrays into a 4-by-4 array using ``block``::
>>> B = np.eye(2, 2)
>>> C = np.zeros((2, 2))
>>> D = np.diag((-3, -4))
- >>> np.block([[A, B],
- [C, D]])
+ >>> np.block([[A, B], [C, D]])
array([[ 1., 1., 1., 0. ],
[ 1., 1., 0., 1. ],
[ 0., 0., -3., 0. ],
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 5364acbe9..8fe7565aa 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -437,7 +437,7 @@ process these missing data.
By default, any empty string is marked as missing. We can also consider
more complex strings, such as ``"N/A"`` or ``"???"`` to represent missing
-or invalid data. The ``missing_values`` argument accepts three kind
+or invalid data. The ``missing_values`` argument accepts three kinds
of values:
a string or a comma-separated string
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index 0524fde8e..1e6f30506 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -128,7 +128,7 @@ summary they are:
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
- dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
+ dtype({'names': ['col1', 'col2'], 'formats': ['<i4', '<f4'], 'offsets': [0, 4], 'itemsize': 12})
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst
index bcd51d983..affb85db2 100644
--- a/doc/source/user/basics.rst
+++ b/doc/source/user/basics.rst
@@ -19,3 +19,4 @@ fundamental NumPy ideas and philosophy.
basics.dispatch
basics.subclassing
basics.ufuncs
+ basics.copies
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index 10983ce8f..22efca4a6 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -45,6 +45,9 @@ Building NumPy requires the following software installed:
2) Compilers
+ Much of NumPy is written in C. You will need a C compiler that complies
+ with the C99 standard.
+
While a FORTRAN 77 compiler is not necessary for building NumPy, it is
needed to run the ``numpy.f2py`` tests. These tests are skipped if the
compiler is not auto-detected.
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index 121384d04..7dd22afbf 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -174,14 +174,13 @@ incrementing is automatically performed by
:c:func:`PyArray_MultiIter_NEXT` ( ``obj`` ) macro (which can handle a
multiterator ``obj`` as either a :c:expr:`PyArrayMultiIterObject *` or a
:c:expr:`PyObject *`). The data from input number ``i`` is available using
-:c:func:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ) and the total (broadcasted)
-size as :c:func:`PyArray_MultiIter_SIZE` ( ``obj``). An example of using this
+:c:func:`PyArray_MultiIter_DATA` ( ``obj``, ``i`` ). An example of using this
feature follows.
.. code-block:: c
mobj = PyArray_MultiIterNew(2, obj1, obj2);
- size = PyArray_MultiIter_SIZE(obj);
+ size = mobj->size;
while(size--) {
ptr1 = PyArray_MultiIter_DATA(mobj, 0);
ptr2 = PyArray_MultiIter_DATA(mobj, 1);
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index ebb4b7518..96727a177 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -433,7 +433,7 @@ writeable). The syntax is
The requirements flag allows specification of what kind of
array is acceptable. If the object passed in does not satisfy
- this requirements then a copy is made so that thre returned
+ these requirements then a copy is made so that the returned
object will satisfy the requirements. these ndarray can use a
very generic pointer to memory. This flag allows specification
of the desired properties of the returned array object. All
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 8643d0dd1..6d514f146 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -1,6 +1,6 @@
-********************
+====================
Using Python as glue
-********************
+====================
| There is no conversation more boring than the one where everybody
| agrees.
@@ -124,9 +124,9 @@ Creating source for a basic extension module
Probably the easiest way to introduce f2py is to offer a simple
example. Here is one of the subroutines contained in a file named
-:file:`add.f`:
+:file:`add.f`:
-.. code-block:: none
+.. code-block:: fortran
C
SUBROUTINE ZADD(A,B,C,N)
@@ -149,14 +149,14 @@ routine can be automatically generated by f2py::
You should be able to run this command assuming your search-path is
set-up properly. This command will produce an extension module named
-addmodule.c in the current directory. This extension module can now be
+:file:`addmodule.c` in the current directory. This extension module can now be
compiled and used from Python just like any other extension module.
Creating a compiled extension module
------------------------------------
-You can also get f2py to compile add.f and also compile its produced
+You can also get f2py to compile :file:`add.f` along with the produced
extension module leaving only a shared-library extension file that can
be imported from Python::
@@ -211,7 +211,7 @@ interface file use the -h option::
This command leaves the file add.pyf in the current directory. The
section of this file corresponding to zadd is:
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f
double complex dimension(*) :: a
@@ -224,7 +224,7 @@ By placing intent directives and checking code, the interface can be
cleaned up quite a bit until the Python module method is both easier
to use and more robust.
-.. code-block:: none
+.. code-block:: fortran
subroutine zadd(a,b,c,n) ! in :add:add.f
double complex dimension(n) :: a
@@ -277,9 +277,9 @@ Inserting directives in Fortran source
The nice interface can also be generated automatically by placing the
variable directives as special comments in the original Fortran code.
-Thus, if I modify the source code to contain:
+Thus, if the source code is modified to contain:
-.. code-block:: none
+.. code-block:: fortran
C
SUBROUTINE ZADD(A,B,C,N)
@@ -298,14 +298,14 @@ Thus, if I modify the source code to contain:
20 CONTINUE
END
-Then, I can compile the extension module using::
+Then, one can compile the extension module using::
f2py -c -m add add.f
The resulting signature for the function add.zadd is exactly the same
one that was created previously. If the original source code had
contained ``A(N)`` instead of ``A(*)`` and so forth with ``B`` and ``C``,
-then I could obtain (nearly) the same interface simply by placing the
+then nearly the same interface can be obtained by placing the
``INTENT(OUT) :: C`` comment line in the source code. The only difference
is that ``N`` would be an optional input that would default to the length
of ``A``.
@@ -320,7 +320,7 @@ precision floating-point numbers using a fixed averaging filter. The
advantage of using Fortran to index into multi-dimensional arrays
should be clear from this example.
-.. code-block:: none
+.. code-block:: fortran
SUBROUTINE DFILTER2D(A,B,M,N)
C
@@ -407,13 +407,12 @@ conversion of the .pyf file to a .c file is handled by `numpy.disutils`.
Conclusion
----------
-The interface definition file (.pyf) is how you can fine-tune the
-interface between Python and Fortran. There is decent documentation
-for f2py found in the numpy/f2py/docs directory where-ever NumPy is
-installed on your system (usually under site-packages). There is also
-more information on using f2py (including how to use it to wrap C
-codes) at https://scipy-cookbook.readthedocs.io under the "Interfacing
-With Other Languages" heading.
+The interface definition file (.pyf) is how you can fine-tune the interface
+between Python and Fortran. There is decent documentation for f2py at
+:ref:`f2py`. There is also more information on using f2py (including how to use
+it to wrap C codes) at the `"Interfacing With Other Languages" heading of the
+SciPy Cookbook
+<https://scipy-cookbook.readthedocs.io/items/idx_interfacing_with_other_languages.html>`_.
The f2py method of linking compiled code is currently the most
sophisticated and integrated approach. It allows clean separation of
@@ -422,7 +421,7 @@ distribution of the extension module. The only draw-back is that it
requires the existence of a Fortran compiler in order for a user to
install the code. However, with the existence of the free-compilers
g77, gfortran, and g95, as well as high-quality commercial compilers,
-this restriction is not particularly onerous. In my opinion, Fortran
+this restriction is not particularly onerous. In our opinion, Fortran
is still the easiest way to write fast and clear code for scientific
computing. It handles complex numbers, and multi-dimensional indexing
in the most straightforward way. Be aware, however, that some Fortran
@@ -493,7 +492,7 @@ Complex addition in Cython
Here is part of a Cython module named ``add.pyx`` which implements the
complex addition functions we previously implemented using f2py:
-.. code-block:: none
+.. code-block:: cython
cimport cython
cimport numpy as np
@@ -546,7 +545,7 @@ Image filter in Cython
The two-dimensional example we created using Fortran is just as easy to write
in Cython:
-.. code-block:: none
+.. code-block:: cython
cimport numpy as np
import numpy as np
@@ -809,7 +808,7 @@ Calling the function
The function is accessed as an attribute of or an item from the loaded
shared-library. Thus, if ``./mylib.so`` has a function named
-``cool_function1``, I could access this function either as:
+``cool_function1``, it may be accessed either as:
.. code-block:: python
@@ -859,7 +858,7 @@ kind of array from a given input.
Complete example
----------------
-In this example, I will show how the addition function and the filter
+In this example, we will demonstrate how the addition function and the filter
function implemented previously using the other approaches can be
implemented using ctypes. First, the C code which implements the
algorithms contains the functions ``zadd``, ``dadd``, ``sadd``, ``cadd``,
@@ -1073,7 +1072,7 @@ Its disadvantages include
- It is difficult to distribute an extension module made using ctypes
because of a lack of support for building shared libraries in
- distutils (but I suspect this will change in time).
+ distutils.
- You must have shared-libraries of your code (no static libraries).
@@ -1095,15 +1094,14 @@ Additional tools you may find useful
These tools have been found useful by others using Python and so are
included here. They are discussed separately because they are
either older ways to do things now handled by f2py, Cython, or ctypes
-(SWIG, PyFort) or because I don't know much about them (SIP, Boost).
-I have not added links to these
-methods because my experience is that you can find the most relevant
-link faster using Google or some other search engine, and any links
-provided here would be quickly dated. Do not assume that just because
-it is included in this list, I don't think the package deserves your
-attention. I'm including information about these packages because many
-people have found them useful and I'd like to give you as many options
-as possible for tackling the problem of easily integrating your code.
+(SWIG, PyFort) or because of a lack of reasonable documentation (SIP, Boost).
+Links to these methods are not included since the most relevant
+can be found using Google or some other search engine, and any links provided
+here would be quickly dated. Do not assume that inclusion in this list means
+that the package deserves attention. Information about these packages is
+collected here because many people have found them useful and we'd like to give
+you as many options as possible for tackling the problem of easily integrating
+your code.
SWIG
@@ -1115,7 +1113,7 @@ SWIG
Simplified Wrapper and Interface Generator (SWIG) is an old and fairly
stable method for wrapping C/C++-libraries to a large variety of other
languages. It does not specifically understand NumPy arrays but can be
-made useable with NumPy through the use of typemaps. There are some
+made usable with NumPy through the use of typemaps. There are some
sample typemaps in the numpy/tools/swig directory under numpy.i together
with an example module that makes use of them. SWIG excels at wrapping
large C/C++ libraries because it can (almost) parse their headers and
@@ -1132,12 +1130,12 @@ to the Python-specific typemaps, SWIG can be used to interface a
library with other languages such as Perl, Tcl, and Ruby.
My experience with SWIG has been generally positive in that it is
-relatively easy to use and quite powerful. I used to use it quite
+relatively easy to use and quite powerful. It has been used
often before becoming more proficient at writing C-extensions.
-However, I struggled writing custom interfaces with SWIG because it
+However, writing custom interfaces with SWIG is often troublesome because it
must be done using the concept of typemaps which are not Python
-specific and are written in a C-like syntax. Therefore, I tend to
-prefer other gluing strategies and would only attempt to use SWIG to
+specific and are written in a C-like syntax. Therefore, other gluing strategies
+are preferred and SWIG would probably be considered only to
wrap a very-large C/C++ library. Nonetheless, there are others who use
SWIG quite happily.
@@ -1170,12 +1168,11 @@ those libraries which provides a concise interface for binding C++
classes and functions to Python. The amazing part of the Boost.Python
approach is that it works entirely in pure C++ without introducing a
new syntax. Many users of C++ report that Boost.Python makes it
-possible to combine the best of both worlds in a seamless fashion. I
-have not used Boost.Python because I am not a big user of C++ and
-using Boost to wrap simple C-subroutines is usually over-kill. It's
-primary purpose is to make C++ classes available in Python. So, if you
-have a set of C++ classes that need to be integrated cleanly into
-Python, consider learning about and using Boost.Python.
+possible to combine the best of both worlds in a seamless fashion. Using Boost
+to wrap simple C-subroutines is usually overkill. Its primary purpose is to
+make C++ classes available in Python. So, if you have a set of C++ classes that
+need to be integrated cleanly into Python, consider learning about and using
+Boost.Python.
PyFort
diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst
index 8ff45a934..9bd01b963 100644
--- a/doc/source/user/c-info.ufunc-tutorial.rst
+++ b/doc/source/user/c-info.ufunc-tutorial.rst
@@ -80,6 +80,7 @@ the module.
.. code-block:: c
+ #define PY_SSIZE_T_CLEAN
#include <Python.h>
#include <math.h>
@@ -252,11 +253,12 @@ the primary thing that must be changed to create your own ufunc.
.. code-block:: c
- #include "Python.h"
- #include "math.h"
+ #define PY_SSIZE_T_CLEAN
+ #include <Python.h>
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/npy_3kcompat.h"
+ #include <math.h>
/*
* single_type_logit.c
@@ -427,11 +429,12 @@ the primary thing that must be changed to create your own ufunc.
.. code-block:: c
- #include "Python.h"
- #include "math.h"
+ #define PY_SSIZE_T_CLEAN
+ #include <Python.h>
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
+ #include <math.h>
/*
* multi_type_logit.c
@@ -696,11 +699,12 @@ as well as all other properties of a ufunc.
.. code-block:: c
- #include "Python.h"
- #include "math.h"
+ #define PY_SSIZE_T_CLEAN
+ #include <Python.h>
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/halffloat.h"
+ #include <math.h>
/*
* multi_arg_logit.c
@@ -828,11 +832,12 @@ The C file is given below.
.. code-block:: c
- #include "Python.h"
- #include "math.h"
+ #define PY_SSIZE_T_CLEAN
+ #include <Python.h>
#include "numpy/ndarraytypes.h"
#include "numpy/ufuncobject.h"
#include "numpy/npy_3kcompat.h"
+ #include <math.h>
/*
diff --git a/doc/source/user/how-to-how-to.rst b/doc/source/user/how-to-how-to.rst
index 13d2b405f..cdf1ad5c3 100644
--- a/doc/source/user/how-to-how-to.rst
+++ b/doc/source/user/how-to-how-to.rst
@@ -102,7 +102,7 @@ knowledge).
We distinguish both tutorials and how-tos from `Explanations`, which are
deep dives intended to give understanding rather than immediate assistance,
-and `References`, which give complete, autoritative data on some concrete
+and `References`, which give complete, authoritative data on some concrete
part of NumPy (like its API) but aren't obligated to paint a broader picture.
For more on tutorials, see :doc:`content/tutorial-style-guide`
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index f0a7f5e4c..316473151 100644
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -143,7 +143,7 @@ Only a survey of the choices. Little detail on how each works.
- Plusses:
- part of Python standard library
- - good for interfacing to existing sharable libraries, particularly
+ - good for interfacing to existing shareable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index ed0be82a0..21e23482a 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -313,11 +313,11 @@ Linear algebra equivalents
* - ``a(:,find(v > 0.5))``
- ``a[:,np.nonzero(v > 0.5)[0]]``
- - extract the columms of ``a`` where vector v > 0.5
+ - extract the columns of ``a`` where vector v > 0.5
* - ``a(:,find(v>0.5))``
- ``a[:, v.T > 0.5]``
- - extract the columms of ``a`` where column vector v > 0.5
+ - extract the columns of ``a`` where column vector v > 0.5
* - ``a(a<0.5)=0``
- ``a[a < 0.5]=0``
@@ -819,6 +819,6 @@ found in the `topical software page <https://scipy.org/topical-software.html>`__
See
`List of Python software: scripting
<https://en.wikipedia.org/wiki/List_of_Python_software#Embedded_as_a_scripting_language>`_
-for a list of softwares that use Python as a scripting language
+for a list of software that uses Python as a scripting language
MATLAB® and SimuLink® are registered trademarks of The MathWorks, Inc.
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index dd5773878..a9cfeca31 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -45,10 +45,11 @@ NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
tuple of non-negative integers. In NumPy dimensions are called *axes*.
-For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
-one axis. That axis has 3 elements in it, so we say it has a length
-of 3. In the example pictured below, the array has 2 axes. The first
-axis has a length of 2, the second axis has a length of 3.
+For example, the array for the coordinates of a point in 3D space,
+``[1, 2, 1]``, has one axis. That axis has 3 elements in it, so we say
+it has a length of 3. In the example pictured below, the array has 2
+axes. The first axis has a length of 2, the second axis has a length of
+3.
::
diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst
index 154f91c84..e152a4ae2 100644
--- a/doc/source/user/whatisnumpy.rst
+++ b/doc/source/user/whatisnumpy.rst
@@ -125,7 +125,7 @@ same shape, or a scalar and an array, or even two arrays of with
different shapes, provided that the smaller array is "expandable" to
the shape of the larger in such a way that the resulting broadcast is
unambiguous. For detailed "rules" of broadcasting see
-`basics.broadcasting`.
+:ref:`Broadcasting <basics.broadcasting>`.
Who Else Uses NumPy?
--------------------