summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/CODE_OF_CONDUCT.md2
-rw-r--r--.github/ISSUE_TEMPLATE/bug-report.md4
-rw-r--r--.github/ISSUE_TEMPLATE/feature-request.md2
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md33
-rw-r--r--azure-pipelines.yml10
-rw-r--r--doc/DISTUTILS.rst.txt48
-rw-r--r--doc/neps/nep-0029-deprecation_policy.rst2
-rw-r--r--doc/neps/nep-0042-new-dtypes.rst51
-rw-r--r--doc/source/_templates/indexcontent.html2
-rw-r--r--doc/source/about.rst62
-rw-r--r--doc/source/conf.py101
-rw-r--r--doc/source/dev/conduct/code_of_conduct.rst163
-rw-r--r--doc/source/dev/conduct/report_handling_manual.rst220
-rw-r--r--doc/source/dev/development_workflow.rst37
-rw-r--r--doc/source/dev/index.rst2
-rw-r--r--doc/source/doc_conventions.rst23
-rw-r--r--doc/source/docs/howto_document.rst5
-rw-r--r--doc/source/reference/arrays.indexing.rst8
-rw-r--r--doc/source/reference/arrays.interface.rst10
-rw-r--r--doc/source/reference/arrays.scalars.rst337
-rw-r--r--doc/source/reference/c-api/array.rst8
-rw-r--r--doc/source/reference/c-api/config.rst15
-rw-r--r--doc/source/reference/c-api/dtype.rst12
-rw-r--r--doc/source/reference/c-api/iterator.rst2
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst519
-rw-r--r--doc/source/reference/c-api/ufunc.rst148
-rw-r--r--doc/source/reference/routines.ma.rst2
-rw-r--r--doc/source/user/basics.rec.rst11
-rw-r--r--doc/source/user/basics.types.rst64
-rw-r--r--doc/source/user/how-to-io.rst328
-rw-r--r--doc/source/user/howtos_index.rst2
-rw-r--r--doc/source/user/index.rst7
-rw-r--r--doc/source/user/ionumpy.rst20
-rw-r--r--numpy/__init__.pyi1144
-rw-r--r--numpy/core/__init__.py1
-rw-r--r--numpy/core/__init__.pyi0
-rw-r--r--numpy/core/_add_newdocs.py184
-rw-r--r--numpy/core/_add_newdocs_scalars.py195
-rw-r--r--numpy/core/_asarray.pyi77
-rw-r--r--numpy/core/einsumfunc.py11
-rw-r--r--numpy/core/fromnumeric.pyi53
-rw-r--r--numpy/core/function_base.pyi4
-rw-r--r--numpy/core/multiarray.py2
-rw-r--r--numpy/core/numeric.py2
-rw-r--r--numpy/core/numeric.pyi117
-rw-r--r--numpy/core/src/common/array_assign.c27
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src68
-rw-r--r--numpy/core/src/multiarray/array_coercion.c4
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c4
-rw-r--r--numpy/core/src/multiarray/common.c21
-rw-r--r--numpy/core/src/multiarray/ctors.c3
-rw-r--r--numpy/core/src/multiarray/descriptor.c9
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c22
-rw-r--r--numpy/core/src/multiarray/mapping.c72
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c234
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src38
-rw-r--r--numpy/core/src/multiarray/shape.c61
-rw-r--r--numpy/core/src/multiarray/shape.h7
-rw-r--r--numpy/core/src/multiarray/usertypes.c35
-rw-r--r--numpy/core/src/umath/ufunc_object.c9
-rw-r--r--numpy/core/tests/test_array_coercion.py15
-rw-r--r--numpy/core/tests/test_dtype.py37
-rw-r--r--numpy/lib/arraysetops.py2
-rw-r--r--numpy/lib/format.py2
-rw-r--r--numpy/lib/tests/test_arraysetops.py18
-rw-r--r--numpy/ma/core.py10
-rw-r--r--numpy/ma/extras.py8
-rw-r--r--numpy/ma/mrecords.py4
-rw-r--r--numpy/polynomial/_polybase.py8
-rw-r--r--numpy/typing/__init__.py11
-rw-r--r--numpy/typing/_array_like.py8
-rw-r--r--numpy/typing/_callable.py138
-rw-r--r--numpy/typing/_scalars.py26
-rw-r--r--numpy/typing/tests/data/fail/arithmetic.py19
-rw-r--r--numpy/typing/tests/data/fail/array_constructors.py (renamed from numpy/typing/tests/data/fail/linspace.py)13
-rw-r--r--numpy/typing/tests/data/fail/modules.py1
-rw-r--r--numpy/typing/tests/data/fail/ndarray_misc.py21
-rw-r--r--numpy/typing/tests/data/fail/scalars.py16
-rw-r--r--numpy/typing/tests/data/fail/simple.py12
-rw-r--r--numpy/typing/tests/data/pass/arithmetic.py257
-rw-r--r--numpy/typing/tests/data/pass/array_constructors.py66
-rw-r--r--numpy/typing/tests/data/pass/linspace.py22
-rw-r--r--numpy/typing/tests/data/pass/literal.py2
-rw-r--r--numpy/typing/tests/data/pass/ndarray_misc.py159
-rw-r--r--numpy/typing/tests/data/pass/scalars.py13
-rw-r--r--numpy/typing/tests/data/pass/simple.py9
-rw-r--r--numpy/typing/tests/data/pass/ufuncs.py5
-rw-r--r--numpy/typing/tests/data/reveal/arithmetic.py256
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.py42
-rw-r--r--numpy/typing/tests/data/reveal/linspace.py6
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.py150
-rw-r--r--numpy/typing/tests/data/reveal/scalars.py17
-rw-r--r--numpy/typing/tests/test_typing.py4
-rwxr-xr-xsetup.py64
-rw-r--r--test_requirements.txt2
96 files changed, 4226 insertions, 1883 deletions
diff --git a/.github/CODE_OF_CONDUCT.md b/.github/CODE_OF_CONDUCT.md
index 57c98060e..079098fae 100644
--- a/.github/CODE_OF_CONDUCT.md
+++ b/.github/CODE_OF_CONDUCT.md
@@ -1 +1 @@
-NumPy has a Code of Conduct, please see: https://www.numpy.org/devdocs/dev/conduct/code_of_conduct.html
+NumPy has a Code of Conduct, please see: https://numpy.org/code-of-conduct
diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md
index d2df08689..78ffc1063 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.md
+++ b/.github/ISSUE_TEMPLATE/bug-report.md
@@ -1,6 +1,6 @@
---
-name: "Bug Report"
-about: Submit a bug report to help us improve NumPy
+name: "Bug report"
+about: Report a bug. Not for security vulnerabilities -- see below.
---
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
index 0be94f928..00c6f59c5 100644
--- a/.github/ISSUE_TEMPLATE/feature-request.md
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -1,5 +1,5 @@
---
-name: "Feature Request"
+name: "Feature request"
about: Check instructions for submitting your idea on the mailing list first.
---
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 528580a8e..dee33ee5f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,11 +1,22 @@
-<!-- Please be sure you are following the instructions in the dev guidelines
-http://www.numpy.org/devdocs/dev/development_workflow.html
--->
-
-<!-- We'd appreciate it if your commit message is properly formatted
-http://www.numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message
--->
-
-<!-- If you're submitting a new feature or substantial change in functionality,
-make sure you discuss your changes in the numpy-discussion mailing list first:
-https://mail.python.org/mailman/listinfo/numpy-discussion -->
+<!--
+
+ ----------------------------------------------------------------
+ MAKE SURE YOUR PR GETS THE ATTENTION IT DESERVES!
+ ----------------------------------------------------------------
+
+
+* FORMAT IT RIGHT:
+ http://www.numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message
+
+
+* IF IT'S A NEW FEATURE OR API CHANGE, TEST THE WATERS:
+ http://www.numpy.org/devdocs/dev/development_workflow.html#get-the-mailing-list-s-opinion
+
+
+* HIT ALL THE GUIDELINES:
+ https://numpy.org/devdocs/dev/index.html#guidelines
+
+
+* WHAT TO DO IF WE HAVEN'T GOTTEN BACK TO YOU:
+ http://www.numpy.org/devdocs/dev/development_workflow.html#getting-your-pr-reviewed
+-->
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index da57649b8..9382ac83c 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -217,11 +217,11 @@ stages:
BITS: 64
NPY_USE_BLAS_ILP64: '1'
OPENBLAS_SUFFIX: '64_'
- PyPy36-32bit:
- PYTHON_VERSION: 'PyPy3.6'
- PYTHON_ARCH: 'x32'
- TEST_MODE: fast
- BITS: 32
+ #PyPy36-32bit:
+ #PYTHON_VERSION: 'PyPy3.6'
+ #PYTHON_ARCH: 'x32'
+ #TEST_MODE: fast
+ #BITS: 32
steps:
- template: azure-steps-windows.yml
- job: Linux_PyPy3
diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt
index f1f270462..c58a423c0 100644
--- a/doc/DISTUTILS.rst.txt
+++ b/doc/DISTUTILS.rst.txt
@@ -394,37 +394,37 @@ and ``/**end repeat**/`` lines, which may also be nested using
consecutively numbered delimiting lines such as ``/**begin repeat1``
and ``/**end repeat1**/``:
-1. "/\**begin repeat "on a line by itself marks the beginning of
-a segment that should be repeated.
+1. ``/**begin repeat`` on a line by itself marks the beginning of
+ a segment that should be repeated.
2. Named variable expansions are defined using ``#name=item1, item2, item3,
-..., itemN#`` and placed on successive lines. These variables are
-replaced in each repeat block with corresponding word. All named
-variables in the same repeat block must define the same number of
-words.
+ ..., itemN#`` and placed on successive lines. These variables are
+ replaced in each repeat block with corresponding word. All named
+ variables in the same repeat block must define the same number of
+ words.
3. In specifying the repeat rule for a named variable, ``item*N`` is short-
-hand for ``item, item, ..., item`` repeated N times. In addition,
-parenthesis in combination with \*N can be used for grouping several
-items that should be repeated. Thus, #name=(item1, item2)*4# is
-equivalent to #name=item1, item2, item1, item2, item1, item2, item1,
-item2#
+ hand for ``item, item, ..., item`` repeated N times. In addition,
+ parenthesis in combination with ``*N`` can be used for grouping several
+ items that should be repeated. Thus, ``#name=(item1, item2)*4#`` is
+ equivalent to ``#name=item1, item2, item1, item2, item1, item2, item1,
+ item2#``.
-4. "\*/ "on a line by itself marks the end of the variable expansion
-naming. The next line is the first line that will be repeated using
-the named rules.
+4. ``*/`` on a line by itself marks the end of the variable expansion
+ naming. The next line is the first line that will be repeated using
+ the named rules.
5. Inside the block to be repeated, the variables that should be expanded
-are specified as ``@name@``
+ are specified as ``@name@``.
-6. "/\**end repeat**/ "on a line by itself marks the previous line
-as the last line of the block to be repeated.
+6. ``/**end repeat**/`` on a line by itself marks the previous line
+ as the last line of the block to be repeated.
7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted
-for string substitution, which is preprocessed to a number of otherwise
-identical loops with several strings such as INT, LONG, UINT, ULONG. The
-``@TYPE@`` style syntax thus reduces code duplication and maintenance burden by
-mimicking languages that have generic type support.
+ for string substitution, which is preprocessed to a number of otherwise
+ identical loops with several strings such as ``INT``, ``LONG``, ``UINT``,
+ ``ULONG``. The ``@TYPE@`` style syntax thus reduces code duplication and
+ maintenance burden by mimicking languages that have generic type support.
The above rules may be clearer in the following template source example:
@@ -464,13 +464,13 @@ The above rules may be clearer in the following template source example:
/**end repeat**/
-The preprocessing of generically typed C source files (whether in NumPy
+The preprocessing of generically-typed C source files (whether in NumPy
proper or in any third party package using NumPy Distutils) is performed
by `conv_template.py`_.
-The type specific C files generated (extension: .c)
+The type-specific C files generated (extension: ``.c``)
by these modules during the build process are ready to be compiled. This
form of generic typing is also supported for C header files (preprocessed
-to produce .h files).
+to produce ``.h`` files).
.. _conv_template.py: https://github.com/numpy/numpy/blob/master/numpy/distutils/conv_template.py
diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst
index 4674d24ec..957674ee6 100644
--- a/doc/neps/nep-0029-deprecation_policy.rst
+++ b/doc/neps/nep-0029-deprecation_policy.rst
@@ -77,7 +77,7 @@ release in November 2020 should support Python 3.7 and newer.
The current Python release cadence is 18 months so a 42 month window
ensures that there will always be at least two minor versions of Python
in the window. The window is extended 6 months beyond the anticipated two-release
-interval for Python to provides resilience against small fluctuations /
+interval for Python to provide resilience against small fluctuations /
delays in its release schedule.
Because Python minor version support is based only on historical
diff --git a/doc/neps/nep-0042-new-dtypes.rst b/doc/neps/nep-0042-new-dtypes.rst
index 2d1e3a329..99887a451 100644
--- a/doc/neps/nep-0042-new-dtypes.rst
+++ b/doc/neps/nep-0042-new-dtypes.rst
@@ -259,21 +259,48 @@ including the type hierarchy and the use of abstract DTypes.
Class getter
==============================================================================
-To create a dtype instance from a scalar type users now call ``np.dtype`` (for
-instance, ``np.dtype(np.int64)``).
-
-To get the DType of a scalar type, we propose this getter syntax::
+To create a DType instance from a scalar type users now call
+``np.dtype`` (for instance, ``np.dtype(np.int64)``). Sometimes it is
+also necessary to access the underlying DType class; this comes up in
+particular with type hinting because the "type" of a DType instance is
+the DType class. Taking inspiration from type hinting, we propose the
+following getter syntax::
np.dtype[np.int64]
-The notation works equally well with built-in and user-defined DTypes
-and is inspired by and potentially useful for type hinting.
+to get the DType class corresponding to a scalar type. The notation
+works equally well with built-in and user-defined DTypes.
This getter eliminates the need to create an explicit name for every
-DType, crowding the ``np`` namespace; the getter itself signifies the type.
+DType, crowding the ``np`` namespace; the getter itself signifies the
+type. It also opens the possibility of making ``np.ndarray`` generic
+over DType class using annotations like::
+
+ np.ndarray[np.dtype[np.float64]]
+
+The above is fairly verbose, so it is possible that we will include
+aliases like::
+
+ Float64 = np.dtype[np.float64]
+
+in ``numpy.typing``, thus keeping annotations concise but still
+avoiding crowding the ``np`` namespace as discussed above. For a
+user-defined DType::
+
+ class UserDtype(dtype): ...
+
+one can do ``np.ndarray[UserDtype]``, keeping annotations concise in
+that case without introducing boilerplate in NumPy itself. For a user
+user-defined scalar type::
+
+ class UserScalar(generic): ...
+
+we would need to add a typing overload to ``dtype``::
+
+ @overload
+ __new__(cls, dtype: Type[UserScalar], ...) -> UserDtype
-Since getter calls won't be needed often, this is unlikely to be burdensome.
-Classes can also offer concise alternatives.
+to allow ``np.dtype[UserScalar]``.
The initial implementation probably will return only concrete (not abstract)
DTypes.
@@ -393,7 +420,7 @@ casting and array coercion, which are described in detail below.
sortfunction`` that must return ``NotImplemented`` if the given ``sortkind``
is not known.
-* Functions that cannot be removed are implemented as special methods.
+* Functions that cannot be removed are implemented as special methods.
Many of these were previously defined part of the :c:type:`PyArray_ArrFuncs`
slot of the dtype instance (``PyArray_Descr *``) and include functions
such as ``nonzero``, ``fill`` (used for ``np.arange``), and
@@ -408,7 +435,7 @@ casting and array coercion, which are described in detail below.
object to ensure uniqueness for all DTypes. On the C side, ``kind`` and
``char`` are set to ``\0`` (NULL character).
While ``kind`` will be discouraged, the current ``np.issubdtype``
- may remain the preferred method for this type of check.
+ may remain the preferred method for this type of check.
* A method ``ensure_canonical(self) -> dtype`` returns a new dtype (or
``self``) with the ``canonical`` flag set.
@@ -1229,7 +1256,7 @@ Non-parametric dtypes do not have to implement:
* ``discover_descr_from_pyobject`` (uses ``default_descr`` instead)
* ``common_instance`` (uses ``default_descr`` instead)
-* ``ensure_canonical`` (uses ``default_descr`` instead).
+* ``ensure_canonical`` (uses ``default_descr`` instead).
Sorting is expected to be implemented using:
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index 5929e755d..6633aa9be 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -56,7 +56,7 @@
<p class="biglink"><a class="biglink" href="{{ pathto("bugs") }}">Reporting bugs</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("release") }}">Release Notes</a></p>
</td><td width="50%">
- <p class="biglink"><a class="biglink" href="{{ pathto("about") }}">About NumPy</a></p>
+ <p class="biglink"><a class="biglink" href="{{ pathto("doc_conventions") }}">Document conventions</a></p>
<p class="biglink"><a class="biglink" href="{{ pathto("license") }}">License of NumPy</a></p>
</td></tr>
</table>
diff --git a/doc/source/about.rst b/doc/source/about.rst
deleted file mode 100644
index 3e83833d1..000000000
--- a/doc/source/about.rst
+++ /dev/null
@@ -1,62 +0,0 @@
-About NumPy
-===========
-
-NumPy is the fundamental package
-needed for scientific computing with Python. This package contains:
-
-- a powerful N-dimensional :ref:`array object <arrays>`
-- sophisticated :ref:`(broadcasting) functions <ufuncs>`
-- basic :ref:`linear algebra functions <routines.linalg>`
-- basic :ref:`Fourier transforms <routines.fft>`
-- sophisticated :ref:`random number capabilities <numpyrandom>`
-- tools for integrating Fortran code
-- tools for integrating C/C++ code
-
-Besides its obvious scientific uses, *NumPy* can also be used as an
-efficient multi-dimensional container of generic data. Arbitrary
-data types can be defined. This allows *NumPy* to seamlessly and
-speedily integrate with a wide variety of databases.
-
-NumPy is a successor for two earlier scientific Python libraries:
-Numeric and Numarray.
-
-NumPy community
----------------
-
-NumPy is a distributed, volunteer, open-source project. *You* can help
-us make it better; if you believe something should be improved either
-in functionality or in documentation, don't hesitate to contact us --- or
-even better, contact us and participate in fixing the problem.
-
-Our main means of communication are:
-
-- `scipy.org website <https://scipy.org/>`__
-
-- `Mailing lists <https://scipy.org/scipylib/mailing-lists.html>`__
-
-- `NumPy Issues <https://github.com/numpy/numpy/issues>`__ (bug reports go here)
-
-- `Old NumPy Trac <http://projects.scipy.org/numpy>`__ (dead link)
-
-More information about the development of NumPy can be found at our `Developer Zone <https://scipy.scipy.org/scipylib/dev-zone.html>`__.
-
-The project management structure can be found at our :doc:`governance page <dev/governance/index>`
-
-
-About this documentation
-========================
-
-Conventions
------------
-
-Names of classes, objects, constants, etc. are given in **boldface** font.
-Often they are also links to a more detailed documentation of the
-referred object.
-
-This manual contains many examples of use, usually prefixed with the
-Python prompt ``>>>`` (which is not a part of the example code). The
-examples assume that you have first entered::
-
->>> import numpy as np
-
-before running the examples.
diff --git a/doc/source/conf.py b/doc/source/conf.py
index fe7ea0967..381a01612 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -6,6 +6,62 @@ import sys
# Minimum version, enforced by sphinx
needs_sphinx = '2.2.0'
+
+# This is a nasty hack to use platform-agnostic names for types in the
+# documentation.
+
+# must be kept alive to hold the patched names
+_name_cache = {}
+
+def replace_scalar_type_names():
+ """ Rename numpy types to use the canonical names to make sphinx behave """
+ import ctypes
+
+ Py_ssize_t = ctypes.c_int64 if ctypes.sizeof(ctypes.c_void_p) == 8 else ctypes.c_int32
+
+ class PyObject(ctypes.Structure):
+ pass
+
+ class PyTypeObject(ctypes.Structure):
+ pass
+
+ PyObject._fields_ = [
+ ('ob_refcnt', Py_ssize_t),
+ ('ob_type', ctypes.POINTER(PyTypeObject)),
+ ]
+
+
+ PyTypeObject._fields_ = [
+ # varhead
+ ('ob_base', PyObject),
+ ('ob_size', Py_ssize_t),
+ # declaration
+ ('tp_name', ctypes.c_char_p),
+ ]
+
+ # prevent numpy attaching docstrings to the scalar types
+ assert 'numpy.core._add_newdocs_scalars' not in sys.modules
+ sys.modules['numpy.core._add_newdocs_scalars'] = object()
+
+ import numpy
+
+ # change the __name__ of the scalar types
+ for name in [
+ 'byte', 'short', 'intc', 'int_', 'longlong',
+ 'ubyte', 'ushort', 'uintc', 'uint', 'ulonglong',
+ 'half', 'single', 'double', 'longdouble',
+ 'half', 'csingle', 'cdouble', 'clongdouble',
+ ]:
+ typ = getattr(numpy, name)
+ c_typ = PyTypeObject.from_address(id(typ))
+ c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8')
+
+ # now generate the docstrings as usual
+ del sys.modules['numpy.core._add_newdocs_scalars']
+ import numpy.core._add_newdocs_scalars
+
+replace_scalar_type_names()
+
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
@@ -227,6 +283,8 @@ intersphinx_mapping = {
'matplotlib': ('https://matplotlib.org', None),
'imageio': ('https://imageio.readthedocs.io/en/stable', None),
'skimage': ('https://scikit-image.org/docs/stable', None),
+ 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None),
+ 'scipy-lecture-notes': ('https://scipy-lectures.org', None),
}
@@ -310,6 +368,17 @@ for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
else:
print("NOTE: linkcode extension not found -- no links to source generated")
+
+def _get_c_source_file(obj):
+ if issubclass(obj, numpy.generic):
+ return r"core/src/multiarray/scalartypes.c.src"
+ elif obj is numpy.ndarray:
+ return r"core/src/multiarray/arrayobject.c"
+ else:
+ # todo: come up with a better way to generate these
+ return None
+
+
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
@@ -340,25 +409,33 @@ def linkcode_resolve(domain, info):
else:
obj = unwrap(obj)
- try:
- fn = inspect.getsourcefile(obj)
- except Exception:
- fn = None
- if not fn:
- return None
+ fn = None
+ lineno = None
- try:
- source, lineno = inspect.getsourcelines(obj)
- except Exception:
- lineno = None
+ # Make a poor effort at linking C extension types
+ if isinstance(obj, type) and obj.__module__ == 'numpy':
+ fn = _get_c_source_file(obj)
+
+ if fn is None:
+ try:
+ fn = inspect.getsourcefile(obj)
+ except Exception:
+ fn = None
+ if not fn:
+ return None
+
+ try:
+ source, lineno = inspect.getsourcelines(obj)
+ except Exception:
+ lineno = None
+
+ fn = relpath(fn, start=dirname(numpy.__file__))
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
- fn = relpath(fn, start=dirname(numpy.__file__))
-
if 'dev' in numpy.__version__:
return "https://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
diff --git a/doc/source/dev/conduct/code_of_conduct.rst b/doc/source/dev/conduct/code_of_conduct.rst
deleted file mode 100644
index f2f0a536d..000000000
--- a/doc/source/dev/conduct/code_of_conduct.rst
+++ /dev/null
@@ -1,163 +0,0 @@
-NumPy Code of Conduct
-=====================
-
-
-Introduction
-------------
-
-This code of conduct applies to all spaces managed by the NumPy project,
-including all public and private mailing lists, issue trackers, wikis, blogs,
-Twitter, and any other communication channel used by our community. The NumPy
-project does not organise in-person events, however events related to our
-community should have a code of conduct similar in spirit to this one.
-
-This code of conduct should be honored by everyone who participates in
-the NumPy community formally or informally, or claims any affiliation with the
-project, in any project-related activities and especially when representing the
-project, in any role.
-
-This code is not exhaustive or complete. It serves to distill our common
-understanding of a collaborative, shared environment and goals. Please try to
-follow this code in spirit as much as in letter, to create a friendly and
-productive environment that enriches the surrounding community.
-
-
-Specific Guidelines
--------------------
-
-We strive to:
-
-1. Be open. We invite anyone to participate in our community. We prefer to use
- public methods of communication for project-related messages, unless
- discussing something sensitive. This applies to messages for help or
- project-related support, too; not only is a public support request much more
- likely to result in an answer to a question, it also ensures that any
- inadvertent mistakes in answering are more easily detected and corrected.
-
-2. Be empathetic, welcoming, friendly, and patient. We work together to resolve
- conflict, and assume good intentions. We may all experience some frustration
- from time to time, but we do not allow frustration to turn into a personal
- attack. A community where people feel uncomfortable or threatened is not a
- productive one.
-
-3. Be collaborative. Our work will be used by other people, and in turn we will
- depend on the work of others. When we make something for the benefit of the
- project, we are willing to explain to others how it works, so that they can
- build on the work to make it even better. Any decision we make will affect
- users and colleagues, and we take those consequences seriously when making
- decisions.
-
-4. Be inquisitive. Nobody knows everything! Asking questions early avoids many
- problems later, so we encourage questions, although we may direct them to
- the appropriate forum. We will try hard to be responsive and helpful.
-
-5. Be careful in the words that we choose. We are careful and respectful in
- our communication and we take responsibility for our own speech. Be kind to
- others. Do not insult or put down other participants. We will not accept
- harassment or other exclusionary behaviour, such as:
-
- - Violent threats or language directed against another person.
- - Sexist, racist, or otherwise discriminatory jokes and language.
- - Posting sexually explicit or violent material.
- - Posting (or threatening to post) other people's personally identifying information ("doxing").
- - Sharing private content, such as emails sent privately or non-publicly,
- or unlogged forums such as IRC channel history, without the sender's consent.
- - Personal insults, especially those using racist or sexist terms.
- - Unwelcome sexual attention.
- - Excessive profanity. Please avoid swearwords; people differ greatly in their sensitivity to swearing.
- - Repeated harassment of others. In general, if someone asks you to stop, then stop.
- - Advocating for, or encouraging, any of the above behaviour.
-
-
-Diversity Statement
--------------------
-
-The NumPy project welcomes and encourages participation by everyone. We are
-committed to being a community that everyone enjoys being part of. Although
-we may not always be able to accommodate each individual's preferences, we try
-our best to treat everyone kindly.
-
-No matter how you identify yourself or how others perceive you: we welcome you.
-Though no list can hope to be comprehensive, we explicitly honour diversity in:
-age, culture, ethnicity, genotype, gender identity or expression, language,
-national origin, neurotype, phenotype, political beliefs, profession, race,
-religion, sexual orientation, socioeconomic status, subculture and technical
-ability, to the extent that these do not conflict with this code of conduct.
-
-
-Though we welcome people fluent in all languages, NumPy development is
-conducted in English.
-
-Standards for behaviour in the NumPy community are detailed in the Code of
-Conduct above. Participants in our community should uphold these standards
-in all their interactions and help others to do so as well (see next section).
-
-
-Reporting Guidelines
---------------------
-
-We know that it is painfully common for internet communication to start at or
-devolve into obvious and flagrant abuse. We also recognize that sometimes
-people may have a bad day, or be unaware of some of the guidelines in this Code
-of Conduct. Please keep this in mind when deciding on how to respond to a
-breach of this Code.
-
-For clearly intentional breaches, report those to the Code of Conduct committee
-(see below). For possibly unintentional breaches, you may reply to the person
-and point out this code of conduct (either in public or in private, whatever is
-most appropriate). If you would prefer not to do that, please feel free to
-report to the Code of Conduct Committee directly, or ask the Committee for
-advice, in confidence.
-
-You can report issues to the NumPy Code of Conduct committee, at
-numpy-conduct@googlegroups.com. Currently, the committee consists of:
-
-- Stefan van der Walt
-- Melissa Weber Mendonça
-- Anirudh Subramanian
-
-If your report involves any members of the committee, or if they feel they have
-a conflict of interest in handling it, then they will recuse themselves from
-considering your report. Alternatively, if for any reason you feel
-uncomfortable making a report to the committee, then you can also contact:
-
-- Senior `NumFOCUS staff <https://numfocus.org/code-of-conduct#persons-responsible>`__: conduct@numfocus.org
-
-
-Incident reporting resolution & Code of Conduct enforcement
------------------------------------------------------------
-
-*This section summarizes the most important points, more details can be found
-in* :ref:`CoC_reporting_manual`.
-
-We will investigate and respond to all complaints. The NumPy Code of Conduct
-Committee and the NumPy Steering Committee (if involved) will protect the
-identity of the reporter, and treat the content of complaints as confidential
-(unless the reporter agrees otherwise).
-
-In case of severe and obvious breaches, e.g. personal threat or violent, sexist
-or racist language, we will immediately disconnect the originator from NumPy
-communication channels; please see the manual for details.
-
-In cases not involving clear severe and obvious breaches of this code of
-conduct, the process for acting on any received code of conduct violation
-report will be:
-
-1. acknowledge report is received
-2. reasonable discussion/feedback
-3. mediation (if feedback didn't help, and only if both reporter and reportee agree to this)
-4. enforcement via transparent decision (see :ref:`CoC_resolutions`) by the
- Code of Conduct Committee
-
-The committee will respond to any report as soon as possible, and at most
-within 72 hours.
-
-
-Endnotes
---------
-
-We are thankful to the groups behind the following documents, from which we
-drew content and inspiration:
-
-- `The SciPy Code of Conduct <https://docs.scipy.org/doc/scipy/reference/dev/conduct/code_of_conduct.html>`_
-
diff --git a/doc/source/dev/conduct/report_handling_manual.rst b/doc/source/dev/conduct/report_handling_manual.rst
deleted file mode 100644
index d39b615bb..000000000
--- a/doc/source/dev/conduct/report_handling_manual.rst
+++ /dev/null
@@ -1,220 +0,0 @@
-:orphan:
-
-.. _CoC_reporting_manual:
-
-NumPy Code of Conduct - How to follow up on a report
-----------------------------------------------------
-
-This is the manual followed by NumPy's Code of Conduct Committee. It's used
-when we respond to an issue to make sure we're consistent and fair.
-
-Enforcing the Code of Conduct impacts our community today and for the future.
-It's an action that we do not take lightly. When reviewing enforcement
-measures, the Code of Conduct Committee will keep the following values and
-guidelines in mind:
-
-* Act in a personal manner rather than impersonal. The Committee can engage
- the parties to understand the situation, while respecting the privacy and any
- necessary confidentiality of reporters. However, sometimes it is necessary
- to communicate with one or more individuals directly: the Committee's goal is
- to improve the health of our community rather than only produce a formal
- decision.
-
-* Emphasize empathy for individuals rather than judging behavior, avoiding
- binary labels of "good" and "bad/evil". Overt, clear-cut aggression and
- harassment exists and we will be address that firmly. But many scenarios
- that can prove challenging to resolve are those where normal disagreements
- devolve into unhelpful or harmful behavior from multiple parties.
- Understanding the full context and finding a path that re-engages all is
- hard, but ultimately the most productive for our community.
-
-* We understand that email is a difficult medium and can be isolating.
- Receiving criticism over email, without personal contact, can be
- particularly painful. This makes it especially important to keep an
- atmosphere of open-minded respect of the views of others. It also means
- that we must be transparent in our actions, and that we will do everything
- in our power to make sure that all our members are treated fairly and with
- sympathy.
-
-* Discrimination can be subtle and it can be unconscious. It can show itself
- as unfairness and hostility in otherwise ordinary interactions. We know
- that this does occur, and we will take care to look out for it. We would
- very much like to hear from you if you feel you have been treated unfairly,
- and we will use these procedures to make sure that your complaint is heard
- and addressed.
-
-* Help increase engagement in good discussion practice: try to identify where
- discussion may have broken down and provide actionable information, pointers
- and resources that can lead to positive change on these points.
-
-* Be mindful of the needs of new members: provide them with explicit support
- and consideration, with the aim of increasing participation from
- underrepresented groups in particular.
-
-* Individuals come from different cultural backgrounds and native languages.
- Try to identify any honest misunderstandings caused by a non-native speaker
- and help them understand the issue and what they can change to avoid causing
- offence. Complex discussion in a foreign language can be very intimidating,
- and we want to grow our diversity also across nationalities and cultures.
-
-*Mediation*: voluntary, informal mediation is a tool at our disposal. In
-contexts such as when two or more parties have all escalated to the point of
-inappropriate behavior (something sadly common in human conflict), it may be
-useful to facilitate a mediation process. This is only an example: the
-Committee can consider mediation in any case, mindful that the process is meant
-to be strictly voluntary and no party can be pressured to participate. If the
-Committee suggests mediation, it should:
-
-* Find a candidate who can serve as a mediator.
-* Obtain the agreement of the reporter(s). The reporter(s) have complete
- freedom to decline the mediation idea, or to propose an alternate mediator.
-* Obtain the agreement of the reported person(s).
-* Settle on the mediator: while parties can propose a different mediator than
- the suggested candidate, only if common agreement is reached on all terms can
- the process move forward.
-* Establish a timeline for mediation to complete, ideally within two weeks.
-
-The mediator will engage with all the parties and seek a resolution that is
-satisfactory to all. Upon completion, the mediator will provide a report
-(vetted by all parties to the process) to the Committee, with recommendations
-on further steps. The Committee will then evaluate these results (whether
-satisfactory resolution was achieved or not) and decide on any additional
-action deemed necessary.
-
-
-How the committee will respond to reports
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-When the committee (or a committee member) receives a report, they will first
-determine whether the report is about a clear and severe breach (as defined
-below). If so, immediate action needs to be taken in addition to the regular
-report handling process.
-
-Clear and severe breach actions
-+++++++++++++++++++++++++++++++
-
-We know that it is painfully common for internet communication to start at or
-devolve into obvious and flagrant abuse. We will deal quickly with clear and
-severe breaches like personal threats, violent, sexist or racist language.
-
-When a member of the Code of Conduct committee becomes aware of a clear and
-severe breach, they will do the following:
-
-* Immediately disconnect the originator from all NumPy communication channels.
-* Reply to the reporter that their report has been received and that the
- originator has been disconnected.
-* In every case, the moderator should make a reasonable effort to contact the
- originator, and tell them specifically how their language or actions
- qualify as a "clear and severe breach". The moderator should also say
- that, if the originator believes this is unfair or they want to be
- reconnected to NumPy, they have the right to ask for a review, as below, by
- the Code of Conduct Committee.
- The moderator should copy this explanation to the Code of Conduct Committee.
-* The Code of Conduct Committee will formally review and sign off on all cases
- where this mechanism has been applied to make sure it is not being used to
- control ordinary heated disagreement.
-
-Report handling
-+++++++++++++++
-
-When a report is sent to the committee they will immediately reply to the
-reporter to confirm receipt. This reply must be sent within 72 hours, and the
-group should strive to respond much quicker than that.
-
-If a report doesn't contain enough information, the committee will obtain all
-relevant data before acting. The committee is empowered to act on the Steering
-Council’s behalf in contacting any individuals involved to get a more complete
-account of events.
-
-The committee will then review the incident and determine, to the best of their
-ability:
-
-* What happened.
-* Whether this event constitutes a Code of Conduct violation.
-* Who are the responsible party(ies).
-* Whether this is an ongoing situation, and there is a threat to anyone's
- physical safety.
-
-This information will be collected in writing, and whenever possible the
-group's deliberations will be recorded and retained (i.e. chat transcripts,
-email discussions, recorded conference calls, summaries of voice conversations,
-etc).
-
-It is important to retain an archive of all activities of this committee to
-ensure consistency in behavior and provide institutional memory for the
-project. To assist in this, the default channel of discussion for this
-committee will be a private mailing list accessible to current and future
-members of the committee as well as members of the Steering Council upon
-justified request. If the Committee finds the need to use off-list
-communications (e.g. phone calls for early/rapid response), it should in all
-cases summarize these back to the list so there's a good record of the process.
-
-The Code of Conduct Committee should aim to have a resolution agreed upon within
-two weeks. In the event that a resolution can't be determined in that time, the
-committee will respond to the reporter(s) with an update and projected timeline
-for resolution.
-
-
-.. _CoC_resolutions:
-
-Resolutions
-~~~~~~~~~~~
-
-The committee must agree on a resolution by consensus. If the group cannot reach
-consensus and deadlocks for over a week, the group will turn the matter over to
-the Steering Council for resolution.
-
-
-Possible responses may include:
-
-* Taking no further action
-
- - if we determine no violations have occurred.
- - if the matter has been resolved publicly while the committee was considering responses.
-
-* Coordinating voluntary mediation: if all involved parties agree, the
- Committee may facilitate a mediation process as detailed above.
-* Remind publicly, and point out that some behavior/actions/language have been
- judged inappropriate and why in the current context, or can but hurtful to
- some people, requesting the community to self-adjust.
-* A private reprimand from the committee to the individual(s) involved. In this
- case, the group chair will deliver that reprimand to the individual(s) over
- email, cc'ing the group.
-* A public reprimand. In this case, the committee chair will deliver that
- reprimand in the same venue that the violation occurred, within the limits of
- practicality. E.g., the original mailing list for an email violation, but
- for a chat room discussion where the person/context may be gone, they can be
- reached by other means. The group may choose to publish this message
- elsewhere for documentation purposes.
-* A request for a public or private apology, assuming the reporter agrees to
- this idea: they may at their discretion refuse further contact with the
- violator. The chair will deliver this request. The committee may, if it
- chooses, attach "strings" to this request: for example, the group may ask a
- violator to apologize in order to retain one’s membership on a mailing list.
-* A "mutually agreed upon hiatus" where the committee asks the individual to
- temporarily refrain from community participation. If the individual chooses
- not to take a temporary break voluntarily, the committee may issue a
- "mandatory cooling off period".
-* A permanent or temporary ban from some or all NumPy spaces (mailing lists,
- gitter.im, etc.). The group will maintain records of all such bans so that
- they may be reviewed in the future or otherwise maintained.
-
-Once a resolution is agreed upon, but before it is enacted, the committee will
-contact the original reporter and any other affected parties and explain the
-proposed resolution. The committee will ask if this resolution is acceptable,
-and must note feedback for the record.
-
-Finally, the committee will make a report to the NumPy Steering Council (as
-well as the NumPy core team in the event of an ongoing resolution, such as a
-ban).
-
-The committee will never publicly discuss the issue; all public statements will
-be made by the chair of the Code of Conduct Committee or the NumPy Steering
-Council.
-
-
-Conflicts of Interest
-~~~~~~~~~~~~~~~~~~~~~
-
-In the event of any conflict of interest, a committee member must immediately
-notify the other members, and recuse themselves if necessary.
diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst
index d5a49a9f9..1665cfddb 100644
--- a/doc/source/dev/development_workflow.rst
+++ b/doc/source/dev/development_workflow.rst
@@ -188,6 +188,16 @@ Standard acronyms to start the commit message with are::
REL: related to releasing numpy
+.. _workflow_mailing_list:
+
+Get the mailing list's opinion
+=======================================================
+
+If you plan a new feature or API change, it's wisest to first email the
+NumPy `mailing list <https://mail.python.org/mailman/listinfo/numpy-discussion>`_
+asking for comment. If you haven't heard back in a week, it's
+OK to ping the list again.
+
.. _asking-for-merging:
Asking for your changes to be merged with the main repo
@@ -197,15 +207,24 @@ When you feel your work is finished, you can create a pull request (PR). Github
has a nice help page that outlines the process for `filing pull requests`_.
If your changes involve modifications to the API or addition/modification of a
-function, you should
-
-- send an email to the `NumPy mailing list`_ with a link to your PR along with
- a description of and a motivation for your changes. This may generate
- changes and feedback. It might be prudent to start with this step if your
- change may be controversial.
-- add a release note to the ``doc/release/upcoming_changes/`` directory,
- following the instructions and format in the
- ``doc/release/upcoming_changes/README.rst`` file.
+function, add a release note to the ``doc/release/upcoming_changes/``
+directory, following the instructions and format in the
+``doc/release/upcoming_changes/README.rst`` file.
+
+
+.. _workflow_PR_timeline:
+
+Getting your PR reviewed
+========================
+
+We review pull requests as soon as we can, typically within a week. If you get
+no review comments within two weeks, feel free to ask for feedback by
+adding a comment on your PR (this will notify maintainers).
+
+If your PR is large or complicated, asking for input on the numpy-discussion
+mailing list may also be useful.
+
+
.. _rebasing-on-master:
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index c4f35b68f..020df0b2b 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -9,7 +9,6 @@ Contributing to NumPy
.. toctree::
:hidden:
- conduct/code_of_conduct
Git Basics <gitwash/index>
development_environment
development_workflow
@@ -293,7 +292,6 @@ The rest of the story
.. toctree::
:maxdepth: 2
- conduct/code_of_conduct
Git Basics <gitwash/index>
development_environment
development_workflow
diff --git a/doc/source/doc_conventions.rst b/doc/source/doc_conventions.rst
new file mode 100644
index 000000000..e2bc419d1
--- /dev/null
+++ b/doc/source/doc_conventions.rst
@@ -0,0 +1,23 @@
+.. _documentation_conventions:
+
+##############################################################################
+Documentation conventions
+##############################################################################
+
+- Names that look like :func:`numpy.array` are links to detailed
+ documentation.
+
+- Examples often include the Python prompt ``>>>``. This is not part of the
+ code and will cause an error if typed or pasted into the Python
+ shell. It can be safely typed or pasted into the IPython shell; the ``>>>``
+ is ignored.
+
+- Examples often use ``np`` as an alias for ``numpy``; that is, they assume
+ you've run::
+
+ >>> import numpy as np
+
+- If you're a code contributor writing a docstring, see :ref:`docstring_intro`.
+
+- If you're a writer contributing ordinary (non-docstring) documentation, see
+ :ref:`userdoc_guide`.
diff --git a/doc/source/docs/howto_document.rst b/doc/source/docs/howto_document.rst
index 7cd97b954..ff726c67c 100644
--- a/doc/source/docs/howto_document.rst
+++ b/doc/source/docs/howto_document.rst
@@ -4,6 +4,8 @@
A Guide to NumPy Documentation
==============================
+.. _userdoc_guide:
+
User documentation
******************
- In general, we follow the
@@ -32,6 +34,9 @@ User documentation
we should add to the NumPy style rules.
+
+.. _docstring_intro:
+
Docstrings
**********
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index 180a79dae..b2a9f1d21 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -34,7 +34,7 @@ Basic Slicing and Indexing
Basic slicing extends Python's basic concept of slicing to N
dimensions. Basic slicing occurs when *obj* is a :class:`slice` object
(constructed by ``start:stop:step`` notation inside of brackets), an
-integer, or a tuple of slice objects and integers. :const:`Ellipsis`
+integer, or a tuple of slice objects and integers. :py:data:`Ellipsis`
and :const:`newaxis` objects can be interspersed with these as
well.
@@ -43,7 +43,7 @@ well.
In order to remain backward compatible with a common usage in
Numeric, basic slicing is also initiated if the selection object is
any non-ndarray and non-tuple sequence (such as a :class:`list`) containing
- :class:`slice` objects, the :const:`Ellipsis` object, or the :const:`newaxis`
+ :class:`slice` objects, the :py:data:`Ellipsis` object, or the :const:`newaxis`
object, but not for integer arrays or other embedded sequences.
.. index::
@@ -129,7 +129,7 @@ concepts to remember include:
[5],
[6]]])
-- :const:`Ellipsis` expands to the number of ``:`` objects needed for the
+- :py:data:`Ellipsis` expands to the number of ``:`` objects needed for the
selection tuple to index all dimensions. In most cases, this means that
length of the expanded selection tuple is ``x.ndim``. There may only be a
single ellipsis present.
@@ -333,7 +333,7 @@ the subspace defined by the basic indexing (excluding integers) and the
subspace from the advanced indexing part. Two cases of index combination
need to be distinguished:
-* The advanced indexes are separated by a slice, :const:`Ellipsis` or :const:`newaxis`.
+* The advanced indexes are separated by a slice, :py:data:`Ellipsis` or :const:`newaxis`.
For example ``x[arr1, :, arr2]``.
* The advanced indexes are all next to each other.
For example ``x[..., arr1, arr2, :]`` but *not* ``x[arr1, :, 1]``
diff --git a/doc/source/reference/arrays.interface.rst b/doc/source/reference/arrays.interface.rst
index 73e4aef0c..49772a298 100644
--- a/doc/source/reference/arrays.interface.rst
+++ b/doc/source/reference/arrays.interface.rst
@@ -231,13 +231,15 @@ as::
The flags member may consist of 5 bits showing how the data should be
interpreted and one bit showing how the Interface should be
-interpreted. The data-bits are :const:`CONTIGUOUS` (0x1),
-:const:`FORTRAN` (0x2), :const:`ALIGNED` (0x100), :const:`NOTSWAPPED`
-(0x200), and :const:`WRITEABLE` (0x400). A final flag
-:const:`ARR_HAS_DESCR` (0x800) indicates whether or not this structure
+interpreted. The data-bits are :c:macro:`NPY_ARRAY_C_CONTIGUOUS` (0x1),
+:c:macro:`NPY_ARRAY_F_CONTIGUOUS` (0x2), :c:macro:`NPY_ARRAY_ALIGNED` (0x100),
+:c:macro:`NPY_ARRAY_NOTSWAPPED` (0x200), and :c:macro:`NPY_ARRAY_WRITEABLE` (0x400). A final flag
+:c:macro:`NPY_ARR_HAS_DESCR` (0x800) indicates whether or not this structure
has the arrdescr field. The field should not be accessed unless this
flag is present.
+ .. c:macro:: NPY_ARR_HAS_DESCR
+
.. admonition:: New since June 16, 2006:
In the past most implementations used the ``desc`` member of the ``PyCObject``
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index 46d2bb8fa..13d117af2 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -41,6 +41,13 @@ of the flexible itemsize array types (:class:`string`,
pointer for the platform. All the number types can be obtained
using bit-width names as well.
+
+.. TODO - use something like this instead of the diagram above, as it generates
+ links to the classes and is a vector graphic. Unfortunately it looks worse
+ and the html <map> element providing the linked regions is misaligned.
+
+ .. inheritance-diagram:: byte short intc int_ longlong ubyte ushort uintc uint ulonglong half single double longdouble csingle cdouble clongdouble bool_ datetime64 timedelta64 object_ bytes_ str_ void
+
.. [#] However, array scalars are immutable, so none of the array
scalar attributes are settable.
@@ -51,14 +58,8 @@ of the flexible itemsize array types (:class:`string`,
Built-in scalar types
=====================
-The built-in scalar types are shown below. Along with their (mostly)
-C-derived names, the integer, float, and complex data-types are also
-available using a bit-width convention so that an array of the right
-size can always be ensured (e.g. :class:`int8`, :class:`float64`,
-:class:`complex128`). Two aliases (:class:`intp` and :class:`uintp`)
-pointing to the integer type that is sufficiently large to hold a C pointer
-are also provided. The C-like names are associated with character codes,
-which are shown in the table. Use of the character codes, however,
+The built-in scalar types are shown below. The C-like names are associated with character codes,
+which are shown in their descriptions. Use of the character codes, however,
is discouraged.
Some of the scalar types are essentially equivalent to fundamental
@@ -86,97 +87,119 @@ Python Boolean scalar.
.. warning::
- The :class:`bool_` type is not a subclass of the :class:`int_` type
- (the :class:`bool_` is not even a number type). This is different
- than Python's default implementation of :class:`bool` as a
- sub-class of :class:`int`.
-
-.. warning::
-
The :class:`int_` type does **not** inherit from the
:class:`int` built-in under Python 3, because type :class:`int` is no
longer a fixed-width integer type.
.. tip:: The default data type in NumPy is :class:`float_`.
-In the tables below, ``platform?`` means that the type may not be
-available on all platforms. Compatibility with different C or Python
-types is indicated: two types are compatible if their data is of the
-same size and interpreted in the same way.
-
-Booleans:
-
-=================== ============================= ===============
-Type Remarks Character code
-=================== ============================= ===============
-:class:`bool_` compatible: Python bool ``'?'``
-:class:`bool8` 8 bits
-=================== ============================= ===============
-
-Integers:
-
-=================== ============================= ===============
-:class:`byte` compatible: C ``char`` ``'b'``
-:class:`short` compatible: C ``short`` ``'h'``
-:class:`intc` compatible: C ``int`` ``'i'``
-:class:`int_` compatible: C ``long`` ``'l'``
-:class:`longlong` compatible: C ``long long`` ``'q'``
-:class:`intp` large enough to fit a pointer ``'p'``
-:class:`int8` 8 bits
-:class:`int16` 16 bits
-:class:`int32` 32 bits
-:class:`int64` 64 bits
-=================== ============================= ===============
-
-Unsigned integers:
-
-=================== ================================= ===============
-:class:`ubyte` compatible: C ``unsigned char`` ``'B'``
-:class:`ushort` compatible: C ``unsigned short`` ``'H'``
-:class:`uintc` compatible: C ``unsigned int`` ``'I'``
-:class:`uint` compatible: C ``long`` ``'L'``
-:class:`ulonglong` compatible: C ``long long`` ``'Q'``
-:class:`uintp` large enough to fit a pointer ``'P'``
-:class:`uint8` 8 bits
-:class:`uint16` 16 bits
-:class:`uint32` 32 bits
-:class:`uint64` 64 bits
-=================== ================================= ===============
-
-Floating-point numbers:
-
-=================== ============================= ===============
-:class:`half` ``'e'``
-:class:`single` compatible: C float ``'f'``
-:class:`double` compatible: C double
-:class:`float_` compatible: Python float ``'d'``
-:class:`longfloat` compatible: C long float ``'g'``
-:class:`float16` 16 bits
-:class:`float32` 32 bits
-:class:`float64` 64 bits
-:class:`float96` 96 bits, platform?
-:class:`float128` 128 bits, platform?
-=================== ============================= ===============
-
-Complex floating-point numbers:
-
-=================== ============================= ===============
-:class:`csingle` ``'F'``
-:class:`complex_` compatible: Python complex ``'D'``
-:class:`clongfloat` ``'G'``
-:class:`complex64` two 32-bit floats
-:class:`complex128` two 64-bit floats
-:class:`complex192` two 96-bit floats,
- platform?
-:class:`complex256` two 128-bit floats,
- platform?
-=================== ============================= ===============
-
-Any Python object:
-
-=================== ============================= ===============
-:class:`object_` any Python object ``'O'``
-=================== ============================= ===============
+.. autoclass:: numpy.generic
+ :exclude-members:
+
+.. autoclass:: numpy.number
+ :exclude-members:
+
+Integer types
+~~~~~~~~~~~~~
+
+.. autoclass:: numpy.integer
+ :exclude-members:
+
+Signed integer types
+++++++++++++++++++++
+
+.. autoclass:: numpy.signedinteger
+ :exclude-members:
+
+.. autoclass:: numpy.byte
+ :exclude-members:
+
+.. autoclass:: numpy.short
+ :exclude-members:
+
+.. autoclass:: numpy.intc
+ :exclude-members:
+
+.. autoclass:: numpy.int_
+ :exclude-members:
+
+.. autoclass:: numpy.longlong
+ :exclude-members:
+
+Unsigned integer types
+++++++++++++++++++++++
+
+.. autoclass:: numpy.unsignedinteger
+ :exclude-members:
+
+.. autoclass:: numpy.ubyte
+ :exclude-members:
+
+.. autoclass:: numpy.ushort
+ :exclude-members:
+
+.. autoclass:: numpy.uintc
+ :exclude-members:
+
+.. autoclass:: numpy.uint
+ :exclude-members:
+
+.. autoclass:: numpy.ulonglong
+ :exclude-members:
+
+Inexact types
+~~~~~~~~~~~~~
+
+.. autoclass:: numpy.inexact
+ :exclude-members:
+
+Floating-point types
+++++++++++++++++++++
+
+.. autoclass:: numpy.floating
+ :exclude-members:
+
+.. autoclass:: numpy.half
+ :exclude-members:
+
+.. autoclass:: numpy.single
+ :exclude-members:
+
+.. autoclass:: numpy.double
+ :exclude-members:
+
+.. autoclass:: numpy.longdouble
+ :exclude-members:
+
+Complex floating-point types
+++++++++++++++++++++++++++++
+
+.. autoclass:: numpy.complexfloating
+ :exclude-members:
+
+.. autoclass:: numpy.csingle
+ :exclude-members:
+
+.. autoclass:: numpy.cdouble
+ :exclude-members:
+
+.. autoclass:: numpy.clongdouble
+ :exclude-members:
+
+Other types
+~~~~~~~~~~~
+
+.. autoclass:: numpy.bool_
+ :exclude-members:
+
+.. autoclass:: numpy.datetime64
+ :exclude-members:
+
+.. autoclass:: numpy.timedelta64
+ :exclude-members:
+
+.. autoclass:: numpy.object_
+ :exclude-members:
.. note::
@@ -198,11 +221,17 @@ size and the data they describe can be of different length in different
arrays. (In the character codes ``#`` is an integer denoting how many
elements the data type consists of.)
-=================== ============================== ========
-:class:`bytes_` compatible: Python bytes ``'S#'``
-:class:`unicode_` compatible: Python unicode/str ``'U#'``
-:class:`void` ``'V#'``
-=================== ============================== ========
+.. autoclass:: numpy.flexible
+ :exclude-members:
+
+.. autoclass:: numpy.bytes_
+ :exclude-members:
+
+.. autoclass:: numpy.str_
+ :exclude-members:
+
+.. autoclass:: numpy.void
+ :exclude-members:
.. warning::
@@ -217,6 +246,117 @@ elements the data type consists of.)
convention more consistent with other Python modules such as the
:mod:`struct` module.
+Sized aliases
+~~~~~~~~~~~~~
+
+Along with their (mostly)
+C-derived names, the integer, float, and complex data-types are also
+available using a bit-width convention so that an array of the right
+size can always be ensured. Two aliases (:class:`numpy.intp` and :class:`numpy.uintp`)
+pointing to the integer type that is sufficiently large to hold a C pointer
+are also provided.
+
+.. note that these are documented with ..attribute because that is what
+ autoclass does for aliases under the hood.
+
+.. autoclass:: numpy.bool8
+
+.. attribute:: int8
+ int16
+ int32
+ int64
+
+ Aliases for the signed integer types (one of `numpy.byte`, `numpy.short`,
+ `numpy.intc`, `numpy.int_` and `numpy.longlong`) with the specified number
+ of bits.
+
+ Compatible with the C99 ``int8_t``, ``int16_t``, ``int32_t``, and
+ ``int64_t``, respectively.
+
+.. attribute:: uint8
+ uint16
+ uint32
+ uint64
+
+ Aliases for the unsigned integer types (one of `numpy.ubyte`, `numpy.ushort`,
+ `numpy.uintc`, `numpy.uint` and `numpy.ulonglong`) with the specified number
+ of bits.
+
+ Compatible with the C99 ``uint8_t``, ``uint16_t``, ``uint32_t``, and
+ ``uint64_t``, respectively.
+
+.. attribute:: intp
+
+ Alias for the signed integer type (one of `numpy.byte`, `numpy.short`,
+ `numpy.intc`, `numpy.int_` and `numpy.longlong`) that is the same size as a
+ pointer.
+
+ Compatible with the C ``intptr_t``.
+
+ :Character code: ``'p'``
+
+.. attribute:: uintp
+
+ Alias for the unsigned integer type (one of `numpy.ubyte`, `numpy.ushort`,
+ `numpy.uintc`, `numpy.uint` and `numpy.ulonglong`) that is the same size as a
+ pointer.
+
+ Compatible with the C ``uintptr_t``.
+
+ :Character code: ``'P'``
+
+.. autoclass:: numpy.float16
+
+.. autoclass:: numpy.float32
+
+.. autoclass:: numpy.float64
+
+.. attribute:: float96
+ float128
+
+ Alias for `numpy.longdouble`, named after its size in bits.
+ The existence of these aliases depends on the platform.
+
+.. autoclass:: numpy.complex64
+
+.. autoclass:: numpy.complex128
+
+.. attribute:: complex192
+ complex256
+
+ Alias for `numpy.clongdouble`, named after its size in bits.
+ The existence of these aliases depends on the platform.
+
+Other aliases
+~~~~~~~~~~~~~
+
+The first two of these are conveniences which resemble the names of the
+builtin types, in the same style as `bool_`, `int_`, `str_`, `bytes_`, and
+`object_`:
+
+.. autoclass:: numpy.float_
+
+.. autoclass:: numpy.complex_
+
+Some others use alternate naming conventions for extended-precision floats and
+complex numbers:
+
+.. autoclass:: numpy.longfloat
+
+.. autoclass:: numpy.singlecomplex
+
+.. autoclass:: numpy.cfloat
+
+.. autoclass:: numpy.longcomplex
+
+.. autoclass:: numpy.clongfloat
+
+The following aliases originate from Python 2, and it is recommended that they
+not be used in new code.
+
+.. autoclass:: numpy.string_
+
+.. autoclass:: numpy.unicode_
Attributes
==========
@@ -276,7 +416,6 @@ The exceptions to the above rules are given below:
.. autosummary::
:toctree: generated/
- generic
generic.__array__
generic.__array_wrap__
generic.squeeze
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index cfe4d2d51..9fe45d2de 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -3259,6 +3259,8 @@ Memory management
:c:data:`NPY_USE_PYMEM` is 0, if :c:data:`NPY_USE_PYMEM` is 1, then
the Python memory allocator is used.
+ .. c:macro:: NPY_USE_PYMEM
+
.. c:function:: int PyArray_ResolveWritebackIfCopy(PyArrayObject* obj)
If ``obj.flags`` has :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or (deprecated)
@@ -3289,9 +3291,13 @@ be accomplished using two groups of macros. Typically, if one macro in
a group is used in a code block, all of them must be used in the same
code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the
python-defined :c:data:`WITH_THREADS` constant unless the environment
-variable :c:data:`NPY_NOSMP` is set in which case
+variable ``NPY_NOSMP`` is set in which case
:c:data:`NPY_ALLOW_THREADS` is defined to be 0.
+.. c:macro:: NPY_ALLOW_THREADS
+
+.. c:macro:: WITH_THREADS
+
Group 1
"""""""
diff --git a/doc/source/reference/c-api/config.rst b/doc/source/reference/c-api/config.rst
index c3e2c98af..cec5b973a 100644
--- a/doc/source/reference/c-api/config.rst
+++ b/doc/source/reference/c-api/config.rst
@@ -52,12 +52,15 @@ information is available to the pre-processor.
.. c:macro:: NPY_SIZEOF_LONG_DOUBLE
- sizeof(longdouble) (A macro defines **NPY_SIZEOF_LONGDOUBLE** as well.)
+.. c:macro:: NPY_SIZEOF_LONGDOUBLE
+
+ sizeof(longdouble)
.. c:macro:: NPY_SIZEOF_PY_INTPTR_T
- Size of a pointer on this platform (sizeof(void \*)) (A macro defines
- NPY_SIZEOF_INTP as well.)
+.. c:macro:: NPY_SIZEOF_INTP
+
+ Size of a pointer on this platform (sizeof(void \*))
Platform information
@@ -102,6 +105,12 @@ Platform information
One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`,
or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`.
+ .. c:macro:: NPY_CPU_BIG
+
+ .. c:macro:: NPY_CPU_LITTLE
+
+ .. c:macro:: NPY_CPU_UNKNOWN_ENDIAN
+
Compiler directives
-------------------
diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst
index a04d85212..47b998302 100644
--- a/doc/source/reference/c-api/dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -414,6 +414,12 @@ Printf Formatting
For help in printing, the following strings are defined as the correct
format specifier in printf and related commands.
- :c:data:`NPY_LONGLONG_FMT`, :c:data:`NPY_ULONGLONG_FMT`,
- :c:data:`NPY_INTP_FMT`, :c:data:`NPY_UINTP_FMT`,
- :c:data:`NPY_LONGDOUBLE_FMT`
+.. c:macro:: NPY_LONGLONG_FMT
+
+.. c:macro:: NPY_ULONGLONG_FMT
+
+.. c:macro:: NPY_INTP_FMT
+
+.. c:macro:: NPY_UINTP_FMT
+
+.. c:macro:: NPY_LONGDOUBLE_FMT
diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst
index 7eac8c367..ae96bb3fb 100644
--- a/doc/source/reference/c-api/iterator.rst
+++ b/doc/source/reference/c-api/iterator.rst
@@ -1264,7 +1264,7 @@ functions provide that information.
NPY_MAX_INTP is placed in the stride.
Once the iterator is prepared for iteration (after a reset if
- :c:data:`NPY_DELAY_BUFALLOC` was used), call this to get the strides
+ :c:data:`NPY_ITER_DELAY_BUFALLOC` was used), call this to get the strides
which may be used to select a fast inner loop function. For example,
if the stride is 0, that means the inner loop can always load its
value into a variable once, then use the variable throughout the loop,
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index ee57d4680..8759af6a4 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -93,84 +93,84 @@ PyArray_Type and PyArrayObject
PyObject *weakreflist;
} PyArrayObject;
-.. c:macro:: PyArrayObject.PyObject_HEAD
+ .. c:macro:: PyObject_HEAD
- This is needed by all Python objects. It consists of (at least)
- a reference count member ( ``ob_refcnt`` ) and a pointer to the
- typeobject ( ``ob_type`` ). (Other elements may also be present
- if Python was compiled with special options see
- Include/object.h in the Python source tree for more
- information). The ob_type member points to a Python type
- object.
+ This is needed by all Python objects. It consists of (at least)
+ a reference count member ( ``ob_refcnt`` ) and a pointer to the
+ typeobject ( ``ob_type`` ). (Other elements may also be present
+ if Python was compiled with special options see
+ Include/object.h in the Python source tree for more
+ information). The ob_type member points to a Python type
+ object.
-.. c:member:: char *PyArrayObject.data
+ .. c:member:: char *data
- Accessible via :c:data:`PyArray_DATA`, this data member is a
- pointer to the first element of the array. This pointer can
- (and normally should) be recast to the data type of the array.
+ Accessible via :c:data:`PyArray_DATA`, this data member is a
+ pointer to the first element of the array. This pointer can
+ (and normally should) be recast to the data type of the array.
-.. c:member:: int PyArrayObject.nd
+ .. c:member:: int nd
- An integer providing the number of dimensions for this
- array. When nd is 0, the array is sometimes called a rank-0
- array. Such arrays have undefined dimensions and strides and
- cannot be accessed. Macro :c:data:`PyArray_NDIM` defined in
- ``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
- is the largest number of dimensions for any array.
+ An integer providing the number of dimensions for this
+ array. When nd is 0, the array is sometimes called a rank-0
+ array. Such arrays have undefined dimensions and strides and
+ cannot be accessed. Macro :c:data:`PyArray_NDIM` defined in
+ ``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
+ is the largest number of dimensions for any array.
-.. c:member:: npy_intp PyArrayObject.dimensions
+ .. c:member:: npy_intp dimensions
- An array of integers providing the shape in each dimension as
- long as nd :math:`\geq` 1. The integer is always large enough
- to hold a pointer on the platform, so the dimension size is
- only limited by memory. :c:data:`PyArray_DIMS` is the macro
- associated with this data member.
+ An array of integers providing the shape in each dimension as
+ long as nd :math:`\geq` 1. The integer is always large enough
+ to hold a pointer on the platform, so the dimension size is
+ only limited by memory. :c:data:`PyArray_DIMS` is the macro
+ associated with this data member.
-.. c:member:: npy_intp *PyArrayObject.strides
+ .. c:member:: npy_intp *strides
- An array of integers providing for each dimension the number of
- bytes that must be skipped to get to the next element in that
- dimension. Associated with macro :c:data:`PyArray_STRIDES`.
+ An array of integers providing for each dimension the number of
+ bytes that must be skipped to get to the next element in that
+ dimension. Associated with macro :c:data:`PyArray_STRIDES`.
-.. c:member:: PyObject *PyArrayObject.base
+ .. c:member:: PyObject *base
- Pointed to by :c:data:`PyArray_BASE`, this member is used to hold a
- pointer to another Python object that is related to this array.
- There are two use cases:
+ Pointed to by :c:data:`PyArray_BASE`, this member is used to hold a
+ pointer to another Python object that is related to this array.
+ There are two use cases:
- - If this array does not own its own memory, then base points to the
- Python object that owns it (perhaps another array object)
- - If this array has the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is a working
- copy of a "misbehaved" array.
+ - If this array does not own its own memory, then base points to the
+ Python object that owns it (perhaps another array object)
+ - If this array has the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is a working
+ copy of a "misbehaved" array.
- When ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to
- by base will be updated with the contents of this array.
+ When ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to
+ by base will be updated with the contents of this array.
-.. c:member:: PyArray_Descr *PyArrayObject.descr
+ .. c:member:: PyArray_Descr *descr
- A pointer to a data-type descriptor object (see below). The
- data-type descriptor object is an instance of a new built-in
- type which allows a generic description of memory. There is a
- descriptor structure for each data type supported. This
- descriptor structure contains useful information about the type
- as well as a pointer to a table of function pointers to
- implement specific functionality. As the name suggests, it is
- associated with the macro :c:data:`PyArray_DESCR`.
+ A pointer to a data-type descriptor object (see below). The
+ data-type descriptor object is an instance of a new built-in
+ type which allows a generic description of memory. There is a
+ descriptor structure for each data type supported. This
+ descriptor structure contains useful information about the type
+ as well as a pointer to a table of function pointers to
+ implement specific functionality. As the name suggests, it is
+ associated with the macro :c:data:`PyArray_DESCR`.
-.. c:member:: int PyArrayObject.flags
+ .. c:member:: int flags
- Pointed to by the macro :c:data:`PyArray_FLAGS`, this data member represents
- the flags indicating how the memory pointed to by data is to be
- interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
- :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
- :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, and :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
+ Pointed to by the macro :c:data:`PyArray_FLAGS`, this data member represents
+ the flags indicating how the memory pointed to by data is to be
+ interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
+ :c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
+ :c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY`, and :c:data:`NPY_ARRAY_UPDATEIFCOPY`.
-.. c:member:: PyObject *PyArrayObject.weakreflist
+ .. c:member:: PyObject *weakreflist
- This member allows array objects to have weak references (using the
- weakref module).
+ This member allows array objects to have weak references (using the
+ weakref module).
PyArrayDescr_Type and PyArray_Descr
@@ -226,197 +226,195 @@ PyArrayDescr_Type and PyArray_Descr
npy_hash_t hash;
} PyArray_Descr;
-.. c:member:: PyTypeObject *PyArray_Descr.typeobj
+ .. c:member:: PyTypeObject *typeobj
- Pointer to a typeobject that is the corresponding Python type for
- the elements of this array. For the builtin types, this points to
- the corresponding array scalar. For user-defined types, this
- should point to a user-defined typeobject. This typeobject can
- either inherit from array scalars or not. If it does not inherit
- from array scalars, then the :c:data:`NPY_USE_GETITEM` and
- :c:data:`NPY_USE_SETITEM` flags should be set in the ``flags`` member.
+ Pointer to a typeobject that is the corresponding Python type for
+ the elements of this array. For the builtin types, this points to
+ the corresponding array scalar. For user-defined types, this
+ should point to a user-defined typeobject. This typeobject can
+ either inherit from array scalars or not. If it does not inherit
+ from array scalars, then the :c:data:`NPY_USE_GETITEM` and
+ :c:data:`NPY_USE_SETITEM` flags should be set in the ``flags`` member.
-.. c:member:: char PyArray_Descr.kind
+ .. c:member:: char kind
- A character code indicating the kind of array (using the array
- interface typestring notation). A 'b' represents Boolean, a 'i'
- represents signed integer, a 'u' represents unsigned integer, 'f'
- represents floating point, 'c' represents complex floating point, 'S'
- represents 8-bit zero-terminated bytes, 'U' represents 32-bit/character
- unicode string, and 'V' represents arbitrary.
+ A character code indicating the kind of array (using the array
+ interface typestring notation). A 'b' represents Boolean, an 'i'
+ represents signed integer, a 'u' represents unsigned integer, 'f'
+ represents floating point, 'c' represents complex floating point, 'S'
+ represents 8-bit zero-terminated bytes, 'U' represents 32-bit/character
+ unicode string, and 'V' represents arbitrary.
-.. c:member:: char PyArray_Descr.type
+ .. c:member:: char type
- A traditional character code indicating the data type.
+ A traditional character code indicating the data type.
-.. c:member:: char PyArray_Descr.byteorder
+ .. c:member:: char byteorder
- A character indicating the byte-order: '>' (big-endian), '<' (little-
- endian), '=' (native), '\|' (irrelevant, ignore). All builtin data-
- types have byteorder '='.
+ A character indicating the byte-order: '>' (big-endian),
+ '<' (little-endian), '=' (native), '\|' (irrelevant, ignore).
+ All builtin data-types have byteorder '='.
-.. c:member:: char PyArray_Descr.flags
+ .. c:member:: char flags
- A data-type bit-flag that determines if the data-type exhibits object-
- array like behavior. Each bit in this member is a flag which are named
- as:
+ A data-type bit-flag that determines if the data-type exhibits object-
+ array like behavior. Each bit in this member is a flag which is named
+ as:
- .. c:macro:: NPY_ITEM_REFCOUNT
+ .. c:macro:: NPY_ITEM_REFCOUNT
- Indicates that items of this data-type must be reference
- counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+ Indicates that items of this data-type must be reference
+ counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
- .. c:macro:: NPY_ITEM_HASOBJECT
+ .. c:macro:: NPY_ITEM_HASOBJECT
- Same as :c:data:`NPY_ITEM_REFCOUNT`.
+ Same as :c:data:`NPY_ITEM_REFCOUNT`.
- .. c:macro:: NPY_LIST_PICKLE
+ .. c:macro:: NPY_LIST_PICKLE
- Indicates arrays of this data-type must be converted to a list
- before pickling.
+ Indicates arrays of this data-type must be converted to a list
+ before pickling.
- .. c:macro:: NPY_ITEM_IS_POINTER
+ .. c:macro:: NPY_ITEM_IS_POINTER
- Indicates the item is a pointer to some other data-type
+ Indicates the item is a pointer to some other data-type
- .. c:macro:: NPY_NEEDS_INIT
+ .. c:macro:: NPY_NEEDS_INIT
- Indicates memory for this data-type must be initialized (set
- to 0) on creation.
+ Indicates memory for this data-type must be initialized (set
+ to 0) on creation.
- .. c:macro:: NPY_NEEDS_PYAPI
+ .. c:macro:: NPY_NEEDS_PYAPI
- Indicates this data-type requires the Python C-API during
- access (so don't give up the GIL if array access is going to
- be needed).
+ Indicates this data-type requires the Python C-API during
+ access (so don't give up the GIL if array access is going to
+ be needed).
- .. c:macro:: NPY_USE_GETITEM
+ .. c:macro:: NPY_USE_GETITEM
- On array access use the ``f->getitem`` function pointer
- instead of the standard conversion to an array scalar. Must
- use if you don't define an array scalar to go along with
- the data-type.
+ On array access use the ``f->getitem`` function pointer
+ instead of the standard conversion to an array scalar. Must
+ use if you don't define an array scalar to go along with
+ the data-type.
- .. c:macro:: NPY_USE_SETITEM
+ .. c:macro:: NPY_USE_SETITEM
- When creating a 0-d array from an array scalar use
- ``f->setitem`` instead of the standard copy from an array
- scalar. Must use if you don't define an array scalar to go
- along with the data-type.
+ When creating a 0-d array from an array scalar use
+ ``f->setitem`` instead of the standard copy from an array
+ scalar. Must use if you don't define an array scalar to go
+ along with the data-type.
- .. c:macro:: NPY_FROM_FIELDS
+ .. c:macro:: NPY_FROM_FIELDS
- The bits that are inherited for the parent data-type if these
- bits are set in any field of the data-type. Currently (
- :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \|
- :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ).
+ The bits that are inherited for the parent data-type if these
+ bits are set in any field of the data-type. Currently (
+ :c:data:`NPY_NEEDS_INIT` \| :c:data:`NPY_LIST_PICKLE` \|
+ :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_PYAPI` ).
- .. c:macro:: NPY_OBJECT_DTYPE_FLAGS
+ .. c:macro:: NPY_OBJECT_DTYPE_FLAGS
- Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE`
- \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \|
- :c:data:`NPY_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \|
- :c:data:`NPY_NEEDS_PYAPI`).
+ Bits set for the object data-type: ( :c:data:`NPY_LIST_PICKLE`
+ \| :c:data:`NPY_USE_GETITEM` \| :c:data:`NPY_ITEM_IS_POINTER` \|
+ :c:data:`NPY_ITEM_REFCOUNT` \| :c:data:`NPY_NEEDS_INIT` \|
+ :c:data:`NPY_NEEDS_PYAPI`).
- .. c:function:: PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags)
+ .. c:function:: PyDataType_FLAGCHK(PyArray_Descr *dtype, int flags)
- Return true if all the given flags are set for the data-type
- object.
+ Return true if all the given flags are set for the data-type
+ object.
- .. c:function:: PyDataType_REFCHK(PyArray_Descr *dtype)
+ .. c:function:: PyDataType_REFCHK(PyArray_Descr *dtype)
- Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*,
- :c:data:`NPY_ITEM_REFCOUNT`).
+ Equivalent to :c:func:`PyDataType_FLAGCHK` (*dtype*,
+ :c:data:`NPY_ITEM_REFCOUNT`).
-.. c:member:: int PyArray_Descr.type_num
+ .. c:member:: int type_num
- A number that uniquely identifies the data type. For new data-types,
- this number is assigned when the data-type is registered.
+ A number that uniquely identifies the data type. For new data-types,
+ this number is assigned when the data-type is registered.
-.. c:member:: int PyArray_Descr.elsize
+ .. c:member:: int elsize
- For data types that are always the same size (such as long), this
- holds the size of the data type. For flexible data types where
- different arrays can have a different elementsize, this should be
- 0.
+ For data types that are always the same size (such as long), this
+ holds the size of the data type. For flexible data types where
+ different arrays can have a different elementsize, this should be
+ 0.
-.. c:member:: int PyArray_Descr.alignment
+ .. c:member:: int alignment
- A number providing alignment information for this data type.
- Specifically, it shows how far from the start of a 2-element
- structure (whose first element is a ``char`` ), the compiler
- places an item of this type: ``offsetof(struct {char c; type v;},
- v)``
+ A number providing alignment information for this data type.
+ Specifically, it shows how far from the start of a 2-element
+ structure (whose first element is a ``char`` ), the compiler
+ places an item of this type: ``offsetof(struct {char c; type v;},
+ v)``
-.. c:member:: PyArray_ArrayDescr *PyArray_Descr.subarray
+ .. c:member:: PyArray_ArrayDescr *subarray
- If this is non- ``NULL``, then this data-type descriptor is a
- C-style contiguous array of another data-type descriptor. In
- other-words, each element that this descriptor describes is
- actually an array of some other base descriptor. This is most
- useful as the data-type descriptor for a field in another
- data-type descriptor. The fields member should be ``NULL`` if this
- is non- ``NULL`` (the fields member of the base descriptor can be
- non- ``NULL`` however). The :c:type:`PyArray_ArrayDescr` structure is
- defined using
+ If this is non- ``NULL``, then this data-type descriptor is a
+ C-style contiguous array of another data-type descriptor. In
+ other-words, each element that this descriptor describes is
+ actually an array of some other base descriptor. This is most
+ useful as the data-type descriptor for a field in another
+ data-type descriptor. The fields member should be ``NULL`` if this
+ is non- ``NULL`` (the fields member of the base descriptor can be
+ non- ``NULL`` however).
- .. code-block:: c
-
- typedef struct {
- PyArray_Descr *base;
- PyObject *shape;
- } PyArray_ArrayDescr;
+ .. c:type:: PyArray_ArrayDescr
- The elements of this structure are:
+ .. code-block:: c
- .. c:member:: PyArray_Descr *PyArray_ArrayDescr.base
+ typedef struct {
+ PyArray_Descr *base;
+ PyObject *shape;
+ } PyArray_ArrayDescr;
- The data-type-descriptor object of the base-type.
+ .. c:member:: PyArray_Descr *base
- .. c:member:: PyObject *PyArray_ArrayDescr.shape
+ The data-type-descriptor object of the base-type.
- The shape (always C-style contiguous) of the sub-array as a Python
- tuple.
+ .. c:member:: PyObject *shape
+ The shape (always C-style contiguous) of the sub-array as a Python
+ tuple.
-.. c:member:: PyObject *PyArray_Descr.fields
+ .. c:member:: PyObject *fields
- If this is non-NULL, then this data-type-descriptor has fields
- described by a Python dictionary whose keys are names (and also
- titles if given) and whose values are tuples that describe the
- fields. Recall that a data-type-descriptor always describes a
- fixed-length set of bytes. A field is a named sub-region of that
- total, fixed-length collection. A field is described by a tuple
- composed of another data- type-descriptor and a byte
- offset. Optionally, the tuple may contain a title which is
- normally a Python string. These tuples are placed in this
- dictionary keyed by name (and also title if given).
+ If this is non-NULL, then this data-type-descriptor has fields
+ described by a Python dictionary whose keys are names (and also
+ titles if given) and whose values are tuples that describe the
+ fields. Recall that a data-type-descriptor always describes a
+ fixed-length set of bytes. A field is a named sub-region of that
+ total, fixed-length collection. A field is described by a tuple
+ composed of another data-type-descriptor and a byte
+ offset. Optionally, the tuple may contain a title which is
+ normally a Python string. These tuples are placed in this
+ dictionary keyed by name (and also title if given).
-.. c:member:: PyObject *PyArray_Descr.names
+ .. c:member:: PyObject *names
- An ordered tuple of field names. It is NULL if no field is
- defined.
+ An ordered tuple of field names. It is NULL if no field is
+ defined.
-.. c:member:: PyArray_ArrFuncs *PyArray_Descr.f
+ .. c:member:: PyArray_ArrFuncs *f
- A pointer to a structure containing functions that the type needs
- to implement internal features. These functions are not the same
- thing as the universal functions (ufuncs) described later. Their
- signatures can vary arbitrarily.
+ A pointer to a structure containing functions that the type needs
+ to implement internal features. These functions are not the same
+ thing as the universal functions (ufuncs) described later. Their
+ signatures can vary arbitrarily.
-.. c:member:: PyObject *PyArray_Descr.metadata
+ .. c:member:: PyObject *metadata
- Metadata about this dtype.
+ Metadata about this dtype.
-.. c:member:: NpyAuxData *PyArray_Descr.c_metadata
+ .. c:member:: NpyAuxData *c_metadata
- Metadata specific to the C implementation
- of the particular dtype. Added for NumPy 1.7.0.
+ Metadata specific to the C implementation
+ of the particular dtype. Added for NumPy 1.7.0.
-.. c:member:: Npy_hash_t *PyArray_Descr.hash
+ .. c:member:: Npy_hash_t *hash
- Currently unused. Reserved for future use in caching
- hash values.
+ Currently unused. Reserved for future use in caching
+ hash values.
.. c:type:: PyArray_ArrFuncs
@@ -795,31 +793,31 @@ PyUFunc_Type and PyUFuncObject
} PyUFuncObject;
- .. c:macro: PyUFuncObject.PyObject_HEAD
+ .. c:macro:: PyObject_HEAD
required for all Python objects.
- .. c:member:: int PyUFuncObject.nin
+ .. c:member:: int nin
The number of input arguments.
- .. c:member:: int PyUFuncObject.nout
+ .. c:member:: int nout
The number of output arguments.
- .. c:member:: int PyUFuncObject.nargs
+ .. c:member:: int nargs
The total number of arguments (*nin* + *nout*). This must be
less than :c:data:`NPY_MAXARGS`.
- .. c:member:: int PyUFuncObject.identity
+ .. c:member:: int identity
Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
:c:data:`PyUFunc_None` or :c:data:`PyUFunc_AllOnes` to indicate
the identity for this operation. It is only used for a
reduce-like call on an empty array.
- .. c:member:: void PyUFuncObject.functions( \
+ .. c:member:: void functions( \
char** args, npy_intp* dims, npy_intp* steps, void* extradata)
An array of function pointers --- one for each data type
@@ -837,7 +835,7 @@ PyUFunc_Type and PyUFuncObject
passed in as *extradata*. The size of this function pointer
array is ntypes.
- .. c:member:: void **PyUFuncObject.data
+ .. c:member:: void **data
Extra data to be passed to the 1-d vector loops or ``NULL`` if
no extra-data is needed. This C-array must be the same size (
@@ -846,22 +844,22 @@ PyUFunc_Type and PyUFuncObject
just 1-d vector loops that make use of this extra data to
receive a pointer to the actual function to call.
- .. c:member:: int PyUFuncObject.ntypes
+ .. c:member:: int ntypes
The number of supported data types for the ufunc. This number
specifies how many different 1-d loops (of the builtin data
types) are available.
- .. c:member:: int PyUFuncObject.reserved1
+ .. c:member:: int reserved1
Unused.
- .. c:member:: char *PyUFuncObject.name
+ .. c:member:: char *name
A string name for the ufunc. This is used dynamically to build
the __doc\__ attribute of ufuncs.
- .. c:member:: char *PyUFuncObject.types
+ .. c:member:: char *types
An array of :math:`nargs \times ntypes` 8-bit type_numbers
which contains the type signature for the function for each of
@@ -871,24 +869,24 @@ PyUFunc_Type and PyUFuncObject
vector loop. These type numbers do not have to be the same type
and mixed-type ufuncs are supported.
- .. c:member:: char *PyUFuncObject.doc
+ .. c:member:: char *doc
Documentation for the ufunc. Should not contain the function
signature as this is generated dynamically when __doc\__ is
retrieved.
- .. c:member:: void *PyUFuncObject.ptr
+ .. c:member:: void *ptr
Any dynamically allocated memory. Currently, this is used for
dynamic ufuncs created from a python function to store room for
the types, data, and name members.
- .. c:member:: PyObject *PyUFuncObject.obj
+ .. c:member:: PyObject *obj
For ufuncs dynamically created from python functions, this member
holds a reference to the underlying Python function.
- .. c:member:: PyObject *PyUFuncObject.userloops
+ .. c:member:: PyObject *userloops
A dictionary of user-defined 1-d vector loops (stored as CObject
ptrs) for user-defined types. A loop may be registered by the
@@ -896,74 +894,79 @@ PyUFunc_Type and PyUFuncObject
User defined type numbers are always larger than
:c:data:`NPY_USERDEF`.
- .. c:member:: int PyUFuncObject.core_enabled
+ .. c:member:: int core_enabled
0 for scalar ufuncs; 1 for generalized ufuncs
- .. c:member:: int PyUFuncObject.core_num_dim_ix
+ .. c:member:: int core_num_dim_ix
Number of distinct core dimension names in the signature
- .. c:member:: int *PyUFuncObject.core_num_dims
+ .. c:member:: int *core_num_dims
Number of core dimensions of each argument
- .. c:member:: int *PyUFuncObject.core_dim_ixs
+ .. c:member:: int *core_dim_ixs
Dimension indices in a flattened form; indices of argument ``k`` are
stored in ``core_dim_ixs[core_offsets[k] : core_offsets[k] +
core_numdims[k]]``
- .. c:member:: int *PyUFuncObject.core_offsets
+ .. c:member:: int *core_offsets
Position of 1st core dimension of each argument in ``core_dim_ixs``,
equivalent to cumsum(``core_num_dims``)
- .. c:member:: char *PyUFuncObject.core_signature
+ .. c:member:: char *core_signature
Core signature string
- .. c:member:: PyUFunc_TypeResolutionFunc *PyUFuncObject.type_resolver
+ .. c:member:: PyUFunc_TypeResolutionFunc *type_resolver
A function which resolves the types and fills an array with the dtypes
for the inputs and outputs
- .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *PyUFuncObject.legacy_inner_loop_selector
+ .. c:member:: PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector
A function which returns an inner loop. The ``legacy`` in the name arises
because for NumPy 1.6 a better variant had been planned. This variant
has not yet come about.
- .. c:member:: void *PyUFuncObject.reserved2
+ .. c:member:: void *reserved2
For a possible future loop selector with a different signature.
- .. c:member:: PyUFunc_MaskedInnerLoopSelectionFunc *PyUFuncObject.masked_inner_loop_selector
+ .. c:member:: PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector
Function which returns a masked inner loop for the ufunc
- .. c:member:: npy_uint32 PyUFuncObject.op_flags
+ .. c:member:: npy_uint32 op_flags
Override the default operand flags for each ufunc operand.
- .. c:member:: npy_uint32 PyUFuncObject.iter_flags
+ .. c:member:: npy_uint32 iter_flags
Override the default nditer flags for the ufunc.
Added in API version 0x0000000D
- .. c:member:: npy_intp *PyUFuncObject.core_dim_sizes
+ .. c:member:: npy_intp *core_dim_sizes
For each distinct core dimension, the possible
:ref:`frozen <frozen>` size if :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` is 0
- .. c:member:: npy_uint32 *PyUFuncObject.core_dim_flags
+ .. c:member:: npy_uint32 *core_dim_flags
For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
- - :c:data:`UFUNC_CORE_DIM_CAN_IGNORE` if the dim name ends in ``?``
- - :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` if the dim size will be
- determined from the operands and not from a :ref:`frozen <frozen>` signature
+ .. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE
+
+ if the dim name ends in ``?``
+
+ .. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED
+
+ if the dim size will be determined from the operands
+ and not from a :ref:`frozen <frozen>` signature
PyArrayIter_Type and PyArrayIterObject
--------------------------------------
@@ -1012,54 +1015,54 @@ PyArrayIter_Type and PyArrayIterObject
Bool contiguous;
} PyArrayIterObject;
- .. c:member:: int PyArrayIterObject.nd_m1
+ .. c:member:: int nd_m1
:math:`N-1` where :math:`N` is the number of dimensions in the
underlying array.
- .. c:member:: npy_intp PyArrayIterObject.index
+ .. c:member:: npy_intp index
The current 1-d index into the array.
- .. c:member:: npy_intp PyArrayIterObject.size
+ .. c:member:: npy_intp size
The total size of the underlying array.
- .. c:member:: npy_intp *PyArrayIterObject.coordinates
+ .. c:member:: npy_intp *coordinates
An :math:`N` -dimensional index into the array.
- .. c:member:: npy_intp *PyArrayIterObject.dims_m1
+ .. c:member:: npy_intp *dims_m1
The size of the array minus 1 in each dimension.
- .. c:member:: npy_intp *PyArrayIterObject.strides
+ .. c:member:: npy_intp *strides
The strides of the array. How many bytes needed to jump to the next
element in each dimension.
- .. c:member:: npy_intp *PyArrayIterObject.backstrides
+ .. c:member:: npy_intp *backstrides
How many bytes needed to jump from the end of a dimension back
to its beginning. Note that ``backstrides[k] == strides[k] *
dims_m1[k]``, but it is stored here as an optimization.
- .. c:member:: npy_intp *PyArrayIterObject.factors
+ .. c:member:: npy_intp *factors
This array is used in computing an N-d index from a 1-d index. It
contains needed products of the dimensions.
- .. c:member:: PyArrayObject *PyArrayIterObject.ao
+ .. c:member:: PyArrayObject *ao
A pointer to the underlying ndarray this iterator was created to
represent.
- .. c:member:: char *PyArrayIterObject.dataptr
+ .. c:member:: char *dataptr
This member points to an element in the ndarray indicated by the
index.
- .. c:member:: Bool PyArrayIterObject.contiguous
+ .. c:member:: Bool contiguous
This flag is true if the underlying array is
:c:data:`NPY_ARRAY_C_CONTIGUOUS`. It is used to simplify
@@ -1106,32 +1109,32 @@ PyArrayMultiIter_Type and PyArrayMultiIterObject
PyArrayIterObject *iters[NPY_MAXDIMS];
} PyArrayMultiIterObject;
- .. c:macro: PyArrayMultiIterObject.PyObject_HEAD
+ .. c:macro:: PyObject_HEAD
Needed at the start of every Python object (holds reference count
and type identification).
- .. c:member:: int PyArrayMultiIterObject.numiter
+ .. c:member:: int numiter
The number of arrays that need to be broadcast to the same shape.
- .. c:member:: npy_intp PyArrayMultiIterObject.size
+ .. c:member:: npy_intp size
The total broadcasted size.
- .. c:member:: npy_intp PyArrayMultiIterObject.index
+ .. c:member:: npy_intp index
The current (1-d) index into the broadcasted result.
- .. c:member:: int PyArrayMultiIterObject.nd
+ .. c:member:: int nd
The number of dimensions in the broadcasted result.
- .. c:member:: npy_intp *PyArrayMultiIterObject.dimensions
+ .. c:member:: npy_intp *dimensions
The shape of the broadcasted result (only ``nd`` slots are used).
- .. c:member:: PyArrayIterObject **PyArrayMultiIterObject.iters
+ .. c:member:: PyArrayIterObject **iters
An array of iterator objects that holds the iterators for the
arrays to be broadcast together. On return, the iterators are
@@ -1249,12 +1252,12 @@ PyArray_Dims
The members of this structure are
- .. c:member:: npy_intp *PyArray_Dims.ptr
+ .. c:member:: npy_intp *ptr
A pointer to a list of (:c:type:`npy_intp`) integers which
usually represent array shape or array strides.
- .. c:member:: int PyArray_Dims.len
+ .. c:member:: int len
The length of the list of integers. It is assumed safe to
access *ptr* [0] to *ptr* [len-1].
@@ -1283,26 +1286,26 @@ PyArray_Chunk
The members are
- .. c:macro: PyArray_Chunk.PyObject_HEAD
+ .. c:macro:: PyObject_HEAD
Necessary for all Python objects. Included here so that the
:c:type:`PyArray_Chunk` structure matches that of the buffer object
(at least to the len member).
- .. c:member:: PyObject *PyArray_Chunk.base
+ .. c:member:: PyObject *base
The Python object this chunk of memory comes from. Needed so that
memory can be accounted for properly.
- .. c:member:: void *PyArray_Chunk.ptr
+ .. c:member:: void *ptr
A pointer to the start of the single-segment chunk of memory.
- .. c:member:: npy_intp PyArray_Chunk.len
+ .. c:member:: npy_intp len
The length of the segment in bytes.
- .. c:member:: int PyArray_Chunk.flags
+ .. c:member:: int flags
Any data flags (*e.g.* :c:data:`NPY_ARRAY_WRITEABLE` ) that should
be used to interpret the memory.
@@ -1342,15 +1345,15 @@ PyArrayInterface
PyObject *descr;
} PyArrayInterface;
- .. c:member:: int PyArrayInterface.two
+ .. c:member:: int two
the integer 2 as a sanity check.
- .. c:member:: int PyArrayInterface.nd
+ .. c:member:: int nd
the number of dimensions in the array.
- .. c:member:: char PyArrayInterface.typekind
+ .. c:member:: char typekind
A character indicating what kind of array is present according to the
typestring convention with 't' -> bitfield, 'b' -> Boolean, 'i' ->
@@ -1358,11 +1361,11 @@ PyArrayInterface
complex floating point, 'O' -> object, 'S' -> (byte-)string, 'U' ->
unicode, 'V' -> void.
- .. c:member:: int PyArrayInterface.itemsize
+ .. c:member:: int itemsize
The number of bytes each item in the array requires.
- .. c:member:: int PyArrayInterface.flags
+ .. c:member:: int flags
Any of the bits :c:data:`NPY_ARRAY_C_CONTIGUOUS` (1),
:c:data:`NPY_ARRAY_F_CONTIGUOUS` (2), :c:data:`NPY_ARRAY_ALIGNED` (0x100),
@@ -1376,26 +1379,26 @@ PyArrayInterface
structure is present (it will be ignored by objects consuming
version 2 of the array interface).
- .. c:member:: npy_intp *PyArrayInterface.shape
+ .. c:member:: npy_intp *shape
An array containing the size of the array in each dimension.
- .. c:member:: npy_intp *PyArrayInterface.strides
+ .. c:member:: npy_intp *strides
An array containing the number of bytes to jump to get to the next
element in each dimension.
- .. c:member:: void *PyArrayInterface.data
+ .. c:member:: void *data
A pointer *to* the first element of the array.
- .. c:member:: PyObject *PyArrayInterface.descr
+ .. c:member:: PyObject *descr
A Python object describing the data-type in more detail (same
as the *descr* key in :obj:`__array_interface__`). This can be
``NULL`` if *typekind* and *itemsize* provide enough
information. This field is also ignored unless
- :c:data:`ARR_HAS_DESCR` flag is on in *flags*.
+ :c:data:`NPY_ARR_HAS_DESCR` flag is on in *flags*.
Internally used structures
@@ -1433,7 +1436,7 @@ for completeness and assistance in understanding the code.
Advanced indexing is handled with this Python type. It is simply a
loose wrapper around the C-structure containing the variables
needed for advanced array indexing. The associated C-structure,
- :c:type:`PyArrayMapIterObject`, is useful if you are trying to
+ ``PyArrayMapIterObject``, is useful if you are trying to
understand the advanced-index mapping code. It is defined in the
``arrayobject.h`` header. This type is not exposed to Python and
could be replaced with a C-structure. As a Python type it takes
diff --git a/doc/source/reference/c-api/ufunc.rst b/doc/source/reference/c-api/ufunc.rst
index 50963c81f..1b9b68642 100644
--- a/doc/source/reference/c-api/ufunc.rst
+++ b/doc/source/reference/c-api/ufunc.rst
@@ -12,12 +12,39 @@ Constants
.. c:var:: UFUNC_ERR_{HANDLER}
- ``{HANDLER}`` can be **IGNORE**, **WARN**, **RAISE**, or **CALL**
+ .. c:macro:: UFUNC_ERR_IGNORE
+
+ .. c:macro:: UFUNC_ERR_WARN
+
+ .. c:macro:: UFUNC_ERR_RAISE
+
+ .. c:macro:: UFUNC_ERR_CALL
.. c:var:: UFUNC_{THING}_{ERR}
- ``{THING}`` can be **MASK**, **SHIFT**, or **FPE**, and ``{ERR}`` can
- be **DIVIDEBYZERO**, **OVERFLOW**, **UNDERFLOW**, and **INVALID**.
+ .. c:macro:: UFUNC_MASK_DIVIDEBYZERO
+
+ .. c:macro:: UFUNC_MASK_OVERFLOW
+
+ .. c:macro:: UFUNC_MASK_UNDERFLOW
+
+ .. c:macro:: UFUNC_MASK_INVALID
+
+ .. c:macro:: UFUNC_SHIFT_DIVIDEBYZERO
+
+ .. c:macro:: UFUNC_SHIFT_OVERFLOW
+
+ .. c:macro:: UFUNC_SHIFT_UNDERFLOW
+
+ .. c:macro:: UFUNC_SHIFT_INVALID
+
+ .. c:macro:: UFUNC_FPE_DIVIDEBYZERO
+
+ .. c:macro:: UFUNC_FPE_OVERFLOW
+
+ .. c:macro:: UFUNC_FPE_UNDERFLOW
+
+ .. c:macro:: UFUNC_FPE_INVALID
.. c:var:: PyUFunc_{VALUE}
@@ -50,6 +77,66 @@ Macros
was released (because loop->obj was not true).
+Types
+-----
+
+.. c:type:: PyUFuncGenericFunction
+
+ pointers to functions that actually implement the underlying
+ (element-by-element) function :math:`N` times with the following
+ signature:
+
+ .. c:function:: void loopfunc(
+ char** args, npy_intp const *dimensions, npy_intp const *steps, void* data)
+
+ *args*
+
+ An array of pointers to the actual data for the input and output
+ arrays. The input arguments are given first followed by the output
+ arguments.
+
+ *dimensions*
+
+ A pointer to the size of the dimension over which this function is
+ looping.
+
+ *steps*
+
+ A pointer to the number of bytes to jump to get to the
+ next element in this dimension for each of the input and
+ output arguments.
+
+ *data*
+
+ Arbitrary data (extra arguments, function names, *etc.* )
+ that can be stored with the ufunc and will be passed in
+ when it is called.
+
+ This is an example of a func specialized for addition of doubles
+ returning doubles.
+
+ .. code-block:: c
+
+ static void
+ double_add(char **args,
+ npy_intp const *dimensions,
+ npy_intp const *steps,
+ void *extra)
+ {
+ npy_intp i;
+ npy_intp is1 = steps[0], is2 = steps[1];
+ npy_intp os = steps[2], n = dimensions[0];
+ char *i1 = args[0], *i2 = args[1], *op = args[2];
+ for (i = 0; i < n; i++) {
+ *((double *)op) = *((double *)i1) +
+ *((double *)i2);
+ i1 += is1;
+ i2 += is2;
+ op += os;
+ }
+ }
+
+
Functions
---------
@@ -71,60 +158,7 @@ Functions
:param func:
Must to an array of length *ntypes* containing
- :c:type:`PyUFuncGenericFunction` items. These items are pointers to
- functions that actually implement the underlying
- (element-by-element) function :math:`N` times with the following
- signature:
-
- .. c:function:: void loopfunc(
- char** args, npy_intp const *dimensions, npy_intp const *steps, void* data)
-
- *args*
-
- An array of pointers to the actual data for the input and output
- arrays. The input arguments are given first followed by the output
- arguments.
-
- *dimensions*
-
- A pointer to the size of the dimension over which this function is
- looping.
-
- *steps*
-
- A pointer to the number of bytes to jump to get to the
- next element in this dimension for each of the input and
- output arguments.
-
- *data*
-
- Arbitrary data (extra arguments, function names, *etc.* )
- that can be stored with the ufunc and will be passed in
- when it is called.
-
- This is an example of a func specialized for addition of doubles
- returning doubles.
-
- .. code-block:: c
-
- static void
- double_add(char **args,
- npy_intp const *dimensions,
- npy_intp const *steps,
- void *extra)
- {
- npy_intp i;
- npy_intp is1 = steps[0], is2 = steps[1];
- npy_intp os = steps[2], n = dimensions[0];
- char *i1 = args[0], *i2 = args[1], *op = args[2];
- for (i = 0; i < n; i++) {
- *((double *)op) = *((double *)i1) +
- *((double *)i2);
- i1 += is1;
- i2 += is2;
- op += os;
- }
- }
+ :c:type:`PyUFuncGenericFunction` items.
:param data:
Should be ``NULL`` or a pointer to an array of size *ntypes*
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index 97859ac67..18248fe09 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -272,7 +272,7 @@ Filling a masked array
ma.common_fill_value
ma.default_fill_value
ma.maximum_fill_value
- ma.maximum_fill_value
+ ma.minimum_fill_value
ma.set_fill_value
ma.MaskedArray.get_fill_value
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index f579b0d85..bb4ed89e9 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -575,11 +575,14 @@ Record Arrays
=============
As an optional convenience numpy provides an ndarray subclass,
-:class:`numpy.recarray`, and associated helper functions in the
-:mod:`numpy.lib.recfunctions` submodule (aliased as ``numpy.rec``), that allows
-access to fields of structured arrays by attribute instead of only by index.
-Record arrays also use a special datatype, :class:`numpy.record`, that allows
+:class:`numpy.recarray` that allows access to fields of structured arrays by
+attribute instead of only by index.
+Record arrays use a special datatype, :class:`numpy.record`, that allows
field access by attribute on the structured scalars obtained from the array.
+The :mod:`numpy.rec` module provides functions for creating recarrays from
+various objects.
+Additional helper functions for creating and manipulating structured arrays
+can be found in :mod:`numpy.lib.recfunctions`.
The simplest way to create a record array is with ``numpy.rec.array``::
diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst
index 3c39b35d0..ec2af409a 100644
--- a/doc/source/user/basics.types.rst
+++ b/doc/source/user/basics.types.rst
@@ -19,78 +19,78 @@ The primitive types supported are tied closely to those in C:
- C type
- Description
- * - `np.bool_`
+ * - `numpy.bool_`
- ``bool``
- Boolean (True or False) stored as a byte
- * - `np.byte`
+ * - `numpy.byte`
- ``signed char``
- Platform-defined
- * - `np.ubyte`
+ * - `numpy.ubyte`
- ``unsigned char``
- Platform-defined
- * - `np.short`
+ * - `numpy.short`
- ``short``
- Platform-defined
- * - `np.ushort`
+ * - `numpy.ushort`
- ``unsigned short``
- Platform-defined
- * - `np.intc`
+ * - `numpy.intc`
- ``int``
- Platform-defined
- * - `np.uintc`
+ * - `numpy.uintc`
- ``unsigned int``
- Platform-defined
- * - `np.int_`
+ * - `numpy.int_`
- ``long``
- Platform-defined
- * - `np.uint`
+ * - `numpy.uint`
- ``unsigned long``
- Platform-defined
- * - `np.longlong`
+ * - `numpy.longlong`
- ``long long``
- Platform-defined
- * - `np.ulonglong`
+ * - `numpy.ulonglong`
- ``unsigned long long``
- Platform-defined
- * - `np.half` / `np.float16`
+ * - `numpy.half` / `numpy.float16`
-
- Half precision float:
sign bit, 5 bits exponent, 10 bits mantissa
- * - `np.single`
+ * - `numpy.single`
- ``float``
- Platform-defined single precision float:
typically sign bit, 8 bits exponent, 23 bits mantissa
- * - `np.double`
+ * - `numpy.double`
- ``double``
- Platform-defined double precision float:
typically sign bit, 11 bits exponent, 52 bits mantissa.
- * - `np.longdouble`
+ * - `numpy.longdouble`
- ``long double``
- Platform-defined extended-precision float
- * - `np.csingle`
+ * - `numpy.csingle`
- ``float complex``
- Complex number, represented by two single-precision floats (real and imaginary components)
- * - `np.cdouble`
+ * - `numpy.cdouble`
- ``double complex``
- Complex number, represented by two double-precision floats (real and imaginary components).
- * - `np.clongdouble`
+ * - `numpy.clongdouble`
- ``long double complex``
- Complex number, represented by two extended-precision floats (real and imaginary components).
@@ -105,59 +105,59 @@ aliases are provided:
- C type
- Description
- * - `np.int8`
+ * - `numpy.int8`
- ``int8_t``
- Byte (-128 to 127)
- * - `np.int16`
+ * - `numpy.int16`
- ``int16_t``
- Integer (-32768 to 32767)
- * - `np.int32`
+ * - `numpy.int32`
- ``int32_t``
- Integer (-2147483648 to 2147483647)
- * - `np.int64`
+ * - `numpy.int64`
- ``int64_t``
- Integer (-9223372036854775808 to 9223372036854775807)
- * - `np.uint8`
+ * - `numpy.uint8`
- ``uint8_t``
- Unsigned integer (0 to 255)
- * - `np.uint16`
+ * - `numpy.uint16`
- ``uint16_t``
- Unsigned integer (0 to 65535)
- * - `np.uint32`
+ * - `numpy.uint32`
- ``uint32_t``
- Unsigned integer (0 to 4294967295)
- * - `np.uint64`
+ * - `numpy.uint64`
- ``uint64_t``
- Unsigned integer (0 to 18446744073709551615)
- * - `np.intp`
+ * - `numpy.intp`
- ``intptr_t``
- Integer used for indexing, typically the same as ``ssize_t``
- * - `np.uintp`
+ * - `numpy.uintp`
- ``uintptr_t``
- Integer large enough to hold a pointer
- * - `np.float32`
+ * - `numpy.float32`
- ``float``
-
- * - `np.float64` / `np.float_`
+ * - `numpy.float64` / `numpy.float_`
- ``double``
- Note that this matches the precision of the builtin python `float`.
- * - `np.complex64`
+ * - `numpy.complex64`
- ``float complex``
- Complex number, represented by two 32-bit floats (real and imaginary components)
- * - `np.complex128` / `np.complex_`
+ * - `numpy.complex128` / `numpy.complex_`
- ``double complex``
- Note that this matches the precision of the builtin python `complex`.
diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst
new file mode 100644
index 000000000..ca9fc41f0
--- /dev/null
+++ b/doc/source/user/how-to-io.rst
@@ -0,0 +1,328 @@
+.. _how-to-io:
+
+##############################################################################
+Reading and writing files
+##############################################################################
+
+This page tackles common applications; for the full collection of I/O
+routines, see :ref:`routines.io`.
+
+
+******************************************************************************
+Reading text and CSV_ files
+******************************************************************************
+
+.. _CSV: https://en.wikipedia.org/wiki/Comma-separated_values
+
+With no missing values
+==============================================================================
+
+Use :func:`numpy.loadtxt`.
+
+With missing values
+==============================================================================
+
+Use :func:`numpy.genfromtxt`.
+
+:func:`numpy.genfromtxt` will either
+
+ - return a :ref:`masked array<maskedarray.generic>`
+ **masking out missing values** (if ``usemask=True``), or
+
+ - **fill in the missing value** with the value specified in
+ ``filling_values`` (default is ``np.nan`` for float, -1 for int).
+
+With non-whitespace delimiters
+------------------------------------------------------------------------------
+::
+
+ >>> print(open("csv.txt").read()) # doctest: +SKIP
+ 1, 2, 3
+ 4,, 6
+ 7, 8, 9
+
+
+Masked-array output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+ >>> np.genfromtxt("csv.txt", delimiter=",", usemask=True) # doctest: +SKIP
+ masked_array(
+ data=[[1.0, 2.0, 3.0],
+ [4.0, --, 6.0],
+ [7.0, 8.0, 9.0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1e+20)
+
+Array output
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+ >>> np.genfromtxt("csv.txt", delimiter=",") # doctest: +SKIP
+ array([[ 1., 2., 3.],
+ [ 4., nan, 6.],
+ [ 7., 8., 9.]])
+
+Array output, specified fill-in value
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+::
+
+ >>> np.genfromtxt("csv.txt", delimiter=",", dtype=np.int8, filling_values=99) # doctest: +SKIP
+ array([[ 1, 2, 3],
+ [ 4, 99, 6],
+ [ 7, 8, 9]], dtype=int8)
+
+Whitespace-delimited
+-------------------------------------------------------------------------------
+
+:func:`numpy.genfromtxt` can also parse whitespace-delimited data files
+that have missing values if
+
+* **Each field has a fixed width**: Use the width as the `delimiter` argument.
+ ::
+
+ # File with width=4. The data does not have to be justified (for example,
+ # the 2 in row 1), the last column can be less than width (for example, the 6
+ # in row 2), and no delimiting character is required (for instance 8888 and 9
+ # in row 3)
+
+ >>> f = open("fixedwidth.txt").read() # doctest: +SKIP
+ >>> print(f) # doctest: +SKIP
+ 1 2 3
+ 44 6
+ 7 88889
+
+ # Showing spaces as ^
+ >>> print(f.replace(" ","^")) # doctest: +SKIP
+ 1^^^2^^^^^^3
+ 44^^^^^^6
+ 7^^^88889
+
+ >>> np.genfromtxt("fixedwidth.txt", delimiter=4) # doctest: +SKIP
+ array([[1.000e+00, 2.000e+00, 3.000e+00],
+ [4.400e+01, nan, 6.000e+00],
+ [7.000e+00, 8.888e+03, 9.000e+00]])
+
+* **A special value (e.g. "x") indicates a missing field**: Use it as the
+ `missing_values` argument.
+ ::
+
+ >>> print(open("nan.txt").read()) # doctest: +SKIP
+ 1 2 3
+ 44 x 6
+ 7 8888 9
+
+ >>> np.genfromtxt("nan.txt", missing_values="x") # doctest: +SKIP
+ array([[1.000e+00, 2.000e+00, 3.000e+00],
+ [4.400e+01, nan, 6.000e+00],
+ [7.000e+00, 8.888e+03, 9.000e+00]])
+
+* **You want to skip the rows with missing values**: Set
+ `invalid_raise=False`.
+ ::
+
+ >>> print(open("skip.txt").read()) # doctest: +SKIP
+ 1 2 3
+ 44 6
+ 7 888 9
+
+ >>> np.genfromtxt("skip.txt", invalid_raise=False) # doctest: +SKIP
+ __main__:1: ConversionWarning: Some errors were detected !
+ Line #2 (got 2 columns instead of 3)
+ array([[ 1., 2., 3.],
+ [ 7., 888., 9.]])
+
+
+* **The delimiter whitespace character is different from the whitespace that
+ indicates missing data**. For instance, if columns are delimited by ``\t``,
+ then missing data will be recognized if it consists of one
+ or more spaces.
+ ::
+
+ >>> f = open("tabs.txt").read() # doctest: +SKIP
+ >>> print(f) # doctest: +SKIP
+ 1 2 3
+ 44 6
+ 7 888 9
+
+ # Tabs vs. spaces
+ >>> print(f.replace("\t","^")) # doctest: +SKIP
+ 1^2^3
+ 44^ ^6
+ 7^888^9
+
+ >>> np.genfromtxt("tabs.txt", delimiter="\t", missing_values=" +") # doctest: +SKIP
+ array([[ 1., 2., 3.],
+ [ 44., nan, 6.],
+ [ 7., 888., 9.]])
+
+******************************************************************************
+Read a file in .npy or .npz format
+******************************************************************************
+
+Choices:
+
+ - Use :func:`numpy.load`. It can read files generated by any of
+ :func:`numpy.save`, :func:`numpy.savez`, or :func:`numpy.savez_compressed`.
+
+ - Use memory mapping. See `numpy.lib.format.open_memmap`.
+
+******************************************************************************
+Write to a file to be read back by NumPy
+******************************************************************************
+
+Binary
+===============================================================================
+
+Use
+:func:`numpy.save`, or to store multiple arrays :func:`numpy.savez`
+or :func:`numpy.savez_compressed`.
+
+For :ref:`security and portability <how-to-io-pickle-file>`, set
+``allow_pickle=False`` unless the dtype contains Python objects, which
+requires pickling.
+
+Masked arrays :any:`can't currently be saved <MaskedArray.tofile>`,
+nor can other arbitrary array subclasses.
+
+Human-readable
+==============================================================================
+
+:func:`numpy.save` and :func:`numpy.savez` create binary files. To **write a
+human-readable file**, use :func:`numpy.savetxt`. The array can only be 1- or
+2-dimensional, and there's no ``savetxtz`` for multiple files.
+
+Large arrays
+==============================================================================
+
+See :ref:`how-to-io-large-arrays`.
+
+******************************************************************************
+Read an arbitrarily formatted binary file ("binary blob")
+******************************************************************************
+
+Use a :doc:`structured array <basics.rec>`.
+
+**Example:**
+
+The ``.wav`` file header is a 44-byte block preceding ``data_size`` bytes of the
+actual sound data::
+
+ chunk_id "RIFF"
+ chunk_size 4-byte unsigned little-endian integer
+ format "WAVE"
+ fmt_id "fmt "
+ fmt_size 4-byte unsigned little-endian integer
+ audio_fmt 2-byte unsigned little-endian integer
+ num_channels 2-byte unsigned little-endian integer
+ sample_rate 4-byte unsigned little-endian integer
+ byte_rate 4-byte unsigned little-endian integer
+ block_align 2-byte unsigned little-endian integer
+ bits_per_sample 2-byte unsigned little-endian integer
+ data_id "data"
+ data_size 4-byte unsigned little-endian integer
+
+The ``.wav`` file header as a NumPy structured dtype::
+
+ wav_header_dtype = np.dtype([
+ ("chunk_id", (bytes, 4)), # flexible-sized scalar type, item size 4
+ ("chunk_size", "<u4"), # little-endian unsigned 32-bit integer
+ ("format", "S4"), # 4-byte string, alternate spelling of (bytes, 4)
+ ("fmt_id", "S4"),
+ ("fmt_size", "<u4"),
+ ("audio_fmt", "<u2"), #
+ ("num_channels", "<u2"), # .. more of the same ...
+ ("sample_rate", "<u4"), #
+ ("byte_rate", "<u4"),
+ ("block_align", "<u2"),
+ ("bits_per_sample", "<u2"),
+ ("data_id", "S4"),
+ ("data_size", "<u4"),
+ #
+ # the sound data itself cannot be represented here:
+ # it does not have a fixed size
+ ])
+
+ header = np.fromfile(f, dtype=wav_header_dtype, count=1)[0]
+
+This ``.wav`` example is for illustration; to read a ``.wav`` file in real
+life, use Python's built-in module :mod:`wave`.
+
+(Adapted from Pauli Virtanen, :ref:`advanced_numpy`, licensed
+under `CC BY 4.0 <https://creativecommons.org/licenses/by/4.0/>`_.)
+
+.. _how-to-io-large-arrays:
+
+******************************************************************************
+Write or read large arrays
+******************************************************************************
+
+**Arrays too large to fit in memory** can be treated like ordinary in-memory
+arrays using memory mapping.
+
+- Raw array data written with :func:`numpy.ndarray.tofile` or
+ :func:`numpy.ndarray.tobytes` can be read with :func:`numpy.memmap`::
+
+ array = numpy.memmap("mydata/myarray.arr", mode="r", dtype=np.int16, shape=(1024, 1024))
+
+- Files output by :func:`numpy.save` (that is, using the numpy format) can be read
+ using :func:`numpy.load` with the ``mmap_mode`` keyword argument::
+
+ large_array[some_slice] = np.load("path/to/small_array", mmap_mode="r")
+
+Memory mapping lacks features like data chunking and compression; more
+full-featured formats and libraries usable with NumPy include:
+
+* **HDF5**: `h5py <https://www.h5py.org/>`_ or `PyTables <https://www.pytables.org/>`_.
+* **Zarr**: `here <https://zarr.readthedocs.io/en/stable/tutorial.html#reading-and-writing-data>`_.
+* **NetCDF**: :class:`scipy.io.netcdf_file`.
+
+For tradeoffs among memmap, Zarr, and HDF5, see
+`pythonspeed.com <https://pythonspeed.com/articles/mmap-vs-zarr-hdf5/>`_.
+
+******************************************************************************
+Write files for reading by other (non-NumPy) tools
+******************************************************************************
+
+Formats for **exchanging data** with other tools include HDF5, Zarr, and
+NetCDF (see :ref:`how-to-io-large-arrays`).
+
+******************************************************************************
+Write or read a JSON file
+******************************************************************************
+
+NumPy arrays are **not** directly
+`JSON serializable <https://github.com/numpy/numpy/issues/12481>`_.
+
+
+.. _how-to-io-pickle-file:
+
+******************************************************************************
+Save/restore using a pickle file
+******************************************************************************
+
+Avoid when possible; :doc:`pickles <python:library/pickle>` are not secure
+against erroneous or maliciously constructed data.
+
+Use :func:`numpy.save` and :func:`numpy.load`. Set ``allow_pickle=False``,
+unless the array dtype includes Python objects, in which case pickling is
+required.
+
+******************************************************************************
+Convert from a pandas DataFrame to a NumPy array
+******************************************************************************
+
+See :meth:`pandas.DataFrame.to_numpy`.
+
+******************************************************************************
+Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile`
+******************************************************************************
+
+In general, prefer :func:`numpy.save` and :func:`numpy.load`.
+
+:func:`numpy.ndarray.tofile` and :func:`numpy.fromfile` lose information on
+endianness and precision and so are unsuitable for anything but scratch
+storage.
+
diff --git a/doc/source/user/howtos_index.rst b/doc/source/user/howtos_index.rst
index 45e013e6f..89a6f54e7 100644
--- a/doc/source/user/howtos_index.rst
+++ b/doc/source/user/howtos_index.rst
@@ -12,4 +12,4 @@ the package, see the :ref:`API reference <reference>`.
:maxdepth: 1
how-to-how-to
- ionumpy
+ how-to-io
diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst
index 3a79f0f2e..11a019b48 100644
--- a/doc/source/user/index.rst
+++ b/doc/source/user/index.rst
@@ -26,7 +26,10 @@ classes contained in the package, see the :ref:`reference`.
howtos_index
-.. These are stuck here to avoid the "WARNING: document isn't included in any
+.. Links to these files are placed directly in the top-level html
+ (doc/source/_templates/indexcontent.html, which appears for the URLs
+ numpy.org/devdocs and numpy.org/doc/XX) and are not in any toctree, so
+ we include them here to avoid a "WARNING: document isn't included in any
toctree" message
.. toctree::
@@ -39,5 +42,5 @@ classes contained in the package, see the :ref:`reference`.
../docs/index
../bugs
../release
- ../about
+ ../doc_conventions
../license
diff --git a/doc/source/user/ionumpy.rst b/doc/source/user/ionumpy.rst
deleted file mode 100644
index a31720322..000000000
--- a/doc/source/user/ionumpy.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-================================================
-How to read and write data using NumPy
-================================================
-
-.. currentmodule:: numpy
-
-.. testsetup::
-
- import numpy as np
- np.random.seed(1)
-
-**Objectives**
-
-- Writing NumPy arrays to files
-- Reading NumPy arrays from files
-- Dealing with encoding and dtype issues
-
-**Content**
-
-To be completed.
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index d4eda6b31..3d40682e7 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -4,7 +4,30 @@ import datetime as dt
from abc import abstractmethod
from numpy.core._internal import _ctypes
-from numpy.typing import ArrayLike, DtypeLike, _Shape, _ShapeLike
+from numpy.typing import (
+ ArrayLike,
+ DtypeLike,
+ _Shape,
+ _ShapeLike,
+ _CharLike,
+ _BoolLike,
+ _IntLike,
+ _FloatLike,
+ _ComplexLike,
+ _NumberLike,
+)
+from numpy.typing._callable import (
+ _BoolOp,
+ _BoolSub,
+ _BoolTrueDiv,
+ _TD64Div,
+ _IntTrueDiv,
+ _UnsignedIntOp,
+ _SignedIntOp,
+ _FloatOp,
+ _ComplexOp,
+ _NumberOp,
+)
from typing import (
Any,
@@ -42,10 +65,9 @@ else:
def __index__(self) -> int: ...
# Ensures that the stubs are picked up
-from . import (
+from numpy import (
char,
compat,
- core,
ctypeslib,
emath,
fft,
@@ -107,6 +129,45 @@ from numpy.core.fromnumeric import (
var,
)
+from numpy.core._asarray import (
+ asarray as asarray,
+ asanyarray as asanyarray,
+ ascontiguousarray as ascontiguousarray,
+ asfortranarray as asfortranarray,
+ require as require,
+)
+
+from numpy.core.numeric import(
+ zeros_like as zeros_like,
+ ones as ones,
+ ones_like as ones_like,
+ empty_like as empty_like,
+ full as full,
+ full_like as full_like,
+ count_nonzero as count_nonzero,
+ isfortran as isfortran,
+ argwhere as argwhere,
+ flatnonzero as flatnonzero,
+ correlate as correlate,
+ convolve as convolve,
+ outer as outer,
+ tensordot as tensordot,
+ roll as roll,
+ rollaxis as rollaxis,
+ moveaxis as moveaxis,
+ cross as cross,
+ indices as indices,
+ fromfunction as fromfunction,
+ isscalar as isscalar,
+ binary_repr as binary_repr,
+ base_repr as base_repr,
+ identity as identity,
+ allclose as allclose,
+ isclose as isclose,
+ array_equal as array_equal,
+ array_equiv as array_equiv,
+)
+
# Add an object to `__all__` if their stubs are defined in an external file;
# their stubs will not be recognized otherwise.
# NOTE: This is redundant for objects defined within this file.
@@ -154,8 +215,316 @@ __all__ = [
"var",
]
-# TODO: remove when the full numpy namespace is defined
-def __getattr__(name: str) -> Any: ...
+DataSource: Any
+False_: Any
+MachAr: Any
+ScalarType: Any
+True_: Any
+UFUNC_PYVALS_NAME: Any
+angle: Any
+append: Any
+apply_along_axis: Any
+apply_over_axes: Any
+arange: Any
+array2string: Any
+array_repr: Any
+array_split: Any
+array_str: Any
+asarray_chkfinite: Any
+asfarray: Any
+asmatrix: Any
+asscalar: Any
+atleast_1d: Any
+atleast_2d: Any
+atleast_3d: Any
+average: Any
+bartlett: Any
+bincount: Any
+bitwise_not: Any
+blackman: Any
+block: Any
+bmat: Any
+bool8: Any
+broadcast: Any
+broadcast_arrays: Any
+broadcast_to: Any
+busday_count: Any
+busday_offset: Any
+busdaycalendar: Any
+byte: Any
+byte_bounds: Any
+bytes0: Any
+c_: Any
+can_cast: Any
+cast: Any
+cdouble: Any
+cfloat: Any
+char: Any
+chararray: Any
+clongdouble: Any
+clongfloat: Any
+column_stack: Any
+common_type: Any
+compare_chararrays: Any
+compat: Any
+complex256: Any
+complex_: Any
+concatenate: Any
+conj: Any
+copy: Any
+copyto: Any
+corrcoef: Any
+cov: Any
+csingle: Any
+ctypeslib: Any
+cumproduct: Any
+datetime_as_string: Any
+datetime_data: Any
+delete: Any
+deprecate: Any
+deprecate_with_doc: Any
+diag: Any
+diag_indices: Any
+diag_indices_from: Any
+diagflat: Any
+diff: Any
+digitize: Any
+disp: Any
+divide: Any
+dot: Any
+double: Any
+dsplit: Any
+dstack: Any
+ediff1d: Any
+einsum: Any
+einsum_path: Any
+emath: Any
+errstate: Any
+expand_dims: Any
+extract: Any
+eye: Any
+fft: Any
+fill_diagonal: Any
+finfo: Any
+fix: Any
+flip: Any
+fliplr: Any
+flipud: Any
+float128: Any
+float_: Any
+format_float_positional: Any
+format_float_scientific: Any
+format_parser: Any
+frombuffer: Any
+fromfile: Any
+fromiter: Any
+frompyfunc: Any
+fromregex: Any
+fromstring: Any
+genfromtxt: Any
+geomspace: Any
+get_include: Any
+get_printoptions: Any
+getbufsize: Any
+geterr: Any
+geterrcall: Any
+geterrobj: Any
+gradient: Any
+half: Any
+hamming: Any
+hanning: Any
+histogram: Any
+histogram2d: Any
+histogram_bin_edges: Any
+histogramdd: Any
+hsplit: Any
+hstack: Any
+i0: Any
+iinfo: Any
+imag: Any
+in1d: Any
+index_exp: Any
+info: Any
+inner: Any
+insert: Any
+int0: Any
+int_: Any
+intc: Any
+interp: Any
+intersect1d: Any
+intp: Any
+is_busday: Any
+iscomplex: Any
+iscomplexobj: Any
+isin: Any
+isneginf: Any
+isposinf: Any
+isreal: Any
+isrealobj: Any
+iterable: Any
+ix_: Any
+kaiser: Any
+kron: Any
+lexsort: Any
+lib: Any
+linalg: Any
+linspace: Any
+load: Any
+loads: Any
+loadtxt: Any
+logspace: Any
+longcomplex: Any
+longdouble: Any
+longfloat: Any
+longlong: Any
+lookfor: Any
+ma: Any
+mafromtxt: Any
+mask_indices: Any
+mat: Any
+math: Any
+matrix: Any
+matrixlib: Any
+max: Any
+may_share_memory: Any
+median: Any
+memmap: Any
+meshgrid: Any
+mgrid: Any
+min: Any
+min_scalar_type: Any
+mintypecode: Any
+mod: Any
+msort: Any
+nan_to_num: Any
+nanargmax: Any
+nanargmin: Any
+nancumprod: Any
+nancumsum: Any
+nanmax: Any
+nanmean: Any
+nanmedian: Any
+nanmin: Any
+nanpercentile: Any
+nanprod: Any
+nanquantile: Any
+nanstd: Any
+nansum: Any
+nanvar: Any
+nbytes: Any
+ndenumerate: Any
+ndfromtxt: Any
+ndindex: Any
+nditer: Any
+nested_iters: Any
+newaxis: Any
+numarray: Any
+object0: Any
+ogrid: Any
+packbits: Any
+pad: Any
+percentile: Any
+piecewise: Any
+place: Any
+poly: Any
+poly1d: Any
+polyadd: Any
+polyder: Any
+polydiv: Any
+polyfit: Any
+polyint: Any
+polymul: Any
+polynomial: Any
+polysub: Any
+polyval: Any
+printoptions: Any
+product: Any
+promote_types: Any
+put_along_axis: Any
+putmask: Any
+quantile: Any
+r_: Any
+random: Any
+ravel_multi_index: Any
+real: Any
+real_if_close: Any
+rec: Any
+recarray: Any
+recfromcsv: Any
+recfromtxt: Any
+record: Any
+result_type: Any
+roots: Any
+rot90: Any
+round: Any
+round_: Any
+row_stack: Any
+s_: Any
+save: Any
+savetxt: Any
+savez: Any
+savez_compressed: Any
+sctypeDict: Any
+sctypes: Any
+select: Any
+set_printoptions: Any
+set_string_function: Any
+setbufsize: Any
+setdiff1d: Any
+seterr: Any
+seterrcall: Any
+seterrobj: Any
+setxor1d: Any
+shares_memory: Any
+short: Any
+show_config: Any
+sinc: Any
+single: Any
+singlecomplex: Any
+sort_complex: Any
+source: Any
+split: Any
+stack: Any
+str0: Any
+string_: Any
+sys: Any
+take_along_axis: Any
+testing: Any
+tile: Any
+trapz: Any
+tri: Any
+tril: Any
+tril_indices: Any
+tril_indices_from: Any
+trim_zeros: Any
+triu: Any
+triu_indices: Any
+triu_indices_from: Any
+typeDict: Any
+typecodes: Any
+typename: Any
+ubyte: Any
+uint: Any
+uint0: Any
+uintc: Any
+uintp: Any
+ulonglong: Any
+unicode_: Any
+union1d: Any
+unique: Any
+unpackbits: Any
+unravel_index: Any
+unwrap: Any
+ushort: Any
+vander: Any
+vdot: Any
+vectorize: Any
+version: Any
+void0: Any
+vsplit: Any
+vstack: Any
+where: Any
+who: Any
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
_ByteOrder = Literal["S", "<", ">", "=", "|", "L", "B", "N", "I"]
@@ -287,6 +656,21 @@ _OrderKACF = Optional[Literal["K", "A", "C", "F"]]
_OrderACF = Optional[Literal["A", "C", "F"]]
_OrderCF = Optional[Literal["C", "F"]]
+_ModeKind = Literal["raise", "wrap", "clip"]
+_PartitionKind = Literal["introselect"]
+_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
+_SortSide = Literal["left", "right"]
+
+_ArrayLikeBool = Union[_BoolLike, Sequence[_BoolLike], ndarray]
+_ArrayLikeIntOrBool = Union[
+ _IntLike,
+ _BoolLike,
+ ndarray,
+ Sequence[_IntLike],
+ Sequence[_BoolLike],
+ Sequence[Sequence[Any]], # TODO: wait for support for recursive types
+]
+
_ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon)
class _ArrayOrScalarCommon(
@@ -330,23 +714,10 @@ class _ArrayOrScalarCommon(
def __ne__(self, other): ...
def __gt__(self, other): ...
def __ge__(self, other): ...
- def __add__(self, other): ...
- def __radd__(self, other): ...
- def __sub__(self, other): ...
- def __rsub__(self, other): ...
- def __mul__(self, other): ...
- def __rmul__(self, other): ...
- def __truediv__(self, other): ...
- def __rtruediv__(self, other): ...
- def __floordiv__(self, other): ...
- def __rfloordiv__(self, other): ...
def __mod__(self, other): ...
def __rmod__(self, other): ...
def __divmod__(self, other): ...
def __rdivmod__(self, other): ...
- # NumPy's __pow__ doesn't handle a third argument
- def __pow__(self, other): ...
- def __rpow__(self, other): ...
def __lshift__(self, other): ...
def __rlshift__(self, other): ...
def __rshift__(self, other): ...
@@ -409,6 +780,8 @@ class _ArrayOrScalarCommon(
) -> _ArraySelf: ...
def swapaxes(self: _ArraySelf, axis1: int, axis2: int) -> _ArraySelf: ...
def tobytes(self, order: _OrderKACF = ...) -> bytes: ...
+ # NOTE: `tostring()` is deprecated and therefore excluded
+ # def tostring(self, order=...): ...
def tofile(
self, fid: Union[IO[bytes], str], sep: str = ..., format: str = ...
) -> None: ...
@@ -437,42 +810,376 @@ class _ArrayOrScalarCommon(
def __array_struct__(self): ...
def __array_wrap__(array, context=...): ...
def __setstate__(self, __state): ...
- def all(self, axis=..., out=..., keepdims=...): ...
- def any(self, axis=..., out=..., keepdims=...): ...
- def argmax(self, axis=..., out=...): ...
- def argmin(self, axis=..., out=...): ...
- def argpartition(self, kth, axis=..., kind=..., order=...): ...
- def argsort(self, axis=..., kind=..., order=...): ...
- def choose(self, choices, out=..., mode=...): ...
- def clip(self, min=..., max=..., out=..., **kwargs): ...
- def compress(self, condition, axis=..., out=...): ...
- def conj(self): ...
- def conjugate(self): ...
- def cumprod(self, axis=..., dtype=..., out=...): ...
- def cumsum(self, axis=..., dtype=..., out=...): ...
- def diagonal(self, offset=..., axis1=..., axis2=...): ...
- def dot(self, b, out=...): ...
- def max(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
- def mean(self, axis=..., dtype=..., out=..., keepdims=...): ...
- def min(self, axis=..., out=..., keepdims=..., initial=..., where=...): ...
- def newbyteorder(self, new_order=...): ...
- def nonzero(self): ...
- def partition(self, kth, axis=..., kind=..., order=...): ...
- def prod(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
- def ptp(self, axis=..., out=..., keepdims=...): ...
- def put(self, indices, values, mode=...): ...
- def repeat(self, repeats, axis=...): ...
- def round(self, decimals=..., out=...): ...
- def searchsorted(self, v, side=..., sorter=...): ...
- def setfield(self, val, dtype, offset=...): ...
- def sort(self, axis=..., kind=..., order=...): ...
- def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
- def sum(self, axis=..., dtype=..., out=..., keepdims=..., initial=..., where=...): ...
- def take(self, indices, axis=..., out=..., mode=...): ...
- # NOTE: `tostring()` is deprecated and therefore excluded
- # def tostring(self, order=...): ...
- def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ...
- def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
+ # a `bool_` is returned when `keepdims=True` and `self` is a 0d array
+ @overload
+ def all(
+ self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ...
+ ) -> bool_: ...
+ @overload
+ def all(
+ self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ...
+ ) -> Union[bool_, ndarray]: ...
+ @overload
+ def all(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def any(
+ self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ...
+ ) -> bool_: ...
+ @overload
+ def any(
+ self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ...
+ ) -> Union[bool_, ndarray]: ...
+ @overload
+ def any(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def argmax(self, axis: None = ..., out: None = ...) -> signedinteger: ...
+ @overload
+ def argmax(
+ self, axis: _ShapeLike = ..., out: None = ...
+ ) -> Union[signedinteger, ndarray]: ...
+ @overload
+ def argmax(
+ self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ...
+ ) -> _NdArraySubClass: ...
+ @overload
+ def argmin(self, axis: None = ..., out: None = ...) -> signedinteger: ...
+ @overload
+ def argmin(
+ self, axis: _ShapeLike = ..., out: None = ...
+ ) -> Union[signedinteger, ndarray]: ...
+ @overload
+ def argmin(
+ self, axis: Optional[_ShapeLike] = ..., out: _NdArraySubClass = ...
+ ) -> _NdArraySubClass: ...
+ def argsort(
+ self,
+ axis: Optional[int] = ...,
+ kind: Optional[_SortKind] = ...,
+ order: Union[None, str, Sequence[str]] = ...,
+ ) -> ndarray: ...
+ @overload
+ def choose(
+ self, choices: ArrayLike, out: None = ..., mode: _ModeKind = ...,
+ ) -> ndarray: ...
+ @overload
+ def choose(
+ self, choices: ArrayLike, out: _NdArraySubClass = ..., mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def clip(
+ self,
+ min: ArrayLike = ...,
+ max: Optional[ArrayLike] = ...,
+ out: None = ...,
+ **kwargs: Any,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def clip(
+ self,
+ min: None = ...,
+ max: ArrayLike = ...,
+ out: None = ...,
+ **kwargs: Any,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def clip(
+ self,
+ min: ArrayLike = ...,
+ max: Optional[ArrayLike] = ...,
+ out: _NdArraySubClass = ...,
+ **kwargs: Any,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def clip(
+ self,
+ min: None = ...,
+ max: ArrayLike = ...,
+ out: _NdArraySubClass = ...,
+ **kwargs: Any,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def compress(
+ self, a: ArrayLike, axis: Optional[int] = ..., out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def compress(
+ self, a: ArrayLike, axis: Optional[int] = ..., out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+ def conj(self: _ArraySelf) -> _ArraySelf: ...
+ def conjugate(self: _ArraySelf) -> _ArraySelf: ...
+ @overload
+ def cumprod(
+ self, axis: Optional[int] = ..., dtype: DtypeLike = ..., out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def cumprod(
+ self,
+ axis: Optional[int] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def cumsum(
+ self, axis: Optional[int] = ..., dtype: DtypeLike = ..., out: None = ...,
+ ) -> ndarray: ...
+ @overload
+ def cumsum(
+ self,
+ axis: Optional[int] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def max(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> number: ...
+ @overload
+ def max(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def max(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def mean(
+ self,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ ) -> number: ...
+ @overload
+ def mean(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def mean(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def min(
+ self,
+ axis: None = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> number: ...
+ @overload
+ def min(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def min(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> _NdArraySubClass: ...
+ def newbyteorder(self: _ArraySelf, __new_order: _ByteOrder = ...) -> _ArraySelf: ...
+ @overload
+ def prod(
+ self,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> number: ...
+ @overload
+ def prod(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def prod(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def ptp(
+ self, axis: None = ..., out: None = ..., keepdims: Literal[False] = ...,
+ ) -> number: ...
+ @overload
+ def ptp(
+ self, axis: Optional[_ShapeLike] = ..., out: None = ..., keepdims: bool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def ptp(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+ def repeat(
+ self, repeats: _ArrayLikeIntOrBool, axis: Optional[int] = ...
+ ) -> ndarray: ...
+ @overload
+ def round(self: _ArraySelf, decimals: int = ..., out: None = ...) -> _ArraySelf: ...
+ @overload
+ def round(
+ self, decimals: int = ..., out: _NdArraySubClass = ...
+ ) -> _NdArraySubClass: ...
+ @overload
+ def std(
+ self,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+ ) -> number: ...
+ @overload
+ def std(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def std(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def sum(
+ self,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: Literal[False] = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> number: ...
+ @overload
+ def sum(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def sum(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ keepdims: bool = ...,
+ initial: _NumberLike = ...,
+ where: _ArrayLikeBool = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def take(
+ self,
+ indices: Union[_IntLike, _BoolLike],
+ axis: Optional[int] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> generic: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeIntOrBool,
+ axis: Optional[int] = ...,
+ out: None = ...,
+ mode: _ModeKind = ...,
+ ) -> ndarray: ...
+ @overload
+ def take(
+ self,
+ indices: _ArrayLikeIntOrBool,
+ axis: Optional[int] = ...,
+ out: _NdArraySubClass = ...,
+ mode: _ModeKind = ...,
+ ) -> _NdArraySubClass: ...
+ @overload
+ def var(
+ self,
+ axis: None = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: Literal[False] = ...,
+ ) -> number: ...
+ @overload
+ def var(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def var(
+ self,
+ axis: Optional[_ShapeLike] = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ddof: int = ...,
+ keepdims: bool = ...,
+ ) -> _NdArraySubClass: ...
_BufferType = Union[ndarray, bytes, bytearray, memoryview]
_Casting = Literal["no", "equiv", "safe", "same_kind", "unsafe"]
@@ -507,6 +1214,67 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
def strides(self) -> _Shape: ...
@strides.setter
def strides(self, value: _ShapeLike): ...
+ def argpartition(
+ self,
+ kth: _ArrayLikeIntOrBool,
+ axis: Optional[int] = ...,
+ kind: _PartitionKind = ...,
+ order: Union[None, str, Sequence[str]] = ...,
+ ) -> ndarray: ...
+ def diagonal(
+ self: _ArraySelf, offset: int = ..., axis1: int = ..., axis2: int = ...
+ ) -> _ArraySelf: ...
+ @overload
+ def dot(self, b: ArrayLike, out: None = ...) -> Union[number, ndarray]: ...
+ @overload
+ def dot(self, b: ArrayLike, out: _NdArraySubClass = ...) -> _NdArraySubClass: ...
+ # `nonzero()` is deprecated for 0d arrays/generics
+ def nonzero(self) -> Tuple[ndarray, ...]: ...
+ def partition(
+ self,
+ kth: _ArrayLikeIntOrBool,
+ axis: int = ...,
+ kind: _PartitionKind = ...,
+ order: Union[None, str, Sequence[str]] = ...,
+ ) -> None: ...
+ # `put` is technically available to `generic`,
+ # but is pointless as `generic`s are immutable
+ def put(
+ self, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _ModeKind = ...
+ ) -> None: ...
+ def searchsorted(
+ self, # >= 1D array
+ v: ArrayLike,
+ side: _SortSide = ...,
+ sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
+ ) -> ndarray: ...
+ def setfield(
+ self, val: ArrayLike, dtype: DtypeLike, offset: int = ...
+ ) -> None: ...
+ def sort(
+ self,
+ axis: int = ...,
+ kind: Optional[_SortKind] = ...,
+ order: Union[None, str, Sequence[str]] = ...,
+ ) -> None: ...
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: int = ...,
+ axis1: int = ...,
+ axis2: int = ...,
+ dtype: DtypeLike = ...,
+ out: None = ...,
+ ) -> Union[number, ndarray]: ...
+ @overload
+ def trace(
+ self, # >= 2D array
+ offset: int = ...,
+ axis1: int = ...,
+ axis2: int = ...,
+ dtype: DtypeLike = ...,
+ out: _NdArraySubClass = ...,
+ ) -> _NdArraySubClass: ...
# Many of these special methods are irrelevant currently, since protocols
# aren't supported yet. That said, I'm adding them for completeness.
# https://docs.python.org/3/reference/datamodel.html
@@ -518,14 +1286,26 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
def __matmul__(self, other): ...
def __imatmul__(self, other): ...
def __rmatmul__(self, other): ...
+ def __add__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __radd__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __sub__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rsub__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __mul__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rmul__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __floordiv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rfloordiv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __pow__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rpow__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __truediv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
+ def __rtruediv__(self, other: ArrayLike) -> Union[ndarray, generic]: ...
# `np.generic` does not support inplace operations
- def __iadd__(self, other): ...
- def __isub__(self, other): ...
- def __imul__(self, other): ...
- def __itruediv__(self, other): ...
- def __ifloordiv__(self, other): ...
+ def __iadd__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __isub__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __imul__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __itruediv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __ifloordiv__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
+ def __ipow__(self: _ArraySelf, other: ArrayLike) -> _ArraySelf: ...
def __imod__(self, other): ...
- def __ipow__(self, other): ...
def __ilshift__(self, other): ...
def __irshift__(self, other): ...
def __iand__(self, other): ...
@@ -540,8 +1320,6 @@ class ndarray(_ArrayOrScalarCommon, Iterable, Sized, Container):
# See https://github.com/numpy/numpy-stubs/pull/80 for more details.
-_CharLike = Union[str, bytes]
-
class generic(_ArrayOrScalarCommon):
@abstractmethod
def __init__(self, *args: Any, **kwargs: Any) -> None: ...
@@ -553,6 +1331,19 @@ class number(generic): # type: ignore
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ # Ensure that objects annotated as `number` support arithmetic operations
+ __add__: _NumberOp
+ __radd__: _NumberOp
+ __sub__: _NumberOp
+ __rsub__: _NumberOp
+ __mul__: _NumberOp
+ __rmul__: _NumberOp
+ __floordiv__: _NumberOp
+ __rfloordiv__: _NumberOp
+ __pow__: _NumberOp
+ __rpow__: _NumberOp
+ __truediv__: _NumberOp
+ __rtruediv__: _NumberOp
class bool_(generic):
def __init__(self, __value: object = ...) -> None: ...
@@ -560,6 +1351,18 @@ class bool_(generic):
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ __add__: _BoolOp[bool_]
+ __radd__: _BoolOp[bool_]
+ __sub__: _BoolSub
+ __rsub__: _BoolSub
+ __mul__: _BoolOp[bool_]
+ __rmul__: _BoolOp[bool_]
+ __floordiv__: _BoolOp[int8]
+ __rfloordiv__: _BoolOp[int8]
+ __pow__: _BoolOp[int8]
+ __rpow__: _BoolOp[int8]
+ __truediv__: _BoolTrueDiv
+ __rtruediv__: _BoolTrueDiv
class object_(generic):
def __init__(self, __value: object = ...) -> None: ...
@@ -576,10 +1379,18 @@ class datetime64(generic):
__format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
@overload
- def __init__(self, __value: int, __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]) -> None: ...
- def __add__(self, other: Union[timedelta64, int]) -> datetime64: ...
- def __sub__(self, other: Union[timedelta64, datetime64, int]) -> timedelta64: ...
- def __rsub__(self, other: Union[datetime64, int]) -> timedelta64: ...
+ def __init__(
+ self,
+ __value: int,
+ __format: Union[_CharLike, Tuple[_CharLike, _IntLike]]
+ ) -> None: ...
+ def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ @overload
+ def __sub__(self, other: datetime64) -> timedelta64: ...
+ @overload
+ def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> datetime64: ...
+ def __rsub__(self, other: datetime64) -> timedelta64: ...
# Support for `__index__` was added in python 3.8 (bpo-20092)
if sys.version_info >= (3, 8):
@@ -595,8 +1406,20 @@ class integer(number): # type: ignore
# NOTE: `__index__` is technically defined in the bottom-most
# sub-classes (`int64`, `uint32`, etc)
def __index__(self) -> int: ...
-
-class signedinteger(integer): ... # type: ignore
+ __truediv__: _IntTrueDiv
+ __rtruediv__: _IntTrueDiv
+
+class signedinteger(integer): # type: ignore
+ __add__: _SignedIntOp
+ __radd__: _SignedIntOp
+ __sub__: _SignedIntOp
+ __rsub__: _SignedIntOp
+ __mul__: _SignedIntOp
+ __rmul__: _SignedIntOp
+ __floordiv__: _SignedIntOp
+ __rfloordiv__: _SignedIntOp
+ __pow__: _SignedIntOp
+ __rpow__: _SignedIntOp
class int8(signedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
@@ -610,24 +1433,36 @@ class int32(signedinteger):
class int64(signedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
-class timedelta64(signedinteger):
+class timedelta64(generic):
def __init__(
self,
__value: Union[None, int, _CharLike, dt.timedelta, timedelta64] = ...,
__format: Union[_CharLike, Tuple[_CharLike, _IntLike]] = ...,
) -> None: ...
- @overload
- def __add__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- @overload
- def __add__(self, other: datetime64) -> datetime64: ...
- def __sub__(self, other: Union[timedelta64, int]) -> timedelta64: ...
- @overload
- def __truediv__(self, other: timedelta64) -> float: ...
- @overload
- def __truediv__(self, other: float) -> timedelta64: ...
+ def __add__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __radd__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __sub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __rsub__(self, other: Union[timedelta64, _IntLike, _BoolLike]) -> timedelta64: ...
+ def __mul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ...
+ def __rmul__(self, other: Union[_FloatLike, _BoolLike]) -> timedelta64: ...
+ __truediv__: _TD64Div[float64]
+ __floordiv__: _TD64Div[signedinteger]
+ def __rtruediv__(self, other: timedelta64) -> float64: ...
+ def __rfloordiv__(self, other: timedelta64) -> signedinteger: ...
def __mod__(self, other: timedelta64) -> timedelta64: ...
-class unsignedinteger(integer): ... # type: ignore
+class unsignedinteger(integer): # type: ignore
+ # NOTE: `uint64 + signedinteger -> float64`
+ __add__: _UnsignedIntOp
+ __radd__: _UnsignedIntOp
+ __sub__: _UnsignedIntOp
+ __rsub__: _UnsignedIntOp
+ __mul__: _UnsignedIntOp
+ __rmul__: _UnsignedIntOp
+ __floordiv__: _UnsignedIntOp
+ __rfloordiv__: _UnsignedIntOp
+ __pow__: _UnsignedIntOp
+ __rpow__: _UnsignedIntOp
class uint8(unsignedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
@@ -642,7 +1477,20 @@ class uint64(unsignedinteger):
def __init__(self, __value: _IntValue = ...) -> None: ...
class inexact(number): ... # type: ignore
-class floating(inexact): ... # type: ignore
+
+class floating(inexact): # type: ignore
+ __add__: _FloatOp
+ __radd__: _FloatOp
+ __sub__: _FloatOp
+ __rsub__: _FloatOp
+ __mul__: _FloatOp
+ __rmul__: _FloatOp
+ __truediv__: _FloatOp
+ __rtruediv__: _FloatOp
+ __floordiv__: _FloatOp
+ __rfloordiv__: _FloatOp
+ __pow__: _FloatOp
+ __rpow__: _FloatOp
_FloatType = TypeVar('_FloatType', bound=floating)
@@ -661,6 +1509,18 @@ class complexfloating(inexact, Generic[_FloatType]): # type: ignore
@property
def imag(self) -> _FloatType: ... # type: ignore[override]
def __abs__(self) -> _FloatType: ... # type: ignore[override]
+ __add__: _ComplexOp
+ __radd__: _ComplexOp
+ __sub__: _ComplexOp
+ __rsub__: _ComplexOp
+ __mul__: _ComplexOp
+ __rmul__: _ComplexOp
+ __truediv__: _ComplexOp
+ __rtruediv__: _ComplexOp
+ __floordiv__: _ComplexOp
+ __rfloordiv__: _ComplexOp
+ __pow__: _ComplexOp
+ __rpow__: _ComplexOp
class complex64(complexfloating[float32]):
def __init__(self, __value: _ComplexValue = ...) -> None: ...
@@ -671,14 +1531,20 @@ class complex128(complexfloating[float64], complex):
class flexible(generic): ... # type: ignore
class void(flexible):
- def __init__(self, __value: Union[int, integer, bool_, bytes]): ...
+ def __init__(self, __value: Union[_IntLike, _BoolLike, bytes]): ...
@property
def real(self: _ArraySelf) -> _ArraySelf: ...
@property
def imag(self: _ArraySelf) -> _ArraySelf: ...
+ def setfield(
+ self, val: ArrayLike, dtype: DtypeLike, offset: int = ...
+ ) -> None: ...
class character(flexible): ... # type: ignore
+# NOTE: Most `np.bytes_` / `np.str_` methods return their
+# builtin `bytes` / `str` counterpart
+
class bytes_(character, bytes):
@overload
def __init__(self, __value: object = ...) -> None: ...
@@ -720,13 +1586,6 @@ def zeros(
*,
like: ArrayLike = ...,
) -> ndarray: ...
-def ones(
- shape: _ShapeLike,
- dtype: DtypeLike = ...,
- order: _OrderCF = ...,
- *,
- like: ArrayLike = ...,
-) -> ndarray: ...
def empty(
shape: _ShapeLike,
dtype: DtypeLike = ...,
@@ -734,111 +1593,6 @@ def empty(
*,
like: ArrayLike = ...,
) -> ndarray: ...
-def zeros_like(
- a: ArrayLike,
- dtype: DtypeLike = ...,
- order: _OrderKACF = ...,
- subok: bool = ...,
- shape: Optional[Union[int, Sequence[int]]] = ...,
-) -> ndarray: ...
-def ones_like(
- a: ArrayLike,
- dtype: DtypeLike = ...,
- order: _OrderKACF = ...,
- subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
-) -> ndarray: ...
-def empty_like(
- a: ArrayLike,
- dtype: DtypeLike = ...,
- order: _OrderKACF = ...,
- subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
-) -> ndarray: ...
-def full(
- shape: _ShapeLike,
- fill_value: Any,
- dtype: DtypeLike = ...,
- order: _OrderCF = ...,
- *,
- like: ArrayLike = ...,
-) -> ndarray: ...
-def full_like(
- a: ArrayLike,
- fill_value: Any,
- dtype: DtypeLike = ...,
- order: _OrderKACF = ...,
- subok: bool = ...,
- shape: Optional[_ShapeLike] = ...,
-) -> ndarray: ...
-def count_nonzero(
- a: ArrayLike, axis: Optional[Union[int, Tuple[int], Tuple[int, int]]] = ...
-) -> Union[int, ndarray]: ...
-def isfortran(a: ndarray) -> bool: ...
-def argwhere(a: ArrayLike) -> ndarray: ...
-def flatnonzero(a: ArrayLike) -> ndarray: ...
-
-_CorrelateMode = Literal["valid", "same", "full"]
-
-def correlate(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
-def convolve(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
-def outer(a: ArrayLike, b: ArrayLike, out: ndarray = ...) -> ndarray: ...
-def tensordot(
- a: ArrayLike,
- b: ArrayLike,
- axes: Union[
- int, Tuple[int, int], Tuple[Tuple[int, int], ...], Tuple[List[int, int], ...]
- ] = ...,
-) -> ndarray: ...
-def roll(
- a: ArrayLike,
- shift: Union[int, Tuple[int, ...]],
- axis: Optional[Union[int, Tuple[int, ...]]] = ...,
-) -> ndarray: ...
-def rollaxis(a: ArrayLike, axis: int, start: int = ...) -> ndarray: ...
-def moveaxis(
- a: ndarray,
- source: Union[int, Sequence[int]],
- destination: Union[int, Sequence[int]],
-) -> ndarray: ...
-def cross(
- a: ArrayLike,
- b: ArrayLike,
- axisa: int = ...,
- axisb: int = ...,
- axisc: int = ...,
- axis: Optional[int] = ...,
-) -> ndarray: ...
-def indices(
- dimensions: Sequence[int], dtype: dtype = ..., sparse: bool = ...
-) -> Union[ndarray, Tuple[ndarray, ...]]: ...
-def fromfunction(
- function: Callable,
- shape: Tuple[int, int],
- *,
- like: ArrayLike = ...,
- **kwargs,
-) -> Any: ...
-def isscalar(element: Any) -> bool: ...
-def binary_repr(num: int, width: Optional[int] = ...) -> str: ...
-def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ...
-def identity(n: int, dtype: DtypeLike = ..., *, like: ArrayLike = ...) -> ndarray: ...
-def allclose(
- a: ArrayLike,
- b: ArrayLike,
- rtol: float = ...,
- atol: float = ...,
- equal_nan: bool = ...,
-) -> bool: ...
-def isclose(
- a: ArrayLike,
- b: ArrayLike,
- rtol: float = ...,
- atol: float = ...,
- equal_nan: bool = ...,
-) -> Union[bool_, ndarray]: ...
-def array_equal(a1: ArrayLike, a2: ArrayLike) -> bool: ...
-def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
#
# Constants
@@ -1080,7 +1834,3 @@ def sctype2char(sctype: object) -> str: ...
def find_common_type(
array_types: Sequence[DtypeLike], scalar_types: Sequence[DtypeLike]
) -> dtype: ...
-
-_NumberLike = Union[int, float, complex, number, bool_]
-_IntLike = Union[int, integer]
-_BoolLike = Union[bool, bool_]
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index a0769cc89..e8d3a381b 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -96,6 +96,7 @@ from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
+from . import _add_newdocs_scalars
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
diff --git a/numpy/core/__init__.pyi b/numpy/core/__init__.pyi
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/core/__init__.pyi
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 879b3645d..aa858761d 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -9,8 +9,6 @@ NOTE: Many of the methods of ndarray have corresponding functions.
"""
-from numpy.core import numerictypes as _numerictypes
-from numpy.core import dtype
from numpy.core.function_base import add_newdoc
from numpy.core.overrides import array_function_like_doc
@@ -2618,7 +2616,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
- Return indices of the minimum values along the given axis of `a`.
+ Return indices of the minimum values along the given axis.
Refer to `numpy.argmin` for detailed documentation.
@@ -6283,183 +6281,3 @@ add_newdoc('numpy.core.numerictypes', 'character',
Abstract base class of all character string scalar types.
""")
-
-
-##############################################################################
-#
-# Documentation for concrete scalar classes
-#
-##############################################################################
-
-def numeric_type_aliases(aliases):
- def type_aliases_gen():
- for alias, doc in aliases:
- try:
- alias_type = getattr(_numerictypes, alias)
- except AttributeError:
- # The set of aliases that actually exist varies between platforms
- pass
- else:
- yield (alias_type, alias, doc)
- return list(type_aliases_gen())
-
-
-possible_aliases = numeric_type_aliases([
- ('int8', '8-bit signed integer (-128 to 127)'),
- ('int16', '16-bit signed integer (-32768 to 32767)'),
- ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
- ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
- ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
- ('uint8', '8-bit unsigned integer (0 to 255)'),
- ('uint16', '16-bit unsigned integer (0 to 65535)'),
- ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
- ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
- ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
- ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
- ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
- ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
- ('float96', '96-bit extended-precision floating-point number type'),
- ('float128', '128-bit extended-precision floating-point number type'),
- ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
- ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
- ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
- ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
- ])
-
-
-def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
- o = getattr(_numerictypes, obj)
-
- character_code = dtype(o).char
- canonical_name_doc = "" if obj == o.__name__ else "Canonical name: ``np.{}``.\n ".format(obj)
- alias_doc = ''.join("Alias: ``np.{}``.\n ".format(alias) for alias in fixed_aliases)
- alias_doc += ''.join("Alias *on this platform*: ``np.{}``: {}.\n ".format(alias, doc)
- for (alias_type, alias, doc) in possible_aliases if alias_type is o)
-
- docstring = """
- {doc}
- Character code: ``'{character_code}'``.
- {canonical_name_doc}{alias_doc}
- """.format(doc=doc.strip(), character_code=character_code,
- canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
-
- add_newdoc('numpy.core.numerictypes', obj, docstring)
-
-
-add_newdoc_for_scalar_type('bool_', ['bool8'],
- """
- Boolean type (True or False), stored as a byte.
- """)
-
-add_newdoc_for_scalar_type('byte', [],
- """
- Signed integer type, compatible with C ``char``.
- """)
-
-add_newdoc_for_scalar_type('short', [],
- """
- Signed integer type, compatible with C ``short``.
- """)
-
-add_newdoc_for_scalar_type('intc', [],
- """
- Signed integer type, compatible with C ``int``.
- """)
-
-add_newdoc_for_scalar_type('int_', [],
- """
- Signed integer type, compatible with Python `int` anc C ``long``.
- """)
-
-add_newdoc_for_scalar_type('longlong', [],
- """
- Signed integer type, compatible with C ``long long``.
- """)
-
-add_newdoc_for_scalar_type('ubyte', [],
- """
- Unsigned integer type, compatible with C ``unsigned char``.
- """)
-
-add_newdoc_for_scalar_type('ushort', [],
- """
- Unsigned integer type, compatible with C ``unsigned short``.
- """)
-
-add_newdoc_for_scalar_type('uintc', [],
- """
- Unsigned integer type, compatible with C ``unsigned int``.
- """)
-
-add_newdoc_for_scalar_type('uint', [],
- """
- Unsigned integer type, compatible with C ``unsigned long``.
- """)
-
-add_newdoc_for_scalar_type('ulonglong', [],
- """
- Signed integer type, compatible with C ``unsigned long long``.
- """)
-
-add_newdoc_for_scalar_type('half', [],
- """
- Half-precision floating-point number type.
- """)
-
-add_newdoc_for_scalar_type('single', [],
- """
- Single-precision floating-point number type, compatible with C ``float``.
- """)
-
-add_newdoc_for_scalar_type('double', ['float_'],
- """
- Double-precision floating-point number type, compatible with Python `float`
- and C ``double``.
- """)
-
-add_newdoc_for_scalar_type('longdouble', ['longfloat'],
- """
- Extended-precision floating-point number type, compatible with C
- ``long double`` but not necessarily with IEEE 754 quadruple-precision.
- """)
-
-add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
- """
- Complex number type composed of two single-precision floating-point
- numbers.
- """)
-
-add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
- """
- Complex number type composed of two double-precision floating-point
- numbers, compatible with Python `complex`.
- """)
-
-add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
- """
- Complex number type composed of two extended-precision floating-point
- numbers.
- """)
-
-add_newdoc_for_scalar_type('object_', [],
- """
- Any Python object.
- """)
-
-# TODO: work out how to put this on the base class, np.floating
-for float_name in ('half', 'single', 'double', 'longdouble'):
- add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
- """
- {ftype}.as_integer_ratio() -> (int, int)
-
- Return a pair of integers, whose ratio is exactly equal to the original
- floating point number, and with a positive denominator.
- Raise OverflowError on infinities and a ValueError on NaNs.
-
- >>> np.{ftype}(10.0).as_integer_ratio()
- (10, 1)
- >>> np.{ftype}(0.0).as_integer_ratio()
- (0, 1)
- >>> np.{ftype}(-.25).as_integer_ratio()
- (-1, 4)
- """.format(ftype=float_name)))
diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py
new file mode 100644
index 000000000..c367c18ed
--- /dev/null
+++ b/numpy/core/_add_newdocs_scalars.py
@@ -0,0 +1,195 @@
+"""
+This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
+our sphinx ``conf.py`` during doc builds, where we want to avoid showing
+platform-dependent information.
+"""
+from numpy.core import dtype
+from numpy.core import numerictypes as _numerictypes
+from numpy.core.function_base import add_newdoc
+
+##############################################################################
+#
+# Documentation for concrete scalar classes
+#
+##############################################################################
+
+def numeric_type_aliases(aliases):
+ def type_aliases_gen():
+ for alias, doc in aliases:
+ try:
+ alias_type = getattr(_numerictypes, alias)
+ except AttributeError:
+ # The set of aliases that actually exist varies between platforms
+ pass
+ else:
+ yield (alias_type, alias, doc)
+ return list(type_aliases_gen())
+
+
+possible_aliases = numeric_type_aliases([
+ ('int8', '8-bit signed integer (``-128`` to ``127``)'),
+ ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
+ ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
+ ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
+ ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
+ ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
+ ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
+ ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
+ ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
+ ('float96', '96-bit extended-precision floating-point number type'),
+ ('float128', '128-bit extended-precision floating-point number type'),
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
+ ])
+
+
+def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
+ # note: `:field: value` is rST syntax which renders as field lists.
+ o = getattr(_numerictypes, obj)
+
+ character_code = dtype(o).char
+ canonical_name_doc = "" if obj == o.__name__ else ":Canonical name: `numpy.{}`\n ".format(obj)
+ alias_doc = ''.join(":Alias: `numpy.{}`\n ".format(alias) for alias in fixed_aliases)
+ alias_doc += ''.join(":Alias on this platform: `numpy.{}`: {}.\n ".format(alias, doc)
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
+ docstring = """
+ {doc}
+
+ :Character code: ``'{character_code}'``
+ {canonical_name_doc}{alias_doc}
+ """.format(doc=doc.strip(), character_code=character_code,
+ canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
+
+ add_newdoc('numpy.core.numerictypes', obj, docstring)
+
+
+add_newdoc_for_scalar_type('bool_', ['bool8'],
+ """
+ Boolean type (True or False), stored as a byte.
+
+ .. warning::
+
+ The :class:`bool_` type is not a subclass of the :class:`int_` type
+ (the :class:`bool_` is not even a number type). This is different
+ than Python's default implementation of :class:`bool` as a
+ sub-class of :class:`int`.
+ """)
+
+add_newdoc_for_scalar_type('byte', [],
+ """
+ Signed integer type, compatible with C ``char``.
+ """)
+
+add_newdoc_for_scalar_type('short', [],
+ """
+ Signed integer type, compatible with C ``short``.
+ """)
+
+add_newdoc_for_scalar_type('intc', [],
+ """
+ Signed integer type, compatible with C ``int``.
+ """)
+
+add_newdoc_for_scalar_type('int_', [],
+ """
+ Signed integer type, compatible with Python `int` and C ``long``.
+ """)
+
+add_newdoc_for_scalar_type('longlong', [],
+ """
+ Signed integer type, compatible with C ``long long``.
+ """)
+
+add_newdoc_for_scalar_type('ubyte', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned char``.
+ """)
+
+add_newdoc_for_scalar_type('ushort', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned short``.
+ """)
+
+add_newdoc_for_scalar_type('uintc', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned int``.
+ """)
+
+add_newdoc_for_scalar_type('uint', [],
+ """
+ Unsigned integer type, compatible with C ``unsigned long``.
+ """)
+
+add_newdoc_for_scalar_type('ulonglong', [],
+ """
+    Unsigned integer type, compatible with C ``unsigned long long``.
+ """)
+
+add_newdoc_for_scalar_type('half', [],
+ """
+ Half-precision floating-point number type.
+ """)
+
+add_newdoc_for_scalar_type('single', [],
+ """
+ Single-precision floating-point number type, compatible with C ``float``.
+ """)
+
+add_newdoc_for_scalar_type('double', ['float_'],
+ """
+ Double-precision floating-point number type, compatible with Python `float`
+ and C ``double``.
+ """)
+
+add_newdoc_for_scalar_type('longdouble', ['longfloat'],
+ """
+ Extended-precision floating-point number type, compatible with C
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
+ """)
+
+add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
+ """
+ Complex number type composed of two single-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
+ """
+ Complex number type composed of two double-precision floating-point
+ numbers, compatible with Python `complex`.
+ """)
+
+add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
+ """
+ Complex number type composed of two extended-precision floating-point
+ numbers.
+ """)
+
+add_newdoc_for_scalar_type('object_', [],
+ """
+ Any Python object.
+ """)
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+ add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
+ """
+ {ftype}.as_integer_ratio() -> (int, int)
+
+ Return a pair of integers, whose ratio is exactly equal to the original
+ floating point number, and with a positive denominator.
+ Raise OverflowError on infinities and a ValueError on NaNs.
+
+ >>> np.{ftype}(10.0).as_integer_ratio()
+ (10, 1)
+ >>> np.{ftype}(0.0).as_integer_ratio()
+ (0, 1)
+ >>> np.{ftype}(-.25).as_integer_ratio()
+ (-1, 4)
+ """.format(ftype=float_name)))
diff --git a/numpy/core/_asarray.pyi b/numpy/core/_asarray.pyi
new file mode 100644
index 000000000..e074d69d2
--- /dev/null
+++ b/numpy/core/_asarray.pyi
@@ -0,0 +1,77 @@
+import sys
+from typing import TypeVar, Union, Iterable, overload
+
+from numpy import ndarray, _OrderKACF
+from numpy.typing import ArrayLike, DtypeLike
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from typing_extensions import Literal
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray)
+
+def asarray(
+ a: object,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+@overload
+def asanyarray(
+ a: _ArrayType,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> _ArrayType: ...
+@overload
+def asanyarray(
+ a: object,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+def ascontiguousarray(
+ a: object, dtype: DtypeLike = ..., *, like: ArrayLike = ...
+) -> ndarray: ...
+def asfortranarray(
+ a: object, dtype: DtypeLike = ..., *, like: ArrayLike = ...
+) -> ndarray: ...
+
+_Requirements = Literal[
+ "C", "C_CONTIGUOUS", "CONTIGUOUS",
+ "F", "F_CONTIGUOUS", "FORTRAN",
+ "A", "ALIGNED",
+ "W", "WRITEABLE",
+ "O", "OWNDATA"
+]
+_E = Literal["E", "ENSUREARRAY"]
+_RequirementsWithE = Union[_Requirements, _E]
+
+@overload
+def require(
+ a: _ArrayType,
+ dtype: None = ...,
+ requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> _ArrayType: ...
+@overload
+def require(
+ a: object,
+ dtype: DtypeLike = ...,
+ requirements: Union[_E, Iterable[_RequirementsWithE]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
+@overload
+def require(
+ a: object,
+ dtype: DtypeLike = ...,
+ requirements: Union[None, _Requirements, Iterable[_Requirements]] = ...,
+ *,
+ like: ArrayLike = ...
+) -> ndarray: ...
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index f65f4015c..e0942beca 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -1062,6 +1062,17 @@ def einsum(*operands, out=None, optimize=False, **kwargs):
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
+ einops:
+        A similar verbose interface is provided by the
+ `einops <https://github.com/arogozhnikov/einops>`_ package to cover
+ additional operations: transpose, reshape/flatten, repeat/tile,
+ squeeze/unsqueeze and reductions.
+
+ opt_einsum:
+ `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
+ optimizes contraction order for einsum-like expressions
+        in a backend-agnostic manner.
+
Notes
-----
.. versionadded:: 1.6.0
diff --git a/numpy/core/fromnumeric.pyi b/numpy/core/fromnumeric.pyi
index 7ad772b07..3167d12b9 100644
--- a/numpy/core/fromnumeric.pyi
+++ b/numpy/core/fromnumeric.pyi
@@ -10,22 +10,28 @@ from numpy import (
generic,
_OrderKACF,
_OrderACF,
+ _ArrayLikeBool,
+ _ArrayLikeIntOrBool,
+ _ModeKind,
+ _PartitionKind,
+ _SortKind,
+ _SortSide,
+)
+from numpy.typing import (
+ DtypeLike,
+ ArrayLike,
+ _ShapeLike,
+ _Shape,
_IntLike,
_BoolLike,
_NumberLike,
)
-from numpy.typing import DtypeLike, ArrayLike, _ShapeLike, _Shape
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
-_Mode = Literal["raise", "wrap", "clip"]
-_PartitionKind = Literal["introselect"]
-_SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
-_Side = Literal["left", "right"]
-
# Various annotations for scalars
# While dt.datetime and dt.timedelta are not technically part of NumPy,
@@ -44,21 +50,6 @@ _ScalarGenericDT = TypeVar(
_Number = TypeVar("_Number", bound=number)
-# An array-like object consisting of integers
-_IntOrBool = Union[_IntLike, _BoolLike]
-_ArrayLikeIntNested = ArrayLike # TODO: wait for support for recursive types
-_ArrayLikeBoolNested = ArrayLike # TODO: wait for support for recursive types
-
-# Integers and booleans can generally be used interchangeably
-_ArrayLikeIntOrBool = Union[
- _IntOrBool,
- ndarray,
- Sequence[_IntOrBool],
- Sequence[_ArrayLikeIntNested],
- Sequence[_ArrayLikeBoolNested],
-]
-_ArrayLikeBool = Union[_BoolLike, Sequence[_BoolLike], ndarray]
-
# The signature of take() follows a common theme with its overloads:
# 1. A generic comes in; the same generic comes out
# 2. A scalar comes in; a generic comes out
@@ -70,7 +61,7 @@ def take(
indices: int,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> _ScalarGenericDT: ...
@overload
def take(
@@ -78,7 +69,7 @@ def take(
indices: int,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> _ScalarNumpy: ...
@overload
def take(
@@ -86,7 +77,7 @@ def take(
indices: int,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> _ScalarNumpy: ...
@overload
def take(
@@ -94,7 +85,7 @@ def take(
indices: _ArrayLikeIntOrBool,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> Union[_ScalarNumpy, ndarray]: ...
def reshape(a: ArrayLike, newshape: _ShapeLike, order: _OrderACF = ...) -> ndarray: ...
@overload
@@ -102,24 +93,24 @@ def choose(
a: _ScalarIntOrBool,
choices: ArrayLike,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> _ScalarIntOrBool: ...
@overload
def choose(
- a: _IntOrBool, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _Mode = ...
+ a: Union[_IntLike, _BoolLike], choices: ArrayLike, out: Optional[ndarray] = ..., mode: _ModeKind = ...
) -> Union[integer, bool_]: ...
@overload
def choose(
a: _ArrayLikeIntOrBool,
choices: ArrayLike,
out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ mode: _ModeKind = ...,
) -> ndarray: ...
def repeat(
a: ArrayLike, repeats: _ArrayLikeIntOrBool, axis: Optional[int] = ...
) -> ndarray: ...
def put(
- a: ndarray, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _Mode = ...
+ a: ndarray, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _ModeKind = ...
) -> None: ...
def swapaxes(a: ArrayLike, axis1: int, axis2: int) -> ndarray: ...
def transpose(
@@ -184,14 +175,14 @@ def argmin(
def searchsorted(
a: ArrayLike,
v: _Scalar,
- side: _Side = ...,
+ side: _SortSide = ...,
sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
) -> integer: ...
@overload
def searchsorted(
a: ArrayLike,
v: ArrayLike,
- side: _Side = ...,
+ side: _SortSide = ...,
sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
) -> ndarray: ...
def resize(a: ArrayLike, new_shape: _ShapeLike) -> ndarray: ...
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index c6ebbd5f5..705712253 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,8 +1,8 @@
import sys
from typing import overload, Tuple, Union, Sequence, Any
-from numpy import ndarray, inexact, _NumberLike
-from numpy.typing import ArrayLike, DtypeLike, _SupportsArray
+from numpy import ndarray, inexact
+from numpy.typing import ArrayLike, DtypeLike, _SupportsArray, _NumberLike
if sys.version_info >= (3, 8):
from typing import SupportsIndex, Literal
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 540d1ea9b..6b335f1a6 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -1100,7 +1100,7 @@ def putmask(a, mask, values):
Parameters
----------
- a : array_like
+ a : ndarray
Target array.
mask : array_like
Boolean mask array. It has to be the same shape as `a`.
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index a023bf0da..25235f738 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -409,7 +409,7 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
- array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
diff --git a/numpy/core/numeric.pyi b/numpy/core/numeric.pyi
new file mode 100644
index 000000000..19720fbdc
--- /dev/null
+++ b/numpy/core/numeric.pyi
@@ -0,0 +1,117 @@
+from typing import Any, Callable, List, Literal, Optional, Sequence, Tuple, Union
+
+from numpy import ndarray, dtype, bool_, _OrderKACF, _OrderCF
+from numpy.typing import ArrayLike, DtypeLike, _ShapeLike
+
+def zeros_like(
+ a: ArrayLike,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: Optional[Union[int, Sequence[int]]] = ...,
+) -> ndarray: ...
+def ones(
+ shape: _ShapeLike,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
+) -> ndarray: ...
+def ones_like(
+ a: ArrayLike,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: Optional[_ShapeLike] = ...,
+) -> ndarray: ...
+def empty_like(
+ a: ArrayLike,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: Optional[_ShapeLike] = ...,
+) -> ndarray: ...
+def full(
+ shape: _ShapeLike,
+ fill_value: Any,
+ dtype: DtypeLike = ...,
+ order: _OrderCF = ...,
+ *,
+ like: ArrayLike = ...,
+) -> ndarray: ...
+def full_like(
+ a: ArrayLike,
+ fill_value: Any,
+ dtype: DtypeLike = ...,
+ order: _OrderKACF = ...,
+ subok: bool = ...,
+ shape: Optional[_ShapeLike] = ...,
+) -> ndarray: ...
+def count_nonzero(
+ a: ArrayLike, axis: Optional[Union[int, Tuple[int], Tuple[int, int]]] = ...
+) -> Union[int, ndarray]: ...
+def isfortran(a: ndarray) -> bool: ...
+def argwhere(a: ArrayLike) -> ndarray: ...
+def flatnonzero(a: ArrayLike) -> ndarray: ...
+
+_CorrelateMode = Literal["valid", "same", "full"]
+
+def correlate(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
+def convolve(a: ArrayLike, v: ArrayLike, mode: _CorrelateMode = ...) -> ndarray: ...
+def outer(a: ArrayLike, b: ArrayLike, out: ndarray = ...) -> ndarray: ...
+def tensordot(
+ a: ArrayLike,
+ b: ArrayLike,
+ axes: Union[
+        int, Tuple[int, int], Tuple[Tuple[int, int], ...], Tuple[List[int], ...]
+ ] = ...,
+) -> ndarray: ...
+def roll(
+ a: ArrayLike,
+ shift: Union[int, Tuple[int, ...]],
+ axis: Optional[Union[int, Tuple[int, ...]]] = ...,
+) -> ndarray: ...
+def rollaxis(a: ArrayLike, axis: int, start: int = ...) -> ndarray: ...
+def moveaxis(
+ a: ndarray,
+ source: Union[int, Sequence[int]],
+ destination: Union[int, Sequence[int]],
+) -> ndarray: ...
+def cross(
+ a: ArrayLike,
+ b: ArrayLike,
+ axisa: int = ...,
+ axisb: int = ...,
+ axisc: int = ...,
+ axis: Optional[int] = ...,
+) -> ndarray: ...
+def indices(
+ dimensions: Sequence[int], dtype: dtype = ..., sparse: bool = ...
+) -> Union[ndarray, Tuple[ndarray, ...]]: ...
+def fromfunction(
+ function: Callable,
+ shape: Tuple[int, int],
+ *,
+ like: ArrayLike = ...,
+ **kwargs,
+) -> Any: ...
+def isscalar(element: Any) -> bool: ...
+def binary_repr(num: int, width: Optional[int] = ...) -> str: ...
+def base_repr(number: int, base: int = ..., padding: int = ...) -> str: ...
+def identity(n: int, dtype: DtypeLike = ..., *, like: ArrayLike = ...) -> ndarray: ...
+def allclose(
+ a: ArrayLike,
+ b: ArrayLike,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+) -> bool: ...
+def isclose(
+ a: ArrayLike,
+ b: ArrayLike,
+ rtol: float = ...,
+ atol: float = ...,
+ equal_nan: bool = ...,
+) -> Union[bool_, ndarray]: ...
+def array_equal(a1: ArrayLike, a2: ArrayLike) -> bool: ...
+def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index 67abcae24..c55f6bdb4 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -64,19 +64,22 @@ broadcast_strides(int ndim, npy_intp const *shape,
return 0;
broadcast_error: {
- PyObject *errmsg;
-
- errmsg = PyUnicode_FromFormat("could not broadcast %s from shape ",
- strides_name);
- PyUString_ConcatAndDel(&errmsg,
- build_shape_string(strides_ndim, strides_shape));
- PyUString_ConcatAndDel(&errmsg,
- PyUnicode_FromString(" into shape "));
- PyUString_ConcatAndDel(&errmsg,
- build_shape_string(ndim, shape));
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyObject *shape1 = convert_shape_to_string(strides_ndim,
+ strides_shape, "");
+ if (shape1 == NULL) {
+ return -1;
+ }
+ PyObject *shape2 = convert_shape_to_string(ndim, shape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
+ return -1;
+ }
+ PyErr_Format(PyExc_ValueError,
+ "could not broadcast %s from shape %S into shape %S",
+ strides_name, shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return -1;
}
}
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 0bf6958cd..5b6b6dc78 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -619,6 +619,71 @@ fromstring_null_term_c_api(PyObject *dummy, PyObject *byte_obj)
}
+/*
+ * Create a custom field dtype from an existing void one (and test some errors).
+ * The dtypes created by this function may be not be usable (or even crash
+ * while using).
+ */
+static PyObject *
+create_custom_field_dtype(PyObject *NPY_UNUSED(mod), PyObject *args)
+{
+ PyArray_Descr *dtype;
+ PyTypeObject *scalar_type;
+ PyTypeObject *original_type = NULL;
+ int error_path;
+
+ if (!PyArg_ParseTuple(args, "O!O!i",
+ &PyArrayDescr_Type, &dtype,
+ &PyType_Type, &scalar_type,
+ &error_path)) {
+ return NULL;
+ }
+ /* check that the result should be more or less valid */
+ if (dtype->type_num != NPY_VOID || dtype->fields == NULL ||
+ !PyDict_CheckExact(dtype->fields) ||
+ PyTuple_Size(dtype->names) != 1 ||
+ !PyDataType_REFCHK(dtype) ||
+ dtype->elsize != sizeof(PyObject *)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Bad dtype passed to test function, must be an object "
+ "containing void with a single field.");
+ return NULL;
+ }
+
+ /* Copy and then appropriate this dtype */
+ original_type = Py_TYPE(dtype);
+ dtype = PyArray_DescrNew(dtype);
+ if (dtype == NULL) {
+ return NULL;
+ }
+
+ Py_INCREF(scalar_type);
+ Py_SETREF(dtype->typeobj, scalar_type);
+ if (error_path == 1) {
+ /* Test that we reject this, if fields was not already set */
+ Py_SETREF(dtype->fields, NULL);
+ }
+ else if (error_path == 2) {
+ /*
+ * Test that we reject this if the type is not set to something that
+ * we are pretty sure can be safely replaced.
+ */
+ Py_SET_TYPE(dtype, scalar_type);
+ }
+ else if (error_path != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "invalid error argument to test function.");
+ }
+ if (PyArray_RegisterDataType(dtype) < 0) {
+ /* Fix original type in the error_path == 2 case. */
+ Py_SET_TYPE(dtype, original_type);
+ return NULL;
+ }
+ Py_INCREF(dtype);
+ return (PyObject *)dtype;
+}
+
+
/* check no elison for avoided increfs */
static PyObject *
incref_elide(PyObject *dummy, PyObject *args)
@@ -2090,6 +2155,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"fromstring_null_term_c_api",
fromstring_null_term_c_api,
METH_O, NULL},
+ {"create_custom_field_dtype",
+ create_custom_field_dtype,
+ METH_VARARGS, NULL},
{"incref_elide",
incref_elide,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index aae8d5141..64a06d58b 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -538,7 +538,7 @@ npy_new_coercion_cache(
cache = _coercion_cache_cache[_coercion_cache_num];
}
else {
- cache = PyObject_MALLOC(sizeof(coercion_cache_obj));
+ cache = PyMem_Malloc(sizeof(coercion_cache_obj));
}
if (cache == NULL) {
PyErr_NoMemory();
@@ -570,7 +570,7 @@ npy_unlink_coercion_cache(coercion_cache_obj *current)
_coercion_cache_num++;
}
else {
- PyObject_FREE(current);
+ PyMem_Free(current);
}
return next;
}
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
index 613fe6b3f..8e3bde78f 100644
--- a/numpy/core/src/multiarray/arrayfunction_override.c
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -388,15 +388,18 @@ array_implement_c_array_function_creation(
PyObject *numpy_module = PyImport_Import(npy_ma_str_numpy);
if (numpy_module == NULL) {
+ Py_DECREF(relevant_args);
return NULL;
}
PyObject *public_api = PyObject_GetAttrString(numpy_module, function_name);
Py_DECREF(numpy_module);
if (public_api == NULL) {
+ Py_DECREF(relevant_args);
return NULL;
}
if (!PyCallable_Check(public_api)) {
+ Py_DECREF(relevant_args);
Py_DECREF(public_api);
return PyErr_Format(PyExc_RuntimeError,
"numpy.%s is not callable.",
@@ -406,6 +409,7 @@ array_implement_c_array_function_creation(
PyObject* result = array_implement_array_function_internal(
public_api, relevant_args, args, kwargs);
+ Py_DECREF(relevant_args);
Py_DECREF(public_api);
return result;
}
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 6af71f351..841ed799d 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -233,7 +233,6 @@ NPY_NO_EXPORT PyObject *
convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
{
npy_intp i;
- PyObject *ret, *tmp;
/*
* Negative dimension indicates "newaxis", which can
@@ -245,14 +244,14 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
if (i == n) {
return PyUnicode_FromFormat("()%s", ending);
}
- else {
- ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
- if (ret == NULL) {
- return NULL;
- }
- }
+ PyObject *ret = PyUnicode_FromFormat("%" NPY_INTP_FMT, vals[i++]);
+ if (ret == NULL) {
+ return NULL;
+ }
for (; i < n; ++i) {
+ PyObject *tmp;
+
if (vals[i] < 0) {
tmp = PyUnicode_FromString(",newaxis");
}
@@ -264,19 +263,19 @@ convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
return NULL;
}
- PyUString_ConcatAndDel(&ret, tmp);
+ Py_SETREF(ret, PyUnicode_Concat(ret, tmp));
+ Py_DECREF(tmp);
if (ret == NULL) {
return NULL;
}
}
if (i == 1) {
- tmp = PyUnicode_FromFormat(",)%s", ending);
+ Py_SETREF(ret, PyUnicode_FromFormat("(%S,)%s", ret, ending));
}
else {
- tmp = PyUnicode_FromFormat(")%s", ending);
+ Py_SETREF(ret, PyUnicode_FromFormat("(%S)%s", ret, ending));
}
- PyUString_ConcatAndDel(&ret, tmp);
return ret;
}
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index f543d02d0..b09ec9f8e 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -610,6 +610,7 @@ PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache) {
PyErr_SetString(PyExc_RuntimeError,
"Inconsistent object during array creation? "
"Content of sequences changed (cache not consumed).");
+ npy_free_coercion_cache(cache);
return -1;
}
return 0;
@@ -1492,6 +1493,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
PyErr_SetString(PyExc_TypeError,
"WRITEBACKIFCOPY used for non-array input.");
Py_DECREF(dtype);
+ npy_free_coercion_cache(cache);
return NULL;
}
@@ -1500,6 +1502,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
&PyArray_Type, dtype, ndim, dims, NULL, NULL,
flags&NPY_ARRAY_F_CONTIGUOUS, NULL);
if (ret == NULL) {
+ npy_free_coercion_cache(cache);
return NULL;
}
if (cache == NULL) {
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 257ededae..24a3507bc 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2898,14 +2898,13 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
}
if (PyDataType_ISDATETIME(self) && (metadata != NULL)) {
- PyObject *old_metadata, *errmsg;
+ PyObject *old_metadata;
PyArray_DatetimeMetaData temp_dt_data;
if ((! PyTuple_Check(metadata)) || (PyTuple_Size(metadata) != 2)) {
- errmsg = PyUnicode_FromString("Invalid datetime dtype (metadata, c_metadata): ");
- PyUString_ConcatAndDel(&errmsg, PyObject_Repr(metadata));
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_ValueError,
+ "Invalid datetime dtype (metadata, c_metadata): %R",
+ metadata);
return NULL;
}
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 109f4a225..af14bb7e5 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -467,10 +467,28 @@ object_common_dtype(
NPY_NO_EXPORT int
dtypemeta_wrap_legacy_descriptor(PyArray_Descr *descr)
{
- if (Py_TYPE(descr) != &PyArrayDescr_Type) {
+ int has_type_set = Py_TYPE(descr) == &PyArrayDescr_Type;
+
+ if (!has_type_set) {
+ /* Accept if the type was filled in from an existing builtin dtype */
+ for (int i = 0; i < NPY_NTYPES; i++) {
+ PyArray_Descr *builtin = PyArray_DescrFromType(i);
+ has_type_set = Py_TYPE(descr) == Py_TYPE(builtin);
+ Py_DECREF(builtin);
+ if (has_type_set) {
+ break;
+ }
+ }
+ }
+ if (!has_type_set) {
PyErr_Format(PyExc_RuntimeError,
"During creation/wrapping of legacy DType, the original class "
- "was not PyArrayDescr_Type (it is replaced in this step).");
+ "was not of PyArrayDescr_Type (it is replaced in this step). "
+ "The extension creating a custom DType for type %S must be "
+ "modified to ensure `Py_TYPE(descr) == &PyArrayDescr_Type` or "
+ "that of an existing dtype (with the assumption it is just "
+ "copied over and can be replaced).",
+            descr->typeobj);
return -1;
}
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 0998a6b49..cb5c3823d 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1418,10 +1418,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
else if (tup == NULL){
- PyObject *errmsg = PyUnicode_FromString("no field of name ");
- PyUString_Concat(&errmsg, ind);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ PyErr_Format(PyExc_ValueError, "no field of name %S", ind);
return 0;
}
if (_unpack_field(tup, &fieldtype, &offset) < 0) {
@@ -2345,7 +2342,6 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
int consec_status = -1;
int axis, broadcast_axis;
npy_intp dimension;
- PyObject *errmsg, *tmp;
for (i = 0; i < mit->nd_fancy; i++) {
mit->dimensions[i] = 1;
@@ -2433,35 +2429,38 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
return 0;
- broadcast_error:
+broadcast_error: ;  /* declarations cannot follow labels, add empty statement */
/*
* Attempt to set a meaningful exception. Could also find out
* if a boolean index was converted.
*/
- errmsg = PyUnicode_FromString("shape mismatch: indexing arrays could not "
- "be broadcast together with shapes ");
+ PyObject *errmsg = PyUnicode_FromString("");
if (errmsg == NULL) {
return -1;
}
-
for (i = 0; i < index_num; i++) {
if (!(indices[i].type & HAS_FANCY)) {
continue;
}
- tmp = convert_shape_to_string(
- PyArray_NDIM((PyArrayObject *)indices[i].object),
- PyArray_SHAPE((PyArrayObject *)indices[i].object),
- " ");
+
+ int ndim = PyArray_NDIM((PyArrayObject *)indices[i].object);
+ npy_intp *shape = PyArray_SHAPE((PyArrayObject *)indices[i].object);
+ PyObject *tmp = convert_shape_to_string(ndim, shape, " ");
if (tmp == NULL) {
+ Py_DECREF(errmsg);
return -1;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
+
+ Py_SETREF(errmsg, PyUnicode_Concat(errmsg, tmp));
+ Py_DECREF(tmp);
if (errmsg == NULL) {
return -1;
}
}
- PyErr_SetObject(PyExc_IndexError, errmsg);
+ PyErr_Format(PyExc_IndexError,
+ "shape mismatch: indexing arrays could not "
+ "be broadcast together with shapes %S", errmsg);
Py_DECREF(errmsg);
return -1;
}
@@ -2653,7 +2652,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
npy_uint32 extra_op_flags, PyArrayObject *extra_op,
PyArray_Descr *extra_op_dtype)
{
- PyObject *errmsg, *tmp;
/* For shape reporting on error */
PyArrayObject *original_extra_op = extra_op;
@@ -3183,45 +3181,29 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
goto finish;
broadcast_error:
- errmsg = PyUnicode_FromString("shape mismatch: value array "
- "of shape ");
- if (errmsg == NULL) {
- goto finish;
- }
-
/* Report the shape of the original array if it exists */
if (original_extra_op == NULL) {
original_extra_op = extra_op;
}
- tmp = convert_shape_to_string(PyArray_NDIM(original_extra_op),
- PyArray_DIMS(original_extra_op), " ");
- if (tmp == NULL) {
- goto finish;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ int extra_ndim = PyArray_NDIM(original_extra_op);
+ npy_intp *extra_dims = PyArray_DIMS(original_extra_op);
+ PyObject *shape1 = convert_shape_to_string(extra_ndim, extra_dims, " ");
+ if (shape1 == NULL) {
goto finish;
}
- tmp = PyUnicode_FromString("could not be broadcast to indexing "
- "result of shape ");
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ PyObject *shape2 = convert_shape_to_string(mit->nd, mit->dimensions, "");
+    if (shape2 == NULL) {
+        Py_DECREF(shape1);
        goto finish;
    }
- tmp = convert_shape_to_string(mit->nd, mit->dimensions, "");
- if (tmp == NULL) {
- goto finish;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- goto finish;
- }
+ PyErr_Format(PyExc_ValueError,
+ "shape mismatch: value array of shape %S could not be broadcast "
+ "to indexing result of shape %S", shape1, shape2);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
finish:
Py_XDECREF(extra_op);
@@ -3320,7 +3302,7 @@ PyArray_MapIterArrayCopyIfOverlap(PyArrayObject * a, PyObject * index,
Py_XDECREF(a_copy);
Py_XDECREF(subspace);
Py_XDECREF((PyObject *)mit);
- for (i=0; i < index_num; i++) {
+ for (i = 0; i < index_num; i++) {
Py_XDECREF(indices[i].object);
}
return NULL;
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index ff2b796d2..1aad70dc6 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2296,6 +2296,7 @@ array_fromiter(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
array_function_result = array_implement_c_array_function_creation(
"fromiter", args, keywds);
if (array_function_result != Py_NotImplemented) {
+ Py_DECREF(descr);
return array_function_result;
}
@@ -2942,6 +2943,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
array_function_result = array_implement_c_array_function_creation(
"arange", args, kws);
if (array_function_result != Py_NotImplemented) {
+ Py_XDECREF(typecode);
return array_function_result;
}
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 4bc6d2ca1..b379a28ac 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1750,73 +1750,70 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
return 1;
broadcast_error: {
- PyObject *errmsg, *tmp;
npy_intp remdims[NPY_MAXDIMS];
- char *tmpstr;
if (op_axes == NULL) {
- errmsg = PyUnicode_FromString("operands could not be broadcast "
- "together with shapes ");
- if (errmsg == NULL) {
+ PyObject *shape1 = PyUnicode_FromString("");
+ if (shape1 == NULL) {
return 0;
}
for (iop = 0; iop < nop; ++iop) {
if (op[iop] != NULL) {
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]),
- " ");
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ PyObject *tmp = convert_shape_to_string(ndims, dims, " ");
if (tmp == NULL) {
- Py_DECREF(errmsg);
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
}
}
- if (itershape != NULL) {
- tmp = PyUnicode_FromString("and requested shape ");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- tmp = convert_shape_to_string(ndim, itershape, "");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ if (itershape == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "shapes %S", shape1);
+ Py_DECREF(shape1);
+ return 0;
+ }
+ else {
+ PyObject *shape2 = convert_shape_to_string(ndim, itershape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
-
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "shapes %S and requested shape %S", shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ return 0;
}
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
}
else {
- errmsg = PyUnicode_FromString("operands could not be broadcast "
- "together with remapped shapes "
- "[original->remapped]: ");
+ PyObject *shape1 = PyUnicode_FromString("");
+ if (shape1 == NULL) {
+ return 0;
+ }
for (iop = 0; iop < nop; ++iop) {
if (op[iop] != NULL) {
int *axes = op_axes[iop];
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ char *tmpstr = (axes == NULL) ? " " : "->";
- tmpstr = (axes == NULL) ? " " : "->";
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]),
- tmpstr);
+ PyObject *tmp = convert_shape_to_string(ndims, dims, tmpstr);
if (tmp == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
@@ -1831,80 +1828,83 @@ broadcast_error: {
remdims[idim] = -1;
}
}
- tmp = convert_shape_to_string(ndim, remdims, " ");
+ PyObject *tmp = convert_shape_to_string(ndim, remdims, " ");
if (tmp == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_SETREF(shape1, PyUnicode_Concat(shape1, tmp));
+ Py_DECREF(tmp);
+ if (shape1 == NULL) {
return 0;
}
}
}
}
- if (itershape != NULL) {
- tmp = PyUnicode_FromString("and requested shape ");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- tmp = convert_shape_to_string(ndim, itershape, "");
- if (tmp == NULL) {
- Py_DECREF(errmsg);
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ if (itershape == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "remapped shapes [original->remapped]: %S", shape1);
+ Py_DECREF(shape1);
+ return 0;
+ }
+ else {
+ PyObject *shape2 = convert_shape_to_string(ndim, itershape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
-
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together with "
+ "remapped shapes [original->remapped]: %S and "
+ "requested shape %S", shape1, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ return 0;
}
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
}
-
- return 0;
}
operand_different_than_broadcast: {
- npy_intp remdims[NPY_MAXDIMS];
- PyObject *errmsg, *tmp;
-
- /* Start of error message */
- if (op_flags[iop] & NPY_ITER_READONLY) {
- errmsg = PyUnicode_FromString("non-broadcastable operand "
- "with shape ");
- }
- else {
- errmsg = PyUnicode_FromString("non-broadcastable output "
- "operand with shape ");
- }
- if (errmsg == NULL) {
+ /* operand shape */
+ int ndims = PyArray_NDIM(op[iop]);
+ npy_intp *dims = PyArray_DIMS(op[iop]);
+ PyObject *shape1 = convert_shape_to_string(ndims, dims, "");
+ if (shape1 == NULL) {
return 0;
}
- /* Operand shape */
- tmp = convert_shape_to_string(PyArray_NDIM(op[iop]),
- PyArray_DIMS(op[iop]), "");
- if (tmp == NULL) {
+ /* Broadcast shape */
+ PyObject *shape2 = convert_shape_to_string(ndim, broadcast_shape, "");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
return 0;
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+
+ if (op_axes == NULL || op_axes[iop] == NULL) {
+ /* operand shape not remapped */
+
+ if (op_flags[iop] & NPY_ITER_READONLY) {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable operand with shape %S doesn't "
+ "match the broadcast shape %S", shape1, shape2);
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable output operand with shape %S doesn't "
+ "match the broadcast shape %S", shape1, shape2);
+ }
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return 0;
}
- /* Remapped operand shape */
- if (op_axes != NULL && op_axes[iop] != NULL) {
- int *axes = op_axes[iop];
+ else {
+ /* operand shape remapped */
+ npy_intp remdims[NPY_MAXDIMS];
+ int *axes = op_axes[iop];
for (idim = 0; idim < ndim; ++idim) {
- npy_intp i = axes[ndim-idim-1];
-
+ npy_intp i = axes[ndim - idim - 1];
if (i >= 0 && i < PyArray_NDIM(op[iop])) {
remdims[idim] = PyArray_DIM(op[iop], i);
}
@@ -1913,48 +1913,30 @@ operand_different_than_broadcast: {
}
}
- tmp = PyUnicode_FromString(" [remapped to ");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ PyObject *shape3 = convert_shape_to_string(ndim, remdims, "");
+ if (shape3 == NULL) {
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
return 0;
}
- tmp = convert_shape_to_string(ndim, remdims, "]");
- if (tmp == NULL) {
- return 0;
+ if (op_flags[iop] & NPY_ITER_READONLY) {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable operand with shape %S "
+ "[remapped to %S] doesn't match the broadcast shape %S",
+ shape1, shape3, shape2);
}
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "non-broadcastable output operand with shape %S "
+ "[remapped to %S] doesn't match the broadcast shape %S",
+ shape1, shape3, shape2);
}
- }
-
- tmp = PyUnicode_FromString(" doesn't match the broadcast shape ");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ Py_DECREF(shape3);
return 0;
}
-
- /* Broadcast shape */
- tmp = convert_shape_to_string(ndim, broadcast_shape, "");
- if (tmp == NULL) {
- return 0;
- }
- PyUString_ConcatAndDel(&errmsg, tmp);
- if (errmsg == NULL) {
- return 0;
- }
-
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
-
- return 0;
}
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 74ee260af..1a50927a8 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -660,14 +660,12 @@ timedeltatype_str(PyObject *self)
* Can't use "%lld" if HAVE_LONG_LONG is not defined
*/
#if defined(HAVE_LONG_LONG)
- ret = PyUnicode_FromFormat("%lld ",
- (long long)(scal->obval * scal->obmeta.num));
+ ret = PyUnicode_FromFormat("%lld %s",
+ (long long)(scal->obval * scal->obmeta.num), basestr);
#else
- ret = PyUnicode_FromFormat("%ld ",
- (long)(scal->obval * scal->obmeta.num));
+ ret = PyUnicode_FromFormat("%ld %s",
+ (long)(scal->obval * scal->obmeta.num), basestr);
#endif
- PyUString_ConcatAndDel(&ret,
- PyUnicode_FromString(basestr));
}
return ret;
@@ -886,7 +884,7 @@ static PyObject *
static PyObject *
c@name@type_@kind@(PyObject *self)
{
- PyObject *rstr, *istr, *ret;
+ PyObject *rstr, *istr;
npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@);
TrimMode trim = TrimMode_DptZeros;
@@ -899,16 +897,13 @@ c@name@type_@kind@(PyObject *self)
if (istr == NULL) {
return NULL;
}
-
- PyUString_ConcatAndDel(&istr, PyUnicode_FromString("j"));
- return istr;
+ PyObject *ret = PyUnicode_FromFormat("%Sj", istr);
+ Py_DECREF(istr);
+ return ret;
}
if (npy_isfinite(val.real)) {
rstr = @name@type_@kind@_either(val.real, trim, trim, 0);
- if (rstr == NULL) {
- return NULL;
- }
}
else if (npy_isnan(val.real)) {
rstr = PyUnicode_FromString("nan");
@@ -919,12 +914,12 @@ c@name@type_@kind@(PyObject *self)
else {
rstr = PyUnicode_FromString("-inf");
}
+ if (rstr == NULL) {
+ return NULL;
+ }
if (npy_isfinite(val.imag)) {
istr = @name@type_@kind@_either(val.imag, trim, trim, 1);
- if (istr == NULL) {
- return NULL;
- }
}
else if (npy_isnan(val.imag)) {
istr = PyUnicode_FromString("+nan");
@@ -935,11 +930,14 @@ c@name@type_@kind@(PyObject *self)
else {
istr = PyUnicode_FromString("-inf");
}
+ if (istr == NULL) {
+ Py_DECREF(rstr);
+ return NULL;
+ }
- ret = PyUnicode_FromString("(");
- PyUString_ConcatAndDel(&ret, rstr);
- PyUString_ConcatAndDel(&ret, istr);
- PyUString_ConcatAndDel(&ret, PyUnicode_FromString("j)"));
+ PyObject *ret = PyUnicode_FromFormat("(%S%Sj)", rstr, istr);
+ Py_DECREF(rstr);
+ Py_DECREF(istr);
return ret;
}
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 1a38fe956..02c349759 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -458,14 +458,12 @@ _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
static void
raise_reshape_size_mismatch(PyArray_Dims *newshape, PyArrayObject *arr)
{
- PyObject *msg = PyUnicode_FromFormat("cannot reshape array of size %zd "
- "into shape ", PyArray_SIZE(arr));
PyObject *tmp = convert_shape_to_string(newshape->len, newshape->ptr, "");
-
- PyUString_ConcatAndDel(&msg, tmp);
- if (msg != NULL) {
- PyErr_SetObject(PyExc_ValueError, msg);
- Py_DECREF(msg);
+ if (tmp != NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "cannot reshape array of size %zd into shape %S",
+ PyArray_SIZE(arr), tmp);
+ Py_DECREF(tmp);
}
}
@@ -979,55 +977,6 @@ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
return (PyObject *)ret;
}
-/* See shape.h for parameters documentation */
-NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp const *vals)
-{
- npy_intp i;
- PyObject *ret, *tmp;
-
- /*
- * Negative dimension indicates "newaxis", which can
- * be discarded for printing if it's a leading dimension.
- * Find the first non-"newaxis" dimension.
- */
- i = 0;
- while (i < n && vals[i] < 0) {
- ++i;
- }
-
- if (i == n) {
- return PyUnicode_FromFormat("()");
- }
- else {
- ret = PyUnicode_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
- if (ret == NULL) {
- return NULL;
- }
- }
-
- for (; i < n; ++i) {
- if (vals[i] < 0) {
- tmp = PyUnicode_FromString(",newaxis");
- }
- else {
- tmp = PyUnicode_FromFormat(",%" NPY_INTP_FMT, vals[i]);
- }
- if (tmp == NULL) {
- Py_DECREF(ret);
- return NULL;
- }
-
- PyUString_ConcatAndDel(&ret, tmp);
- if (ret == NULL) {
- return NULL;
- }
- }
-
- tmp = PyUnicode_FromFormat(")");
- PyUString_ConcatAndDel(&ret, tmp);
- return ret;
-}
/*NUMPY_API
*
diff --git a/numpy/core/src/multiarray/shape.h b/numpy/core/src/multiarray/shape.h
index d25292556..875b5430f 100644
--- a/numpy/core/src/multiarray/shape.h
+++ b/numpy/core/src/multiarray/shape.h
@@ -2,13 +2,6 @@
#define _NPY_ARRAY_SHAPE_H_
/*
- * Builds a string representation of the shape given in 'vals'.
- * A negative value in 'vals' gets interpreted as newaxis.
- */
-NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp const *vals);
-
-/*
* Creates a sorted stride perm matching the KEEPORDER behavior
* of the NpyIter object. Because this operates based on multiple
* input strides, the 'stride' member of the npy_stride_sort_item
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 3727567e0..f8bb5ece7 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -196,7 +196,7 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
}
}
typenum = NPY_USERDEF + NPY_NUMUSERTYPES;
- descr->type_num = typenum;
+ descr->type_num = -1;
if (PyDataType_ISUNSIZED(descr)) {
PyErr_SetString(PyExc_ValueError, "cannot register a" \
"flexible data-type");
@@ -215,18 +215,31 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
" is missing.");
return -1;
}
- if (descr->flags & (NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT)) {
- PyErr_SetString(PyExc_ValueError,
- "Legacy user dtypes referencing python objects or generally "
- "allocated memory are unsupported. "
- "If you see this error in an existing, working code base, "
- "please contact the NumPy developers.");
- return -1;
- }
if (descr->typeobj == NULL) {
PyErr_SetString(PyExc_ValueError, "missing typeobject");
return -1;
}
+ if (descr->flags & (NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT)) {
+ /*
+ * User dtype can't actually do reference counting, however, there
+ * are existing hacks (e.g. xpress), which use a structured one:
+ * dtype((xpress.var, [('variable', 'O')]))
+ * so we have to support this. But such a structure must be constant
+ * (i.e. fixed at registration time, this is the case for `xpress`).
+ */
+ if (descr->names == NULL || descr->fields == NULL ||
+ !PyDict_CheckExact(descr->fields)) {
+ PyErr_Format(PyExc_ValueError,
+ "Failed to register dtype for %S: Legacy user dtypes "
+                "using `NPY_ITEM_IS_POINTER` or `NPY_ITEM_REFCOUNT` are "
+ "unsupported. It is possible to create such a dtype only "
+ "if it is a structured dtype with names and fields "
+ "hardcoded at registration time.\n"
+ "Please contact the NumPy developers if this used to work "
+ "but now fails.", descr->typeobj);
+ return -1;
+ }
+ }
if (test_deprecated_arrfuncs_members(f) < 0) {
return -1;
@@ -243,7 +256,7 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
if (dtypemeta_wrap_legacy_descriptor(descr) < 0) {
return -1;
}
-
+ descr->type_num = typenum;
return typenum;
}
@@ -303,7 +316,7 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
if (!PyTypeNum_ISUSERDEF(descr->type_num) &&
!PyTypeNum_ISUSERDEF(totype)) {
PyErr_SetString(PyExc_ValueError,
- "At least one of the types provided to"
+ "At least one of the types provided to "
"RegisterCanCast must be user-defined.");
return -1;
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index f693eb5c2..8660ee413 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -5977,6 +5977,7 @@ _typecharfromnum(int num) {
return ret;
}
+
static PyObject *
ufunc_get_doc(PyUFuncObject *ufunc)
{
@@ -5997,18 +5998,18 @@ ufunc_get_doc(PyUFuncObject *ufunc)
* introspection on name and nin + nout to automate the first part
* of it the doc string shouldn't need the calling convention
*/
- doc = PyObject_CallFunctionObjArgs(
- _sig_formatter, (PyObject *)ufunc, NULL);
+ doc = PyObject_CallFunctionObjArgs(_sig_formatter,
+ (PyObject *)ufunc, NULL);
if (doc == NULL) {
return NULL;
}
if (ufunc->doc != NULL) {
- PyUString_ConcatAndDel(&doc,
- PyUnicode_FromFormat("\n\n%s", ufunc->doc));
+ Py_SETREF(doc, PyUnicode_FromFormat("%S\n\n%s", doc, ufunc->doc));
}
return doc;
}
+
static PyObject *
ufunc_get_nin(PyUFuncObject *ufunc)
{
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index ce66589ca..78def9360 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -674,3 +674,18 @@ class TestArrayLikes:
assert arr[()] is ArrayLike
arr = np.array([ArrayLike])
assert arr[0] is ArrayLike
+
+ @pytest.mark.skipif(
+ np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
+ def test_too_large_array_error_paths(self):
+ """Test the error paths, including for memory leaks"""
+ arr = np.array(0, dtype="uint8")
+ # Guarantees that a contiguous copy won't work:
+ arr = np.broadcast_to(arr, 2**62)
+
+ for i in range(5):
+ # repeat, to ensure caching cannot have an effect:
+ with pytest.raises(MemoryError):
+ np.array(arr)
+ with pytest.raises(MemoryError):
+ np.array([arr])
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 2e2b0dbe2..898ceebcd 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -6,6 +6,7 @@ import gc
import numpy as np
from numpy.core._rational_tests import rational
+from numpy.core._multiarray_tests import create_custom_field_dtype
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
@@ -1338,3 +1339,39 @@ class TestFromCTypes:
pair_type = np.dtype('{},{}'.format(*pair))
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
assert_equal(pair_type, expected)
+
+
+class TestUserDType:
+ @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
+ def test_custom_structured_dtype(self):
+ class mytype:
+ pass
+
+ blueprint = np.dtype([("field", object)])
+ dt = create_custom_field_dtype(blueprint, mytype, 0)
+ assert dt.type == mytype
+ # We cannot (currently) *create* this dtype with `np.dtype` because
+ # mytype does not inherit from `np.generic`. This seems like an
+ # unnecessary restriction, but one that has been around forever:
+ assert np.dtype(mytype) == np.dtype("O")
+
+ with pytest.raises(RuntimeError):
+ # Registering a second time should fail
+ create_custom_field_dtype(blueprint, mytype, 0)
+
+ def test_custom_structured_dtype_errors(self):
+ class mytype:
+ pass
+
+ blueprint = np.dtype([("field", object)])
+
+ with pytest.raises(ValueError):
+ # Tests what happens if fields are unset during creation
+ # which is currently rejected due to the containing object
+ # (see PyArray_RegisterDataType).
+ create_custom_field_dtype(blueprint, mytype, 1)
+
+ with pytest.raises(RuntimeError):
+ # Tests that a dtype must have its type field set up to np.dtype
+ # or in this case a builtin instance.
+ create_custom_field_dtype(blueprint, mytype, 2)
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 9464692e0..6c6c1ff80 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -93,7 +93,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
else:
to_begin = np.asanyarray(to_begin)
if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
- raise TypeError("dtype of `to_end` must be compatible "
+ raise TypeError("dtype of `to_begin` must be compatible "
"with input `ary` under the `same_kind` rule.")
to_begin = to_begin.ravel()
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index afbd3784a..5d951e262 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -746,7 +746,7 @@ def read_array(fp, allow_pickle=False, pickle_kwargs=None):
# Friendlier error message
raise UnicodeError("Unpickling a python object failed: %r\n"
"You may need to pass the encoding= option "
- "to numpy.load" % (err,))
+ "to numpy.load" % (err,)) from err
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 81ba789e3..847e6cb8a 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -125,32 +125,36 @@ class TestSetOps:
assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))
assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))
- @pytest.mark.parametrize("ary, prepend, append", [
+ @pytest.mark.parametrize("ary, prepend, append, expected", [
# should fail because trying to cast
# np.nan standard floating point value
# into an integer array:
(np.array([1, 2, 3], dtype=np.int64),
None,
- np.nan),
+ np.nan,
+ 'to_end'),
# should fail because attempting
# to downcast to int type:
(np.array([1, 2, 3], dtype=np.int64),
np.array([5, 7, 2], dtype=np.float32),
- None),
+ None,
+ 'to_begin'),
# should fail because attempting to cast
# two special floating point values
- # to integers (on both sides of ary):
+ # to integers (on both sides of ary),
+ # `to_begin` is in the error message as the impl checks this first:
(np.array([1., 3., 9.], dtype=np.int8),
np.nan,
- np.nan),
+ np.nan,
+ 'to_begin'),
])
- def test_ediff1d_forbidden_type_casts(self, ary, prepend, append):
+ def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):
# verify resolution of gh-11490
# specifically, raise an appropriate
# Exception when attempting to append or
# prepend with an incompatible type
- msg = 'must be compatible'
+ msg = 'dtype of `{}` must be compatible'.format(expected)
with assert_raises_regex(TypeError, msg):
ediff1d(ary=ary,
to_end=append,
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index b5371f51a..313d9e0b9 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -443,9 +443,9 @@ def _check_fill_value(fill_value, ndtype):
if isinstance(fill_value, (ndarray, np.void)):
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
- except ValueError:
+ except ValueError as e:
err_msg = "Unable to transform %s to dtype %s"
- raise ValueError(err_msg % (fill_value, ndtype))
+ raise ValueError(err_msg % (fill_value, ndtype)) from e
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
@@ -460,12 +460,12 @@ def _check_fill_value(fill_value, ndtype):
# Also in case of converting string arrays.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
- except (OverflowError, ValueError):
+ except (OverflowError, ValueError) as e:
# Raise TypeError instead of OverflowError or ValueError.
# OverflowError is seldom used, and the real problem here is
# that the passed fill_value is not compatible with the ndtype.
err_msg = "Cannot convert fill_value %s to dtype %s"
- raise TypeError(err_msg % (fill_value, ndtype))
+ raise TypeError(err_msg % (fill_value, ndtype)) from e
return np.array(fill_value)
@@ -5389,7 +5389,7 @@ class MaskedArray(ndarray):
See Also
--------
- numpy.ndarray.around : corresponding function for ndarrays
+ numpy.ndarray.round : corresponding function for ndarrays
numpy.around : equivalent function
"""
result = self._data.round(decimals=decimals, out=out).view(type(self))
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 613bcb550..1bf03e966 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -901,11 +901,11 @@ def compress_rows(a):
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
- `extras.compress_rowcols` for details.
+ `compress_rowcols` for details.
See Also
--------
- extras.compress_rowcols
+ compress_rowcols
"""
a = asarray(a)
@@ -918,11 +918,11 @@ def compress_cols(a):
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
- `extras.compress_rowcols` for details.
+ `compress_rowcols` for details.
See Also
--------
- extras.compress_rowcols
+ compress_rowcols
"""
a = asarray(a)
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index cd93a9a14..c017bee95 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -198,8 +198,8 @@ class MaskedRecords(MaskedArray):
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
- except (TypeError, KeyError):
- raise AttributeError("record array has no attribute %s" % attr)
+ except (TypeError, KeyError) as e:
+ raise AttributeError("record array has no attribute %s" % attr) from e
# So far, so good
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index f4a67a222..59c380f10 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -547,8 +547,8 @@ class ABCPolyBase(abc.ABC):
othercoef = self._get_coefficients(other)
try:
quo, rem = self._div(self.coef, othercoef)
- except ZeroDivisionError as e:
- raise e
+ except ZeroDivisionError:
+ raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
@@ -605,8 +605,8 @@ class ABCPolyBase(abc.ABC):
def __rdivmod__(self, other):
try:
quo, rem = self._div(other, self.coef)
- except ZeroDivisionError as e:
- raise e
+ except ZeroDivisionError:
+ raise
except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 86fd5e787..987aa39aa 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -90,6 +90,16 @@ since its usage is discouraged.
Please see : https://numpy.org/devdocs/reference/arrays.dtypes.html
"""
+from ._scalars import (
+ _CharLike,
+ _BoolLike,
+ _IntLike,
+ _FloatLike,
+ _ComplexLike,
+ _NumberLike,
+ _ScalarLike,
+ _VoidLike,
+)
from ._array_like import _SupportsArray, ArrayLike
from ._shape import _Shape, _ShapeLike
from ._dtype_like import DtypeLike
@@ -97,4 +107,3 @@ from ._dtype_like import DtypeLike
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
-
diff --git a/numpy/typing/_array_like.py b/numpy/typing/_array_like.py
index 76c0c839c..1c00b200f 100644
--- a/numpy/typing/_array_like.py
+++ b/numpy/typing/_array_like.py
@@ -2,6 +2,7 @@ import sys
from typing import Any, overload, Sequence, TYPE_CHECKING, Union
from numpy import ndarray
+from ._scalars import _ScalarLike
from ._dtype_like import DtypeLike
if sys.version_info >= (3, 8):
@@ -31,4 +32,9 @@ else:
# is resolved. See also the mypy issue:
#
# https://github.com/python/typing/issues/593
-ArrayLike = Union[bool, int, float, complex, _SupportsArray, Sequence]
+ArrayLike = Union[
+ _ScalarLike,
+ Sequence[_ScalarLike],
+ Sequence[Sequence[Any]], # TODO: Wait for support for recursive types
+ _SupportsArray,
+]
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.py
new file mode 100644
index 000000000..0d876ae8d
--- /dev/null
+++ b/numpy/typing/_callable.py
@@ -0,0 +1,138 @@
+"""
+A module with various ``typing.Protocol`` subclasses that implement
+the ``__call__`` magic method.
+
+See the `Mypy documentation`_ on protocols for more details.
+
+.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
+
+"""
+
+import sys
+from typing import Union, TypeVar, overload, Any
+
+from numpy import (
+ generic,
+ bool_,
+ timedelta64,
+ number,
+ integer,
+ unsignedinteger,
+ signedinteger,
+ int32,
+ int64,
+ floating,
+ float32,
+ float64,
+ complexfloating,
+ complex128,
+)
+from ._scalars import (
+ _BoolLike,
+ _IntLike,
+ _FloatLike,
+ _ComplexLike,
+ _NumberLike,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Protocol
+ HAVE_PROTOCOL = True
+else:
+ try:
+ from typing_extensions import Protocol
+ except ImportError:
+ HAVE_PROTOCOL = False
+ else:
+ HAVE_PROTOCOL = True
+
+if HAVE_PROTOCOL:
+ _NumberType = TypeVar("_NumberType", bound=number)
+ _NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
+ _GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
+
+ class _BoolOp(Protocol[_GenericType_co]):
+ @overload
+ def __call__(self, __other: _BoolLike) -> _GenericType_co: ...
+ @overload # platform dependent
+ def __call__(self, __other: int) -> Union[int32, int64]: ...
+ @overload
+ def __call__(self, __other: float) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _BoolSub(Protocol):
+ # Note that `__other: bool_` is absent here
+ @overload # platform dependent
+ def __call__(self, __other: int) -> Union[int32, int64]: ...
+ @overload
+ def __call__(self, __other: float) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _BoolTrueDiv(Protocol):
+ @overload
+ def __call__(self, __other: Union[float, _IntLike, _BoolLike]) -> float64: ...
+ @overload
+ def __call__(self, __other: complex) -> complex128: ...
+ @overload
+ def __call__(self, __other: _NumberType) -> _NumberType: ...
+
+ class _TD64Div(Protocol[_NumberType_co]):
+ @overload
+ def __call__(self, __other: timedelta64) -> _NumberType_co: ...
+ @overload
+ def __call__(self, __other: _FloatLike) -> timedelta64: ...
+
+ class _IntTrueDiv(Protocol):
+ @overload
+ def __call__(self, __other: Union[_IntLike, float]) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _UnsignedIntOp(Protocol):
+ # NOTE: `uint64 + signedinteger -> float64`
+ @overload
+ def __call__(self, __other: Union[bool, unsignedinteger]) -> unsignedinteger: ...
+ @overload
+ def __call__(self, __other: Union[int, signedinteger]) -> Union[signedinteger, float64]: ...
+ @overload
+ def __call__(self, __other: float) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _SignedIntOp(Protocol):
+ @overload
+ def __call__(self, __other: Union[int, signedinteger]) -> signedinteger: ...
+ @overload
+ def __call__(self, __other: float) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _FloatOp(Protocol):
+ @overload
+ def __call__(self, __other: _FloatLike) -> floating: ...
+ @overload
+ def __call__(self, __other: complex) -> complexfloating[floating]: ...
+
+ class _ComplexOp(Protocol):
+ def __call__(self, __other: _ComplexLike) -> complexfloating[floating]: ...
+
+ class _NumberOp(Protocol):
+ def __call__(self, __other: _NumberLike) -> number: ...
+
+else:
+ _BoolOp = Any
+ _BoolSub = Any
+ _BoolTrueDiv = Any
+ _TD64Div = Any
+ _IntTrueDiv = Any
+ _UnsignedIntOp = Any
+ _SignedIntOp = Any
+ _FloatOp = Any
+ _ComplexOp = Any
+ _NumberOp = Any
diff --git a/numpy/typing/_scalars.py b/numpy/typing/_scalars.py
new file mode 100644
index 000000000..e4fc28b07
--- /dev/null
+++ b/numpy/typing/_scalars.py
@@ -0,0 +1,26 @@
+from typing import Union, Tuple, Any
+
+import numpy as np
+
+# NOTE: `_StrLike` and `_BytesLike` are pointless, as `np.str_` and `np.bytes_`
+# are already subclasses of their builtin counterpart
+
+_CharLike = Union[str, bytes]
+
+_BoolLike = Union[bool, np.bool_]
+_IntLike = Union[int, np.integer]
+_FloatLike = Union[_IntLike, float, np.floating]
+_ComplexLike = Union[_FloatLike, complex, np.complexfloating]
+_NumberLike = Union[int, float, complex, np.number, np.bool_]
+
+_ScalarLike = Union[
+ int,
+ float,
+ complex,
+ str,
+ bytes,
+ np.generic,
+]
+
+# `_VoidLike` is technically not a scalar, but it's close enough
+_VoidLike = Union[Tuple[Any, ...], np.void]
diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.py
new file mode 100644
index 000000000..169e104f9
--- /dev/null
+++ b/numpy/typing/tests/data/fail/arithmetic.py
@@ -0,0 +1,19 @@
+import numpy as np
+
+b_ = np.bool_()
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ - b_ # E: No overload variant
+
+dt + dt # E: Unsupported operand types
+td - dt # E: Unsupported operand types
+td % 1 # E: Unsupported operand types
+td / dt # E: No overload
+
+# NOTE: The test below currently doesn't work due to the broad
+# (i.e. untyped) signature of `.__mod__()`.
+# TODO: Revisit this once annotations are added to the
+# `_ArrayOrScalarCommon` magic methods.
+
+# td % dt # E: Unsupported operand types
diff --git a/numpy/typing/tests/data/fail/linspace.py b/numpy/typing/tests/data/fail/array_constructors.py
index a9769c5d6..5218572a6 100644
--- a/numpy/typing/tests/data/fail/linspace.py
+++ b/numpy/typing/tests/data/fail/array_constructors.py
@@ -1,5 +1,18 @@
import numpy as np
+a: np.ndarray
+
+np.require(a, requirements=1) # E: No overload variant
+np.require(a, requirements="TEST") # E: incompatible type
+
+np.zeros("test") # E: incompatible type
+np.zeros() # E: Too few arguments
+
+np.ones("test") # E: incompatible type
+np.ones() # E: Too few arguments
+
+np.array(0, float, True) # E: Too many positional
+
np.linspace(None, 'bob') # E: No overload variant
np.linspace(0, 2, num=10.0) # E: No overload variant
np.linspace(0, 2, endpoint='True') # E: No overload variant
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.py
index e7ffe8920..be031e6e1 100644
--- a/numpy/typing/tests/data/fail/modules.py
+++ b/numpy/typing/tests/data/fail/modules.py
@@ -1,3 +1,4 @@
import numpy as np
np.testing.bob # E: Module has no attribute
+np.bob # E: Module has no attribute
\ No newline at end of file
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.py
new file mode 100644
index 000000000..1e1496bfe
--- /dev/null
+++ b/numpy/typing/tests/data/fail/ndarray_misc.py
@@ -0,0 +1,21 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterpart in `../from_numeric.py`.
+
+"""
+
+import numpy as np
+
+f8: np.float64
+
+f8.argpartition(0) # E: has no attribute
+f8.diagonal() # E: has no attribute
+f8.dot(1) # E: has no attribute
+f8.nonzero() # E: has no attribute
+f8.partition(0) # E: has no attribute
+f8.put(0, 2) # E: has no attribute
+f8.setfield(2, np.float64) # E: has no attribute
+f8.sort() # E: has no attribute
+f8.trace() # E: has no attribute
diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.py
index 47c031163..13bb45483 100644
--- a/numpy/typing/tests/data/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.py
@@ -28,22 +28,6 @@ np.complex64(1, 2) # E: Too many arguments
np.datetime64(0) # E: non-matching overload
-dt_64 = np.datetime64(0, "D")
-td_64 = np.timedelta64(1, "h")
-
-dt_64 + dt_64 # E: Unsupported operand types
-td_64 - dt_64 # E: Unsupported operand types
-td_64 % 1 # E: Unsupported operand types
-
-# NOTE: The 2 tests below currently don't work due to the broad
-# (i.e. untyped) signature of `generic.__truediv__()` and `.__mod__()`.
-# TODO: Revisit this once annotations are added to the
-# `_ArrayOrScalarCommon` magic methods.
-
-# td_64 / dt_64 # E: No overload
-# td_64 % dt_64 # E: Unsupported operand types
-
-
class A:
def __float__(self):
return 1.0
diff --git a/numpy/typing/tests/data/fail/simple.py b/numpy/typing/tests/data/fail/simple.py
deleted file mode 100644
index 57c08fb7d..000000000
--- a/numpy/typing/tests/data/fail/simple.py
+++ /dev/null
@@ -1,12 +0,0 @@
-"""Simple expression that should fail with mypy."""
-
-import numpy as np
-
-# Array creation routines checks
-np.zeros("test") # E: incompatible type
-np.zeros() # E: Too few arguments
-
-np.ones("test") # E: incompatible type
-np.ones() # E: Too few arguments
-
-np.array(0, float, True) # E: Too many positional
diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py
new file mode 100644
index 000000000..f26eab879
--- /dev/null
+++ b/numpy/typing/tests/data/pass/arithmetic.py
@@ -0,0 +1,257 @@
+import numpy as np
+
+c16 = np.complex128(1)
+f8 = np.float64(1)
+i8 = np.int64(1)
+u8 = np.uint64(1)
+
+c8 = np.complex64(1)
+f4 = np.float32(1)
+i4 = np.int32(1)
+u4 = np.uint32(1)
+
+dt = np.datetime64(1, "D")
+td = np.timedelta64(1, "D")
+
+b_ = np.bool_(1)
+
+b = bool(1)
+c = complex(1)
+f = float(1)
+i = int(1)
+
+AR = np.ones(1, dtype=np.float64)
+AR.setflags(write=False)
+
+# Time structures
+
+dt + td
+dt + i
+dt + i4
+dt + i8
+dt - dt
+dt - i
+dt - i4
+dt - i8
+
+td + td
+td + i
+td + i4
+td + i8
+td - td
+td - i
+td - i4
+td - i8
+td / f
+td / f4
+td / f8
+td / td
+td // td
+td % td
+
+
+# boolean
+
+b_ / b
+b_ / b_
+b_ / i
+b_ / i8
+b_ / i4
+b_ / u8
+b_ / u4
+b_ / f
+b_ / f8
+b_ / f4
+b_ / c
+b_ / c16
+b_ / c8
+
+b / b_
+b_ / b_
+i / b_
+i8 / b_
+i4 / b_
+u8 / b_
+u4 / b_
+f / b_
+f8 / b_
+f4 / b_
+c / b_
+c16 / b_
+c8 / b_
+
+# Complex
+
+c16 + c16
+c16 + f8
+c16 + i8
+c16 + c8
+c16 + f4
+c16 + i4
+c16 + b_
+c16 + b
+c16 + c
+c16 + f
+c16 + i
+c16 + AR
+
+c16 + c16
+f8 + c16
+i8 + c16
+c8 + c16
+f4 + c16
+i4 + c16
+b_ + c16
+b + c16
+c + c16
+f + c16
+i + c16
+AR + c16
+
+c8 + c16
+c8 + f8
+c8 + i8
+c8 + c8
+c8 + f4
+c8 + i4
+c8 + b_
+c8 + b
+c8 + c
+c8 + f
+c8 + i
+c8 + AR
+
+c16 + c8
+f8 + c8
+i8 + c8
+c8 + c8
+f4 + c8
+i4 + c8
+b_ + c8
+b + c8
+c + c8
+f + c8
+i + c8
+AR + c8
+
+# Float
+
+f8 + f8
+f8 + i8
+f8 + f4
+f8 + i4
+f8 + b_
+f8 + b
+f8 + c
+f8 + f
+f8 + i
+f8 + AR
+
+f8 + f8
+i8 + f8
+f4 + f8
+i4 + f8
+b_ + f8
+b + f8
+c + f8
+f + f8
+i + f8
+AR + f8
+
+f4 + f8
+f4 + i8
+f4 + f4
+f4 + i4
+f4 + b_
+f4 + b
+f4 + c
+f4 + f
+f4 + i
+f4 + AR
+
+f8 + f4
+i8 + f4
+f4 + f4
+i4 + f4
+b_ + f4
+b + f4
+c + f4
+f + f4
+i + f4
+AR + f4
+
+# Int
+
+i8 + i8
+i8 + u8
+i8 + i4
+i8 + u4
+i8 + b_
+i8 + b
+i8 + c
+i8 + f
+i8 + i
+i8 + AR
+
+u8 + u8
+u8 + i4
+u8 + u4
+u8 + b_
+u8 + b
+u8 + c
+u8 + f
+u8 + i
+u8 + AR
+
+i8 + i8
+u8 + i8
+i4 + i8
+u4 + i8
+b_ + i8
+b + i8
+c + i8
+f + i8
+i + i8
+AR + i8
+
+u8 + u8
+i4 + u8
+u4 + u8
+b_ + u8
+b + u8
+c + u8
+f + u8
+i + u8
+AR + u8
+
+i4 + i8
+i4 + i4
+i4 + i
+i4 + b_
+i4 + b
+i4 + AR
+
+u4 + i8
+u4 + i4
+u4 + u8
+u4 + u4
+u4 + i
+u4 + b_
+u4 + b
+u4 + AR
+
+i8 + i4
+i4 + i4
+i + i4
+b_ + i4
+b + i4
+AR + i4
+
+i8 + u4
+i4 + u4
+u8 + u4
+u4 + u4
+b_ + u4
+b + u4
+i + u4
+AR + u4
diff --git a/numpy/typing/tests/data/pass/array_constructors.py b/numpy/typing/tests/data/pass/array_constructors.py
new file mode 100644
index 000000000..bf29e52b9
--- /dev/null
+++ b/numpy/typing/tests/data/pass/array_constructors.py
@@ -0,0 +1,66 @@
+from typing import List
+import numpy as np
+
+class Index:
+ def __index__(self) -> int:
+ return 0
+
+class SubClass(np.ndarray): ...
+
+A = np.array([1])
+B = A.view(SubClass).copy()
+C = [1]
+
+np.array(1, dtype=float)
+np.array(1, copy=False)
+np.array(1, order='F')
+np.array(1, order=None)
+np.array(1, subok=True)
+np.array(1, ndmin=3)
+np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
+
+np.asarray(A)
+np.asarray(B)
+np.asarray(C)
+
+np.asanyarray(A)
+np.asanyarray(B)
+np.asanyarray(B, dtype=int)
+np.asanyarray(C)
+
+np.ascontiguousarray(A)
+np.ascontiguousarray(B)
+np.ascontiguousarray(C)
+
+np.asfortranarray(A)
+np.asfortranarray(B)
+np.asfortranarray(C)
+
+np.require(A)
+np.require(B)
+np.require(B, dtype=int)
+np.require(B, requirements=None)
+np.require(B, requirements="E")
+np.require(B, requirements=["ENSUREARRAY"])
+np.require(B, requirements={"F", "E"})
+np.require(B, requirements=["C", "OWNDATA"])
+np.require(B, requirements="W")
+np.require(B, requirements="A")
+np.require(C)
+
+np.linspace(0, 2)
+np.linspace(0.5, [0, 1, 2])
+np.linspace([0, 1, 2], 3)
+np.linspace(0j, 2)
+np.linspace(0, 2, num=10)
+np.linspace(0, 2, endpoint=True)
+np.linspace(0, 2, retstep=True)
+np.linspace(0j, 2j, retstep=True)
+np.linspace(0, 2, dtype=bool)
+np.linspace([0, 1], [2, 3], axis=Index())
+
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=2)
+np.logspace(0, 2, base=[1j, 2j], num=2)
+
+np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/linspace.py b/numpy/typing/tests/data/pass/linspace.py
deleted file mode 100644
index 8c6d0d56b..000000000
--- a/numpy/typing/tests/data/pass/linspace.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-
-class Index:
- def __index__(self) -> int:
- return 0
-
-np.linspace(0, 2)
-np.linspace(0.5, [0, 1, 2])
-np.linspace([0, 1, 2], 3)
-np.linspace(0j, 2)
-np.linspace(0, 2, num=10)
-np.linspace(0, 2, endpoint=True)
-np.linspace(0, 2, retstep=True)
-np.linspace(0j, 2j, retstep=True)
-np.linspace(0, 2, dtype=bool)
-np.linspace([0, 1], [2, 3], axis=Index())
-
-np.logspace(0, 2, base=2)
-np.logspace(0, 2, base=2)
-np.logspace(0, 2, base=[1j, 2j], num=2)
-
-np.geomspace(1, 2)
diff --git a/numpy/typing/tests/data/pass/literal.py b/numpy/typing/tests/data/pass/literal.py
index 321ce3c2b..8eaeb6afb 100644
--- a/numpy/typing/tests/data/pass/literal.py
+++ b/numpy/typing/tests/data/pass/literal.py
@@ -31,6 +31,8 @@ order_list: List[Tuple[frozenset, Callable]] = [
(KACF, partial(np.add, 1, 1)), # i.e. np.ufunc.__call__
(ACF, partial(np.reshape, AR, 1)),
(KACF, partial(np.ravel, AR)),
+ (KACF, partial(np.asarray, 1)),
+ (KACF, partial(np.asanyarray, 1)),
]
for order_set, func in order_list:
diff --git a/numpy/typing/tests/data/pass/ndarray_misc.py b/numpy/typing/tests/data/pass/ndarray_misc.py
new file mode 100644
index 000000000..6c6f5d50b
--- /dev/null
+++ b/numpy/typing/tests/data/pass/ndarray_misc.py
@@ -0,0 +1,159 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterpart in `../from_numeric.py`.
+
+"""
+
+from typing import cast
+import numpy as np
+
+class SubClass(np.ndarray): ...
+
+i4 = np.int32(1)
+A = np.array([[1]], dtype=np.int32)
+B0 = np.empty((), dtype=np.int32).view(SubClass)
+B1 = np.empty((1,), dtype=np.int32).view(SubClass)
+B2 = np.empty((1, 1), dtype=np.int32).view(SubClass)
+C = np.array([0, 1, 2], dtype=np.int32)
+D = np.empty(3).view(SubClass)
+
+i4.all()
+A.all()
+A.all(axis=0)
+A.all(keepdims=True)
+A.all(out=B0)
+
+i4.any()
+A.any()
+A.any(axis=0)
+A.any(keepdims=True)
+A.any(out=B0)
+
+i4.argmax()
+A.argmax()
+A.argmax(axis=0)
+A.argmax(out=B0)
+
+i4.argmin()
+A.argmin()
+A.argmin(axis=0)
+A.argmin(out=B0)
+
+i4.argsort()
+A.argsort()
+
+i4.choose([()])
+_choices = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=np.int32)
+C.choose(_choices)
+C.choose(_choices, out=D)
+
+i4.clip(1)
+A.clip(1)
+A.clip(None, 1)
+A.clip(1, out=B2)
+A.clip(None, 1, out=B2)
+
+i4.compress([1])
+A.compress([1])
+A.compress([1], out=B1)
+
+i4.conj()
+A.conj()
+B0.conj()
+
+i4.conjugate()
+A.conjugate()
+B0.conjugate()
+
+i4.cumprod()
+A.cumprod()
+A.cumprod(out=B1)
+
+i4.cumsum()
+A.cumsum()
+A.cumsum(out=B1)
+
+i4.max()
+A.max()
+A.max(axis=0)
+A.max(keepdims=True)
+A.max(out=B0)
+
+i4.mean()
+A.mean()
+A.mean(axis=0)
+A.mean(keepdims=True)
+A.mean(out=B0)
+
+i4.min()
+A.min()
+A.min(axis=0)
+A.min(keepdims=True)
+A.min(out=B0)
+
+i4.newbyteorder()
+A.newbyteorder()
+B0.newbyteorder('|')
+
+i4.prod()
+A.prod()
+A.prod(axis=0)
+A.prod(keepdims=True)
+A.prod(out=B0)
+
+i4.ptp()
+A.ptp()
+A.ptp(axis=0)
+A.ptp(keepdims=True)
+A.astype(int).ptp(out=B0)
+
+i4.round()
+A.round()
+A.round(out=B2)
+
+i4.repeat(1)
+A.repeat(1)
+B0.repeat(1)
+
+i4.std()
+A.std()
+A.std(axis=0)
+A.std(keepdims=True)
+A.std(out=B0.astype(np.float64))
+
+i4.sum()
+A.sum()
+A.sum(axis=0)
+A.sum(keepdims=True)
+A.sum(out=B0)
+
+i4.take(0)
+A.take(0)
+A.take([0])
+A.take(0, out=B0)
+A.take([0], out=B1)
+
+i4.var()
+A.var()
+A.var(axis=0)
+A.var(keepdims=True)
+A.var(out=B0)
+
+A.argpartition([0])
+
+A.diagonal()
+
+A.dot(1)
+A.dot(1, out=B0)
+
+A.nonzero()
+
+C.searchsorted(1)
+
+A.trace()
+A.trace(out=B0)
+
+void = cast(np.void, np.array(1, dtype=[("f", np.float64)]).take(0))
+void.setfield(10, np.float64)
diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py
index c02e1ed36..49ddb8ed9 100644
--- a/numpy/typing/tests/data/pass/scalars.py
+++ b/numpy/typing/tests/data/pass/scalars.py
@@ -108,19 +108,6 @@ np.timedelta64(dt.timedelta(2))
np.timedelta64(None)
np.timedelta64(None, "D")
-dt_64 = np.datetime64(0, "D")
-td_64 = np.timedelta64(1, "h")
-
-dt_64 + td_64
-dt_64 - dt_64
-dt_64 - td_64
-
-td_64 + td_64
-td_64 - td_64
-td_64 / 1.0
-td_64 / td_64
-td_64 % td_64
-
np.void(1)
np.void(np.int64(1))
np.void(True)
diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 527050557..4d397bd29 100644
--- a/numpy/typing/tests/data/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
@@ -17,15 +17,6 @@ ndarray_func(np.array([1, 2]))
array == 1
array.dtype == float
-# Array creation routines checks
-np.array(1, dtype=float)
-np.array(1, copy=False)
-np.array(1, order='F')
-np.array(1, order=None)
-np.array(1, subok=True)
-np.array(1, ndmin=3)
-np.array(1, str, copy=True, order='C', subok=False, ndmin=2)
-
ndarray_func(np.zeros([1, 2]))
ndarray_func(np.ones([1, 2]))
ndarray_func(np.empty([1, 2]))
diff --git a/numpy/typing/tests/data/pass/ufuncs.py b/numpy/typing/tests/data/pass/ufuncs.py
index 82172952a..ad4d483d4 100644
--- a/numpy/typing/tests/data/pass/ufuncs.py
+++ b/numpy/typing/tests/data/pass/ufuncs.py
@@ -6,7 +6,10 @@ np.sin(1, out=np.empty(1))
np.matmul(np.ones((2, 2, 2)), np.ones((2, 2, 2)), axes=[(0, 1), (0, 1), (0, 1)])
np.sin(1, signature="D")
np.sin(1, extobj=[16, 1, lambda: None])
-np.sin(1) + np.sin(1)
+# NOTE: `np.generic` subclasses are not guaranteed to support addition;
+# re-enable this once we can infer the exact return type of `np.sin(...)`.
+#
+# np.sin(1) + np.sin(1)
np.sin.types[0]
np.sin.__name__
diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.py
new file mode 100644
index 000000000..b8c457aaf
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/arithmetic.py
@@ -0,0 +1,256 @@
+import numpy as np
+
+c16 = np.complex128()
+f8 = np.float64()
+i8 = np.int64()
+u8 = np.uint64()
+
+c8 = np.complex64()
+f4 = np.float32()
+i4 = np.int32()
+u4 = np.uint32()
+
+dt = np.datetime64(0, "D")
+td = np.timedelta64(0, "D")
+
+b_ = np.bool_()
+
+b = bool()
+c = complex()
+f = float()
+i = int()
+
+AR = np.array([0], dtype=np.float64)
+AR.setflags(write=False)
+
+# Time structures
+
+reveal_type(dt + td) # E: numpy.datetime64
+reveal_type(dt + i) # E: numpy.datetime64
+reveal_type(dt + i4) # E: numpy.datetime64
+reveal_type(dt + i8) # E: numpy.datetime64
+reveal_type(dt - dt) # E: numpy.timedelta64
+reveal_type(dt - i) # E: numpy.datetime64
+reveal_type(dt - i4) # E: numpy.datetime64
+reveal_type(dt - i8) # E: numpy.datetime64
+
+reveal_type(td + td) # E: numpy.timedelta64
+reveal_type(td + i) # E: numpy.timedelta64
+reveal_type(td + i4) # E: numpy.timedelta64
+reveal_type(td + i8) # E: numpy.timedelta64
+reveal_type(td - td) # E: numpy.timedelta64
+reveal_type(td - i) # E: numpy.timedelta64
+reveal_type(td - i4) # E: numpy.timedelta64
+reveal_type(td - i8) # E: numpy.timedelta64
+reveal_type(td / f) # E: numpy.timedelta64
+reveal_type(td / f4) # E: numpy.timedelta64
+reveal_type(td / f8) # E: numpy.timedelta64
+reveal_type(td / td) # E: float64
+reveal_type(td // td) # E: signedinteger
+reveal_type(td % td) # E: numpy.timedelta64
+
+# boolean
+
+reveal_type(b_ / b) # E: float64
+reveal_type(b_ / b_) # E: float64
+reveal_type(b_ / i) # E: float64
+reveal_type(b_ / i8) # E: float64
+reveal_type(b_ / i4) # E: float64
+reveal_type(b_ / u8) # E: float64
+reveal_type(b_ / u4) # E: float64
+reveal_type(b_ / f) # E: float64
+reveal_type(b_ / f8) # E: float64
+reveal_type(b_ / f4) # E: float32
+reveal_type(b_ / c) # E: complex128
+reveal_type(b_ / c16) # E: complex128
+reveal_type(b_ / c8) # E: complex64
+
+reveal_type(b / b_) # E: float64
+reveal_type(b_ / b_) # E: float64
+reveal_type(i / b_) # E: float64
+reveal_type(i8 / b_) # E: float64
+reveal_type(i4 / b_) # E: float64
+reveal_type(u8 / b_) # E: float64
+reveal_type(u4 / b_) # E: float64
+reveal_type(f / b_) # E: float64
+reveal_type(f8 / b_) # E: float64
+reveal_type(f4 / b_) # E: float32
+reveal_type(c / b_) # E: complex128
+reveal_type(c16 / b_) # E: complex128
+reveal_type(c8 / b_) # E: complex64
+
+# Complex
+
+reveal_type(c16 + c16) # E: complexfloating
+reveal_type(c16 + f8) # E: complexfloating
+reveal_type(c16 + i8) # E: complexfloating
+reveal_type(c16 + c8) # E: complexfloating
+reveal_type(c16 + f4) # E: complexfloating
+reveal_type(c16 + i4) # E: complexfloating
+reveal_type(c16 + b_) # E: complex128
+reveal_type(c16 + b) # E: complexfloating
+reveal_type(c16 + c) # E: complexfloating
+reveal_type(c16 + f) # E: complexfloating
+reveal_type(c16 + i) # E: complexfloating
+reveal_type(c16 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c16 + c16) # E: complexfloating
+reveal_type(f8 + c16) # E: complexfloating
+reveal_type(i8 + c16) # E: complexfloating
+reveal_type(c8 + c16) # E: complexfloating
+reveal_type(f4 + c16) # E: complexfloating
+reveal_type(i4 + c16) # E: complexfloating
+reveal_type(b_ + c16) # E: complex128
+reveal_type(b + c16) # E: complexfloating
+reveal_type(c + c16) # E: complexfloating
+reveal_type(f + c16) # E: complexfloating
+reveal_type(i + c16) # E: complexfloating
+reveal_type(AR + c16) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c8 + c16) # E: complexfloating
+reveal_type(c8 + f8) # E: complexfloating
+reveal_type(c8 + i8) # E: complexfloating
+reveal_type(c8 + c8) # E: complexfloating
+reveal_type(c8 + f4) # E: complexfloating
+reveal_type(c8 + i4) # E: complexfloating
+reveal_type(c8 + b_) # E: complex64
+reveal_type(c8 + b) # E: complexfloating
+reveal_type(c8 + c) # E: complexfloating
+reveal_type(c8 + f) # E: complexfloating
+reveal_type(c8 + i) # E: complexfloating
+reveal_type(c8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(c16 + c8) # E: complexfloating
+reveal_type(f8 + c8) # E: complexfloating
+reveal_type(i8 + c8) # E: complexfloating
+reveal_type(c8 + c8) # E: complexfloating
+reveal_type(f4 + c8) # E: complexfloating
+reveal_type(i4 + c8) # E: complexfloating
+reveal_type(b_ + c8) # E: complex64
+reveal_type(b + c8) # E: complexfloating
+reveal_type(c + c8) # E: complexfloating
+reveal_type(f + c8) # E: complexfloating
+reveal_type(i + c8) # E: complexfloating
+reveal_type(AR + c8) # E: Union[numpy.ndarray, numpy.generic]
+
+# Float
+
+reveal_type(f8 + f8) # E: floating
+reveal_type(f8 + i8) # E: floating
+reveal_type(f8 + f4) # E: floating
+reveal_type(f8 + i4) # E: floating
+reveal_type(f8 + b_) # E: float64
+reveal_type(f8 + b) # E: floating
+reveal_type(f8 + c) # E: complexfloating
+reveal_type(f8 + f) # E: floating
+reveal_type(f8 + i) # E: floating
+reveal_type(f8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f8 + f8) # E: floating
+reveal_type(i8 + f8) # E: floating
+reveal_type(f4 + f8) # E: floating
+reveal_type(i4 + f8) # E: floating
+reveal_type(b_ + f8) # E: float64
+reveal_type(b + f8) # E: floating
+reveal_type(c + f8) # E: complexfloating
+reveal_type(f + f8) # E: floating
+reveal_type(i + f8) # E: floating
+reveal_type(AR + f8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f4 + f8) # E: floating
+reveal_type(f4 + i8) # E: floating
+reveal_type(f4 + f4) # E: floating
+reveal_type(f4 + i4) # E: floating
+reveal_type(f4 + b_) # E: float32
+reveal_type(f4 + b) # E: floating
+reveal_type(f4 + c) # E: complexfloating
+reveal_type(f4 + f) # E: floating
+reveal_type(f4 + i) # E: floating
+reveal_type(f4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(f8 + f4) # E: floating
+reveal_type(i8 + f4) # E: floating
+reveal_type(f4 + f4) # E: floating
+reveal_type(i4 + f4) # E: floating
+reveal_type(b_ + f4) # E: float32
+reveal_type(b + f4) # E: floating
+reveal_type(c + f4) # E: complexfloating
+reveal_type(f + f4) # E: floating
+reveal_type(i + f4) # E: floating
+reveal_type(AR + f4) # E: Union[numpy.ndarray, numpy.generic]
+
+# Int
+
+reveal_type(i8 + i8) # E: signedinteger
+reveal_type(i8 + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i8 + i4) # E: signedinteger
+reveal_type(i8 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i8 + b_) # E: int64
+reveal_type(i8 + b) # E: signedinteger
+reveal_type(i8 + c) # E: complexfloating
+reveal_type(i8 + f) # E: floating
+reveal_type(i8 + i) # E: signedinteger
+reveal_type(i8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u8 + u8) # E: unsignedinteger
+reveal_type(u8 + i4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + u4) # E: unsignedinteger
+reveal_type(u8 + b_) # E: uint64
+reveal_type(u8 + b) # E: unsignedinteger
+reveal_type(u8 + c) # E: complexfloating
+reveal_type(u8 + f) # E: floating
+reveal_type(u8 + i) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + i8) # E: signedinteger
+reveal_type(u8 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i4 + i8) # E: signedinteger
+reveal_type(u4 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(b_ + i8) # E: int64
+reveal_type(b + i8) # E: signedinteger
+reveal_type(c + i8) # E: complexfloating
+reveal_type(f + i8) # E: floating
+reveal_type(i + i8) # E: signedinteger
+reveal_type(AR + i8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u8 + u8) # E: unsignedinteger
+reveal_type(i4 + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + u8) # E: unsignedinteger
+reveal_type(b_ + u8) # E: uint64
+reveal_type(b + u8) # E: unsignedinteger
+reveal_type(c + u8) # E: complexfloating
+reveal_type(f + u8) # E: floating
+reveal_type(i + u8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(AR + u8) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i4 + i8) # E: signedinteger
+reveal_type(i4 + i4) # E: signedinteger
+reveal_type(i4 + i) # E: signedinteger
+reveal_type(i4 + b_) # E: int32
+reveal_type(i4 + b) # E: signedinteger
+reveal_type(i4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(u4 + i8) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + i4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + u8) # E: unsignedinteger
+reveal_type(u4 + u4) # E: unsignedinteger
+reveal_type(u4 + i) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u4 + b_) # E: uint32
+reveal_type(u4 + b) # E: unsignedinteger
+reveal_type(u4 + AR) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + i4) # E: signedinteger
+reveal_type(i4 + i4) # E: signedinteger
+reveal_type(i + i4) # E: signedinteger
+reveal_type(b_ + i4) # E: int32
+reveal_type(b + i4) # E: signedinteger
+reveal_type(AR + i4) # E: Union[numpy.ndarray, numpy.generic]
+
+reveal_type(i8 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(i4 + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(u8 + u4) # E: unsignedinteger
+reveal_type(u4 + u4) # E: unsignedinteger
+reveal_type(b_ + u4) # E: uint32
+reveal_type(b + u4) # E: unsignedinteger
+reveal_type(i + u4) # E: Union[numpy.signedinteger, numpy.float64]
+reveal_type(AR + u4) # E: Union[numpy.ndarray, numpy.generic]
diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py
new file mode 100644
index 000000000..ba8a8eda1
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/array_constructors.py
@@ -0,0 +1,42 @@
+from typing import List
+import numpy as np
+
+class SubClass(np.ndarray): ...
+
+A: np.ndarray
+B: SubClass
+C: List[int]
+
+reveal_type(np.asarray(A)) # E: ndarray
+reveal_type(np.asarray(B)) # E: ndarray
+reveal_type(np.asarray(C)) # E: ndarray
+
+reveal_type(np.asanyarray(A)) # E: ndarray
+reveal_type(np.asanyarray(B)) # E: SubClass
+reveal_type(np.asanyarray(B, dtype=int)) # E: ndarray
+reveal_type(np.asanyarray(C)) # E: ndarray
+
+reveal_type(np.ascontiguousarray(A)) # E: ndarray
+reveal_type(np.ascontiguousarray(B)) # E: ndarray
+reveal_type(np.ascontiguousarray(C)) # E: ndarray
+
+reveal_type(np.asfortranarray(A)) # E: ndarray
+reveal_type(np.asfortranarray(B)) # E: ndarray
+reveal_type(np.asfortranarray(C)) # E: ndarray
+
+reveal_type(np.require(A)) # E: ndarray
+reveal_type(np.require(B)) # E: SubClass
+reveal_type(np.require(B, requirements=None)) # E: SubClass
+reveal_type(np.require(B, dtype=int)) # E: ndarray
+reveal_type(np.require(B, requirements="E")) # E: ndarray
+reveal_type(np.require(B, requirements=["ENSUREARRAY"])) # E: ndarray
+reveal_type(np.require(B, requirements={"F", "E"})) # E: ndarray
+reveal_type(np.require(B, requirements=["C", "OWNDATA"])) # E: SubClass
+reveal_type(np.require(B, requirements="W")) # E: SubClass
+reveal_type(np.require(B, requirements="A")) # E: SubClass
+reveal_type(np.require(C)) # E: ndarray
+
+reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
+reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
+reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/linspace.py b/numpy/typing/tests/data/reveal/linspace.py
deleted file mode 100644
index cfbbdf390..000000000
--- a/numpy/typing/tests/data/reveal/linspace.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import numpy as np
-
-reveal_type(np.linspace(0, 10)) # E: numpy.ndarray
-reveal_type(np.linspace(0, 10, retstep=True)) # E: Tuple[numpy.ndarray, numpy.inexact]
-reveal_type(np.logspace(0, 10)) # E: numpy.ndarray
-reveal_type(np.geomspace(1, 10)) # E: numpy.ndarray
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.py
new file mode 100644
index 000000000..22a4564df
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.py
@@ -0,0 +1,150 @@
+"""
+Tests for miscellaneous (non-magic) ``np.ndarray``/``np.generic`` methods.
+
+More extensive tests are performed for the methods'
+function-based counterpart in `../from_numeric.py`.
+
+"""
+
+import numpy as np
+
+class SubClass(np.ndarray): ...
+
+f8: np.float64
+A: np.ndarray
+B: SubClass
+
+reveal_type(f8.all()) # E: numpy.bool_
+reveal_type(A.all()) # E: numpy.bool_
+reveal_type(A.all(axis=0)) # E: Union[numpy.bool_, numpy.ndarray]
+reveal_type(A.all(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray]
+reveal_type(A.all(out=B)) # E: SubClass
+
+reveal_type(f8.any()) # E: numpy.bool_
+reveal_type(A.any()) # E: numpy.bool_
+reveal_type(A.any(axis=0)) # E: Union[numpy.bool_, numpy.ndarray]
+reveal_type(A.any(keepdims=True)) # E: Union[numpy.bool_, numpy.ndarray]
+reveal_type(A.any(out=B)) # E: SubClass
+
+reveal_type(f8.argmax()) # E: numpy.signedinteger
+reveal_type(A.argmax()) # E: numpy.signedinteger
+reveal_type(A.argmax(axis=0)) # E: Union[numpy.signedinteger, numpy.ndarray]
+reveal_type(A.argmax(out=B)) # E: SubClass
+
+reveal_type(f8.argmin()) # E: numpy.signedinteger
+reveal_type(A.argmin()) # E: numpy.signedinteger
+reveal_type(A.argmin(axis=0)) # E: Union[numpy.signedinteger, numpy.ndarray]
+reveal_type(A.argmin(out=B)) # E: SubClass
+
+reveal_type(f8.argsort()) # E: numpy.ndarray
+reveal_type(A.argsort()) # E: numpy.ndarray
+
+reveal_type(f8.astype(np.int64).choose([()])) # E: numpy.ndarray
+reveal_type(A.choose([0])) # E: numpy.ndarray
+reveal_type(A.choose([0], out=B)) # E: SubClass
+
+reveal_type(f8.clip(1)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.clip(1)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.clip(None, 1)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.clip(1, out=B)) # E: SubClass
+reveal_type(A.clip(None, 1, out=B)) # E: SubClass
+
+reveal_type(f8.compress([0])) # E: numpy.ndarray
+reveal_type(A.compress([0])) # E: numpy.ndarray
+reveal_type(A.compress([0], out=B)) # E: SubClass
+
+reveal_type(f8.conj()) # E: numpy.float64
+reveal_type(A.conj()) # E: numpy.ndarray
+reveal_type(B.conj()) # E: SubClass
+
+reveal_type(f8.conjugate()) # E: numpy.float64
+reveal_type(A.conjugate()) # E: numpy.ndarray
+reveal_type(B.conjugate()) # E: SubClass
+
+reveal_type(f8.cumprod()) # E: numpy.ndarray
+reveal_type(A.cumprod()) # E: numpy.ndarray
+reveal_type(A.cumprod(out=B)) # E: SubClass
+
+reveal_type(f8.cumsum()) # E: numpy.ndarray
+reveal_type(A.cumsum()) # E: numpy.ndarray
+reveal_type(A.cumsum(out=B)) # E: SubClass
+
+reveal_type(f8.max()) # E: numpy.number
+reveal_type(A.max()) # E: numpy.number
+reveal_type(A.max(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.max(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.max(out=B)) # E: SubClass
+
+reveal_type(f8.mean()) # E: numpy.number
+reveal_type(A.mean()) # E: numpy.number
+reveal_type(A.mean(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.mean(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.mean(out=B)) # E: SubClass
+
+reveal_type(f8.min()) # E: numpy.number
+reveal_type(A.min()) # E: numpy.number
+reveal_type(A.min(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.min(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.min(out=B)) # E: SubClass
+
+reveal_type(f8.newbyteorder()) # E: numpy.float64
+reveal_type(A.newbyteorder()) # E: numpy.ndarray
+reveal_type(B.newbyteorder('|')) # E: SubClass
+
+reveal_type(f8.prod()) # E: numpy.number
+reveal_type(A.prod()) # E: numpy.number
+reveal_type(A.prod(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.prod(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.prod(out=B)) # E: SubClass
+
+reveal_type(f8.ptp()) # E: numpy.number
+reveal_type(A.ptp()) # E: numpy.number
+reveal_type(A.ptp(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.ptp(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.ptp(out=B)) # E: SubClass
+
+reveal_type(f8.round()) # E: numpy.float64
+reveal_type(A.round()) # E: numpy.ndarray
+reveal_type(A.round(out=B)) # E: SubClass
+
+reveal_type(f8.repeat(1)) # E: numpy.ndarray
+reveal_type(A.repeat(1)) # E: numpy.ndarray
+reveal_type(B.repeat(1)) # E: numpy.ndarray
+
+reveal_type(f8.std()) # E: numpy.number
+reveal_type(A.std()) # E: numpy.number
+reveal_type(A.std(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.std(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.std(out=B)) # E: SubClass
+
+reveal_type(f8.sum()) # E: numpy.number
+reveal_type(A.sum()) # E: numpy.number
+reveal_type(A.sum(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.sum(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.sum(out=B)) # E: SubClass
+
+reveal_type(f8.take(0)) # E: numpy.generic
+reveal_type(A.take(0)) # E: numpy.generic
+reveal_type(A.take([0])) # E: numpy.ndarray
+reveal_type(A.take(0, out=B)) # E: SubClass
+reveal_type(A.take([0], out=B)) # E: SubClass
+
+reveal_type(f8.var()) # E: numpy.number
+reveal_type(A.var()) # E: numpy.number
+reveal_type(A.var(axis=0)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.var(keepdims=True)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.var(out=B)) # E: SubClass
+
+reveal_type(A.argpartition([0])) # E: numpy.ndarray
+
+reveal_type(A.diagonal()) # E: numpy.ndarray
+
+reveal_type(A.dot(1)) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.dot(1, out=B)) # E: SubClass
+
+reveal_type(A.nonzero()) # E: tuple[numpy.ndarray]
+
+reveal_type(A.searchsorted([1])) # E: numpy.ndarray
+
+reveal_type(A.trace()) # E: Union[numpy.number, numpy.ndarray]
+reveal_type(A.trace(out=B)) # E: SubClass
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.py
index 882fe9612..ec3713b0f 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.py
@@ -12,22 +12,5 @@ reveal_type(x.itemsize) # E: int
reveal_type(x.shape) # E: tuple[builtins.int]
reveal_type(x.strides) # E: tuple[builtins.int]
-# Time structures
-dt = np.datetime64(0, "D")
-td = np.timedelta64(0, "D")
-
-reveal_type(dt + td) # E: numpy.datetime64
-reveal_type(dt + 1) # E: numpy.datetime64
-reveal_type(dt - dt) # E: numpy.timedelta64
-reveal_type(dt - 1) # E: numpy.timedelta64
-
-reveal_type(td + td) # E: numpy.timedelta64
-reveal_type(td + 1) # E: numpy.timedelta64
-reveal_type(td - td) # E: numpy.timedelta64
-reveal_type(td - 1) # E: numpy.timedelta64
-reveal_type(td / 1.0) # E: numpy.timedelta64
-reveal_type(td / td) # E: float
-reveal_type(td % td) # E: numpy.timedelta64
-
reveal_type(np.complex64().real) # E: numpy.float32
reveal_type(np.complex128().imag) # E: numpy.float64
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index beb53ddec..cba1dc1be 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -36,6 +36,7 @@ def get_test_cases(directory):
)
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_success(path):
@@ -50,6 +51,7 @@ def test_success(path):
assert re.match(r"Success: no issues found in \d+ source files?", stdout.strip())
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
def test_fail(path):
@@ -99,6 +101,7 @@ def test_fail(path):
pytest.fail(f"Error {repr(errors[lineno])} not found")
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
def test_reveal(path):
@@ -130,6 +133,7 @@ def test_reveal(path):
assert marker in error_line
+@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
def test_code_runs(path):
diff --git a/setup.py b/setup.py
index 1f5212676..acd015d1d 100755
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@ import os
import sys
import subprocess
import textwrap
-import sysconfig
+import warnings
if sys.version_info[:2] < (3, 6):
@@ -43,10 +43,12 @@ Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+Programming Language :: Python :: 3.9
Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
+Typing :: Typed
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
@@ -59,6 +61,14 @@ MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
+# The first version not in the `Programming Language :: Python :: ...` classifiers above
+if sys.version_info >= (3, 10):
+ warnings.warn(
+ f"NumPy {VERSION} may not yet support Python "
+ f"{sys.version_info.major}.{sys.version_info.minor}.",
+ RuntimeWarning,
+ )
+
# Return the git revision as a string
def git_version():
@@ -88,6 +98,7 @@ def git_version():
return GIT_REVISION
+
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
@@ -150,7 +161,7 @@ if not release:
a.close()
-def configuration(parent_package='',top_path=None):
+def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
@@ -163,7 +174,7 @@ def configuration(parent_package='',top_path=None):
config.add_data_files(('numpy', 'LICENSE.txt'))
config.add_data_files(('numpy', 'numpy/*.pxd'))
- config.get_version('numpy/version.py') # sets config.version
+ config.get_version('numpy/version.py') # sets config.version
return config
@@ -175,13 +186,12 @@ def check_submodules():
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
- for l in f:
- if 'path' in l:
- p = l.split('=')[-1].strip()
+ for line in f:
+ if 'path' in line:
+ p = line.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule {} missing'.format(p))
-
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
@@ -273,9 +283,9 @@ def generate_cython():
print("Cythonizing sources")
for d in ('random',):
p = subprocess.call([sys.executable,
- os.path.join(cwd, 'tools', 'cythonize.py'),
- 'numpy/{0}'.format(d)],
- cwd=cwd)
+ os.path.join(cwd, 'tools', 'cythonize.py'),
+ 'numpy/{0}'.format(d)],
+ cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
@@ -346,7 +356,6 @@ def parse_setuppy_commands():
"""))
return False
-
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
@@ -384,8 +393,8 @@ def parse_setuppy_commands():
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
- 'register', 'check', 'install_data', 'install_headers',
- 'install_lib', 'install_scripts', ):
+ 'register', 'check', 'install_data', 'install_headers',
+ 'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
@@ -405,7 +414,8 @@ def parse_setuppy_commands():
# If we got here, we didn't detect what setup.py command was given
import warnings
warnings.warn("Unrecognized setuptools command, proceeding with "
- "generating Cython sources and expanding templates", stacklevel=2)
+ "generating Cython sources and expanding templates",
+ stacklevel=2)
return True
@@ -440,25 +450,24 @@ def setup_package():
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
- cmdclass={"sdist": sdist_checked,
- }
+ cmdclass = {"sdist": sdist_checked, }
metadata = dict(
- name = 'numpy',
- maintainer = "NumPy Developers",
- maintainer_email = "numpy-discussion@python.org",
- description = DOCLINES[0],
- long_description = "\n".join(DOCLINES[2:]),
- url = "https://www.numpy.org",
- author = "Travis E. Oliphant et al.",
- download_url = "https://pypi.python.org/pypi/numpy",
+ name='numpy',
+ maintainer="NumPy Developers",
+ maintainer_email="numpy-discussion@python.org",
+ description=DOCLINES[0],
+ long_description="\n".join(DOCLINES[2:]),
+ url="https://www.numpy.org",
+ author="Travis E. Oliphant et al.",
+ download_url="https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
"Documentation": get_docs_url(),
"Source Code": "https://github.com/numpy/numpy",
},
- license = 'BSD',
+ license='BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
- platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
+ platforms=["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='pytest',
cmdclass=cmdclass,
python_requires='>=3.6',
@@ -479,8 +488,7 @@ def setup_package():
# patches distutils, even though we don't use it
import setuptools # noqa: F401
from numpy.distutils.core import setup
- cwd = os.path.abspath(os.path.dirname(__file__))
- if not 'sdist' in sys.argv:
+ if 'sdist' not in sys.argv:
# Generate Cython sources, unless we're generating an sdist
generate_cython()
diff --git a/test_requirements.txt b/test_requirements.txt
index c86d46eb8..067dd07c8 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,7 +1,7 @@
cython==0.29.21
wheel
setuptools<49.2.0
-hypothesis==5.36.1
+hypothesis==5.37.0
pytest==6.0.2
pytz==2020.1
pytest-cov==2.10.1