summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.coveragerc1
-rw-r--r--README.md13
-rw-r--r--azure-pipelines.yml40
-rw-r--r--benchmarks/benchmarks/bench_random.py13
-rw-r--r--doc/neps/nep-template.rst29
-rw-r--r--doc/source/docs/howto_build_docs.rst15
-rw-r--r--doc/source/reference/arrays.classes.rst128
-rw-r--r--doc/source/reference/c-api.array.rst122
-rw-r--r--doc/source/reference/c-api.types-and-structures.rst28
-rw-r--r--doc/source/reference/c-api.ufunc.rst22
-rw-r--r--doc/source/reference/random/index.rst38
-rw-r--r--doc/source/reference/random/new-or-different.rst8
-rw-r--r--doc/source/reference/routines.char.rst1
-rw-r--r--doc/source/reference/routines.other.rst19
-rw-r--r--doc/source/user/basics.dispatch.rst8
-rw-r--r--doc/source/user/basics.rst1
-rw-r--r--doc/source/user/quickstart.rst4
-rw-r--r--numpy/core/_add_newdocs.py275
-rw-r--r--numpy/core/arrayprint.py1
-rw-r--r--numpy/core/fromnumeric.py22
-rw-r--r--numpy/core/function_base.py99
-rw-r--r--numpy/core/include/numpy/ufuncobject.h12
-rw-r--r--numpy/core/multiarray.py9
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src16
-rw-r--r--numpy/core/src/multiarray/descriptor.c3
-rw-r--r--numpy/core/tests/test_dtype.py30
-rw-r--r--numpy/distutils/fcompiler/compaq.py2
-rw-r--r--numpy/doc/basics.py4
-rw-r--r--numpy/doc/broadcasting.py4
-rw-r--r--numpy/doc/dispatch.py271
-rw-r--r--numpy/doc/ufuncs.py4
-rw-r--r--numpy/fft/pocketfft.py42
-rw-r--r--numpy/lib/format.py2
-rw-r--r--numpy/lib/function_base.py7
-rw-r--r--numpy/lib/tests/test_function_base.py16
-rw-r--r--numpy/linalg/lapack_lite/fortran.py2
-rw-r--r--numpy/ma/core.py47
-rw-r--r--numpy/polynomial/polyutils.py1
-rw-r--r--numpy/polynomial/tests/test_classes.py13
-rw-r--r--numpy/polynomial/tests/test_polynomial.py4
-rw-r--r--numpy/random/generator.pyx98
-rw-r--r--numpy/random/tests/test_generator_mt19937.py7
-rw-r--r--shippable.yml12
-rw-r--r--tools/npy_tempita/__init__.py56
-rw-r--r--tools/openblas_support.py167
-rwxr-xr-xtools/pypy-test.sh19
-rwxr-xr-xtools/test-installed-numpy.py66
-rwxr-xr-xtools/travis-before-install.sh11
-rwxr-xr-xtools/travis-test.sh4
-rw-r--r--tox.ini6
50 files changed, 1210 insertions, 612 deletions
diff --git a/.coveragerc b/.coveragerc
index 1f61c25a4..9048b9cc4 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,3 +1,4 @@
[run]
branch = True
include = */numpy/*
+disable_warnings = include-ignored
diff --git a/README.md b/README.md
index f1d024565..46fff43a0 100644
--- a/README.md
+++ b/README.md
@@ -35,4 +35,17 @@ Tests can then be run after installation with:
python -c 'import numpy; numpy.test()'
+
+Call for Contributions
+----------------------
+
+NumPy appreciates help from a wide range of different backgrounds.
+Work such as high level documentation or website improvements are valuable
+and we would like to grow our team with people filling these roles.
+Small improvements or fixes are always appreciated and issues labeled as easy
+may be a good starting point.
+If you are considering larger contributions outside the traditional coding work,
+please contact us through the mailing list.
+
+
[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 9e9001611..86aed8dab 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -20,21 +20,17 @@ jobs:
docker pull i386/ubuntu:bionic
docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \
apt-get -y update && \
- apt-get -y install python3.6-dev python3-pip locales && \
+ apt-get -y install python3.6-dev python3-pip locales python3-certifi && \
locale-gen fr_FR && update-locale && \
pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \
apt-get -y install gfortran-5 wget && \
- cd .. && \
- mkdir openblas && cd openblas && \
- wget https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-manylinux1_i686.tar.gz && \
- tar zxvf openblas-v0.3.5-274-g6a8b4269-manylinux1_i686.tar.gz && \
- cp -r ./usr/local/lib/* /usr/lib && \
- cp ./usr/local/include/* /usr/include && \
- cd ../numpy && \
+ target=\$(python3 tools/openblas_support.py) && \
+ cp -r \$target/usr/local/lib/* /usr/lib && \
+ cp \$target/usr/local/include/* /usr/include && \
python3 -m pip install . && \
F77=gfortran-5 F90=gfortran-5 \
CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml && \
- cd ../openblas && python3 -c \"$(TEST_GET_CONFIG)\""
+ cd .. && python3 -c \"$(TEST_GET_CONFIG)\""
displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
- task: PublishTestResults@2
condition: succeededOrFailed()
@@ -83,11 +79,10 @@ jobs:
# matches our MacOS wheel builds -- currently based
# primarily on file size / name details
- script: |
- wget "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-macosx_10_9_x86_64-gf_1becaaa.tar.gz"
- tar -zxvf openblas-v0.3.5-274-g6a8b4269-macosx_10_9_x86_64-gf_1becaaa.tar.gz
+ target=$(python tools/openblas_support.py)
# manually link to appropriate system paths
- cp ./usr/local/lib/* /usr/local/lib/
- cp ./usr/local/include/* /usr/local/include/
+ cp $target/usr/local/lib/* /usr/local/lib/
+ cp $target/usr/local/include/* /usr/local/include/
displayName: 'install pre-built openblas'
- script: python -m pip install --upgrade pip setuptools wheel
displayName: 'Install tools'
@@ -124,11 +119,6 @@ jobs:
- job: Windows
pool:
vmImage: 'VS2017-Win2016'
- variables:
- # openblas URLs from numpy-wheels
- # appveyor / Windows config
- OPENBLAS_32: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-win32-gcc_7_1_0.zip"
- OPENBLAS_64: "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.5-274-g6a8b4269-win_amd64-gcc_7_1_0.zip"
strategy:
maxParallel: 6
matrix:
@@ -136,33 +126,28 @@ jobs:
PYTHON_VERSION: '3.6'
PYTHON_ARCH: 'x86'
TEST_MODE: fast
- OPENBLAS: $(OPENBLAS_32)
BITS: 32
Python37-32bit-fast:
PYTHON_VERSION: '3.7'
PYTHON_ARCH: 'x86'
TEST_MODE: fast
- OPENBLAS: $(OPENBLAS_32)
BITS: 32
Python35-64bit-full:
PYTHON_VERSION: '3.5'
PYTHON_ARCH: 'x64'
TEST_MODE: full
- OPENBLAS: $(OPENBLAS_64)
BITS: 64
Python36-64bit-full:
PYTHON_VERSION: '3.6'
PYTHON_ARCH: 'x64'
TEST_MODE: full
INSTALL_PICKLE5: 1
- OPENBLAS: $(OPENBLAS_64)
BITS: 64
Python37-64bit-full:
PYTHON_VERSION: '3.7'
PYTHON_ARCH: 'x64'
TEST_MODE: full
INSTALL_PICKLE5: 1
- OPENBLAS: $(OPENBLAS_64)
BITS: 64
steps:
- task: UsePythonVersion@0
@@ -176,17 +161,16 @@ jobs:
displayName: 'Install dependencies; some are optional to avoid test skips'
- script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5
displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)'
+
- powershell: |
- $wc = New-Object net.webclient
- $wc.Downloadfile("$(OPENBLAS)", "openblas.zip")
- $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
- Expand-Archive "openblas.zip" $tmpdir
$pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
Write-Host "Python Version: $pyversion"
$target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
Write-Host "target path: $target"
- cp $tmpdir\$(BITS)\lib\libopenblas_v0.3.5-274-g6a8b4269-gcc_7_1_0.a $target
+ $openblas = python tools/openblas_support.py
+ cp $openblas $target
displayName: 'Download / Install OpenBLAS'
+
- powershell: |
choco install -y mingw --forcex86 --force --version=5.3.0
displayName: 'Install 32-bit mingw for 32-bit builds'
diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py
index d9302a494..c52b463e5 100644
--- a/benchmarks/benchmarks/bench_random.py
+++ b/benchmarks/benchmarks/bench_random.py
@@ -173,3 +173,16 @@ class Bounded(Benchmark):
self.rg.randint(0, max + 1, nom_size, dtype=dt)
else:
self.rg.integers(0, max + 1, nom_size, dtype=dt)
+
+class Choice(Benchmark):
+ params = [1e3, 1e6, 1e8]
+
+ def setup(self, v):
+ self.a = np.arange(v)
+ self.rng = np.random.default_rng()
+
+ def time_legacy_choice(self, v):
+ np.random.choice(self.a, 1000, replace=False)
+
+ def time_choice(self, v):
+ self.rng.choice(self.a, 1000, replace=False)
diff --git a/doc/neps/nep-template.rst b/doc/neps/nep-template.rst
index e869ebae3..2b49ec709 100644
--- a/doc/neps/nep-template.rst
+++ b/doc/neps/nep-template.rst
@@ -8,19 +8,36 @@ NEP Template and Instructions
:Created: <date created on, in yyyy-mm-dd format>
:Resolution: <url> (required for Accepted | Rejected | Withdrawn)
+
Abstract
--------
The abstract should be a short description of what the NEP will achieve.
+Motivation and Scope
+--------------------
+
+This section describes the need for the proposed change. It should describe
+the existing problem, who it affects, what it is trying to solve, and why.
+This section should explicitly address the scope of and key requirements for
+the proposed change.
+
+
Detailed description
--------------------
-This section describes the need for the NEP. It should describe the existing
-problem that it is trying to solve and why this NEP makes the situation better.
-It should include examples of how the new functionality would be used and
-perhaps some use cases.
+This section should provide a detailed description of the proposed change.
+It should include examples of how the new functionality would be used,
+intended use-cases and pseudo-code illustrating its use.
+
+
+Related Work
+------------
+
+This section should list relevant and/or similar technologies, possibly in other
+libraries. It does not need to be comprehensive, just list the major examples of
+prior and relevant art.
Implementation
@@ -28,8 +45,8 @@ Implementation
This section lists the major steps required to implement the NEP. Where
possible, it should be noted where one step is dependent on another, and which
-steps may be optionally omitted. Where it makes sense, each step should
-include a link related pull requests as the implementation progresses.
+steps may be optionally omitted. Where it makes sense, each step should
+include a link to related pull requests as the implementation progresses.
Any pull requests or development branches containing work on this NEP should
be linked to from here. (A NEP does not need to be implemented in a single
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst
index 98d1b88ba..4bb7628c1 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/docs/howto_build_docs.rst
@@ -30,11 +30,9 @@ In addition, building the documentation requires the Sphinx extension
`plot_directive`, which is shipped with Matplotlib_. This Sphinx extension can
be installed by installing Matplotlib. You will also need python3.6.
-Since large parts of the main documentation are stored in
-docstrings, you will need to first build NumPy, and install it so
-that the correct version is imported by
-
- >>> import numpy
+Since large parts of the main documentation are obtained from numpy via
+``import numpy`` and examining the docstrings, you will need to first build
+NumPy, and install it so that the correct version is imported.
Note that you can eg. install NumPy to a temporary location and set
the PYTHONPATH environment variable appropriately.
@@ -46,8 +44,11 @@ generate the docs, so write::
make html
in the ``doc/`` directory. If all goes well, this will generate a
-``build/html`` subdirectory containing the built documentation. Note
-that building the documentation on Windows is currently not actively
+``build/html`` subdirectory containing the built documentation. If you get
+a message about ``installed numpy != current repo git version``, you must
+either override the check by setting ``GITVER`` or re-install NumPy.
+
+Note that building the documentation on Windows is currently not actively
supported, though it should be possible. (See Sphinx_ documentation
for more information.)
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 4f97b4ece..a91215476 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -6,8 +6,15 @@ Standard array subclasses
.. currentmodule:: numpy
-The :class:`ndarray` in NumPy is a "new-style" Python
-built-in-type. Therefore, it can be inherited from (in Python or in C)
+.. note::
+
+ Subclassing a ``numpy.ndarray`` is possible but if your goal is to create
+ an array with *modified* behavior, as do dask arrays for distributed
+ computation and cupy arrays for GPU-based computation, subclassing is
+ discouraged. Instead, using numpy's
+ :ref:`dispatch mechanism <basics.dispatch>` is recommended.
+
+The :class:`ndarray` can be inherited from (in Python or in C)
if desired. Therefore, it can form a foundation for many useful
classes. Often whether to sub-class the array object or to simply use
the core array component as an internal part of a new class is a
@@ -147,6 +154,121 @@ NumPy provides several hooks that classes can customize:
:func:`__array_prepare__`, :data:`__array_priority__` mechanism
described below for ufuncs (which may eventually be deprecated).
+.. py:method:: class.__array_function__(func, types, args, kwargs)
+
+ .. versionadded:: 1.16
+
+ .. note::
+
+ - In NumPy 1.17, the protocol is enabled by default, but can be disabled
+ with ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0``.
+ - In NumPy 1.16, you need to set the environment variable
+ ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to use
+ NumPy function overrides.
+    - Eventually, expect ``__array_function__`` to always be enabled.
+
+ - ``func`` is an arbitrary callable exposed by NumPy's public API,
+ which was called in the form ``func(*args, **kwargs)``.
+ - ``types`` is a `collection <collections.abc.Collection>`_
+ of unique argument types from the original NumPy function call that
+ implement ``__array_function__``.
+ - The tuple ``args`` and dict ``kwargs`` are directly passed on from the
+ original call.
+
+ As a convenience for ``__array_function__`` implementors, ``types``
+ provides all argument types with an ``'__array_function__'`` attribute.
+ This allows implementors to quickly identify cases where they should defer
+ to ``__array_function__`` implementations on other arguments.
+ Implementations should not rely on the iteration order of ``types``.
+
+ Most implementations of ``__array_function__`` will start with two
+ checks:
+
+ 1. Is the given function something that we know how to overload?
+ 2. Are all arguments of a type that we know how to handle?
+
+ If these conditions hold, ``__array_function__`` should return the result
+ from calling its implementation for ``func(*args, **kwargs)``. Otherwise,
+ it should return the sentinel value ``NotImplemented``, indicating that the
+ function is not implemented by these types.
+
+ There are no general requirements on the return value from
+ ``__array_function__``, although most sensible implementations should
+ probably return array(s) with the same type as one of the function's
+ arguments.
+
+  It may also be convenient to define a custom decorator (``implements``
+ below) for registering ``__array_function__`` implementations.
+
+ .. code:: python
+
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ # Note: this allows subclasses that don't override
+ # __array_function__ to handle MyArray objects
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementation for MyArray objects."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ @implements(np.concatenate)
+ def concatenate(arrays, axis=0, out=None):
+ ... # implementation of concatenate for MyArray objects
+
+ @implements(np.broadcast_to)
+ def broadcast_to(array, shape):
+ ... # implementation of broadcast_to for MyArray objects
+
+ Note that it is not required for ``__array_function__`` implementations to
+ include *all* of the corresponding NumPy function's optional arguments
+ (e.g., ``broadcast_to`` above omits the irrelevant ``subok`` argument).
+ Optional arguments are only passed in to ``__array_function__`` if they
+ were explicitly used in the NumPy function call.
+
+ Just like the case for builtin special methods like ``__add__``, properly
+ written ``__array_function__`` methods should always return
+ ``NotImplemented`` when an unknown type is encountered. Otherwise, it will
+ be impossible to correctly override NumPy functions from another object
+ if the operation also includes one of your objects.
+
+ For the most part, the rules for dispatch with ``__array_function__``
+ match those for ``__array_ufunc__``. In particular:
+
+ - NumPy will gather implementations of ``__array_function__`` from all
+ specified inputs and call them in order: subclasses before
+ superclasses, and otherwise left to right. Note that in some edge cases
+ involving subclasses, this differs slightly from the
+ `current behavior <https://bugs.python.org/issue30140>`_ of Python.
+ - Implementations of ``__array_function__`` indicate that they can
+ handle the operation by returning any value other than
+ ``NotImplemented``.
+ - If all ``__array_function__`` methods return ``NotImplemented``,
+ NumPy will raise ``TypeError``.
+
+  If no ``__array_function__`` methods exist, NumPy will default to calling
+ its own implementation, intended for use on NumPy arrays. This case arises,
+ for example, when all array-like arguments are Python numbers or lists.
+ (NumPy arrays do have a ``__array_function__`` method, given below, but it
+ always returns ``NotImplemented`` if any argument other than a NumPy array
+ subclass implements ``__array_function__``.)
+
+ One deviation from the current behavior of ``__array_ufunc__`` is that
+ NumPy will only call ``__array_function__`` on the *first* argument of each
+ unique type. This matches Python's `rule for calling reflected methods
+ <https://docs.python.org/3/reference/datamodel.html#object.__ror__>`_, and
+ this ensures that checking overloads has acceptable performance even when
+ there are a large number of overloaded arguments.
+
.. py:method:: class.__array_finalize__(obj)
This method is called whenever the system internally allocates a
@@ -448,7 +570,7 @@ object, then the Python code::
some code involving val
...
-calls ``val = myiter.next()`` repeatedly until :exc:`StopIteration` is
+calls ``val = next(myiter)`` repeatedly until :exc:`StopIteration` is
raised by the iterator. There are several ways to iterate over an
array that may be useful: default iteration, flat iteration, and
:math:`N`-dimensional enumeration.
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 39f936414..3d6246baa 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -20,27 +20,44 @@ Array API
Array structure and data access
-------------------------------
-These macros all access the :c:type:`PyArrayObject` structure members. The input
-argument, arr, can be any :c:type:`PyObject *<PyObject>` that is directly interpretable
-as a :c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type` and its
-sub-types).
+These macros access the :c:type:`PyArrayObject` structure members and are
+defined in ``ndarraytypes.h``. The input argument, *arr*, can be any
+:c:type:`PyObject *<PyObject>` that is directly interpretable as a
+:c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type`
+and its sub-types).
.. c:function:: int PyArray_NDIM(PyArrayObject *arr)
The number of dimensions in the array.
-.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
+.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
- Returns a pointer to the dimensions/shape of the array. The
- number of elements matches the number of dimensions
- of the array. Can return ``NULL`` for 0-dimensional arrays.
+ Returns an integer representing the :ref:`array-flags<array-flags>`.
-.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
+.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
+
+ Return the (builtin) typenumber for the elements of this array.
+
+.. c:function:: int PyArray_SETITEM( \
+ PyArrayObject* arr, void* itemptr, PyObject* obj)
+
+ Convert obj and place it in the ndarray, *arr*, at the place
+ pointed to by itemptr. Return -1 if an error occurs or 0 on
+ success.
+
+.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
.. versionadded:: 1.7
- A synonym for PyArray_DIMS, named to be consistent with the
- 'shape' usage within Python.
+ Enables the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
+
+.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
+
+ .. versionadded:: 1.7
+
+ Clears the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
.. c:function:: void *PyArray_DATA(PyArrayObject *arr)
@@ -53,6 +70,19 @@ sub-types).
array then be sure you understand how to access the data in the
array to avoid memory and/or alignment problems.
+.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
+
+ Returns a pointer to the dimensions/shape of the array. The
+ number of elements matches the number of dimensions
+ of the array. Can return ``NULL`` for 0-dimensional arrays.
+
+.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
+
+ .. versionadded:: 1.7
+
+ A synonym for :c:func:`PyArray_DIMS`, named to be consistent with the
+ `shape <numpy.ndarray.shape>` usage within Python.
+
.. c:function:: npy_intp *PyArray_STRIDES(PyArrayObject* arr)
Returns a pointer to the strides of the array. The
@@ -67,6 +97,27 @@ sub-types).
Return the stride in the *n* :math:`^{\textrm{th}}` dimension.
+.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
+
+ Return the itemsize for the elements of this array.
+
+ Note that, in the old API that was deprecated in version 1.7, this function
+ had the return type ``int``.
+
+.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
+
+ Returns the total size (in number of elements) of the array.
+
+.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
+
+ Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
+ returns the total number of elements in the array. Safer version
+ of :c:func:`PyArray_SIZE` (*obj*).
+
+.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
+
+ Returns the total number of bytes consumed by the array.
+
.. c:function:: PyObject *PyArray_BASE(PyArrayObject* arr)
This returns the base object of the array. In most cases, this
@@ -93,60 +144,12 @@ sub-types).
A synonym for PyArray_DESCR, named to be consistent with the
'dtype' usage within Python.
-.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Enables the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Clears the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
-
-.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
-
- Return the itemsize for the elements of this array.
-
- Note that, in the old API that was deprecated in version 1.7, this function
- had the return type ``int``.
-
-.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
-
- Return the (builtin) typenumber for the elements of this array.
-
.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
Get a Python object of a builtin type from the ndarray, *arr*,
at the location pointed to by itemptr. Return ``NULL`` on failure.
`numpy.ndarray.item` is identical to PyArray_GETITEM.
-
-.. c:function:: int PyArray_SETITEM( \
- PyArrayObject* arr, void* itemptr, PyObject* obj)
-
- Convert obj and place it in the ndarray, *arr*, at the place
- pointed to by itemptr. Return -1 if an error occurs or 0 on
- success.
-
-.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
-
- Returns the total size (in number of elements) of the array.
-
-.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
-
- Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
- returns the total number of elements in the array. Safer version
- of :c:func:`PyArray_SIZE` (*obj*).
-
-.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
-
- Returns the total number of bytes consumed by the array.
Data access
@@ -1397,6 +1400,7 @@ Special functions for NPY_OBJECT
Returns 0 for success, -1 for failure.
+.. _array-flags:
Array flags
-----------
diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst
index a716b5a06..336dff211 100644
--- a/doc/source/reference/c-api.types-and-structures.rst
+++ b/doc/source/reference/c-api.types-and-structures.rst
@@ -1,3 +1,4 @@
+
*****************************
Python Types and C-Structures
*****************************
@@ -75,7 +76,8 @@ PyArray_Type and PyArrayObject
these structure members should normally be accessed using the
provided macros. If you need a shorter name, then you can make use
of :c:type:`NPY_AO` (deprecated) which is defined to be equivalent to
- :c:type:`PyArrayObject`.
+ :c:type:`PyArrayObject`. Direct access to the struct fields are
+ deprecated. Use the `PyArray_*(arr)` form instead.
.. code-block:: c
@@ -103,7 +105,8 @@ PyArray_Type and PyArrayObject
.. c:member:: char *PyArrayObject.data
- A pointer to the first element of the array. This pointer can
+ Accessible via :c:data:`PyArray_DATA`, this data member is a
+ pointer to the first element of the array. This pointer can
(and normally should) be recast to the data type of the array.
.. c:member:: int PyArrayObject.nd
@@ -111,26 +114,29 @@ PyArray_Type and PyArrayObject
An integer providing the number of dimensions for this
array. When nd is 0, the array is sometimes called a rank-0
array. Such arrays have undefined dimensions and strides and
- cannot be accessed. :c:data:`NPY_MAXDIMS` is the largest number of
- dimensions for any array.
+ cannot be accessed. Macro :c:data:`PyArray_NDIM` defined in
+ ``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
+ is the largest number of dimensions for any array.
.. c:member:: npy_intp PyArrayObject.dimensions
An array of integers providing the shape in each dimension as
long as nd :math:`\geq` 1. The integer is always large enough
to hold a pointer on the platform, so the dimension size is
- only limited by memory.
+ only limited by memory. :c:data:`PyArray_DIMS` is the macro
+ associated with this data member.
.. c:member:: npy_intp *PyArrayObject.strides
An array of integers providing for each dimension the number of
bytes that must be skipped to get to the next element in that
- dimension.
+ dimension. Associated with macro :c:data:`PyArray_STRIDES`.
.. c:member:: PyObject *PyArrayObject.base
- This member is used to hold a pointer to another Python object that
- is related to this array. There are two use cases:
+ Pointed to by :c:data:`PyArray_BASE`, this member is used to hold a
+ pointer to another Python object that is related to this array.
+ There are two use cases:
- If this array does not own its own memory, then base points to the
Python object that owns it (perhaps another array object)
@@ -149,11 +155,13 @@ PyArray_Type and PyArrayObject
descriptor structure for each data type supported. This
descriptor structure contains useful information about the type
as well as a pointer to a table of function pointers to
- implement specific functionality.
+ implement specific functionality. As the name suggests, it is
+ associated with the macro :c:data:`PyArray_DESCR`.
.. c:member:: int PyArrayObject.flags
- Flags indicating how the memory pointed to by data is to be
+ Pointed to by the macro :c:data:`PyArray_FLAGS`, this data member represents
+ the flags indicating how the memory pointed to by data is to be
interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
:c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
:c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst
index ba5673cc3..92a679510 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api.ufunc.rst
@@ -49,28 +49,6 @@ Macros
Used in universal function code to re-acquire the Python GIL if it
was released (because loop->obj was not true).
-.. c:function:: UFUNC_CHECK_ERROR(loop)
-
- A macro used internally to check for errors and goto fail if
- found. This macro requires a fail label in the current code
- block. The *loop* variable must have at least members (obj,
- errormask, and errorobj). If *loop* ->obj is nonzero, then
- :c:func:`PyErr_Occurred` () is called (meaning the GIL must be held). If
- *loop* ->obj is zero, then if *loop* ->errormask is nonzero,
- :c:func:`PyUFunc_checkfperr` is called with arguments *loop* ->errormask
- and *loop* ->errobj. If the result of this check of the IEEE
- floating point registers is true then the code redirects to the
- fail label which must be defined.
-
-.. c:function:: UFUNC_CHECK_STATUS(ret)
-
- Deprecated: use npy_clear_floatstatus from npy_math.h instead.
-
- A macro that expands to platform-dependent code. The *ret*
- variable can be any integer. The :c:data:`UFUNC_FPE_{ERR}` bits are
- set in *ret* according to the status of the corresponding error
- flags of the floating point processor.
-
Functions
---------
diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst
index 51e72513d..fb79e0306 100644
--- a/doc/source/reference/random/index.rst
+++ b/doc/source/reference/random/index.rst
@@ -41,10 +41,28 @@ statistically more reliable than the legacy methods in `~.RandomState`
from numpy import random
random.standard_normal()
-`~Generator` can be used as a direct replacement for `~.RandomState`, although
-the random values are generated by `~.PCG64`. The
-`~Generator` holds an instance of a BitGenerator. It is accessible as
-``gen.bit_generator``.
+`~Generator` can be used as a replacement for `~.RandomState`. Both class
+instances now hold an internal `BitGenerator` instance to provide the bit
+stream, it is accessible as ``gen.bit_generator``. Some long-overdue API
+cleanup means that legacy and compatibility methods have been removed from
+`~.Generator`
+
+=================== ============== ============
+`~.RandomState` `~.Generator` Notes
+------------------- -------------- ------------
+``random_sample``, ``random`` Compatible with `random.random`
+``rand``
+------------------- -------------- ------------
+``randint``, ``integers`` Add an ``endpoint`` kwarg
+``random_integers``
+------------------- -------------- ------------
+``tomaxint`` removed Use ``integers(0, np.iinfo(np.int).max,``
+ ``endpoint=False)``
+------------------- -------------- ------------
+``seed`` removed Use `~.SeedSequence.spawn`
+=================== ============== ============
+
+See `new-or-different` for more information
.. code-block:: python
@@ -55,6 +73,18 @@ the random values are generated by `~.PCG64`. The
rg.standard_normal()
rg.bit_generator
+Something like the following code can be used to support both ``RandomState``
+and ``Generator``, with the understanding that the interfaces are slightly
+different
+
+.. code-block:: python
+
+ try:
+ rg_integers = rg.integers
+ except AttributeError:
+ rg_integers = rg.randint
+ a = rg_integers(1000)
+
Seeds can be passed to any of the BitGenerators. The provided value is mixed
via `~.SeedSequence` to spread a possible sequence of seeds across a wider
range of initialization states for the BitGenerator. Here `~.PCG64` is used and
diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst
index 4eb175d57..5442f46c9 100644
--- a/doc/source/reference/random/new-or-different.rst
+++ b/doc/source/reference/random/new-or-different.rst
@@ -23,10 +23,12 @@ Feature Older Equivalent Notes
source, called a `BitGenerator
<bit_generators>` A number of these
are provided. ``RandomState`` uses
- only the Mersenne Twister.
+ the Mersenne Twister `~.MT19937` by
+ default, but can also be instantiated
+ with any BitGenerator.
------------------ -------------------- -------------
-``random`` ``random_sample`` Access the values in a BitGenerator,
- convert them to ``float64`` in the
+``random`` ``random_sample``, Access the values in a BitGenerator,
+ ``rand`` convert them to ``float64`` in the
interval ``[0.0.,`` `` 1.0)``.
In addition to the ``size`` kwarg, now
supports ``dtype='d'`` or ``dtype='f'``,
diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst
index 513f975e7..ed8393855 100644
--- a/doc/source/reference/routines.char.rst
+++ b/doc/source/reference/routines.char.rst
@@ -58,6 +58,7 @@ comparison.
less_equal
greater
less
+ compare_chararrays
String information
------------------
diff --git a/doc/source/reference/routines.other.rst b/doc/source/reference/routines.other.rst
index 0a3677904..28c9a1ad1 100644
--- a/doc/source/reference/routines.other.rst
+++ b/doc/source/reference/routines.other.rst
@@ -21,6 +21,7 @@ Memory ranges
shares_memory
may_share_memory
+ byte_bounds
Array mixins
------------
@@ -35,3 +36,21 @@ NumPy version comparison
:toctree: generated/
lib.NumpyVersion
+
+Utility
+-------
+
+.. autosummary::
+ :toctree: generated/
+
+ get_include
+ deprecate
+ deprecate_with_doc
+
+Matlab-like Functions
+---------------------
+.. autosummary::
+ :toctree: generated/
+
+ who
+ disp \ No newline at end of file
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
new file mode 100644
index 000000000..f7b8da262
--- /dev/null
+++ b/doc/source/user/basics.dispatch.rst
@@ -0,0 +1,8 @@
+.. _basics.dispatch:
+
+*******************************
+Writing custom array containers
+*******************************
+
+.. automodule:: numpy.doc.dispatch
+
diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst
index 7875aff6e..e0fc0ece3 100644
--- a/doc/source/user/basics.rst
+++ b/doc/source/user/basics.rst
@@ -12,4 +12,5 @@ NumPy basics
basics.broadcasting
basics.byteswapping
basics.rec
+ basics.dispatch
basics.subclassing
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 09647be86..c8d964599 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -25,7 +25,7 @@ The Basics
NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
-tuple of positive integers. In NumPy dimensions are called *axes*.
+tuple of non-negative integers. In NumPy dimensions are called *axes*.
For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
one axis. That axis has 3 elements in it, so we say it has a length
@@ -270,7 +270,7 @@ can change the printing options using ``set_printoptions``.
::
- >>> np.set_printoptions(threshold=np.nan)
+ >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported
Basic Operations
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 84339ef23..f041e0cd6 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -10,6 +10,8 @@ NOTE: Many of the methods of ndarray have corresponding functions.
"""
from __future__ import division, absolute_import, print_function
+import sys
+
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
@@ -92,7 +94,7 @@ add_newdoc('numpy.core', 'flatiter', ('coords',
>>> fl = x.flat
>>> fl.coords
(0, 0)
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.coords
(0, 1)
@@ -111,7 +113,7 @@ add_newdoc('numpy.core', 'flatiter', ('index',
>>> fl = x.flat
>>> fl.index
0
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.index
1
@@ -664,7 +666,7 @@ add_newdoc('numpy.core', 'broadcast', ('iters',
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
- >>> row.next(), col.next()
+ >>> next(row), next(col)
(1, 4)
"""))
@@ -793,8 +795,7 @@ add_newdoc('numpy.core.multiarray', 'array',
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
- sequence. This argument can only be used to 'upcast' the array. For
- downcasting, use the .astype(t) method.
+ sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
@@ -1471,57 +1472,58 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-add_newdoc('numpy.core.multiarray', 'newbuffer',
- """
- newbuffer(size)
+if sys.version_info.major < 3:
+ add_newdoc('numpy.core.multiarray', 'newbuffer',
+ """
+ newbuffer(size)
- Return a new uninitialized buffer object.
+ Return a new uninitialized buffer object.
- Parameters
- ----------
- size : int
- Size in bytes of returned buffer object.
+ Parameters
+ ----------
+ size : int
+ Size in bytes of returned buffer object.
- Returns
- -------
- newbuffer : buffer object
- Returned, uninitialized buffer object of `size` bytes.
+ Returns
+ -------
+ newbuffer : buffer object
+ Returned, uninitialized buffer object of `size` bytes.
- """)
+ """)
-add_newdoc('numpy.core.multiarray', 'getbuffer',
- """
- getbuffer(obj [,offset[, size]])
+ add_newdoc('numpy.core.multiarray', 'getbuffer',
+ """
+ getbuffer(obj [,offset[, size]])
- Create a buffer object from the given object referencing a slice of
- length size starting at offset.
+ Create a buffer object from the given object referencing a slice of
+ length size starting at offset.
- Default is the entire buffer. A read-write buffer is attempted followed
- by a read-only buffer.
+ Default is the entire buffer. A read-write buffer is attempted followed
+ by a read-only buffer.
- Parameters
- ----------
- obj : object
+ Parameters
+ ----------
+ obj : object
- offset : int, optional
+ offset : int, optional
- size : int, optional
+ size : int, optional
- Returns
- -------
- buffer_obj : buffer
+ Returns
+ -------
+ buffer_obj : buffer
- Examples
- --------
- >>> buf = np.getbuffer(np.ones(5), 1, 3)
- >>> len(buf)
- 3
- >>> buf[0]
- '\\x00'
- >>> buf
- <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
+ Examples
+ --------
+ >>> buf = np.getbuffer(np.ones(5), 1, 3)
+ >>> len(buf)
+ 3
+ >>> buf[0]
+ '\\x00'
+ >>> buf
+ <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
- """)
+ """)
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
@@ -1987,13 +1989,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
-add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
- """Allow the array to be interpreted as a ctypes object by returning the
- data-memory location as an integer
-
- """))
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
@@ -2740,6 +2735,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
Toggle between low-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
+ Arrays of byte-strings are not swapped. The real and imaginary
+ parts of a complex number are swapped individually.
Parameters
----------
@@ -2762,13 +2759,24 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
- Arrays of strings are not swapped
+ Arrays of byte-strings are not swapped
- >>> A = np.array(['ceg', 'fac'])
+ >>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
- Traceback (most recent call last):
- ...
- UnicodeDecodeError: ...
+ array([b'ceg', b'fac'], dtype='|S3')
+
+ ``A.newbyteorder().byteswap()`` produces an array with the same values
+ but different representation in memory
+
+ >>> A = np.array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+ 0, 0], dtype=uint8)
+ >>> A.newbyteorder().byteswap(inplace=True)
+ array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+ 0, 3], dtype=uint8)
"""))
@@ -3255,87 +3263,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""))
-add_newdoc('numpy.core.multiarray', 'shares_memory',
- """
- shares_memory(a, b, max_work=None)
-
- Determine if two arrays share memory
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem (maximum number
- of candidate solutions to consider). The following special
- values are recognized:
-
- max_work=MAY_SHARE_EXACT (default)
- The problem is solved exactly. In this case, the function returns
- True only if there is an element shared between the arrays.
- max_work=MAY_SHARE_BOUNDS
- Only the memory bounds of a and b are checked.
-
- Raises
- ------
- numpy.TooHardError
- Exceeded max_work.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- may_share_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
-
- """)
-
-
-add_newdoc('numpy.core.multiarray', 'may_share_memory',
- """
- may_share_memory(a, b, max_work=None)
-
- Determine if two arrays might share memory
-
- A return of True does not necessarily mean that the two arrays
- share any element. It just means that they *might*.
-
- Only the memory bounds of a and b are checked by default.
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem. See
- `shares_memory` for details. Default for ``may_share_memory``
- is to do a bounds check.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- shares_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
- >>> x = np.zeros([3, 4])
- >>> np.may_share_memory(x[:,0], x[:,1])
- True
-
- """)
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
@@ -3437,81 +3364,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""))
-add_newdoc('numpy.core.multiarray', 'copyto',
- """
- copyto(dst, src, casting='same_kind', where=True)
-
- Copies values from one array to another, broadcasting as necessary.
-
- Raises a TypeError if the `casting` rule is violated, and if
- `where` is provided, it selects which elements to copy.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dst : ndarray
- The array into which values are copied.
- src : array_like
- The array from which values are copied.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- Controls what kind of data casting may occur when copying.
-
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
- where : array_like of bool, optional
- A boolean array which is broadcasted to match the dimensions
- of `dst`, and selects elements to copy from `src` to `dst`
- wherever it contains the value True.
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'putmask',
- """
- putmask(a, mask, values)
-
- Changes elements of an array based on conditional and input values.
-
- Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
-
- If `values` is not the same size as `a` and `mask` then it will repeat.
- This gives behavior different from ``a[mask] = values``.
-
- Parameters
- ----------
- a : array_like
- Target array.
- mask : array_like
- Boolean mask array. It has to be the same shape as `a`.
- values : array_like
- Values to put into `a` where `mask` is True. If `values` is smaller
- than `a` it will be repeated.
-
- See Also
- --------
- place, put, take, copyto
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2, 3)
- >>> np.putmask(x, x>2, x**2)
- >>> x
- array([[ 0, 1, 2],
- [ 9, 16, 25]])
-
- If `values` is smaller than `a` it is repeated:
-
- >>> x = np.arange(5)
- >>> np.putmask(x, x>1, [-33, -44])
- >>> x
- array([ 0, 1, -33, -44, -33])
-
- """)
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
@@ -6997,3 +6849,4 @@ for float_name in ('half', 'single', 'double', 'longdouble'):
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))
+
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 739ae7711..108364824 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -114,6 +114,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
+ To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 08f17aae4..3389e7d66 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -908,14 +908,18 @@ def sort(a, axis=-1, kind=None, order=None):
.. versionadded:: 1.12.0
- quicksort has been changed to an introsort which will switch
- heapsort when it does not make enough progress. This makes its
- worst case O(n*log(n)).
-
- 'stable' automatically choses the best stable sorting algorithm
- for the data type being sorted. It, along with 'mergesort' is
- currently mapped to timsort or radix sort depending on the
- data type. API forward compatibility currently limits the
+ quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ When sorting does not make enough progress it switches to
+ `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ This implementation makes quicksort O(n*log(n)) in the worst case.
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted.
+ It, along with 'mergesort' is currently mapped to
+ `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+ or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
+ API forward compatibility currently limits the
ability to select the implementation and it is hardwired for the different
data types.
@@ -924,7 +928,7 @@ def sort(a, axis=-1, kind=None, order=None):
Timsort is added for better performance on already or nearly
sorted data. On random data timsort is almost identical to
mergesort. It is now used for stable sort while quicksort is still the
- default sort if none is chosen. For details of timsort, refer to
+ default sort if none is chosen. For timsort details, refer to
`CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 296213823..d83af9911 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import functools
import warnings
import operator
+import types
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
@@ -430,36 +431,90 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
return result.astype(dtype, copy=False)
-#always succeed
-def add_newdoc(place, obj, doc):
+def _needs_add_docstring(obj):
"""
- Adds documentation to obj which is in module place.
+ Returns true if the only way to set the docstring of `obj` from python is
+ via add_docstring.
- If doc is a string add it to obj as a docstring
+ This function errs on the side of being overly conservative.
+ """
+ Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+ if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+ return False
+
+ if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+ return False
+
+ return True
+
+
+def _add_docstring(obj, doc, warn_on_python):
+ if warn_on_python and not _needs_add_docstring(obj):
+ warnings.warn(
+ "add_newdoc was used on a pure-python object {}. "
+ "Prefer to attach it directly to the source."
+ .format(obj),
+ UserWarning,
+ stacklevel=3)
+ try:
+ add_docstring(obj, doc)
+ except Exception:
+ pass
+
+
+def add_newdoc(place, obj, doc, warn_on_python=True):
+ """
+ Add documentation to an existing object, typically one defined in C
- If doc is a tuple, then the first element is interpreted as
- an attribute of obj and the second as the docstring
- (method, docstring)
+ The purpose is to allow easier editing of the docstrings without requiring
+ a re-compile. This exists primarily for internal use within numpy itself.
- If doc is a list, then each element of the list should be a
- sequence of length two --> [(method1, docstring1),
- (method2, docstring2), ...]
+ Parameters
+ ----------
+ place : str
+ The absolute name of the module to import from
+ obj : str
+ The name of the object to add documentation to, typically a class or
+ function name
+ doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+ If a string, the documentation to apply to `obj`
+
+ If a tuple, then the first element is interpreted as an attribute of
+ `obj` and the second as the docstring to apply - ``(method, docstring)``
+
+ If a list, then each element of the list should be a tuple of length
+ two - ``[(method1, docstring1), (method2, docstring2), ...]``
+ warn_on_python : bool
+ If True, the default, emit `UserWarning` if this is used to attach
+ documentation to a pure-python object.
- This routine never raises an error.
+ Notes
+ -----
+ This routine never raises an error if the docstring can't be written, but
+ will raise an error if the object being documented does not exist.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
+
+ Since this function grabs the ``char *`` from a c-level str object and puts
+ it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+ C-API best-practices, by:
+
+ - modifying a `PyTypeObject` after calling `PyType_Ready`
+ - calling `Py_INCREF` on the str and losing the reference, so the str
+ will never be released
+
+ If possible it should be avoided.
"""
- try:
- new = getattr(__import__(place, globals(), {}, [obj]), obj)
- if isinstance(doc, str):
- add_docstring(new, doc.strip())
- elif isinstance(doc, tuple):
- add_docstring(getattr(new, doc[0]), doc[1].strip())
- elif isinstance(doc, list):
- for val in doc:
- add_docstring(getattr(new, val[0]), val[1].strip())
- except Exception:
- pass
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ _add_docstring(new, doc.strip(), warn_on_python)
+ elif isinstance(doc, tuple):
+ attr, docstring = doc
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
+ elif isinstance(doc, list):
+ for attr, docstring in doc:
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index 15dcdf010..5ff4a0041 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -340,14 +340,6 @@ typedef struct _loop1d_info {
#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
-#define UFUNC_CHECK_ERROR(arg) \
- do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \
- ((arg)->errormask && \
- PyUFunc_checkfperr((arg)->errormask, \
- (arg)->errobj, \
- &(arg)->first))) \
- goto fail;} while (0)
-
/*
* THESE MACROS ARE DEPRECATED.
* Use npy_set_floatstatus_* in the npymath library.
@@ -357,10 +349,6 @@ typedef struct _loop1d_info {
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
#define UFUNC_FPE_INVALID NPY_FPE_INVALID
-#define UFUNC_CHECK_STATUS(ret) \
- { \
- ret = npy_clear_floatstatus(); \
- }
#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
#define generate_overflow_error() npy_set_floatstatus_overflow()
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 4f2c5b78e..c0fcc10ff 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -7,6 +7,7 @@ by importing from the extension module.
"""
import functools
+import sys
import warnings
import sys
@@ -16,7 +17,7 @@ import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
- _ARRAY_API, _monotonicity
+ _ARRAY_API, _monotonicity, _get_ndarray_c_version
)
__all__ = [
@@ -31,15 +32,17 @@ __all__ = [
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
- 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
- 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
+ 'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
+if sys.version_info.major < 3:
+ __all__ += ['newbuffer', 'getbuffer']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 3b986ed04..5d9e990e8 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -221,9 +221,7 @@ static int
if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence.");
- Py_DECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
+ npy_PyErr_ChainExceptionsCause(type, value, traceback);
}
else {
PyErr_Restore(type, value, traceback);
@@ -4414,7 +4412,17 @@ PyArray_DescrFromType(int type)
{
PyArray_Descr *ret = NULL;
- if (type < NPY_NTYPES) {
+ if (type < 0) {
+ /*
+ * It's not valid for type to be less than 0.
+ * If that happens, then no other branch of
+ * this if/else chain should be followed.
+ * This is effectively a no-op that ensures
+ * the default error is raised.
+ */
+ ret = NULL;
+ }
+ else if (type < NPY_NTYPES) {
ret = _builtin_descrs[type];
}
else if (type == NPY_NOTYPE) {
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 82d1ef4c9..ff85c3fcb 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1546,7 +1546,8 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
/* A typecode like 'd' */
if (len == 1) {
- check_num = type[0];
+ /* Python byte string characters are unsigned */
+ check_num = (unsigned char) type[0];
}
/* A kind + size like 'f8' */
else {
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index bc5c23caf..f60eab696 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -88,6 +88,36 @@ class TestBuiltin(object):
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
+ @pytest.mark.parametrize(
+ 'value',
+ ['m8', 'M8', 'datetime64', 'timedelta64',
+ 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
+ '>f', '<f', '=f', '|f',
+ ])
+ def test_dtype_bytes_str_equivalence(self, value):
+ bytes_value = value.encode('ascii')
+ from_bytes = np.dtype(bytes_value)
+ from_str = np.dtype(value)
+ assert_dtype_equal(from_bytes, from_str)
+
+ def test_dtype_from_bytes(self):
+ # Empty bytes object
+ assert_raises(TypeError, np.dtype, b'')
+ # Byte order indicator, but no type
+ assert_raises(TypeError, np.dtype, b'|')
+
+ # Single character with ordinal < NPY_NTYPES returns
+ # type by index into _builtin_descrs
+ assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
+ assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
+
+ # Single character where value is a valid type code
+ assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
+
+ # Bytes with non-ascii values raise errors
+ assert_raises(TypeError, np.dtype, b'\xff')
+ assert_raises(TypeError, np.dtype, b's\xff')
+
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 07d502706..671b3a55f 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -95,7 +95,7 @@ class CompaqVisualFCompiler(FCompiler):
raise e
except ValueError:
e = get_exception()
- if not "path']" in str(e):
+ if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 7946c6432..1871512bf 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -314,8 +314,8 @@ compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
-NumPy does not provide a dtype with more precision than C
-``long double``\\s; in particular, the 128-bit IEEE quad precision
+NumPy does not provide a dtype with more precision than C's
+``long double``\\; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index 0bdb6ae7d..f7bd2515b 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -42,7 +42,7 @@ We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
-without actually making copies, so that broadcasting operations are as
+without actually making copies so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
@@ -52,7 +52,7 @@ because broadcasting moves less memory around during the multiplication
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
-It starts with the trailing dimensions, and works its way forward. Two
+It starts with the trailing dimensions and works its way forward. Two
dimensions are compatible when
1) they are equal, or
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
new file mode 100644
index 000000000..09a3e5134
--- /dev/null
+++ b/numpy/doc/dispatch.py
@@ -0,0 +1,271 @@
+""".. _dispatch_mechanism:
+
+Numpy's dispatch mechanism, introduced in numpy version v1.16, is the
+recommended approach for writing custom N-dimensional array containers that are
+compatible with the numpy API and provide custom implementations of numpy
+functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
+N-dimensional array distributed across multiple nodes, and `cupy
+<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
+a GPU.
+
+To get a feel for writing custom array containers, we'll begin with a simple
+example that has rather narrow utility but illustrates the concepts involved.
+
+>>> import numpy as np
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+...
+
+Our custom array can be instantiated like:
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr
+DiagonalArray(N=5, value=1)
+
+We can convert to a numpy array using :func:`numpy.array` or
+:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
+standard ``numpy.ndarray``.
+
+>>> np.asarray(arr)
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+If we operate on ``arr`` with a numpy function, numpy will again use the
+``__array__`` interface to convert it to an array and then apply the function
+in the usual way.
+
+>>> np.multiply(arr, 2)
+array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [0., 0., 2., 0., 0.],
+ [0., 0., 0., 2., 0.],
+ [0., 0., 0., 0., 2.]])
+
+
+Notice that the return type is a standard ``numpy.ndarray``.
+
+>>> type(np.multiply(arr, 2))
+<class 'numpy.ndarray'>
+
+How can we pass our custom array type through this function? Numpy allows a
+class to indicate that it would like to handle computations in a custom-defined
+way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
+take one at a time, starting with ``__array_ufunc__``. This method covers
+:ref:`ufuncs`, a class of functions that includes, for example,
+:func:`numpy.multiply` and :func:`numpy.sin`.
+
+The ``__array_ufunc__`` receives:
+
+- ``ufunc``, a function like ``numpy.multiply``
+- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
+ variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
+ on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
+- ``inputs``, which could be a mixture of different types
+- ``kwargs``, keyword arguments passed to the function
+
+For this example we will only handle the method ``'__call__'``.
+
+>>> from numbers import Number
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+Now our custom array type passes through numpy functions.
+
+>>> arr = DiagonalArray(5, 1)
+>>> np.multiply(arr, 3)
+DiagonalArray(N=5, value=3)
+>>> np.add(arr, 3)
+DiagonalArray(N=5, value=4)
+>>> np.sin(arr)
+DiagonalArray(N=5, value=0.8414709848078965)
+
+At this point ``arr + 3`` does not work.
+
+>>> arr + 3
+TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
+
+To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
+and so on to dispatch to the corresponding ufunc. We can achieve this
+conveniently by inheriting from the mixin
+:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
+
+>>> import numpy.lib.mixins
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr + 3
+DiagonalArray(N=5, value=4)
+>>> arr > 0
+DiagonalArray(N=5, value=True)
+
+Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
+functions to our custom variants.
+
+>>> HANDLED_FUNCTIONS = {}
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... # In this case we accept only scalar numbers or DiagonalArrays.
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+... def __array_function__(self, func, types, args, kwargs):
+... if func not in HANDLED_FUNCTIONS:
+... return NotImplemented
+... # Note: this allows subclasses that don't override
+... # __array_function__ to handle DiagonalArray objects.
+... if not all(issubclass(t, self.__class__) for t in types):
+... return NotImplemented
+... return HANDLED_FUNCTIONS[func](*args, **kwargs)
+...
+
+A convenient pattern is to define a decorator ``implements`` that can be used
+to add functions to ``HANDLED_FUNCTIONS``.
+
+>>> def implements(np_function):
+... "Register an __array_function__ implementation for DiagonalArray objects."
+... def decorator(func):
+... HANDLED_FUNCTIONS[np_function] = func
+... return func
+... return decorator
+...
+
+Now we write implementations of numpy functions for ``DiagonalArray``.
+For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
+calls ``numpy.sum(self)``, and the same for ``mean``.
+
+>>> @implements(np.sum)
+... def sum(arr):
+... "Implementation of np.sum for DiagonalArray objects"
+... return arr._i * arr._N
+...
+>>> @implements(np.mean)
+... def mean(arr):
+... "Implementation of np.mean for DiagonalArray objects"
+... return arr._i / arr._N
+...
+>>> arr = DiagonalArray(5, 1)
+>>> np.sum(arr)
+5
+>>> np.mean(arr)
+0.2
+
+If the user tries to use any numpy functions not included in
+``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
+this operation is not supported. For example, concatenating two
+``DiagonalArrays`` does not produce another diagonal array, so it is not
+supported.
+
+>>> np.concatenate([arr, arr])
+TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
+
+Additionally, our implementations of ``sum`` and ``mean`` do not accept the
+optional arguments that numpy's implementation does.
+
+>>> np.sum(arr, axis=0)
+TypeError: sum() got an unexpected keyword argument 'axis'
+
+The user always has the option of converting to a normal ``numpy.ndarray`` with
+:func:`numpy.asarray` and using standard numpy from there.
+
+>>> np.concatenate([np.asarray(arr), np.asarray(arr)])
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.],
+ [1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+Refer to the `dask source code <https://github.com/dask/dask>`_ and
+`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
+examples of custom array containers.
+
+See also `NEP 18 <http://www.numpy.org/neps/nep-0018-array-function-protocol.html>`_.
+"""
diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py
index a112e559c..df2c455ec 100644
--- a/numpy/doc/ufuncs.py
+++ b/numpy/doc/ufuncs.py
@@ -13,9 +13,9 @@ example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
-The unfunc module lists all the available ufuncs in numpy. Documentation on
+The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
-intended to address the more general aspects of unfuncs common to most of
+intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
diff --git a/numpy/fft/pocketfft.py b/numpy/fft/pocketfft.py
index 45dc162f6..b7f6f1434 100644
--- a/numpy/fft/pocketfft.py
+++ b/numpy/fft/pocketfft.py
@@ -392,8 +392,9 @@ def irfft(a, n=None, axis=-1, norm=None):
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
- it is padded with zeros. If `n` is not given, it is determined from
- the length of the input along the axis specified by `axis`.
+ it is padded with zeros. If `n` is not given, it is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along the axis
+ specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
@@ -436,6 +437,14 @@ def irfft(a, n=None, axis=-1, norm=None):
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
+ The correct interpretation of the hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `irfft`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ correct length of the real input **must** be given.
+
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
@@ -473,8 +482,9 @@ def hfft(a, n=None, axis=-1, norm=None):
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
- padded with zeros. If `n` is not given, it is determined from the
- length of the input along the axis specified by `axis`.
+ padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
+ where ``m`` is the length of the input along the axis specified by
+ `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
@@ -513,6 +523,14 @@ def hfft(a, n=None, axis=-1, norm=None):
* even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.
+ The correct interpretation of the hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `hfft`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ shape of the full signal **must** be given.
+
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
@@ -1167,8 +1185,9 @@ def irfftn(a, s=None, axes=None, norm=None):
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
- with zeros. If `s` is not given, the shape of the input along the
- axes specified by `axes` is used.
+ with zeros. If `s` is not given, the shape of the input along the axes
+ specified by `axes` is used, except for the last axis which is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along that axis.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
@@ -1213,6 +1232,15 @@ def irfftn(a, s=None, axes=None, norm=None):
See `rfft` for definitions and conventions used for real input.
+ The correct interpretation of the hermitian input depends on the shape of
+ the original data, as given by `s`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `irfftn`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. When performing the
+ final complex to real transform, the last value is thus treated as purely
+ real. To avoid losing information, the correct shape of the real input
+ **must** be given.
+
Examples
--------
>>> a = np.zeros((3, 2, 2))
@@ -1244,7 +1272,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None):
a : array_like
The input array
s : sequence of ints, optional
- Shape of the inverse FFT.
+ Shape of the real output to the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 93bdbce97..3bf818812 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -384,7 +384,7 @@ def _wrap_header_guess_version(header):
return ret
header = _wrap_header(header, (3, 0))
- warnings.warn("Stored array in format 3.0. It can only be"
+ warnings.warn("Stored array in format 3.0. It can only be "
"read by NumPy >= 1.17", UserWarning, stacklevel=2)
return header
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 1fcb6137c..9d380e67d 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -358,12 +358,12 @@ def average(a, axis=None, weights=None, returned=False):
Examples
--------
- >>> data = list(range(1,5))
+ >>> data = np.arange(1, 5)
>>> data
- [1, 2, 3, 4]
+ array([1, 2, 3, 4])
>>> np.average(data)
2.5
- >>> np.average(range(1,11), weights=range(10,0,-1))
+ >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
4.0
>>> data = np.arange(6).reshape((3,2))
@@ -3102,6 +3102,7 @@ def i0(x):
array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary
"""
+ x = np.asanyarray(x)
x = np.abs(x)
return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index c0b8ad6b8..eae52c002 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2005,6 +2005,22 @@ class Test_I0(object):
assert_equal(i0_0.shape, (1,))
assert_array_equal(np.i0([0.]), np.array([1.]))
+ def test_non_array(self):
+ a = np.arange(4)
+
+ class array_like:
+ __array_interface__ = a.__array_interface__
+
+ def __array_wrap__(self, arr):
+ return self
+
+ # E.g. pandas series survive ufunc calls through array-wrap:
+ assert isinstance(np.abs(array_like()), array_like)
+ exp = np.i0(a)
+ res = np.i0(array_like())
+
+ assert_array_equal(exp, res)
+
class TestKaiser(object):
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index 87c27aab9..dc0a5ebd9 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -54,7 +54,7 @@ class PushbackIterator(object):
Return an iterator for which items can be pushed back into.
Call the .pushback(item) method to have item returned as the next
- value of .next().
+ value of next().
"""
def __init__(self, iterable):
object.__init__(self)
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 20db2d655..f221b319a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -800,7 +800,7 @@ class _DomainCheckInterval(object):
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
- if (a > b):
+ if a > b:
(a, b) = (b, a)
self.a = a
self.b = b
@@ -1165,7 +1165,7 @@ class _DomainedBinaryOperation(_MaskedUFunc):
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
- if (not m.ndim):
+ if not m.ndim:
if m:
return masked
else:
@@ -1743,7 +1743,7 @@ def mask_or(m1, m2, copy=False, shrink=True):
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
- if (dtype1 != dtype2):
+ if dtype1 != dtype2:
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
@@ -2681,15 +2681,13 @@ class MaskedIterator(object):
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
- >>> fl.next()
+ >>> next(fl)
3
- >>> fl.next()
+ >>> next(fl)
masked
- >>> fl.next()
+ >>> next(fl)
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
- d = self.dataiter.next()
+ ...
StopIteration
"""
@@ -3077,7 +3075,7 @@ class MaskedArray(ndarray):
def view(self, dtype=None, type=None, fill_value=None):
"""
- Return a view of the MaskedArray data
+ Return a view of the MaskedArray data.
Parameters
----------
@@ -3091,6 +3089,14 @@ class MaskedArray(ndarray):
type : Python type, optional
Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default).
+ If None, then this argument is inferred from the passed `dtype`, or
+ in its absence the original array, as discussed in the notes below.
+
+ See Also
+ --------
+ numpy.ndarray.view : Equivalent method on ndarray object.
Notes
-----
@@ -3143,7 +3149,7 @@ class MaskedArray(ndarray):
# also make the mask be a view (so attr changes to the view's
# mask do no affect original object's mask)
# (especially important to avoid affecting np.masked singleton)
- if (getmask(output) is not nomask):
+ if getmask(output) is not nomask:
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
@@ -3156,7 +3162,6 @@ class MaskedArray(ndarray):
else:
output.fill_value = fill_value
return output
- view.__doc__ = ndarray.view.__doc__
def __getitem__(self, indx):
"""
@@ -3382,7 +3387,7 @@ class MaskedArray(ndarray):
if mask is masked:
mask = True
- if (current_mask is nomask):
+ if current_mask is nomask:
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
@@ -5036,7 +5041,7 @@ class MaskedArray(ndarray):
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
@@ -5118,7 +5123,7 @@ class MaskedArray(ndarray):
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
@@ -5197,7 +5202,7 @@ class MaskedArray(ndarray):
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
@@ -5239,9 +5244,9 @@ class MaskedArray(ndarray):
return m
if not axis:
- return (self - m)
+ return self - m
else:
- return (self - expand_dims(m, axis))
+ return self - expand_dims(m, axis)
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
@@ -5656,7 +5661,7 @@ class MaskedArray(ndarray):
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
@@ -5790,7 +5795,7 @@ class MaskedArray(ndarray):
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
@@ -6733,7 +6738,7 @@ def power(a, b, third=None):
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
- if not (result.ndim):
+ if not result.ndim:
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index e9fbc0fcf..a9059f522 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -46,6 +46,7 @@ Functions
from __future__ import division, absolute_import, print_function
import operator
+import warnings
import numpy as np
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 15e24f92b..2261f960b 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -16,7 +16,7 @@ from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
from numpy.compat import long
-
+from numpy.polynomial.polyutils import RankWarning
#
# fixtures
@@ -133,6 +133,17 @@ def test_fromroots(Poly):
assert_almost_equal(p2.coef[-1], 1)
+def test_bad_conditioned_fit(Poly):
+
+ x = [0., 0., 1.]
+ y = [1., 2., 3.]
+
+ # check RankWarning is raised
+ with pytest.warns(RankWarning) as record:
+ Poly.fit(x, y, 2)
+ assert record[0].message.args[0] == "The fit may be poorly conditioned"
+
+
def test_fit(Poly):
def f(x):
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 08b73da15..1436963c6 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -9,7 +9,7 @@ import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- assert_array_equal)
+ assert_warns, assert_array_equal)
def trim(x):
@@ -297,6 +297,8 @@ class TestIntegral(object):
assert_raises(ValueError, poly.polyint, [0], lbnd=[0])
assert_raises(ValueError, poly.polyint, [0], scl=[0])
assert_raises(TypeError, poly.polyint, [0], axis=.5)
+ with assert_warns(DeprecationWarning):
+ poly.polyint([1, 1], 1.)
# test integration of zero polynomial
for i in range(2, 5):
diff --git a/numpy/random/generator.pyx b/numpy/random/generator.pyx
index 6adf0f00b..c7432d8c1 100644
--- a/numpy/random/generator.pyx
+++ b/numpy/random/generator.pyx
@@ -353,7 +353,8 @@ cdef class Generator:
Return random integers from `low` (inclusive) to `high` (exclusive), or
if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
- randint (with endpoint=False) and random_integers (with endpoint=True)
+ `RandomState.randint` (with endpoint=False) and
+ `RandomState.random_integers` (with endpoint=True)
Return random integers from the "discrete uniform" distribution of
the specified dtype. If `high` is None (the default), then results are
@@ -503,15 +504,8 @@ cdef class Generator:
return self.integers(0, 4294967296, size=n_uint32,
dtype=np.uint32).astype('<u4').tobytes()[:length]
- def randint(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
- """
- Deprecated, renamed to ``integers``
- """
- warnings.warn("Renamed to integers", RuntimeWarning)
- self.integers(low, high, size, dtype, endpoint)
-
@cython.wraparound(True)
- def choice(self, a, size=None, replace=True, p=None, axis=0):
+ def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
"""
choice(a, size=None, replace=True, p=None, axis=0):
@@ -538,6 +532,9 @@ cdef class Generator:
axis : int, optional
The axis along which the selection is performed. The default, 0,
selects by row.
+ shuffle : boolean, optional
+ Whether the sample is shuffled when sampling without replacement.
+ Default is True, False provides a speedup.
Returns
-------
@@ -593,14 +590,12 @@ cdef class Generator:
dtype='<U11')
"""
- cdef char* idx_ptr
- cdef int64_t buf
- cdef char* buf_ptr
- cdef set idx_set
cdef int64_t val, t, loc, size_i, pop_size_i
cdef int64_t *idx_data
cdef np.npy_intp j
+ cdef uint64_t set_size, mask
+ cdef uint64_t[::1] hash_set
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
@@ -687,36 +682,45 @@ cdef class Generator:
size_i = size
pop_size_i = pop_size
# This is a heuristic tuning. should be improvable
- if pop_size_i > 200 and (size > 200 or size > (10 * pop_size // size)):
+ if shuffle:
+ cutoff = 50
+ else:
+ cutoff = 20
+ if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)):
# Tail shuffle size elements
- idx = np.arange(pop_size, dtype=np.int64)
- idx_ptr = np.PyArray_BYTES(<np.ndarray>idx)
- buf_ptr = <char*>&buf
- self._shuffle_raw(pop_size_i, max(pop_size_i - size_i,1),
- 8, 8, idx_ptr, buf_ptr)
+ idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
+ idx_data = <int64_t*>(<np.ndarray>idx).data
+ with self.lock, nogil:
+ self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
+ idx_data)
# Copy to allow potentially large array backing idx to be gc
idx = idx[(pop_size - size):].copy()
else:
- # Floyds's algorithm with precomputed indices
- # Worst case, O(n**2) when size is close to pop_size
+ # Floyd's algorithm
idx = np.empty(size, dtype=np.int64)
idx_data = <int64_t*>np.PyArray_DATA(<np.ndarray>idx)
- idx_set = set()
- loc = 0
- # Sample indices with one pass to avoid reacquiring the lock
- with self.lock:
- for j in range(pop_size_i - size_i, pop_size_i):
- idx_data[loc] = random_interval(&self._bitgen, j)
- loc += 1
- loc = 0
- while len(idx_set) < size_i:
+ # smallest power of 2 larger than 1.2 * size
+ set_size = <uint64_t>(1.2 * size_i)
+ mask = _gen_mask(set_size)
+ set_size = 1 + mask
+ hash_set = np.full(set_size, <uint64_t>-1, np.uint64)
+ with self.lock, cython.wraparound(False), nogil:
for j in range(pop_size_i - size_i, pop_size_i):
- if idx_data[loc] not in idx_set:
- val = idx_data[loc]
- else:
- idx_data[loc] = val = j
- idx_set.add(val)
- loc += 1
+ val = random_bounded_uint64(&self._bitgen, 0, j, 0, 0)
+ loc = val & mask
+ while hash_set[loc] != <uint64_t>-1 and hash_set[loc] != <uint64_t>val:
+ loc = (loc + 1) & mask
+ if hash_set[loc] == <uint64_t>-1: # then val not in hash_set
+ hash_set[loc] = val
+ idx_data[j - pop_size_i + size_i] = val
+ else: # we need to insert j instead
+ loc = j & mask
+ while hash_set[loc] != <uint64_t>-1:
+ loc = (loc + 1) & mask
+ hash_set[loc] = j
+ idx_data[j - pop_size_i + size_i] = j
+ if shuffle:
+ self._shuffle_int(size_i, 1, idx_data)
if shape is not None:
idx.shape = shape
@@ -3888,6 +3892,28 @@ cdef class Generator:
string.memcpy(data + j * stride, data + i * stride, itemsize)
string.memcpy(data + i * stride, buf, itemsize)
+ cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
+ int64_t* data) nogil:
+ """
+ Parameters
+ ----------
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ data
+ Location of data
+ """
+ cdef np.npy_intp i, j
+ cdef int64_t temp
+ for i in reversed(range(first, n)):
+ j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
+ temp = data[j]
+ data[j] = data[i]
+ data[i] = temp
+
def permutation(self, object x):
"""
permutation(x)
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index ef821d46f..a962fe84e 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -568,7 +568,10 @@ class TestRandomDist(object):
def test_choice_uniform_noreplace(self):
random = Generator(MT19937(self.seed))
actual = random.choice(4, 3, replace=False)
- desired = np.array([0, 1, 3], dtype=np.int64)
+ desired = np.array([2, 0, 3], dtype=np.int64)
+ assert_array_equal(actual, desired)
+ actual = random.choice(4, 4, replace=False, shuffle=False)
+ desired = np.arange(4, dtype=np.int64)
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
@@ -688,7 +691,7 @@ class TestRandomDist(object):
def test_choice_large_sample(self):
import hashlib
- choice_hash = '5ca163da624c938bb3bc93e89a7dec4c'
+ choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
random = Generator(MT19937(self.seed))
actual = random.choice(10000, 5000, replace=False)
if sys.byteorder != 'little':
diff --git a/shippable.yml b/shippable.yml
index 9fbd17d9a..2f4856525 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -23,15 +23,11 @@ build:
ci:
# install dependencies
- sudo apt-get install gcc gfortran
- # ARMv8 OpenBLAS built using script available here:
- # https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
- # build done on GCC compile farm machine named gcc115
- # tarball uploaded manually to an unshared Dropbox location
- - wget -O openblas-v0.3.5-armv8.tar.gz https://www.dropbox.com/s/pbqkxzlmih4cky1/openblas-v0.3.5-armv8.tar.gz?dl=0
- - tar zxvf openblas-v0.3.5-armv8.tar.gz
- - sudo cp -r ./64/lib/* /usr/lib
- - sudo cp ./64/include/* /usr/include
+ - target=$(python tools/openblas_support.py)
+ - sudo cp -r "${target}"/64/lib/* /usr/lib
+ - sudo cp "${target}"/64/include/* /usr/include
- pip install --upgrade pip
+
# we will pay the ~13 minute cost of compiling Cython only when a new
# version is scraped in by pip; otherwise, use the cached
# wheel shippable places on Amazon S3 after we build it once
diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py
index dfb40e965..f75f23a21 100644
--- a/tools/npy_tempita/__init__.py
+++ b/tools/npy_tempita/__init__.py
@@ -105,21 +105,21 @@ class Template(object):
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
- delimeters=None):
+ delimiters=None):
self.content = content
- # set delimeters
- if delimeters is None:
- delimeters = (self.default_namespace['start_braces'],
+ # set delimiters
+ if delimiters is None:
+ delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
- assert len(delimeters) == 2 and all(
- [isinstance(delimeter, basestring_)
- for delimeter in delimeters])
+ assert len(delimiters) == 2 and all(
+ [isinstance(delimiter, basestring_)
+ for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
- self.default_namespace['start_braces'] = delimeters[0]
- self.default_namespace['end_braces'] = delimeters[1]
- self.delimeters = delimeters
+ self.default_namespace['start_braces'] = delimiters[0]
+ self.default_namespace['end_braces'] = delimiters[1]
+ self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
@@ -143,7 +143,7 @@ class Template(object):
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset,
- delimeters=self.delimeters)
+ delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
@@ -392,9 +392,9 @@ class Template(object):
return msg
-def sub(content, delimeters=None, **kw):
+def sub(content, delimiters=None, **kw):
name = kw.get('__name')
- tmpl = Template(content, name=name, delimeters=delimeters)
+ tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
@@ -652,28 +652,28 @@ del _Empty
############################################################
-def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
- token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
- re.escape(delimeters[1])))
+ token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
+ re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
- if expr == delimeters[0] and in_expr:
- raise TemplateError('%s inside expression' % delimeters[0],
+ if expr == delimiters[0] and in_expr:
+ raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
- elif expr == delimeters[1] and not in_expr:
- raise TemplateError('%s outside expression' % delimeters[1],
+ elif expr == delimiters[1] and not in_expr:
+ raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
- if expr == delimeters[0]:
+ if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
@@ -684,7 +684,7 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
last = match.end()
last_pos = pos
if in_expr:
- raise TemplateError('No %s to finish last expression' % delimeters[1],
+ raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
@@ -822,12 +822,12 @@ def find_position(string, index, last_index, last_pos):
return (last_pos[0] + lines, column)
-def parse(s, name=None, line_offset=0, delimeters=None):
+def parse(s, name=None, line_offset=0, delimiters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
- tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
+ tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
diff --git a/tools/openblas_support.py b/tools/openblas_support.py
index 52d283a6c..460f36de5 100644
--- a/tools/openblas_support.py
+++ b/tools/openblas_support.py
@@ -1,5 +1,141 @@
+from __future__ import division, absolute_import, print_function
import os
+import sys
import textwrap
+import platform
+try:
+ from urllib.request import urlopen
+ from urllib.error import HTTPError
+except:
+ #Python2
+ from urllib2 import urlopen, HTTPError
+
+from tempfile import mkstemp, gettempdir
+import zipfile
+import tarfile
+
+OPENBLAS_V = 'v0.3.5'
+OPENBLAS_LONG = 'v0.3.5-274-g6a8b4269'
+BASE_LOC = ''
+RACKSPACE = 'https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com'
+ARCHITECTURES = ['', 'windows', 'darwin', 'arm', 'x86', 'ppc64']
+
+IS_32BIT = sys.maxsize < 2**32
+def get_arch():
+ if platform.system() == 'Windows':
+ ret = 'windows'
+ elif platform.system() == 'Darwin':
+ ret = 'darwin'
+ # Python3 returns a named tuple, but Python2 does not, so we are stuck
+ elif 'arm' in os.uname()[-1]:
+ ret = 'arm';
+ elif 'aarch64' in os.uname()[-1]:
+ ret = 'arm';
+ elif 'x86' in os.uname()[-1]:
+ ret = 'x86'
+ elif 'ppc64' in os.uname()[-1]:
+ ret = 'ppc64'
+ else:
+ ret = ''
+ assert ret in ARCHITECTURES
+ return ret
+
+def download_openblas(target, arch):
+ filename = ''
+ if arch == 'arm':
+ # ARMv8 OpenBLAS built using script available here:
+ # https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
+ # build done on GCC compile farm machine named gcc115
+ # tarball uploaded manually to an unshared Dropbox location
+ filename = ('https://www.dropbox.com/s/pbqkxzlmih4cky1/'
+ 'openblas-{}-armv8.tar.gz?dl=1'.format(OPENBLAS_V))
+ typ = 'tar.gz'
+ elif arch == 'ppc64':
+ # build script for POWER8 OpenBLAS available here:
+ # https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
+ # built on GCC compile farm machine named gcc112
+ # manually uploaded tarball to an unshared Dropbox location
+ filename = ('https://www.dropbox.com/s/zcwhk7c2zptwy0s/'
+ 'openblas-{}-ppc64le-power8.tar.gz?dl=1'.format(OPENBLAS_V))
+ typ = 'tar.gz'
+ elif arch == 'darwin':
+ filename = '{0}/openblas-{1}-macosx_10_9_x86_64-gf_1becaaa.tar.gz'.format(
+ RACKSPACE, OPENBLAS_LONG)
+ typ = 'tar.gz'
+ elif arch == 'windows':
+ if IS_32BIT:
+ suffix = 'win32-gcc_7_1_0.zip'
+ else:
+ suffix = 'win_amd64-gcc_7_1_0.zip'
+ filename = '{0}/openblas-{1}-{2}'.format(RACKSPACE, OPENBLAS_LONG, suffix)
+ typ = 'zip'
+ elif arch == 'x86':
+ if IS_32BIT:
+ suffix = 'manylinux1_i686.tar.gz'
+ else:
+ suffix = 'manylinux1_x86_64.tar.gz'
+ filename = '{0}/openblas-{1}-{2}'.format(RACKSPACE, OPENBLAS_LONG, suffix)
+ typ = 'tar.gz'
+ if not filename:
+ return None
+ try:
+ with open(target, 'wb') as fid:
+ fid.write(urlopen(filename).read())
+ except HTTPError:
+ print('Could not download "%s"' % filename)
+ return None
+ return typ
+
+def setup_openblas(arch=get_arch()):
+ '''
+ Download and setup an openblas library for building. If successful,
+ the configuration script will find it automatically.
+
+ Returns
+ -------
+ msg : str
+ path to extracted files on success, otherwise indicates what went wrong
+ To determine success, do ``os.path.exists(msg)``
+ '''
+ _, tmp = mkstemp()
+ if not arch:
+ raise ValueError('unknown architecture')
+ typ = download_openblas(tmp, arch)
+ if not typ:
+ return ''
+ if arch == 'windows':
+ if not typ == 'zip':
+ return 'expecting to download zipfile on windows, not %s' % str(typ)
+ return unpack_windows_zip(tmp)
+ else:
+ if not typ == 'tar.gz':
+ return 'expecting to download tar.gz, not %s' % str(typ)
+ return unpack_targz(tmp)
+
+def unpack_windows_zip(fname):
+ import sysconfig
+ with zipfile.ZipFile(fname, 'r') as zf:
+ # Get the openblas.a file, but not openblas.dll.a nor openblas.dev.a
+ lib = [x for x in zf.namelist() if OPENBLAS_LONG in x and
+ x.endswith('a') and not x.endswith('dll.a') and
+ not x.endswith('dev.a')]
+ if not lib:
+ return 'could not find libopenblas_%s*.a ' \
+ 'in downloaded zipfile' % OPENBLAS_LONG
+ target = os.path.join(gettempdir(), 'openblas.a')
+ with open(target, 'wb') as fid:
+ fid.write(zf.read(lib[0]))
+ return target
+
+def unpack_targz(fname):
+ target = os.path.join(gettempdir(), 'openblas')
+ if not os.path.exists(target):
+ os.mkdir(target)
+ with tarfile.open(fname, 'r') as zf:
+ # TODO: check that all the zf.getnames() files do not escape the
+ # extract directory (no leading '../', '/')
+ zf.extractall(target)
+ return target
def make_init(dirname):
'''
@@ -40,3 +176,34 @@ def make_init(dirname):
stacklevel=1)
"""))
+def test_setup(arches):
+ '''
+ Make sure all the downloadable files exist and can be opened
+ '''
+ for arch in arches:
+ if arch == '':
+ continue
+ try:
+ target = setup_openblas(arch)
+ except:
+ print('Could not setup %s' % arch)
+ raise
+ if not target:
+ raise RuntimeError('Could not setup %s' % arch)
+ print(target)
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser(
+ description='Download and expand an OpenBLAS archive for this ' \
+ 'architecture')
+ parser.add_argument('--test', nargs='*', default=None,
+ help='Test different architectures. "all", or any of %s' % ARCHITECTURES)
+ args = parser.parse_args()
+ if args.test is None:
+ print(setup_openblas())
+ else:
+ if len(args.test) == 0 or 'all' in args.test:
+ test_setup(ARCHITECTURES)
+ else:
+ test_setup(args.test)
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
index 28afdea5d..314ebbb36 100755
--- a/tools/pypy-test.sh
+++ b/tools/pypy-test.sh
@@ -11,22 +11,19 @@ sudo apt-get -yq install libatlas-base-dev liblapack-dev gfortran-5
F77=gfortran-5 F90=gfortran-5 \
# Download the proper OpenBLAS x64 precompiled library
-OPENBLAS=openblas-v0.3.5-274-g6a8b4269-manylinux1_x86_64.tar.gz
-echo getting $OPENBLAS
-wget -q https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/$OPENBLAS -O openblas.tar.gz
-mkdir -p openblas
-(cd openblas; tar -xf ../openblas.tar.gz)
-export LD_LIBRARY_PATH=$PWD/openblas/usr/local/lib
-export LIB=$PWD/openblas/usr/local/lib
-export INCLUDE=$PWD/openblas/usr/local/include
+target=$(python tools/openblas_support.py)
+echo getting OpenBLAS into $target
+export LD_LIBRARY_PATH=$target/usr/local/lib
+export LIB=$target/usr/local/lib
+export INCLUDE=$target/usr/local/include
# Use a site.cfg to build with local openblas
cat << EOF > site.cfg
[openblas]
libraries = openblas
-library_dirs = $PWD/openblas/usr/local/lib:$LIB
-include_dirs = $PWD/openblas/usr/local/lib:$LIB
-runtime_library_dirs = $PWD/openblas/usr/local/lib
+library_dirs = $target/usr/local/lib:$LIB
+include_dirs = $target/usr/local/lib:$LIB
+runtime_library_dirs = $target/usr/local/lib
EOF
echo getting PyPy 3.6 nightly
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
deleted file mode 100755
index 5240253b6..000000000
--- a/tools/test-installed-numpy.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-parser.add_option("-n", "--durations",
- dest="durations", default=-1,
- help="show time to run slowest N tests [default: -1]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- durations=int(options.durations),
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index db1f0bc5c..7e5131dcf 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -26,14 +26,9 @@ if [ -n "$INSTALL_PICKLE5" ]; then
fi
if [ -n "$PPC64_LE" ]; then
- # build script for POWER8 OpenBLAS available here:
- # https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
- # built on GCC compile farm machine named gcc112
- # manually uploaded tarball to an unshared Dropbox location
- wget -O openblas-power8.tar.gz https://www.dropbox.com/s/zcwhk7c2zptwy0s/openblas-v0.3.5-ppc64le-power8.tar.gz?dl=0
- tar zxvf openblas-power8.tar.gz
- sudo cp -r ./64/lib/* /usr/lib
- sudo cp ./64/include/* /usr/include
+ target=$(python tools/openblas_support.py)
+ sudo cp -r $target/64/lib/* /usr/lib
+ sudo cp $target/64/include/* /usr/include
fi
pip install --upgrade pip setuptools
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index d900b88e7..77eb66b0b 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -83,9 +83,9 @@ run_test()
export PYTHONWARNINGS=default
if [ -n "$RUN_FULL_TESTS" ]; then
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
- $PYTHON ../tools/test-installed-numpy.py -v --durations 10 --mode=full $COVERAGE_FLAG
+ $PYTHON ../runtests.py -n -v --durations 10 --mode=full $COVERAGE_FLAG
else
- $PYTHON ../tools/test-installed-numpy.py -v --durations 10
+ $PYTHON ../runtests.py -n -v --durations 10
fi
if [ -n "$RUN_COVERAGE" ]; then
diff --git a/tox.ini b/tox.ini
index 75229ed86..3223b9e1b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,7 +15,7 @@
# To run against a specific subset of Python versions, use:
# tox -e py37
-# Extra arguments will be passed to test-installed-numpy.py. To run
+# Extra arguments will be passed to runtests.py. To run
# the full testsuite:
# tox full
# To run with extra verbosity:
@@ -33,7 +33,7 @@ envlist =
deps=
pytest
changedir={envdir}
-commands={envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands={envpython} {toxinidir}/runtests.py --mode=full {posargs:}
[testenv:py37-not-relaxed-strides]
basepython=python3.7
@@ -43,4 +43,4 @@ env=NPY_RELAXED_STRIDES_CHECKING=0
# if you want it:
[testenv:debug]
basepython=python-dbg
-commands=gdb --args {envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands=gdb --args {envpython} {toxinidir}/runtests.py --mode=full {posargs:}