-rw-r--r--   numpy/__init__.py                                      |  8
-rw-r--r--   numpy/core/_add_newdocs.py                             | 23
-rw-r--r--   numpy/core/shape_base.pyi                              | 65
-rw-r--r--   numpy/core/src/umath/reduction.c                       | 46
-rw-r--r--   numpy/lib/function_base.py                             |  7
-rw-r--r--   numpy/typing/tests/data/fail/array_constructors.py     |  4
-rw-r--r--   numpy/typing/tests/data/reveal/array_constructors.py   | 34
7 files changed, 115 insertions, 72 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index baff5e141..8546238ec 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -383,10 +383,10 @@ else:
                 error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
                 msg = (
                     "Polyfit sanity test emitted a warning, most likely due "
-                    "to using a buggy Accelerate backend. If you compiled "
-                    "yourself, more information is available at "
-                    "https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
-                    "Otherwise report this to the vendor "
+                    "to using a buggy Accelerate backend."
+                    "\nIf you compiled yourself, more information is available at:"
+                    "\nhttps://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries"
+                    "\nOtherwise report this to the vendor "
                     "that provided NumPy.\n{}\n".format(error_message))
                 raise RuntimeError(msg)
     del _mac_os_check
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 15b2b3ad3..a29e2e8a8 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1585,8 +1585,8 @@ add_newdoc('numpy.core.multiarray', 'arange',
     For integer arguments the function is equivalent to the Python built-in
     `range` function, but returns an ndarray rather than a list.
 
-    When using a non-integer step, such as 0.1, the results will often not
-    be consistent. It is better to use `numpy.linspace` for these cases.
+    When using a non-integer step, such as 0.1, it is often better to use
+    `numpy.linspace`. See the warnings section below for more information.
 
     Parameters
     ----------
@@ -1619,6 +1619,25 @@ add_newdoc('numpy.core.multiarray', 'arange',
     this rule may result in the last element of `out` being greater
     than `stop`.
 
+    Warnings
+    --------
+    The length of the output might not be numerically stable.
+
+    Another stability issue is due to the internal implementation of
+    `numpy.arange`.
+    The actual step value used to populate the array is
+    ``dtype(start + step) - dtype(start)`` and not `step`. Precision loss
+    can occur here, due to casting or due to using floating points when
+    `start` is much larger than `step`. This can lead to unexpected
+    behaviour. For example::
+
+      >>> np.arange(0, 5, 0.5, dtype=int)
+      array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+      >>> np.arange(-3, 3, 0.5, dtype=int)
+      array([-3, -2, -1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
+
+    In such cases, the use of `numpy.linspace` should be preferred.
+
     See Also
     --------
     numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
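A side note on the `arange` docstring change above (not part of the patch): for non-integer steps the usual recommendation is to ask `numpy.linspace` for a number of samples rather than a step, which makes the output length explicit. A minimal sketch of that pattern, matching the two examples in the new Warnings section:

    import numpy as np

    # Instead of np.arange(0, 5, 0.5): ask for 10 samples and exclude the endpoint.
    a = np.linspace(0, 5, num=10, endpoint=False)
    # array([0. , 0.5, 1. , 1.5, 2. , 2.5, 3. , 3.5, 4. , 4.5])

    # Instead of np.arange(-3, 3, 0.5): 12 samples from -3 up to (but not including) 3.
    b = np.linspace(-3, 3, num=12, endpoint=False)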
diff --git a/numpy/core/shape_base.pyi b/numpy/core/shape_base.pyi
index ec40a8814..9aaeceed7 100644
--- a/numpy/core/shape_base.pyi
+++ b/numpy/core/shape_base.pyi
@@ -1,39 +1,72 @@
 import sys
-from typing import TypeVar, overload, List, Sequence
+from typing import TypeVar, overload, List, Sequence, Any
 
-from numpy import ndarray
-from numpy.typing import ArrayLike
+from numpy import generic, dtype
+from numpy.typing import ArrayLike, NDArray, _NestedSequence, _SupportsArray
 
 if sys.version_info >= (3, 8):
     from typing import SupportsIndex
 else:
     from typing_extensions import SupportsIndex
 
-_ArrayType = TypeVar("_ArrayType", bound=ndarray)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
 
+_ArrayLike = _NestedSequence[_SupportsArray[dtype[_SCT]]]
+
+__all__: List[str]
+
+@overload
+def atleast_1d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def atleast_1d(__arys: ArrayLike) -> NDArray[Any]: ...
+@overload
+def atleast_1d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+
+@overload
+def atleast_2d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def atleast_2d(__arys: ArrayLike) -> NDArray[Any]: ...
 @overload
-def atleast_1d(__arys: ArrayLike) -> ndarray: ...
+def atleast_2d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
+
+@overload
+def atleast_3d(__arys: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
 @overload
-def atleast_1d(*arys: ArrayLike) -> List[ndarray]: ...
+def atleast_3d(__arys: ArrayLike) -> NDArray[Any]: ...
+@overload
+def atleast_3d(*arys: ArrayLike) -> List[NDArray[Any]]: ...
 
 @overload
-def atleast_2d(__arys: ArrayLike) -> ndarray: ...
+def vstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
 @overload
-def atleast_2d(*arys: ArrayLike) -> List[ndarray]: ...
+def vstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
 
 @overload
-def atleast_3d(__arys: ArrayLike) -> ndarray: ...
+def hstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
 @overload
-def atleast_3d(*arys: ArrayLike) -> List[ndarray]: ...
+def hstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
 
-def vstack(tup: Sequence[ArrayLike]) -> ndarray: ...
-def hstack(tup: Sequence[ArrayLike]) -> ndarray: ...
 @overload
 def stack(
-    arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: None = ...
-) -> ndarray: ...
+    arrays: Sequence[_ArrayLike[_SCT]],
+    axis: SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def stack(
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = ...,
+    out: None = ...,
+) -> NDArray[Any]: ...
 @overload
 def stack(
-    arrays: Sequence[ArrayLike], axis: SupportsIndex = ..., out: _ArrayType = ...
+    arrays: Sequence[ArrayLike],
+    axis: SupportsIndex = ...,
+    out: _ArrayType = ...,
 ) -> _ArrayType: ...
-def block(arrays: ArrayLike) -> ndarray: ...
+
+@overload
+def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def block(arrays: ArrayLike) -> NDArray[Any]: ...
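For readers skimming the stub changes above: the new overloads let mypy keep the scalar type of array inputs instead of collapsing everything to a bare `ndarray`. A minimal sketch of the effect, written in the same style as the reveal tests further down; it is meant to be fed to a type checker (`reveal_type` is interpreted by mypy), and the exact names mypy prints can vary between NumPy and mypy versions:

    from typing import List
    import numpy as np
    import numpy.typing as npt

    A: npt.NDArray[np.float64]   # an array statically known to hold float64
    C: List[List[float]]         # an arbitrary nested sequence

    # All-float64 input matches the dtype-preserving overload:
    reveal_type(np.vstack([A, A]))    # ndarray[Any, dtype[float64]]
    # Mixing in a generic array-like falls back to the ArrayLike overload:
    reveal_type(np.vstack([A, C]))    # ndarray[Any, dtype[Any]]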
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index f1423d8b9..86cc20eb1 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -166,7 +166,7 @@ PyArray_CopyInitialReduceValues(
  * identity   : If Py_None, PyArray_CopyInitialReduceValues is used, otherwise
  *              this value is used to initialize the result to
  *              the reduction's unit.
- * loop       : The loop which does the reduction.
+ * loop       : `reduce_loop` from `ufunc_object.c`.  TODO: Refactor
  * data       : Data which is passed to the inner loop.
  * buffersize : Buffer size for the iterator. For the default, pass in 0.
  * funcname   : The name of the reduction function, for error messages.
@@ -182,18 +182,15 @@ PyArray_CopyInitialReduceValues(
  *               generalized ufuncs!)
  */
 NPY_NO_EXPORT PyArrayObject *
-PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
-                      PyArrayObject *wheremask,
-                      PyArray_Descr *operand_dtype,
-                      PyArray_Descr *result_dtype,
-                      NPY_CASTING casting,
-                      npy_bool *axis_flags, int reorderable,
-                      int keepdims,
-                      PyObject *identity,
-                      PyArray_ReduceLoopFunc *loop,
-                      void *data, npy_intp buffersize, const char *funcname,
-                      int errormask)
+PyUFunc_ReduceWrapper(
+        PyArrayObject *operand, PyArrayObject *out, PyArrayObject *wheremask,
+        PyArray_Descr *operand_dtype, PyArray_Descr *result_dtype,
+        NPY_CASTING casting,
+        npy_bool *axis_flags, int reorderable, int keepdims,
+        PyObject *identity, PyArray_ReduceLoopFunc *loop,
+        void *data, npy_intp buffersize, const char *funcname, int errormask)
 {
+    assert(loop != NULL);
     PyArrayObject *result = NULL;
     npy_intp skip_first_count = 0;
 
@@ -201,7 +198,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
     NpyIter *iter = NULL;
     PyArrayObject *op[3];
     PyArray_Descr *op_dtypes[3];
-    npy_uint32 flags, op_flags[3];
+    npy_uint32 it_flags, op_flags[3];
 
     /* More than one axis means multiple orders are possible */
     if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) {
@@ -227,7 +224,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
     op_dtypes[0] = result_dtype;
     op_dtypes[1] = operand_dtype;
 
-    flags = NPY_ITER_BUFFERED |
+    it_flags = NPY_ITER_BUFFERED |
             NPY_ITER_EXTERNAL_LOOP |
             NPY_ITER_GROWINNER |
             NPY_ITER_DONT_NEGATE_STRIDES |
@@ -293,7 +290,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
         }
     }
 
-    iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, flags,
+    iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, it_flags,
                                NPY_KEEPORDER, casting,
                                op_flags,
                                op_dtypes,
@@ -304,11 +301,14 @@
     result = NpyIter_GetOperandArray(iter)[0];
 
+    int needs_api = NpyIter_IterationNeedsAPI(iter);
+    /* Start with the floating-point exception flags cleared */
+    npy_clear_floatstatus_barrier((char*)&iter);
+
     /*
      * Initialize the result to the reduction unit if possible,
      * otherwise copy the initial values and get a view to the rest.
      */
-
     if (identity != Py_None) {
         if (PyArray_FillWithScalar(result, identity) < 0) {
             goto fail;
         }
@@ -331,15 +331,11 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
             goto fail;
         }
 
-    /* Start with the floating-point exception flags cleared */
-    npy_clear_floatstatus_barrier((char*)&iter);
-
     if (NpyIter_GetIterSize(iter) != 0) {
         NpyIter_IterNextFunc *iternext;
         char **dataptr;
         npy_intp *strideptr;
         npy_intp *countptr;
-        int needs_api;
 
         iternext = NpyIter_GetIterNext(iter, NULL);
         if (iternext == NULL) {
@@ -349,16 +345,6 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
         strideptr = NpyIter_GetInnerStrideArray(iter);
         countptr = NpyIter_GetInnerLoopSizePtr(iter);
 
-        needs_api = NpyIter_IterationNeedsAPI(iter);
-
-        /* Straightforward reduction */
-        if (loop == NULL) {
-            PyErr_Format(PyExc_RuntimeError,
-                    "reduction operation %s did not supply an "
-                    "inner loop function", funcname);
-            goto fail;
-        }
-
         if (loop(iter, dataptr, strideptr, countptr, iternext,
                  needs_api, skip_first_count, data) < 0) {
             goto fail;
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 783d45c2f..2e9ae6644 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -88,8 +88,11 @@ def rot90(m, k=1, axes=(0, 1)):
 
     Notes
     -----
-    rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
-    rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
+    ``rot90(m, k=1, axes=(1,0))`` is the reverse of
+    ``rot90(m, k=1, axes=(0,1))``
+
+    ``rot90(m, k=1, axes=(1,0))`` is equivalent to
+    ``rot90(m, k=-1, axes=(0,1))``
 
     Examples
     --------
diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.py
index eb57e5c00..0e2250513 100644
--- a/numpy/typing/tests/data/fail/array_constructors.py
+++ b/numpy/typing/tests/data/fail/array_constructors.py
@@ -27,5 +27,5 @@ np.logspace(0, 2, base=None)  # E: Argument "base"
 np.geomspace(None, 'bob')  # E: Argument 1
 
 np.stack(generator)  # E: No overload variant
-np.hstack({1, 2})  # E: incompatible type
-np.vstack(1)  # E: incompatible type
+np.hstack({1, 2})  # E: No overload variant
+np.vstack(1)  # E: No overload variant
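The two expected messages in the failure tests change because `hstack` and `vstack` are now declared with `@overload` in the stubs: a call that matches no overload is reported by mypy as "No overload variant ..." rather than as an incompatible argument type. A small self-contained sketch of that behaviour (`stack_like` is a hypothetical function used purely for illustration; like the test files in this patch, the snippet is meant for a type checker rather than for execution):

    from typing import List, Sequence, overload

    @overload
    def stack_like(tup: Sequence[Sequence[int]]) -> List[int]: ...
    @overload
    def stack_like(tup: Sequence[Sequence[float]]) -> List[float]: ...
    def stack_like(tup):
        # Runtime behaviour is irrelevant here; only the overloads matter to mypy.
        return [x for row in tup for x in row]

    stack_like(1)  # E: No overload variant of "stack_like" matches argument type "int"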
diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.py
index 1b9006220..44c85e988 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.py
+++ b/numpy/typing/tests/data/reveal/array_constructors.py
@@ -147,25 +147,27 @@ reveal_type(np.fromfunction(func, (3, 5)))  # E: SubClass[{float64}]
 
 reveal_type(np.identity(10))  # E: numpy.ndarray[Any, Any]
 
-reveal_type(np.atleast_1d(A))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.atleast_1d(C))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.atleast_1d(A, A))  # E: list[numpy.ndarray[Any, Any]]
-reveal_type(np.atleast_1d(A, C))  # E: list[numpy.ndarray[Any, Any]]
-reveal_type(np.atleast_1d(C, C))  # E: list[numpy.ndarray[Any, Any]]
+reveal_type(np.atleast_1d(A))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.atleast_1d(C))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.atleast_1d(A, A))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.atleast_1d(A, C))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
+reveal_type(np.atleast_1d(C, C))  # E: list[numpy.ndarray[Any, numpy.dtype[Any]]]
 
-reveal_type(np.atleast_2d(A))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.atleast_2d(A))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 
-reveal_type(np.atleast_3d(A))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.atleast_3d(A))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 
-reveal_type(np.vstack([A, A]))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.vstack([A, C]))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.vstack([C, C]))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.vstack([A, A]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.vstack([A, C]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.vstack([C, C]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
 
-reveal_type(np.hstack([A, A]))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.hstack([A, A]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
 
-reveal_type(np.stack([A, A]))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.stack([A, A], axis=0))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.stack([A, A], out=B))  # E: SubClass
+reveal_type(np.stack([A, A]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.stack([A, C]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.stack([C, C]))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.stack([A, A], axis=0))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.stack([A, A], out=B))  # E: SubClass[{float64}]
 
-reveal_type(np.block([[A, A], [A, A]]))  # E: numpy.ndarray[Any, Any]
-reveal_type(np.block(C))  # E: numpy.ndarray[Any, Any]
+reveal_type(np.block([[A, A], [A, A]]))  # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.block(C))  # E: numpy.ndarray[Any, numpy.dtype[Any]]
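As a runtime cross-check of what these reveal tests assert statically (a small sketch, not part of the patch; `A`, `B` and `C` are concrete values here rather than the annotated placeholders used by the test files):

    import numpy as np

    class SubClass(np.ndarray): ...

    A = np.ones((2, 2), dtype=np.float64)
    C = [[1, 2], [3, 4]]                      # generic array-like
    B = np.empty((2, 2, 2)).view(SubClass)    # subclass used as an out= target

    print(np.vstack([A, A]).dtype)        # float64
    print(np.vstack([A, C]).dtype)        # float64 (the int list is promoted)
    print(np.stack([C, C]).dtype)         # a default integer dtype, e.g. int64
    print(type(np.stack([A, A], out=B)))  # <class '__main__.SubClass'>; out is returned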