Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py                               |   5
-rw-r--r--  numpy/core/_add_newdocs.py                      |   4
-rw-r--r--  numpy/core/multiarray.py                        |  27
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c      |  18
-rw-r--r--  numpy/core/src/multiarray/ctors.c               |   4
-rw-r--r--  numpy/core/src/multiarray/einsum_sumprod.c.src  | 186
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c    |  39
-rw-r--r--  numpy/core/tests/test_array_coercion.py         |  30
-rw-r--r--  numpy/fft/_pocketfft.py                         |  19
-rw-r--r--  numpy/lib/npyio.py                              |  40
-rw-r--r--  numpy/random/_generator.pyx                     |  19
-rw-r--r--  numpy/random/tests/test_generator_mt19937.py    |  15
-rw-r--r--  numpy/tests/test_reloading.py                   |  14
13 files changed, 232 insertions, 188 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 879e8f013..a242bb7df 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -389,7 +389,12 @@ else:
# Note that this will currently only make a difference on Linux
core.multiarray._set_madvise_hugepage(use_hugepage)
+ # Give a warning if NumPy is reloaded or imported on a sub-interpreter.
+ # We do this from Python, since the C module may not be reloaded and
+ # it is more tidily organized here.
+ core.multiarray._multiarray_umath._reload_guard()
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
+
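
A minimal sketch of the reload warning this hook surfaces (doctest-style;
assumes NumPy was already imported and default warning filters):

    >>> import importlib, warnings
    >>> import numpy as np
    >>> with warnings.catch_warnings(record=True) as w:
    ...     warnings.simplefilter("always")
    ...     _ = importlib.reload(np)
    >>> any("reloaded" in str(m.message) for m in w)
    True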
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index e2bf6c439..2cbfe52be 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1147,13 +1147,13 @@ add_newdoc('numpy.core.multiarray', 'compare_chararrays',
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
- fromiter(iterable, dtype, count=-1, *, like=None)
+ fromiter(iter, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
- iterable : iterable object
+ iter : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
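
A brief usage sketch of ``fromiter`` (doctest-style; standard behaviour, only
the parameter name changes in this patch):

    >>> gen = (x * x for x in range(5))
    >>> np.fromiter(gen, dtype=int)
    array([ 0,  1,  4,  9, 16])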
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index f736973de..07179a627 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -259,12 +259,16 @@ def inner(a, b):
Returns
-------
out : ndarray
- `out.shape = a.shape[:-1] + b.shape[:-1]`
+ If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ ``out.shape = (*a.shape[:-1], *b.shape[:-1])``
Raises
------
ValueError
- If the last dimension of `a` and `b` has different size.
+ If both `a` and `b` are nonscalar and their last dimensions have
+ different sizes.
See Also
--------
@@ -284,8 +288,8 @@ def inner(a, b):
or explicitly::
- np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
- = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
+ np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
+ = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
In addition `a` or `b` may be scalars, in which case::
@@ -300,14 +304,25 @@ def inner(a, b):
>>> np.inner(a, b)
2
- A multidimensional example:
+ Some multidimensional examples:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
- >>> np.inner(a, b)
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (2, 3)
+ >>> c
array([[ 14, 38, 62],
[ 86, 110, 134]])
+ >>> a = np.arange(2).reshape((1,1,2))
+ >>> b = np.arange(6).reshape((3,2))
+ >>> c = np.inner(a, b)
+ >>> c.shape
+ (1, 1, 3)
+ >>> c
+ array([[[1, 3, 5]]])
+
An example where `b` is a scalar:
>>> np.inner(np.eye(2), 7)
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 53d891049..603e9d93b 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -979,14 +979,28 @@ PyArray_DiscoverDTypeAndShape_Recursive(
* and to handle it recursively. That is, unless we have hit the
* dimension limit.
*/
- npy_bool is_sequence = (PySequence_Check(obj) && PySequence_Size(obj) >= 0);
+ npy_bool is_sequence = PySequence_Check(obj);
+ if (is_sequence) {
+ is_sequence = PySequence_Size(obj) >= 0;
+ if (NPY_UNLIKELY(!is_sequence)) {
+ /* NOTE: This should likely just raise all errors */
+ if (PyErr_ExceptionMatches(PyExc_RecursionError) ||
+ PyErr_ExceptionMatches(PyExc_MemoryError)) {
+ /*
+ * Consider these unrecoverable errors, continuing execution
+ * might crash the interpreter.
+ */
+ return -1;
+ }
+ PyErr_Clear();
+ }
+ }
if (NPY_UNLIKELY(*flags & DISCOVER_TUPLES_AS_ELEMENTS) &&
PyTuple_Check(obj)) {
is_sequence = NPY_FALSE;
}
if (curr_dims == max_dims || !is_sequence) {
/* Clear any PySequence_Size error which would corrupt further calls */
- PyErr_Clear();
max_dims = handle_scalar(
obj, curr_dims, &max_dims, out_descr, out_shape, fixed_DType,
flags, NULL);
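
In Python terms, a critical error raised while taking the length of a
sequence now propagates instead of being cleared; a sketch mirroring the
test added in test_array_coercion.py below:

    >>> class BadSequence:
    ...     def __len__(self):
    ...         raise RecursionError
    ...     def __getitem__(self, item):
    ...         return 1
    >>> np.array(BadSequence())
    Traceback (most recent call last):
        ...
    RecursionError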
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index f6031e370..58571b678 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -2124,7 +2124,7 @@ PyArray_FromInterface(PyObject *origin)
if (iface == NULL) {
if (PyErr_Occurred()) {
- PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ return NULL;
}
return Py_NotImplemented;
}
@@ -2392,7 +2392,7 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context)
array_meth = PyArray_LookupSpecial_OnInstance(op, "__array__");
if (array_meth == NULL) {
if (PyErr_Occurred()) {
- PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
+ return NULL;
}
return Py_NotImplemented;
}
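
With these early returns, an unexpected error raised while looking up
``__array__`` or ``__array_interface__`` now propagates rather than being
cleared; a sketch mirroring the new test below:

    >>> class BadInterface:
    ...     def __getattr__(self, attr):
    ...         if attr == "__array__":
    ...             raise RuntimeError
    ...         raise AttributeError(attr)
    >>> np.array(BadInterface())
    Traceback (most recent call last):
        ...
    RuntimeError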
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
index caba0e00a..86d5b82fc 100644
--- a/numpy/core/src/multiarray/einsum_sumprod.c.src
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -589,7 +589,7 @@ finish_after_unrolled_loop:
goto finish_after_unrolled_loop;
}
-static void
+static NPY_GCC_OPT_3 void
@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@@ -597,156 +597,60 @@ static void
@type@ *data1 = (@type@ *)dataptr[1];
@temptype@ accum = 0;
-#if EINSUM_USE_SSE1 && @float32@
- __m128 a, accum_sse = _mm_setzero_ps();
-#elif EINSUM_USE_SSE2 && @float64@
- __m128d a, accum_sse = _mm_setzero_pd();
-#endif
-
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_contig_outstride0_two (%d)\n",
(int)count);
-
-/* This is placed before the main loop to make small counts faster */
-finish_after_unrolled_loop:
- switch (count) {
-/**begin repeat2
- * #i = 6, 5, 4, 3, 2, 1, 0#
- */
- case @i@+1:
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
- case 0:
- *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
- return;
- }
-
-#if EINSUM_USE_SSE1 && @float32@
+#if @NPYV_CHK@ // NPYV check for @type@
/* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
+ const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1);
+ const int vstep = npyv_nlanes_@sfx@;
+ npyv_@sfx@ vaccum = npyv_zero_@sfx@();
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
+ /**begin repeat2
+ * #cond = if(is_aligned), else#
+ * #ld = loada, load#
+ * #st = storea, store#
+ */
+ @cond@ {
+ const npy_intp vstepx4 = vstep * 4;
+ for (; count >= vstepx4; count -= vstepx4, data0 += vstepx4, data1 += vstepx4) {
+ /**begin repeat3
+ * #i = 0, 1, 2, 3#
*/
- a = _mm_mul_ps(_mm_load_ps(data0+@i@), _mm_load_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
+ npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@);
+ npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@);
+ /**end repeat3**/
+ npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum);
+ npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3);
+ npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2);
+ vaccum = npyv_muladd_@sfx@(a0, b0, ab1);
}
-
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
}
-#elif EINSUM_USE_SSE2 && @float64@
- /* Use aligned instructions if possible */
- if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1)) {
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
- data0 += 8;
- data1 += 8;
- }
-
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
+ /**end repeat2**/
+ for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) {
+ npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count);
+ npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count);
+ vaccum = npyv_muladd_@sfx@(a, b, vaccum);
}
-#endif
-
- /* Unroll the loop by 8 */
- while (count >= 8) {
- count -= 8;
-
-#if EINSUM_USE_SSE1 && @float32@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 4#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_ps(_mm_loadu_ps(data0+@i@), _mm_loadu_ps(data1+@i@));
- accum_sse = _mm_add_ps(accum_sse, a);
-/**end repeat2**/
-#elif EINSUM_USE_SSE2 && @float64@
- _mm_prefetch(data0 + 512, _MM_HINT_T0);
- _mm_prefetch(data1 + 512, _MM_HINT_T0);
-
-/**begin repeat2
- * #i = 0, 2, 4, 6#
- */
- /*
- * NOTE: This accumulation changes the order, so will likely
- * produce slightly different results.
- */
- a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
- accum_sse = _mm_add_pd(accum_sse, a);
-/**end repeat2**/
+ accum = npyv_sum_@sfx@(vaccum);
+ npyv_cleanup();
#else
-/**begin repeat2
- * #i = 0, 1, 2, 3, 4, 5, 6, 7#
- */
- accum += @from@(data0[@i@]) * @from@(data1[@i@]);
-/**end repeat2**/
-#endif
- data0 += 8;
- data1 += 8;
+#ifndef NPY_DISABLE_OPTIMIZATION
+ for (; count >= 4; count -= 4, data0 += 4, data1 += 4) {
+ /**begin repeat2
+ * #i = 0, 1, 2, 3#
+ */
+ const @type@ ab@i@ = @from@(data0[@i@]) * @from@(data1[@i@]);
+ /**end repeat2**/
+ accum += ab0 + ab1 + ab2 + ab3;
}
-
-#if EINSUM_USE_SSE1 && @float32@
- /* Add the four SSE values and put in accum */
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
- accum_sse = _mm_add_ps(a, accum_sse);
- a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
- accum_sse = _mm_add_ps(a, accum_sse);
- _mm_store_ss(&accum, accum_sse);
-#elif EINSUM_USE_SSE2 && @float64@
- /* Add the two SSE2 values and put in accum */
- a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
- accum_sse = _mm_add_pd(a, accum_sse);
- _mm_store_sd(&accum, accum_sse);
-#endif
-
- /* Finish off the loop */
- goto finish_after_unrolled_loop;
+#endif // !NPY_DISABLE_OPTIMIZATION
+ for (; count > 0; --count, ++data0, ++data1) {
+ const @type@ a = @from@(*data0);
+ const @type@ b = @from@(*data1);
+ accum += a * b;
+ }
+#endif // NPYV check for @type@
+ *(@type@ *)dataptr[2] = @to@(@from@(*(@type@ *)dataptr[2]) + accum);
}
static void
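
For context, the rewritten kernel is the two-operand contiguous
sum-of-products loop with out-stride 0, which dot-style contractions hit;
a doctest-style sketch (whether a given build takes the SIMD branch depends
on the compiled CPU features and is an assumption here):

    >>> a = np.arange(8, dtype=np.float32)
    >>> b = np.ones(8, dtype=np.float32)
    >>> np.einsum('i,i->', a, b)
    28.0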
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 32c5ac0dc..870b633ed 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4085,6 +4085,42 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
}
+static PyObject *
+_reload_guard(PyObject *NPY_UNUSED(self)) {
+ static int initialized = 0;
+
+#if !defined(PYPY_VERSION)
+ if (PyThreadState_Get()->interp != PyInterpreterState_Main()) {
+ if (PyErr_WarnEx(PyExc_UserWarning,
+ "NumPy was imported from a Python sub-interpreter but "
+ "NumPy does not properly support sub-interpreters. "
+ "This will likely work for most users but might cause hard to "
+ "track down issues or subtle bugs. "
+ "A common user of the rare sub-interpreter feature is wsgi "
+ "which also allows single-interpreter mode.\n"
+ "Improvements in the case of bugs are welcome, but is not "
+ "on the NumPy roadmap, and full support may require "
+ "significant effort to achieve.", 2) < 0) {
+ return NULL;
+ }
+ /* No need to give the other warning in a sub-interpreter as well... */
+ initialized = 1;
+ Py_RETURN_NONE;
+ }
+#endif
+ if (initialized) {
+ if (PyErr_WarnEx(PyExc_UserWarning,
+ "The NumPy module was reloaded (imported a second time). "
+ "This can in some cases result in small but subtle issues "
+ "and is discouraged.", 2) < 0) {
+ return NULL;
+ }
+ }
+ initialized = 1;
+ Py_RETURN_NONE;
+}
+
+
static struct PyMethodDef array_module_methods[] = {
{"_get_implementing_args",
(PyCFunction)array__get_implementing_args,
@@ -4276,6 +4312,9 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS, NULL},
{"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage,
METH_O, NULL},
+ {"_reload_guard", (PyCFunction)_reload_guard,
+ METH_NOARGS,
+ "Give a warning on reload and big warning in sub-interpreters."},
{NULL, NULL, 0, NULL} /* sentinel */
};
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index 78def9360..b966ee7b0 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -689,3 +689,33 @@ class TestArrayLikes:
np.array(arr)
with pytest.raises(MemoryError):
np.array([arr])
+
+ @pytest.mark.parametrize("attribute",
+ ["__array_interface__", "__array__", "__array_struct__"])
+ def test_bad_array_like_attributes(self, attribute):
+ # Check that errors during attribute retrieval are raised unless
+ # they are AttributeErrors.
+
+ class BadInterface:
+ def __getattr__(self, attr):
+ if attr == attribute:
+ raise RuntimeError
+ super().__getattr__(attr)
+
+ with pytest.raises(RuntimeError):
+ np.array(BadInterface())
+
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
+ def test_bad_array_like_bad_length(self, error):
+ # RecursionError and MemoryError are considered "critical" in
+ # sequences. We could expand this more generally though. (NumPy 1.20)
+ class BadSequence:
+ def __len__(self):
+ raise error
+ def __getitem__(self, item):
+ # must have getitem to be a Sequence
+ return 1
+
+ with pytest.raises(error):
+ np.array(BadSequence())
+
diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py
index 83ac86036..2066b95ea 100644
--- a/numpy/fft/_pocketfft.py
+++ b/numpy/fft/_pocketfft.py
@@ -1242,6 +1242,15 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None):
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
+ Examples
+ --------
+ >>> a = np.mgrid[:5, :5][0]
+ >>> np.fft.rfft2(a)
+ array([[ 50. +0.j , 0. +0.j , 0. +0.j ],
+ [-12.5+17.20477401j, 0. +0.j , 0. +0.j ],
+ [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ],
+ [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ],
+ [-12.5-17.20477401j, 0. +0.j , 0. +0.j ]])
"""
return rfftn(a, s, axes, norm)
@@ -1399,5 +1408,15 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None):
This is really `irfftn` with different defaults.
For more details see `irfftn`.
+ Examples
+ --------
+ >>> a = np.mgrid[:5, :5][0]
+ >>> A = np.fft.rfft2(a)
+ >>> np.fft.irfft2(A, s=a.shape)
+ array([[0., 0., 0., 0., 0.],
+ [1., 1., 1., 1., 1.],
+ [2., 2., 2., 2., 2.],
+ [3., 3., 3., 3., 3.],
+ [4., 4., 4., 4., 4.]])
"""
return irfftn(a, s, axes, norm)
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index af8e28e42..efebb5fb7 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -539,10 +539,11 @@ def _savez_dispatcher(file, *args, **kwds):
def savez(file, *args, **kwds):
"""Save several arrays into a single file in uncompressed ``.npz`` format.
- If arguments are passed in with no keywords, the corresponding variable
- names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
- arguments are given, the corresponding variable names, in the ``.npz``
- file will match the keyword names.
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e., ``savez(fn,
+ x, y)``, their names will be `arr_0`, `arr_1`, etc.
Parameters
----------
@@ -552,13 +553,12 @@ def savez(file, *args, **kwds):
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
- Arrays to save to the file. Since it is not possible for Python to
- know the names of the arrays outside `savez`, the arrays will be saved
- with names "arr_0", "arr_1", and so on. These arguments can be any
- expression.
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
kwds : Keyword arguments, optional
- Arrays to save to the file. Arrays will be saved in the file with the
- keyword names.
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
Returns
-------
@@ -613,6 +613,7 @@ def savez(file, *args, **kwds):
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
"""
_savez(file, args, kwds, False)
@@ -627,9 +628,11 @@ def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
- If keyword arguments are given, then filenames are taken from the keywords.
- If arguments are passed in with no keywords, then stored filenames are
- arr_0, arr_1, etc.
+ Provide arrays as keyword arguments to store them under the
+ corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.
+
+ If arrays are specified as positional arguments, i.e.,
+ ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.
Parameters
----------
@@ -639,13 +642,12 @@ def savez_compressed(file, *args, **kwds):
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
- Arrays to save to the file. Since it is not possible for Python to
- know the names of the arrays outside `savez`, the arrays will be saved
- with names "arr_0", "arr_1", and so on. These arguments can be any
- expression.
+ Arrays to save to the file. Please use keyword arguments (see
+ `kwds` below) to assign names to arrays. Arrays specified as
+ args will be named "arr_0", "arr_1", and so on.
kwds : Keyword arguments, optional
- Arrays to save to the file. Arrays will be saved in the file with the
- keyword names.
+ Arrays to save to the file. Each array will be saved to the
+ output file with its corresponding keyword name.
Returns
-------
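
A short usage sketch of the recommended keyword form (mirroring the existing
example in the ``savez`` docstring):

    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez('outfile.npz', x=x, y=y)
    >>> sorted(np.load('outfile.npz').files)
    ['x', 'y']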
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 7ffa36775..e00bc4d98 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -859,7 +859,8 @@ cdef class Generator:
greater than or equal to low. The default value is 0.
high : float or array_like of floats
Upper boundary of the output interval. All values generated will be
- less than high. The default value is 1.0.
+ less than high. high - low must be non-negative. The default value
+ is 1.0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -885,10 +886,6 @@ cdef class Generator:
anywhere within the interval ``[a, b)``, and zero elsewhere.
When ``high`` == ``low``, values of ``low`` will be returned.
- If ``high`` < ``low``, the results are officially undefined
- and may eventually raise an error, i.e. do not rely on this
- function to behave when passed arguments satisfying that
- inequality condition.
Examples
--------
@@ -914,7 +911,7 @@ cdef class Generator:
"""
cdef bint is_scalar = True
cdef np.ndarray alow, ahigh, arange
- cdef double _low, _high, range
+ cdef double _low, _high, rng
cdef object temp
alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
@@ -923,13 +920,13 @@ cdef class Generator:
if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
_low = PyFloat_AsDouble(low)
_high = PyFloat_AsDouble(high)
- range = _high - _low
- if not np.isfinite(range):
- raise OverflowError('Range exceeds valid bounds')
+ rng = _high - _low
+ if not np.isfinite(rng):
+ raise OverflowError('high - low range exceeds valid bounds')
return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
_low, '', CONS_NONE,
- range, '', CONS_NONE,
+ rng, 'high - low', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
None)
@@ -943,7 +940,7 @@ cdef class Generator:
raise OverflowError('Range exceeds valid bounds')
return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
alow, '', CONS_NONE,
- arange, '', CONS_NONE,
+ arange, 'high - low', CONS_NON_NEGATIVE,
0.0, '', CONS_NONE,
None)
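
With the ``CONS_NON_NEGATIVE`` constraint in place, a negative ``high - low``
is rejected up front; a sketch (the exact message text comes from the shared
constraint checking and is an assumption here):

    >>> rng = np.random.default_rng()
    >>> rng.uniform(2.0, 1.0)
    Traceback (most recent call last):
        ...
    ValueError: high - low < 0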
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index b69cd38d4..c4fb5883c 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -1666,6 +1666,21 @@ class TestRandomDist:
# DBL_MAX by increasing fmin a bit
random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+ def test_uniform_zero_range(self):
+ func = random.uniform
+ result = func(1.5, 1.5)
+ assert_allclose(result, 1.5)
+ result = func([0.0, np.pi], [0.0, np.pi])
+ assert_allclose(result, [0.0, np.pi])
+ result = func([[2145.12], [2145.12]], [2145.12, 2145.12])
+ assert_allclose(result, 2145.12 + np.zeros((2, 2)))
+
+ def test_uniform_neg_range(self):
+ func = random.uniform
+ assert_raises(ValueError, func, 2, 1)
+ assert_raises(ValueError, func, [1, 2], [1, 1])
+ assert_raises(ValueError, func, [[0, 1],[2, 3]], 2)
+
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index 61ae91b00..5c4309f4a 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -1,4 +1,4 @@
-from numpy.testing import assert_raises, assert_, assert_equal
+from numpy.testing import assert_raises, assert_warns, assert_, assert_equal
from numpy.compat import pickle
import sys
@@ -16,13 +16,15 @@ def test_numpy_reloading():
VisibleDeprecationWarning = np.VisibleDeprecationWarning
ModuleDeprecationWarning = np.ModuleDeprecationWarning
- reload(np)
+ with assert_warns(UserWarning):
+ reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
assert_raises(RuntimeError, reload, numpy._globals)
- reload(np)
+ with assert_warns(UserWarning):
+ reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
@@ -45,13 +47,15 @@ def test_full_reimport():
# This is generally unsafe, especially, since we also reload the C-modules.
code = textwrap.dedent(r"""
import sys
+ from pytest import warns
import numpy as np
for k in list(sys.modules.keys()):
if "numpy" in k:
del sys.modules[k]
- import numpy as np
+ with warns(UserWarning):
+ import numpy as np
""")
p = subprocess.run([sys.executable, '-c', code])
- assert p.returncode == 0
+