author | Sebastian Berg <sebastian@sipsolutions.net> | 2021-04-19 19:16:58 -0500
committer | Sebastian Berg <sebastian@sipsolutions.net> | 2021-04-20 17:36:28 -0500
commit | bb2a1ad9161838212ac90af4f6d6af430bbab00d (patch)
tree | 756a4b81a1feff727353d10fc8043d96c87be9c8 /numpy/core
parent | 120237d2b8f9f35d9fdde0ed3e7f98bff8e4b358 (diff)
download | numpy-bb2a1ad9161838212ac90af4f6d6af430bbab00d.tar.gz
BUG: Initialize the full nditer buffer in case of error
This is necessary because in some rare cases (reductions), we may
not actually use the full buffer. Without this, the cleanup-on-error
code would have to grow smart enough to know exactly which parts of
the buffer were used.
It seems much simpler to just always initialize the full buffers, even
if we may not end up using them.
Admittedly, the old logic might have skipped the buffer clearing
(especially of the full buffer) in a fair number of cases, but since
this is only relevant for `object` dtype, I assume this is fine.
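
For illustration only (not part of the patch): a minimal Python sketch, based on the regression test added below (gh-18810), of the kind of failing object-dtype reduction this commit is about.

```python
import numpy as np

# A strided object array that cannot simply be flattened, so the nditer
# has to buffer it during the reduction (taken from the new test below).
arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]], dtype=object)[::2]

try:
    # The reduction fails with a TypeError (None + int); the error-cleanup
    # path then has to clear the object buffers, including parts that were
    # never written -- which is why they are now zero-initialized up front.
    np.sum(arr)
except TypeError:
    pass
```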
Diffstat (limited to 'numpy/core')
-rw-r--r-- | numpy/core/src/multiarray/nditer_api.c | 3
-rw-r--r-- | numpy/core/src/multiarray/nditer_constr.c | 3
-rw-r--r-- | numpy/core/tests/test_nditer.py | 19

3 files changed, 25 insertions, 0 deletions
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 81209651b..063e30919 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -1760,6 +1760,9 @@ npyiter_allocate_buffers(NpyIter *iter, char **errmsg)
                 }
                 goto fail;
             }
+            if (PyDataType_FLAGCHK(op_dtype[iop], NPY_NEEDS_INIT)) {
+                memset(buffer, '\0', itemsize*buffersize);
+            }
             buffers[iop] = buffer;
         }
     }
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 982dca849..2197fe798 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -594,6 +594,9 @@ NpyIter_Copy(NpyIter *iter)
                     if (buffers[iop] == NULL) {
                         out_of_memory = 1;
                     }
+                    if (PyDataType_FLAGCHK(dtypes[iop], NPY_NEEDS_INIT)) {
+                        memset(buffers[iop], '\0', itemsize*buffersize);
+                    }
                 }
             }
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index ddcc8f283..c32822944 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2920,6 +2920,25 @@ def test_object_iter_cleanup():
     assert_raises(TypeError, np.logical_or.reduce,
                   np.array([T(), T()], dtype='O'))
 
+def test_object_iter_cleanup_reduce():
+    # Similar as above, but a complex reduction case that was previously
+    # missed (see gh-18810).
+    # The following array is special in that it cannot be flattened:
+    arr = np.array([[None, 1], [-1, -1], [None, 2], [-1, -1]])[::2]
+    with pytest.raises(TypeError):
+        np.sum(arr)
+
+@pytest.mark.parametrize("arr", [
+        np.ones((8000, 4, 2), dtype=object)[:, ::2, :],
+        np.ones((8000, 4, 2), dtype=object, order="F")[:, ::2, :],
+        np.ones((8000, 4, 2), dtype=object)[:, ::2, :].copy("F")])
+def test_object_iter_cleanup_large_reduce(arr):
+    # More complicated calls are possible for large arrays:
+    out = np.ones(8000, dtype=np.intp)
+    # force casting with `dtype=object`
+    res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
+    assert_array_equal(res, np.full(8000, 4, dtype=object))
+
 def test_iter_too_large():
     # The total size of the iterator must not exceed the maximum intp due
     # to broadcasting.  Dividing by 1024 will keep it small enough to
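
For reference, the larger parametrized case can also be run standalone; this is a sketch with plain assertions instead of pytest, the shape and values taken from the test above.

```python
import numpy as np

# The non-contiguous object view forces the nditer to use buffering, and
# requesting `dtype=object` while writing into an integer `out` array forces
# a cast through those buffers as well.
arr = np.ones((8000, 4, 2), dtype=object)[:, ::2, :]
out = np.ones(8000, dtype=np.intp)

res = np.sum(arr, axis=(1, 2), dtype=object, out=out)
assert res is out          # the result is written into `out`
assert (res == 4).all()    # each row of the (8000, 2, 2) view sums to 4
```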