Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/_add_newdocs.py                   | 42
-rw-r--r--  numpy/core/src/multiarray/dragon4.c          | 13
-rw-r--r--  numpy/core/src/multiarray/getset.c           | 34
-rw-r--r--  numpy/core/src/umath/ufunc_object.c          | 24
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c |  2
-rw-r--r--  numpy/core/src/umath/umathmodule.c           |  2
-rw-r--r--  numpy/core/tests/test_deprecations.py        | 14
-rw-r--r--  numpy/core/tests/test_multiarray.py          | 63
-rw-r--r--  numpy/core/tests/test_scalarprint.py         |  1
-rw-r--r--  numpy/core/tests/test_ufunc.py               | 11
-rw-r--r--  numpy/lib/stride_tricks.py                   |  1
11 files changed, 144 insertions, 63 deletions
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index dcec768f0..a8d73af3f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -4469,14 +4469,13 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
- bytes per entry than the previous dtype (for example, converting a
- regular array to a structured array), then the behavior of the view
- cannot be predicted just from the superficial appearance of ``a`` (shown
- by ``print(a)``). It also depends on exactly how ``a`` is stored in
- memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
- defined as a slice or transpose, etc., the view may give different
- results.
+ bytes per entry than the previous dtype (for example, converting a regular
+ array to a structured array), then the last axis of ``a`` must be
+ contiguous. This axis will be resized in the result.
+ .. versionchanged:: 1.23.0
+ Only the last axis needs to be contiguous. Previously, the entire array
+ had to be C-contiguous.
Examples
--------
@@ -4521,19 +4520,34 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
- >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
- >>> y = x[:, 0:2]
+ >>> x = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int16)
+ >>> y = x[:, ::2]
>>> y
- array([[1, 2],
- [4, 5]], dtype=int16)
+ array([[1, 3],
+ [4, 6]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
- ValueError: To change to a dtype of a different size, the array must be C-contiguous
+ ValueError: To change to a dtype of a different size, the last axis must be contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
- array([[(1, 2)],
- [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
+ array([[(1, 3)],
+ [(4, 6)]], dtype=[('width', '<i2'), ('length', '<i2')])
+
+ However, views that change dtype are fully supported for arrays with a
+ contiguous last axis, even if the rest of the axes are not C-contiguous:
+
+ >>> x = np.arange(2 * 3 * 4, dtype=np.int8).reshape(2, 3, 4)
+ >>> x.transpose(1, 0, 2).view(np.int16)
+ array([[[ 256, 770],
+ [3340, 3854]],
+ <BLANKLINE>
+ [[1284, 1798],
+ [4368, 4882]],
+ <BLANKLINE>
+ [[2312, 2826],
+ [5396, 5910]]], dtype=int16)
+
"""))
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index ce0293615..5d245b106 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1809,9 +1809,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
pos--;
numFractionDigits--;
}
- if (trim_mode == TrimMode_LeaveOneZero && buffer[pos-1] == '.') {
- buffer[pos++] = '0';
- numFractionDigits++;
+ if (buffer[pos-1] == '.') {
+ /* in TrimMode_LeaveOneZero, add trailing 0 back */
+ if (trim_mode == TrimMode_LeaveOneZero) {
+ buffer[pos++] = '0';
+ numFractionDigits++;
+ }
+ /* in TrimMode_DptZeros, remove trailing decimal point */
+ else if (trim_mode == TrimMode_DptZeros) {
+ pos--;
+ }
}
}
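
The Python-visible entry point for FormatPositional is np.format_float_positional; the new TrimMode_DptZeros branch above removes the dangling decimal point left behind when trailing zeros are trimmed at a fixed precision, which is what the added test_scalarprint.py case below asserts. A minimal doctest-style sketch of the fixed behaviour:

>>> import numpy as np
>>> np.format_float_positional(np.float64(1.001), precision=1, trim='-')
'1'
>>> np.format_float_positional(np.float64(1.0), trim='-')   # already worked before
'1'
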
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index ce21e948e..ac6465acd 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -496,9 +496,6 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
/* Changing the size of the dtype results in a shape change */
if (newtype->elsize != PyArray_DESCR(self)->elsize) {
- int axis;
- npy_intp newdim;
-
/* forbidden cases */
if (PyArray_NDIM(self) == 0) {
PyErr_SetString(PyExc_ValueError,
@@ -513,31 +510,20 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
goto fail;
}
- /* determine which axis to resize */
- if (PyArray_IS_C_CONTIGUOUS(self)) {
- axis = PyArray_NDIM(self) - 1;
- }
- else if (PyArray_IS_F_CONTIGUOUS(self)) {
- /* 2015-11-27 1.11.0, gh-6747 */
- if (DEPRECATE(
- "Changing the shape of an F-contiguous array by "
- "descriptor assignment is deprecated. To maintain the "
- "Fortran contiguity of a multidimensional Fortran "
- "array, use 'a.T.view(...).T' instead") < 0) {
- goto fail;
- }
- axis = 0;
- }
- else {
- /* Don't mention the deprecated F-contiguous support */
+ /* resize on last axis only */
+ int axis = PyArray_NDIM(self) - 1;
+ if (PyArray_DIMS(self)[axis] != 1 &&
+ PyArray_STRIDES(self)[axis] != PyArray_DESCR(self)->elsize) {
PyErr_SetString(PyExc_ValueError,
- "To change to a dtype of a different size, the array must "
- "be C-contiguous");
+ "To change to a dtype of a different size, the last axis "
+ "must be contiguous");
goto fail;
}
+ npy_intp newdim;
+
if (newtype->elsize < PyArray_DESCR(self)->elsize) {
- /* if it is compatible, increase the size of the relevant axis */
+ /* if it is compatible, increase the size of the last axis */
if (newtype->elsize == 0 ||
PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
PyErr_SetString(PyExc_ValueError,
@@ -549,7 +535,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
PyArray_DIMS(self)[axis] *= newdim;
PyArray_STRIDES(self)[axis] = newtype->elsize;
}
- else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
+ else /* newtype->elsize > PyArray_DESCR(self)->elsize */ {
/* if it is compatible, decrease the size of the relevant axis */
newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize;
if ((newdim % newtype->elsize) != 0) {
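
Because ndarray.view routes size-changing dtype updates through this same setter, the relaxed check is also observable via direct dtype assignment. A small doctest-style sketch of the behaviour after this hunk (the old F-contiguous deprecation path is gone; a strided last axis now simply raises):

>>> import numpy as np
>>> a = np.zeros((4, 3), dtype=np.int16)
>>> a.dtype = np.int8            # last axis is contiguous, so it is resized
>>> a.shape
(4, 6)
>>> b = np.zeros((4, 3), dtype=np.int16).T   # F-contiguous: last axis is strided
>>> b.dtype = np.int8
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the last axis must be contiguous
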
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 52b354353..415ff0f07 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -2764,9 +2764,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
* The first operand and output should be the same array, so they should
* be identical. The second argument can be different for reductions,
* but is checked to be identical for accumulate and reduceat.
+ * Ideally, the type-resolver ensures that all are identical, but we do
+ * not strictly enforce that here. Correct handling of byte-order changes
+ * (or metadata) would otherwise require a lot of care; see gh-20699.
*/
- if (out_descrs[0] != out_descrs[2] || (
- enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
+ if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || (
+ enforce_uniform_args && !PyArray_EquivTypes(
+ out_descrs[0], out_descrs[1]))) {
PyErr_Format(PyExc_TypeError,
"the resolved dtypes are not compatible with %s.%s. "
"Resolved (%R, %R, %R)",
@@ -3028,8 +3032,12 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
return NULL;
}
- /* The below code assumes that all descriptors are identical: */
- assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
+ /*
+ * The code below assumes that all descriptors are interchangeable; we
+ * allow them to not be strictly identical (but they typically should be).
+ */
+ assert(PyArray_EquivTypes(descrs[0], descrs[1])
+ && PyArray_EquivTypes(descrs[0], descrs[2]));
if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
/* This can be removed, but the initial element copy needs fixing */
@@ -3441,8 +3449,12 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
return NULL;
}
- /* The below code assumes that all descriptors are identical: */
- assert(descrs[0] == descrs[1] && descrs[0] == descrs[2]);
+ /*
+ * The code below assumes that all descriptors are interchangeable; we
+ * allow them to not be strictly identical (but they typically should be).
+ */
+ assert(PyArray_EquivTypes(descrs[0], descrs[1])
+ && PyArray_EquivTypes(descrs[0], descrs[2]));
if (PyDataType_REFCHK(descrs[2]) && descrs[2]->type_num != NPY_OBJECT) {
/* This can be removed, but the initial element copy needs fixing */
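
In user-facing terms, comparing the resolved descriptors with PyArray_EquivTypes instead of pointer identity lets reduce, accumulate and reduceat accept inputs that differ only in byte order, which is exactly what the new test_ufunc.py case below checks. A minimal doctest-style sketch:

>>> import numpy as np
>>> arr_be = np.arange(10, dtype='>i8')   # non-native byte order on little-endian machines
>>> arr_le = np.arange(10, dtype='<i8')
>>> np.add.reduce(arr_be) == np.add.reduce(arr_le)
True
>>> np.array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
True
>>> np.array_equal(np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
True
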
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 9ed923cf5..90846ca55 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1528,7 +1528,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
}
if (j == nargs) {
*out_innerloop = ufunc->functions[i];
- *out_innerloopdata = ufunc->data[i];
+ *out_innerloopdata = (ufunc->data == NULL) ? NULL : ufunc->data[i];
return 0;
}
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index f8d010ee0..d79506000 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -56,7 +56,7 @@ object_ufunc_loop_selector(PyUFuncObject *ufunc,
int *out_needs_api)
{
*out_innerloop = ufunc->functions[0];
- *out_innerloopdata = ufunc->data[0];
+ *out_innerloopdata = (ufunc->data == NULL) ? NULL : ufunc->data[0];
*out_needs_api = 1;
return 0;
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index d148c89f5..76486f755 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -257,20 +257,6 @@ class TestDatetime64Timezone(_DeprecationTestCase):
self.assert_deprecated(np.datetime64, args=(dt,))
-class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
- """View of non-C-contiguous arrays deprecated in 1.11.0.
-
- The deprecation will not be raised for arrays that are both C and F
- contiguous, as C contiguous is dominant. There are more such arrays
- with relaxed stride checking than without so the deprecation is not
- as visible with relaxed stride checking in force.
- """
-
- def test_fortran_contiguous(self):
- self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
- self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
-
-
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 0c611abb5..2529705d5 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -9191,3 +9191,66 @@ def test_getfield():
pytest.raises(ValueError, a.getfield, 'uint8', -1)
pytest.raises(ValueError, a.getfield, 'uint8', 16)
pytest.raises(ValueError, a.getfield, 'uint64', 0)
+
+
+class TestViewDtype:
+ """
+ Verify that making a view of a non-contiguous array works as expected.
+ """
+ def test_smaller_dtype_multiple(self):
+ # x is non-contiguous
+ x = np.arange(10, dtype='<i4')[::2]
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+ expected = [[0, 0], [2, 0], [4, 0], [6, 0], [8, 0]]
+ assert_array_equal(x[:, np.newaxis].view('<i2'), expected)
+
+ def test_smaller_dtype_not_multiple(self):
+ # x is non-contiguous
+ x = np.arange(5, dtype='<i4')[::2]
+
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('S3')
+ with pytest.raises(ValueError,
+ match='When changing to a smaller dtype'):
+ x[:, np.newaxis].view('S3')
+
+ # Make sure the problem is because of the dtype size
+ expected = [[b''], [b'\x02'], [b'\x04']]
+ assert_array_equal(x[:, np.newaxis].view('S4'), expected)
+
+ def test_larger_dtype_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ expected = np.array([[65536], [327684], [589832],
+ [851980], [1114128]], dtype='<i4')
+ assert_array_equal(x.view('<i4'), expected)
+
+ def test_larger_dtype_not_multiple(self):
+ # x is non-contiguous in the first dimension, contiguous in the last
+ x = np.arange(20, dtype='<i2').reshape(10, 2)[::2, :]
+ with pytest.raises(ValueError,
+ match='When changing to a larger dtype'):
+ x.view('S3')
+ # Make sure the problem is because of the dtype size
+ expected = [[b'\x00\x00\x01'], [b'\x04\x00\x05'], [b'\x08\x00\t'],
+ [b'\x0c\x00\r'], [b'\x10\x00\x11']]
+ assert_array_equal(x.view('S4'), expected)
+
+ def test_f_contiguous(self):
+ # x is F-contiguous
+ x = np.arange(4 * 3, dtype='<i4').reshape(4, 3).T
+ with pytest.raises(ValueError,
+ match='the last axis must be contiguous'):
+ x.view('<i2')
+
+ def test_non_c_contiguous(self):
+ # x is contiguous in axis=-1, but not C-contiguous in other axes
+ x = np.arange(2 * 3 * 4, dtype='i1').\
+ reshape(2, 3, 4).transpose(1, 0, 2)
+ expected = [[[256, 770], [3340, 3854]],
+ [[1284, 1798], [4368, 4882]],
+ [[2312, 2826], [5396, 5910]]]
+ assert_array_equal(x.view('<i2'), expected)
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index ee21d4aa5..4deb5a0a4 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -306,6 +306,7 @@ class TestRealScalars:
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
+ assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
reason="only applies to ppc float128 values")
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 76e4cdcfd..9a9d46da0 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -2148,6 +2148,17 @@ class TestUfunc:
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
+ def test_reducelike_byteorder_resolution(self):
+ # See gh-20699, byte-order changes need some extra care in the type
+ # resolution to make the following succeed:
+ arr_be = np.arange(10, dtype=">i8")
+ arr_le = np.arange(10, dtype="<i8")
+
+ assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+ assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
+ assert_array_equal(
+ np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
+
def test_reducelike_out_promotes(self):
# Check that the out argument to reductions is considered for
# promotion. See also gh-20455.
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 5093993a9..6794ad557 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -86,6 +86,7 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
Vectorized write operations on such arrays will typically be
unpredictable. They may even give different results for small, large,
or transposed arrays.
+
Since writing to these arrays has to be tested and done with great
care, you may want to use ``writeable=False`` to avoid accidental write
operations.