Diffstat (limited to 'numpy')
 -rw-r--r--  numpy/core/_add_newdocs.py             19
 -rw-r--r--  numpy/core/setup.py                     7
 -rw-r--r--  numpy/core/src/multiarray/dragon4.c    13
 -rw-r--r--  numpy/core/src/umath/ufunc_object.c     8
 -rw-r--r--  numpy/core/tests/test_scalarprint.py    1
 -rw-r--r--  numpy/core/tests/test_ufunc.py         11
 6 files changed, 54 insertions(+), 5 deletions(-)
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 523da75b3..a8d73af3f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -2392,6 +2392,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
+ .. warning::
+
+ Setting ``arr.dtype`` is discouraged and may be deprecated in the
+ future. Setting will replace the ``dtype`` without modifying the
+ memory (see also `ndarray.view` and `ndarray.astype`).
+
Parameters
----------
None
@@ -2402,6 +2408,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
See Also
--------
+ ndarray.astype : Cast the values contained in the array to a new data-type.
+ ndarray.view : Create a view of the same data but a different data-type.
numpy.dtype
Examples
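
(Illustrative only, not part of the patch.) The alternatives that the new ``dtype`` warning
points to can be spelled roughly as follows; the concrete dtypes here are arbitrary:

    import numpy as np

    arr = np.arange(4, dtype=np.int64)
    view = arr.view(np.float64)     # reinterpret the same memory under a new dtype
    cast = arr.astype(np.float64)   # convert the values, producing a new array
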
@@ -2627,6 +2635,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
+ .. warning::
+
+ Setting ``arr.shape`` is discouraged and may be deprecated in the
+ future. Using `ndarray.reshape` is the preferred approach.
+
Examples
--------
>>> x = np.array([1, 2, 3, 4])
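
(Illustrative only.) The preferred spelling that the new ``shape`` warning recommends, next
to the discouraged in-place form:

    import numpy as np

    x = np.arange(12)
    y = x.reshape(3, 4)   # preferred: returns a reshaped view where possible
    x.shape = (3, 4)      # discouraged: reshapes in place, fails if a copy would be required
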
@@ -2697,6 +2710,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
+ .. warning::
+
+ Setting ``arr.strides`` is discouraged and may be deprecated in the
+ future. `numpy.lib.stride_tricks.as_strided` should be preferred
+ to create a new view of the same data in a safer way.
+
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
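
(Illustrative sketch; the shape and strides below are chosen arbitrarily.) The safer
alternative named in the new ``strides`` warning creates an explicit view instead of
assigning to ``arr.strides``:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(6, dtype=np.int64)                       # itemsize 8, strides (8,)
    windows = as_strided(x, shape=(4, 3), strides=(8, 8))  # overlapping windows as a view
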
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 0f2f3c210..a67a4cab6 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -24,6 +24,11 @@ NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "
NPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', "0") != "0")
NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING
+# Set NPY_DISABLE_SVML=1 in the environment to disable the vendored SVML
+# library. This option only has significance on a Linux x86_64 host and is most
+# useful to avoid improperly requiring SVML when cross compiling.
+NPY_DISABLE_SVML = (os.environ.get('NPY_DISABLE_SVML', "0") == "1")
+
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils force
# config.h generation inside an Extension class, and as such sharing
@@ -68,6 +73,8 @@ def can_link_svml():
"""SVML library is supported only on x86_64 architecture and currently
only on linux
"""
+ if NPY_DISABLE_SVML:
+ return False
machine = platform.machine()
system = platform.system()
return "x86_64" in machine and system == "Linux"
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index ce0293615..5d245b106 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1809,9 +1809,16 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, BigInt *mantissa,
pos--;
numFractionDigits--;
}
- if (trim_mode == TrimMode_LeaveOneZero && buffer[pos-1] == '.') {
- buffer[pos++] = '0';
- numFractionDigits++;
+ if (buffer[pos-1] == '.') {
+ /* in TrimMode_LeaveOneZero, add trailing 0 back */
+ if (trim_mode == TrimMode_LeaveOneZero) {
+ buffer[pos++] = '0';
+ numFractionDigits++;
+ }
+ /* in TrimMode_DptZeros, remove trailing decimal point */
+ else if (trim_mode == TrimMode_DptZeros) {
+ pos--;
+ }
}
}
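
From Python, the behaviour change is visible through np.format_float_positional (this
mirrors the assertion added to test_scalarprint.py below):

    import numpy as np

    # with trim='-', a value that rounds away its fraction no longer keeps a bare '.'
    np.format_float_positional(np.float64("1.001"), precision=1, trim='-')  # -> '1'
    np.format_float_positional(np.float64("1."), trim='-')                  # -> '1'
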
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 52b354353..83e18a363 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -2764,9 +2764,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc,
* The first operand and output should be the same array, so they should
* be identical. The second argument can be different for reductions,
* but is checked to be identical for accumulate and reduceat.
+ * Ideally, the type-resolver ensures that all are identical, but we do
+ * not enforce this strictly here: doing so while correctly handling
+ * byte-order changes (or metadata) requires a lot of care; see gh-20699.
*/
- if (out_descrs[0] != out_descrs[2] || (
- enforce_uniform_args && out_descrs[0] != out_descrs[1])) {
+ if (!PyArray_EquivTypes(out_descrs[0], out_descrs[2]) || (
+ enforce_uniform_args && !PyArray_EquivTypes(
+ out_descrs[0], out_descrs[1]))) {
PyErr_Format(PyExc_TypeError,
"the resolved dtypes are not compatible with %s.%s. "
"Resolved (%R, %R, %R)",
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index ee21d4aa5..4deb5a0a4 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -306,6 +306,7 @@ class TestRealScalars:
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
"1.2" if tp != np.float16 else "1.2002")
assert_equal(fpos(tp('1.'), trim='-'), "1")
+ assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
reason="only applies to ppc float128 values")
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 76e4cdcfd..9a9d46da0 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -2148,6 +2148,17 @@ class TestUfunc:
# It would be safe, but not equiv casting:
ufunc(a, c, out=out, casting="equiv")
+ def test_reducelike_byteorder_resolution(self):
+ # See gh-20699, byte-order changes need some extra care in the type
+ # resolution to make the following succeed:
+ arr_be = np.arange(10, dtype=">i8")
+ arr_le = np.arange(10, dtype="<i8")
+
+ assert np.add.reduce(arr_be) == np.add.reduce(arr_le)
+ assert_array_equal(np.add.accumulate(arr_be), np.add.accumulate(arr_le))
+ assert_array_equal(
+ np.add.reduceat(arr_be, [1]), np.add.reduceat(arr_le, [1]))
+
def test_reducelike_out_promotes(self):
# Check that the out argument to reductions is considered for
# promotion. See also gh-20455.