Diffstat (limited to 'numpy/core')
-rw-r--r--  numpy/core/_add_newdocs.py                     112
-rw-r--r--  numpy/core/arrayprint.py                         2
-rw-r--r--  numpy/core/fromnumeric.py                       10
-rw-r--r--  numpy/core/include/numpy/npy_cpu.h              37
-rw-r--r--  numpy/core/include/numpy/npy_endian.h           43
-rw-r--r--  numpy/core/numeric.py                           13
-rw-r--r--  numpy/core/numerictypes.py                      18
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c      121
-rw-r--r--  numpy/core/src/multiarray/compiled_base.h        2
-rw-r--r--  numpy/core/src/multiarray/ctors.c               65
-rw-r--r--  numpy/core/src/multiarray/dragon4.c             62
-rw-r--r--  numpy/core/src/multiarray/mapping.c              2
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c     2
-rw-r--r--  numpy/core/src/multiarray/number.c              53
-rw-r--r--  numpy/core/src/private/npy_config.h              3
-rw-r--r--  numpy/core/src/private/ufunc_override.c          4
-rw-r--r--  numpy/core/src/private/ufunc_override.h         28
-rw-r--r--  numpy/core/src/umath/ufunc_object.c            296
-rw-r--r--  numpy/core/tests/test_deprecations.py           14
-rw-r--r--  numpy/core/tests/test_multiarray.py             41
-rw-r--r--  numpy/core/tests/test_regression.py             20
-rw-r--r--  numpy/core/tests/test_umath.py                   3
22 files changed, 534 insertions, 417 deletions
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index f596e613f..b65920fde 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1319,6 +1319,7 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
+ block : Assemble arrays from blocks.
Notes
-----
@@ -1348,19 +1349,19 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
>>> a[1] = np.ma.masked
>>> b = np.arange(2, 5)
>>> a
- masked_array(data = [0 -- 2],
- mask = [False True False],
- fill_value = 999999)
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
>>> b
array([2, 3, 4])
>>> np.concatenate([a, b])
- masked_array(data = [0 1 2 2 3 4],
- mask = False,
- fill_value = 999999)
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
>>> np.ma.concatenate([a, b])
- masked_array(data = [0 -- 2 2 3 4],
- mask = [False True False False False False],
- fill_value = 999999)
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
""")
@@ -5296,99 +5297,6 @@ add_newdoc('numpy.core.umath', 'seterrobj',
#
##############################################################################
-add_newdoc('numpy.core.multiarray', 'digitize',
- """
- digitize(x, bins, right=False)
-
- Return the indices of the bins to which each value in input array belongs.
-
- ========= ============= ============================
- `right` order of bins returned index `i` satisfies
- ========= ============= ============================
- ``False`` increasing ``bins[i-1] <= x < bins[i]``
- ``True`` increasing ``bins[i-1] < x <= bins[i]``
- ``False`` decreasing ``bins[i-1] > x >= bins[i]``
- ``True`` decreasing ``bins[i-1] >= x > bins[i]``
- ========= ============= ============================
-
- If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is
- returned as appropriate.
-
- Parameters
- ----------
- x : array_like
- Input array to be binned. Prior to NumPy 1.10.0, this array had to
- be 1-dimensional, but can now have any shape.
- bins : array_like
- Array of bins. It has to be 1-dimensional and monotonic.
- right : bool, optional
- Indicating whether the intervals include the right or the left bin
- edge. Default behavior is (right==False) indicating that the interval
- does not include the right edge. The left bin end is open in this
- case, i.e., bins[i-1] <= x < bins[i] is the default behavior for
- monotonically increasing bins.
-
- Returns
- -------
- indices : ndarray of ints
- Output array of indices, of same shape as `x`.
-
- Raises
- ------
- ValueError
- If `bins` is not monotonic.
- TypeError
- If the type of the input is complex.
-
- See Also
- --------
- bincount, histogram, unique, searchsorted
-
- Notes
- -----
- If values in `x` are such that they fall outside the bin range,
- attempting to index `bins` with the indices that `digitize` returns
- will result in an IndexError.
-
- .. versionadded:: 1.10.0
-
- `np.digitize` is implemented in terms of `np.searchsorted`. This means
- that a binary search is used to bin the values, which scales much better
- for larger number of bins than the previous linear search. It also removes
- the requirement for the input array to be 1-dimensional.
-
- For monotonically _increasing_ `bins`, the following are equivalent::
-
- np.digitize(x, bins, right=True)
- np.searchsorted(bins, x, side='left')
-
- Note that as the order of the arguments are reversed, the side must be too.
- The `searchsorted` call is marginally faster, as it does not do any
- monotonicity checks. Perhaps more importantly, it supports all dtypes.
-
- Examples
- --------
- >>> x = np.array([0.2, 6.4, 3.0, 1.6])
- >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
- >>> inds = np.digitize(x, bins)
- >>> inds
- array([1, 4, 3, 2])
- >>> for n in range(x.size):
- ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]])
- ...
- 0.0 <= 0.2 < 1.0
- 4.0 <= 6.4 < 10.0
- 2.5 <= 3.0 < 4.0
- 1.0 <= 1.6 < 2.5
-
- >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.])
- >>> bins = np.array([0, 5, 10, 15, 20])
- >>> np.digitize(x,bins,right=True)
- array([1, 2, 3, 4, 4])
- >>> np.digitize(x,bins,right=False)
- array([1, 3, 3, 4, 5])
- """)
-
add_newdoc('numpy.core.multiarray', 'bincount',
"""
bincount(x, weights=None, minlength=0)
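The removed C implementation of digitize is superseded by a Python version built on np.searchsorted and the new _monotonicity helper; that version lives outside numpy/core, so it does not appear in this diff. A minimal sketch of the equivalence described in the removed docstring (illustration only; the monotonicity check is omitted and this is not the actual numpy.lib code):

    import numpy as np

    def digitize_sketch(x, bins, right=False):
        # right=False -> bins[i-1] <= x <  bins[i]   (increasing bins)
        # right=True  -> bins[i-1] <  x <= bins[i]
        side = 'left' if right else 'right'
        bins = np.asarray(bins, dtype=float)
        if bins[0] <= bins[-1]:                      # monotonically increasing bins
            return np.searchsorted(bins, x, side=side)
        # decreasing bins: search the reversed copy, then flip the indices
        return len(bins) - np.searchsorted(bins[::-1], x, side=side)

    x = np.array([0.2, 6.4, 3.0, 1.6])
    bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
    assert np.array_equal(digitize_sketch(x, bins), [1, 4, 3, 2])
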
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 6d15cb23f..a4b5aecc3 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -528,6 +528,8 @@ def array2string(a, max_line_width=None, precision=None,
The output is left-padded by the length of the prefix string, and
wrapping is forced at the column ``max_line_width - len(suffix)``.
+ It should be noted that the content of prefix and suffix strings are
+ not included in the output.
style : _NoValue, optional
Has no effect, do not use.
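A small usage sketch of the note added above: the prefix only influences padding and line wrapping, it is not emitted in the returned string.

    >>> import numpy as np
    >>> a = np.arange(6)
    >>> np.array2string(a, prefix='x = ')
    '[0 1 2 3 4 5]'
    >>> print('x = ' + np.array2string(a, prefix='x = '))
    x = [0 1 2 3 4 5]
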
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 373e0fde8..b9cc98cae 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1198,6 +1198,16 @@ def resize(a, new_shape):
--------
ndarray.resize : resize an array in-place.
+ Notes
+ -----
+ Warning: This functionality does **not** consider axes separately,
+ i.e. it does not apply interpolation/extrapolation.
+ It fills the return array with the required number of elements, taken
+ from `a` as they are laid out in memory, disregarding strides and axes.
+ (This is in case the new shape is smaller. For larger, see above.)
+ This functionality is therefore not suitable to resize images,
+ or data where each axis represents a separate and distinct entity.
+
Examples
--------
>>> a=np.array([[0,1],[2,3]])
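For illustration, the behaviour the new warning describes: elements are taken and repeated in flat (C) order, with no per-axis handling.

    >>> np.resize(np.array([[0, 1], [2, 3]]), (3, 3))
    array([[0, 1, 2],
           [3, 0, 1],
           [2, 3, 0]])
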
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 106ffa450..5edd8f42e 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -39,17 +39,19 @@
* _M_AMD64 defined by MS compiler
*/
#define NPY_CPU_AMD64
+#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_PPC64LE
+#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_PPC64
#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
/*
* __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
* but can't find it ATM
* _ARCH_PPC is used by at least gcc on AIX
+ * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
+ * for those specifically first before defaulting to ppc
*/
#define NPY_CPU_PPC
-#elif defined(__ppc64le__)
- #define NPY_CPU_PPC64LE
-#elif defined(__ppc64__)
- #define NPY_CPU_PPC64
#elif defined(__sparc__) || defined(__sparc)
/* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
#define NPY_CPU_SPARC
@@ -61,10 +63,27 @@
#define NPY_CPU_HPPA
#elif defined(__alpha__)
#define NPY_CPU_ALPHA
-#elif defined(__arm__) && defined(__ARMEL__)
- #define NPY_CPU_ARMEL
-#elif defined(__arm__) && defined(__ARMEB__)
- #define NPY_CPU_ARMEB
+#elif defined(__arm__) || defined(__aarch64__)
+ #if defined(__ARMEB__) || defined(__AARCH64EB__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEB_AARCH64
+ #else
+ #define NPY_CPU_ARMEB
+ #endif
+ #elif defined(__ARMEL__) || defined(__AARCH64EL__)
+ #if defined(__ARM_32BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH32
+ #elif defined(__ARM_64BIT_STATE)
+ #define NPY_CPU_ARMEL_AARCH64
+ #else
+ #define NPY_CPU_ARMEL
+ #endif
+ #else
+ # error Unknown ARM CPU, please report this to numpy maintainers with \
+ information about your platform (OS, CPU and compiler)
+ #endif
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_SH_LE
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
@@ -75,8 +94,6 @@
#define NPY_CPU_MIPSEB
#elif defined(__or1k__)
#define NPY_CPU_OR1K
-#elif defined(__aarch64__)
- #define NPY_CPU_AARCH64
#elif defined(__mc68000__)
#define NPY_CPU_M68K
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index 649bdb0a6..44cdffd14 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -37,28 +37,31 @@
#define NPY_LITTLE_ENDIAN 1234
#define NPY_BIG_ENDIAN 4321
- #if defined(NPY_CPU_X86) \
- || defined(NPY_CPU_AMD64) \
- || defined(NPY_CPU_IA64) \
- || defined(NPY_CPU_ALPHA) \
- || defined(NPY_CPU_ARMEL) \
- || defined(NPY_CPU_AARCH64) \
- || defined(NPY_CPU_SH_LE) \
- || defined(NPY_CPU_MIPSEL) \
- || defined(NPY_CPU_PPC64LE) \
- || defined(NPY_CPU_ARCEL) \
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_ARMEL_AARCH32) \
+ || defined(NPY_CPU_ARMEL_AARCH64) \
+ || defined(NPY_CPU_SH_LE) \
+ || defined(NPY_CPU_MIPSEL) \
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL) \
|| defined(NPY_CPU_RISCV64)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
- #elif defined(NPY_CPU_PPC) \
- || defined(NPY_CPU_SPARC) \
- || defined(NPY_CPU_S390) \
- || defined(NPY_CPU_HPPA) \
- || defined(NPY_CPU_PPC64) \
- || defined(NPY_CPU_ARMEB) \
- || defined(NPY_CPU_SH_BE) \
- || defined(NPY_CPU_MIPSEB) \
- || defined(NPY_CPU_OR1K) \
- || defined(NPY_CPU_M68K) \
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_ARMEB_AARCH32) \
+ || defined(NPY_CPU_ARMEB_AARCH64) \
+ || defined(NPY_CPU_SH_BE) \
+ || defined(NPY_CPU_MIPSEB) \
+ || defined(NPY_CPU_OR1K) \
+ || defined(NPY_CPU_M68K) \
|| defined(NPY_CPU_ARCEB)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index b49a7f551..e5570791a 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1509,11 +1509,14 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
--------
normalize_axis_index : normalizing a single scalar axis
"""
- try:
- axis = [operator.index(axis)]
- except TypeError:
- axis = tuple(axis)
- axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis)
+ # Optimization to speed-up the most common cases.
+ if type(axis) not in (tuple, list):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ pass
+ # Going via an iterator directly is slower than via list comprehension.
+ axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
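An illustrative use of the private helper whose fast path is changed above (its behaviour is unchanged by the optimization):

    >>> from numpy.core.numeric import normalize_axis_tuple
    >>> normalize_axis_tuple(-1, 3)           # scalar axes go through operator.index
    (2,)
    >>> normalize_axis_tuple((0, -1), 3)      # tuples/lists skip the conversion attempt
    (0, 2)
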
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 727fb66d1..817af4c7b 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -92,7 +92,7 @@ from numpy.core.multiarray import (
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
-
+from numpy._globals import VisibleDeprecationWarning
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
@@ -210,8 +210,20 @@ def english_capitalize(s):
sctypeDict = {} # Contains all leaf-node scalar types with aliases
-sctypeNA = {} # Contails all leaf-node types -> numarray type equivalences
-allTypes = {} # Collect the types we will add to the module here
+class TypeNADict(dict):
+ def __getitem__(self, key):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.__getitem__(self, key)
+ def get(self, key, default=None):
+ # 2018-06-24, 1.16
+ warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
+ 'of numpy', VisibleDeprecationWarning, stacklevel=2)
+ return dict.get(self, key, default)
+
+sctypeNA = TypeNADict() # Contails all leaf-node types -> numarray type equivalences
+allTypes = {} # Collect the types we will add to the module here
# separate the actual type info from the abtract base classes
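A sketch of how the deprecation surfaces: lookups still succeed, but now emit a VisibleDeprecationWarning (the looked-up value is taken from the updated regression test below).

    import warnings
    import numpy as np

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        assert np.typeNA[np.int64] == 'Int64'
    assert issubclass(w[0].category, np.VisibleDeprecationWarning)
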
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 8c140f5e2..1c27f8394 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -21,11 +21,17 @@
* and 0 if the array is not monotonic.
*/
static int
-check_array_monotonic(const double *a, npy_int lena)
+check_array_monotonic(const double *a, npy_intp lena)
{
npy_intp i;
double next;
- double last = a[0];
+ double last;
+
+ if (lena == 0) {
+ /* all bin edges hold the same value */
+ return 1;
+ }
+ last = a[0];
/* Skip repeated values at the beginning of the array */
for (i = 1; (i < lena) && (a[i] == last); i++);
@@ -209,106 +215,41 @@ fail:
return NULL;
}
-/*
- * digitize(x, bins, right=False) returns an array of integers the same length
- * as x. The values i returned are such that bins[i - 1] <= x < bins[i] if
- * bins is monotonically increasing, or bins[i - 1] > x >= bins[i] if bins
- * is monotonically decreasing. Beyond the bounds of bins, returns either
- * i = 0 or i = len(bins) as appropriate. If right == True the comparison
- * is bins [i - 1] < x <= bins[i] or bins [i - 1] >= x > bins[i]
- */
+/* Internal function to expose check_array_monotonic to python */
NPY_NO_EXPORT PyObject *
-arr_digitize(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
+arr__monotonicity(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
+ static char *kwlist[] = {"x", NULL};
PyObject *obj_x = NULL;
- PyObject *obj_bins = NULL;
PyArrayObject *arr_x = NULL;
- PyArrayObject *arr_bins = NULL;
- PyObject *ret = NULL;
- npy_intp len_bins;
- int monotonic, right = 0;
- NPY_BEGIN_THREADS_DEF
-
- static char *kwlist[] = {"x", "bins", "right", NULL};
+ long monotonic;
+ npy_intp len_x;
+ NPY_BEGIN_THREADS_DEF;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|i:digitize", kwlist,
- &obj_x, &obj_bins, &right)) {
- goto fail;
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|_monotonicity", kwlist,
+ &obj_x)) {
+ return NULL;
}
- /* PyArray_SearchSorted will make `x` contiguous even if we don't */
- arr_x = (PyArrayObject *)PyArray_FROMANY(obj_x, NPY_DOUBLE, 0, 0,
- NPY_ARRAY_CARRAY_RO);
+ /*
+ * TODO:
+ * `x` could be strided, needs change to check_array_monotonic
+ * `x` is forced to double for this check
+ */
+ arr_x = (PyArrayObject *)PyArray_FROMANY(
+ obj_x, NPY_DOUBLE, 1, 1, NPY_ARRAY_CARRAY_RO);
if (arr_x == NULL) {
- goto fail;
- }
-
- /* TODO: `bins` could be strided, needs change to check_array_monotonic */
- arr_bins = (PyArrayObject *)PyArray_FROMANY(obj_bins, NPY_DOUBLE, 1, 1,
- NPY_ARRAY_CARRAY_RO);
- if (arr_bins == NULL) {
- goto fail;
- }
-
- len_bins = PyArray_SIZE(arr_bins);
- if (len_bins == 0) {
- PyErr_SetString(PyExc_ValueError, "bins must have non-zero length");
- goto fail;
+ return NULL;
}
- NPY_BEGIN_THREADS_THRESHOLDED(len_bins)
- monotonic = check_array_monotonic((const double *)PyArray_DATA(arr_bins),
- len_bins);
+ len_x = PyArray_SIZE(arr_x);
+ NPY_BEGIN_THREADS_THRESHOLDED(len_x)
+ monotonic = check_array_monotonic(
+ (const double *)PyArray_DATA(arr_x), len_x);
NPY_END_THREADS
+ Py_DECREF(arr_x);
- if (monotonic == 0) {
- PyErr_SetString(PyExc_ValueError,
- "bins must be monotonically increasing or decreasing");
- goto fail;
- }
-
- /* PyArray_SearchSorted needs an increasing array */
- if (monotonic == - 1) {
- PyArrayObject *arr_tmp = NULL;
- npy_intp shape = PyArray_DIM(arr_bins, 0);
- npy_intp stride = -PyArray_STRIDE(arr_bins, 0);
- void *data = (void *)(PyArray_BYTES(arr_bins) - stride * (shape - 1));
-
- arr_tmp = (PyArrayObject *)PyArray_NewFromDescrAndBase(
- &PyArray_Type, PyArray_DescrFromType(NPY_DOUBLE),
- 1, &shape, &stride, data,
- PyArray_FLAGS(arr_bins), NULL, (PyObject *)arr_bins);
- Py_DECREF(arr_bins);
- if (!arr_tmp) {
- goto fail;
- }
- arr_bins = arr_tmp;
- }
-
- ret = PyArray_SearchSorted(arr_bins, (PyObject *)arr_x,
- right ? NPY_SEARCHLEFT : NPY_SEARCHRIGHT, NULL);
- if (!ret) {
- goto fail;
- }
-
- /* If bins is decreasing, ret has bins from end, not start */
- if (monotonic == -1) {
- npy_intp *ret_data =
- (npy_intp *)PyArray_DATA((PyArrayObject *)ret);
- npy_intp len_ret = PyArray_SIZE((PyArrayObject *)ret);
-
- NPY_BEGIN_THREADS_THRESHOLDED(len_ret)
- while (len_ret--) {
- *ret_data = len_bins - *ret_data;
- ret_data++;
- }
- NPY_END_THREADS
- }
-
- fail:
- Py_XDECREF(arr_x);
- Py_XDECREF(arr_bins);
- return ret;
+ return PyInt_FromLong(monotonic);
}
/*
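As read from check_array_monotonic, the new private binding returns 1 for monotonically increasing input, -1 for decreasing input and 0 otherwise. A sketch (internal helper, not public API):

    >>> import numpy as np
    >>> from numpy.core.multiarray import _monotonicity
    >>> _monotonicity(np.array([1.0, 2.0, 3.0]))
    1
    >>> _monotonicity(np.array([3.0, 2.0, 1.0]))
    -1
    >>> _monotonicity(np.array([1.0, 3.0, 2.0]))
    0
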
diff --git a/numpy/core/src/multiarray/compiled_base.h b/numpy/core/src/multiarray/compiled_base.h
index 51508531c..082139910 100644
--- a/numpy/core/src/multiarray/compiled_base.h
+++ b/numpy/core/src/multiarray/compiled_base.h
@@ -7,7 +7,7 @@ arr_insert(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
arr_bincount(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
-arr_digitize(PyObject *, PyObject *, PyObject *kwds);
+arr__monotonicity(PyObject *, PyObject *, PyObject *kwds);
NPY_NO_EXPORT PyObject *
arr_interp(PyObject *, PyObject *, PyObject *);
NPY_NO_EXPORT PyObject *
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 7367902cc..938850997 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -666,7 +666,6 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
int *out_is_object)
{
PyObject *e;
- int r;
npy_intp n, i;
Py_buffer buffer_view;
PyObject * seq;
@@ -846,46 +845,48 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
else {
- npy_intp dtmp[NPY_MAXDIMS];
- int j, maxndim_m1 = *maxndim - 1;
- e = PySequence_Fast_GET_ITEM(seq, 0);
-
- r = discover_dimensions(e, &maxndim_m1, d + 1, check_it,
- stop_at_string, stop_at_tuple,
- out_is_object);
- if (r < 0) {
+ int all_elems_maxndim = *maxndim - 1;
+ npy_intp *all_elems_d = d + 1;
+ int all_dimensions_match = 1;
+
+ /* Get the dimensions of the first item as a baseline */
+ PyObject *first = PySequence_Fast_GET_ITEM(seq, 0);
+ if (discover_dimensions(
+ first, &all_elems_maxndim, all_elems_d, check_it,
+ stop_at_string, stop_at_tuple, out_is_object) < 0) {
Py_DECREF(seq);
- return r;
+ return -1;
}
- /* For the dimension truncation check below */
- *maxndim = maxndim_m1 + 1;
+ /* Compare the dimensions of all the remaining items */
for (i = 1; i < n; ++i) {
- e = PySequence_Fast_GET_ITEM(seq, i);
- /* Get the dimensions of the first item */
- r = discover_dimensions(e, &maxndim_m1, dtmp, check_it,
- stop_at_string, stop_at_tuple,
- out_is_object);
- if (r < 0) {
+ int j;
+ int elem_maxndim = *maxndim - 1;
+ npy_intp elem_d[NPY_MAXDIMS];
+
+ PyObject *elem = PySequence_Fast_GET_ITEM(seq, i);
+ if (discover_dimensions(
+ elem, &elem_maxndim, elem_d, check_it,
+ stop_at_string, stop_at_tuple, out_is_object) < 0) {
Py_DECREF(seq);
- return r;
+ return -1;
}
- /* Reduce max_ndim_m1 to just items which match */
- for (j = 0; j < maxndim_m1; ++j) {
- if (dtmp[j] != d[j+1]) {
- maxndim_m1 = j;
+ /* Find the number of left-dimensions which match, j */
+ for (j = 0; j < elem_maxndim && j < all_elems_maxndim; ++j) {
+ if (elem_d[j] != all_elems_d[j]) {
break;
}
}
+ if (j != elem_maxndim || j != all_elems_maxndim) {
+ all_dimensions_match = 0;
+ }
+ all_elems_maxndim = j;
}
- /*
- * If the dimensions are truncated, need to produce
- * an object array.
- */
- if (maxndim_m1 + 1 < *maxndim) {
+ *maxndim = all_elems_maxndim + 1;
+ if (!all_dimensions_match) {
+ /* typically results in an array containing variable-length lists */
*out_is_object = 1;
- *maxndim = maxndim_m1 + 1;
}
}
@@ -1704,9 +1705,9 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
*out_ndim = NPY_MAXDIMS;
is_object = 0;
- if (discover_dimensions(op, out_ndim, out_dims, check_it,
- stop_at_string, stop_at_tuple,
- &is_object) < 0) {
+ if (discover_dimensions(
+ op, out_ndim, out_dims, check_it,
+ stop_at_string, stop_at_tuple, &is_object) < 0) {
Py_DECREF(*out_dtype);
if (PyErr_Occurred()) {
return -1;
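The practical effect of the discover_dimensions rewrite, as exercised by the new test_multiarray tests further down: sequences with mismatching depths or jagged shapes fall back to 1-d object arrays.

    >>> np.array([[1], 2, 3]).dtype
    dtype('O')
    >>> np.array([[1, 1], [2], [3]]).shape
    (3,)
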
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index c14653ac5..abbf05220 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -2698,7 +2698,7 @@ Dragon4_PrintFloat_Intel_extended128(
}
#endif /* HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE */
-#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
/*
* IEEE binary128 floating-point format
*
@@ -2707,18 +2707,14 @@ Dragon4_PrintFloat_Intel_extended128(
* mantissa: 112 bits
*
* Currently binary128 format exists on only a few CPUs, such as on the POWER9
- * arch. Because of this, this code has not been tested. I am not sure if the
- * arch also supports uint128, and C does not seem to support int128 literals.
- * So we use uint64 to do manipulation. Unfortunately this means we are endian
- * dependent. Assume little-endian for now, can fix later once binary128
- * becomes more common.
+ * arch or aarch64. Because of this, this code has not been extensively tested.
+ * I am not sure if the arch also supports uint128, and C does not seem to
+ * support int128 literals. So we use uint64 to do manipulation.
*/
static npy_uint32
Dragon4_PrintFloat_IEEE_binary128(
- Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+ Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt)
{
- FloatUnion128 buf128;
-
char *buffer = scratch->repr;
npy_uint32 bufferSize = sizeof(scratch->repr);
BigInt *bigints = scratch->bigints;
@@ -2731,8 +2727,6 @@ Dragon4_PrintFloat_IEEE_binary128(
npy_bool hasUnequalMargins;
char signbit = '\0';
- buf128.floatingPoint = *value;
-
if (bufferSize == 0) {
return 0;
}
@@ -2742,11 +2736,10 @@ Dragon4_PrintFloat_IEEE_binary128(
return 0;
}
- /* Assumes little-endian !!! */
- mantissa_hi = buf128.integer.a & bitmask_u64(48);
- mantissa_lo = buf128.integer.b;
- floatExponent = (buf128.integer.a >> 48) & bitmask_u32(15);
- floatSign = buf128.integer.a >> 63;
+ mantissa_hi = val128.hi & bitmask_u64(48);
+ mantissa_lo = val128.lo;
+ floatExponent = (val128.hi >> 48) & bitmask_u32(15);
+ floatSign = val128.hi >> 63;
/* output the sign */
if (floatSign != 0) {
@@ -2810,8 +2803,45 @@ Dragon4_PrintFloat_IEEE_binary128(
return Format_floatbits(buffer, bufferSize, bigints, exponent,
signbit, mantissaBit, hasUnequalMargins, opt);
}
+
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary128_le(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ FloatUnion128 buf128;
+
+ buf128.floatingPoint = *value;
+ val128.lo = buf128.integer.a;
+ val128.hi = buf128.integer.b;
+
+ return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt);
+}
#endif /* HAVE_LDOUBLE_IEEE_QUAD_LE */
+#if defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
+/*
+ * This function is untested, very few, if any, architectures implement
+ * big endian IEEE binary128 floating point.
+ */
+static npy_uint32
+Dragon4_PrintFloat_IEEE_binary128_be(
+ Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt)
+{
+ FloatVal128 val128;
+ FloatUnion128 buf128;
+
+ buf128.floatingPoint = *value;
+ val128.lo = buf128.integer.b;
+ val128.hi = buf128.integer.a;
+
+ return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt);
+}
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_BE */
+
+#endif /* HAVE_LDOUBLE_IEEE_QUAD_LE | HAVE_LDOUBLE_IEEE_BE*/
+
#if (defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE) || \
defined(HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE))
/*
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index cdca1d606..f338226c2 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1540,13 +1540,11 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
"cannot use field titles in multi-field index");
}
if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
- Py_DECREF(title);
Py_DECREF(name);
Py_DECREF(fields);
Py_DECREF(names);
return 0;
}
- Py_DECREF(title);
}
/* disallow duplicate field indices */
if (PyDict_Contains(fields, name)) {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index e6af5a81e..6e57f1d6d 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4345,7 +4345,7 @@ static struct PyMethodDef array_module_methods[] = {
"indicated by mask."},
{"bincount", (PyCFunction)arr_bincount,
METH_VARARGS | METH_KEYWORDS, NULL},
- {"digitize", (PyCFunction)arr_digitize,
+ {"_monotonicity", (PyCFunction)arr__monotonicity,
METH_VARARGS | METH_KEYWORDS, NULL},
{"interp", (PyCFunction)arr_interp,
METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 448d2d9c2..f71d39405 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -15,6 +15,7 @@
#include "temp_elide.h"
#include "binop_override.h"
+#include "ufunc_override.h"
/*************************************************************************
**************** Implement Number Protocol ****************************
@@ -550,6 +551,50 @@ array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
return value;
}
+static PyObject *
+array_positive(PyArrayObject *m1)
+{
+ /*
+ * For backwards compatibility, where + just implied a copy,
+ * we cannot just call n_ops.positive. Instead, we do the following
+ * 1. Try n_ops.positive
+ * 2. If we get an exception, check whether __array_ufunc__ is
+ * overridden; if so, we live in the future and we allow the
+ * TypeError to be passed on.
+ * 3. If not, give a deprecation warning and return a copy.
+ */
+ PyObject *value;
+ if (can_elide_temp_unary(m1)) {
+ value = PyArray_GenericInplaceUnaryFunction(m1, n_ops.positive);
+ }
+ else {
+ value = PyArray_GenericUnaryFunction(m1, n_ops.positive);
+ }
+ if (value == NULL) {
+ /*
+ * We first fetch the error, as it needs to be clear to check
+ * for the override. When the deprecation is removed,
+ * this whole stanza can be deleted.
+ */
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ if (has_non_default_array_ufunc((PyObject *)m1)) {
+ PyErr_Restore(exc, val, tb);
+ return NULL;
+ }
+ /* 2018-06-28, 1.16.0 */
+ if (DEPRECATE("Applying '+' to a non-numerical array is "
+ "ill-defined. Returning a copy, but in the future "
+ "this will error.") < 0) {
+ return NULL;
+ }
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+ value = PyArray_Return((PyArrayObject *)PyArray_Copy(m1));
+ }
+ return value;
+}
static PyObject *
array_negative(PyArrayObject *m1)
@@ -927,12 +972,6 @@ array_hex(PyArrayObject *v)
#endif
static PyObject *
-_array_copy_nice(PyArrayObject *self)
-{
- return PyArray_Return((PyArrayObject *) PyArray_Copy(self));
-}
-
-static PyObject *
array_index(PyArrayObject *v)
{
if (!PyArray_ISINTEGER(v) || PyArray_NDIM(v) != 0) {
@@ -955,7 +994,7 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_divmod, /*nb_divmod*/
(ternaryfunc)array_power, /*nb_power*/
(unaryfunc)array_negative, /*nb_neg*/
- (unaryfunc)_array_copy_nice, /*nb_pos*/
+ (unaryfunc)array_positive, /*nb_pos*/
(unaryfunc)array_absolute, /*(unaryfunc)array_abs,*/
(inquiry)_array_nonzero, /*nb_nonzero*/
(unaryfunc)array_invert, /*nb_invert*/
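A sketch of the new unary '+' behaviour, grounded in the tests added below: arrays whose dtype has no 'positive' loop still get a copy back, now with a DeprecationWarning, while objects overriding __array_ufunc__ see the TypeError directly.

    import warnings
    import numpy as np

    a = np.array('foo')                  # no 'positive' ufunc loop for string dtypes
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        b = +a                           # still returns a copy, for now
    assert issubclass(w[0].category, DeprecationWarning)
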
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 107b3cb5b..8143e7719 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -15,7 +15,8 @@
* amd64 is not harmed much by the bloat as the system provides 16 byte
* alignment by default.
*/
-#if (defined NPY_CPU_X86 || defined _WIN32)
+#if (defined NPY_CPU_X86 || defined _WIN32 || defined NPY_CPU_ARMEL_AARCH32 ||\
+ defined NPY_CPU_ARMEB_AARCH32)
#define NPY_MAX_COPY_ALIGNMENT 8
#else
#define NPY_MAX_COPY_ALIGNMENT 16
diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/private/ufunc_override.c
index 69c3cc56c..33b54c665 100644
--- a/numpy/core/src/private/ufunc_override.c
+++ b/numpy/core/src/private/ufunc_override.c
@@ -22,7 +22,7 @@
* nor to the default __array_ufunc__ method, so instead we import locally.
* TODO: Can this really not be done more smartly?
*/
-static PyObject *
+NPY_NO_EXPORT PyObject *
get_non_default_array_ufunc(PyObject *obj)
{
static PyObject *ndarray = NULL;
@@ -61,7 +61,7 @@ get_non_default_array_ufunc(PyObject *obj)
* Returns 1 if this is the case, 0 if not.
*/
-static int
+NPY_NO_EXPORT int
has_non_default_array_ufunc(PyObject * obj)
{
PyObject *method = get_non_default_array_ufunc(obj);
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
index fd1ee2135..5b269d270 100644
--- a/numpy/core/src/private/ufunc_override.h
+++ b/numpy/core/src/private/ufunc_override.h
@@ -4,6 +4,34 @@
#include "npy_config.h"
/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns a new reference, the value of type(obj).__array_ufunc__
+ *
+ * If the __array_ufunc__ matches that of ndarray, or does not exist, return
+ * NULL.
+ *
+ * Note that since this module is used with both multiarray and umath, we do
+ * not have access to PyArray_Type and therewith neither to PyArray_CheckExact
+ * nor to the default __array_ufunc__ method, so instead we import locally.
+ * TODO: Can this really not be done more smartly?
+ */
+NPY_NO_EXPORT PyObject *
+get_non_default_array_ufunc(PyObject *obj);
+
+/*
+ * Check whether an object has __array_ufunc__ defined on its class and it
+ * is not the default, i.e., the object is not an ndarray, and its
+ * __array_ufunc__ is not the same as that of ndarray.
+ *
+ * Returns 1 if this is the case, 0 if not.
+ */
+NPY_NO_EXPORT int
+has_non_default_array_ufunc(PyObject * obj);
+
+/*
* Check whether a set of input and output args have a non-default
* `__array_ufunc__` method. Returns the number of overrides, setting
* corresponding objects in PyObject array with_override (if not NULL).
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index a3fd72839..20c448d8b 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -71,6 +71,13 @@ typedef struct {
provided, then this is NULL. */
} ufunc_full_args;
+/* C representation of the context argument to __array_wrap__ */
+typedef struct {
+ PyUFuncObject *ufunc;
+ ufunc_full_args args;
+ int out_i;
+} _ufunc_context;
+
/* Get the arg tuple to pass in the context argument to __array_wrap__ and
* __array_prepare__.
*
@@ -303,6 +310,141 @@ _find_array_prepare(ufunc_full_args args,
}
+/*
+ * This function analyzes the input arguments
+ * and determines an appropriate __array_wrap__ function to call
+ * for the outputs.
+ *
+ * If an output argument is provided, then it is wrapped
+ * with its own __array_wrap__ not with the one determined by
+ * the input arguments.
+ *
+ * if the provided output argument is already an array,
+ * the wrapping function is None (which means no wrapping will
+ * be done --- not even PyArray_Return).
+ *
+ * A NULL is placed in output_wrap for outputs that
+ * should just have PyArray_Return called.
+ */
+static void
+_find_array_wrap(ufunc_full_args args, PyObject *kwds,
+ PyObject **output_wrap, int nin, int nout)
+{
+ int i;
+ PyObject *obj;
+ PyObject *wrap = NULL;
+
+ /*
+ * If a 'subok' parameter is passed and isn't True, don't wrap but put None
+ * into slots with out arguments which means return the out argument
+ */
+ if (kwds != NULL && (obj = PyDict_GetItem(kwds,
+ npy_um_str_subok)) != NULL) {
+ if (obj != Py_True) {
+ /* skip search for wrap members */
+ goto handle_out;
+ }
+ }
+
+ /*
+ * Determine the wrapping function given by the input arrays
+ * (could be NULL).
+ */
+ wrap = _find_array_method(args.in, npy_um_str_array_wrap);
+
+ /*
+ * For all the output arrays decide what to do.
+ *
+ * 1) Use the wrap function determined from the input arrays
+ * This is the default if the output array is not
+ * passed in.
+ *
+ * 2) Use the __array_wrap__ method of the output object
+ * passed in. -- this is special cased for
+ * exact ndarray so that no PyArray_Return is
+ * done in that case.
+ */
+handle_out:
+ if (args.out == NULL) {
+ for (i = 0; i < nout; i++) {
+ Py_XINCREF(wrap);
+ output_wrap[i] = wrap;
+ }
+ }
+ else {
+ for (i = 0; i < nout; i++) {
+ output_wrap[i] = _get_output_array_method(
+ PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
+ }
+ }
+
+ Py_XDECREF(wrap);
+ return;
+}
+
+
+/*
+ * Apply the __array_wrap__ function with the given array and content.
+ *
+ * Interprets wrap=None and wrap=NULL as intended by _find_array_wrap
+ *
+ * Steals a reference to obj and wrap.
+ * Pass context=NULL to indicate there is no context.
+ */
+static PyObject *
+_apply_array_wrap(
+ PyObject *wrap, PyArrayObject *obj, _ufunc_context const *context) {
+ if (wrap == NULL) {
+ /* default behavior */
+ return PyArray_Return(obj);
+ }
+ else if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ return (PyObject *)obj;
+ }
+ else {
+ PyObject *res;
+ PyObject *py_context = NULL;
+
+ /* Convert the context object to a tuple, if present */
+ if (context == NULL) {
+ py_context = Py_None;
+ Py_INCREF(py_context);
+ }
+ else {
+ PyObject *args_tup;
+ /* Call the method with appropriate context */
+ args_tup = _get_wrap_prepare_args(context->args);
+ if (args_tup == NULL) {
+ goto fail;
+ }
+ py_context = Py_BuildValue("OOi",
+ context->ufunc, args_tup, context->out_i);
+ Py_DECREF(args_tup);
+ if (py_context == NULL) {
+ goto fail;
+ }
+ }
+ /* try __array_wrap__(obj, context) */
+ res = PyObject_CallFunctionObjArgs(wrap, obj, py_context, NULL);
+ Py_DECREF(py_context);
+
+ /* try __array_wrap__(obj) if the context argument is not accepted */
+ if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ res = PyObject_CallFunctionObjArgs(wrap, obj, NULL);
+ }
+ Py_DECREF(wrap);
+ Py_DECREF(obj);
+ return res;
+ fail:
+ Py_DECREF(wrap);
+ Py_DECREF(obj);
+ return NULL;
+ }
+}
+
+
/*UFUNC_API
*
* On return, if errobj is populated with a non-NULL value, the caller
@@ -4019,7 +4161,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
int axes[NPY_MAXDIMS];
PyObject *axes_in = NULL;
PyArrayObject *mp = NULL, *ret = NULL;
- PyObject *op, *res = NULL;
+ PyObject *op;
PyObject *obj_ind, *context;
PyArrayObject *indices = NULL;
PyArray_Descr *otype = NULL;
@@ -4265,25 +4407,31 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
return NULL;
}
- /* If an output parameter was provided, don't wrap it */
- if (out != NULL) {
- return (PyObject *)ret;
- }
-
- if (Py_TYPE(op) != Py_TYPE(ret)) {
- res = PyObject_CallMethod(op, "__array_wrap__", "O", ret);
- if (res == NULL) {
- PyErr_Clear();
- }
- else if (res == Py_None) {
- Py_DECREF(res);
+ /* Wrap and return the output */
+ {
+ /* Find __array_wrap__ - note that these rules are different to the
+ * normal ufunc path
+ */
+ PyObject *wrap;
+ if (out != NULL) {
+ wrap = Py_None;
+ Py_INCREF(wrap);
+ }
+ else if (Py_TYPE(op) != Py_TYPE(ret)) {
+ wrap = PyObject_GetAttr(op, npy_um_str_array_wrap);
+ if (wrap == NULL) {
+ PyErr_Clear();
+ }
+ else if (!PyCallable_Check(wrap)) {
+ Py_DECREF(wrap);
+ wrap = NULL;
+ }
}
else {
- Py_DECREF(ret);
- return res;
+ wrap = NULL;
}
+ return _apply_array_wrap(wrap, ret, NULL);
}
- return PyArray_Return(ret);
fail:
Py_XDECREF(otype);
@@ -4291,78 +4439,6 @@ fail:
return NULL;
}
-/*
- * This function analyzes the input arguments
- * and determines an appropriate __array_wrap__ function to call
- * for the outputs.
- *
- * If an output argument is provided, then it is wrapped
- * with its own __array_wrap__ not with the one determined by
- * the input arguments.
- *
- * if the provided output argument is already an array,
- * the wrapping function is None (which means no wrapping will
- * be done --- not even PyArray_Return).
- *
- * A NULL is placed in output_wrap for outputs that
- * should just have PyArray_Return called.
- */
-static void
-_find_array_wrap(ufunc_full_args args, PyObject *kwds,
- PyObject **output_wrap, int nin, int nout)
-{
- int i;
- PyObject *obj;
- PyObject *wrap = NULL;
-
- /*
- * If a 'subok' parameter is passed and isn't True, don't wrap but put None
- * into slots with out arguments which means return the out argument
- */
- if (kwds != NULL && (obj = PyDict_GetItem(kwds,
- npy_um_str_subok)) != NULL) {
- if (obj != Py_True) {
- /* skip search for wrap members */
- goto handle_out;
- }
- }
-
- /*
- * Determine the wrapping function given by the input arrays
- * (could be NULL).
- */
- wrap = _find_array_method(args.in, npy_um_str_array_wrap);
-
- /*
- * For all the output arrays decide what to do.
- *
- * 1) Use the wrap function determined from the input arrays
- * This is the default if the output array is not
- * passed in.
- *
- * 2) Use the __array_wrap__ method of the output object
- * passed in. -- this is special cased for
- * exact ndarray so that no PyArray_Return is
- * done in that case.
- */
-handle_out:
- if (args.out == NULL) {
- for (i = 0; i < nout; i++) {
- Py_XINCREF(wrap);
- output_wrap[i] = wrap;
- }
- }
- else {
- for (i = 0; i < nout; i++) {
- output_wrap[i] = _get_output_array_method(
- PyTuple_GET_ITEM(args.out, i), npy_um_str_array_wrap, wrap);
- }
- }
-
- Py_XDECREF(wrap);
- return;
-}
-
static PyObject *
ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
@@ -4418,42 +4494,20 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
/* wrap outputs */
for (i = 0; i < ufunc->nout; i++) {
int j = ufunc->nin+i;
- PyObject *wrap = wraparr[i];
+ _ufunc_context context;
+ PyObject *wrapped;
- if (wrap == NULL) {
- /* default behavior */
- retobj[i] = PyArray_Return(mps[j]);
- }
- else if (wrap == Py_None) {
- Py_DECREF(wrap);
- retobj[i] = (PyObject *)mps[j];
- }
- else {
- PyObject *res;
- PyObject *args_tup;
+ context.ufunc = ufunc;
+ context.args = full_args;
+ context.out_i = i;
- /* Call the method with appropriate context */
- args_tup = _get_wrap_prepare_args(full_args);
- if (args_tup == NULL) {
- goto fail;
- }
- res = PyObject_CallFunction(
- wrap, "O(OOi)", mps[j], ufunc, args_tup, i);
- Py_DECREF(args_tup);
-
- /* Handle __array_wrap__ that does not accept a context argument */
- if (res == NULL && PyErr_ExceptionMatches(PyExc_TypeError)) {
- PyErr_Clear();
- res = PyObject_CallFunctionObjArgs(wrap, mps[j], NULL);
- }
- Py_DECREF(wrap);
- Py_DECREF(mps[j]);
- mps[j] = NULL; /* Prevent fail double-freeing this */
- if (res == NULL) {
- goto fail;
- }
- retobj[i] = res;
+ wrapped = _apply_array_wrap(wraparr[i], mps[j], &context);
+ mps[j] = NULL; /* Prevent fail double-freeing this */
+ if (wrapped == NULL) {
+ goto fail;
}
+
+ retobj[i] = wrapped;
}
Py_XDECREF(full_args.in);
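One user-visible consequence, mirrored by the updated test_umath test at the end of this diff: an exception raised by __array_wrap__ during a reduction now propagates instead of being cleared, since the reduction path goes through the same _apply_array_wrap helper as regular ufunc calls. A sketch:

    import numpy as np

    class A(object):
        def __array__(self):
            return np.zeros(2)
        def __array_wrap__(self, arr, context=None):
            raise RuntimeError

    np.maximum.reduce(A())    # now raises RuntimeError
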
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8eb258666..5d66d963f 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -504,3 +504,17 @@ class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
+
+
+class TestSctypeNA(_VisibleDeprecationTestCase):
+ # 2018-06-24, 1.16
+ def test_sctypeNA(self):
+ self.assert_deprecated(lambda: np.sctypeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA['?'])
+ self.assert_deprecated(lambda: np.typeNA.get('?'))
+
+
+class TestPositiveOnNonNumerical(_DeprecationTestCase):
+ # 2018-06-28, 1.16.0
+ def test_positive_on_non_number(self):
+ self.assert_deprecated(operator.pos, args=(np.array('foo'),))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index e85a73154..1511f5b6b 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -688,6 +688,9 @@ class TestScalarIndexing(object):
class TestCreation(object):
+ """
+ Test the np.array constructor
+ """
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
@@ -903,6 +906,34 @@ class TestCreation(object):
assert_raises(ValueError, np.ndarray, buffer=buf, strides=(0,),
shape=(max_bytes//itemsize + 1,), dtype=dtype)
+ def test_jagged_ndim_object(self):
+ # Lists of mismatching depths are treated as object arrays
+ a = np.array([[1], 2, 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, [2], 3])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([1, 2, [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ def test_jagged_shape_object(self):
+ # The jagged dimension of a list is turned into an object array
+ a = np.array([[1, 1], [2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2, 2], [3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
+ a = np.array([[1], [2], [3, 3]])
+ assert_equal(a.shape, (3,))
+ assert_equal(a.dtype, object)
+
class TestStructured(object):
def test_subarray_field_access(self):
@@ -3383,6 +3414,16 @@ class TestBinop(object):
assert_equal(obj_arr ** -1, pow_for(-1, obj_arr))
assert_equal(obj_arr ** 2, pow_for(2, obj_arr))
+ def test_pos_array_ufunc_override(self):
+ class A(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ return getattr(ufunc, method)(*[i.view(np.ndarray) for
+ i in inputs], **kwargs)
+ tst = np.array('foo').view(A)
+ with assert_raises(TypeError):
+ +tst
+
+
class TestTemporaryElide(object):
# elision is only triggered on relatively large arrays
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index ba4413138..5f4410d54 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -46,9 +46,11 @@ class TestRegression(object):
assert_array_equal(a, b)
def test_typeNA(self):
- # Ticket #31
- assert_equal(np.typeNA[np.int64], 'Int64')
- assert_equal(np.typeNA[np.uint64], 'UInt64')
+ # Issue gh-515
+ with suppress_warnings() as sup:
+ sup.filter(np.VisibleDeprecationWarning)
+ assert_equal(np.typeNA[np.int64], 'Int64')
+ assert_equal(np.typeNA[np.uint64], 'UInt64')
def test_dtype_names(self):
# Ticket #35
@@ -2391,3 +2393,15 @@ class TestRegression(object):
squeezed = scvalue.squeeze(axis=axis)
assert_equal(squeezed, scvalue)
assert_equal(type(squeezed), type(scvalue))
+
+ def test_field_access_by_title(self):
+ # gh-11507
+ s = 'Some long field name'
+ if HAS_REFCOUNT:
+ base = sys.getrefcount(s)
+ t = np.dtype([((s, 'f1'), np.float64)])
+ data = np.zeros(10, t)
+ for i in range(10):
+ v = str(data[['f1']])
+ if HAS_REFCOUNT:
+ assert_(base <= sys.getrefcount(s))
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 95107b538..f98367688 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1568,13 +1568,14 @@ class TestSpecialMethods(object):
class A(object):
def __array__(self):
- return np.zeros(1)
+ return np.zeros(2)
def __array_wrap__(self, arr, context):
raise RuntimeError
a = A()
assert_raises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum.reduce, a)
def test_failing_out_wrap(self):