Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/numeric.py                             14
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c            55
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c        168
-rw-r--r--  numpy/core/src/multiarray/nditer_impl.h            2
-rw-r--r--  numpy/core/src/multiarray/nditer_pywrap.c         23
-rw-r--r--  numpy/core/src/private/lowlevel_strided_loops.h    1
-rw-r--r--  numpy/core/src/scalarmathmodule.c.src             17
-rw-r--r--  numpy/core/src/umath/reduction.c                   2
-rw-r--r--  numpy/core/src/umath/ufunc_object.c               16
-rw-r--r--  numpy/core/tests/test_einsum.py                    1
-rw-r--r--  numpy/core/tests/test_nditer.py                  105
-rw-r--r--  numpy/core/tests/test_numeric.py                  22
-rw-r--r--  numpy/core/tests/test_scalarmath.py               18
-rw-r--r--  numpy/ctypeslib.py                                17
-rw-r--r--  numpy/distutils/tests/test_exec_command.py         4
-rw-r--r--  numpy/fft/fftpack_litemodule.c                    12
-rw-r--r--  numpy/lib/index_tricks.py                         25
17 files changed, 344 insertions, 158 deletions
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 5c5d1a0da..25f977254 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1153,15 +1153,19 @@ def roll(a, shift, axis=None):
n = a.size
reshape = True
else:
- n = a.shape[axis]
+ try:
+ n = a.shape[axis]
+ except IndexError:
+ raise ValueError('axis must be >= 0 and < %d' % a.ndim)
reshape = False
+ if n == 0:
+ return a
shift %= n
- indexes = concatenate((arange(n-shift,n),arange(n-shift)))
+ indexes = concatenate((arange(n - shift, n), arange(n - shift)))
res = a.take(indexes, axis)
if reshape:
- return res.reshape(a.shape)
- else:
- return res
+ res = res.reshape(a.shape)
+ return res
def rollaxis(a, axis, start=0):
"""
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 09e572f10..40043648d 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -134,12 +134,10 @@ NpyIter_RemoveAxis(NpyIter *iter, int axis)
axisdata = NIT_INDEX_AXISDATA(axisdata_del, 1);
memmove(axisdata_del, axisdata, (ndim-1-xdim)*sizeof_axisdata);
- /* If there is more than one dimension, shrink the iterator */
- if (ndim > 1) {
- NIT_NDIM(iter) = ndim-1;
- }
- /* Otherwise convert it to a singleton dimension */
- else {
+ /* Shrink the iterator */
+ NIT_NDIM(iter) = ndim - 1;
+ /* If it is now 0-d fill the singleton dimension */
+ if (ndim == 1) {
npy_intp *strides = NAD_STRIDES(axisdata_del);
NAD_SHAPE(axisdata_del) = 1;
for (iop = 0; iop < nop; ++iop) {
@@ -642,6 +640,9 @@ NpyIter_GetIterIndex(NpyIter *iter)
npy_intp sizeof_axisdata;
iterindex = 0;
+ if (ndim == 0) {
+ return 0;
+ }
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
axisdata = NIT_INDEX_AXISDATA(NIT_AXISDATA(iter), ndim-1);
@@ -1750,6 +1751,8 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex)
NIT_ITERINDEX(iter) = iterindex;
+ ndim = ndim ? ndim : 1;
+
if (iterindex == 0) {
dataptr = NIT_RESETDATAPTR(iter);
@@ -2062,8 +2065,9 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/* If last time around, the reduce loop structure was full, we reuse it */
if (reuse_reduce_loops) {
- npy_intp full_transfersize;
+ npy_intp full_transfersize, prev_reduce_outersize;
+ prev_reduce_outersize = NBF_REDUCE_OUTERSIZE(bufferdata);
reduce_outerstrides = NBF_REDUCE_OUTERSTRIDES(bufferdata);
reduce_outerptrs = NBF_REDUCE_OUTERPTRS(bufferdata);
reduce_outerdim = NBF_REDUCE_OUTERDIM(bufferdata);
@@ -2086,6 +2090,13 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
else {
transfersize = full_transfersize;
}
+ if (prev_reduce_outersize < NBF_REDUCE_OUTERSIZE(bufferdata)) {
+ /*
+ * If less data was copied the previous time around, it may not
+ * be safe to reuse the buffers even if the pointers match.
+ */
+ reuse_reduce_loops = 0;
+ }
NBF_BUFITEREND(bufferdata) = iterindex + reduce_innersize;
NPY_IT_DBG_PRINT3("Reused reduce transfersize: %d innersize: %d "
@@ -2184,6 +2195,11 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
break;
/* Just a copy */
case 0:
+ /* Do not reuse buffer if it did not exist */
+ if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
+ (prev_dataptrs != NULL)) {
+ prev_dataptrs[iop] = NULL;
+ }
/*
* No copyswap or cast was requested, so all we're
* doing is copying the data to fill the buffer and
@@ -2227,6 +2243,11 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
break;
/* Just a copy, but with a reduction */
case NPY_OP_ITFLAG_REDUCE:
+ /* Do not reuse buffer if it did not exist */
+ if (!(op_itflags[iop] & NPY_OP_ITFLAG_USINGBUFFER) &&
+ (prev_dataptrs != NULL)) {
+ prev_dataptrs[iop] = NULL;
+ }
if (ad_strides[iop] == 0) {
strides[iop] = 0;
/* It's all in one stride in the inner loop dimension */
@@ -2615,6 +2636,7 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
*/
if (count <= reducespace) {
*reduce_innersize = count;
+ NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else if (nonzerocoord) {
@@ -2622,6 +2644,8 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
count = reducespace;
}
*reduce_innersize = count;
+ /* NOTE: This is similar to the (coord != 0) case below. */
+ NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
return count;
}
else {
@@ -2661,8 +2685,20 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
return count;
}
- /* In this case, we can reuse the reduce loops */
- NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ coord = NAD_INDEX(axisdata);
+ if (coord != 0) {
+ /*
+ * In this case, it is only safe to reuse the buffer if the amount
+ * of data copied is not more than the current axes, as is the
+ * case when reuse_reduce_loops was active already.
+ * It should in principle be OK when the idim loop returns immediately.
+ */
+ NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ }
+ else {
+ /* In this case, we can reuse the reduce loops */
+ NIT_ITFLAGS(iter) |= NPY_ITFLAG_REUSE_REDUCE_LOOPS;
+ }
*reduce_innersize = reducespace;
count /= reducespace;
@@ -2687,7 +2723,6 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
"the outer loop? %d\n", iop, (int)stride0op[iop]);
}
shape = NAD_SHAPE(axisdata);
- coord = NAD_INDEX(axisdata);
reducespace += (shape-coord-1) * factor;
factor *= shape;
NIT_ADVANCE_AXISDATA(axisdata, 1);
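
Usage sketch (not part of the patch): the Python-level pattern these changes
affect. A 0-d iterator now reports iterindex 0, and a buffered reduction with
a small buffer accumulates correctly even when the reduce loops cannot be
blindly reused (the printed values assume the inputs shown below).

    import numpy as np

    it = np.nditer(np.array(3.0))
    print(it.ndim, it.iterindex)              # 0 0

    a = np.arange(24).reshape(2, 3, 4)
    it = np.nditer([a, None],
                   flags=['buffered', 'reduce_ok', 'delay_bufalloc'],
                   op_flags=[['readonly'], ['readwrite', 'allocate']],
                   op_axes=[[0, 1, 2], [0, -1, -1]], buffersize=5)
    it.operands[1][...] = 0
    it.reset()
    for x, y in it:
        y[...] += x                           # reduction over axes 1 and 2
    print(it.operands[1])                     # [ 66 210] == a.sum(axis=(1, 2))
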
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index cfbaea321..a40cbc7bc 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -54,8 +54,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape,
- int output_scalars);
+ npy_intp *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
PyArrayObject *op,
@@ -75,7 +74,7 @@ static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
- int only_inputs, int output_scalars);
+ int only_inputs);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
@@ -86,7 +85,7 @@ npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
- int **op_axes, int output_scalars);
+ int **op_axes);
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags,
@@ -122,8 +121,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
npy_int8 *perm;
NpyIter_BufferData *bufferdata = NULL;
- int any_allocate = 0, any_missing_dtypes = 0,
- output_scalars = 0, need_subtype = 0;
+ int any_allocate = 0, any_missing_dtypes = 0, need_subtype = 0;
/* The subtype for automatically allocated outputs */
double subtype_priority = NPY_PRIORITY;
@@ -158,6 +156,22 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
return NULL;
}
+ /*
+ * Before 1.8, if `oa_ndim == 0`, this meant `op_axes != NULL` was an error.
+ * With 1.8, `oa_ndim == -1` takes this role, while op_axes in that case
+ * enforces a 0-d iterator. Using `oa_ndim == 0` with `op_axes == NULL`
+ * is thus deprecated with version 1.8.
+ */
+ if ((oa_ndim == 0) && (op_axes == NULL)) {
+ char* mesg = "using `oa_ndim == 0` when `op_axes` is NULL is "
+ "deprecated. Use `oa_ndim == -1` or the MultiNew "
+ "iterator for NumPy <1.8 compatibility";
+ if (DEPRECATE(mesg) < 0) {
+ return NULL;
+ }
+ oa_ndim = -1;
+ }
+
/* Error check 'oa_ndim' and 'op_axes', which must be used together */
if (!npyiter_check_op_axes(nop, oa_ndim, op_axes, itershape)) {
return NULL;
@@ -175,12 +189,6 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* Calculate how many dimensions the iterator should have */
ndim = npyiter_calculate_ndim(nop, op_in, oa_ndim);
- /* If 'ndim' is zero, any outputs should be scalars */
- if (ndim == 0) {
- output_scalars = 1;
- ndim = 1;
- }
-
NPY_IT_TIME_POINT(c_calculate_ndim);
/* Allocate memory for the iterator */
@@ -231,8 +239,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
/* Fill in the AXISDATA arrays and set the ITERSIZE field */
if (!npyiter_fill_axisdata(iter, flags, op_itflags, op_dataptr,
- op_flags, op_axes, itershape,
- output_scalars)) {
+ op_flags, op_axes, itershape)) {
NpyIter_Deallocate(iter);
return NULL;
}
@@ -338,8 +345,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
dtype = npyiter_get_common_dtype(nop, op,
op_itflags, op_dtype,
op_request_dtypes,
- only_inputs,
- output_scalars);
+ only_inputs);
if (dtype == NULL) {
NpyIter_Deallocate(iter);
return NULL;
@@ -389,7 +395,7 @@ NpyIter_AdvancedNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
* done now using a memory layout matching the iterator.
*/
if (!npyiter_allocate_arrays(iter, flags, op_dtype, subtype, op_flags,
- op_itflags, op_axes, output_scalars)) {
+ op_itflags, op_axes)) {
NpyIter_Deallocate(iter);
return NULL;
}
@@ -504,7 +510,7 @@ NpyIter_MultiNew(int nop, PyArrayObject **op_in, npy_uint32 flags,
{
return NpyIter_AdvancedNew(nop, op_in, flags, order, casting,
op_flags, op_request_dtypes,
- 0, NULL, NULL, 0);
+ -1, NULL, NULL, 0);
}
/*NUMPY_API
@@ -521,7 +527,7 @@ NpyIter_New(PyArrayObject *op, npy_uint32 flags,
return NpyIter_AdvancedNew(1, &op, flags, order, casting,
&op_flags, &dtype,
- 0, NULL, NULL, 0);
+ -1, NULL, NULL, 0);
}
/*NUMPY_API
@@ -758,53 +764,60 @@ npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
char axes_dupcheck[NPY_MAXDIMS];
int iop, idim;
- if (oa_ndim == 0 && (op_axes != NULL || itershape != NULL)) {
- PyErr_Format(PyExc_ValueError,
- "If 'op_axes' or 'itershape' is not NULL in the"
- "iterator constructor, 'oa_ndim' must be greater than zero");
- return 0;
- }
- else if (oa_ndim > 0) {
- if (oa_ndim > NPY_MAXDIMS) {
+ if (oa_ndim < 0) {
+ /*
+ * If `oa_ndim < 0`, `op_axes` and `itershape` are signalled to
+ * be unused and should be NULL. (Before NumPy 1.8 this was
+ * signalled by `oa_ndim == 0`.)
+ */
+ if (op_axes != NULL || itershape != NULL) {
PyErr_Format(PyExc_ValueError,
+ "If 'op_axes' or 'itershape' is not NULL in the iterator "
+ "constructor, 'oa_ndim' must be zero or greater");
+ return 0;
+ }
+ return 1;
+ }
+ if (oa_ndim > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
"Cannot construct an iterator with more than %d dimensions "
"(%d were requested for op_axes)",
(int)NPY_MAXDIMS, oa_ndim);
- return 0;
- }
- else if (op_axes == NULL) {
- PyErr_Format(PyExc_ValueError,
- "If 'oa_ndim' is greater than zero in the iterator "
- "constructor, then op_axes cannot be NULL");
- return 0;
- }
+ return 0;
+ }
+ if (op_axes == NULL) {
+ PyErr_Format(PyExc_ValueError,
+ "If 'oa_ndim' is zero or greater in the iterator "
+ "constructor, then op_axes cannot be NULL");
+ return 0;
+ }
- /* Check that there are no duplicates in op_axes */
- for (iop = 0; iop < nop; ++iop) {
- int *axes = op_axes[iop];
- if (axes != NULL) {
- memset(axes_dupcheck, 0, NPY_MAXDIMS);
- for (idim = 0; idim < oa_ndim; ++idim) {
- npy_intp i = axes[idim];
- if (i >= 0) {
- if (i >= NPY_MAXDIMS) {
- PyErr_Format(PyExc_ValueError,
- "The 'op_axes' provided to the iterator "
- "constructor for operand %d "
- "contained invalid "
- "values %d", (int)iop, (int)i);
- return 0;
- } else if(axes_dupcheck[i] == 1) {
- PyErr_Format(PyExc_ValueError,
- "The 'op_axes' provided to the iterator "
- "constructor for operand %d "
- "contained duplicate "
- "value %d", (int)iop, (int)i);
- return 0;
- }
- else {
- axes_dupcheck[i] = 1;
- }
+ /* Check that there are no duplicates in op_axes */
+ for (iop = 0; iop < nop; ++iop) {
+ int *axes = op_axes[iop];
+ if (axes != NULL) {
+ memset(axes_dupcheck, 0, NPY_MAXDIMS);
+ for (idim = 0; idim < oa_ndim; ++idim) {
+ npy_intp i = axes[idim];
+ if (i >= 0) {
+ if (i >= NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "The 'op_axes' provided to the iterator "
+ "constructor for operand %d "
+ "contained invalid "
+ "values %d", (int)iop, (int)i);
+ return 0;
+ }
+ else if (axes_dupcheck[i] == 1) {
+ PyErr_Format(PyExc_ValueError,
+ "The 'op_axes' provided to the iterator "
+ "constructor for operand %d "
+ "contained duplicate "
+ "value %d", (int)iop, (int)i);
+ return 0;
+ }
+ else {
+ axes_dupcheck[i] = 1;
}
}
}
@@ -819,7 +832,7 @@ npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim)
{
/* If 'op_axes' is being used, force 'ndim' */
- if (oa_ndim > 0 ) {
+ if (oa_ndim >= 0 ) {
return oa_ndim;
}
/* Otherwise it's the maximum 'ndim' from the operands */
@@ -1439,8 +1452,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape,
- int output_scalars)
+ npy_intp *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -1540,6 +1552,13 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
axisdata = NIT_AXISDATA(iter);
sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
+ if (ndim == 0) {
+ /* Need to fill the first axisdata, even if the iterator is 0-d */
+ NAD_SHAPE(axisdata) = 1;
+ NAD_INDEX(axisdata) = 0;
+ memcpy(NAD_PTRS(axisdata), op_dataptr, NPY_SIZEOF_INTP*nop);
+ }
+
/* Now process the operands, filling in the axisdata */
for (idim = 0; idim < ndim; ++idim) {
npy_intp bshape = broadcast_shape[ndim-idim-1];
@@ -1560,7 +1579,7 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
ondim = PyArray_NDIM(op_cur);
if (bshape == 1) {
strides[iop] = 0;
- if (idim >= ondim && !output_scalars &&
+ if (idim >= ondim &&
(op_flags[iop] & NPY_ITER_NO_BROADCAST)) {
goto operand_different_than_broadcast;
}
@@ -1681,8 +1700,8 @@ npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itf
}
/* Now fill in the ITERSIZE member */
- NIT_ITERSIZE(iter) = broadcast_shape[0];
- for (idim = 1; idim < ndim; ++idim) {
+ NIT_ITERSIZE(iter) = 1;
+ for (idim = 0; idim < ndim; ++idim) {
NIT_ITERSIZE(iter) *= broadcast_shape[idim];
}
/* The range defaults to everything */
@@ -2003,7 +2022,10 @@ npyiter_replace_axisdata(NpyIter *iter, int iop,
NIT_RESETDATAPTR(iter)[iop] = op_dataptr;
NIT_BASEOFFSETS(iter)[iop] = baseoffset;
axisdata = axisdata0;
- for (idim = 0; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
+ /* Fill at least one axisdata, for the 0-d case */
+ NAD_PTRS(axisdata)[iop] = op_dataptr;
+ NIT_ADVANCE_AXISDATA(axisdata, 1);
+ for (idim = 1; idim < ndim; ++idim, NIT_ADVANCE_AXISDATA(axisdata, 1)) {
NAD_PTRS(axisdata)[iop] = op_dataptr;
}
}
@@ -2029,7 +2051,7 @@ npyiter_compute_index_strides(NpyIter *iter, npy_uint32 flags)
/*
* If there is only one element being iterated, we just have
* to touch the first AXISDATA because nothing will ever be
- * incremented.
+ * incremented. This also initializes the data for the 0-d case.
*/
if (NIT_ITERSIZE(iter) == 1) {
if (itflags & NPY_ITFLAG_HASINDEX) {
@@ -2399,7 +2421,7 @@ static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
- int only_inputs, int output_scalars)
+ int only_inputs)
{
int iop;
npy_intp narrs = 0, ndtypes = 0;
@@ -2698,7 +2720,7 @@ npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
- int **op_axes, int output_scalars)
+ int **op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -2729,7 +2751,7 @@ npyiter_allocate_arrays(NpyIter *iter,
if (op[iop] == NULL) {
PyArrayObject *out;
PyTypeObject *op_subtype;
- int ondim = output_scalars ? 0 : ndim;
+ int ondim = ndim;
/* Check whether the subtype was disabled */
op_subtype = (op_flags[iop] & NPY_ITER_NO_SUBTYPE) ?
@@ -2902,7 +2924,7 @@ npyiter_allocate_arrays(NpyIter *iter,
if ((itflags & NPY_ITFLAG_BUFFER) &&
!(op_itflags[iop] & NPY_OP_ITFLAG_CAST)) {
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
- if (ndim == 1) {
+ if (ndim <= 1) {
op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER;
NBF_STRIDES(bufferdata)[iop] = NAD_STRIDES(axisdata)[iop];
}
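
Usage sketch (not part of the patch): with the constructor changes above, a
0-d operand produces a genuine 0-d iterator, and op_axes=[()] forces an
existing array to be treated as 0-d.

    import numpy as np

    it = np.nditer(np.array(0.5), ['multi_index'], [['readonly']])
    print(next(it))                    # 0.5
    print(it.ndim, it.multi_index)     # 0 ()

    it = np.nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
    print(it.ndim, len(it))            # 0 1
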
diff --git a/numpy/core/src/multiarray/nditer_impl.h b/numpy/core/src/multiarray/nditer_impl.h
index 1251baa6e..ae24f46e6 100644
--- a/numpy/core/src/multiarray/nditer_impl.h
+++ b/numpy/core/src/multiarray/nditer_impl.h
@@ -294,7 +294,7 @@ struct NpyIter_AD {
#define NIT_SIZEOF_ITERATOR(itflags, ndim, nop) ( \
sizeof(struct NpyIter_InternalOnly) + \
NIT_AXISDATA_OFFSET(itflags, ndim, nop) + \
- NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim))
+ NIT_AXISDATA_SIZEOF(itflags, ndim, nop)*(ndim ? ndim : 1))
/* Internal helper functions shared between implementation files */
NPY_NO_EXPORT void
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 4621491a3..61f0c42b6 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -95,7 +95,6 @@ NpyIter_GlobalFlagsConverter(PyObject *flags_in, npy_uint32 *flags)
npy_uint32 flag;
if (flags_in == NULL || flags_in == Py_None) {
- *flags = 0;
return 1;
}
@@ -526,7 +525,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
return 0;
}
- *oa_ndim = 0;
+ *oa_ndim = -1;
/* Copy the tuples into op_axes */
for (iop = 0; iop < nop; ++iop) {
@@ -545,13 +544,8 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
Py_DECREF(a);
return 0;
}
- if (*oa_ndim == 0) {
+ if (*oa_ndim == -1) {
*oa_ndim = PySequence_Size(a);
- if (*oa_ndim == 0) {
- PyErr_SetString(PyExc_ValueError,
- "op_axes must have at least one dimension");
- return 0;
- }
if (*oa_ndim > NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
"Too many dimensions in op_axes");
@@ -575,7 +569,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
op_axes[iop][idim] = -1;
}
else {
- op_axes[iop][idim] = PyInt_AsLong(v);
+ op_axes[iop][idim] = PyArray_PyIntAsInt(v);
if (op_axes[iop][idim]==-1 &&
PyErr_Occurred()) {
Py_DECREF(a);
@@ -589,7 +583,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
}
}
- if (*oa_ndim == 0) {
+ if (*oa_ndim == -1) {
PyErr_SetString(PyExc_ValueError,
"If op_axes is provided, at least one list of axes "
"must be contained within it");
@@ -726,7 +720,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
NPY_CASTING casting = NPY_SAFE_CASTING;
npy_uint32 op_flags[NPY_MAXARGS];
PyArray_Descr *op_request_dtypes[NPY_MAXARGS];
- int oa_ndim = 0;
+ int oa_ndim = -1;
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
PyArray_Dims itershape = {NULL, 0};
@@ -784,7 +778,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
}
if (itershape.len > 0) {
- if (oa_ndim == 0) {
+ if (oa_ndim == -1) {
oa_ndim = itershape.len;
memset(op_axes, 0, sizeof(op_axes[0]) * nop);
}
@@ -800,10 +794,9 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
itershape.ptr = NULL;
}
-
self->iter = NpyIter_AdvancedNew(nop, op, flags, order, casting, op_flags,
op_request_dtypes,
- oa_ndim, oa_ndim > 0 ? op_axes : NULL,
+ oa_ndim, oa_ndim >= 0 ? op_axes : NULL,
itershape.ptr,
buffersize);
@@ -860,7 +853,7 @@ NpyIter_NestedIters(PyObject *NPY_UNUSED(self),
int iop, nop = 0, inest, nnest = 0;
PyArrayObject *op[NPY_MAXARGS];
- npy_uint32 flags = 0, flags_inner = 0;
+ npy_uint32 flags = 0, flags_inner;
NPY_ORDER order = NPY_KEEPORDER;
NPY_CASTING casting = NPY_SAFE_CASTING;
npy_uint32 op_flags[NPY_MAXARGS], op_flags_inner[NPY_MAXARGS];
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h
index 94c6a2121..c9fd1248f 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/private/lowlevel_strided_loops.h
@@ -256,6 +256,7 @@ PyArray_CastRawArrays(npy_intp count,
* 'stransfer' with the provided dst_stride/src_stride and
* dst_strides[0]/src_strides[0], so the caller can use those values to
* specialize the function.
+ * Note that even if ndim == 0, everything needs to be set as if ndim == 1.
*
* The return value is the number of elements it couldn't copy. A return value
* of 0 means all elements were copied, a larger value means the end of
diff --git a/numpy/core/src/scalarmathmodule.c.src b/numpy/core/src/scalarmathmodule.c.src
index 57c610b9e..b87d9b405 100644
--- a/numpy/core/src/scalarmathmodule.c.src
+++ b/numpy/core/src/scalarmathmodule.c.src
@@ -494,16 +494,25 @@ half_ctype_remainder(npy_half a, npy_half b, npy_half *out) {
/**end repeat**/
/**begin repeat
- * #name = half, float, double, longdouble#
- * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ * #name = float, double, longdouble#
+ * #type = npy_float, npy_double, npy_longdouble#
*/
static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b);
static void
-@name@_ctype_power(@type@ a, @type@ b, @type@ *out) {
+@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
+{
*out = _basic_@name@_pow(a, b);
}
/**end repeat**/
+static void
+half_ctype_power(npy_half a, npy_half b, npy_half *out)
+{
+ const npy_float af = npy_half_to_float(a);
+ const npy_float bf = npy_half_to_float(b);
+ const npy_float outf = _basic_float_pow(af,bf);
+ *out = npy_float_to_half(outf);
+}
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
@@ -1130,7 +1139,6 @@ static PyObject *
int first;
@type@ out = @zero@;
-
switch(_@name@_convert2_to_ctypes(a, &arg1, b, &arg2)) {
case 0:
break;
@@ -1724,7 +1732,6 @@ get_functions(void)
i += 3;
j++;
}
- _basic_half_pow = funcdata[j - 1];
_basic_float_pow = funcdata[j];
_basic_double_pow = funcdata[j + 1];
_basic_longdouble_pow = funcdata[j + 2];
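
Usage sketch (not part of the patch): half_ctype_power now converts to float,
calls the float power routine and converts back, so float16 scalar powers give
the values the new tests check.

    import numpy as np

    print(np.float16(3) ** 4)             # 81.0
    print(np.float16(3) ** np.int8(2))    # 9.0
    print(np.int16(3) ** np.float16(2))   # 9.0
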
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index e6ed04e99..f69aea2d0 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -513,7 +513,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
NPY_KEEPORDER, casting,
op_flags,
op_dtypes,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
goto fail;
}
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 124185bfd..9c499d322 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1211,7 +1211,7 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_ITER_DELAY_BUFALLOC,
order, NPY_UNSAFE_CASTING,
op_flags, dtype,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
return -1;
}
@@ -1509,7 +1509,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ITER_GROWINNER,
order, NPY_UNSAFE_CASTING,
op_flags, dtypes,
- 0, NULL, NULL, buffersize);
+ -1, NULL, NULL, buffersize);
if (iter == NULL) {
return -1;
}
@@ -1976,18 +1976,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
NPY_ITER_NO_BROADCAST;
}
- /*
- * If there are no iteration dimensions, create a fake one
- * so that the scalar edge case works right.
- */
- if (iter_ndim == 0) {
- iter_ndim = 1;
- iter_shape[0] = 1;
- for (i = 0; i < nop; ++i) {
- op_axes[i][0] = -1;
- }
- }
-
/* Create the iterator */
iter = NpyIter_AdvancedNew(nop, op, NPY_ITER_MULTI_INDEX|
NPY_ITER_REFS_OK|
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index ed7d455dc..aa0328a8b 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -241,6 +241,7 @@ class TestEinSum(TestCase):
assert_equal(np.einsum(a, [0,0]), np.trace(a).astype(dtype))
# multiply(a, b)
+ assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
for n in range(1,17):
a = np.arange(3*n, dtype=dtype).reshape(3,n)
b = np.arange(2*3*n, dtype=dtype).reshape(2,3,n)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index ebbe25fd3..ec5f63c5e 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2017,6 +2017,57 @@ def test_iter_buffering_growinner():
# Should end up with just one inner loop here
assert_equal(i[0].size, a.size)
+
+@dec.slow
+def test_iter_buffered_reduce_reuse():
+ # large enough array for all views, including negative strides.
+ a = np.arange(2*3**5)[3**5:3**5+1]
+ flags = ['buffered', 'delay_bufalloc', 'multi_index', 'reduce_ok', 'refs_ok']
+ op_flags = [('readonly',), ('readwrite','allocate')]
+ op_axes_list = [[(0,1,2), (0,1,-1)], [(0,1,2), (0,-1,-1)]]
+ # wrong dtype to force buffering
+ op_dtypes = [np.float, a.dtype]
+
+ def get_params():
+ for xs in xrange(-3**2, 3**2 + 1):
+ for ys in xrange(xs, 3**2 + 1):
+ for op_axes in op_axes_list:
+ # last stride is reduced and because of that not
+ # important for this test, as it is the inner stride.
+ strides = (xs * a.itemsize, ys * a.itemsize, a.itemsize)
+ arr = np.lib.stride_tricks.as_strided(a, (3,3,3), strides)
+
+ for skip in [0, 1]:
+ yield arr, op_axes, skip
+
+ for arr, op_axes, skip in get_params():
+ nditer2 = np.nditer([arr.copy(), None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ op_dtypes=op_dtypes)
+ nditer2.operands[-1][...] = 0
+ nditer2.reset()
+ nditer2.iterindex = skip
+
+ for (a2_in, b2_in) in nditer2:
+ b2_in += a2_in.astype(np.int_)
+
+ comp_res = nditer2.operands[-1]
+
+ for bufsize in xrange(0, 3**3):
+ nditer1 = np.nditer([arr, None],
+ op_axes=op_axes, flags=flags, op_flags=op_flags,
+ buffersize=bufsize, op_dtypes=op_dtypes)
+ nditer1.operands[-1][...] = 0
+ nditer1.reset()
+ nditer1.iterindex = skip
+
+ for (a1_in, b1_in) in nditer1:
+ b1_in += a1_in.astype(np.int_)
+
+ res = nditer1.operands[-1]
+ assert_array_equal(res, comp_res)
+
+
def test_iter_no_broadcast():
# Test that the no_broadcast flag works
a = np.arange(24).reshape(2,3,4)
@@ -2472,5 +2523,59 @@ def test_iter_allocated_array_dtypes():
c[1,1] = a / b
assert_equal(it.operands[2], [[8, 12], [20, 5]])
+
+def test_0d_iter():
+ # Basic test for iteration of 0-d arrays:
+ i = nditer([2, 3], ['multi_index'], [['readonly']]*2)
+ assert_equal(i.ndim, 0)
+ assert_equal(i.next(), (2, 3))
+ assert_equal(i.multi_index, ())
+ assert_equal(i.iterindex, 0)
+ assert_raises(StopIteration, i.next)
+ # test reset:
+ i.reset()
+ assert_equal(i.next(), (2, 3))
+ assert_raises(StopIteration, i.next)
+
+ # test forcing to 0-d
+ i = nditer(np.arange(5), ['multi_index'], [['readonly']], op_axes=[()])
+ assert_equal(i.ndim, 0)
+ assert_equal(len(i), 1)
+ # note that itershape=() still behaves like None due to the conversions
+
+ # Test a more complex buffered casting case (same as another test above)
+ sdt = [('a', 'f4'), ('b', 'i8'), ('c', 'c8', (2,3)), ('d', 'O')]
+ a = np.array(0.5, dtype='f4')
+ i = nditer(a, ['buffered','refs_ok'], ['readonly'],
+ casting='unsafe', op_dtypes=sdt)
+ vals = i.next()
+ assert_equal(vals['a'], 0.5)
+ assert_equal(vals['b'], 0)
+ assert_equal(vals['c'], [[(0.5)]*3]*2)
+ assert_equal(vals['d'], 0.5)
+
+
+def test_0d_nested_iter():
+ a = np.arange(12).reshape(2,3,2)
+ i, j = np.nested_iters(a, [[],[1,0,2]])
+ vals = []
+ for x in i:
+ vals.append([y for y in j])
+ assert_equal(vals, [[0,1,2,3,4,5,6,7,8,9,10,11]])
+
+ i, j = np.nested_iters(a, [[1,0,2],[]])
+ vals = []
+ for x in i:
+ vals.append([y for y in j])
+ assert_equal(vals, [[0],[1],[2],[3],[4],[5],[6],[7],[8],[9],[10],[11]])
+
+ i, j, k = np.nested_iters(a, [[2,0], [] ,[1]])
+ vals = []
+ for x in i:
+ for y in j:
+ vals.append([z for z in k])
+ assert_equal(vals, [[0,2,4],[1,3,5],[6,8,10],[7,9,11]])
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 047070cce..f5953955b 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1504,5 +1504,27 @@ class TestStringFunction(object):
np.set_string_function(None, repr=False)
assert_equal(str(a), "[1]")
+class TestRoll(TestCase):
+ def test_roll1d(self):
+ x = np.arange(10)
+ xr = np.roll(x, 2)
+ assert_equal(xr, np.array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]))
+
+ def test_roll2d(self):
+ x2 = np.reshape(np.arange(10), (2,5))
+ x2r = np.roll(x2, 1)
+ assert_equal(x2r, np.array([[9, 0, 1, 2, 3], [4, 5, 6, 7, 8]]))
+
+ x2r = np.roll(x2, 1, axis=0)
+ assert_equal(x2r, np.array([[5, 6, 7, 8, 9], [0, 1, 2, 3, 4]]))
+
+ x2r = np.roll(x2, 1, axis=1)
+ assert_equal(x2r, np.array([[4, 0, 1, 2, 3], [9, 5, 6, 7, 8]]))
+
+ def test_roll_empty(self):
+ x = np.array([])
+ assert_equal(np.roll(x, 1), np.array([]))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 3078c583b..75137411c 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -46,7 +46,7 @@ class TestTypes(TestCase):
class TestPower(TestCase):
def test_small_types(self):
- for t in [np.int8, np.int16]:
+ for t in [np.int8, np.int16, np.float16]:
a = t(3)
b = a ** 4
assert_(b == 81, "error with %r: got %r" % (t,b))
@@ -60,7 +60,21 @@ class TestPower(TestCase):
assert_(b == 6765201, msg)
else:
assert_almost_equal(b, 6765201, err_msg=msg)
-
+ def test_mixed_types(self):
+ typelist = [np.int8,np.int16,np.float16,
+ np.float32,np.float64,np.int8,
+ np.int16,np.int32,np.int64]
+ for t1 in typelist:
+ for t2 in typelist:
+ a = t1(3)
+ b = t2(2)
+ result = a**b
+ msg = ("error with %r and %r:"
+ "got %r, expected %r") % (t1, t2, result, 9)
+ if np.issubdtype(np.dtype(result), np.integer):
+ assert_(result == 9, msg)
+ else:
+ assert_almost_equal(result, 9, err_msg=msg)
class TestComplexDivision(TestCase):
def test_zero_division(self):
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index f2c5be523..aa49de34b 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -122,15 +122,16 @@ else:
else:
libdir = loader_path
- # Need to save exception when using Python 3k, see PEP 3110.
- exc = None
for ln in libname_ext:
- try:
- libpath = os.path.join(libdir, ln)
- return ctypes.cdll[libpath]
- except OSError as e:
- exc = e
- raise exc
+ libpath = os.path.join(libdir, ln)
+ if os.path.exists(libpath):
+ try:
+ return ctypes.cdll[libpath]
+ except OSError:
+ ## defective lib file
+ raise
+ ## if no successful return in the libname_ext loop:
+ raise OSError("no file with expected extension")
ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
'load_library')
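
Usage sketch (not part of the patch): load_library now checks that the file
exists before handing it to ctypes. 'mylib' below is a hypothetical shared
library sitting next to the calling script.

    import numpy.ctypeslib as npct

    # Raises OSError("no file with expected extension") if no 'mylib' file
    # with a recognized extension exists in the directory of __file__.
    lib = npct.load_library('mylib', __file__)
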
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index 9b1a9e5d0..11f262369 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -20,6 +20,8 @@ class redirect_stdout(object):
def __exit__(self, exc_type, exc_value, traceback):
self._stdout.flush()
sys.stdout = self.old_stdout
+ # note: closing sys.stdout won't close it.
+ self._stdout.close()
class redirect_stderr(object):
"""Context manager to redirect stderr for exec_command test."""
@@ -33,6 +35,8 @@ class redirect_stderr(object):
def __exit__(self, exc_type, exc_value, traceback):
self._stderr.flush()
sys.stderr = self.old_stderr
+ # note: closing sys.stderr won't close it.
+ self._stderr.close()
class emulate_nonposix(object):
"""Context manager to emulate os.name != 'posix' """
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
index 499c72828..6f6a6c9f3 100644
--- a/numpy/fft/fftpack_litemodule.c
+++ b/numpy/fft/fftpack_litemodule.c
@@ -45,10 +45,12 @@ fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args)
nrepeats = PyArray_SIZE(data)/npts;
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
cfftf(npts, dptr, wsave);
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
return (PyObject *)data;
@@ -96,10 +98,12 @@ fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args)
nrepeats = PyArray_SIZE(data)/npts;
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
cfftb(npts, dptr, wsave);
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
return (PyObject *)data;
@@ -131,7 +135,9 @@ fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args)
}
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
cffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
return (PyObject *)op;
@@ -183,6 +189,7 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
memcpy((char *)(rptr+1), dptr, npts*sizeof(double));
rfftf(npts, rptr+1, wsave);
@@ -191,6 +198,7 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
rptr += rstep;
dptr += npts;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
Py_DECREF(data);
@@ -245,6 +253,7 @@ fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
dptr = (double *)PyArray_DATA(data);
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
for (i = 0; i < nrepeats; i++) {
memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double));
rptr[0] = dptr[0];
@@ -252,6 +261,7 @@ fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
rptr += npts;
dptr += npts*2;
}
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
PyArray_Free(op2, (char *)wsave);
Py_DECREF(data);
@@ -285,7 +295,9 @@ fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
NPY_SIGINT_ON;
+ Py_BEGIN_ALLOW_THREADS;
rffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
+ Py_END_ALLOW_THREADS;
NPY_SIGINT_OFF;
return (PyObject *)op;
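
Usage sketch (not part of the patch): the fftpack calls now run between
Py_BEGIN_ALLOW_THREADS / Py_END_ALLOW_THREADS, so independent transforms can
overlap across Python threads. A minimal sketch, assuming four equally sized
inputs:

    import threading
    import numpy as np

    data = [np.random.rand(2**18) for _ in range(4)]
    results = [None] * 4

    def work(i):
        # the C rfft routine runs with the GIL released
        results[i] = np.fft.rfft(data[i])

    threads = [threading.Thread(target=work, args=(i,)) for i in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
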
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index ad9af9840..045166b84 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -532,30 +532,7 @@ class ndindex(object):
(2, 0, 0)
(2, 1, 0)
- """
- # This is a hack to handle 0-d arrays correctly.
- # Fixing nditer would be more work but should be done eventually,
- # and then this entire __new__ method can be removed.
- def __new__(cls, *shape):
- if len(shape) == 1 and isinstance(shape[0], tuple):
- shape = shape[0]
- if len(shape) == 0:
- class zero_dim_iter(object):
- def __init__(self):
- self._N = 1
- def __iter__(self):
- return self
- def ndincr(self):
- self.next()
- def next(self):
- if self._N > 0:
- self._N -= 1
- return ()
- raise StopIteration
- return zero_dim_iter()
- else:
- return super(ndindex, cls).__new__(cls)
-
+ """
def __init__(self, *shape):
if len(shape) == 1 and isinstance(shape[0], tuple):
shape = shape[0]
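
Usage sketch (not part of the patch): with nditer handling 0-d iteration
itself, ndindex no longer needs the __new__ hack, and the empty-shape case
still yields a single empty tuple.

    import numpy as np

    print(list(np.ndindex(2, 2)))   # [(0, 0), (0, 1), (1, 0), (1, 1)]
    print(list(np.ndindex()))       # [()]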