Diffstat (limited to 'numpy')
 numpy/core/code_generators/numpy_api.py        |  28
 numpy/core/src/multiarray/array_assign_array.c |  49
 numpy/core/src/multiarray/ctors.c              | 848
 3 files changed, 57 insertions(+), 868 deletions(-)
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index def4d5785..22f0a349a 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -69,7 +69,7 @@ multiarray_types_api = {
'PyHalfArrType_Type': 217,
'NpyIter_Type': 218,
# End 1.6 API
- 'NpyNA_Type': 287,
+ 'NpyNA_Type': 281,
}
#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])
@@ -318,20 +318,18 @@ multiarray_funcs_api = {
'PyArray_ConvertClipmodeSequence': 279,
'PyArray_MatrixProduct2': 280,
# End 1.6 API
- 'PyArray_MaskedCopyInto': 281,
- 'PyArray_MaskedMoveInto': 282,
- 'PyArray_SetBaseObject': 283,
- 'PyArray_HasNASupport': 284,
- 'PyArray_ContainsNA': 285,
- 'PyArray_AllocateMaskNA': 286,
- 'NpyIter_GetFirstMaskNAOp': 288,
- 'NpyIter_GetMaskNAIndexArray': 289,
- 'PyArray_ReduceMaskNAArray': 290,
- 'PyArray_CreateSortedStridePerm': 291,
- 'PyArray_AssignZero': 292,
- 'PyArray_AssignOne': 293,
- 'PyArray_AssignNA': 294,
- 'PyArray_AssignMaskNA': 295,
+ 'PyArray_SetBaseObject': 282,
+ 'PyArray_HasNASupport': 283,
+ 'PyArray_ContainsNA': 284,
+ 'PyArray_AllocateMaskNA': 285,
+ 'NpyIter_GetFirstMaskNAOp': 286,
+ 'NpyIter_GetMaskNAIndexArray': 287,
+ 'PyArray_ReduceMaskNAArray': 288,
+ 'PyArray_CreateSortedStridePerm': 289,
+ 'PyArray_AssignZero': 290,
+ 'PyArray_AssignOne': 291,
+ 'PyArray_AssignNA': 292,
+ 'PyArray_AssignMaskNA': 293,
}
ufunc_types_api = {
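Deleting 'PyArray_MaskedCopyInto' (281) and 'PyArray_MaskedMoveInto' (282) frees those slots, so 'NpyNA_Type' drops from 287 to 281 and every later function shifts down to keep the table dense. As a hedged illustration only, here is a hypothetical miniature of how these tables map names to fixed slots in the exported C API array; the real generator in numpy/core/code_generators is more involved:

    # Hypothetical sketch: each name's index becomes its slot in the
    # C-level PyArray_API array, echoing defines like
    #   #define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])
    funcs = {
        'PyArray_MatrixProduct2': 280,  # last 1.6 API slot
        'PyArray_SetBaseObject': 282,   # first renumbered post-1.6 slot
    }
    for name, slot in sorted(funcs.items(), key=lambda item: item[1]):
        print("#define %s (*(<sig> *)PyArray_API[%d])" % (name, slot))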
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 8e428dcff..bdc262e17 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -67,8 +67,8 @@ raw_array_assign_array(int ndim, npy_intp *shape,
}
/*
- * Overlap check for the 1D case. Higher dimensional arrays cause
- * a temporary copy before getting here.
+ * Overlap check for the 1D case. Higher dimensional arrays and
+ * opposite strides cause a temporary copy before getting here.
*/
if (ndim == 1 && src_data < dst_data &&
src_data + shape_it[0] * src_strides_it[0] > dst_data) {
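For reference, a minimal Python-level illustration of the same-direction 1D overlap this inner-loop check is there to catch, assuming NumPy's documented copy-as-if-buffered assignment semantics:

    import numpy as np

    a = np.arange(6)
    a[1:] = a[:-1]   # src precedes dst with same-direction strides;
    print(a)         # the 1D loop handles the overlap -> [0 0 1 2 3 4]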
@@ -471,11 +471,15 @@ array_assign_array(PyArrayObject *dst, PyArrayObject *src,
}
/*
- * When ndim is 1, the lower-level inner loop handles copying
- * of overlapping data. For bigger ndim, we make a temporary
- * copy of 'src' if 'src' and 'dst' overlap.'
+ * When ndim is 1 and the strides point in the same direction,
+ * the lower-level inner loop handles copying
+ * of overlapping data. For bigger ndim and opposite-strided 1D
+ * data, we make a temporary copy of 'src' if 'src' and 'dst' overlap.
*/
- if (PyArray_NDIM(dst) > 1 && arrays_overlap(src, dst)) {
+ if (((PyArray_NDIM(dst) == 1 && PyArray_NDIM(src) >= 1 &&
+ PyArray_STRIDES(dst)[0] *
+ PyArray_STRIDES(src)[PyArray_NDIM(src) - 1] < 0) ||
+ PyArray_NDIM(dst) > 1) && arrays_overlap(src, dst)) {
PyArrayObject *tmp;
/*
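The new opposite-stride condition preserves the case the removed PyArray_MoveInto in ctors.c (below) was written for; a short sketch of the behavior being kept, using the example from that function's own comment:

    import numpy as np

    a = np.arange(3)
    a[::-1] = a      # opposite strides over one buffer: the condition
    print(a)         # above forces a temporary copy -> [2 1 0], not
                     # the historically incorrect [0 1 0]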
@@ -506,11 +510,34 @@ array_assign_array(PyArrayObject *dst, PyArrayObject *src,
}
/* Broadcast 'src' to 'dst' for raw iteration */
- if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
- PyArray_NDIM(src), PyArray_DIMS(src),
- PyArray_STRIDES(src), "input array",
- src_strides) < 0) {
- goto fail;
+ if (PyArray_NDIM(src) > PyArray_NDIM(dst)) {
+ int ndim_tmp = PyArray_NDIM(src);
+ npy_intp *src_shape_tmp = PyArray_DIMS(src);
+ npy_intp *src_strides_tmp = PyArray_STRIDES(src);
+ /*
+ * As a special case for backwards compatibility, strip
+ * away unit dimensions from the left of 'src'
+ */
+ while (ndim_tmp > PyArray_NDIM(dst) && src_shape_tmp[0] == 1) {
+ --ndim_tmp;
+ ++src_shape_tmp;
+ ++src_strides_tmp;
+ }
+
+ if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ ndim_tmp, src_shape_tmp,
+ src_strides_tmp, "input array",
+ src_strides) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_NDIM(src), PyArray_DIMS(src),
+ PyArray_STRIDES(src), "input array",
+ src_strides) < 0) {
+ goto fail;
+ }
}
if (src_has_maskna) {
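A short Python example of the backwards-compatibility case the new branch handles, where 'src' has more dimensions than 'dst' but only leading unit dimensions (assuming standard assignment broadcasting):

    import numpy as np

    dst = np.zeros(3)
    src = np.arange(3.0).reshape(1, 1, 3)  # ndim 3 > ndim 1, leading 1s
    dst[...] = src                         # unit dims stripped: (1,1,3) -> (3,)
    print(dst)                             # [0. 1. 2.]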
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index cc7036cbe..b60d6ba2d 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -379,188 +379,6 @@ copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
}
}
-/*NUMPY_API
- * Move the memory of one array into another, allowing for overlapping data.
- *
- * This is in general a difficult problem to solve efficiently, because
- * strides can be negative. Consider "a = np.arange(3); a[::-1] = a", which
- * previously produced the incorrect [0, 1, 0].
- *
- * Instead of trying to be fancy, we simply check for overlap and make
- * a temporary copy when one exists.
- *
- * Returns 0 on success, negative on failure.
- */
-NPY_NO_EXPORT int
-PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src)
-{
- /*
- * Performance fix for expressions like "a[1000:6000] += x". In this
- * case, first an in-place add is done, followed by an assignment,
- * equivalently expressed like this:
- *
- * tmp = a[1000:6000] # Calls array_subscript_nice in mapping.c
- * np.add(tmp, x, tmp)
- * a[1000:6000] = tmp # Calls array_ass_sub in mapping.c
- *
- * In the assignment the underlying data type, shape, strides, and
- * data pointers are identical, but src != dst because they are separately
- * generated slices. By detecting this and skipping the redundant
- * copy of values to themselves, we potentially give a big speed boost.
- *
- * Note that we don't call EquivTypes, because usually the exact same
- * dtype object will appear, and we don't want to slow things down
- * with a complicated comparison. The comparisons are ordered to
- * try and reject this with as little work as possible.
- */
- if (PyArray_DATA(src) == PyArray_DATA(dst) &&
- PyArray_MASKNA_DATA(src) == PyArray_MASKNA_DATA(dst) &&
- PyArray_DESCR(src) == PyArray_DESCR(dst) &&
- PyArray_NDIM(src) == PyArray_NDIM(dst) &&
- PyArray_CompareLists(PyArray_DIMS(src),
- PyArray_DIMS(dst),
- PyArray_NDIM(src)) &&
- PyArray_CompareLists(PyArray_STRIDES(src),
- PyArray_STRIDES(dst),
- PyArray_NDIM(src))) {
- /*printf("Redundant copy operation detected\n");*/
- return 0;
- }
-
- /*
- * A special case is when there is just one dimension with positive
- * strides, and we pass that to CopyInto, which correctly handles
- * it for most cases. It may still incorrectly handle copying of
- * partially-overlapping data elements, where the data pointer was offset
- * by a fraction of the element size.
- *
- * For NA masked arrays, we always use the overlapping check and
- * copy to handle this.
- */
- if ((!PyArray_HASMASKNA(dst) && PyArray_NDIM(dst) == 1 &&
- PyArray_NDIM(src) == 1 &&
- PyArray_STRIDE(dst, 0) > 0 &&
- PyArray_STRIDE(src, 0) > 0) ||
- !arrays_overlap(dst, src)) {
- return PyArray_CopyInto(dst, src);
- }
- else {
- PyArrayObject *tmp;
- int ret;
-
- /*
- * Allocate a temporary copy array.
- */
- tmp = (PyArrayObject *)PyArray_NewLikeArray(dst,
- NPY_KEEPORDER, NULL, 0);
- if (tmp == NULL) {
- return -1;
- }
-
- /* Make the temporary copy have an NA mask if necessary */
- if (PyArray_HASMASKNA(src)) {
- if (PyArray_AllocateMaskNA(tmp, 1, 0, 1) < 0) {
- Py_DECREF(tmp);
- return -1;
- }
- }
-
- ret = PyArray_CopyInto(tmp, src);
- if (ret == 0) {
- ret = PyArray_CopyInto(dst, tmp);
- }
- Py_DECREF(tmp);
- return ret;
- }
-}
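The redundant-copy check in the function removed above targets the in-place pattern its comment describes; expanded at the Python level it is roughly:

    import numpy as np

    a = np.zeros(10000)
    x = 2.5
    # "a[1000:6000] += x" is executed as approximately:
    tmp = a[1000:6000]    # array_subscript_nice in mapping.c
    np.add(tmp, x, tmp)   # the in-place add already wrote the result
    a[1000:6000] = tmp    # array_ass_sub: identical data pointer, dtype,
                          # shape and strides, so the copy can be skipped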
-
-/*NUMPY_API
- * Copy the memory of one array into another, allowing for overlapping data
- * and selecting which elements to move based on a mask.
- *
- * Precisely handling the overlapping data is in general a difficult
- * problem to solve efficiently, because strides can be negative.
- * Consider "a = np.arange(3); a[::-1] = a", which previously produced
- * the incorrect [0, 1, 0].
- *
- * Instead of trying to be fancy, we simply check for overlap and make
- * a temporary copy when one exists.
- *
- * Returns 0 on success, negative on failure.
- */
-NPY_NO_EXPORT int
-PyArray_MaskedMoveInto(PyArrayObject *dst, PyArrayObject *src,
- PyArrayObject *mask, NPY_CASTING casting)
-{
- /*
- * Performance fix for expressions like "a[1000:6000] += x". In this
- * case, first an in-place add is done, followed by an assignment,
- * equivalently expressed like this:
- *
- * tmp = a[1000:6000] # Calls array_subscript_nice in mapping.c
- * np.add(tmp, x, tmp)
- * a[1000:6000] = tmp # Calls array_ass_sub in mapping.c
- *
- * In the assignment the underlying data type, shape, strides, and
- * data pointers are identical, but src != dst because they are separately
- * generated slices. By detecting this and skipping the redundant
- * copy of values to themselves, we potentially give a big speed boost.
- *
- * Note that we don't call EquivTypes, because usually the exact same
- * dtype object will appear, and we don't want to slow things down
- * with a complicated comparison. The comparisons are ordered to
- * try and reject this with as little work as possible.
- */
- if (PyArray_DATA(src) == PyArray_DATA(dst) &&
- PyArray_DESCR(src) == PyArray_DESCR(dst) &&
- PyArray_NDIM(src) == PyArray_NDIM(dst) &&
- PyArray_CompareLists(PyArray_DIMS(src),
- PyArray_DIMS(dst),
- PyArray_NDIM(src)) &&
- PyArray_CompareLists(PyArray_STRIDES(src),
- PyArray_STRIDES(dst),
- PyArray_NDIM(src))) {
- /*printf("Redundant copy operation detected\n");*/
- return 0;
- }
-
- /*
- * A special case is when there is just one dimension with positive
- * strides, and we pass that to CopyInto, which correctly handles
- * it for most cases. It may still incorrectly handle copying of
- * partially-overlapping data elements, where the data pointer was offset
- * by a fraction of the element size.
- */
- if ((PyArray_NDIM(dst) == 1 &&
- PyArray_NDIM(src) == 1 &&
- PyArray_STRIDE(dst, 0) > 0 &&
- PyArray_STRIDE(src, 0) > 0) ||
- !arrays_overlap(dst, src)) {
- return PyArray_MaskedCopyInto(dst, src, mask, casting);
- }
- else {
- PyArrayObject *tmp;
- int ret;
-
- /*
- * Allocate a temporary copy array.
- */
- tmp = (PyArrayObject *)PyArray_NewLikeArray(dst,
- NPY_KEEPORDER, NULL, 0);
- if (tmp == NULL) {
- return -1;
- }
- ret = PyArray_CopyInto(tmp, src);
- if (ret == 0) {
- ret = PyArray_MaskedCopyInto(dst, tmp, mask, casting);
- }
- Py_DECREF(tmp);
- return ret;
- }
-}
-
-
-
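PyArray_MaskedMoveInto, removed above, applied the same overlap strategy to mask-selected assignment. As a hedged analogue only, np.copyto's where= argument in current NumPy exposes comparable mask-selected copying at the Python level:

    import numpy as np

    dst = np.arange(5.0)
    src = np.full(5, -1.0)
    mask = np.array([True, False, True, False, True])
    np.copyto(dst, src, where=mask)   # copy only where the mask is set
    print(dst)                        # [-1.  1. -1.  3. -1.]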
/* adapted from Numarray */
static int
setArrayFromSequence(PyArrayObject *a, PyObject *s,
@@ -3096,7 +2914,7 @@ PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src)
}
/*NUMPY_API
- * Copy an Array into another array -- memory must not overlap.
+ * Copy an Array into another array.
* Broadcast to the destination shape if necessary.
*
* Returns 0 on success, -1 on failure.
@@ -3104,674 +2922,20 @@ PyArray_CopyAnyInto(PyArrayObject *dst, PyArrayObject *src)
NPY_NO_EXPORT int
PyArray_CopyInto(PyArrayObject *dst, PyArrayObject *src)
{
- int src_has_maskna, dst_has_maskna;
- NPY_BEGIN_THREADS_DEF;
-
- if (!PyArray_ISWRITEABLE(dst)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array");
- return -1;
- }
-
- src_has_maskna = PyArray_HASMASKNA(src);
- dst_has_maskna = PyArray_HASMASKNA(dst);
- /* Can't copy an NA to an array which doesn't support it */
- if (src_has_maskna && !dst_has_maskna) {
- if (PyArray_ContainsNA(src)) {
- PyErr_SetString(PyExc_ValueError,
- "Cannot assign NA value to an array which "
- "does not support NAs");
- return -1;
- }
- /* If there are no actual NAs, allow the copy */
- else {
- src_has_maskna = 0;
- }
- }
-
- /* Special case for simple strides and no NA mask */
- if (!dst_has_maskna && PyArray_NDIM(dst) >= PyArray_NDIM(src) &&
- PyArray_TRIVIALLY_ITERABLE_PAIR(dst, src)) {
- PyArray_StridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- char *dst_data, *src_data;
- npy_intp count, dst_stride, src_stride, src_itemsize;
-
- int needs_api = 0;
-
- PyArray_PREPARE_TRIVIAL_PAIR_ITERATION(dst, src, count,
- dst_data, src_data, dst_stride, src_stride);
-
- /*
- * Check for overlap with positive strides, and if found,
- * possibly reverse the order
- */
- if (dst_data > src_data && src_stride > 0 && dst_stride > 0 &&
- (dst_data < src_data+src_stride*count) &&
- (src_data < dst_data+dst_stride*count)) {
- dst_data += dst_stride*(count-1);
- src_data += src_stride*(count-1);
- dst_stride = -dst_stride;
- src_stride = -src_stride;
- }
-
- if (PyArray_GetDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- src_stride, dst_stride,
- PyArray_DESCR(src), PyArray_DESCR(dst),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- return -1;
- }
-
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- stransfer(dst_data, dst_stride, src_data, src_stride,
- count, src_itemsize, transferdata);
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
-
- NPY_AUXDATA_FREE(transferdata);
-
- return PyErr_Occurred() ? -1 : 0;
- }
- /* Copying unmasked into unmasked */
- else if (!dst_has_maskna) {
- PyArray_StridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- PyArrayObject *op[2];
- npy_uint32 op_flags[2];
- PyArray_Descr *op_dtypes_values[2], **op_dtypes = NULL;
- NpyIter *iter;
- npy_intp src_size;
-
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *stride;
- npy_intp *countptr;
- npy_intp src_itemsize;
- int needs_api;
-
- op[0] = dst;
- op[1] = src;
- /*
- * TODO: In NumPy 2.0, reenable NPY_ITER_NO_BROADCAST. This
- * was removed during NumPy 1.6 testing for compatibility
- * with NumPy 1.5, as per Travis's -10 veto power.
- */
- /*op_flags[0] = NPY_ITER_WRITEONLY|NPY_ITER_NO_BROADCAST;*/
- op_flags[0] = NPY_ITER_WRITEONLY;
- /*
- * If src has an NA mask, it was already confirmed to
- * contain no NA values, so ignoring the NA mask is fine.
- */
- op_flags[1] = NPY_ITER_READONLY | NPY_ITER_IGNORE_MASKNA;
-
- /*
- * If 'src' is being broadcast to 'dst', and it is smaller
- * than the default NumPy buffer size, allow the iterator to
- * make a copy of 'src' with the 'dst' dtype if necessary.
- *
- * This is a performance operation, to allow fewer casts followed
- * by more plain copies.
- */
- src_size = PyArray_SIZE(src);
- if (src_size <= NPY_BUFSIZE && src_size < PyArray_SIZE(dst)) {
- op_flags[1] |= NPY_ITER_COPY;
- op_dtypes = op_dtypes_values;
- op_dtypes_values[0] = NULL;
- op_dtypes_values[1] = PyArray_DESCR(dst);
- }
-
- iter = NpyIter_MultiNew(2, op,
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK,
- NPY_KEEPORDER,
- NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes);
- if (iter == NULL) {
- return -1;
- }
-
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- stride = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
- /*
- * Because buffering is disabled in the iterator, the inner loop
- * strides will be the same throughout the iteration loop. Thus,
- * we can pass them to this function to take advantage of
- * contiguous strides, etc.
- */
- if (PyArray_GetDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- stride[1], stride[0],
- NpyIter_GetDescrArray(iter)[1], PyArray_DESCR(dst),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- NpyIter_Deallocate(iter);
- return -1;
- }
-
-
- if (NpyIter_GetIterSize(iter) != 0) {
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- do {
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- *countptr, src_itemsize, transferdata);
- } while(iternext(iter));
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
- }
-
- NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
-
- return PyErr_Occurred() ? -1 : 0;
- }
- /* Copying non NA-masked into NA-masked */
- else if (!src_has_maskna) {
- PyArray_StridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- PyArrayObject *op[2];
- npy_uint32 op_flags[2];
- PyArray_Descr *op_dtypes_values[2], **op_dtypes = NULL;
- NpyIter *iter;
- npy_intp src_size;
-
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *stride;
- npy_intp *countptr;
- npy_intp src_itemsize;
- int needs_api;
-
- op[0] = dst;
- op[1] = src;
- /*
- * TODO: In NumPy 2.0, reenable NPY_ITER_NO_BROADCAST. This
- * was removed during NumPy 1.6 testing for compatibility
- * with NumPy 1.5, as per Travis's -10 veto power.
- */
- /*op_flags[0] = NPY_ITER_WRITEONLY|NPY_ITER_NO_BROADCAST|NPY_ITER_USE_MASKNA;*/
- op_flags[0] = NPY_ITER_WRITEONLY | NPY_ITER_USE_MASKNA;
- op_flags[1] = NPY_ITER_READONLY;
-
- /*
- * If 'src' is being broadcast to 'dst', and it is smaller
- * than the default NumPy buffer size, allow the iterator to
- * make a copy of 'src' with the 'dst' dtype if necessary.
- *
- * This is a performance operation, to allow fewer casts followed
- * by more plain copies.
- */
- src_size = PyArray_SIZE(src);
- if (src_size <= NPY_BUFSIZE && src_size < PyArray_SIZE(dst)) {
- op_flags[1] |= NPY_ITER_COPY;
- op_dtypes = op_dtypes_values;
- op_dtypes_values[0] = NULL;
- op_dtypes_values[1] = PyArray_DESCR(dst);
- }
-
- iter = NpyIter_MultiNew(2, op,
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK,
- NPY_KEEPORDER,
- NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes);
- if (iter == NULL) {
- return -1;
- }
-
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- stride = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
- /*
- * Because buffering is disabled in the iterator, the inner loop
- * strides will be the same throughout the iteration loop. Thus,
- * we can pass them to this function to take advantage of
- * contiguous strides, etc.
- */
- if (PyArray_GetDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- stride[1], stride[0],
- NpyIter_GetDescrArray(iter)[1], PyArray_DESCR(dst),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- NpyIter_Deallocate(iter);
- return -1;
- }
-
-
- if (NpyIter_GetIterSize(iter) != 0) {
- /* Because buffering is disabled, this stride will be fixed */
- npy_intp maskna_stride = stride[2];
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- /* Specialize for contiguous mask stride */
- if (maskna_stride == 1) {
- do {
- char *maskna_ptr = dataptr[2];
- npy_intp count = *countptr;
-
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- count, src_itemsize, transferdata);
- memset(maskna_ptr, 1, count);
- } while(iternext(iter));
- }
- else {
- do {
- char *maskna_ptr = dataptr[2];
- npy_intp count = *countptr;
-
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- count, src_itemsize, transferdata);
- while (count-- != 0) {
- *maskna_ptr = 1;
- maskna_ptr += maskna_stride;
- }
- } while(iternext(iter));
- }
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
- }
-
- NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
-
- return PyErr_Occurred() ? -1 : 0;
- }
- /* Copying NA-masked into NA-masked */
- else {
- PyArray_MaskedStridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- PyArrayObject *op[2];
- npy_uint32 op_flags[2];
- PyArray_Descr *op_dtypes_values[2], **op_dtypes = NULL;
- NpyIter *iter;
- npy_intp src_size;
-
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *stride;
- npy_intp *countptr;
- npy_intp src_itemsize;
- int needs_api;
-
- op[0] = dst;
- op[1] = src;
- /*
- * TODO: In NumPy 2.0, reenable NPY_ITER_NO_BROADCAST. This
- * was removed during NumPy 1.6 testing for compatibility
- * with NumPy 1.5, as per Travis's -10 veto power.
- */
- /*op_flags[0] = NPY_ITER_WRITEONLY|NPY_ITER_NO_BROADCAST|NPY_ITER_USE_MASKNA;*/
- op_flags[0] = NPY_ITER_WRITEONLY | NPY_ITER_USE_MASKNA;
- op_flags[1] = NPY_ITER_READONLY | NPY_ITER_USE_MASKNA;
-
- /*
- * If 'src' is being broadcast to 'dst', and it is smaller
- * than the default NumPy buffer size, allow the iterator to
- * make a copy of 'src' with the 'dst' dtype if necessary.
- *
- * This is a performance operation, to allow fewer casts followed
- * by more plain copies.
- */
- src_size = PyArray_SIZE(src);
- if (src_size <= NPY_BUFSIZE && src_size < PyArray_SIZE(dst)) {
- op_flags[1] |= NPY_ITER_COPY;
- op_dtypes = op_dtypes_values;
- op_dtypes_values[0] = NULL;
- op_dtypes_values[1] = PyArray_DESCR(dst);
- }
-
- iter = NpyIter_MultiNew(2, op,
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK,
- NPY_KEEPORDER,
- NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes);
- if (iter == NULL) {
- return -1;
- }
-
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- stride = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
- /*
- * Because buffering is disabled in the iterator, the inner loop
- * strides will be the same throughout the iteration loop. Thus,
- * we can pass them to this function to take advantage of
- * contiguous strides, etc.
- */
- if (PyArray_GetMaskedDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- stride[1], stride[0], stride[3],
- NpyIter_GetDescrArray(iter)[1],
- PyArray_DESCR(dst),
- PyArray_MASKNA_DTYPE(src),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- NpyIter_Deallocate(iter);
- return -1;
- }
-
-
- if (NpyIter_GetIterSize(iter) != 0) {
- /* Because buffering is disabled, this stride will be fixed */
- npy_intp dst_maskna_stride = stride[2];
- npy_intp src_maskna_stride = stride[3];
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- /* Specialize for contiguous mask stride */
- if (src_maskna_stride == 1 && dst_maskna_stride == 1) {
- do {
- char *dst_maskna_ptr = dataptr[2];
- char *src_maskna_ptr = dataptr[3];
- npy_intp count = *countptr;
-
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- (npy_mask *)src_maskna_ptr, src_maskna_stride,
- count, src_itemsize, transferdata);
- memcpy(dst_maskna_ptr, src_maskna_ptr, count);
- } while(iternext(iter));
- }
- else {
- do {
- char *dst_maskna_ptr = dataptr[2];
- char *src_maskna_ptr = dataptr[3];
- npy_intp count = *countptr;
-
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- (npy_mask *)src_maskna_ptr, src_maskna_stride,
- count, src_itemsize, transferdata);
- while (count-- != 0) {
- *dst_maskna_ptr = *src_maskna_ptr;
- src_maskna_ptr += src_maskna_stride;
- dst_maskna_ptr += dst_maskna_stride;
- }
- } while(iternext(iter));
- }
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
- }
-
- NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
-
- return PyErr_Occurred() ? -1 : 0;
- }
+ return array_assign_array(dst, src, NULL, NPY_UNSAFE_CASTING, 0, NULL);
}
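All of the specialized iterator paths deleted above collapse into this single array_assign_array call, which reimplements them; the observable behavior is meant to be unchanged. Assuming np.copyto as the closest Python-level entry point, the broadcast-plus-unsafe-cast case looks like:

    import numpy as np

    dst = np.empty((2, 3), dtype=np.float32)
    src = np.arange(3)                     # broadcast (3,) -> (2, 3) with an
    np.copyto(dst, src, casting='unsafe')  # unsafe int-to-float cast, matching
    print(dst)                             # the NPY_UNSAFE_CASTING call above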
/*NUMPY_API
- * Copy an Array into another array, wherever the mask specifies.
- * The memory of src and dst must not overlap.
- *
- * Broadcast to the destination shape if necessary.
+ * Move the memory of one array into another, allowing for overlapping data.
*
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative on failure.
*/
NPY_NO_EXPORT int
-PyArray_MaskedCopyInto(PyArrayObject *dst, PyArrayObject *src,
- PyArrayObject *mask, NPY_CASTING casting)
+PyArray_MoveInto(PyArrayObject *dst, PyArrayObject *src)
{
- PyArray_MaskedStridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- NPY_BEGIN_THREADS_DEF;
-
- if (!PyArray_ISWRITEABLE(dst)) {
- PyErr_SetString(PyExc_RuntimeError,
- "cannot write to array");
- return -1;
- }
-
- if (!PyArray_CanCastArrayTo(src, PyArray_DESCR(dst), casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromString("Cannot cast array data from ");
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(src)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(dst)));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" according to the rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- return -1;
- }
-
-
- if (PyArray_NDIM(dst) >= PyArray_NDIM(src) &&
- PyArray_NDIM(dst) >= PyArray_NDIM(mask) &&
- PyArray_TRIVIALLY_ITERABLE_TRIPLE(dst, src, mask)) {
- char *dst_data, *src_data, *mask_data;
- npy_intp count, dst_stride, src_stride, src_itemsize, mask_stride;
-
- int needs_api = 0;
-
- PyArray_PREPARE_TRIVIAL_TRIPLE_ITERATION(dst, src, mask, count,
- dst_data, src_data, mask_data,
- dst_stride, src_stride, mask_stride);
-
- /*
- * Check for overlap with positive strides, and if found,
- * possibly reverse the order
- */
- if (dst_data > src_data && src_stride > 0 && dst_stride > 0 &&
- (dst_data < src_data+src_stride*count) &&
- (src_data < dst_data+dst_stride*count)) {
- dst_data += dst_stride*(count-1);
- src_data += src_stride*(count-1);
- mask_data += mask_stride*(count-1);
- dst_stride = -dst_stride;
- src_stride = -src_stride;
- mask_stride = -mask_stride;
- }
-
- if (PyArray_GetMaskedDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- src_stride, dst_stride, mask_stride,
- PyArray_DESCR(src),
- PyArray_DESCR(dst),
- PyArray_DESCR(mask),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- return -1;
- }
-
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- stransfer(dst_data, dst_stride, src_data, src_stride,
- (npy_uint8 *)mask_data, mask_stride,
- count, src_itemsize, transferdata);
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
-
- NPY_AUXDATA_FREE(transferdata);
-
- return PyErr_Occurred() ? -1 : 0;
- }
- else {
- PyArrayObject *op[3];
- npy_uint32 op_flags[3];
- PyArray_Descr *op_dtypes_values[3], **op_dtypes = NULL;
- NpyIter *iter;
- npy_intp src_size;
-
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *stride;
- npy_intp *countptr;
- npy_intp src_itemsize;
- int needs_api;
-
- op[0] = dst;
- op[1] = src;
- op[2] = mask;
- /*
- * TODO: In NumPy 2.0, reenable NPY_ITER_NO_BROADCAST. This
- * was removed during NumPy 1.6 testing for compatibility
- * with NumPy 1.5, as per Travis's -10 veto power.
- */
- /*op_flags[0] = NPY_ITER_WRITEONLY|NPY_ITER_NO_BROADCAST;*/
- op_flags[0] = NPY_ITER_WRITEONLY;
- op_flags[1] = NPY_ITER_READONLY;
- op_flags[2] = NPY_ITER_READONLY;
-
- /*
- * If 'src' is being broadcast to 'dst', and it is smaller
- * than the default NumPy buffer size, allow the iterator to
- * make a copy of 'src' with the 'dst' dtype if necessary.
- *
- * This is a performance operation, to allow fewer casts followed
- * by more plain copies.
- */
- src_size = PyArray_SIZE(src);
- if (src_size <= NPY_BUFSIZE && src_size < PyArray_SIZE(dst)) {
- op_flags[1] |= NPY_ITER_COPY;
- op_dtypes = op_dtypes_values;
- op_dtypes_values[0] = NULL;
- op_dtypes_values[1] = PyArray_DESCR(dst);
- op_dtypes_values[2] = NULL;
- }
-
- iter = NpyIter_MultiNew(3, op,
- NPY_ITER_EXTERNAL_LOOP|
- NPY_ITER_REFS_OK|
- NPY_ITER_ZEROSIZE_OK,
- NPY_KEEPORDER,
- NPY_UNSAFE_CASTING,
- op_flags,
- op_dtypes);
- if (iter == NULL) {
- return -1;
- }
-
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- NpyIter_Deallocate(iter);
- return -1;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- stride = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
- src_itemsize = PyArray_DESCR(src)->elsize;
-
- needs_api = NpyIter_IterationNeedsAPI(iter);
-
- /*
- * Because buffering is disabled in the iterator, the inner loop
- * strides will be the same throughout the iteration loop. Thus,
- * we can pass them to this function to take advantage of
- * contiguous strides, etc.
- */
- if (PyArray_GetMaskedDTypeTransferFunction(
- PyArray_ISALIGNED(src) && PyArray_ISALIGNED(dst),
- stride[1], stride[0], stride[2],
- NpyIter_GetDescrArray(iter)[1],
- PyArray_DESCR(dst),
- PyArray_DESCR(mask),
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- NpyIter_Deallocate(iter);
- return -1;
- }
-
-
- if (NpyIter_GetIterSize(iter) != 0) {
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- do {
- stransfer(dataptr[0], stride[0],
- dataptr[1], stride[1],
- (npy_uint8 *)dataptr[2], stride[2],
- *countptr, src_itemsize, transferdata);
- } while(iternext(iter));
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
- }
-
- NPY_AUXDATA_FREE(transferdata);
- NpyIter_Deallocate(iter);
-
- return PyErr_Occurred() ? -1 : 0;
- }
+ return array_assign_array(dst, src, NULL, NPY_UNSAFE_CASTING, 0, NULL);
}
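With overlap handling now inside array_assign_array, PyArray_MoveInto and PyArray_CopyInto become identical wrappers. A quick sketch of the multidimensional overlap case that previously required MoveInto's temporary-copy logic:

    import numpy as np

    a = np.arange(9).reshape(3, 3)
    a[1:] = a[:-1]      # ndim > 1 with overlap: temporary-copy path
    print(a)            # [[0 1 2]
                        #  [0 1 2]
                        #  [3 4 5]]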
-
/*NUMPY_API
* PyArray_CheckAxis
*