summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
authorMark Wiebe <mwwiebe@gmail.com>2011-08-07 18:09:11 -0700
committerCharles Harris <charlesr.harris@gmail.com>2011-08-27 07:26:53 -0600
commit42b9c84cfcdd27057b902ea094247866e2d741da (patch)
tree9cdf00c2f7a0048a1ec1ded633a9eb714654f7c8 /numpy
parentd5ef96136874f9e3f475833ca5f966e9599b2223 (diff)
downloadnumpy-42b9c84cfcdd27057b902ea094247866e2d741da.tar.gz
ENH: missingdata: Implement routine for array to array assignment
Diffstat (limited to 'numpy')
-rw-r--r--numpy/core/SConscript1
-rw-r--r--numpy/core/setup.py1
-rw-r--r--numpy/core/src/multiarray/array_assign.c2
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c626
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c76
-rw-r--r--numpy/core/src/multiarray/arrayobject.c2
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c148
-rw-r--r--numpy/core/src/multiarray/mapping.c6
-rw-r--r--numpy/core/src/multiarray/na_mask.c4
-rw-r--r--numpy/core/src/multiarray/na_mask.h5
-rw-r--r--numpy/core/src/private/lowlevel_strided_loops.h52
11 files changed, 843 insertions, 80 deletions
diff --git a/numpy/core/SConscript b/numpy/core/SConscript
index 56b3a04f3..1a0171224 100644
--- a/numpy/core/SConscript
+++ b/numpy/core/SConscript
@@ -445,6 +445,7 @@ if ENABLE_SEPARATE_COMPILATION:
pjoin('src', 'multiarray', 'arrayobject.c'),
pjoin('src', 'multiarray', 'array_assign.c'),
pjoin('src', 'multiarray', 'array_assign_scalar.c'),
+ pjoin('src', 'multiarray', 'array_assign_array.c'),
pjoin('src', 'multiarray', 'datetime.c'),
pjoin('src', 'multiarray', 'datetime_strings.c'),
pjoin('src', 'multiarray', 'datetime_busday.c'),
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index c08fd1601..8ca28bfde 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -753,6 +753,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
+ join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
diff --git a/numpy/core/src/multiarray/array_assign.c b/numpy/core/src/multiarray/array_assign.c
index f50567eac..6136f3821 100644
--- a/numpy/core/src/multiarray/array_assign.c
+++ b/numpy/core/src/multiarray/array_assign.c
@@ -18,6 +18,8 @@
#include "npy_config.h"
#include "numpy/npy_3kcompat.h"
+#include "shape.h"
+
#include "array_assign.h"
/* See array_assign.h for parameter documentation */
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
new file mode 100644
index 000000000..00fe31229
--- /dev/null
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -0,0 +1,626 @@
+/*
+ * This file implements assignment from an ndarray to another ndarray.
+ *
+ * Written by Mark Wiebe (mwwiebe@gmail.com)
+ * Copyright (c) 2011 by Enthought, Inc.
+ *
+ * See LICENSE.txt for the license.
+ */
+
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+#define NPY_NO_DEPRECATED_API
+#define _MULTIARRAYMODULE
+#include <numpy/ndarraytypes.h>
+
+#include "npy_config.h"
+#include "numpy/npy_3kcompat.h"
+
+#include "convert_datatype.h"
+#include "methods.h"
+#include "shape.h"
+#include "lowlevel_strided_loops.h"
+#include "na_singleton.h"
+#include "na_mask.h"
+
+#include "array_assign.h"
+
+/*
+ * Assigns the array from 'src' to 'dst', casting between the dtypes
+ * as needed. The strides must already have been broadcast to dst's shape.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+raw_array_assign_array(int ndim, npy_intp *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides)
+{
+ int idim;
+ npy_intp shape_it[NPY_MAXDIMS];
+ npy_intp dst_strides_it[NPY_MAXDIMS];
+ npy_intp src_strides_it[NPY_MAXDIMS];
+ npy_intp coord[NPY_MAXDIMS];
+ NPY_BEGIN_THREADS_DEF;
+
+ PyArray_StridedUnaryOp *stransfer = NULL;
+ NpyAuxData *transferdata = NULL;
+ int aligned, needs_api = 0;
+ npy_intp src_itemsize = src_dtype->elsize;
+
+ /* Check alignment of both the source and the destination data */
+ aligned = raw_array_is_aligned(ndim,
+ dst_data, dst_strides, dst_dtype->alignment) &&
+ raw_array_is_aligned(ndim,
+ src_data, src_strides, src_dtype->alignment);
+
+ /* Use raw iteration with no heap allocation */
+ if (PyArray_PrepareTwoRawArrayIter(
+ ndim, shape,
+ dst_data, dst_strides,
+ src_data, src_strides,
+ &ndim, shape_it,
+ &dst_data, dst_strides_it,
+ &src_data, src_strides_it) < 0) {
+ return -1;
+ }
+
+ /* Get a dtype transfer function specialized for the inner strides */
+ if (PyArray_GetDTypeTransferFunction(aligned,
+ src_strides_it[0], dst_strides_it[0],
+ src_dtype, dst_dtype,
+ 0,
+ &stransfer, &transferdata,
+ &needs_api) != NPY_SUCCEED) {
+ return -1;
+ }
+
+ if (!needs_api) {
+ NPY_BEGIN_THREADS;
+ }
+
+ NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
+ /* Process the innermost dimension in one transfer call */
+ stransfer(dst_data, dst_strides_it[0], src_data, src_strides_it[0],
+ shape_it[0], src_itemsize, transferdata);
+ } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it,
+ dst_data, dst_strides_it,
+ src_data, src_strides_it);
+
+ if (!needs_api) {
+ NPY_END_THREADS;
+ }
+
+ NPY_AUXDATA_FREE(transferdata);
+
+ return (needs_api && PyErr_Occurred()) ? -1 : 0;
+}
+
+/*
+ * Assigns the array from 'src' to 'dst', wherever the 'wheremask'
+ * value is True. The strides must already have been broadcast.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+raw_array_wheremasked_assign_array(int ndim, npy_intp *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides,
+ PyArray_Descr *wheremask_dtype, char *wheremask_data,
+ npy_intp *wheremask_strides)
+{
+ int idim;
+ npy_intp shape_it[NPY_MAXDIMS];
+ npy_intp dst_strides_it[NPY_MAXDIMS];
+ npy_intp src_strides_it[NPY_MAXDIMS];
+ npy_intp wheremask_strides_it[NPY_MAXDIMS];
+ npy_intp coord[NPY_MAXDIMS];
+ NPY_BEGIN_THREADS_DEF;
+
+ PyArray_MaskedStridedUnaryOp *stransfer = NULL;
+ NpyAuxData *transferdata = NULL;
+ int aligned, needs_api = 0;
+ npy_intp src_itemsize = src_dtype->elsize;
+
+ /* Check alignment of both the source and the destination data */
+ aligned = raw_array_is_aligned(ndim,
+ dst_data, dst_strides, dst_dtype->alignment) &&
+ raw_array_is_aligned(ndim,
+ src_data, src_strides, src_dtype->alignment);
+
+ /* Use raw iteration with no heap allocation */
+ if (PyArray_PrepareThreeRawArrayIter(
+ ndim, shape,
+ dst_data, dst_strides,
+ src_data, src_strides,
+ wheremask_data, wheremask_strides,
+ &ndim, shape_it,
+ &dst_data, dst_strides_it,
+ &src_data, src_strides_it,
+ &wheremask_data, wheremask_strides_it) < 0) {
+ return -1;
+ }
+
+ /* Get a masked dtype transfer function to do the casting */
+ if (PyArray_GetMaskedDTypeTransferFunction(aligned,
+ src_strides_it[0],
+ dst_strides_it[0],
+ wheremask_strides_it[0],
+ src_dtype, dst_dtype, wheremask_dtype,
+ 0,
+ &stransfer, &transferdata,
+ &needs_api) != NPY_SUCCEED) {
+ return -1;
+ }
+
+ if (!needs_api) {
+ NPY_BEGIN_THREADS;
+ }
+
+ NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
+ /* Process the innermost dimension in one masked transfer call */
+ stransfer(dst_data, dst_strides_it[0], src_data, src_strides_it[0],
+ (npy_mask *)wheremask_data, wheremask_strides_it[0],
+ shape_it[0], src_itemsize, transferdata);
+ } NPY_RAW_ITER_THREE_NEXT(idim, ndim, coord, shape_it,
+ dst_data, dst_strides_it,
+ src_data, src_strides_it,
+ wheremask_data, wheremask_strides_it);
+
+ if (!needs_api) {
+ NPY_END_THREADS;
+ }
+
+ NPY_AUXDATA_FREE(transferdata);
+
+ return (needs_api && PyErr_Occurred()) ? -1 : 0;
+}
+
+/*
+ * Assigns the elements of 'src' to 'dst' where the 'wheremask'
+ * is True, except for those which are masked as NA according
+ * to 'maskna'.
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+raw_array_wheremasked_assign_array_preservena(int ndim, npy_intp *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides,
+ PyArray_Descr *maskna_dtype, char *maskna_data,
+ npy_intp *maskna_strides,
+ PyArray_Descr *wheremask_dtype, char *wheremask_data,
+ npy_intp *wheremask_strides)
+{
+ int idim;
+ npy_intp shape_it[NPY_MAXDIMS];
+ npy_intp dst_strides_it[NPY_MAXDIMS];
+ npy_intp src_strides_it[NPY_MAXDIMS];
+ npy_intp maskna_strides_it[NPY_MAXDIMS];
+ npy_intp wheremask_strides_it[NPY_MAXDIMS];
+ npy_intp coord[NPY_MAXDIMS];
+ NPY_BEGIN_THREADS_DEF;
+
+ PyArray_MaskedStridedUnaryOp *stransfer = NULL;
+ NpyAuxData *transferdata = NULL;
+ int aligned, needs_api = 0;
+ npy_intp src_itemsize = src_dtype->elsize;
+
+ PyArray_StridedBinaryOp *maskand_stransfer = NULL;
+ NpyAuxData *maskand_transferdata = NULL;
+
+ char *maskna_buffer;
+ npy_intp maskna_itemsize;
+
+ /* Check alignment */
+ aligned = raw_array_is_aligned(ndim,
+ dst_data, dst_strides, dst_dtype->alignment) &&
+ raw_array_is_aligned(ndim,
+ src_data, src_strides, src_dtype->alignment);
+
+ /* Use raw iteration with no heap allocation */
+ if (PyArray_PrepareFourRawArrayIter(
+ ndim, shape,
+ dst_data, dst_strides,
+ src_data, src_strides,
+ maskna_data, maskna_strides,
+ wheremask_data, wheremask_strides,
+ &ndim, shape_it,
+ &dst_data, dst_strides_it,
+ &src_data, src_strides_it,
+ &maskna_data, maskna_strides_it,
+ &wheremask_data, wheremask_strides_it) < 0) {
+ return -1;
+ }
+
+ /* Allocate a buffer for inverting/anding the mask */
+ maskna_itemsize = maskna_dtype->elsize;
+ maskna_buffer = PyArray_malloc(NPY_ARRAY_ASSIGN_BUFFERSIZE *
+ maskna_itemsize);
+ if (maskna_buffer == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
+
+ /* Get the function to do the casting (use the prepared strides) */
+ if (PyArray_GetMaskedDTypeTransferFunction(aligned,
+ src_strides_it[0], dst_strides_it[0], maskna_itemsize,
+ src_dtype, dst_dtype, maskna_dtype,
+ 0,
+ &stransfer, &transferdata,
+ &needs_api) != NPY_SUCCEED) {
+ PyArray_free(maskna_buffer);
+ return -1;
+ }
+
+ /*
+ * Get the function to AND the NA mask with the where mask.
+ * The output of the binary operation has the dtype 'maskna_dtype'.
+ */
+ if (PyArray_GetMaskAndFunction(
+ maskna_strides_it[0], maskna_dtype, 0,
+ wheremask_strides_it[0], wheremask_dtype, 0,
+ &maskand_stransfer, &maskand_transferdata) < 0) {
+ PyArray_free(maskna_buffer);
+ NPY_AUXDATA_FREE(transferdata);
+ return -1;
+ }
+
+ if (!needs_api) {
+ NPY_BEGIN_THREADS;
+ }
+
+ NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
+ npy_intp buffered_count, count;
+ char *dst_d, *src_d, *maskna_d, *wheremask_d;
+ /* Process the innermost dimension a buffer size at a time */
+ count = shape_it[0];
+ dst_d = dst_data;
+ src_d = src_data;
+ maskna_d = maskna_data;
+ wheremask_d = wheremask_data;
+ do {
+ buffered_count = count < NPY_ARRAY_ASSIGN_BUFFERSIZE
+ ? count
+ : NPY_ARRAY_ASSIGN_BUFFERSIZE;
+
+ /* Prepare the mask into the buffer */
+ maskand_stransfer(maskna_buffer, maskna_itemsize,
+ maskna_d, maskna_strides_it[0],
+ wheremask_d, wheremask_strides_it[0],
+ buffered_count, maskand_transferdata);
+
+ /* Transfer the data based on the buffered mask */
+ stransfer(dst_d, dst_strides_it[0], src_d, src_strides_it[0],
+ (npy_mask *)maskna_buffer, maskna_itemsize,
+ buffered_count, src_itemsize, transferdata);
+
+ dst_d += buffered_count * dst_strides_it[0];
+ src_d += buffered_count * src_strides_it[0];
+ maskna_d += buffered_count * maskna_strides_it[0];
+ wheremask_d += buffered_count * wheremask_strides_it[0];
+ count -= buffered_count;
+ } while (count > 0);
+ } NPY_RAW_ITER_FOUR_NEXT(idim, ndim, coord, shape_it,
+ dst_data, dst_strides_it,
+ src_data, src_strides_it,
+ maskna_data, maskna_strides_it,
+ wheremask_data, wheremask_strides_it);
+
+ if (!needs_api) {
+ NPY_END_THREADS;
+ }
+
+ PyArray_free(maskna_buffer);
+ NPY_AUXDATA_FREE(transferdata);
+ NPY_AUXDATA_FREE(maskand_transferdata);
+
+ return (needs_api && PyErr_Occurred()) ? -1 : 0;
+}
+
+
+/* See array_assign.h for documentation */
+NPY_NO_EXPORT int
+array_assign_array(PyArrayObject *dst, PyArrayObject *src,
+ PyArrayObject *wheremask,
+ NPY_CASTING casting,
+ npy_bool preservena, npy_bool *preservewhichna)
+{
+ int dst_has_maskna = PyArray_HASMASKNA(dst);
+ int src_has_maskna = PyArray_HASMASKNA(src);
+
+ npy_intp src_strides[NPY_MAXDIMS], src_maskna_strides[NPY_MAXDIMS];
+
+
+ /* Use array_assign_scalar if 'src' NDIM is 0 */
+ if (PyArray_NDIM(src) == 0) {
+ /* If the array is masked, assign to the NA mask */
+ if (PyArray_HASMASKNA(src)) {
+ NpyNA *na = NpyNA_FromObject((PyObject *)src, 1);
+
+ if (na != NULL) {
+ /* TODO: With multi-NA, preservena must also be followed */
+ int retcode = PyArray_AssignNA(dst, wheremask, na);
+ Py_DECREF(na);
+ return retcode;
+ }
+ }
+
+ return array_assign_scalar(dst, PyArray_DESCR(src), PyArray_DATA(src),
+ wheremask, casting, preservena, preservewhichna);
+ }
+
+ /* Check that 'dst' is writeable */
+ if (!PyArray_ISWRITEABLE(dst)) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "cannot assign to a read-only array");
+ return -1;
+ }
+
+ /* Check the casting rule */
+ if (!PyArray_CanCastTypeTo(PyArray_DESCR(src),
+ PyArray_DESCR(dst), casting)) {
+ PyObject *errmsg;
+ errmsg = PyUString_FromString("Cannot cast input array from ");
+ PyUString_ConcatAndDel(&errmsg,
+ PyObject_Repr((PyObject *)PyArray_DESCR(src)));
+ PyUString_ConcatAndDel(&errmsg,
+ PyUString_FromString(" to "));
+ PyUString_ConcatAndDel(&errmsg,
+ PyObject_Repr((PyObject *)PyArray_DESCR(dst)));
+ PyUString_ConcatAndDel(&errmsg,
+ PyUString_FromFormat(" according to the rule %s",
+ npy_casting_to_string(casting)));
+ PyErr_SetObject(PyExc_TypeError, errmsg);
+ return -1;
+ }
+
+ if (preservewhichna != NULL) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "multi-NA support is not yet implemented");
+ return -1;
+ }
+
+ if (src_has_maskna && !dst_has_maskna) {
+ /* TODO: add 'wheremask' as a parameter to ContainsNA */
+ if (PyArray_ContainsNA(src)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot assign NA value to an array which "
+ "does not support NAs");
+ return -1;
+ }
+ else {
+ src_has_maskna = 0;
+ }
+ }
+
+ /* Broadcast 'src' to 'dst' for raw iteration */
+ if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_NDIM(src), PyArray_DIMS(src),
+ PyArray_STRIDES(src), "input array",
+ src_strides) < 0) {
+ return -1;
+ }
+
+ if (src_has_maskna) {
+ if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_NDIM(src), PyArray_DIMS(src),
+ PyArray_MASKNA_STRIDES(src), "input array",
+ src_maskna_strides) < 0) {
+ return -1;
+ }
+ }
+
+ if (wheremask == NULL) {
+ /* A straightforward value assignment */
+ if (!preservena || !dst_has_maskna) {
+ /* If assigning to an array with an NA mask, set to all exposed */
+ if (dst_has_maskna) {
+ if (src_has_maskna) {
+ /* Assign the NA mask */
+ if (raw_array_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides) < 0) {
+ return -1;
+ }
+
+ /* Assign the values based on the 'src' NA mask */
+ return raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst),
+ PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src),
+ src_strides,
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides);
+
+ }
+ else {
+ if (PyArray_AssignMaskNA(dst, NULL, 1) < 0) {
+ return -1;
+ }
+ }
+ }
+
+ /* Do the assignment with raw array iteration */
+ if (raw_array_assign_array(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src), src_strides) < 0) {
+ return -1;
+ }
+ }
+ /* A value assignment without overwriting NA values */
+ else {
+ if (src_has_maskna) {
+ /* Assign the NA mask, wheremasked with the 'dst' NA mask */
+ if (raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides,
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst)) < 0) {
+ return -1;
+ }
+ }
+
+ /*
+ * The 'dst' NA mask now has exposed precisely the values we
+ * want to assign, so use it for this assignment.
+ */
+ if (raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst),
+ PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src),
+ src_strides,
+ PyArray_MASKNA_DTYPE(dst), PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst)) < 0) {
+ return -1;
+ }
+ }
+ }
+ else {
+ npy_intp wheremask_strides[NPY_MAXDIMS];
+
+ if (PyArray_ContainsNA(wheremask)) {
+ if (!dst_has_maskna) {
+ PyErr_SetString(PyExc_ValueError,
+ "Cannot assign NA value to an array which "
+ "does not support NAs");
+ return -1;
+ }
+ else {
+ /* TODO: add support for this */
+ PyErr_SetString(PyExc_ValueError,
+ "A where mask with NA values is not supported "
+ "yet");
+ return -1;
+ }
+ }
+
+ /* Broadcast the wheremask to 'dst' for raw iteration */
+ if (broadcast_strides(PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_NDIM(wheremask), PyArray_DIMS(wheremask),
+ PyArray_STRIDES(wheremask), "where mask",
+ wheremask_strides) < 0) {
+ return -1;
+ }
+
+ /* A straightforward where-masked assignment */
+ if (!preservena || !dst_has_maskna) {
+ /* If assigning to an array with an NA mask, set to all exposed */
+ if (dst_has_maskna) {
+ /*
+ * TODO: If the where mask has NA values, this part
+ * changes too.
+ */
+ if (src_has_maskna) {
+ /* Assign the NA mask */
+ if (raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides,
+ PyArray_DESCR(wheremask),
+ PyArray_DATA(wheremask),
+ wheremask_strides) < 0) {
+ return -1;
+ }
+
+ /*
+ * Assign the values based on the wheremask, not
+ * overwriting values also masked by the 'src' NA mask
+ */
+ return raw_array_wheremasked_assign_array_preservena(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst),
+ PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src),
+ src_strides,
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides,
+ PyArray_DESCR(wheremask),
+ PyArray_DATA(wheremask),
+ wheremask_strides);
+
+ }
+ else {
+ if (PyArray_AssignMaskNA(dst, wheremask, 1) < 0) {
+ return -1;
+ }
+ }
+ }
+
+ /* Do the masked assignment with raw array iteration */
+ if (raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src), src_strides,
+ PyArray_DESCR(wheremask), PyArray_DATA(wheremask),
+ wheremask_strides) < 0) {
+ return -1;
+ }
+ }
+ /* A masked value assignment without overwriting NA values */
+ else {
+ if (src_has_maskna) {
+ /*
+ * Assign the NA mask, wheremasked with the 'dst' NA mask
+ * and the parameter 'wheremask'
+ */
+ if (raw_array_wheremasked_assign_array_preservena(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_MASKNA_DTYPE(src),
+ PyArray_MASKNA_DATA(src),
+ src_maskna_strides,
+ PyArray_MASKNA_DTYPE(dst),
+ PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_DESCR(wheremask),
+ PyArray_DATA(wheremask),
+ wheremask_strides) < 0) {
+ return -1;
+ }
+ }
+
+ /*
+ * The 'dst' NA mask together with the 'wheremask' now have
+ * exposed precisely the values we want to assign, so do
+ * another wheremasked preservena assignment.
+ */
+ if (raw_array_wheremasked_assign_array_preservena(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst),
+ PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src),
+ src_strides,
+ PyArray_MASKNA_DTYPE(dst), PyArray_MASKNA_DATA(dst),
+ PyArray_MASKNA_STRIDES(dst),
+ PyArray_DESCR(wheremask), PyArray_DATA(wheremask),
+ wheremask_strides) < 0) {
+ return -1;
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index ba8207124..5f2675176 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -167,80 +167,6 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
/*
* Assigns the scalar value to every element of the destination raw array
- * except for those which are masked as NA according to 'maskna'.
- *
- * Returns 0 on success, -1 on failure.
- */
-NPY_NO_EXPORT int
-raw_array_assign_scalar_preservena(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
- PyArray_Descr *src_dtype, char *src_data,
- PyArray_Descr *maskna_dtype, char *maskna_data,
- npy_intp *maskna_strides)
-{
- int idim;
- npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS];
- npy_intp maskna_strides_it[NPY_MAXDIMS];
- npy_intp coord[NPY_MAXDIMS];
- NPY_BEGIN_THREADS_DEF;
-
- PyArray_MaskedStridedUnaryOp *stransfer = NULL;
- NpyAuxData *transferdata = NULL;
- int aligned, needs_api = 0;
- npy_intp src_itemsize = src_dtype->elsize;
-
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim, dst_data, dst_strides,
- dst_dtype->alignment);
- if (((npy_intp)src_data & (src_dtype->alignment - 1)) != 0) {
- aligned = 0;
- }
-
- /* Use raw iteration with no heap allocation */
- if (PyArray_PrepareTwoRawArrayIter(
- ndim, shape,
- dst_data, dst_strides,
- maskna_data, maskna_strides,
- &ndim, shape_it,
- &dst_data, dst_strides_it,
- &maskna_data, maskna_strides_it) < 0) {
- return -1;
- }
-
- /* Get the function to do the casting */
- if (PyArray_GetMaskedDTypeTransferFunction(aligned,
- 0, dst_strides_it[0], maskna_strides_it[0],
- src_dtype, dst_dtype, maskna_dtype,
- 0,
- &stransfer, &transferdata,
- &needs_api) != NPY_SUCCEED) {
- return -1;
- }
-
- if (!needs_api) {
- NPY_BEGIN_THREADS;
- }
-
- NPY_RAW_ITER_START(idim, ndim, coord, shape_it) {
- /* Transfer the data based on the NA mask */
- stransfer(dst_data, dst_strides_it[0], src_data, 0,
- (npy_mask *)maskna_data, maskna_strides_it[0],
- shape_it[0], src_itemsize, transferdata);
- } NPY_RAW_ITER_TWO_NEXT(idim, ndim, coord, shape_it,
- dst_data, dst_strides_it,
- maskna_data, maskna_strides_it);
-
- if (!needs_api) {
- NPY_END_THREADS;
- }
-
- NPY_AUXDATA_FREE(transferdata);
-
- return (needs_api && PyErr_Occurred()) ? -1 : 0;
-}
-
-/*
- * Assigns the scalar value to every element of the destination raw array
* where the 'wheremask' is True, except for those which are masked as NA
* according to 'maskna'.
*
@@ -470,7 +396,7 @@ array_assign_scalar(PyArrayObject *dst,
}
/* A value assignment without overwriting NA values */
else {
- if (raw_array_assign_scalar_preservena(
+ if (raw_array_wheremasked_assign_scalar(
PyArray_NDIM(dst), PyArray_DIMS(dst),
PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst),
src_dtype, src_data,
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index e8a8f1d27..7e4aadca7 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -211,7 +211,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
}
/* Assigning NA affects the mask if it exists */
else if (na != NULL) {
- if (PyArray_AssignNA(dest, na) < 0) {
+ if (PyArray_AssignNA(dest, NULL, na) < 0) {
Py_DECREF(na);
Py_DECREF(src_object);
return -1;
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index fddc1406e..39a8ba1bc 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -4261,6 +4261,154 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
return 0;
}
+/* See lowlevel_strided_loops.h for parameter docs. */
+NPY_NO_EXPORT int
+PyArray_PrepareFourRawArrayIter(int ndim, npy_intp *shape,
+ char *dataA, npy_intp *stridesA,
+ char *dataB, npy_intp *stridesB,
+ char *dataC, npy_intp *stridesC,
+ char *dataD, npy_intp *stridesD,
+ int *out_ndim, npy_intp *out_shape,
+ char **out_dataA, npy_intp *out_stridesA,
+ char **out_dataB, npy_intp *out_stridesB,
+ char **out_dataC, npy_intp *out_stridesC,
+ char **out_dataD, npy_intp *out_stridesD)
+{
+ npy_stride_sort_item strideperm[NPY_MAXDIMS];
+ int i, j;
+
+ /* Special-case 0 and 1 dimensions (no sorting/coalescing needed) */
+ if (ndim == 0) {
+ *out_ndim = 1;
+ *out_dataA = dataA;
+ *out_dataB = dataB;
+ *out_dataC = dataC;
+ *out_dataD = dataD;
+ out_shape[0] = 1;
+ out_stridesA[0] = 0;
+ out_stridesB[0] = 0;
+ out_stridesC[0] = 0;
+ out_stridesD[0] = 0;
+ return 0;
+ }
+ else if (ndim == 1) {
+ npy_intp stride_entryA = stridesA[0];
+ npy_intp stride_entryB = stridesB[0];
+ npy_intp stride_entryC = stridesC[0];
+ npy_intp stride_entryD = stridesD[0];
+ npy_intp shape_entry = shape[0];
+ *out_ndim = 1;
+ out_shape[0] = shape[0];
+ /* Always make a positive stride for the first operand */
+ if (stride_entryA >= 0) {
+ *out_dataA = dataA;
+ *out_dataB = dataB;
+ *out_dataC = dataC;
+ *out_dataD = dataD;
+ out_stridesA[0] = stride_entryA;
+ out_stridesB[0] = stride_entryB;
+ out_stridesC[0] = stride_entryC;
+ out_stridesD[0] = stride_entryD;
+ }
+ else {
+ *out_dataA = dataA + stride_entryA * (shape_entry - 1);
+ *out_dataB = dataB + stride_entryB * (shape_entry - 1);
+ *out_dataC = dataC + stride_entryC * (shape_entry - 1);
+ *out_dataD = dataD + stride_entryD * (shape_entry - 1);
+ out_stridesA[0] = -stride_entryA;
+ out_stridesB[0] = -stride_entryB;
+ out_stridesC[0] = -stride_entryC;
+ out_stridesD[0] = -stride_entryD;
+ }
+ return 0;
+ }
+
+ /* Sort the axes based on operand A's strides (A drives the layout) */
+ PyArray_CreateSortedStridePerm(ndim, stridesA, strideperm);
+ for (i = 0; i < ndim; ++i) {
+ int iperm = strideperm[ndim - i - 1].perm;
+ out_shape[i] = shape[iperm];
+ out_stridesA[i] = stridesA[iperm];
+ out_stridesB[i] = stridesB[iperm];
+ out_stridesC[i] = stridesC[iperm];
+ out_stridesD[i] = stridesD[iperm];
+ }
+
+ /* Reverse any negative strides of operand A */
+ for (i = 0; i < ndim; ++i) {
+ npy_intp stride_entryA = out_stridesA[i];
+ npy_intp stride_entryB = out_stridesB[i];
+ npy_intp stride_entryC = out_stridesC[i];
+ npy_intp stride_entryD = out_stridesD[i];
+ npy_intp shape_entry = out_shape[i];
+
+ if (stride_entryA < 0) {
+ dataA += stride_entryA * (shape_entry - 1);
+ dataB += stride_entryB * (shape_entry - 1);
+ dataC += stride_entryC * (shape_entry - 1);
+ dataD += stride_entryD * (shape_entry - 1);
+ out_stridesA[i] = -stride_entryA;
+ out_stridesB[i] = -stride_entryB;
+ out_stridesC[i] = -stride_entryC;
+ out_stridesD[i] = -stride_entryD;
+ }
+ /* Detect 0-size arrays here (whole iteration is then empty) */
+ if (shape_entry == 0) {
+ *out_ndim = 1;
+ *out_dataA = dataA;
+ *out_dataB = dataB;
+ *out_dataC = dataC;
+ *out_dataD = dataD;
+ out_shape[0] = 0;
+ out_stridesA[0] = 0;
+ out_stridesB[0] = 0;
+ out_stridesC[0] = 0;
+ out_stridesD[0] = 0;
+ return 0;
+ }
+ }
+
+ /* Coalesce axes that are contiguous in all four operands */
+ i = 0;
+ for (j = 1; j < ndim; ++j) {
+ if (out_shape[i] == 1) {
+ /* Drop axis i */
+ out_shape[i] = out_shape[j];
+ out_stridesA[i] = out_stridesA[j];
+ out_stridesB[i] = out_stridesB[j];
+ out_stridesC[i] = out_stridesC[j];
+ out_stridesD[i] = out_stridesD[j];
+ }
+ else if (out_shape[j] == 1) {
+ /* Drop axis j */
+ }
+ else if (out_stridesA[i] * out_shape[i] == out_stridesA[j] &&
+ out_stridesB[i] * out_shape[i] == out_stridesB[j] &&
+ out_stridesC[i] * out_shape[i] == out_stridesC[j] &&
+ out_stridesD[i] * out_shape[i] == out_stridesD[j]) {
+ /* Coalesce axes i and j */
+ out_shape[i] *= out_shape[j];
+ }
+ else {
+ /* Can't coalesce, go to next i */
+ ++i;
+ out_shape[i] = out_shape[j];
+ out_stridesA[i] = out_stridesA[j];
+ out_stridesB[i] = out_stridesB[j];
+ out_stridesC[i] = out_stridesC[j];
+ out_stridesD[i] = out_stridesD[j];
+ }
+ }
+ ndim = i+1;
+
+ *out_dataA = dataA;
+ *out_dataB = dataB;
+ *out_dataC = dataC;
+ *out_dataD = dataD;
+ *out_ndim = ndim;
+ return 0;
+}
+
/*
* Casts the elements from one n-dimensional array to another n-dimensional
* array with identical shape but possibly different strides and dtypes.
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 65ad859b1..dc1dd66f4 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -706,7 +706,11 @@ array_boolean_subscript(PyArrayObject *self,
return NULL;
}
- /* See the Boolean Indexing section of the missing data NEP */
+ /*
+ * See the Boolean Indexing section of the missing data NEP.
+ *
+ * TODO: Add 'wheremask' as a parameter to ContainsNA.
+ */
if (PyArray_ContainsNA(bmask)) {
PyErr_SetString(PyExc_ValueError,
"The boolean mask indexing array "
diff --git a/numpy/core/src/multiarray/na_mask.c b/numpy/core/src/multiarray/na_mask.c
index f9ffcba3a..6d7677d5e 100644
--- a/numpy/core/src/multiarray/na_mask.c
+++ b/numpy/core/src/multiarray/na_mask.c
@@ -348,7 +348,7 @@ PyArray_AllocateMaskNA(PyArrayObject *arr,
* Returns -1 on failure, 0 on success.
*/
NPY_NO_EXPORT int
-PyArray_AssignNA(PyArrayObject *arr, NpyNA *na)
+PyArray_AssignNA(PyArrayObject *arr, PyArrayObject *wheremask, NpyNA *na)
{
NpyNA_fields *fna = (NpyNA_fields *)na;
char maskvalue;
@@ -376,7 +376,7 @@ PyArray_AssignNA(PyArrayObject *arr, NpyNA *na)
maskvalue = (char)NpyMaskValue_Create(0, fna->payload);
}
- return PyArray_AssignMaskNA(arr, NULL, maskvalue);
+ return PyArray_AssignMaskNA(arr, wheremask, maskvalue);
}
/*
diff --git a/numpy/core/src/multiarray/na_mask.h b/numpy/core/src/multiarray/na_mask.h
index fc6c40730..920b32dfa 100644
--- a/numpy/core/src/multiarray/na_mask.h
+++ b/numpy/core/src/multiarray/na_mask.h
@@ -6,10 +6,13 @@
/*
* Assigns the given NA value to all the elements in the array.
*
+ * If 'wheremask' isn't NULL, it specifies which elements to assign
+ * NA to.
+ *
* Returns -1 on failure, 0 on success.
*/
NPY_NO_EXPORT int
-PyArray_AssignNA(PyArrayObject *arr, NpyNA *na);
+PyArray_AssignNA(PyArrayObject *arr, PyArrayObject *wheremask, NpyNA *na);
/*
* A ufunc-like function, which returns a boolean or an array
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h
index 98004892b..6ffa36ee9 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/private/lowlevel_strided_loops.h
@@ -410,6 +410,34 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
char **out_dataB, npy_intp *out_stridesB,
char **out_dataC, npy_intp *out_stridesC);
+/*
+ * The same as PyArray_PrepareOneRawArrayIter, but for four
+ * operands instead of one. Any broadcasting of the four operands
+ * should have already been done before calling this function,
+ * as the ndim and shape is only specified once for all operands.
+ *
+ * Only the strides of the first operand are used to reorder
+ * the dimensions, no attempt to consider all the strides together
+ * is made, as is done in the NpyIter object.
+ *
+ * You can use this together with NPY_RAW_ITER_START and
+ * NPY_RAW_ITER_FOUR_NEXT to handle the looping boilerplate of everything
+ * but the innermost loop (which is for idim == 0).
+ *
+ * Returns 0 on success, -1 on failure.
+ */
+NPY_NO_EXPORT int
+PyArray_PrepareFourRawArrayIter(int ndim, npy_intp *shape,
+ char *dataA, npy_intp *stridesA,
+ char *dataB, npy_intp *stridesB,
+ char *dataC, npy_intp *stridesC,
+ char *dataD, npy_intp *stridesD,
+ int *out_ndim, npy_intp *out_shape,
+ char **out_dataA, npy_intp *out_stridesA,
+ char **out_dataB, npy_intp *out_stridesB,
+ char **out_dataC, npy_intp *out_stridesC,
+ char **out_dataD, npy_intp *out_stridesD);
+
/* Start raw iteration */
#define NPY_RAW_ITER_START(idim, ndim, coord, shape) \
memset((coord), 0, (ndim) * sizeof(coord[0])); \
@@ -467,6 +495,30 @@ PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
} \
} while ((idim) < (ndim))
+/* Advance four raw arrays to the next coordinate; pairs with NPY_RAW_ITER_START */
+#define NPY_RAW_ITER_FOUR_NEXT(idim, ndim, coord, shape, \
+ dataA, stridesA, \
+ dataB, stridesB, \
+ dataC, stridesC, \
+ dataD, stridesD) \
+ for ((idim) = 1; (idim) < (ndim); ++(idim)) { \
+ if (++(coord)[idim] == (shape)[idim]) { \
+ (coord)[idim] = 0; \
+ (dataA) -= ((shape)[idim] - 1) * (stridesA)[idim]; \
+ (dataB) -= ((shape)[idim] - 1) * (stridesB)[idim]; \
+ (dataC) -= ((shape)[idim] - 1) * (stridesC)[idim]; \
+ (dataD) -= ((shape)[idim] - 1) * (stridesD)[idim]; \
+ } \
+ else { \
+ (dataA) += (stridesA)[idim]; \
+ (dataB) += (stridesB)[idim]; \
+ (dataC) += (stridesC)[idim]; \
+ (dataD) += (stridesD)[idim]; \
+ break; \
+ } \
+ } \
+ } while ((idim) < (ndim))
+
/*
* TRIVIAL ITERATION