-rw-r--r--  .gitignore                                              |   4
-rw-r--r--  doc/release/1.9.0-notes.rst                             |  12
-rw-r--r--  numpy/core/fromnumeric.py                               |   2
-rw-r--r--  numpy/core/include/numpy/npy_3kcompat.h                 |  70
-rw-r--r--  numpy/core/numerictypes.py                              |   7
-rw-r--r--  numpy/core/src/multiarray/common.c                      |  14
-rw-r--r--  numpy/core/src/multiarray/item_selection.c              |  40
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src  |  11
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c            |  26
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src             |  31
-rw-r--r--  numpy/core/tests/test_abc.py                            |  45
-rw-r--r--  numpy/core/tests/test_datetime.py                       |   2
-rw-r--r--  numpy/core/tests/test_indexing.py                       |  22
-rw-r--r--  numpy/lib/function_base.py                              |  90
-rw-r--r--  numpy/lib/tests/test_function_base.py                   |  63
-rw-r--r--  numpy/lib/tests/test_twodim_base.py                     |  44
-rw-r--r--  numpy/lib/twodim_base.py                                |  62
17 files changed, 418 insertions, 127 deletions
diff --git a/.gitignore b/.gitignore
index a98742ffe..78a960f59 100644
--- a/.gitignore
+++ b/.gitignore
@@ -112,11 +112,15 @@ numpy/core/src/multiarray/scalartypes.c
numpy/core/src/npymath/ieee754.c
numpy/core/src/npymath/npy_math.c
numpy/core/src/npymath/npy_math_complex.c
+numpy/core/src/npysort/binsearch.c
numpy/core/src/npysort/heapsort.c
numpy/core/src/npysort/mergesort.c
numpy/core/src/npysort/quicksort.c
numpy/core/src/npysort/selection.c
numpy/core/src/npysort/sort.c
+numpy/core/src/private/npy_binsearch.h
+numpy/core/src/private/npy_partition.h
+numpy/core/src/private/scalarmathmodule.h
numpy/core/src/scalarmathmodule.c
numpy/core/src/umath/funcs.inc
numpy/core/src/umath/loops.c
diff --git a/doc/release/1.9.0-notes.rst b/doc/release/1.9.0-notes.rst
index f1d553952..aa0431c6e 100644
--- a/doc/release/1.9.0-notes.rst
+++ b/doc/release/1.9.0-notes.rst
@@ -105,6 +105,12 @@ for ``tostring`` which exports arrays as ``bytes``. This is more consistent
in Python 3 where ``str`` and ``bytes`` are not the same.
+Compatibility with the python ``numbers`` module
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+All numerical numpy types are now registered with the type hierarchy in
+the python ``numbers`` module.
+
+
Improvements
============
@@ -274,6 +280,12 @@ Non-integer scalars for sequence repetition
Using non-integer numpy scalars to repeat python sequences is deprecated.
For example ``np.float_(2) * [1]`` will be an error in the future.
+``select`` input deprecations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Integer and empty inputs to ``select`` are deprecated. In the future only
+boolean arrays will be valid conditions, and an empty ``condlist`` will be
+considered an input error instead of returning the default.
+
C-API
~~~~~
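
As a quick illustration of the ``select`` deprecation described in the notes
above (a minimal sketch, not part of the patch; array values are made up):

    import numpy as np

    x = np.arange(10)
    # boolean condition arrays remain the supported input
    np.select([x < 3, x > 5], [x, x**2], default=0)

    # 0/1 integer conditions now emit a DeprecationWarning; convert them first
    cond = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
    np.select([cond.astype(bool)], [x])

    # an empty condlist currently warns and returns the default,
    # and is slated to become an error
    np.select([], [], default=7)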
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 728c95294..3de81305d 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -584,7 +584,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
The various selection algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative order. The
- three available algorithms have the following properties:
+ available algorithms have the following properties:
================= ======= ============= ============ =======
kind speed worst case work space stable
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 36b1def4b..fec95779a 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -163,8 +163,10 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
return NULL;
}
- /* The handle needs to be dup'd because we have to call fclose
- at the end */
+ /*
+ * The handle needs to be dup'd because we have to call fclose
+ * at the end
+ */
os = PyImport_ImportModule("os");
if (os == NULL) {
return NULL;
@@ -231,8 +233,10 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)
/* Close the FILE* handle */
fclose(handle);
- /* Restore original file handle position, in order to not confuse
- Python-side data structures */
+ /*
+ * Restore original file handle position, in order to not confuse
+ * Python-side data structures
+ */
fd = PyObject_AsFileDescriptor(file);
if (fd == -1) {
return -1;
@@ -270,24 +274,6 @@ npy_PyFile_Check(PyObject *file)
/*
* DEPRECATED DO NOT USE
- * use npy_PyFile_Dup2 instead
- * this function will mess ups python3 internal file object buffering
- * Get a FILE* handle to the file represented by the Python object
- */
-static NPY_INLINE FILE*
-npy_PyFile_Dup(PyObject *file, char *mode)
-{
- npy_off_t orig;
- if (DEPRECATE("npy_PyFile_Dup is deprecated, use "
- "npy_PyFile_Dup2") < 0) {
- return NULL;
- }
-
- return npy_PyFile_Dup2(file, mode, &orig);
-}
-
-/*
- * DEPRECATED DO NOT USE
* use npy_PyFile_DupClose2 instead
* this function will mess ups python3 internal file object buffering
* Close the dup-ed file handle, and seek the Python one to the current position
@@ -311,16 +297,46 @@ npy_PyFile_DupClose(PyObject *file, FILE* handle)
#else
-/* DEPRECATED DO NOT USE */
-#define npy_PyFile_Dup(file, mode) PyFile_AsFile(file)
-#define npy_PyFile_DupClose(file, handle) (0)
+/* DEPRECATED, DO NOT USE */
+#define npy_PyFile_DupClose(f, h, p) npy_PyFile_DupClose2((f), (h), (p))
+
/* use these */
-#define npy_PyFile_Dup2(file, mode, orig_pos_p) PyFile_AsFile(file)
-#define npy_PyFile_DupClose2(file, handle, orig_pos) (0)
+static NPY_INLINE FILE *
+npy_PyFile_Dup2(PyObject *file,
+ const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos))
+{
+ return PyFile_AsFile(file);
+}
+
+static NPY_INLINE int
+npy_PyFile_DupClose2(PyObject *NPY_UNUSED(file), FILE* NPY_UNUSED(handle),
+ npy_off_t NPY_UNUSED(orig_pos))
+{
+ return 0;
+}
+
#define npy_PyFile_Check PyFile_Check
#endif
+/*
+ * DEPRECATED, DO NOT USE
+ * Use npy_PyFile_Dup2 instead.
+ * This function will mess up python3 internal file object buffering.
+ * Get a FILE* handle to the file represented by the Python object.
+ */
+static NPY_INLINE FILE*
+npy_PyFile_Dup(PyObject *file, char *mode)
+{
+ npy_off_t orig;
+ if (DEPRECATE("npy_PyFile_Dup is deprecated, use "
+ "npy_PyFile_Dup2") < 0) {
+ return NULL;
+ }
+
+ return npy_PyFile_Dup2(file, mode, &orig);
+}
+
static NPY_INLINE PyObject*
npy_PyFile_OpenFile(PyObject *filename, const char *mode)
{
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 8dc4ca75e..1545bc734 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -98,6 +98,7 @@ from numpy.core.multiarray import (
import types as _types
import sys
from numpy.compat import bytes, long
+import numbers
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
@@ -960,6 +961,12 @@ def _can_coerce_all(dtypelist, start=0):
thisind += 1
return None
+def _register_types():
+ numbers.Integral.register(integer)
+ numbers.Complex.register(inexact)
+ numbers.Real.register(floating)
+_register_types()
+
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
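
The effect of ``_register_types`` above can be checked directly against the
abstract base classes in ``numbers`` (a small sketch, assuming a NumPy build
with this patch applied):

    import numbers
    import numpy as np

    assert isinstance(np.int32(1), numbers.Integral)       # integer    -> Integral
    assert isinstance(np.float64(1.0), numbers.Real)       # floating   -> Real
    assert isinstance(np.complex128(1j), numbers.Complex)  # inexact    -> Complex
    assert issubclass(np.floating, numbers.Real)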
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 0e8a21394..1729d50e2 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -650,14 +650,12 @@ _IsAligned(PyArrayObject *ap)
{
unsigned int i;
npy_uintp aligned;
- const unsigned int alignment = PyArray_DESCR(ap)->alignment;
-
- /* The special casing for STRING and VOID types was removed
- * in accordance with http://projects.scipy.org/numpy/ticket/1227
- * It used to be that IsAligned always returned True for these
- * types, which is indeed the case when they are created using
- * PyArray_DescrConverter(), but not necessarily when using
- * PyArray_DescrAlignConverter(). */
+ npy_uintp alignment = PyArray_DESCR(ap)->alignment;
+
+    /* alignment 1 types should have an efficient alignment for copy loops */
+ if (PyArray_ISFLEXIBLE(ap) || PyArray_ISSTRING(ap)) {
+ alignment = 16;
+ }
if (alignment == 1) {
return 1;
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index e4126109e..d6e0980c6 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -2235,41 +2235,51 @@ PyArray_Compress(PyArrayObject *self, PyObject *condition, int axis,
}
/*
- * count number of nonzero bytes in 16 byte block
+ * count number of nonzero bytes in 48 byte block
* w must be aligned to 8 bytes
*
* even though it uses 64 bit types its faster than the bytewise sum on 32 bit
* but a 32 bit type version would make it even faster on these platforms
*/
-static NPY_INLINE int
-count_nonzero_bytes_128(const npy_uint64 * w)
+static NPY_INLINE npy_intp
+count_nonzero_bytes_384(const npy_uint64 * w)
{
const npy_uint64 w1 = w[0];
const npy_uint64 w2 = w[1];
+ const npy_uint64 w3 = w[2];
+ const npy_uint64 w4 = w[3];
+ const npy_uint64 w5 = w[4];
+ const npy_uint64 w6 = w[5];
+ npy_intp r;
+
+ /*
+ * last part of sideways add popcount, first three bisections can be
+ * skipped as we are dealing with bytes.
+ * multiplication equivalent to (x + (x>>8) + (x>>16) + (x>>24)) & 0xFF
+ * multiplication overflow well defined for unsigned types.
+ * w1 + w2 guaranteed to not overflow as we only have 0 and 1 data.
+ */
+ r = ((w1 + w2 + w3 + w4 + w5 + w6) * 0x0101010101010101ULL) >> 56ULL;
/*
* bytes not exclusively 0 or 1, sum them individually.
* should only happen if one does weird stuff with views or external
* buffers.
+ * Doing this after the optimistic computation allows saving registers and
+ * better pipelining
*/
- if (NPY_UNLIKELY(((w1 | w2) & 0xFEFEFEFEFEFEFEFEULL) != 0)) {
+ if (NPY_UNLIKELY(
+ ((w1 | w2 | w3 | w4 | w5 | w6) & 0xFEFEFEFEFEFEFEFEULL) != 0)) {
/* reload from pointer to avoid a unnecessary stack spill with gcc */
const char * c = (const char *)w;
npy_uintp i, count = 0;
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 48; i++) {
count += (c[i] != 0);
}
return count;
}
- /*
- * last part of sideways add popcount, first three bisections can be
- * skipped as we are dealing with bytes.
- * multiplication equivalent to (x + (x>>8) + (x>>16) + (x>>24)) & 0xFF
- * multiplication overflow well defined for unsigned types.
- * w1 + w2 guaranteed to not overflow as we only have 0 and 1 data.
- */
- return ((w1 + w2) * 0x0101010101010101ULL) >> 56ULL;
+ return r;
}
/*
@@ -2311,9 +2321,9 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides)
const char *e = data + shape[0];
if (NPY_CPU_HAVE_UNALIGNED_ACCESS ||
npy_is_aligned(d, sizeof(npy_uint64))) {
- npy_uintp stride = 2 * sizeof(npy_uint64);
+ npy_uintp stride = 6 * sizeof(npy_uint64);
for (; d < e - (shape[0] % stride); d += stride) {
- count += count_nonzero_bytes_128((const npy_uint64 *)d);
+ count += count_nonzero_bytes_384((const npy_uint64 *)d);
}
}
for (; d < e; ++d) {
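
The sideways-add trick used in ``count_nonzero_bytes_384`` above can be
mimicked with plain Python integer arithmetic; a rough sketch (the helper
name is made up, and 64-bit wraparound is emulated with a mask):

    MASK64 = 0xFFFFFFFFFFFFFFFF

    def count_ones_in_words(words):
        # each word packs 8 bytes that are all 0 or 1, as in a boolean array;
        # the per-byte totals of the sum stay <= len(words), so no byte overflows
        s = sum(words) & MASK64
        # multiplying by 0x0101010101010101 accumulates every byte into the top
        # byte, so shifting right by 56 yields the total number of nonzero bytes
        return ((s * 0x0101010101010101) & MASK64) >> 56

    words = [0x0101010001010101, 0x0000000101010100, 0, 1, 0x0101000000000000, 0x01]
    assert count_ones_in_words(words) == sum(bin(w).count("1") for w in words)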
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index e3d0c4b88..02920014b 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1429,6 +1429,7 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
default:
#endif
while (itersize--) {
+ assert(npy_is_aligned(ind_ptr, _ALIGN(npy_intp)));
indval = *((npy_intp*)ind_ptr);
#if @isget@
if (check_and_adjust_index(&indval, fancy_dim, 1, _save) < 0 ) {
@@ -1443,6 +1444,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#if @isget@
#if @elsize@
+ assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
*(@copytype@ *)result_ptr = *(@copytype@ *)self_ptr;
#else
copyswap(result_ptr, self_ptr, 0, self);
@@ -1450,6 +1453,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#else /* !@isget@ */
#if @elsize@
+ assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)result_ptr;
#else
copyswap(self_ptr, result_ptr, 0, self);
@@ -1567,6 +1572,8 @@ mapiter_@name@(PyArrayMapIterObject *mit)
while (count--) {
self_ptr = baseoffset;
for (i=0; i < @numiter@; i++) {
+ assert(npy_is_aligned(outer_ptrs[i],
+ _ALIGN(npy_intp)));
indval = *((npy_intp*)outer_ptrs[i]);
#if @isget@ && @one_iter@
@@ -1587,12 +1594,16 @@ mapiter_@name@(PyArrayMapIterObject *mit)
#if @isget@
#if @elsize@
+ assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
*(@copytype@ *)(outer_ptrs[i]) = *(@copytype@ *)self_ptr;
#else
copyswap(outer_ptrs[i], self_ptr, 0, array);
#endif
#else /* !@isget@ */
#if @elsize@
+ assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)(outer_ptrs[i]);
#else
copyswap(self_ptr, outer_ptrs[i], 0, array);
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 6f4e1bf09..0de7dff9d 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -2992,8 +2992,8 @@ static PyObject *
array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
npy_intp i, len, narr = 0, ndtypes = 0;
- PyArrayObject *arr[NPY_MAXARGS];
- PyArray_Descr *dtypes[NPY_MAXARGS];
+ PyArrayObject **arr = NULL;
+ PyArray_Descr **dtypes = NULL;
PyObject *ret = NULL;
len = PyTuple_GET_SIZE(args);
@@ -3003,25 +3003,21 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
goto finish;
}
+ arr = PyArray_malloc(2 * len * sizeof(void *));
+ if (arr == NULL) {
+ return PyErr_NoMemory();
+ }
+ dtypes = (PyArray_Descr**)&arr[len];
+
for (i = 0; i < len; ++i) {
PyObject *obj = PyTuple_GET_ITEM(args, i);
if (PyArray_Check(obj)) {
- if (narr == NPY_MAXARGS) {
- PyErr_SetString(PyExc_ValueError,
- "too many arguments");
- goto finish;
- }
Py_INCREF(obj);
arr[narr] = (PyArrayObject *)obj;
++narr;
}
else if (PyArray_IsScalar(obj, Generic) ||
PyArray_IsPythonNumber(obj)) {
- if (narr == NPY_MAXARGS) {
- PyErr_SetString(PyExc_ValueError,
- "too many arguments");
- goto finish;
- }
arr[narr] = (PyArrayObject *)PyArray_FromAny(obj,
NULL, 0, 0, 0, NULL);
if (arr[narr] == NULL) {
@@ -3030,11 +3026,6 @@ array_result_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
++narr;
}
else {
- if (ndtypes == NPY_MAXARGS) {
- PyErr_SetString(PyExc_ValueError,
- "too many arguments");
- goto finish;
- }
if (!PyArray_DescrConverter(obj, &dtypes[ndtypes])) {
goto finish;
}
@@ -3051,6 +3042,7 @@ finish:
for (i = 0; i < ndtypes; ++i) {
Py_DECREF(dtypes[i]);
}
+ PyArray_free(arr);
return ret;
}
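
The switch from fixed ``NPY_MAXARGS`` stack arrays to a heap allocation above
lifts the 32-argument limit on ``np.result_type``; a small sketch of what this
enables (illustrative operands only):

    import numpy as np

    # previously more than NPY_MAXARGS (32) operands raised "too many arguments"
    args = [np.int8] * 40 + [np.float32]
    print(np.result_type(*args))   # float32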
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index afae70fd5..a1c95995a 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -1121,6 +1121,21 @@ voidtype_dtypedescr_get(PyVoidScalarObject *self)
static PyObject *
+inttype_numerator_get(PyObject *self)
+{
+ Py_INCREF(self);
+ return self;
+}
+
+
+static PyObject *
+inttype_denominator_get(PyObject *self)
+{
+ return PyInt_FromLong(1);
+}
+
+
+static PyObject *
gentype_data_get(PyObject *self)
{
#if defined(NPY_PY3K)
@@ -2064,6 +2079,20 @@ static PyMethodDef voidtype_methods[] = {
{NULL, NULL, 0, NULL}
};
+static PyGetSetDef inttype_getsets[] = {
+ {"numerator",
+ (getter)inttype_numerator_get,
+ (setter)0,
+ "numerator of value (the value itself)",
+ NULL},
+ {"denominator",
+ (getter)inttype_denominator_get,
+ (setter)0,
+ "denominator of value (1)",
+ NULL},
+ {NULL, NULL, NULL, NULL, NULL}
+};
+
/**begin repeat
* #name = cfloat,clongdouble#
*/
@@ -4030,6 +4059,8 @@ initialize_numeric_types(void)
PyVoidArrType_Type.tp_as_mapping = &voidtype_as_mapping;
PyVoidArrType_Type.tp_as_sequence = &voidtype_as_sequence;
+ PyIntegerArrType_Type.tp_getset = inttype_getsets;
+
/**begin repeat
* #NAME= Number, Integer, SignedInteger, UnsignedInteger, Inexact,
* Floating, ComplexFloating, Flexible, Character#
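
The new ``numerator``/``denominator`` getsets give integer scalars the
attributes that ``numbers.Integral``-style code expects; a minimal sketch of
the resulting behaviour:

    import numpy as np

    x = np.int16(7)
    assert x.numerator == 7     # the value itself
    assert x.denominator == 1   # always 1 for an integer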
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
new file mode 100644
index 000000000..54edd7e90
--- /dev/null
+++ b/numpy/core/tests/test_abc.py
@@ -0,0 +1,45 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+from numpy.testing import TestCase, assert_
+
+import numbers
+from numpy.core.numerictypes import sctypes
+
+class ABC(TestCase):
+ def test_floats(self):
+ for t in sctypes['float']:
+ assert_(isinstance(t(), numbers.Real),
+ "{0} is not instance of Real".format(t.__name__))
+ assert_(issubclass(t, numbers.Real),
+ "{0} is not subclass of Real".format(t.__name__))
+ assert_(not isinstance(t(), numbers.Rational),
+ "{0} is instance of Rational".format(t.__name__))
+ assert_(not issubclass(t, numbers.Rational),
+ "{0} is subclass of Rational".format(t.__name__))
+
+ def test_complex(self):
+ for t in sctypes['complex']:
+ assert_(isinstance(t(), numbers.Complex),
+ "{0} is not instance of Complex".format(t.__name__))
+ assert_(issubclass(t, numbers.Complex),
+ "{0} is not subclass of Complex".format(t.__name__))
+ assert_(not isinstance(t(), numbers.Real),
+ "{0} is instance of Real".format(t.__name__))
+ assert_(not issubclass(t, numbers.Real),
+ "{0} is subclass of Real".format(t.__name__))
+
+ def test_int(self):
+ for t in sctypes['int']:
+ assert_(isinstance(t(), numbers.Integral),
+ "{0} is not instance of Integral".format(t.__name__))
+ assert_(issubclass(t, numbers.Integral),
+ "{0} is not subclass of Integral".format(t.__name__))
+
+ def test_uint(self):
+ for t in sctypes['uint']:
+ assert_(isinstance(t(), numbers.Integral),
+ "{0} is not instance of Integral".format(t.__name__))
+ assert_(issubclass(t, numbers.Integral),
+ "{0} is not subclass of Integral".format(t.__name__))
+
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index bfc7237a4..bf0ba6807 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -595,7 +595,7 @@ class TestDateTime(TestCase):
def test_cast_overflow(self):
# gh-4486
def cast():
- numpy.datetime64("1970-01-01 00:00:00.000000000000000").astype("<M8[D]")
+ numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 736210722..f09940af7 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -336,6 +336,28 @@ class TestIndexing(TestCase):
assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)
+ def test_unaligned(self):
+ v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
+ d = v.view(np.dtype("S8"))
+ # unaligned source
+ x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
+ x = x.view(np.dtype("S8"))
+ x[...] = np.array("b" * 8, dtype="S")
+ b = np.arange(d.size)
+ #trivial
+ assert_equal(d[b], d)
+ d[b] = x
+ # nontrivial
+ # unaligned index array
+ b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
+ b = b.view(np.intp)[:d.size]
+ b[...] = np.arange(d.size)
+ assert_equal(d[b.astype(np.int16)], d)
+ d[b.astype(np.int16)] = x
+ # boolean
+ d[b % 2 == 0]
+ d[b % 2 == 0] = x[::2]
+
class TestFieldIndexing(TestCase):
def test_scalar_return_type(self):
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index edce15776..df5876715 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -771,29 +771,68 @@ def select(condlist, choicelist, default=0):
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
- n = len(condlist)
- n2 = len(choicelist)
- if n2 != n:
+    # Check that condlist and choicelist have the same length, or abort.
+ if len(condlist) != len(choicelist):
raise ValueError(
- "list of cases must be same length as list of conditions")
- choicelist = [default] + choicelist
- S = 0
- pfac = 1
- for k in range(1, n+1):
- S += k * pfac * asarray(condlist[k-1])
- if k < n:
- pfac *= (1-asarray(condlist[k-1]))
- # handle special case of a 1-element condition but
- # a multi-element choice
- if type(S) in ScalarType or max(asarray(S).shape) == 1:
- pfac = asarray(1)
- for k in range(n2+1):
- pfac = pfac + asarray(choicelist[k])
- if type(S) in ScalarType:
- S = S*ones(asarray(pfac).shape, type(S))
- else:
- S = S*ones(asarray(pfac).shape, S.dtype)
- return choose(S, tuple(choicelist))
+ 'list of cases must be same length as list of conditions')
+
+    # Handle the deprecated select([], []) case
+ if len(condlist) == 0:
+ warnings.warn("select with an empty condition list is not possible"
+ "and will be deprecated",
+ DeprecationWarning)
+ return np.asarray(default)[()]
+
+ choicelist = [np.asarray(choice) for choice in choicelist]
+ choicelist.append(np.asarray(default))
+
+ # need to get the result type before broadcasting for correct scalar
+ # behaviour
+ dtype = np.result_type(*choicelist)
+
+ # Convert conditions to arrays and broadcast conditions and choices
+    # as the shape is needed for the result. Doing it separately optimizes
+ # for example when all choices are scalars.
+ condlist = np.broadcast_arrays(*condlist)
+ choicelist = np.broadcast_arrays(*choicelist)
+
+ # If cond array is not an ndarray in boolean format or scalar bool, abort.
+ deprecated_ints = False
+ for i in range(len(condlist)):
+ cond = condlist[i]
+ if cond.dtype.type is not np.bool_:
+ if np.issubdtype(cond.dtype, np.integer):
+ # A previous implementation accepted int ndarrays accidentally.
+ # Supported here deliberately, but deprecated.
+ condlist[i] = condlist[i].astype(bool)
+ deprecated_ints = True
+ else:
+ raise ValueError(
+ 'invalid entry in choicelist: should be boolean ndarray')
+
+ if deprecated_ints:
+ msg = "select condlists containing integer ndarrays is deprecated " \
+ "and will be removed in the future. Use `.astype(bool)` to " \
+ "convert to bools."
+ warnings.warn(msg, DeprecationWarning)
+
+ if choicelist[0].ndim == 0:
+ # This may be common, so avoid the call.
+ result_shape = condlist[0].shape
+ else:
+ result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape
+
+ result = np.full(result_shape, choicelist[-1], dtype)
+
+ # Use np.copyto to burn each choicelist array onto result, using the
+ # corresponding condlist as a boolean mask. This is done in reverse
+ # order since the first choice should take precedence.
+ choicelist = choicelist[-2::-1]
+ condlist = condlist[::-1]
+ for choice, cond in zip(choicelist, condlist):
+ np.copyto(result, choice, where=cond)
+
+ return result
def copy(a, order='K'):
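
A short sketch of the semantics the rewritten ``select`` above implements
(the first matching condition wins because ``np.copyto`` is applied in reverse
order, and scalar choices broadcast); values are illustrative only:

    import numpy as np

    x = np.arange(6)
    # both conditions hold for 0 and 1, but the first listed choice wins
    np.select([x < 4, x < 2], [x, x * 100], default=-1)
    # -> array([ 0,  1,  2,  3, -1, -1])

    # scalar choices are broadcast to the condition's shape
    np.select([x % 2 == 0], [1], default=0)
    # -> array([1, 0, 1, 0, 1, 0])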
@@ -3240,7 +3279,7 @@ def meshgrid(*xi, **kwargs):
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
-
+
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
@@ -3291,9 +3330,8 @@ def meshgrid(*xi, **kwargs):
for i in range(nx):
for j in range(ny):
# treat xv[j,i], yv[j,i]
-
- In the 1-D and 0-D case, the indexing and sparse keywords have no
- effect.
+
+ In the 1-D and 0-D case, the indexing and sparse keywords have no effect.
See Also
--------
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 9a26ce5a3..399a5a308 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -150,6 +150,13 @@ class TestAverage(TestCase):
class TestSelect(TestCase):
+ choices = [np.array([1, 2, 3]),
+ np.array([4, 5, 6]),
+ np.array([7, 8, 9])]
+ conditions = [np.array([False, False, False]),
+ np.array([False, True, False]),
+ np.array([False, False, True])]
+
def _select(self, cond, values, default=0):
output = []
for m in range(len(cond)):
@@ -157,18 +164,62 @@ class TestSelect(TestCase):
return output
def test_basic(self):
- choices = [np.array([1, 2, 3]),
- np.array([4, 5, 6]),
- np.array([7, 8, 9])]
- conditions = [np.array([0, 0, 0]),
- np.array([0, 1, 0]),
- np.array([0, 0, 1])]
+ choices = self.choices
+ conditions = self.conditions
assert_array_equal(select(conditions, choices, default=15),
self._select(conditions, choices, default=15))
assert_equal(len(choices), 3)
assert_equal(len(conditions), 3)
+ def test_broadcasting(self):
+ conditions = [np.array(True), np.array([False, True, False])]
+ choices = [1, np.arange(12).reshape(4, 3)]
+ assert_array_equal(select(conditions, choices), np.ones((4, 3)))
+ # default can broadcast too:
+ assert_equal(select([True], [0], default=[0]).shape, (1,))
+
+ def test_return_dtype(self):
+ assert_equal(select(self.conditions, self.choices, 1j).dtype,
+ np.complex_)
+        # But the conditions need to be stronger than the scalar default
+ # if it is scalar.
+ choices = [choice.astype(np.int8) for choice in self.choices]
+ assert_equal(select(self.conditions, choices).dtype, np.int8)
+
+ d = np.array([1, 2, 3, np.nan, 5, 7])
+ m = np.isnan(d)
+ assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
+
+ def test_deprecated_empty(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter("always")
+ assert_equal(select([], [], 3j), 3j)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, select, [], [])
+ warnings.simplefilter("error")
+ assert_raises(DeprecationWarning, select, [], [])
+
+ def test_non_bool_deprecation(self):
+ choices = self.choices
+ conditions = self.conditions[:]
+ with warnings.catch_warnings():
+ warnings.filterwarnings("always")
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_warns(DeprecationWarning, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_warns(DeprecationWarning, select, conditions, choices)
+ warnings.filterwarnings("error")
+ assert_raises(DeprecationWarning, select, conditions, choices)
+
+ def test_many_arguments(self):
+ # This used to be limited by NPY_MAXARGS == 32
+ conditions = [np.array([False])] * 100
+ choices = [np.array([1])] * 100
+ select(conditions, choices)
+
class TestInsert(TestCase):
def test_basic(self):
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 022c45bd0..9e81cfe4b 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -286,6 +286,7 @@ def test_tril_triu_ndim2():
yield assert_equal, b.dtype, a.dtype
yield assert_equal, c.dtype, a.dtype
+
def test_tril_triu_ndim3():
for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
a = np.array([
@@ -324,16 +325,21 @@ def test_mask_indices():
def test_tril_indices():
# indices without and with offset
il1 = tril_indices(4)
- il2 = tril_indices(4, 2)
+ il2 = tril_indices(4, k=2)
+ il3 = tril_indices(4, m=5)
+ il4 = tril_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
# indexing:
yield (assert_array_equal, a[il1],
array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ yield (assert_array_equal, b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
@@ -342,7 +348,12 @@ def test_tril_indices():
[-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]]))
-
+ b[il3] = -1
+ yield (assert_array_equal, b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
yield (assert_array_equal, a,
@@ -350,21 +361,32 @@ def test_tril_indices():
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]]))
+ b[il4] = -10
+ yield (assert_array_equal, b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
def test_triu_indices(self):
iu1 = triu_indices(4)
- iu2 = triu_indices(4, 2)
+ iu2 = triu_indices(4, k=2)
+ iu3 = triu_indices(4, m=5)
+ iu4 = triu_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
+ b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
yield (assert_array_equal, a[iu1],
array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ yield (assert_array_equal, b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
@@ -373,6 +395,12 @@ class TestTriuIndices(object):
[5, -1, -1, -1],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
+ b[iu3] = -1
+ yield (assert_array_equal, b,
+ array([[-1, -1, -1, -1, -1],
+ [ 6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
@@ -382,20 +410,26 @@ class TestTriuIndices(object):
[5, -1, -1, -10],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
+ b[iu4] = -10
+ yield (assert_array_equal, b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, tril_indices_from, np.ones((2,)))
assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
- assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
+ # assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
class TestTriuIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, triu_indices_from, np.ones((2,)))
assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
- assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
+ # assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
class TestVander(object):
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index d168e0fca..5a0c0e7ee 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -11,10 +11,11 @@ __all__ = ['diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri',
from numpy.core.numeric import (
asanyarray, subtract, arange, zeros, greater_equal, multiply, ones,
- asarray, where,
+ asarray, where, dtype as np_dtype, less
)
+
def fliplr(m):
"""
Flip array in the left/right direction.
@@ -372,6 +373,7 @@ def tri(N, M=None, k=0, dtype=float):
dtype : dtype, optional
Data type of the returned array. The default is float.
+
Returns
-------
tri : ndarray of shape (N, M)
@@ -393,8 +395,14 @@ def tri(N, M=None, k=0, dtype=float):
"""
if M is None:
M = N
- m = greater_equal(subtract.outer(arange(N), arange(M)), -k)
- return m.astype(dtype)
+
+ m = greater_equal.outer(arange(N), arange(-k, M-k))
+
+ # Avoid making a copy if the requested type is already bool
+ if np_dtype(dtype) != np_dtype(bool):
+ m = m.astype(dtype)
+
+ return m
def tril(m, k=0):
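
The reformulated ``tri`` above is equivalent to the old ``subtract.outer``
expression, and requesting ``bool`` now skips the ``astype`` copy; a quick
check (a sketch, not part of the patch):

    import numpy as np

    N, M, k = 4, 5, 1
    old = np.greater_equal(np.subtract.outer(np.arange(N), np.arange(M)), -k)
    new = np.greater_equal.outer(np.arange(N), np.arange(-k, M - k))
    assert np.array_equal(old, new)

    np.tri(N, M, k=k, dtype=bool)   # returned directly, no astype copy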
@@ -430,8 +438,7 @@ def tril(m, k=0):
"""
m = asanyarray(m)
- out = multiply(tri(m.shape[-2], m.shape[-1], k=k, dtype=m.dtype), m)
- return out
+ return multiply(tri(*m.shape[-2:], k=k, dtype=bool), m, dtype=m.dtype)
def triu(m, k=0):
@@ -457,8 +464,7 @@ def triu(m, k=0):
"""
m = asanyarray(m)
- out = multiply((1 - tri(m.shape[-2], m.shape[-1], k - 1, dtype=m.dtype)), m)
- return out
+ return multiply(~tri(*m.shape[-2:], k=k-1, dtype=bool), m, dtype=m.dtype)
# Originally borrowed from John Hunter and matplotlib
@@ -757,17 +763,24 @@ def mask_indices(n, mask_func, k=0):
return where(a != 0)
-def tril_indices(n, k=0):
+def tril_indices(n, k=0, m=None):
"""
- Return the indices for the lower-triangle of an (n, n) array.
+ Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
- The row dimension of the square arrays for which the returned
+ The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
+ m : int, optional
+ .. versionadded:: 1.9.0
+
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
Returns
-------
@@ -827,7 +840,7 @@ def tril_indices(n, k=0):
[-10, -10, -10, -10]])
"""
- return mask_indices(n, tril, k)
+ return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
@@ -853,14 +866,14 @@ def tril_indices_from(arr, k=0):
.. versionadded:: 1.4.0
"""
- if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
- raise ValueError("input array must be 2-d and square")
- return tril_indices(arr.shape[0], k)
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
-def triu_indices(n, k=0):
+def triu_indices(n, k=0, m=None):
"""
- Return the indices for the upper-triangle of an (n, n) array.
+ Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
@@ -869,6 +882,13 @@ def triu_indices(n, k=0):
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
+ m : int, optional
+ .. versionadded:: 1.9.0
+
+ The column dimension of the arrays for which the returned
+ arrays will be valid.
+ By default `m` is taken equal to `n`.
+
Returns
-------
@@ -930,12 +950,12 @@ def triu_indices(n, k=0):
[ 12, 13, 14, -1]])
"""
- return mask_indices(n, triu, k)
+ return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
- Return the indices for the upper-triangle of a (N, N) array.
+ Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
@@ -960,6 +980,6 @@ def triu_indices_from(arr, k=0):
.. versionadded:: 1.4.0
"""
- if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
- raise ValueError("input array must be 2-d and square")
- return triu_indices(arr.shape[0], k)
+ if arr.ndim != 2:
+ raise ValueError("input array must be 2-d")
+ return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
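
A usage sketch of the new ``m`` keyword and the relaxed ``*_indices_from``
functions introduced above (values are illustrative only):

    import numpy as np

    a = np.arange(1, 21).reshape(4, 5)
    rows, cols = np.tril_indices(4, k=0, m=5)   # m is new in 1.9.0
    a[rows, cols]                               # lower triangle of the 4x5 array

    # non-square 2-d arrays are now accepted by *_indices_from
    np.triu_indices_from(a, k=1)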