author     mattip <matti.picus@gmail.com>  2019-02-13 18:14:05 +0200
committer  mattip <matti.picus@gmail.com>  2019-05-11 09:44:05 -0700
commit     c2de04da9b0feb2853b9814c7953aa51a29a058e
tree       6cf444560593dd835df2e037ccc915a464531036 /numpy/core
parent     634d66d5c5461c6c1480e97c1d3fbc4c4ea96a00
ENH: add 'order' keyword to packbits, unpackbits
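For reference, a minimal sketch of the behavior this patch adds (illustrative only, not part of the diff; the 'order' keyword and the results shown assume a build of this branch):

import numpy as np

bits = np.array([1, 1, 0, 0, 0, 0, 0, 0], dtype=np.uint8)
np.packbits(bits)                  # default order='big', MSB first -> array([192], dtype=uint8)
np.packbits(bits, order='little')  # LSB first -> array([3], dtype=uint8)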
Diffstat (limited to 'numpy/core')
-rw-r--r--  numpy/core/multiarray.py                   |  16
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c  |  98
2 files changed, 90 insertions, 24 deletions
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index a839ae402..8ea71cde4 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -8,6 +8,7 @@ by importing from the extension module.
import functools
import warnings
+import sys
from . import overrides
from . import _multiarray_umath
@@ -1113,7 +1114,7 @@ def putmask(a, mask, values):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
-def packbits(a, axis=None):
+def packbits(a, axis=None, order='big'):
"""
packbits(a, axis=None)
@@ -1129,6 +1130,11 @@ def packbits(a, axis=None):
axis : int, optional
The dimension over which bit-packing is done.
``None`` implies packing the flattened array.
+ order : {'big', 'little'}, optional
+ The order of the input bits; only the first letter is checked. The
+ default is 'big', the common convention where [0, 0, 0, 0, 0, 0, 1, 1] => 3
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1164,7 +1170,7 @@ def packbits(a, axis=None):
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
-def unpackbits(a, axis=None, count=None):
+def unpackbits(a, axis=None, count=None, order='big'):
"""
unpackbits(a, axis=None, count=None)
@@ -1194,6 +1200,12 @@ def unpackbits(a, axis=None, count=None):
.. versionadded:: 1.17.0
+ order : {'big', 'little'}, optional
+ The order of the returned bits; only the first letter is checked. The
+ default is 'big', the common convention where 3 => [0, 0, 0, 0, 0, 0, 1, 1]
+
+ .. versionadded:: 1.17.0
+
Returns
-------
unpacked : ndarray, uint8 type
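To illustrate the docstring example above (a sketch, assuming this branch): the byte 3 unpacks MSB-first by default and LSB-first with order='little'.

import numpy as np

a = np.array([3], dtype=np.uint8)
np.unpackbits(a)                   # -> array([0, 0, 0, 0, 0, 0, 1, 1], dtype=uint8)
np.unpackbits(a, order='little')   # -> array([1, 1, 0, 0, 0, 0, 0, 0], dtype=uint8)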
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 8e70d6cf1..8376c3c29 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -1511,7 +1511,8 @@ pack_inner(const char *inptr,
npy_intp in_stride,
char *outptr,
npy_intp n_out,
- npy_intp out_stride)
+ npy_intp out_stride,
+ char order)
{
/*
* Loop through the elements of inptr.
@@ -1531,9 +1532,13 @@ pack_inner(const char *inptr,
vn_out -= (vn_out & 1);
for (index = 0; index < vn_out; index += 2) {
unsigned int r;
- /* swap as packbits is "big endian", note x86 can load unaligned */
- npy_uint64 a = npy_bswap8(*(npy_uint64*)inptr);
- npy_uint64 b = npy_bswap8(*(npy_uint64*)(inptr + 8));
+ npy_uint64 a = *(npy_uint64*)inptr;
+ npy_uint64 b = *(npy_uint64*)(inptr + 8);
+ if (order == 'b') {
+ /* swap as packbits is "big endian", note x86 can load unaligned */
+ a = npy_bswap8(a);
+ b = npy_bswap8(b);
+ }
__m128i v = _mm_set_epi64(_m_from_int64(b), _m_from_int64(a));
/* false -> 0x00 and true -> 0xFF (there is no cmpneq) */
v = _mm_cmpeq_epi8(v, zero);
@@ -1570,13 +1575,25 @@ pack_inner(const char *inptr,
if (index == n_out - 1) {
build <<= 8 - remain;
}
+ if (order == 'l') {
+ char tmp = 0;
+ tmp |= build & 1 ? 128 : 0;
+ tmp |= build & 2 ? 64 : 0;
+ tmp |= build & 4 ? 32 : 0;
+ tmp |= build & 8 ? 16 : 0;
+ tmp |= build & 16 ? 8 : 0;
+ tmp |= build & 32 ? 4 : 0;
+ tmp |= build & 64 ? 2 : 0;
+ tmp |= build & 128 ? 1 : 0;
+ build = tmp;
+ }
*outptr = build;
outptr += out_stride;
}
}
static PyObject *
-pack_bits(PyObject *input, int axis)
+pack_bits(PyObject *input, int axis, char order)
{
PyArrayObject *inp;
PyArrayObject *new = NULL;
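A rough Python model of the order == 'l' branch above (not code from the patch): the scalar tail accumulates bits MSB-first into 'build', so for little-endian output the finished byte is bit-reversed before it is stored.

def reverse_byte(build):
    # Mirror bit i into bit 7 - i, as the chain of tmp |= ... does in C.
    tmp = 0
    for i in range(8):
        if build & (1 << i):
            tmp |= 1 << (7 - i)
    return tmp

assert reverse_byte(0b11000000) == 0b00000011   # 192 -> 3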
@@ -1661,7 +1678,7 @@ pack_bits(PyObject *input, int axis)
pack_inner(PyArray_ITER_DATA(it), PyArray_ITEMSIZE(new),
PyArray_DIM(new, axis), PyArray_STRIDE(new, axis),
PyArray_ITER_DATA(ot), PyArray_DIM(out, axis),
- PyArray_STRIDE(out, axis));
+ PyArray_STRIDE(out, axis), order);
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(ot);
}
@@ -1680,11 +1697,14 @@ fail:
return NULL;
}
+typedef char lookup[256][8];
+
static PyObject *
-unpack_bits(PyObject *input, int axis, PyObject *count_obj)
+unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order)
{
static int unpack_init = 0;
- static char unpack_lookup[256][8];
+ static lookup unpack_lookup_b;
+ static lookup unpack_lookup_l;
PyArrayObject *inp;
PyArrayObject *new = NULL;
PyArrayObject *out = NULL;
@@ -1773,7 +1793,20 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
/* setup lookup table under GIL, big endian 0..256 as bytes */
if (unpack_init == 0) {
npy_uint64 j;
- npy_uint64 * unpack_lookup_64 = (npy_uint64 *)unpack_lookup;
+ npy_uint64 * unpack_lookup_64 = (npy_uint64 *)unpack_lookup_l;
+ for (j=0; j < 256; j++) {
+ npy_uint64 v = 0;
+ v |= (npy_uint64)((j & 1) == 1);
+ v |= (npy_uint64)((j & 2) == 2) << 8;
+ v |= (npy_uint64)((j & 4) == 4) << 16;
+ v |= (npy_uint64)((j & 8) == 8) << 24;
+ v |= (npy_uint64)((j & 16) == 16) << 32;
+ v |= (npy_uint64)((j & 32) == 32) << 40;
+ v |= (npy_uint64)((j & 64) == 64) << 48;
+ v |= (npy_uint64)((j & 128) == 128) << 56;
+ unpack_lookup_64[j] = v;
+ }
+ unpack_lookup_64 = (npy_uint64 *)unpack_lookup_b;
for (j=0; j < 256; j++) {
npy_uint64 v = 0;
v |= (npy_uint64)((j & 1) == 1);
@@ -1784,11 +1817,10 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
v |= (npy_uint64)((j & 32) == 32) << 40;
v |= (npy_uint64)((j & 64) == 64) << 48;
v |= (npy_uint64)((j & 128) == 128) << 56;
-#if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
v = npy_bswap8(v);
-#endif
unpack_lookup_64[j] = v;
}
+
unpack_init = 1;
}
@@ -1813,17 +1845,22 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj)
npy_intp index;
unsigned const char *inptr = PyArray_ITER_DATA(it);
char *outptr = PyArray_ITER_DATA(ot);
+ lookup *unpack_lookup;
+ unpack_lookup = &unpack_lookup_b;
+ if (order == 'l') {
+ unpack_lookup = &unpack_lookup_l;
+ }
if (out_stride == 1) {
/* for unity stride we can just copy out of the lookup table */
for (index = 0; index < in_n; index++) {
- memcpy(outptr, unpack_lookup[*inptr], 8);
+ memcpy(outptr, unpack_lookup[0][*inptr], 8);
outptr += 8;
inptr += in_stride;
}
/* Clean up the tail portion */
if (in_tail) {
- memcpy(outptr, unpack_lookup[*inptr], in_tail);
+ memcpy(outptr, unpack_lookup[0][*inptr], in_tail);
}
/* Add padding */
else if (out_pad) {
@@ -1876,13 +1913,22 @@ io_pack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *obj;
int axis = NPY_MAXDIMS;
- static char *kwlist[] = {"a", "axis", NULL};
+ static char *kwlist[] = {"in", "axis", "order", NULL};
+ const char * c = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:pack" , kwlist,
- &obj, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&s:pack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &c)) {
return NULL;
}
- return pack_bits(obj, axis);
+ if (c == NULL) {
+ c = "b";
+ }
+ if (c[0] != 'l' && c[0] != 'b') {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must begin with 'l' or 'b'");
+ return NULL;
+ }
+ return pack_bits(obj, axis, c[0]);
}
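The parsing above inspects only c[0], so any string beginning with 'l' or 'b' is accepted and anything else raises ValueError. A sketch of the expected Python-level behavior (assumes this branch):

import numpy as np

a = np.array([1, 0, 1], dtype=np.uint8)
np.packbits(a, order='b')        # accepted, treated as 'big'
np.packbits(a, order='little')   # accepted
try:
    np.packbits(a, order='medium')
except ValueError as exc:
    print(exc)                   # 'order' must begin with 'l' or 'b'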
@@ -1892,12 +1938,20 @@ io_unpack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
PyObject *obj;
int axis = NPY_MAXDIMS;
PyObject *count = Py_None;
- static char *kwlist[] = {"a", "axis", "count", NULL};
+ static char *kwlist[] = {"in", "axis", "count", "order", NULL};
+ const char * c = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:unpack" , kwlist,
- &obj, PyArray_AxisConverter, &axis,
- &count)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&Os:unpack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &count, &c)) {
+ return NULL;
+ }
+ if (c == NULL) {
+ c = "b";
+ }
+ if (c[0] != 'l' && c[0] != 'b') {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must begin with 'l' or 'b'");
return NULL;
}
- return unpack_bits(obj, axis, count);
+ return unpack_bits(obj, axis, count, c[0]);
}