Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/fromnumeric.py          |   2
-rw-r--r--  numpy/core/numeric.py              | 107
-rw-r--r--  numpy/core/src/umath/loops.c.src   |   5
-rw-r--r--  numpy/core/src/umath/simd.inc.src  | 166
-rw-r--r--  numpy/core/tests/test_numeric.py   | 110
5 files changed, 344 insertions, 46 deletions
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index a2937c5c5..67d2c5b48 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -518,7 +518,7 @@ def transpose(a, axes=None):
See Also
--------
- rollaxis
+ moveaxis
argsort
Notes
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 4f3d418e6..a18b38072 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import sys
+import operator
import warnings
import collections
from numpy.core import multiarray
@@ -15,8 +16,10 @@ from ._internal import TooHardError
if sys.version_info[0] >= 3:
import pickle
basestring = str
+ import builtins
else:
import cPickle as pickle
+ import __builtin__ as builtins
loads = pickle.loads
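
The `builtins` import matters for the new code below: inside
numpy/core/numeric.py the name `all` evidently resolves to NumPy's own
reduction rather than the Python built-in, so `_validate_axis` spells out
`builtins.all`. A minimal illustration of the call it relies on (values
hypothetical):

    import builtins
    ndim, axis = 3, [0, 2]
    # the Python built-in short-circuits over any iterable, including the
    # generator expression used in _validate_axis
    builtins.all(0 <= a < ndim for a in axis)  # -> True
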
@@ -31,15 +34,15 @@ __all__ = [
'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
- 'rollaxis', 'cross', 'tensordot', 'array2string', 'get_printoptions',
- 'set_printoptions', 'array_repr', 'array_str', 'set_string_function',
- 'little_endian', 'require', 'fromiter', 'array_equal', 'array_equiv',
- 'indices', 'fromfunction', 'isclose', 'load', 'loads', 'isscalar',
- 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose',
- 'compare_chararrays', 'putmask', 'seterr', 'geterr', 'setbufsize',
- 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
- 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
- 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
+ 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
+ 'set_string_function', 'little_endian', 'require', 'fromiter',
+ 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
+ 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
+ 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
+ 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
+ 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
+ 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
'TooHardError',
@@ -1422,6 +1425,7 @@ def rollaxis(a, axis, start=0):
See Also
--------
+ moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
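
For comparison, the `rollaxis` idiom this cross-reference sits next to can be
written more directly with the new function (a sketch using standard NumPy
semantics):

    >>> import numpy as np
    >>> x = np.ones((3, 4, 5))
    >>> np.rollaxis(x, 2).shape     # roll axis 2 until it lies at position 0
    (5, 3, 4)
    >>> np.moveaxis(x, 2, 0).shape  # same permutation, stated directly
    (5, 3, 4)
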
@@ -1457,6 +1461,91 @@ def rollaxis(a, axis, start=0):
return a.transpose(axes)
+def _validate_axis(axis, ndim, argname):
+ try:
+ axis = [operator.index(axis)]
+ except TypeError:
+ axis = list(axis)
+ axis = [a + ndim if a < 0 else a for a in axis]
+ if not builtins.all(0 <= a < ndim for a in axis):
+ raise ValueError('invalid axis for this array in `%s` argument' %
+ argname)
+ if len(set(axis)) != len(axis):
+ raise ValueError('repeated axis in `%s` argument' % argname)
+ return axis
+
+
+def moveaxis(a, source, destination):
+ """
+ Move axes of an array to new positions.
+
+ Other axes remain in their original order.
+
+ .. versionadded:: 1.11.0
+
+ Parameters
+ ----------
+ a : np.ndarray
+ The array whose axes should be reordered.
+ source : int or sequence of int
+ Original positions of the axes to move. These must be unique.
+ destination : int or sequence of int
+ Destination positions for each of the original axes. These must also be
+ unique.
+
+ Returns
+ -------
+ result : np.ndarray
+ Array with moved axes. This array is a view of the input array.
+
+ See Also
+ --------
+ transpose : Permute the dimensions of an array.
+ swapaxes : Interchange two axes of an array.
+
+ Examples
+ --------
+
+ >>> x = np.zeros((3, 4, 5))
+ >>> np.moveaxis(x, 0, -1).shape
+ (4, 5, 3)
+ >>> np.moveaxis(x, -1, 0).shape
+ (5, 3, 4)
+
+ These all achieve the same result:
+
+ >>> np.transpose(x).shape
+ (5, 4, 3)
+ >>> np.swapaxes(x, 0, -1).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1], [-1, -2]).shape
+ (5, 4, 3)
+ >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
+ (5, 4, 3)
+
+ """
+ try:
+ # allow duck-array types if they define transpose
+ transpose = a.transpose
+ except AttributeError:
+ a = asarray(a)
+ transpose = a.transpose
+
+ source = _validate_axis(source, a.ndim, 'source')
+ destination = _validate_axis(destination, a.ndim, 'destination')
+ if len(source) != len(destination):
+ raise ValueError('`source` and `destination` arguments must have '
+ 'the same number of elements')
+
+ order = [n for n in range(a.ndim) if n not in source]
+
+ for dest, src in sorted(zip(destination, source)):
+ order.insert(dest, src)
+
+ result = transpose(order)
+ return result
+
+
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return rollaxis(a, axis, 0)
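
The core of `moveaxis` is the construction of `order`. A standalone sketch of
just that step, with `_moveaxis_order` as a hypothetical helper name and
`source`/`destination` assumed already normalized to non-negative ints (which
`_validate_axis` guarantees):

    def _moveaxis_order(ndim, source, destination):
        # axes that are not moving keep their relative order...
        order = [n for n in range(ndim) if n not in source]
        # ...and each moved axis is spliced in at its destination; inserting
        # in sorted destination order keeps earlier inserts from displacing
        # later ones
        for dest, src in sorted(zip(destination, source)):
            order.insert(dest, src)
        return order

    _moveaxis_order(3, [0], [2])        # -> [1, 2, 0], i.e. moveaxis(x, 0, -1)
    _moveaxis_order(4, [0, 1], [3, 2])  # -> [2, 3, 1, 0]
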
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index aff6180c7..fc9ffec94 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1558,14 +1558,11 @@ NPY_NO_EXPORT void
/**begin repeat1
* #kind = isnan, isinf, isfinite, signbit#
* #func = npy_isnan, npy_isinf, npy_isfinite, npy_signbit#
- * #isnan = 1, 0*3#
**/
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- char * margs[] = {args[0], args[0], args[1]};
- npy_intp msteps[] = {steps[0], steps[0], steps[1]};
- if (!@isnan@ || !run_binary_simd_not_equal_@TYPE@(margs, dimensions, msteps)) {
+ if (!run_@kind@_simd_@TYPE@(args, dimensions, steps)) {
UNARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
*((npy_bool *)op1) = @func@(in1) != 0;
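
For context on the deleted branch: `isnan` previously reached SIMD code by
routing through the binary `not_equal` kernel with both operand pointers aimed
at the same buffer, relying on NaN being the only IEEE-754 value that compares
unequal to itself. The same identity at the Python level:

    >>> import numpy as np
    >>> x = np.array([1.0, np.nan, np.inf])
    >>> x != x
    array([False,  True, False], dtype=bool)
    >>> np.isnan(x)
    array([False,  True, False], dtype=bool)
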
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 84695f5d6..21ff97784 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -25,6 +25,7 @@
#endif
#include <assert.h>
#include <stdlib.h>
+#include <float.h>
#include <string.h> /* for memcpy */
/* Figure out the right abs function for pointer addresses */
@@ -259,6 +260,32 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
/**end repeat1**/
+/**begin repeat1
+ * #kind = isnan, isfinite, isinf, signbit#
+ */
+
+#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
+
+static void
+sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n);
+
+#endif
+
+static NPY_INLINE int
+run_@kind@_simd_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (steps[0] == sizeof(@type@) && steps[1] == 1 &&
+ npy_is_aligned(args[0], sizeof(@type@))) {
+ sse2_@kind@_@TYPE@((npy_bool*)args[1], (@type@*)args[0], dimensions[0]);
+ return 1;
+ }
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
/**end repeat**/
/*
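
The `run_@kind@_simd_@TYPE@` gate above only takes the vector path for a
contiguous input (`steps[0] == sizeof(@type@)`), a contiguous boolean output
(`steps[1] == 1`), and an element-aligned base pointer; anything else falls
back to the scalar loop. Roughly the same predicate, checked from Python on a
concrete array (illustrative only; `would_vectorize` is not a real NumPy
attribute):

    import numpy as np

    x = np.zeros(64, dtype=np.float32)
    addr = x.__array_interface__['data'][0]
    contiguous_in = x.strides[0] == x.itemsize  # steps[0] == sizeof(type)
    aligned = addr % x.itemsize == 0            # npy_is_aligned(args[0], ...)
    would_vectorize = contiguous_in and aligned # True for a fresh np.zeros
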
@@ -528,11 +555,104 @@ sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4,
#endif
}
+static void
+sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
+{
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = npy_signbit(ip1[i]);
+ }
+ LOOP_BLOCKED(@type@, 16) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
+ int r = @vpre@_movemask_@vsuf@(a);
+ if (sizeof(@type@) == 8) {
+ op[i] = r & 1;
+ op[i + 1] = (r >> 1);
+ }
+ else {
+ op[i] = r & 1;
+ op[i + 1] = (r >> 1) & 1;
+ op[i + 2] = (r >> 2) & 1;
+ op[i + 3] = (r >> 3);
+ }
+ }
+ LOOP_BLOCKED_END {
+ op[i] = npy_signbit(ip1[i]);
+ }
+}
+
+/**begin repeat1
+ * #kind = isnan, isfinite, isinf#
+ * #var = 0, 1, 2#
+ */
+
+static void
+sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
+{
+#if @var@ != 0 /* isinf/isfinite */
+ /* signbit mask 0x7FFFFFFF after andnot */
+ const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@);
+ const @vtype@ ones = @vpre@_cmpeq_@vsuf@(@vpre@_setzero_@vsuf@(),
+ @vpre@_setzero_@vsuf@());
+#if @double@
+ const @vtype@ fltmax = @vpre@_set1_@vsuf@(DBL_MAX);
+#else
+ const @vtype@ fltmax = @vpre@_set1_@vsuf@(FLT_MAX);
+#endif
+#endif
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ op[i] = npy_@kind@(ip1[i]);
+ }
+ LOOP_BLOCKED(@type@, 64) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ r1, r2, r3, r4;
+#if @var@ != 0 /* isinf/isfinite */
+ /* fabs via masking of sign bit */
+ r1 = @vpre@_andnot_@vsuf@(mask, a);
+ r2 = @vpre@_andnot_@vsuf@(mask, b);
+ r3 = @vpre@_andnot_@vsuf@(mask, c);
+ r4 = @vpre@_andnot_@vsuf@(mask, d);
+#if @var@ == 1 /* isfinite */
+ /* negative compare against max float, nan is always true */
+ r1 = @vpre@_cmpnle_@vsuf@(r1, fltmax);
+ r2 = @vpre@_cmpnle_@vsuf@(r2, fltmax);
+ r3 = @vpre@_cmpnle_@vsuf@(r3, fltmax);
+ r4 = @vpre@_cmpnle_@vsuf@(r4, fltmax);
+#else /* isinf */
+ r1 = @vpre@_cmpnlt_@vsuf@(fltmax, r1);
+ r2 = @vpre@_cmpnlt_@vsuf@(fltmax, r2);
+ r3 = @vpre@_cmpnlt_@vsuf@(fltmax, r3);
+ r4 = @vpre@_cmpnlt_@vsuf@(fltmax, r4);
+#endif
+ /* flip results to what we want (andnot as there is no sse not) */
+ r1 = @vpre@_andnot_@vsuf@(r1, ones);
+ r2 = @vpre@_andnot_@vsuf@(r2, ones);
+ r3 = @vpre@_andnot_@vsuf@(r3, ones);
+ r4 = @vpre@_andnot_@vsuf@(r4, ones);
+#endif
+#if @var@ == 0 /* isnan */
+ r1 = @vpre@_cmpneq_@vsuf@(a, a);
+ r2 = @vpre@_cmpneq_@vsuf@(b, b);
+ r3 = @vpre@_cmpneq_@vsuf@(c, c);
+ r4 = @vpre@_cmpneq_@vsuf@(d, d);
+#endif
+ sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
+ }
+ LOOP_BLOCKED_END {
+ op[i] = npy_@kind@(ip1[i]);
+ }
+ /* silence exceptions from comparisons */
+ npy_clear_floatstatus();
+}
+
+/**end repeat1**/
+
/**begin repeat1
* #kind = equal, not_equal, less, less_equal, greater, greater_equal#
* #OP = ==, !=, <, <=, >, >=#
* #VOP = cmpeq, cmpneq, cmplt, cmple, cmpgt, cmpge#
- * #neq = 0, 1, 0*4#
*/
/* sets invalid fpu flag on QNaN for consistency with packed compare */
@@ -554,36 +674,20 @@ sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
}
- /* isnan special unary case */
- if (@neq@ && ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a, a);
- @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, b);
- @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, c);
- @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d, d);
- sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
- }
- }
- else {
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
- @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
- @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
- @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2);
- sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
- }
+ LOOP_BLOCKED(@type@, 64) {
+ @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
+ @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
+ @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
+ @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ @vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
+ @vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
+ @vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
+ @vtype@ r4 = @vpre@_@VOP@_@vsuf@(d1, d2);
+ sse2_compress4_to_byte_@TYPE@(r1, r2, r3, &r4, &op[i]);
}
LOOP_BLOCKED_END {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
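
Scalar equivalents of the four new kernels, as a sketch against the IEEE-754
double layout (the kernels process 16-byte vectors at a time, but the
per-element logic is the same; `big` is a hypothetical name):

    >>> import numpy as np
    >>> x = np.array([1.0, -1.0, np.inf, -np.inf, np.nan, -0.0])
    >>> big = np.finfo(np.float64).max           # DBL_MAX
    >>> (x.view(np.uint64) >> 63).astype(bool)   # signbit: the top bit, which movemask reads
    array([False,  True, False,  True, False,  True], dtype=bool)
    >>> x != x                                   # isnan: self-inequality
    array([False, False, False, False,  True, False], dtype=bool)
    >>> np.abs(x) > big                          # isinf: |x| beyond DBL_MAX; NaN compares False
    array([False, False,  True,  True, False, False], dtype=bool)
    >>> np.abs(x) <= big                         # isfinite: NaN and inf both fail
    array([ True,  True, False, False, False,  True], dtype=bool)

Here `np.abs` plays the role of the kernel's andnot-with--0.0 sign-bit
masking.
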
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index d63118080..a114d5a5a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -11,7 +11,8 @@ from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
- assert_array_equal, assert_almost_equal, assert_array_almost_equal, dec
+ assert_raises_regex, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, dec
)
@@ -234,6 +235,31 @@ class TestBoolCmp(TestCase):
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
+ self.inff = self.f.copy()
+ self.infd = self.d.copy()
+ self.inff[::3][self.ef[::3]] = np.inf
+ self.infd[::3][self.ed[::3]] = np.inf
+ self.inff[1::3][self.ef[1::3]] = -np.inf
+ self.infd[1::3][self.ed[1::3]] = -np.inf
+ self.inff[2::3][self.ef[2::3]] = np.nan
+ self.infd[2::3][self.ed[2::3]] = np.nan
+ self.efnonan = self.ef.copy()
+ self.efnonan[2::3] = False
+ self.ednonan = self.ed.copy()
+ self.ednonan[2::3] = False
+
+ self.signf = self.f.copy()
+ self.signd = self.d.copy()
+ self.signf[self.ef] *= -1.
+ self.signd[self.ed] *= -1.
+ self.signf[1::6][self.ef[1::6]] = -np.inf
+ self.signd[1::6][self.ed[1::6]] = -np.inf
+ self.signf[3::6][self.ef[3::6]] = -np.nan
+ self.signd[3::6][self.ed[3::6]] = -np.nan
+ self.signf[4::6][self.ef[4::6]] = -0.
+ self.signd[4::6][self.ed[4::6]] = -0.
+
+
def test_float(self):
# offset for alignment test
for i in range(4):
@@ -255,6 +281,10 @@ class TestBoolCmp(TestCase):
# isnan on amd64 takes the same codepath
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
+ assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
+ assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
+ assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
+ assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
@@ -277,6 +307,10 @@ class TestBoolCmp(TestCase):
# isnan on amd64 takes the same codepath
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
+ assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
+ assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
+ assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
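
The staggered [::3]/[1::3]/[2::3] (and [1::6], [3::6], [4::6]) assignments in
setUp seed +inf, -inf, NaN, and -0. at interleaved offsets, so each SIMD block
sees a mixture of value classes rather than a uniform run. The pattern in
miniature (mask values hypothetical):

    >>> import numpy as np
    >>> inff = np.arange(12.0)
    >>> ef = inff > 5.0              # stand-in for the random mask self.ef
    >>> inff[::3][ef[::3]] = np.inf
    >>> inff[1::3][ef[1::3]] = -np.inf
    >>> inff[2::3][ef[2::3]] = np.nan
    >>> inff[6:]
    array([ inf, -inf,  nan,  inf, -inf,  nan])
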
class TestSeterr(TestCase):
@@ -2029,6 +2063,80 @@ class TestRollaxis(TestCase):
assert_(not res.flags['OWNDATA'])
+class TestMoveaxis(TestCase):
+ def test_move_to_end(self):
+ x = np.random.randn(5, 6, 7)
+ for source, expected in [(0, (6, 7, 5)),
+ (1, (5, 7, 6)),
+ (2, (5, 6, 7)),
+ (-1, (5, 6, 7))]:
+ actual = np.moveaxis(x, source, -1).shape
+ assert_equal(actual, expected)
+
+ def test_move_new_position(self):
+ x = np.random.randn(1, 2, 3, 4)
+ for source, destination, expected in [
+ (0, 1, (2, 1, 3, 4)),
+ (1, 2, (1, 3, 2, 4)),
+ (1, -1, (1, 3, 4, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_preserve_order(self):
+ x = np.zeros((1, 2, 3, 4))
+ for source, destination in [
+ (0, 0),
+ (3, -1),
+ (-1, 3),
+ ([0, -1], [0, -1]),
+ ([2, 0], [2, 0]),
+ (range(4), range(4)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, (1, 2, 3, 4))
+
+ def test_move_multiples(self):
+ x = np.zeros((0, 1, 2, 3))
+ for source, destination, expected in [
+ ([0, 1], [2, 3], (2, 3, 0, 1)),
+ ([2, 3], [0, 1], (2, 3, 0, 1)),
+ ([0, 1, 2], [2, 3, 0], (2, 3, 0, 1)),
+ ([3, 0], [1, 0], (0, 3, 1, 2)),
+ ([0, 3], [0, 1], (0, 3, 1, 2)),
+ ]:
+ actual = np.moveaxis(x, source, destination).shape
+ assert_equal(actual, expected)
+
+ def test_errors(self):
+ x = np.random.randn(1, 2, 3)
+ assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ np.moveaxis, x, 3, 0)
+ assert_raises_regex(ValueError, 'invalid axis .* `source`',
+ np.moveaxis, x, -4, 0)
+ assert_raises_regex(ValueError, 'invalid axis .* `destination`',
+ np.moveaxis, x, 0, 5)
+ assert_raises_regex(ValueError, 'repeated axis in `source`',
+ np.moveaxis, x, [0, 0], [0, 1])
+ assert_raises_regex(ValueError, 'repeated axis in `destination`',
+ np.moveaxis, x, [0, 1], [1, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, 0, [0, 1])
+ assert_raises_regex(ValueError, 'must have the same number',
+ np.moveaxis, x, [0, 1], [0])
+
+ def test_array_likes(self):
+ x = np.ma.zeros((1, 2, 3))
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x.shape, result.shape)
+ assert_(isinstance(result, np.ma.MaskedArray))
+
+ x = [1, 2, 3]
+ result = np.moveaxis(x, 0, 0)
+ assert_equal(x, list(result))
+ assert_(isinstance(result, np.ndarray))
+
+
class TestCross(TestCase):
def test_2x2(self):
u = [1, 2]