author     Mark Wiebe <mwiebe@enthought.com>   2011-07-26 12:07:14 -0500
committer  Mark Wiebe <mwiebe@enthought.com>   2011-07-26 12:09:24 -0500
commit     affea42d886e8233fdd6f3c5760708e3a9e9b1b8 (patch)
tree       d99aa6da31a7a7fcff39c39a334fc2023d2f79b4 /numpy
parent     694a3835a98e5f68dbbd416d3df81418fb16f28d (diff)
download   numpy-affea42d886e8233fdd6f3c5760708e3a9e9b1b8.tar.gz
STY: Remove trailing whitespace
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/_internal.py                          |  2
-rw-r--r--  numpy/core/code_generators/generate_umath.py     |  2
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h          |  2
-rw-r--r--  numpy/core/include/numpy/npy_common.h            |  2
-rw-r--r--  numpy/core/include/numpy/numpyconfig.h           |  2
-rw-r--r--  numpy/core/include/numpy/ufuncobject.h           |  4
-rw-r--r--  numpy/core/src/npymath/halffloat.c               | 32
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src             |  8
-rw-r--r--  numpy/core/src/private/lowlevel_strided_loops.h  | 10
-rw-r--r--  numpy/core/src/private/npy_config.h              |  4
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c     |  4
-rw-r--r--  numpy/core/tests/test_datetime.py                |  2
-rw-r--r--  numpy/core/tests/test_half.py                    | 10
-rw-r--r--  numpy/core/tests/test_ufunc.py                   |  2
-rw-r--r--  numpy/f2py/doc/multiarray/array_from_pyobj.c     | 26
-rw-r--r--  numpy/lib/npyio.py                               |  4
-rw-r--r--  numpy/lib/tests/test_index_tricks.py             |  2
-rw-r--r--  numpy/numarray/include/numpy/arraybase.h         |  6
-rw-r--r--  numpy/numarray/include/numpy/cfunc.h             |  6
-rw-r--r--  numpy/numarray/include/numpy/numcomplex.h        |  8
-rw-r--r--  numpy/numarray/include/numpy/nummacro.h          | 14
21 files changed, 76 insertions, 76 deletions
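The change itself is purely mechanical: every hunk below deletes a line carrying trailing whitespace and re-adds it without the trailing spaces. For reference only, a cleanup of this kind can be produced with a short script along the lines of the sketch below. This is an illustrative Python sketch, not the tool used for this commit; the function name and the extension filter are assumptions.

#!/usr/bin/env python
# Illustrative sketch (not part of this commit): strip trailing
# whitespace from source files under a directory tree.
import os

# Extensions matching the kinds of files touched in this commit;
# the exact set is an assumption.
EXTS = ('.py', '.c', '.h', '.src')

def strip_trailing_whitespace(root):
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith(EXTS):
                continue
            path = os.path.join(dirpath, name)
            with open(path, 'r') as f:
                lines = f.readlines()
            # Remove trailing spaces/tabs, keeping one newline per line.
            cleaned = [line.rstrip() + '\n' for line in lines]
            if cleaned != lines:
                with open(path, 'w') as f:
                    f.writelines(cleaned)

if __name__ == '__main__':
    strip_trailing_whitespace('numpy')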
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 0487b320e..713687199 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -185,7 +185,7 @@ def _commastring(astr):
else:
newitem = (dtype, eval(repeats))
result.append(newitem)
-
+
return result
def _getintp_ctype():
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 296f4683d..477cd122b 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -121,7 +121,7 @@ import string
if sys.version_info[0] < 3:
UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
else:
- UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
+ UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 24f1491de..af705b936 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -74,7 +74,7 @@ enum NPY_TYPES { NPY_BOOL=0,
* New 1.6 types appended, may be integrated
* into the above in 2.0.
*/
- NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
+ NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,
NPY_NTYPES,
NPY_NOTYPE,
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index ad326da68..118850541 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -135,7 +135,7 @@ typedef unsigned PY_LONG_LONG npy_ulonglong;
/* "%Ld" only parses 4 bytes -- "L" is floating modifier on MacOS X/BSD */
# define NPY_LONGLONG_FMT "lld"
# define NPY_ULONGLONG_FMT "llu"
-/*
+/*
another possible variant -- *quad_t works on *BSD, but is deprecated:
#define LONGLONG_FMT "qd"
#define ULONGLONG_FMT "qu"
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index ff7938cd9..19685b3dc 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -3,7 +3,7 @@
#include "_numpyconfig.h"
-/*
+/*
* On Mac OS X, because there is only one configuration stage for all the archs
* in universal builds, any macro which depends on the arch needs to be
* harcoded
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index c7096371d..d00fe10ea 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -108,14 +108,14 @@ typedef struct _tagPyUFuncObject {
void *ptr;
PyObject *obj;
PyObject *userloops;
-
+
/* generalized ufunc parameters */
/* 0 for scalar ufunc; 1 for generalized ufunc */
int core_enabled;
/* number of distinct dimension names in signature */
int core_num_dim_ix;
-
+
/*
* dimension indices of input/output argument k are stored in
* core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index cea9a3bd7..d5ef57d7b 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -145,7 +145,7 @@ npy_half npy_half_nextafter(npy_half x, npy_half y)
return ret;
}
-
+
int npy_half_eq_nonan(npy_half h1, npy_half h2)
{
return (h1 == h2 || ((h1 | h2) & 0x7fff) == 0);
@@ -239,7 +239,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
h_sgn = (npy_uint16) ((f&0x80000000u) >> 16);
f_exp = (f&0x7f800000u);
-
+
/* Exponent overflow/NaN converts to signed inf/NaN */
if (f_exp >= 0x47800000u) {
if (f_exp == 0x7f800000u) {
@@ -265,15 +265,15 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
return (npy_uint16) (h_sgn + 0x7c00u);
}
}
-
+
/* Exponent underflow converts to a subnormal half or signed zero */
if (f_exp <= 0x38000000u) {
- /*
+ /*
* Signed zeros, subnormal floats, and floats with small
* exponents all convert to signed zero halfs.
*/
if (f_exp < 0x33000000u) {
-#if NPY_HALF_GENERATE_UNDERFLOW
+#if NPY_HALF_GENERATE_UNDERFLOW
/* If f != 0, it underflowed to 0 */
if ((f&0x7fffffff) != 0) {
npy_set_floatstatus_underflow();
@@ -284,7 +284,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
/* Make the subnormal significand */
f_exp >>= 23;
f_sig = (0x00800000u + (f&0x007fffffu));
-#if NPY_HALF_GENERATE_UNDERFLOW
+#if NPY_HALF_GENERATE_UNDERFLOW
/* If it's not exactly represented, it underflowed */
if ((f_sig&(((npy_uint32)1 << (126 - f_exp)) - 1)) != 0) {
npy_set_floatstatus_underflow();
@@ -292,7 +292,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
#endif
f_sig >>= (113 - f_exp);
/* Handle rounding by adding 1 to the bit beyond half precision */
-#if NPY_HALF_ROUND_TIES_TO_EVEN
+#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
@@ -317,7 +317,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
h_exp = (npy_uint16) ((f_exp - 0x38000000u) >> 13);
/* Handle rounding by adding 1 to the bit beyond half precision */
f_sig = (f&0x007fffffu);
-#if NPY_HALF_ROUND_TIES_TO_EVEN
+#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
@@ -354,7 +354,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
h_sgn = (d&0x8000000000000000ULL) >> 48;
d_exp = (d&0x7ff0000000000000ULL);
-
+
/* Exponent overflow/NaN converts to signed inf/NaN */
if (d_exp >= 0x40f0000000000000ULL) {
if (d_exp == 0x7ff0000000000000ULL) {
@@ -380,15 +380,15 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
return h_sgn + 0x7c00u;
}
}
-
+
/* Exponent underflow converts to subnormal half or signed zero */
if (d_exp <= 0x3f00000000000000ULL) {
- /*
+ /*
* Signed zeros, subnormal floats, and floats with small
* exponents all convert to signed zero halfs.
*/
if (d_exp < 0x3e60000000000000ULL) {
-#if NPY_HALF_GENERATE_UNDERFLOW
+#if NPY_HALF_GENERATE_UNDERFLOW
/* If d != 0, it underflowed to 0 */
if ((d&0x7fffffffffffffffULL) != 0) {
npy_set_floatstatus_underflow();
@@ -399,7 +399,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
/* Make the subnormal significand */
d_exp >>= 52;
d_sig = (0x0010000000000000ULL + (d&0x000fffffffffffffULL));
-#if NPY_HALF_GENERATE_UNDERFLOW
+#if NPY_HALF_GENERATE_UNDERFLOW
/* If it's not exactly represented, it underflowed */
if ((d_sig&(((npy_uint64)1 << (1051 - d_exp)) - 1)) != 0) {
npy_set_floatstatus_underflow();
@@ -407,7 +407,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
#endif
d_sig >>= (1009 - d_exp);
/* Handle rounding by adding 1 to the bit beyond half precision */
-#if NPY_HALF_ROUND_TIES_TO_EVEN
+#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
@@ -432,7 +432,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
h_exp = (npy_uint16) ((d_exp - 0x3f00000000000000ULL) >> 42);
/* Handle rounding by adding 1 to the bit beyond half precision */
d_sig = (d&0x000fffffffffffffULL);
-#if NPY_HALF_ROUND_TIES_TO_EVEN
+#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
@@ -527,4 +527,4 @@ npy_uint64 npy_halfbits_to_doublebits(npy_uint16 h)
return d_sgn + (((npy_uint64)(h&0x7fffu) + 0xfc000u) << 42);
}
}
-
+
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 71be7b8ce..a81cabe5b 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -128,7 +128,7 @@ float _nextf(float x, int p)
#ifdef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE
-/*
+/*
* FIXME: this is ugly and untested. The asm part only works with gcc, and we
* should consolidate the GET_LDOUBLE* / SET_LDOUBLE macros
*/
@@ -561,7 +561,7 @@ npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
void npy_set_floatstatus_divbyzero(void)
{
fpsetsticky(FP_X_DZ);
-}
+}
void npy_set_floatstatus_overflow(void)
{
@@ -593,7 +593,7 @@ void npy_set_floatstatus_invalid(void)
void npy_set_floatstatus_divbyzero(void)
{
feraiseexcept(FE_DIVBYZERO);
-}
+}
void npy_set_floatstatus_overflow(void)
{
@@ -617,7 +617,7 @@ void npy_set_floatstatus_invalid(void)
void npy_set_floatstatus_divbyzero(void)
{
fp_raise_xcp(FP_DIV_BY_ZERO);
-}
+}
void npy_set_floatstatus_overflow(void)
{
diff --git a/numpy/core/src/private/lowlevel_strided_loops.h b/numpy/core/src/private/lowlevel_strided_loops.h
index 7318e51dc..b6b53ba45 100644
--- a/numpy/core/src/private/lowlevel_strided_loops.h
+++ b/numpy/core/src/private/lowlevel_strided_loops.h
@@ -19,7 +19,7 @@
* The 'transferdata' parameter is slightly special, following a
* generic auxiliary data pattern defined in ndarraytypes.h
* Use NPY_AUXDATA_CLONE and NPY_AUXDATA_FREE to deal with this data.
- *
+ *
*/
typedef void (PyArray_StridedTransferFn)(char *dst, npy_intp dst_stride,
char *src, npy_intp src_stride,
@@ -47,10 +47,10 @@ typedef void (PyArray_MaskedStridedTransferFn)(char *dst, npy_intp dst_stride,
* aligned:
* Should be 1 if the src and dst pointers are always aligned,
* 0 otherwise.
- * src_stride:
+ * src_stride:
* Should be the src stride if it will always be the same,
* NPY_MAX_INTP otherwise.
- * dst_stride:
+ * dst_stride:
* Should be the dst stride if it will always be the same,
* NPY_MAX_INTP otherwise.
* itemsize:
@@ -143,10 +143,10 @@ PyArray_GetDTypeCopySwapFn(int aligned,
* aligned:
* Should be 1 if the src and dst pointers are always aligned,
* 0 otherwise.
- * src_stride:
+ * src_stride:
* Should be the src stride if it will always be the same,
* NPY_MAX_INTP otherwise.
- * dst_stride:
+ * dst_stride:
* Should be the dst stride if it will always be the same,
* NPY_MAX_INTP otherwise.
* src_dtype:
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index b4842b832..237dc94ab 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -10,7 +10,7 @@
#endif
/* Safe to use ldexp and frexp for long double for MSVC builds */
-#if (SIZEOF_LONG_DOUBLE == SIZEOF_DOUBLE) || defined(_MSC_VER)
+#if (SIZEOF_LONG_DOUBLE == SIZEOF_DOUBLE) || defined(_MSC_VER)
#ifdef HAVE_LDEXP
#define HAVE_LDEXPL 1
#endif
@@ -24,7 +24,7 @@
#undef HAVE_ATAN2
#endif
-/*
+/*
* On Mac OS X, because there is only one configuration stage for all the archs
* in universal builds, any macro which depends on the arch needs to be
* harcoded
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index b08a7f165..fb7352070 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -665,7 +665,7 @@ timedelta_dtype_with_copied_meta(PyArray_Descr *dtype)
* m8[<A>] + M8[<B>] => m8[gcd(<A>,<B>)] + M8[gcd(<A>,<B>)]
* TODO: Non-linear time unit cases require highly special-cased loops
* M8[<A>] + m8[Y|M|B]
- * m8[Y|M|B] + M8[<A>]
+ * m8[Y|M|B] + M8[<A>]
*/
NPY_NO_EXPORT int
PyUFunc_AdditionTypeResolution(PyUFuncObject *ufunc,
@@ -1360,7 +1360,7 @@ static NpyAuxData *
ufunc_masker_data_clone(NpyAuxData *data)
{
_ufunc_masker_data *n;
-
+
/* Allocate a new one */
n = (_ufunc_masker_data *)PyArray_malloc(sizeof(_ufunc_masker_data));
if (n == NULL) {
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 2770364f0..82577c0fc 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -514,7 +514,7 @@ class TestDateTime(TestCase):
formatter={'datetime': lambda x :
"'%s'" % np.datetime_as_string(x, timezone='UTC')}),
"['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
-
+
def test_pickle(self):
# Check that pickle roundtripping works
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 37fce6460..8f1aa92c2 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -221,7 +221,7 @@ class TestHalf(TestCase):
a = np.zeros((5,), dtype=float16)
a.fill(1)
assert_equal(a, np.ones((5,), dtype=float16))
-
+
# nonzero and copyswap
a = np.array([0,0,-1,-1/1e20,0,2.0**-24, 7.629e-6], dtype=float16)
assert_equal(a.nonzero()[0],
@@ -235,7 +235,7 @@ class TestHalf(TestCase):
b = np.ones((20,), dtype=float16)
assert_equal(np.dot(a,b),
95)
-
+
# argmax
a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
assert_equal(a.argmax(),
@@ -271,7 +271,7 @@ class TestHalf(TestCase):
assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
-
+
def test_half_ufuncs(self):
"""Test the various ufuncs"""
@@ -302,7 +302,7 @@ class TestHalf(TestCase):
assert_equal(np.signbit(b), [True,False,False,False,False])
assert_equal(np.copysign(b,a), [2,5,1,4,3])
-
+
assert_equal(np.maximum(a,b), [0,5,2,4,3])
x = np.maximum(b,c)
assert_(np.isnan(x[3]))
@@ -365,7 +365,7 @@ class TestHalf(TestCase):
bx16 = np.array((1e4,),dtype=float16)
sy16 = float16(1e-4)
by16 = float16(1e4)
-
+
# Underflow errors
assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sx16)
assert_raises_fpe('underflow', lambda a,b:a*b, sx16, sy16)
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 9117b9ac8..773ce9a3b 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -299,7 +299,7 @@ class TestUfunc(TestCase):
b[:] = 0
np.add(a,0.5,sig=('i4','i4','i4'),out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
-
+
def test_inner1d(self):
a = np.arange(6).reshape((2,3))
diff --git a/numpy/f2py/doc/multiarray/array_from_pyobj.c b/numpy/f2py/doc/multiarray/array_from_pyobj.c
index 7e0de9a74..5a700eecf 100644
--- a/numpy/f2py/doc/multiarray/array_from_pyobj.c
+++ b/numpy/f2py/doc/multiarray/array_from_pyobj.c
@@ -1,14 +1,14 @@
-/*
- * File: array_from_pyobj.c
+/*
+ * File: array_from_pyobj.c
*
* Description:
- * ------------
+ * ------------
* Provides array_from_pyobj function that returns a contigious array
* object with the given dimensions and required storage order, either
* in row-major (C) or column-major (Fortran) order. The function
* array_from_pyobj is very flexible about its Python object argument
* that can be any number, list, tuple, or array.
- *
+ *
* array_from_pyobj is used in f2py generated Python extension
* modules.
*
@@ -80,8 +80,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
if (intent & F2PY_INTENT_CACHE) {
/* Don't expect correct storage order or anything reasonable when
- returning cache array. */
- if ((intent & F2PY_INTENT_HIDE)
+ returning cache array. */
+ if ((intent & F2PY_INTENT_HIDE)
|| (obj==Py_None)) {
PyArrayObject *arr = NULL;
CHECK_DIMS_DEFINED(rank,dims,"optional,intent(cache) must"
@@ -92,7 +92,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
Py_INCREF(arr);
return arr;
}
- if (PyArray_Check(obj)
+ if (PyArray_Check(obj)
&& ISCONTIGUOUS((PyArrayObject *)obj)
&& HAS_PROPER_ELSIZE((PyArrayObject *)obj,type_num)
) {
@@ -124,7 +124,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
intent(inout) */
PyArrayObject *arr = (PyArrayObject *)obj;
- int is_cont = (intent & F2PY_INTENT_C) ?
+ int is_cont = (intent & F2PY_INTENT_C) ?
(ISCONTIGUOUS(arr)) : (array_has_column_major_storage(arr));
if (check_and_fix_dimensions(arr,rank,dims))
@@ -166,7 +166,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
if ((obj==Py_None) && (intent & F2PY_OPTIONAL)) {
PyArrayObject *arr = NULL;
- CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n");
+ CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n");
arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
ARR_IS_NULL(arr,"FromDims failed: optional.\n");
if (intent & F2PY_INTENT_OUT) {
@@ -240,8 +240,8 @@ void lazy_transpose(PyArrayObject* arr) {
Note that this function is assumed to be used even times for a
given array. Otherwise, the caller should set flags &= ~CONTIGUOUS.
*/
- int rank,i,s,j;
- rank = arr->nd;
+ int rank,i,s,j;
+ rank = arr->nd;
if (rank < 2) return;
for(i=0,j=rank-1;i<rank/2;++i,--j) {
@@ -268,7 +268,7 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims)
int free_axe = -1;
int i;
/* Fill dims where -1 or 0; check dimensions; calc new_size; */
- for(i=0;i<arr->nd;++i) {
+ for(i=0;i<arr->nd;++i) {
if (dims[i] >= 0) {
if (dims[i]!=arr->dimensions[i]) {
fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
@@ -311,7 +311,7 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims)
if (arr->dimensions[i]!=dims[i]) {
fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
i,dims[i],arr->dimensions[i]);
- return 1;
+ return 1;
}
if (!dims[i]) dims[i] = 1;
} else
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 13f659d70..f7cde270d 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -627,7 +627,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
- Otherwise mono-dimensional axes will be squeezed.
+ Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
@@ -803,7 +803,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fh.close()
X = np.array(X, dtype)
- # Multicolumn data are returned with shape (1, N, M), i.e.
+ # Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index f0190937b..e4c0bde93 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -48,7 +48,7 @@ class TestRavelUnravelIndex(TestCase):
uncoords = coords[0]+5*coords[1]
assert_equal(np.ravel_multi_index(coords, shape, order='F'), uncoords)
assert_equal(coords, np.unravel_index(uncoords, shape, order='F'))
-
+
coords = np.array([[1,0,1,2,3,4],[1,6,1,3,2,0],[1,3,1,0,9,5]],
dtype=dtype)
shape = (5,8,10)
diff --git a/numpy/numarray/include/numpy/arraybase.h b/numpy/numarray/include/numpy/arraybase.h
index a964979ce..32f9948f4 100644
--- a/numpy/numarray/include/numpy/arraybase.h
+++ b/numpy/numarray/include/numpy/arraybase.h
@@ -12,7 +12,7 @@ typedef npy_uint8 UInt8;
typedef npy_int16 Int16;
typedef npy_uint16 UInt16;
typedef npy_int32 Int32;
-typedef npy_uint32 UInt32;
+typedef npy_uint32 UInt32;
typedef npy_int64 Int64;
typedef npy_uint64 UInt64;
typedef npy_float32 Float32;
@@ -65,7 +65,7 @@ typedef struct { Float64 r, i; } Complex64;
#define PyArray(m) ((PyArrayObject *)(m))
#define PyArray_ISFORTRAN_CONTIGUOUS(m) (((PyArray(m))->flags & FORTRAN_CONTIGUOUS) != 0)
-#define PyArray_ISWRITABLE PyArray_ISWRITEABLE
+#define PyArray_ISWRITABLE PyArray_ISWRITEABLE
-#endif
+#endif
diff --git a/numpy/numarray/include/numpy/cfunc.h b/numpy/numarray/include/numpy/cfunc.h
index b581be08f..1739290ae 100644
--- a/numpy/numarray/include/numpy/cfunc.h
+++ b/numpy/numarray/include/numpy/cfunc.h
@@ -6,11 +6,11 @@ typedef int (*UFUNC)(long, long, long, void **, long*);
/* typedef void (*CFUNC_2ARG)(long, void *, void *); */
/* typedef void (*CFUNC_3ARG)(long, void *, void *, void *); */
typedef int (*CFUNCfromPyValue)(PyObject *, void *);
-typedef int (*CFUNC_STRIDE_CONV_FUNC)(long, long, maybelong *,
+typedef int (*CFUNC_STRIDE_CONV_FUNC)(long, long, maybelong *,
void *, long, maybelong*, void *, long, maybelong *);
-typedef int (*CFUNC_STRIDED_FUNC)(PyObject *, long, PyArrayObject **,
- char **data);
+typedef int (*CFUNC_STRIDED_FUNC)(PyObject *, long, PyArrayObject **,
+ char **data);
#define MAXARRAYS 16
diff --git a/numpy/numarray/include/numpy/numcomplex.h b/numpy/numarray/include/numpy/numcomplex.h
index 9ed4198c7..7b4960e40 100644
--- a/numpy/numarray/include/numpy/numcomplex.h
+++ b/numpy/numarray/include/numpy/numcomplex.h
@@ -68,7 +68,7 @@ typedef struct { Float64 a, theta; } PolarComplex64;
#define NUM_CLE(p, q) ((p).r <= (q).r)
#define NUM_CGE(p, q) ((p).r >= (q).r)
-/* e**z = e**x * (cos(y)+ i*sin(y)) where z = x + i*y
+/* e**z = e**x * (cos(y)+ i*sin(y)) where z = x + i*y
so e**z = e**x * cos(y) + i * e**x * sin(y)
*/
#define NUM_CEXP(p, s) \
@@ -79,7 +79,7 @@ typedef struct { Float64 a, theta; } PolarComplex64;
/* e**w = z; w = u + i*v; z = r * e**(i*theta);
-e**u * e**(i*v) = r * e**(i*theta);
+e**u * e**(i*v) = r * e**(i*theta);
log(z) = w; log(z) = log(r) + i*theta;
*/
@@ -110,7 +110,7 @@ log(z) = w; log(z) = log(r) + i*theta;
NUM_CEXP(s, s); \
} \
}
-
+
#define NUM_CSQRT(p, s) { Complex64 temp; temp.r = 0.5; temp.i=0; \
NUM_CPOW(p, temp, s); \
}
@@ -201,7 +201,7 @@ log(z) = w; log(z) = log(r) + i*theta;
NUM_CIMUL(s, s); \
NUM_CRMUL(s, 0.5, s); \
}
-
+
/* asinh(z) = log( z + (z**2 + 1)**0.5 ) */
#define NUM_CASINH(p, s) { Complex64 p1; NUM_CASS(p, p1); \
NUM_CMUL(p, p, s); \
diff --git a/numpy/numarray/include/numpy/nummacro.h b/numpy/numarray/include/numpy/nummacro.h
index 6eb98ddd0..0f87dfb84 100644
--- a/numpy/numarray/include/numpy/nummacro.h
+++ b/numpy/numarray/include/numpy/nummacro.h
@@ -6,7 +6,7 @@
/* The structs defined here are private implementation details of numarray
which are subject to change w/o notice.
*/
-
+
#define PY_BOOL_CHAR "b"
#define PY_INT8_CHAR "b"
#define PY_INT16_CHAR "h"
@@ -44,7 +44,7 @@ typedef enum
#define UNCONVERTED 0
#define C_ARRAY (NUM_CONTIGUOUS | NUM_NOTSWAPPED | NUM_ALIGNED)
-#define MUST_BE_COMPUTED 2
+#define MUST_BE_COMPUTED 2
#define NUM_FLOORDIVIDE(a,b,out) (out) = floor((a)/(b))
@@ -334,7 +334,7 @@ _makeSetPa(Bool)
out[i] = NA_GETPb(ai, type, base); \
base += stride; \
} \
- }
+ }
#define NA_GET1Da(ai, type, base, cnt, out) \
{ int i, stride = PyArray_STRIDES(ai)[PyArray_NDIM(ai)-1]; \
@@ -342,7 +342,7 @@ _makeSetPa(Bool)
out[i] = NA_GETPa(ai, type, base); \
base += stride; \
} \
- }
+ }
#define NA_GET1Df(ai, type, base, cnt, out) \
{ int i, stride = PyArray_STRIDES(ai)[PyArray_NDIM(ai)-1]; \
@@ -350,7 +350,7 @@ _makeSetPa(Bool)
out[i] = NA_GETPf(ai, type, base); \
base += stride; \
} \
- }
+ }
#define NA_GET1D(ai, type, base, cnt, out) \
if (PyArray_ISCARRAY(ai)) { \
@@ -360,7 +360,7 @@ _makeSetPa(Bool)
} else { \
NA_GET1Da(ai, type, base, cnt, out); \
}
-
+
#define NA_SET1Db(ai, type, base, cnt, in) \
{ int i, stride = PyArray_STRIDES(ai)[PyArray_NDIM(ai)-1]; \
for(i=0; i<cnt; i++) { \
@@ -393,7 +393,7 @@ _makeSetPa(Bool)
} else { \
NA_SET1Da(ai, type, base, cnt, out); \
}
-
+
/* ========================== utilities ================================== */
#if !defined(MIN)