path: root/numpy
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py | 2
-rw-r--r--  numpy/add_newdocs.py | 16
-rw-r--r--  numpy/core/defmatrix.py | 20
-rw-r--r--  numpy/core/fromnumeric.py | 8
-rw-r--r--  numpy/core/memmap.py | 3
-rw-r--r--  numpy/core/numerictypes.py | 8
-rw-r--r--  numpy/core/tests/test_numerictypes.py | 6
-rw-r--r--  numpy/core/tests/test_regression.py | 8
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 2
-rw-r--r--  numpy/distutils/cpuinfo.py | 4
-rw-r--r--  numpy/doc/newdtype_example/example.py | 3
-rw-r--r--  numpy/doc/newdtype_example/setup.py | 3
-rw-r--r--  numpy/lib/financial.py | 54
-rw-r--r--  numpy/lib/function_base.py | 52
-rw-r--r--  numpy/lib/index_tricks.py | 2
-rw-r--r--  numpy/lib/io.py | 61
-rw-r--r--  numpy/lib/tests/test__datasource.py | 6
-rw-r--r--  numpy/lib/tests/test_function_base.py | 52
-rw-r--r--  numpy/lib/tests/test_io.py | 22
-rw-r--r--  numpy/lib/tests/test_regression.py | 2
-rw-r--r--  numpy/lib/twodim_base.py | 12
-rw-r--r--  numpy/lib/utils.py | 12
-rw-r--r--  numpy/linalg/linalg.py | 20
-rw-r--r--  numpy/linalg/tests/test_regression.py | 8
-rw-r--r--  numpy/ma/core.py | 48
-rw-r--r--  numpy/ma/extras.py | 26
-rw-r--r--  numpy/ma/morestats.py | 16
-rw-r--r--  numpy/ma/mrecords.py | 22
-rw-r--r--  numpy/ma/mstats.py | 38
-rw-r--r--  numpy/ma/tests/test_core.py | 2
-rw-r--r--  numpy/ma/tests/test_extras.py | 8
-rw-r--r--  numpy/ma/tests/test_mrecords.py | 40
-rw-r--r--  numpy/ma/testutils.py | 2
-rw-r--r--  numpy/oldnumeric/compat.py | 6
-rw-r--r--  numpy/oldnumeric/ma.py | 2
-rw-r--r--  numpy/testing/utils.py | 2
36 files changed, 295 insertions, 303 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index edb1d3db3..00494fb37 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -1,7 +1,7 @@
"""
NumPy
==========
-Provides
+Provides
1) An array object of arbitrary homogeneous items
2) Fast mathematical operations over arrays
3) Linear Algebra, Fourier Transforms, Random Number Generation
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index be2b86f63..c61ffcab8 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -1314,10 +1314,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
Notes
-----
The standard deviation is the square root of the average of the squared
- deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
- The computed standard deviation is computed by dividing by the number of
- elements, N-ddof. The option ddof defaults to zero, that is, a
- biased estimate. Note that for complex numbers std takes the absolute
+ deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
+ The computed standard deviation is computed by dividing by the number of
+ elements, N-ddof. The option ddof defaults to zero, that is, a
+ biased estimate. Note that for complex numbers std takes the absolute
value before squaring, so that the result is always real and nonnegative.
"""))
@@ -1503,10 +1503,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
Notes
-----
The variance is the average of the squared deviations from the mean,
- i.e. var = mean(abs(x - x.mean())**2). The mean is computed by
- dividing by N-ddof, where N is the number of elements. The argument
- ddof defaults to zero; for an unbiased estimate supply ddof=1. Note
- that for complex numbers the absolute value is taken before squaring,
+ i.e. var = mean(abs(x - x.mean())**2). The mean is computed by
+ dividing by N-ddof, where N is the number of elements. The argument
+ ddof defaults to zero; for an unbiased estimate supply ddof=1. Note
+ that for complex numbers the absolute value is taken before squaring,
so that the result is always real and nonnegative.
"""))
diff --git a/numpy/core/defmatrix.py b/numpy/core/defmatrix.py
index 85eab179f..de37a2686 100644
--- a/numpy/core/defmatrix.py
+++ b/numpy/core/defmatrix.py
@@ -390,11 +390,11 @@ class matrix(N.ndarray):
-----
The standard deviation is the square root of the
average of the squared deviations from the mean, i.e. var =
- sqrt(mean(abs(x - x.mean())**2)). The computed standard
- deviation is computed by dividing by the number of elements,
- N-ddof. The option ddof defaults to zero, that is, a biased
- estimate. Note that for complex numbers std takes the absolute
- value before squaring, so that the result is always real
+ sqrt(mean(abs(x - x.mean())**2)). The computed standard
+ deviation is computed by dividing by the number of elements,
+ N-ddof. The option ddof defaults to zero, that is, a biased
+ estimate. Note that for complex numbers std takes the absolute
+ value before squaring, so that the result is always real
and nonnegative.
"""
@@ -439,11 +439,11 @@ class matrix(N.ndarray):
-----
The variance is the average of the squared deviations from the
- mean, i.e. var = mean(abs(x - x.mean())**2). The mean is
- computed by dividing by N-ddof, where N is the number of elements.
- The argument ddof defaults to zero; for an unbiased estimate
- supply ddof=1. Note that for complex numbers the absolute value
- is taken before squaring, so that the result is always real
+ mean, i.e. var = mean(abs(x - x.mean())**2). The mean is
+ computed by dividing by N-ddof, where N is the number of elements.
+ The argument ddof defaults to zero; for an unbiased estimate
+ supply ddof=1. Note that for complex numbers the absolute value
+ is taken before squaring, so that the result is always real
and nonnegative.
"""
return N.ndarray.var(self, axis, dtype, out)._align(axis)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 471a50a8c..35c2e9a65 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1671,10 +1671,10 @@ def std(a, axis=None, dtype=None, out=None, ddof=0):
Notes
-----
The standard deviation is the square root of the average of the squared
- deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
- The computed standard deviation is computed by dividing by the number of
- elements, N-ddof. The option ddof defaults to zero, that is, a
- biased estimate. Note that for complex numbers std takes the absolute
+ deviations from the mean, i.e. var = sqrt(mean(abs(x - x.mean())**2)).
+ The computed standard deviation is computed by dividing by the number of
+ elements, N-ddof. The option ddof defaults to zero, that is, a
+ biased estimate. Note that for complex numbers std takes the absolute
value before squaring, so that the result is always real and nonnegative.
Examples
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 93950c5c0..c7705d263 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -20,7 +20,7 @@ class memmap(ndarray):
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. Numpy's memmaps are
- array-like objects. This differs from python's mmap module which are
+ array-like objects. This differs from python's mmap module which are
file-like objects.
Parameters
@@ -250,4 +250,3 @@ class memmap(ndarray):
# flush any changes to disk, even if it's a view
self.flush()
self._close()
-
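A minimal sketch of the array-like behaviour the memmap docstring contrasts with Python's file-like mmap objects (the file name 'scratch.dat' is made up for the example):

    >>> import numpy as np
    >>> fp = np.memmap('scratch.dat', dtype=np.float32, mode='w+', shape=(3, 4))
    >>> fp[0, :] = 1.0     # indexed and sliced like any ndarray
    >>> fp.flush()         # changes are written back to the file on disk
    >>> fp.shape
    (3, 4)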
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index ae0b57eec..3c4e8ad8c 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -606,7 +606,7 @@ def _find_common_coerce(a, b):
return newdtype
thisind += 1
return None
-
+
def find_common_type(array_types, scalar_types):
"""Determine common type following standard coercion rules
@@ -617,13 +617,13 @@ def find_common_type(array_types, scalar_types):
A list of dtype convertible objects representing arrays
scalar_types : sequence
A list of dtype convertible objects representing scalars
-
+
Returns
-------
datatype : dtype
The common data-type which is the maximum of the array_types
ignoring the scalar_types unless the maximum of the scalar_types
- is of a different kind.
+ is of a different kind.
If the kinds is not understood, then None is returned.
"""
@@ -646,7 +646,7 @@ def find_common_type(array_types, scalar_types):
index_sc = _kind_list.index(maxsc.kind)
except ValueError:
return None
-
+
if index_sc > index_a:
return _find_common_coerce(maxsc,maxa)
else:
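The coercion rule spelled out in the find_common_type docstring above (array types win unless the maximum scalar type is of a different kind), in doctest form:

    >>> import numpy as np
    >>> np.find_common_type(['f4'], ['f8'])   # same kind: scalar types are ignored
    dtype('float32')
    >>> np.find_common_type(['i4'], ['f8'])   # different kind: the scalar type decides
    dtype('float64')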
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index f0533e062..bfbd91fec 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -355,10 +355,10 @@ class TestCommonType(NumpyTestCase):
res = numpy.find_common_type(['u8','i8','i8'],['f8'])
assert(res == 'f8')
-
-
-
+
+
+
if __name__ == "__main__":
NumpyTest().run()
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 4f9996227..68231e2ed 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -818,10 +818,10 @@ class TestRegression(NumpyTestCase):
np.indices((0,3,4)).T.reshape(-1,3)
def check_flat_byteorder(self, level=rlevel):
- """Ticket #657"""
- x = np.arange(10)
- assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
- assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
+ """Ticket #657"""
+ x = np.arange(10)
+ assert_array_equal(x.astype('>i4'),x.astype('<i4').flat[:])
+ assert_array_equal(x.astype('>i4').flat[:],x.astype('<i4'))
def check_uint64_from_negative(self, level=rlevel) :
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index a3175664b..d07460516 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -76,7 +76,7 @@ class TestRepr(NumpyTestCase):
def check_float_repr(self):
from numpy import nan, inf
for t in [np.float32, np.float64, np.longdouble]:
- if t is np.longdouble: # skip it for now.
+ if t is np.longdouble: # skip it for now.
continue
finfo=np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 5781cca3c..c35338292 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -276,10 +276,10 @@ class LinuxCPUInfo(CPUInfoBase):
return re.match(r'.*?\bsse2\b',self.info[0]['flags']) is not None
def _has_sse3(self):
- return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None
+ return re.match(r'.*?\bpni\b',self.info[0]['flags']) is not None
def _has_ssse3(self):
- return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None
+ return re.match(r'.*?\bssse3\b',self.info[0]['flags']) is not None
def _has_3dnow(self):
return re.match(r'.*?\b3dnow\b',self.info[0]['flags']) is not None
diff --git a/numpy/doc/newdtype_example/example.py b/numpy/doc/newdtype_example/example.py
index de068e72a..7ee64ca00 100644
--- a/numpy/doc/newdtype_example/example.py
+++ b/numpy/doc/newdtype_example/example.py
@@ -8,10 +8,9 @@ import numpy as np
# But we can get a view as an ndarray of the given type:
g = np.array([1,2,3,4,5,6,7,8]).view(ff.floatint_type)
-# Now, the elements will be the scalar type associated
+# Now, the elements will be the scalar type associated
# with the ndarray.
print g[0]
print type(g[1])
# Now, you need to register ufuncs and more arrfuncs to do useful things...
-
diff --git a/numpy/doc/newdtype_example/setup.py b/numpy/doc/newdtype_example/setup.py
index a60e5a650..3b9d75578 100644
--- a/numpy/doc/newdtype_example/setup.py
+++ b/numpy/doc/newdtype_example/setup.py
@@ -4,10 +4,9 @@ from numpy.distutils.core import setup
def configuration(parent_package = '', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('floatint',parent_package,top_path)
-
+
config.add_extension('floatint',
sources = ['floatint.c']);
return config
setup(configuration=configuration)
-
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 5bb4a3af7..2a751281f 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -2,12 +2,12 @@
# patterned after spreadsheet computations.
# There is some complexity in each function
-# so that the functions behave like ufuncs with
+# so that the functions behave like ufuncs with
# broadcasting and being able to be called with scalars
-# or arrays (or other sequences).
+# or arrays (or other sequences).
import numpy as np
-__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
+__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
'irr', 'npv', 'mirr']
_when_to_num = {'end':0, 'begin':1,
@@ -19,7 +19,7 @@ _when_to_num = {'end':0, 'begin':1,
eqstr = """
- nper / (1 + rate*when) \ / nper \
+ nper / (1 + rate*when) \ / nper \
fv + pv*(1+rate) + pmt*|-------------------|*| (1+rate) - 1 | = 0
\ rate / \ /
@@ -28,23 +28,23 @@ eqstr = """
where (all can be scalars or sequences)
Parameters
- ----------
- rate :
+ ----------
+ rate :
Rate of interest (per period)
- nper :
+ nper :
Number of compounding periods
- pmt :
- Payment
+ pmt :
+ Payment
pv :
Present value
fv :
- Future value
- when :
+ Future value
+ when :
When payments are due ('begin' (1) or 'end' (0))
-
+
"""
-def _convert_when(when):
+def _convert_when(when):
try:
return _when_to_num[when]
except KeyError:
@@ -85,19 +85,19 @@ def pmt(rate, nper, pv, fv=0, when='end'):
temp = (1+rate)**nper
miter = np.broadcast(rate, nper, pv, fv, when)
zer = np.zeros(miter.shape)
- fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
+ fact = np.where(rate==zer, nper+zer, (1+rate*when)*(temp-1)/rate+zer)
return -(fv + pv*temp) / fact
pmt.__doc__ += eqstr + """
Example
-------
-What would the monthly payment need to be to pay off a $200,000 loan in 15
+What would the monthly payment need to be to pay off a $200,000 loan in 15
years at an annual interest rate of 7.5%?
>>> pmt(0.075/12, 12*15, 200000)
-1854.0247200054619
-In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained
+In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained
today, a monthly payment of $1,854.02 would be required.
"""
@@ -160,19 +160,19 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
pv.__doc__ += eqstr
# Computed with Sage
-# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)
+# (y + (r + 1)^n*x + p*((r + 1)^n - 1)*(r*w + 1)/r)/(n*(r + 1)^(n - 1)*x - p*((r + 1)^n - 1)*(r*w + 1)/r^2 + n*p*(r + 1)^(n - 1)*(r*w + 1)/r + p*((r + 1)^n - 1)*w/r)
def _g_div_gp(r, n, p, x, y, w):
t1 = (r+1)**n
t2 = (r+1)**(n-1)
return (y + t1*x + p*(t1 - 1)*(r*w + 1)/r)/(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r)
-# Use Newton's iteration until the change is less than 1e-6
+# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
-# Newton's rule is
-# r_{n+1} = r_{n} - g(r_n)/g'(r_n)
+# Newton's rule is
+# r_{n+1} = r_{n} - g(r_n)/g'(r_n)
# where
-# g(r) is the formula
+# g(r) is the formula
# g'(r) is the derivative with respect to r.
def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
"""Number of periods found by solving the equation
@@ -194,7 +194,7 @@ def rate(nper, pmt, pv, fv, when='end', guess=0.10, tol=1e-6, maxiter=100):
else:
return rn
rate.__doc__ += eqstr
-
+
def irr(values):
"""Internal Rate of Return
@@ -212,7 +212,7 @@ def irr(values):
if rate.size == 1:
rate = rate.item()
return rate
-
+
def npv(rate, values):
"""Net Present Value
@@ -223,15 +223,15 @@ def npv(rate, values):
def mirr(values, finance_rate, reinvest_rate):
"""Modified internal rate of return
-
+
Parameters
----------
values:
Cash flows (must contain at least one positive and one negative value)
or nan is returned.
- finance_rate :
+ finance_rate :
Interest rate paid on the cash flows
- reinvest_rate :
+ reinvest_rate :
Interest rate received on the cash flows upon reinvestment
"""
@@ -240,7 +240,7 @@ def mirr(values, finance_rate, reinvest_rate):
neg = values < 0
if not (pos.size > 0 and neg.size > 0):
return np.nan
-
+
n = pos.size + neg.size
numer = -npv(reinvest_rate, values[pos])*((1+reinvest_rate)**n)
denom = npv(finance_rate, values[neg])*(1+finance_rate)
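The comments above describe rate() as a Newton iteration on the annuity equation fv + pv*(1+r)**nper + pmt*(1+r*when)/r*((1+r)**nper - 1) = 0, stopping once the step drops below tol or maxiter rounds have run. A standalone sketch of that loop (the function names here are made up; the g/g' ratio mirrors the _g_div_gp expression quoted in the hunk):

    import numpy as np

    def g_div_gp(r, n, p, x, y, w):
        # g(r)/g'(r) for g(r) = y + x*(1+r)**n + p*((1+r)**n - 1)*(1 + r*w)/r
        t1 = (r + 1)**n
        t2 = (r + 1)**(n - 1)
        return ((y + t1*x + p*(t1 - 1)*(r*w + 1)/r) /
                (n*t2*x - p*(t1 - 1)*(r*w + 1)/r**2
                 + n*p*t2*(r*w + 1)/r + p*(t1 - 1)*w/r))

    def solve_rate(nper, pmt, pv, fv, when=0, guess=0.10, tol=1e-6, maxiter=100):
        rn = guess
        for _ in range(maxiter):
            rnp1 = rn - g_div_gp(rn, nper, pmt, pv, fv, when)  # Newton step
            if abs(rnp1 - rn) < tol:
                return rnp1
            rn = rnp1
        return np.nan  # no convergence within maxiter

    # solve_rate(10, -150, 1000, 0) -> about 0.081, the per-period rate at which
    # ten payments of 150 pay off a present value of 1000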
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index e165c2672..4c02d7c7a 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -328,49 +328,49 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
def average(a, axis=None, weights=None, returned=False):
"""Return the weighted average of array a over the given axis.
-
-
+
+
Parameters
----------
a : array_like
Data to be averaged.
axis : {None, integer}, optional
- Axis along which to average a. If None, averaging is done over the
- entire array irrespective of its shape.
+ Axis along which to average a. If None, averaging is done over the
+ entire array irrespective of its shape.
weights : {None, array_like}, optional
- The importance each datum has in the computation of the
- average. The weights array can either be 1D, in which case its length
- must be the size of a along the given axis, or of the same shape as a.
- If weights=None, all data are assumed to have weight equal to one.
+ The importance each datum has in the computation of the
+ average. The weights array can either be 1D, in which case its length
+ must be the size of a along the given axis, or of the same shape as a.
+ If weights=None, all data are assumed to have weight equal to one.
returned :{False, boolean}, optional
If True, the tuple (average, sum_of_weights) is returned,
- otherwise only the average is returmed. Note that if weights=None, then
+ otherwise only the average is returmed. Note that if weights=None, then
the sum of the weights is also the number of elements averaged over.
Returns
-------
average, [sum_of_weights] : {array_type, double}
- Return the average along the specified axis. When returned is True,
- return a tuple with the average as the first element and the sum
- of the weights as the second element. The return type is Float if a is
+ Return the average along the specified axis. When returned is True,
+ return a tuple with the average as the first element and the sum
+ of the weights as the second element. The return type is Float if a is
of integer type, otherwise it is of the same type as a.
sum_of_weights is has the same type as the average.
-
+
Example
-------
>>> average(range(1,11), weights=range(10,0,-1))
4.0
-
+
Exceptions
----------
ZeroDivisionError
- Raised when all weights along axis are zero. See numpy.ma.average for a
- version robust to this type of error.
+ Raised when all weights along axis are zero. See numpy.ma.average for a
+ version robust to this type of error.
TypeError
- Raised when the length of 1D weights is not the same as the shape of a
- along axis.
-
+ Raised when the length of 1D weights is not the same as the shape of a
+ along axis.
+
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
@@ -390,7 +390,7 @@ def average(a, axis=None, weights=None, returned=False):
raise TypeError, "1D weights expected when shapes of a and weights differ."
if wgt.shape[0] != a.shape[axis] :
raise ValueError, "Length of weights not compatible with specified axis."
-
+
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1,axis)
@@ -681,20 +681,20 @@ except RuntimeError:
def interp(x, xp, fp, left=None, right=None):
"""Return the value of a piecewise-linear function at each value in x.
- The piecewise-linear function, f, is defined by the known data-points
- fp=f(xp). The xp points must be sorted in increasing order but this is
+ The piecewise-linear function, f, is defined by the known data-points
+ fp=f(xp). The xp points must be sorted in increasing order but this is
not checked.
-
- For values of x < xp[0] return the value given by left. If left is None,
+
+ For values of x < xp[0] return the value given by left. If left is None,
then return fp[0].
- For values of x > xp[-1] return the value given by right. If right is
+ For values of x > xp[-1] return the value given by right. If right is
None, then return fp[-1].
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
-
+
def angle(z, deg=0):
"""Return the angle of the complex argument z.
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 22b8ef388..2039d5b5e 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -294,7 +294,7 @@ class AxisConcatenator(object):
objs.append(newobj)
if not scalar and isinstance(newobj, _nx.ndarray):
arraytypes.append(newobj.dtype)
-
+
# Esure that scalars won't up-cast unless warranted
final_dtype = find_common_type(arraytypes, scalartypes)
if final_dtype is not None:
diff --git a/numpy/lib/io.py b/numpy/lib/io.py
index 9e61ab2f8..4c7180245 100644
--- a/numpy/lib/io.py
+++ b/numpy/lib/io.py
@@ -232,37 +232,37 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
Parameters
----------
- fname : filename or a file handle.
+ fname : filename or a file handle.
Support for gzipped files is automatic, if the filename ends in .gz
- dtype : data-type
- Data type of the resulting array. If this is a record data-type, the
- resulting array will be 1-d and each row will be interpreted as an
- element of the array. The number of columns used must match the number
+ dtype : data-type
+ Data type of the resulting array. If this is a record data-type, the
+ resulting array will be 1-d and each row will be interpreted as an
+ element of the array. The number of columns used must match the number
of fields in the data-type in this case.
- comments : str
+ comments : str
The character used to indicate the start of a comment in the file.
delimiter : str
- A string-like character used to separate values in the file. If delimiter
+ A string-like character used to separate values in the file. If delimiter
is unspecified or none, any whitespace string is a separator.
converters : {}
- A dictionary mapping column number to a function that will convert that
- column to a float. Eg, if column 0 is a date string:
- converters={0:datestr2num}. Converters can also be used to provide
- a default value for missing data: converters={3:lambda s: float(s or 0)}.
-
+ A dictionary mapping column number to a function that will convert that
+ column to a float. Eg, if column 0 is a date string:
+ converters={0:datestr2num}. Converters can also be used to provide
+ a default value for missing data: converters={3:lambda s: float(s or 0)}.
+
skiprows : int
The number of rows from the top to skip.
usecols : sequence
- A sequence of integer column indexes to extract where 0 is the first
+ A sequence of integer column indexes to extract where 0 is the first
column, eg. usecols=(1,4,5) will extract the 2nd, 5th and 6th columns.
unpack : bool
- If True, will transpose the matrix allowing you to unpack into named
+ If True, will transpose the matrix allowing you to unpack into named
arguments on the left hand side.
Examples
@@ -271,8 +271,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
>>> x,y,z = load('somefile.dat', usecols=(3,5,7), unpack=True)
>>> r = np.loadtxt('record.dat', dtype={'names':('gender','age','weight'),
'formats': ('S1','i4', 'f4')})
-
- SeeAlso: scipy.io.loadmat to read and write matfiles.
+
+ SeeAlso: scipy.io.loadmat to read and write matfiles.
"""
if _string_like(fname):
@@ -332,23 +332,23 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
Parameters
----------
fname : filename or a file handle
- If the filename ends in .gz, the file is automatically saved in
- compressed gzip format. The load() command understands gzipped files
+ If the filename ends in .gz, the file is automatically saved in
+ compressed gzip format. The load() command understands gzipped files
transparently.
X : array or sequence
Data to write to file.
- fmt : string
- A format string %[flags][width][.precision]specifier. See notes below for
+ fmt : string
+ A format string %[flags][width][.precision]specifier. See notes below for
a description of some common flags and specifiers.
delimiter : str
Character separating columns.
-
+
Examples
--------
>>> savetxt('test.out', x, delimiter=',') # X is an array
>>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
- >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
-
+ >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
+
Notes on fmt
------------
flags:
@@ -362,19 +362,19 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
For e, E and f specifiers, the number of digits to print after the decimal
point.
For g and G, the maximum number of significant digits.
- For s, the maximum number of characters.
+ For s, the maximum number of characters.
specifiers:
c : character
d or i : signed decimal integer
- e or E : scientific notation with e or E.
+ e or E : scientific notation with e or E.
f : decimal floating point
g,G : use the shorter of e,E or f
o : signed octal
s : string of characters
u : unsigned decimal integer
x,X : unsigned hexadecimal integer
-
- This is not an exhaustive specification.
+
+ This is not an exhaustive specification.
"""
if _string_like(fname):
@@ -403,7 +403,7 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
import re
def fromregex(file, regexp, dtype):
"""Construct a record array from a text file, using regular-expressions parsing.
-
+
Array is constructed from all matches of the regular expression
in the file. Groups in the regular expression are converted to fields.
@@ -423,7 +423,7 @@ def fromregex(file, regexp, dtype):
>>> f.write("1312 foo\n1534 bar\n 444 qux")
>>> f.close()
>>> np.fromregex('test.dat', r"(\d+)\s+(...)", [('num', np.int64), ('key', 'S3')])
- array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
+ array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
"""
@@ -433,7 +433,7 @@ def fromregex(file, regexp, dtype):
regexp = re.compile(regexp)
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
-
+
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# make sure np.array doesn't interpret strings as binary data
@@ -441,4 +441,3 @@ def fromregex(file, regexp, dtype):
seq = [(x,) for x in seq]
output = np.array(seq, dtype=dtype)
return output
-
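A small round trip through the savetxt/loadtxt options documented above, with StringIO standing in for a file (Python 2 style, as in the tests elsewhere in this commit; the numbers are arbitrary):

    >>> import numpy as np, StringIO
    >>> buf = StringIO.StringIO()
    >>> np.savetxt(buf, np.array([[1., 2., 30.], [4., 5., 60.]]), fmt='%.1f', delimiter=',')
    >>> buf.seek(0)
    >>> x, z = np.loadtxt(buf, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x.tolist(), z.tolist()
    ([1.0, 4.0], [30.0, 60.0])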
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index ad3635490..93e77d28c 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -200,7 +200,7 @@ class TestDataSourceAbspath(NumpyTestCase):
tmpfilename = os.path.split(tmpfile)[-1]
tmp_path = lambda x: os.path.abspath(self.ds.abspath(x))
-
+
assert tmp_path(valid_httpurl()).startswith(self.tmpdir)
assert tmp_path(invalid_httpurl()).startswith(self.tmpdir)
assert tmp_path(tmpfile).startswith(self.tmpdir)
@@ -208,7 +208,7 @@ class TestDataSourceAbspath(NumpyTestCase):
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
-
+
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
@@ -244,7 +244,7 @@ class TestRepositoryAbspath(NumpyTestCase):
for fn in malicious_files:
assert tmp_path(http_path+fn).startswith(self.tmpdir)
assert tmp_path(fn).startswith(self.tmpdir)
-
+
def test_windows_os_sep(self):
orig_os_sep = os.sep
try:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index d1786969d..52c8cb4d0 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -58,36 +58,36 @@ class TestAverage(NumpyTestCase):
assert_almost_equal(y5.mean(1), average(y5, 1))
y6 = matrix(rand(5,5))
- assert_array_equal(y6.mean(0), average(y6,0))
-
+ assert_array_equal(y6.mean(0), average(y6,0))
+
def check_weights(self):
y = arange(10)
w = arange(10)
assert_almost_equal(average(y, weights=w), (arange(10)**2).sum()*1./arange(10).sum())
-
+
y1 = array([[1,2,3],[4,5,6]])
w0 = [1,2]
actual = average(y1,weights=w0,axis=0)
desired = array([3.,4.,5.])
assert_almost_equal(actual, desired)
-
-
+
+
w1 = [0,0,1]
desired = array([3., 6.])
assert_almost_equal(average(y1, weights=w1, axis=1), desired)
# This should raise an error. Can we test for that ?
# assert_equal(average(y1, weights=w1), 9./2.)
-
-
+
+
# 2D Case
w2 = [[0,0,1],[0,0,2]]
desired = array([3., 6.])
assert_array_equal(average(y1, weights=w2, axis=1), desired)
-
+
assert_equal(average(y1, weights=w2), 5.)
-
-
+
+
def check_returned(self):
y = array([[1,2,3],[4,5,6]])
@@ -97,23 +97,23 @@ class TestAverage(NumpyTestCase):
avg, scl = average(y, 0, returned=True)
assert_array_equal(scl, array([2.,2.,2.]))
-
+
avg, scl = average(y, 1, returned=True)
assert_array_equal(scl, array([3.,3.]))
-
+
# With weights
w0 = [1,2]
avg, scl = average(y, weights=w0, axis=0, returned=True)
assert_array_equal(scl, array([3., 3., 3.]))
-
+
w1 = [1,2,3]
avg, scl = average(y, weights=w1, axis=1, returned=True)
assert_array_equal(scl, array([6., 6.]))
-
+
w2 = [[0,0,1],[1,2,3]]
avg, scl = average(y, weights=w2, axis=1, returned=True)
assert_array_equal(scl, array([1.,6.]))
-
+
class TestSelect(NumpyTestCase):
def _select(self,cond,values,default=0):
@@ -433,7 +433,7 @@ class TestHistogram(NumpyTestCase):
(a,b)=histogram(v)
#check if the sum of the bins equals the number of samples
assert(sum(a,axis=0)==n)
- #check that the bin counts are evenly spaced when the data is from a
+ #check that the bin counts are evenly spaced when the data is from a
# linear function
(a,b)=histogram(linspace(0,10,100))
assert(all(a==10))
@@ -443,7 +443,7 @@ class TestHistogramdd(NumpyTestCase):
x = array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5], \
[.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
H, edges = histogramdd(x, (2,3,3), range = [[-1,1], [0,3], [0,3]])
- answer = asarray([[[0,1,0], [0,0,1], [1,0,0]], [[0,1,0], [0,0,1],
+ answer = asarray([[[0,1,0], [0,0,1], [1,0,0]], [[0,1,0], [0,0,1],
[0,0,1]]])
assert_array_equal(H,answer)
# Check normalization
@@ -451,12 +451,12 @@ class TestHistogramdd(NumpyTestCase):
H, edges = histogramdd(x, bins = ed, normed = True)
assert(all(H == answer/12.))
# Check that H has the correct shape.
- H, edges = histogramdd(x, (2,3,4), range = [[-1,1], [0,3], [0,4]],
+ H, edges = histogramdd(x, (2,3,4), range = [[-1,1], [0,3], [0,4]],
normed=True)
- answer = asarray([[[0,1,0,0], [0,0,1,0], [1,0,0,0]], [[0,1,0,0],
+ answer = asarray([[[0,1,0,0], [0,0,1,0], [1,0,0,0]], [[0,1,0,0],
[0,0,1,0], [0,0,1,0]]])
assert_array_almost_equal(H, answer/6., 4)
- # Check that a sequence of arrays is accepted and H has the correct
+ # Check that a sequence of arrays is accepted and H has the correct
# shape.
z = [squeeze(y) for y in split(x,3,axis=1)]
H, edges = histogramdd(z, bins=(4,3,2),range=[[-2,2], [0,3], [0,2]])
@@ -473,7 +473,7 @@ class TestHistogramdd(NumpyTestCase):
def check_shape_3d(self):
# All possible permutations for bins of different lengths in 3D.
- bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
+ bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
(4, 5, 6))
r = rand(10,3)
for b in bins:
@@ -482,11 +482,11 @@ class TestHistogramdd(NumpyTestCase):
def check_shape_4d(self):
# All possible permutations for bins of different lengths in 4D.
- bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
- (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
- (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
- (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
- (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
+ bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
+ (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
+ (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
+ (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
+ (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
(5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
r = rand(10,4)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 9e398cf23..31eceb7f6 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -38,8 +38,8 @@ class TestSaveTxt(NumpyTestCase):
np.savetxt(c, a, delimiter=',', fmt='%d')
c.seek(0)
assert_equal(c.readlines(), ['1,2\n', '3,4\n'])
-
-
+
+
## def test_format(self):
## a = np.array([(1, 2), (3, 4)])
## c = StringIO.StringIO()
@@ -47,21 +47,21 @@ class TestSaveTxt(NumpyTestCase):
## np.savetxt(c, a, fmt=['%02d', '%3.1f'])
## c.seek(0)
## assert_equal(c.readlines(), ['01 2.0\n', '03 4.0\n'])
-##
+##
## # A single multiformat string
## c = StringIO.StringIO()
## np.savetxt(c, a, fmt='%02d : %3.1f')
## c.seek(0)
## lines = c.readlines()
## assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
-##
+##
## # Specify delimiter, should be overiden
## c = StringIO.StringIO()
## np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
## c.seek(0)
## lines = c.readlines()
## assert_equal(lines, ['01 : 2.0\n', '03 : 4.0\n'])
-
+
class TestLoadTxt(NumpyTestCase):
def test_record(self):
@@ -122,7 +122,7 @@ class TestLoadTxt(NumpyTestCase):
converters={3:lambda s: int(s or -999)})
a = np.array([1,2,3,-999,5], int)
assert_array_equal(x, a)
-
+
def test_comments(self):
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
@@ -131,7 +131,7 @@ class TestLoadTxt(NumpyTestCase):
comments='#')
a = np.array([1,2,3,5], int)
assert_array_equal(x, a)
-
+
def test_skiprows(self):
c = StringIO.StringIO()
c.write('comment\n1,2,3,5\n')
@@ -140,7 +140,7 @@ class TestLoadTxt(NumpyTestCase):
skiprows=1)
a = np.array([1,2,3,5], int)
assert_array_equal(x, a)
-
+
c = StringIO.StringIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
@@ -148,7 +148,7 @@ class TestLoadTxt(NumpyTestCase):
skiprows=1)
a = np.array([1,2,3,5], int)
assert_array_equal(x, a)
-
+
def test_usecols(self):
a =np.array( [[1,2],[3,4]], float)
c = StringIO.StringIO()
@@ -156,14 +156,14 @@ class TestLoadTxt(NumpyTestCase):
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:,1])
-
+
a =np.array( [[1,2,3],[3,4,5]], float)
c = StringIO.StringIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,2))
assert_array_equal(x, a[:,1:])
-
+
class Testfromregex(NumpyTestCase):
def test_record(self):
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 2e0b3cfb0..868184b81 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -28,6 +28,6 @@ class TestRegression(NumpyTestCase):
tested = np.polyfit(x, y, 4)
assert_array_almost_equal(ref, tested)
-
+
if __name__ == "__main__":
NumpyTest().run()
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index a66e94d65..44082521c 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -83,9 +83,9 @@ def diag(v, k=0):
raise ValueError, "Input must be 1- or 2-d."
def diagflat(v,k=0):
- """Return a 2D array whose k'th diagonal is a flattened v and all other
- elements are zero.
-
+ """Return a 2D array whose k'th diagonal is a flattened v and all other
+ elements are zero.
+
Examples
--------
>>> diagflat([[1,2],[3,4]]])
@@ -93,12 +93,12 @@ def diagflat(v,k=0):
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
-
+
>>> diagflat([1,2], 1)
array([[0, 1, 0],
- [0, 0, 2],
+ [0, 0, 2],
[0, 0, 0]])
- """
+ """
try:
wrap = v.__array_wrap__
except AttributeError:
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index b02ba540f..3f7d22092 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -512,7 +512,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False):
if not whats: return
for name, (docstring, kind, index) in cache.iteritems():
- if kind in ('module', 'object'):
+ if kind in ('module', 'object'):
# don't show modules or objects
continue
ok = True
@@ -528,7 +528,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False):
# XXX: this is full Harrison-Stetson heuristics now,
# XXX: it probably could be improved
- kind_relevance = {'func': 1000, 'class': 1000,
+ kind_relevance = {'func': 1000, 'class': 1000,
'module': -1000, 'object': -1000}
def relevance(name, docstr, kind, index):
@@ -597,7 +597,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
cache : dict {obj_full_name: (docstring, kind, index), ...}
Docstring cache for the module, either cached one (regenerate=False)
or newly generated.
-
+
"""
global _lookfor_caches
@@ -623,7 +623,7 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
index += 1
kind = "object"
-
+
if inspect.ismodule(item):
kind = "module"
try:
@@ -649,13 +649,13 @@ def _lookfor_generate_cache(module, import_modules, regenerate):
stack.append(("%s.%s" % (name, n), v))
elif callable(item):
kind = "func"
-
+
doc = inspect.getdoc(item)
if doc is not None:
cache[name] = (doc, kind, index)
return cache
-
+
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 1706ff4a9..f56005292 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -972,8 +972,8 @@ def svd(a, full_matrices=1, compute_uv=1):
def cond(x,p=None):
"""Compute the condition number of a matrix.
- The condition number of x is the norm of x times the norm
- of the inverse of x. The norm can be the usual L2
+ The condition number of x is the norm of x times the norm
+ of the inverse of x. The norm can be the usual L2
(root-of-sum-of-squares) norm or a number of other matrix norms.
Parameters
@@ -983,16 +983,16 @@ def cond(x,p=None):
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}
Order of the norm:
- p norm for matrices
+ p norm for matrices
===== ============================
None 2-norm, computed directly using the SVD
- 'fro' Frobenius norm
- inf max(sum(abs(x), axis=1))
- -inf min(sum(abs(x), axis=1))
- 1 max(sum(abs(x), axis=0))
- -1 min(sum(abs(x), axis=0))
- 2 2-norm (largest sing. value)
- -2 smallest singular value
+ 'fro' Frobenius norm
+ inf max(sum(abs(x), axis=1))
+ -inf min(sum(abs(x), axis=1))
+ 1 max(sum(abs(x), axis=0))
+ -1 min(sum(abs(x), axis=0))
+ 2 2-norm (largest sing. value)
+ -2 smallest singular value
===== ============================
Returns
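The norm table above maps directly onto the optional p argument of cond(); a doctest-style sketch with an arbitrary diagonal matrix:

    >>> import numpy as np
    >>> a = np.array([[1., 0.], [0., 10.]])
    >>> np.linalg.cond(a)          # default p=None: 2-norm computed from the SVD
    10.0
    >>> np.linalg.cond(a, np.inf)  # max(sum(abs(x), axis=1)), times the same for inv(x)
    10.0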
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index f712adb54..1335643bb 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -12,9 +12,9 @@ rlevel = 1
class TestRegression(NumpyTestCase):
def test_eig_build(self, level = rlevel):
"""Ticket #652"""
- rva = [1.03221168e+02 +0.j,
+ rva = [1.03221168e+02 +0.j,
-1.91843603e+01 +0.j,
- -6.04004526e-01+15.84422474j,
+ -6.04004526e-01+15.84422474j,
-6.04004526e-01-15.84422474j,
-1.13692929e+01 +0.j,
-6.57612485e-01+10.41755503j,
@@ -24,7 +24,7 @@ class TestRegression(NumpyTestCase):
7.80732773e+00 +0.j ,
-7.65390898e-01 +0.j,
1.51971555e-15 +0.j ,
- -1.51308713e-15 +0.j]
+ -1.51308713e-15 +0.j]
a = arange(13*13, dtype = float64)
a.shape = (13,13)
a = a%17
@@ -38,7 +38,7 @@ class TestRegression(NumpyTestCase):
cov = array([[ 77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
-
+
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 959fa619e..83c74a12e 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -26,13 +26,13 @@ __all__ = ['MAError', 'MaskType', 'MaskedArray',
'arctanh', 'argmax', 'argmin', 'argsort', 'around',
'array', 'asarray','asanyarray',
'bitwise_and', 'bitwise_or', 'bitwise_xor',
- 'ceil', 'choose', 'common_fill_value', 'compress', 'compressed',
+ 'ceil', 'choose', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'cos', 'cosh', 'count',
'default_fill_value', 'diagonal', 'divide', 'dump', 'dumps',
'empty', 'empty_like', 'equal', 'exp',
'fabs', 'fmod', 'filled', 'floor', 'floor_divide','fix_invalid',
'frombuffer', 'fromfunction',
- 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal',
+ 'getdata','getmask', 'getmaskarray', 'greater', 'greater_equal',
'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct',
'isMA', 'isMaskedArray', 'is_mask', 'is_masked', 'isarray',
@@ -41,16 +41,16 @@ __all__ = ['MAError', 'MaskType', 'MaskedArray',
'make_mask', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
'masked_greater_equal', 'masked_inside', 'masked_invalid',
- 'masked_less','masked_less_equal', 'masked_not_equal',
- 'masked_object','masked_outside', 'masked_print_option',
- 'masked_singleton','masked_values', 'masked_where', 'max', 'maximum',
+ 'masked_less','masked_less_equal', 'masked_not_equal',
+ 'masked_object','masked_outside', 'masked_print_option',
+ 'masked_singleton','masked_values', 'masked_where', 'max', 'maximum',
'mean', 'min', 'minimum', 'multiply',
'negative', 'nomask', 'nonzero', 'not_equal',
'ones', 'outer', 'outerproduct',
'power', 'product', 'ptp', 'put', 'putmask',
'rank', 'ravel', 'remainder', 'repeat', 'reshape', 'resize',
'right_shift', 'round_',
- 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort',
+ 'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'sometrue', 'sort',
'sqrt', 'std', 'subtract', 'sum', 'swapaxes',
'take', 'tan', 'tanh', 'transpose', 'true_divide',
'var', 'where',
@@ -192,7 +192,7 @@ def _check_fill_value(fill_value, dtype):
else:
fill_value = default_fill_value(dtype)
else:
- fill_value = narray(fill_value).tolist()
+ fill_value = narray(fill_value).tolist()
fval = numpy.resize(fill_value, len(descr))
if len(descr) > 1:
fill_value = [numpy.asarray(f).astype(d[1]).item()
@@ -259,7 +259,7 @@ def filled(a, value = None):
"""
if hasattr(a, 'filled'):
return a.filled(value)
- elif isinstance(a, ndarray):
+ elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
elif isinstance(a, dict):
@@ -1579,21 +1579,21 @@ class MaskedArray(numeric.ndarray):
if self._mask is not nomask:
data = data[numpy.logical_not(ndarray.ravel(self._mask))]
return data
-
-
+
+
def compress(self, condition, axis=None, out=None):
"""Return a where condition is True.
If condition is a MaskedArray, missing values are considered as False.
-
+
Returns
-------
A MaskedArray object.
-
+
Notes
-----
- Please note the difference with compressed() !
+ Please note the difference with compressed() !
The output of compress has a mask, the output of compressed does not.
-
+
"""
# Get the basic components
(_data, _mask) = (self._data, self._mask)
@@ -2169,16 +2169,16 @@ masked_%(name)s(data = %(data)s,
Notes
-----
- The value returned is by default a biased estimate of the
+ The value returned is by default a biased estimate of the
true variance, since the mean is computed by dividing by N-ddof.
For the (more standard) unbiased estimate, use ddof=1 or.
- Note that for complex numbers the absolute value is taken before
+ Note that for complex numbers the absolute value is taken before
squaring, so that the result is always real and nonnegative.
"""
if self._mask is nomask:
# TODO: Do we keep super, or var _data and take a view ?
- return super(MaskedArray, self).var(axis=axis, dtype=dtype,
+ return super(MaskedArray, self).var(axis=axis, dtype=dtype,
ddof=ddof)
else:
cnt = self.count(axis=axis)-ddof
@@ -2213,17 +2213,17 @@ masked_%(name)s(data = %(data)s,
Notes
-----
- The value returned is by default a biased estimate of the
- true standard deviation, since the mean is computed by dividing
- by N-ddof. For the more standard unbiased estimate, use ddof=1.
- Note that for complex numbers the absolute value is taken before
+ The value returned is by default a biased estimate of the
+ true standard deviation, since the mean is computed by dividing
+ by N-ddof. For the more standard unbiased estimate, use ddof=1.
+ Note that for complex numbers the absolute value is taken before
squaring, so that the result is always real and nonnegative.
"""
dvar = self.var(axis,dtype,ddof=ddof)
if axis is not None or dvar is not masked:
dvar = sqrt(dvar)
return dvar
-
+
#............................................
def round(self, decimals=0, out=None):
result = self._data.round(decimals).view(type(self))
@@ -2564,7 +2564,7 @@ masked_%(name)s(data = %(data)s,
#........................
def tofile(self, fid, sep="", format="%s"):
raise NotImplementedError("Not implemented yet, sorry...")
-
+
#--------------------------------------------
# Pickling
def __getstate__(self):
@@ -2886,7 +2886,7 @@ def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None
a = narray(a, copy=True, subok=True)
if axis is None:
a = a.flatten()
- axis = 0
+ axis = 0
if fill_value is None:
if endwith:
filler = minimum_fill_value(a)
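The compressed()/compress() distinction called out in the docstring above, in doctest form:

    >>> import numpy as np
    >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
    >>> x.compressed()                # masked entries dropped; the result is a plain ndarray
    array([1, 3])
    >>> y = x.compress([1, 1, 0, 0])  # selects the first two entries, mask included
    >>> y.count()                     # the second of them is masked, so only one counts
    1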
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index a8dec0637..928ea9498 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -11,18 +11,18 @@ __version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
-__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d',
- 'average',
+__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d',
+ 'average',
'column_stack','compress_cols','compress_rowcols', 'compress_rows',
'count_masked',
'dot','dstack',
'expand_dims',
- 'flatnotmasked_contiguous','flatnotmasked_edges',
- 'hsplit','hstack',
+ 'flatnotmasked_contiguous','flatnotmasked_edges',
+ 'hsplit','hstack',
'mask_cols','mask_rowcols','mask_rows','masked_all','masked_all_like',
- 'median','mediff1d','mr_',
- 'notmasked_contiguous','notmasked_edges',
- 'row_stack',
+ 'median','mediff1d','mr_',
+ 'notmasked_contiguous','notmasked_edges',
+ 'row_stack',
'vstack',
]
@@ -262,7 +262,7 @@ def average(a, axis=None, weights=None, returned=False):
the size of a along the given axis.
If no weights are given, weights are assumed to be 1.
returned : bool
- Flag indicating whether a tuple (result, sum of weights/counts)
+ Flag indicating whether a tuple (result, sum of weights/counts)
should be returned as output (True), or just the result (False).
"""
@@ -417,7 +417,7 @@ def median(a, axis=0, out=None, overwrite_input=False):
else:
choice = slice(idx-1,idx+1)
return data[choice].mean(0)
- #
+ #
if overwrite_input:
if axis is None:
sorted = a.ravel()
@@ -432,7 +432,7 @@ def median(a, axis=0, out=None, overwrite_input=False):
else:
result = apply_along_axis(_median1D, axis, sorted)
return result
-
+
@@ -445,7 +445,7 @@ def compress_rowcols(x, axis=None):
- If axis is None, rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
-
+
Parameters
----------
axis : int, optional
@@ -504,7 +504,7 @@ def mask_rowcols(a, axis=None):
axis : int, optional
Axis along which to perform the operation.
If None, applies to a flattened version of the array.
-
+
Returns
-------
a *pure* ndarray.
@@ -795,7 +795,7 @@ def notmasked_contiguous(a, axis=None):
axis : int, optional
Axis along which to perform the operation.
If None, applies to a flattened version of the array.
-
+
Returns
-------
a sorted sequence of slices (start index, end index).
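numpy.ma.average and numpy.ma.median, whose docstrings are touched above, skip masked entries instead of propagating them; a small sketch with the results given as comments, since exact reprs vary between versions:

    >>> import numpy as np
    >>> x = np.ma.array([1., 2., 3., 4.], mask=[0, 0, 1, 1])
    >>> np.ma.average(x)                                         # -> 1.5, only unmasked values enter
    >>> np.ma.median(np.ma.array([1., 2., 9.], mask=[0, 0, 1]))  # -> 1.5, median of the unmasked pair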
diff --git a/numpy/ma/morestats.py b/numpy/ma/morestats.py
index 9816608b7..7dbc844b0 100644
--- a/numpy/ma/morestats.py
+++ b/numpy/ma/morestats.py
@@ -59,7 +59,7 @@ Returns
Notes
-----
The function is restricted to 2D arrays.
-
+
"""
def _hd_1D(data,prob,var):
"Computes the HD quantiles for a 1D array. Returns nan for invalid data."
@@ -114,7 +114,7 @@ Parameters
Axis along which to compute the quantiles. If None, use a flattened array.
var : boolean
Whether to return the variance of the estimate.
-
+
"""
result = hdquantiles(data,[0.5], axis=axis, var=var)
return result.squeeze()
@@ -137,7 +137,7 @@ Parameters
Notes
-----
The function is restricted to 2D arrays.
-
+
"""
def _hdsd_1D(data,prob):
"Computes the std error for 1D arrays."
@@ -192,7 +192,7 @@ Parameters
Confidence level of the intervals.
axis : int
Axis along which to cut. If None, uses a flattened version of the input.
-
+
"""
data = masked_array(data, copy=False)
trimmed = trim_both(data, proportiontocut=proportiontocut, axis=axis)
@@ -215,7 +215,7 @@ Parameters
Sequence of quantiles to compute.
axis : int
Axis along which to compute the quantiles. If None, use a flattened array.
-
+
"""
def _mjci_1D(data, p):
data = data.compressed()
@@ -345,7 +345,7 @@ def rank_data(data, axis=None, use_missing=False):
along the given axis.
If some values are tied, their rank is averaged.
- If some values are masked, their rank is set to 0 if use_missing is False,
+ If some values are masked, their rank is set to 0 if use_missing is False,
or set to the average rank of the unmasked values if use_missing is True.
Parameters
@@ -353,8 +353,8 @@ def rank_data(data, axis=None, use_missing=False):
data : sequence
Input data. The data is transformed to a masked array
axis : integer
- Axis along which to perform the ranking.
- If None, the array is first flattened. An exception is raised if
+ Axis along which to perform the ranking.
+ If None, the array is first flattened. An exception is raised if
the axis is specified for arrays with a dimension larger than 2
use_missing : boolean
Whether the masked values have a rank of 0 (False) or equal to the
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 29b090540..5fcd8717e 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -142,7 +142,7 @@ class MaskedRecords(MaskedArray, object):
msg = "Mask and data not compatible: data size is %i, "+\
"mask size is %i."
raise MAError(msg % (nd, nm))
- copy = True
+ copy = True
if not keep_mask:
self.__setmask__(mask)
self._sharedmask = True
@@ -214,7 +214,7 @@ class MaskedRecords(MaskedArray, object):
def _getmask(self):
"""Return the mask of the mrecord.
A record is masked when all the fields are masked.
-
+
"""
if self.size > 1:
return self._fieldmask.view((bool_, len(self.dtype))).all(1)
@@ -415,7 +415,7 @@ The fieldname base is either `_data` or `_mask`."""
return ndarray.view(self, obj)
#......................................................
def filled(self, fill_value=None):
- """Returns an array of the same class as the _data part, where masked
+ """Returns an array of the same class as the _data part, where masked
values are filled with fill_value.
If fill_value is None, self.fill_value is used instead.
@@ -487,11 +487,11 @@ The fieldname base is either `_data` or `_mask`."""
self._fieldmask.tostring(),
self._fill_value,
)
- return state
+ return state
#
def __setstate__(self, state):
- """Restore the internal state of the masked array, for pickling purposes.
- ``state`` is typically the output of the ``__getstate__`` output, and is a
+ """Restore the internal state of the masked array, for pickling purposes.
+ ``state`` is typically the output of the ``__getstate__`` output, and is a
5-tuple:
- class name
@@ -570,8 +570,8 @@ def fromarrays(arraylist, dtype=None, shape=None, formats=None,
"""
datalist = [getdata(x) for x in arraylist]
masklist = [getmaskarray(x) for x in arraylist]
- _array = recfromarrays(datalist,
- dtype=dtype, shape=shape, formats=formats,
+ _array = recfromarrays(datalist,
+ dtype=dtype, shape=shape, formats=formats,
names=names, titles=titles, aligned=aligned,
byteorder=byteorder).view(mrecarray)
_array._fieldmask[:] = zip(*masklist)
@@ -629,8 +629,8 @@ def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
if dtype is None:
dtype = reclist.dtype
reclist = reclist.tolist()
- mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
- names=names, titles=titles,
+ mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
+ names=names, titles=titles,
aligned=aligned, byteorder=byteorder).view(mrecarray)
# Set the fill_value if needed
if fill_value is not None:
@@ -805,5 +805,3 @@ if 1:
import cPickle
_ = cPickle.dumps(mbase)
mrec_ = cPickle.loads(_)
-
- \ No newline at end of file
diff --git a/numpy/ma/mstats.py b/numpy/ma/mstats.py
index 7dc5a7cc3..093215e30 100644
--- a/numpy/ma/mstats.py
+++ b/numpy/ma/mstats.py
@@ -33,9 +33,9 @@ __all__ = ['cov','meppf','plotting_positions','meppf','mquantiles',
def winsorize(data, alpha=0.2):
"""Returns a Winsorized version of the input array.
-
- The (alpha/2.) lowest values are set to the (alpha/2.)th percentile,
- and the (alpha/2.) highest values are set to the (1-alpha/2.)th
+
+ The (alpha/2.) lowest values are set to the (alpha/2.)th percentile,
+ and the (alpha/2.) highest values are set to the (1-alpha/2.)th
percentile.
Masked values are skipped.
@@ -44,7 +44,7 @@ def winsorize(data, alpha=0.2):
data : ndarray
Input data to Winsorize. The data is first flattened.
alpha : float
- Percentage of total Winsorization: alpha/2. on the left,
+ Percentage of total Winsorization: alpha/2. on the left,
alpha/2. on the right
"""
@@ -57,8 +57,8 @@ def winsorize(data, alpha=0.2):
#..............................................................................
def trim_both(data, proportiontocut=0.2, axis=None):
- """Trims the data by masking the int(trim*n) smallest and int(trim*n)
- largest values of data along the given axis, where n is the number
+ """Trims the data by masking the int(trim*n) smallest and int(trim*n)
+ largest values of data along the given axis, where n is the number
of unmasked values.
Parameters
@@ -66,11 +66,11 @@ def trim_both(data, proportiontocut=0.2, axis=None):
data : ndarray
Data to trim.
proportiontocut : float
- Percentage of trimming. If n is the number of unmasked values
+ Percentage of trimming. If n is the number of unmasked values
before trimming, the number of values after trimming is:
(1-2*trim)*n.
axis : int
- Axis along which to perform the trimming.
+ Axis along which to perform the trimming.
If None, the input array is first flattened.
Notes
@@ -99,7 +99,7 @@ def trim_both(data, proportiontocut=0.2, axis=None):
#..............................................................................
def trim_tail(data, proportiontocut=0.2, tail='left', axis=None):
- """Trims the data by masking int(trim*n) values from ONE tail of the
+ """Trims the data by masking int(trim*n) values from ONE tail of the
data along the given axis, where n is the number of unmasked values.
Parameters
@@ -107,16 +107,16 @@ def trim_tail(data, proportiontocut=0.2, tail='left', axis=None):
data : ndarray
Data to trim.
proportiontocut : float
- Percentage of trimming. If n is the number of unmasked values
- before trimming, the number of values after trimming is
+ Percentage of trimming. If n is the number of unmasked values
+ before trimming, the number of values after trimming is
(1-trim)*n.
tail : string
- Trimming direction, in ('left', 'right').
- If left, the ``proportiontocut`` lowest values are set to the
- corresponding percentile. If right, the ``proportiontocut``
+ Trimming direction, in ('left', 'right').
+ If left, the ``proportiontocut`` lowest values are set to the
+ corresponding percentile. If right, the ``proportiontocut``
highest values are used instead.
axis : int
- Axis along which to perform the trimming.
+ Axis along which to perform the trimming.
If None, the input array is first flattened.
Notes
@@ -158,7 +158,7 @@ def trim_tail(data, proportiontocut=0.2, tail='left', axis=None):
#..............................................................................
def trimmed_mean(data, proportiontocut=0.2, axis=None):
- """Returns the trimmed mean of the data along the given axis.
+ """Returns the trimmed mean of the data along the given axis.
Trimming is performed on both ends of the distribution.
Parameters
@@ -169,7 +169,7 @@ def trimmed_mean(data, proportiontocut=0.2, axis=None):
Proportion of the data to cut from each side of the data .
As a result, (2*proportiontocut*n) values are actually trimmed.
axis : int
- Axis along which to perform the trimming.
+ Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
@@ -188,7 +188,7 @@ def trimmed_stde(data, proportiontocut=0.2, axis=None):
Proportion of the data to cut from each side of the data .
As a result, (2*proportiontocut*n) values are actually trimmed.
axis : int
- Axis along which to perform the trimming.
+ Axis along which to perform the trimming.
If None, the input array is first flattened.
Notes
@@ -222,7 +222,7 @@ median along the given axis.
data : ndarray
Data to trim.
axis : int
- Axis along which to perform the trimming.
+ Axis along which to perform the trimming.
If None, the input array is first flattened.
"""
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index e1d4048a8..bdbe896e7 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -848,7 +848,7 @@ class TestMA(NumpyTestCase):
assert_equal(xf.dtype, float_)
assert_equal(xs.data, ['A', 'b', 'pi'])
assert_equal(xs.dtype, '|S3')
-
+
#...............................................................................
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 2e1eebb04..fe0ef3b2e 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -324,7 +324,7 @@ class TestApplyAlongAxis(NumpyTestCase):
return b[1]
xa = apply_along_axis(myfunc,2,a)
assert_equal(xa,[[1,4],[7,10]])
-
+
class TestMedian(NumpyTestCase):
def __init__(self, *args, **kwds):
NumpyTestCase.__init__(self, *args, **kwds)
@@ -333,7 +333,7 @@ class TestMedian(NumpyTestCase):
"Tests median w/ 2D"
(n,p) = (101,30)
x = masked_array(numpy.linspace(-1.,1.,n),)
- x[:10] = x[-10:] = masked
+ x[:10] = x[-10:] = masked
z = masked_array(numpy.empty((n,p), dtype=numpy.float_))
z[:,0] = x[:]
idx = numpy.arange(len(x))
@@ -352,8 +352,8 @@ class TestMedian(NumpyTestCase):
assert_equal(median(x,0),[[99,10],[11,99],[13,14]])
x = numpy.ma.arange(24).reshape(4,3,2)
x[x%5==0] = masked
- assert_equal(median(x,0), [[12,10],[8,9],[16,17]])
-
+ assert_equal(median(x,0), [[12,10],[8,9],[16,17]])
+
###############################################################################
#------------------------------------------------------------------------------
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 2f3931878..cb550a9aa 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -41,7 +41,7 @@ class TestMRecords(NumpyTestCase):
ddtype = [('a',int),('b',float),('c','|S8')]
mask = [0,1,0,0,1]
self.base = ma.array(zip(ilist,flist,slist), mask=mask, dtype=ddtype)
-
+
def test_byview(self):
"Test creation by view"
base = self.base
@@ -69,7 +69,7 @@ class TestMRecords(NumpyTestCase):
assert_equal(mbase_first.mask, nomask)
assert_equal(mbase_first._fieldmask.item(), (False, False, False))
assert_equal(mbase_first['a'], mbase['a'][0])
- mbase_last = mbase[-1]
+ mbase_last = mbase[-1]
assert isinstance(mbase_last, mrecarray)
assert_equal(mbase_last.dtype, mbase.dtype)
assert_equal(mbase_last.tolist(), (None,None,None))
@@ -87,7 +87,7 @@ class TestMRecords(NumpyTestCase):
assert_equal(getattr(mbase_sl,field), base[:2][field])
def test_set_fields(self):
- "Tests setting fields."
+ "Tests setting fields."
base = self.base.copy()
mbase = base.view(mrecarray)
mbase = mbase.copy()
@@ -101,7 +101,7 @@ class TestMRecords(NumpyTestCase):
assert_equal(mbase['a']._data, [1]*5)
assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
assert_equal(mbase._mask, [False]*5)
- assert_equal(mbase._fieldmask.tolist(),
+ assert_equal(mbase._fieldmask.tolist(),
np.array([(0,0,0),(0,1,1),(0,0,0),(0,0,0),(0,1,1)],
dtype=bool))
# Set a field to mask ........................
@@ -109,7 +109,7 @@ class TestMRecords(NumpyTestCase):
assert_equal(mbase.c.mask, [1]*5)
assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
assert_equal(ma.getdata(mbase['c']), ['N/A']*5)
- assert_equal(mbase._fieldmask.tolist(),
+ assert_equal(mbase._fieldmask.tolist(),
np.array([(0,0,1),(0,1,1),(0,0,1),(0,0,1),(0,1,1)],
dtype=bool))
# Set fields by slices .......................
@@ -129,12 +129,12 @@ class TestMRecords(NumpyTestCase):
assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
assert_equal(mbase['a']._mask, mbase['b']._mask)
assert_equal(mbase['a']._mask, mbase['c']._mask)
- assert_equal(mbase._fieldmask.tolist(),
+ assert_equal(mbase._fieldmask.tolist(),
np.array([(1,1,1)]*5, dtype=bool))
# Delete the mask ............................
mbase._mask = nomask
assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
- assert_equal(mbase._fieldmask.tolist(),
+ assert_equal(mbase._fieldmask.tolist(),
np.array([(0,0,0)]*5, dtype=bool))
#
def test_set_mask_fromarray(self):
@@ -154,7 +154,7 @@ class TestMRecords(NumpyTestCase):
def test_set_mask_fromfields(self):
mbase = self.base.copy().view(mrecarray)
#
- nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)],
+ nmask = np.array([(0,1,0),(0,1,0),(1,0,1),(1,0,1),(0,0,0)],
dtype=[('a',bool),('b',bool),('c',bool)])
mbase.mask = nmask
assert_equal(mbase.a.mask, [0,0,1,1,0])
@@ -240,8 +240,8 @@ class TestMRecords(NumpyTestCase):
_b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
_c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8')
ddtype = [('a',int),('b',float),('c','|S8')]
- mrec = fromarrays([_a,_b,_c], dtype=ddtype,
- fill_value=(99999,99999.,'N/A'))
+ mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+ fill_value=(99999,99999.,'N/A'))
mrecfilled = mrec.filled()
assert_equal(mrecfilled['a'], np.array((1,2,99999), dtype=int))
assert_equal(mrecfilled['b'], np.array((1.1,2.2,99999.), dtype=float))
@@ -253,8 +253,8 @@ class TestMRecords(NumpyTestCase):
_b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
_c = ma.array(['one','two','three'],mask=[1,0,0],dtype='|S8')
ddtype = [('a',int),('b',float),('c','|S8')]
- mrec = fromarrays([_a,_b,_c], dtype=ddtype,
- fill_value=(99999,99999.,'N/A'))
+ mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+ fill_value=(99999,99999.,'N/A'))
#
assert_equal(mrec.tolist(),
[(1,1.1,None),(2,2.2,'two'),(None,None,'three')])
@@ -272,11 +272,11 @@ class TestMRecordsImport(NumpyTestCase):
_b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
_c = ma.array(['one','two','three'],mask=[0,0,1],dtype='|S8')
ddtype = [('a',int),('b',float),('c','|S8')]
- mrec = fromarrays([_a,_b,_c], dtype=ddtype,
- fill_value=(99999,99999.,'N/A'))
+ mrec = fromarrays([_a,_b,_c], dtype=ddtype,
+ fill_value=(99999,99999.,'N/A'))
nrec = recfromarrays((_a.data,_b.data,_c.data), dtype=ddtype)
self.data = (mrec, nrec, ddtype)
-
+
def test_fromarrays(self):
_a = ma.array([1,2,3],mask=[0,0,1],dtype=int)
_b = ma.array([1.1,2.2,3.3],mask=[0,0,1],dtype=float)
@@ -284,8 +284,8 @@ class TestMRecordsImport(NumpyTestCase):
(mrec, nrec, _) = self.data
for (f,l) in zip(('a','b','c'),(_a,_b,_c)):
assert_equal(getattr(mrec,f)._mask, l._mask)
-
-
+
+
def test_fromrecords(self):
"Test construction from records."
(mrec, nrec, ddtype) = self.data
@@ -300,7 +300,7 @@ class TestMRecordsImport(NumpyTestCase):
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
- assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
+ assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
#
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1',int),('c2',float),('c3','|S5')])
@@ -311,7 +311,7 @@ class TestMRecordsImport(NumpyTestCase):
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._fieldmask, mrec._fieldmask)
-
+
def test_fromrecords_wmask(self):
"Tests construction from records w/ mask."
(mrec, nrec, ddtype) = self.data
@@ -328,7 +328,7 @@ class TestMRecordsImport(NumpyTestCase):
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._fieldmask.tolist(), mrec._fieldmask.tolist())
#
- _mrec = fromrecords(nrec.tolist(), dtype=ddtype,
+ _mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._fieldmask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._fieldmask.tolist(), mrec._fieldmask.tolist())
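The masked-record fixtures exercised by these tests can be built directly; a minimal sketch, assuming fromarrays is importable from numpy.ma.mrecords (the module under test) and mirroring the test data above:

    import numpy.ma as ma
    from numpy.ma.mrecords import fromarrays

    _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
    _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
    _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
    ddtype = [('a', int), ('b', float), ('c', '|S8')]
    mrec = fromarrays([_a, _b, _c], dtype=ddtype,
                      fill_value=(99999, 99999., 'N/A'))

    print(mrec.tolist())       # masked entries surface as None
    print(mrec.filled()['a'])  # masked values in 'a' replaced by the fill value 99999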
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index 4ea49379f..c4722f940 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -216,4 +216,4 @@ def assert_mask_equal(m1, m2):
assert_array_equal(m1, m2)
if __name__ == '__main__':
-    pass
\ No newline at end of file
+ pass
diff --git a/numpy/oldnumeric/compat.py b/numpy/oldnumeric/compat.py
index 63924ca69..7f123fa69 100644
--- a/numpy/oldnumeric/compat.py
+++ b/numpy/oldnumeric/compat.py
@@ -88,7 +88,7 @@ def _LoadArray(fp):
dstr = fp.read(sz)
m = mu.fromstring(dstr, typeconv.convtypecode(typecode))
m.shape = shape
-
+
if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'):
return m.byteswap(True)
else:
@@ -97,10 +97,10 @@ def _LoadArray(fp):
import pickle, copy
class Unpickler(pickle.Unpickler):
def load_array(self):
- self.stack.append(_LoadArray(self))
+ self.stack.append(_LoadArray(self))
dispatch = copy.copy(pickle.Unpickler.dispatch)
- dispatch['A'] = load_array
+ dispatch['A'] = load_array
class Pickler(pickle.Pickler):
def __init__(self, *args, **kwds):
diff --git a/numpy/oldnumeric/ma.py b/numpy/oldnumeric/ma.py
index 1e8e831ba..532e4f905 100644
--- a/numpy/oldnumeric/ma.py
+++ b/numpy/oldnumeric/ma.py
@@ -2265,5 +2265,3 @@ def average(a, axis=0, weights=None, returned=0):
def take(a, indices, axis=0):
return new_take(a, indices, axis)
-
-
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 8374999ff..64cc30478 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -311,7 +311,7 @@ def raises(*exceptions):
# Anything else.
raise
else:
- raise AssertionError('%s() did not raise one of (%s)' %
+ raise AssertionError('%s() did not raise one of (%s)' %
(function.__name__, ', '.join([e.__name__ for e in exceptions])))
try:
f2.__name__ = function.__name__
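To round out the raises hunk above, a minimal usage sketch, assuming the decorator is importable from numpy.testing.utils (the file being patched) and behaves as the visible code suggests: the wrapped callable passes when it raises one of the listed exceptions, re-raises anything unexpected, and otherwise triggers the AssertionError message shown above:

    from numpy.testing.utils import raises

    @raises(ValueError, TypeError)
    def test_rejects_bad_input():
        raise ValueError("bad input")   # accepted: a listed exception

    @raises(KeyError)
    def test_never_raises():
        return None                     # raises nothing at all

    test_rejects_bad_input()            # passes silently
    # test_never_raises() would raise:
    # AssertionError: test_never_raises() did not raise one of (KeyError)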