summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.travis.yml6
-rw-r--r--doc/release/1.11.0-notes.rst16
-rw-r--r--doc/release/1.12.0-notes.rst37
-rw-r--r--numpy/_build_utils/src/apple_sgemv_fix.c2
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py6
-rw-r--r--numpy/core/fromnumeric.py226
-rw-r--r--numpy/core/function_base.py11
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h2
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h8
-rw-r--r--numpy/core/info.py2
-rw-r--r--numpy/core/numeric.py4
-rw-r--r--numpy/core/src/multiarray/arrayobject.c2
-rw-r--r--numpy/core/src/multiarray/ctors.c2
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c2
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.h2
-rw-r--r--numpy/core/src/multiarray/datetime_strings.c6
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c2
-rw-r--r--numpy/core/src/multiarray/einsum.c.src2
-rw-r--r--numpy/core/src/multiarray/item_selection.c4
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src2
-rw-r--r--numpy/core/src/multiarray/methods.c2
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c2
-rw-r--r--numpy/core/src/multiarray/nditer_api.c4
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c2
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c2
-rw-r--r--numpy/core/src/multiarray/nditer_templ.c.src2
-rw-r--r--numpy/core/src/multiarray/scalarapi.c2
-rw-r--r--numpy/core/src/multiarray/usertypes.c2
-rw-r--r--numpy/core/src/private/ufunc_override.h6
-rw-r--r--numpy/core/src/umath/ufunc_object.c2
-rw-r--r--numpy/core/tests/test_einsum.py2
-rw-r--r--numpy/core/tests/test_function_base.py15
-rw-r--r--numpy/core/tests/test_multiarray.py46
-rw-r--r--numpy/core/tests/test_numeric.py18
-rw-r--r--numpy/core/tests/test_records.py9
-rw-r--r--numpy/core/tests/test_umath.py25
-rw-r--r--numpy/distutils/exec_command.py2
-rw-r--r--numpy/distutils/fcompiler/gnu.py2
-rw-r--r--numpy/distutils/from_template.py2
-rw-r--r--numpy/doc/byteswapping.py2
-rw-r--r--numpy/doc/internals.py2
-rw-r--r--numpy/f2py/capi_maps.py2
-rwxr-xr-xnumpy/f2py/crackfortran.py2
-rw-r--r--numpy/f2py/rules.py6
-rw-r--r--numpy/lib/arraypad.py2
-rw-r--r--numpy/lib/function_base.py170
-rw-r--r--numpy/lib/nanfunctions.py285
-rw-r--r--numpy/lib/npyio.py35
-rw-r--r--numpy/lib/tests/test_arraypad.py11
-rw-r--r--numpy/lib/tests/test_function_base.py20
-rw-r--r--numpy/lib/tests/test_io.py40
-rw-r--r--numpy/lib/tests/test_nanfunctions.py5
-rw-r--r--numpy/linalg/lapack_lite/dlapack_lite.c6
-rw-r--r--numpy/linalg/lapack_lite/zlapack_lite.c2
-rw-r--r--numpy/ma/core.py8
-rw-r--r--numpy/random/mtrand/mtrand.pyx30
-rw-r--r--numpy/tests/test_scripts.py11
57 files changed, 756 insertions, 374 deletions
diff --git a/.travis.yml b/.travis.yml
index ccb182816..040d7362e 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -77,6 +77,12 @@ before_install:
- pushd builds
# Build into own virtualenv
# We therefore control our own environment, avoid travis' numpy
+ #
+ # Some change in virtualenv 14.0.5 caused `test_f2py` to fail. So, we have
+ # pinned `virtualenv` to the last known working version to avoid this failure.
+ # Appears we had some issues with certificates on Travis. It looks like
+ # bumping to 14.0.6 will help.
+ - pip install -U 'virtualenv==14.0.6'
- virtualenv --python=python venv
- source venv/bin/activate
- python -V
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index c9287ed3f..aa11cdf07 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -76,12 +76,13 @@ printing it would convert from or to local time::
>>>> np.datetime64('2000-01-01T00:00:00')
numpy.datetime64('2000-01-01T00:00:00-0800') # note the timezone offset -08:00
+
A consensus of datetime64 users agreed that this behavior is undesirable
-and at odds with how datetime64 is usually used (e.g., by pandas_). For
-most use cases, a timezone naive datetime type is preferred, similar to the
-``datetime.datetime`` type in the Python standard library. Accordingly,
-datetime64 no longer assumes that input is in local time, nor does it print
-local times::
+and at odds with how datetime64 is usually used (e.g., by `pandas
+<http://pandas.pydata.org>`__). For most use cases, a timezone naive datetime
+type is preferred, similar to the ``datetime.datetime`` type in the Python
+standard library. Accordingly, datetime64 no longer assumes that input is in
+local time, nor does it print local times::
>>>> np.datetime64('2000-01-01T00:00:00')
numpy.datetime64('2000-01-01T00:00:00')
@@ -99,14 +100,12 @@ As a corollary to this change, we no longer prohibit casting between datetimes
with date units and datetimes with time units. With timezone naive datetimes,
the rule for casting from dates to times is no longer ambiguous.
-pandas_: http://pandas.pydata.org
-
``linalg.norm`` return type changes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The return type of the ``linalg.norm`` function is now floating point without
exception. Some of the norm types previously returned integers.
-and returns floating results.polynomial fit changes
+polynomial fit changes
~~~~~~~~~~~~~~~~~~~~~~
The various fit functions in the numpy polynomial package no longer accept
non-integers for degree specification.
@@ -143,6 +142,7 @@ FutureWarning to changed behavior
due to a bug, sometimes no warning was raised and the dimensions were
already preserved.
+
C API
~~~~~
diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
index ee4e2d24a..ce606e5b5 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/release/1.12.0-notes.rst
@@ -32,10 +32,39 @@ default order for arrays that are now both.
``MaskedArray`` takes view of data **and** mask when slicing
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
XXX
+``np.percentile`` 'midpoint' interpolation method fixed for exact indices
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+'midpoint' interpolator now gives the same result as 'lower' and 'higher' when
+the two coincide. Previous behavior of 'lower' + 0.5 is fixed.
+
+
+``keepdims`` kwarg is passed through to user-class methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+numpy functions that take a ``keepdims`` kwarg now pass the value
+through to the corresponding methods on ndarray sub-classes. Previously the
+``keepdims`` keyword would be silently dropped. These functions now have
+the following behavior:
+
+1. If user does not provide ``keepdims``, no keyword is passed to the underlying
+ method.
+2. Any user-provided value of ``keepdims`` is passed through as a keyword
+ argument to the method.
+
+This will raise in the case where the method does not support a
+``keepdims`` kwarg and the user explicitly passes in ``keepdims``.
+
+
+The following functions are changed: ``sum``, ``product``,
+``sometrue``, ``alltrue``, ``any``, ``all``, ``amax``, ``amin``,
+``prod``, ``mean``, ``std``, ``var``, ``nanmin``, ``nanmax``,
+``nansum``, ``nanprod``, ``nanmean``, ``nanmedian``, ``nanvar``,
+``nanstd``
+
+
DeprecationWarning to error
~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -56,6 +85,11 @@ New Features
Improvements
============
+*np.loadtxt* now supports a single integer as ``usecol`` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Instead of using ``usecol=(n,)`` to read the nth column of a file
+it is now allowed to use ``usecol=n``. Also the error message is
+more user friendly when a non-integer is passed as a column index.
Changes
@@ -63,4 +97,3 @@ Changes
Deprecations
============
-
diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c
index ffdfb81f7..4c9c82ece 100644
--- a/numpy/_build_utils/src/apple_sgemv_fix.c
+++ b/numpy/_build_utils/src/apple_sgemv_fix.c
@@ -155,7 +155,7 @@ void sgemv_( const char* trans, const int* m, const int* n,
*
* Because Fortran uses column major order and X.T and Y.T are row vectors,
* the leading dimensions of X.T and Y.T in SGEMM become equal to the
- * strides of the the column vectors X and Y in SGEMV. */
+ * strides of the column vectors X and Y in SGEMV. */
switch (*trans) {
case 'T':
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 34ac59984..e3600406c 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2393,11 +2393,11 @@ add_newdoc('numpy.core.umath', 'fmin',
Examples
--------
>>> np.fmin([2, 3, 4], [1, 5, 2])
- array([2, 5, 4])
+ array([1, 3, 2])
>>> np.fmin(np.eye(2), [0.5, 2])
- array([[ 1. , 2. ],
- [ 0.5, 2. ]])
+ array([[ 0.5, 0. ],
+ [ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
array([ 0., 0., NaN])
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 4faeb557a..52a15e30d 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -17,7 +17,6 @@ from . import _methods
_dt_ = nt.sctype2char
-
# functions that are methods
__all__ = [
'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
@@ -1380,6 +1379,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return asanyarray(a).trace(offset, axis1, axis2, dtype, out)
+
def ravel(a, order='C'):
"""Return a contiguous flattened array.
@@ -1740,7 +1740,7 @@ def clip(a, a_min, a_max, out=None):
return clip(a_min, a_max, out)
-def sum(a, axis=None, dtype=None, out=None, keepdims=False):
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Sum of array elements over a given axis.
@@ -1770,9 +1770,15 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False):
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result
- will broadcast correctly against the input array.
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `arr`.
+
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `sum` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+ sub-classes `sum` method does not implement `keepdims` any
+ exceptions will be raised.
Returns
-------
@@ -1821,26 +1827,27 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=False):
-128
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
- elif type(a) is not mu.ndarray:
+ if type(a) is not mu.ndarray:
try:
sum = a.sum
except AttributeError:
- return _methods._sum(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
- # NOTE: Dropping the keepdims parameters here...
- return sum(axis=axis, dtype=dtype, out=out)
- else:
- return _methods._sum(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
+ pass
+ else:
+ return sum(axis=axis, dtype=dtype, out=out, **kwargs)
+ return _methods._sum(a, axis=axis, dtype=dtype,
+ out=out, **kwargs)
-def product(a, axis=None, dtype=None, out=None, keepdims=False):
+def product(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -1849,11 +1856,13 @@ def product(a, axis=None, dtype=None, out=None, keepdims=False):
prod : equivalent function; see for details.
"""
- return um.multiply.reduce(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return um.multiply.reduce(a, axis=axis, dtype=dtype, out=out, **kwargs)
-def sometrue(a, axis=None, out=None, keepdims=False):
+def sometrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check whether some values are true.
@@ -1865,14 +1874,13 @@ def sometrue(a, axis=None, out=None, keepdims=False):
"""
arr = asanyarray(a)
-
- try:
- return arr.any(axis=axis, out=out, keepdims=keepdims)
- except TypeError:
- return arr.any(axis=axis, out=out)
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return arr.any(axis=axis, out=out, **kwargs)
-def alltrue(a, axis=None, out=None, keepdims=False):
+def alltrue(a, axis=None, out=None, keepdims=np._NoValue):
"""
Check if all elements of input array are true.
@@ -1882,14 +1890,13 @@ def alltrue(a, axis=None, out=None, keepdims=False):
"""
arr = asanyarray(a)
-
- try:
- return arr.all(axis=axis, out=out, keepdims=keepdims)
- except TypeError:
- return arr.all(axis=axis, out=out)
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return arr.all(axis=axis, out=out, **kwargs)
-def any(a, axis=None, out=None, keepdims=False):
+def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
@@ -1915,11 +1922,18 @@ def any(a, axis=None, out=None, keepdims=False):
(e.g., if it is of type float, then it will remain so, returning
1.0 for True and 0.0 for False, regardless of the type of `a`).
See `doc.ufuncs` (Section "Output arguments") for details.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `any` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `any` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
any : bool or ndarray
@@ -1963,14 +1977,13 @@ def any(a, axis=None, out=None, keepdims=False):
"""
arr = asanyarray(a)
-
- try:
- return arr.any(axis=axis, out=out, keepdims=keepdims)
- except TypeError:
- return arr.any(axis=axis, out=out)
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return arr.any(axis=axis, out=out, **kwargs)
-def all(a, axis=None, out=None, keepdims=False):
+def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
@@ -1994,11 +2007,18 @@ def all(a, axis=None, out=None, keepdims=False):
type is preserved (e.g., if ``dtype(out)`` is float, the result
will consist of 0.0's and 1.0's). See `doc.ufuncs` (Section
"Output arguments") for more details.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `all` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `all` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
all : ndarray, bool
@@ -2037,11 +2057,10 @@ def all(a, axis=None, out=None, keepdims=False):
"""
arr = asanyarray(a)
-
- try:
- return arr.all(axis=axis, out=out, keepdims=keepdims)
- except TypeError:
- return arr.all(axis=axis, out=out)
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+ return arr.all(axis=axis, out=out, **kwargs)
def cumsum(a, axis=None, dtype=None, out=None):
@@ -2177,7 +2196,7 @@ def ptp(a, axis=None, out=None):
return ptp(axis, out)
-def amax(a, axis=None, out=None, keepdims=False):
+def amax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
@@ -2197,11 +2216,18 @@ def amax(a, axis=None, out=None, keepdims=False):
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amax` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `amax` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
amax : ndarray or scalar
@@ -2255,20 +2281,23 @@ def amax(a, axis=None, out=None, keepdims=False):
4.0
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
if type(a) is not mu.ndarray:
try:
amax = a.max
except AttributeError:
- return _methods._amax(a, axis=axis,
- out=out, keepdims=keepdims)
- # NOTE: Dropping the keepdims parameter
- return amax(axis=axis, out=out)
- else:
- return _methods._amax(a, axis=axis,
- out=out, keepdims=keepdims)
+ pass
+ else:
+ return amax(axis=axis, out=out, **kwargs)
+
+ return _methods._amax(a, axis=axis,
+ out=out, **kwargs)
-def amin(a, axis=None, out=None, keepdims=False):
+def amin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
@@ -2288,11 +2317,18 @@ def amin(a, axis=None, out=None, keepdims=False):
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `amin` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `amin` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
amin : ndarray or scalar
@@ -2346,17 +2382,19 @@ def amin(a, axis=None, out=None, keepdims=False):
0.0
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
amin = a.min
except AttributeError:
- return _methods._amin(a, axis=axis,
- out=out, keepdims=keepdims)
- # NOTE: Dropping the keepdims parameter
- return amin(axis=axis, out=out)
- else:
- return _methods._amin(a, axis=axis,
- out=out, keepdims=keepdims)
+ pass
+ else:
+ return amin(axis=axis, out=out, **kwargs)
+
+ return _methods._amin(a, axis=axis,
+ out=out, **kwargs)
def alen(a):
@@ -2392,7 +2430,7 @@ def alen(a):
return len(array(a, ndmin=1))
-def prod(a, axis=None, dtype=None, out=None, keepdims=False):
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -2427,6 +2465,12 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False):
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `prod` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `prod` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
@@ -2484,16 +2528,19 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=False):
True
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
prod = a.prod
except AttributeError:
- return _methods._prod(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
- return prod(axis=axis, dtype=dtype, out=out)
- else:
- return _methods._prod(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
+ pass
+ else:
+ return prod(axis=axis, dtype=dtype, out=out, **kwargs)
+
+ return _methods._prod(a, axis=axis, dtype=dtype,
+ out=out, **kwargs)
def cumprod(a, axis=None, dtype=None, out=None):
@@ -2793,7 +2840,7 @@ def round_(a, decimals=0, out=None):
return round(decimals, out)
-def mean(a, axis=None, dtype=None, out=None, keepdims=False):
+def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
@@ -2823,11 +2870,18 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
is ``None``; if provided, it must have the same shape as the
expected output, but the type will be cast if necessary.
See `doc.ufuncs` for details.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `mean` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `mean` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
m : ndarray, see dtype parameter above
@@ -2874,18 +2928,22 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
0.55000000074505806
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if type(a) is not mu.ndarray:
try:
mean = a.mean
- return mean(axis=axis, dtype=dtype, out=out)
except AttributeError:
pass
+ else:
+ return mean(axis=axis, dtype=dtype, out=out, **kwargs)
return _methods._mean(a, axis=axis, dtype=dtype,
- out=out, keepdims=keepdims)
+ out=out, **kwargs)
-def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
@@ -2922,6 +2980,12 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `std` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `std` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
@@ -2981,19 +3045,23 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
0.44999999925494177
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
if type(a) is not mu.ndarray:
try:
std = a.std
- return std(axis=axis, dtype=dtype, out=out, ddof=ddof)
except AttributeError:
pass
+ else:
+ return std(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._std(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
- keepdims=keepdims)
+ **kwargs)
-def var(a, axis=None, dtype=None, out=None, ddof=0,
- keepdims=False):
+def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
@@ -3031,6 +3099,12 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
+ If the default value is passed, then `keepdims` will not be
+ passed through to the `var` method of sub-classes of
+ `ndarray`, however any non-default value will be. If the
+    sub-classes `var` method does not implement `keepdims` any
+ exceptions will be raised.
+
Returns
-------
variance : ndarray, see dtype parameter above
@@ -3089,12 +3163,18 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
0.2025
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
+
if type(a) is not mu.ndarray:
try:
var = a.var
- return var(axis=axis, dtype=dtype, out=out, ddof=ddof)
+
except AttributeError:
pass
+ else:
+ return var(axis=axis, dtype=dtype, out=out, ddof=ddof, **kwargs)
return _methods._var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
- keepdims=keepdims)
+ **kwargs)
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index c82c9bb6b..21ca1af01 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -96,18 +96,23 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
y = _nx.arange(0, num, dtype=dt)
+ delta = stop - start
if num > 1:
- delta = stop - start
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
- y *= delta
+ y = y * delta
else:
- y *= step
+ # One might be tempted to use faster, in-place multiplication here,
+ # but this prevents step from overriding what class is produced,
+ # and thus prevents, e.g., use of Quantities; see gh-7142.
+ y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
+ # Multiply with delta to allow possible override of output class.
+ y = y * delta
y += start
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index f1fe89f1a..34f7b4e21 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -781,7 +781,7 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
/*
* An array never has the next four set; they're only used as parameter
- * flags to the the various FromAny functions
+ * flags to the various FromAny functions
*
* This flag may be requested in constructor functions.
*/
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 6a11cf960..db60a312c 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -320,7 +320,13 @@ static NPY_INLINE FILE *
npy_PyFile_Dup2(PyObject *file,
const char *NPY_UNUSED(mode), npy_off_t *NPY_UNUSED(orig_pos))
{
- return PyFile_AsFile(file);
+ FILE * fp = PyFile_AsFile(file);
+ if (fp == NULL) {
+ PyErr_SetString(PyExc_IOError,
+ "first argument must be an open file");
+ return NULL;
+ }
+ return fp;
}
static NPY_INLINE int
diff --git a/numpy/core/info.py b/numpy/core/info.py
index 241f209b5..c6f7bbcf2 100644
--- a/numpy/core/info.py
+++ b/numpy/core/info.py
@@ -4,7 +4,7 @@ Functions
- array - NumPy Array construction
- zeros - Return an array of all zeros
-- empty - Return an unitialized array
+- empty - Return an uninitialized array
- shape - Return shape of sequence or array
- rank - Return number of dimensions
- size - Return number of elements in entire array or a
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 0b728f804..a672fdc53 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -2198,7 +2198,7 @@ def base_repr(number, base=2, padding=0):
Parameters
----------
number : int
- The value to convert. Only positive values are handled.
+ The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
@@ -2232,6 +2232,8 @@ def base_repr(number, base=2, padding=0):
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
+ elif base < 2:
+ raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index fd5b15a0a..eb952836c 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -11,7 +11,7 @@
by
Travis Oliphant, oliphant@ee.byu.edu
- Brigham Young Univeristy
+ Brigham Young University
maintainer email: oliphant.travis@ieee.org
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 2b8c35234..785b3073a 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1049,7 +1049,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
sd = descr->elsize;
}
/*
- * It is bad to have unitialized OBJECT pointers
+ * It is bad to have uninitialized OBJECT pointers
* which could also be sub-fields of a VOID array
*/
if (zeroed || PyDataType_FLAGCHK(descr, NPY_NEEDS_INIT)) {
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 91ba24c97..b0c53b362 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -214,7 +214,7 @@ qsort_datetime_compare(const void *elem1, const void *elem2)
}
/*
- * Sorts the the array of dates provided in place and removes
+ * Sorts the array of dates provided in place and removes
* NaT, duplicates and any date which is already excluded on account
* of the weekmask.
*
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.h b/numpy/core/src/multiarray/datetime_busdaycal.h
index cd79d0bb5..02903e3d2 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.h
+++ b/numpy/core/src/multiarray/datetime_busdaycal.h
@@ -37,7 +37,7 @@ NPY_NO_EXPORT int
PyArray_WeekMaskConverter(PyObject *weekmask_in, npy_bool *weekmask);
/*
- * Sorts the the array of dates provided in place and removes
+ * Sorts the array of dates provided in place and removes
* NaT, duplicates and any date which is already excluded on account
* of the weekmask.
*
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index 09ddc46d4..4114acae2 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -484,7 +484,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) {
out->hour = 10 * (substr[0] - '0') + (substr[1] - '0');
- if (out->hour < 0 || out->hour >= 24) {
+ if (out->hour >= 24) {
PyErr_Format(PyExc_ValueError,
"Hours out of range in datetime string \"%s\"", str);
goto error;
@@ -515,7 +515,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) {
out->min = 10 * (substr[0] - '0') + (substr[1] - '0');
- if (out->hour < 0 || out->min >= 60) {
+ if (out->min >= 60) {
PyErr_Format(PyExc_ValueError,
"Minutes out of range in datetime string \"%s\"", str);
goto error;
@@ -546,7 +546,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
if (sublen >= 2 && isdigit(substr[0]) && isdigit(substr[1])) {
out->sec = 10 * (substr[0] - '0') + (substr[1] - '0');
- if (out->sec < 0 || out->sec >= 60) {
+ if (out->sec >= 60) {
PyErr_Format(PyExc_ValueError,
"Seconds out of range in datetime string \"%s\"", str);
goto error;
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index bfb22ac30..fd371a1f6 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -4,7 +4,7 @@
* implemented here.
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* See LICENSE.txt for the license.
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index bde543703..ee9ee1abd 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -3,7 +3,7 @@
* which provides an einstein-summation operation.
*
* Copyright (c) 2011 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* See LICENSE.txt for the license.
*/
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 64fa70b6d..9789235c2 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -842,7 +842,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
/*
* For dtype's with objects, copyswapn Py_XINCREF's src
* and Py_XDECREF's dst. This would crash if called on
- * an unitialized buffer, or leak a reference to each
+ * an uninitialized buffer, or leak a reference to each
* object if initialized.
*
* So, first do the copy with no refcounting...
@@ -1003,7 +1003,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
/*
* For dtype's with objects, copyswapn Py_XINCREF's src
* and Py_XDECREF's dst. This would crash if called on
- * an unitialized valbuffer, or leak a reference to
+ * an uninitialized valbuffer, or leak a reference to
* each object item if initialized.
*
* So, first do the copy with no refcounting...
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 0fe63c13b..b8381ab68 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -3,7 +3,7 @@
* strided data.
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* See LICENSE.txt for the license.
*/
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 84d4e2c9e..56b6086ff 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -583,8 +583,6 @@ array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds)
fd = npy_PyFile_Dup2(file, "wb", &orig_pos);
if (fd == NULL) {
- PyErr_SetString(PyExc_IOError,
- "first argument must be a string or open file");
goto fail;
}
if (PyArray_ToFile(self, fd, sep, format) < 0) {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 1df3d653d..bf25130bb 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -3869,7 +3869,7 @@ _PyArray_SigintHandler(int signum)
{
PyOS_setsig(signum, SIG_IGN);
/*
- * jump buffer may be unitialized as SIGINT allowing functions are usually
+ * jump buffer may be uninitialized as SIGINT allowing functions are usually
* run in other threads than the master thread that receives the signal
*/
if (sigint_buf_init > 0) {
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index c00360bfb..21bbbaad4 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -3,7 +3,7 @@
* This excludes functions specialized using the templating system.
*
* Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* Copyright (c) 2011 Enthought, Inc
*
@@ -1847,7 +1847,7 @@ npyiter_goto_iterindex(NpyIter *iter, npy_intp iterindex)
}
/*
- * This gets called after the the buffers have been exhausted, and
+ * This gets called after the buffers have been exhausted, and
* their data needs to be written back to the arrays. The multi-index
* must be positioned for the beginning of the buffer.
*/
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 9c5afedf6..3cbbb2b27 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -3,7 +3,7 @@
* aspects of NumPy's nditer.
*
* Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* Copyright (c) 2011 Enthought, Inc
*
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 25e48ba05..67f5ab99f 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -2,7 +2,7 @@
* This file implements the CPython wrapper of the new NumPy iterator.
*
* Copyright (c) 2010 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* See LICENSE.txt for the license.
*/
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 8976b132e..0f0d59972 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -3,7 +3,7 @@
* are specialized using the templating system.
*
* Copyright (c) 2010-2011 by Mark Wiebe (mwwiebe@gmail.com)
- * The Univerity of British Columbia
+ * The University of British Columbia
*
* See LICENSE.txt for the license.
*/
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 71a82d7a0..85824f2ce 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -799,7 +799,7 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
Py_INCREF(descr);
vobj->obval = NULL;
Py_SIZE(vobj) = itemsize;
- vobj->flags = NPY_ARRAY_BEHAVED | NPY_ARRAY_OWNDATA;
+ vobj->flags = NPY_ARRAY_CARRAY | NPY_ARRAY_F_CONTIGUOUS | NPY_ARRAY_OWNDATA;
swap = 0;
if (PyDataType_HASFIELDS(descr)) {
if (base) {
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index f69abcc6b..c32a710de 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -11,7 +11,7 @@
by
Travis Oliphant, oliphant@ee.byu.edu
- Brigham Young Univeristy
+ Brigham Young University
maintainer email: oliphant.travis@ieee.org
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
index 4042eae2f..59a90c770 100644
--- a/numpy/core/src/private/ufunc_override.h
+++ b/numpy/core/src/private/ufunc_override.h
@@ -198,6 +198,12 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/* Pos of each override in args */
int with_override_pos[NPY_MAXARGS];
+ /* 2016-01-29: Disable for now in master -- can re-enable once details are
+ * sorted out. All commented bits are tagged NUMPY_UFUNC_DISABLED. -njs
+ */
+ result = NULL;
+ return 0;
+
/*
* Check inputs
*/
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 63ed4f492..9e8c3c985 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -582,7 +582,7 @@ _is_same_name(const char* s1, const char* s2)
/*
* Sets core_num_dim_ix, core_num_dims, core_dim_ixs, core_offsets,
* and core_signature in PyUFuncObject "ufunc". Returns 0 unless an
- * error occured.
+ * error occurred.
*/
static int
_parse_signature(PyUFuncObject *ufunc, const char *signature)
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 1f863a7db..77fb75f10 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -581,7 +581,7 @@ class TestEinSum(TestCase):
def test_einsum_fixed_collapsingbug(self):
# Issue #5147.
- # The bug only occured when output argument of einssum was used.
+        # The bug only occurred when output argument of einsum was used.
x = np.random.normal(0, 1, (5, 5, 5, 5))
y1 = np.zeros((5, 5))
np.einsum('aabb->ab', x, out=y1)
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 2df7ba3ea..6b5430611 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,7 +1,7 @@
from __future__ import division, absolute_import, print_function
from numpy import (logspace, linspace, dtype, array, finfo, typecodes, arange,
- isnan)
+ isnan, ndarray)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal
@@ -115,6 +115,19 @@ class TestLinspace(TestCase):
b = PhysicalQuantity(1.0)
assert_equal(linspace(a, b), linspace(0.0, 1.0))
+ def test_subclass(self):
+ class PhysicalQuantity2(ndarray):
+ __array_priority__ = 10
+
+ a = array(0).view(PhysicalQuantity2)
+ b = array(1).view(PhysicalQuantity2)
+ ls = linspace(a, b)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0))
+ ls = linspace(a, b, 1)
+ assert type(ls) is PhysicalQuantity2
+ assert_equal(ls, linspace(0.0, 1.0, 1))
+
def test_denormal_numbers(self):
# Regression test for gh-5437. Will probably fail when compiled
# with ICC, which flushes denormals to zero
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index f432aa975..d57e7c106 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2136,6 +2136,9 @@ class TestMethods(TestCase):
assert_equal(c, np.dot(a, b))
def test_dot_override(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
return "A"
@@ -2543,6 +2546,9 @@ class TestBinop(object):
assert_array_equal(res, l[4] + l[4])
def test_ufunc_override_rop_precedence(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
# Check that __rmul__ and other right-hand operations have
# precedence over __numpy_ufunc__
@@ -2661,6 +2667,9 @@ class TestBinop(object):
yield check, op_name, False
def test_ufunc_override_rop_simple(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
# Check parts of the binary op overriding behavior in an
# explicit test case that is easier to understand.
class SomeClass(object):
@@ -2765,6 +2774,9 @@ class TestBinop(object):
assert_(isinstance(res, SomeClass3))
def test_ufunc_override_normalize_signature(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
# gh-5674
class SomeClass(object):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
@@ -2781,6 +2793,9 @@ class TestBinop(object):
assert_equal(kw['signature'], 'ii->i')
def test_numpy_ufunc_index(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
# Check that index is set appropriately, also if only an output
# is passed on (latter is another regression tests for github bug 4753)
class CheckIndex(object):
@@ -2818,6 +2833,9 @@ class TestBinop(object):
assert_equal(np.add(a, dummy, out=a), 0)
def test_out_override(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
+
# regression test for github bug 4753
class OutClass(np.ndarray):
def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
@@ -3564,6 +3582,14 @@ class TestIO(object):
def tearDown(self):
shutil.rmtree(self.tempdir)
+ def test_nofile(self):
+ # this should probably be supported as a file
+ # but for now test for proper errors
+ b = io.BytesIO()
+ assert_raises(IOError, np.fromfile, b, np.uint8, 80)
+ d = np.ones(7);
+ assert_raises(IOError, lambda x: x.tofile(b), d)
+
def test_bool_fromstring(self):
v = np.array([True, False, True, False], dtype=np.bool_)
y = np.fromstring('1 0 -2.3 0.0', sep=' ', dtype=np.bool_)
@@ -4615,24 +4641,6 @@ class TestDot(TestCase):
assert_equal(np.dot(arr, 3), desired)
assert_equal(np.dot(3, arr), desired)
- def test_dot_override(self):
- class A(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return "A"
-
- class B(object):
- def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
- return NotImplemented
-
- a = A()
- b = B()
- c = np.array([[1]])
-
- assert_equal(np.dot(a, b), "A")
- assert_equal(c.dot(a), "A")
- assert_raises(TypeError, np.dot, b, c)
- assert_raises(TypeError, c.dot, b)
-
def test_accelerate_framework_sgemv_fix(self):
def aligned_array(shape, align, dtype, order='C'):
@@ -4893,6 +4901,8 @@ class MatmulCommon():
assert_equal(res, tgt12_21)
def test_numpy_ufunc_override(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
class A(np.ndarray):
def __new__(cls, *args, **kwargs):
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 34be84135..e22a5e193 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1044,6 +1044,12 @@ class TestBaseRepr(TestCase):
assert_equal(np.base_repr(-12, 10, 4), '-000012')
assert_equal(np.base_repr(-12, 4), '-30')
+ def test_base_range(self):
+ with self.assertRaises(ValueError):
+ np.base_repr(1, 1)
+ with self.assertRaises(ValueError):
+ np.base_repr(1, 37)
+
class TestArrayComparisons(TestCase):
def test_array_equal(self):
@@ -2466,5 +2472,17 @@ class TestBroadcast(TestCase):
assert_equal(mit.numiter, j)
+class TestKeepdims(TestCase):
+
+ class sub_array(np.ndarray):
+ def sum(self, axis=None, dtype=None, out=None):
+ return np.ndarray.sum(self, axis, dtype, out, keepdims=True)
+
+ def test_raise(self):
+ sub_class = self.sub_array
+ x = np.arange(30).view(sub_class)
+ assert_raises(TypeError, np.sum, x, keepdims=True)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 9fbdf51d6..2c85546a7 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -299,6 +299,15 @@ class TestRecord(TestCase):
assert_equal(a, pickle.loads(pickle.dumps(a)))
assert_equal(a[0], pickle.loads(pickle.dumps(a[0])))
+ def test_pickle_3(self):
+ # Issue #7140
+ a = self.data
+ pa = pickle.loads(pickle.dumps(a[0]))
+ assert_(pa.flags.c_contiguous)
+ assert_(pa.flags.f_contiguous)
+ assert_(pa.flags.writeable)
+ assert_(pa.flags.aligned)
+
def test_objview_record(self):
# https://github.com/numpy/numpy/issues/2599
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 2ba988b87..917e05e6a 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1215,7 +1215,24 @@ class TestSpecialMethods(TestCase):
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
+ def test_ufunc_override_disabled(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ # This test should be removed when __numpy_ufunc__ is re-enabled.
+
+ class MyArray(object):
+ def __numpy_ufunc__(self, *args, **kwargs):
+ self._numpy_ufunc_called = True
+
+ my_array = MyArray()
+ real_array = np.ones(10)
+ assert_raises(TypeError, lambda: real_array + my_array)
+ assert_raises(TypeError, np.add, real_array, my_array)
+ assert not hasattr(my_array, "_numpy_ufunc_called")
+
+
def test_ufunc_override(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
class A(object):
def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):
@@ -1241,6 +1258,8 @@ class TestSpecialMethods(TestCase):
assert_equal(res1[5], {})
def test_ufunc_override_mro(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
# Some multi arg functions for testing.
def tres_mul(a, b, c):
@@ -1332,6 +1351,8 @@ class TestSpecialMethods(TestCase):
assert_raises(TypeError, four_mul_ufunc, 1, c, c_sub, c)
def test_ufunc_override_methods(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
@@ -1436,6 +1457,8 @@ class TestSpecialMethods(TestCase):
assert_equal(res[4], (a, [4, 2], 'b0'))
def test_ufunc_override_out(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
class A(object):
def __numpy_ufunc__(self, ufunc, method, pos, inputs, **kwargs):
@@ -1470,6 +1493,8 @@ class TestSpecialMethods(TestCase):
assert_equal(res7['out'][1], 'out1')
def test_ufunc_override_exception(self):
+ # 2016-01-29: NUMPY_UFUNC_DISABLED
+ return
class A(object):
def __numpy_ufunc__(self, *a, **kwargs):
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index 9fa09cd51..50f03feeb 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -21,7 +21,7 @@ Created: 11 January 2003
Requires: Python 2.x
-Succesfully tested on:
+Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 37be0800d..9ba5759df 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -313,7 +313,7 @@ class Gnu95FCompiler(GnuFCompiler):
if target:
d = os.path.normpath(self.get_libgcc_dir())
root = os.path.join(d, *((os.pardir,)*4))
- path = os.path.join(root, target, "lib")
+ path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index d10b50218..e38e4d608 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -11,7 +11,7 @@ process_file(filename)
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
- The number of comma-separeted words in '<..>' will determine the number of
+ The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
index 59c049878..22eb71e6d 100644
--- a/numpy/doc/byteswapping.py
+++ b/numpy/doc/byteswapping.py
@@ -108,7 +108,7 @@ the correct endianness:
>>> fixed_end_dtype_arr[0]
1
-Note the the array has not changed in memory:
+Note the array has not changed in memory:
>>> fixed_end_dtype_arr.tobytes() == big_end_str
True
diff --git a/numpy/doc/internals.py b/numpy/doc/internals.py
index 6bd6b1ae9..c25872bc0 100644
--- a/numpy/doc/internals.py
+++ b/numpy/doc/internals.py
@@ -49,7 +49,7 @@ uses the same data buffer. This is why it is necessary to force copies through
use of the .copy() method if one really wants to make a new and independent
copy of the data buffer.
-New views into arrays mean the the object reference counts for the data buffer
+New views into arrays mean the object reference counts for the data buffer
increase. Simply doing away with the original array object will not remove the
data buffer if other views of it still exist.
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 5270cabb5..441629faa 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -211,7 +211,7 @@ if os.path.isfile('.f2py_f2cmap'):
else:
errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n" % (
k, k1, d[k][k1], d[k][k1], list(c2py_map.keys())))
- outmess('Succesfully applied user defined changes from .f2py_f2cmap\n')
+ outmess('Successfully applied user defined changes from .f2py_f2cmap\n')
except Exception as msg:
errmess(
'Failed to apply user defined changes from .f2py_f2cmap: %s. Skipping.\n' % (msg))
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 9f8c8962a..a51eb5d38 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -635,7 +635,7 @@ def crackline(line, reset=0):
"""
reset=-1 --- initialize
reset=0 --- crack the line
- reset=1 --- final check if mismatch of blocks occured
+ reset=1 --- final check if mismatch of blocks occurred
Cracked data is saved in grouplist[0].
"""
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 2ea8351a2..6a1f5ae6e 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -16,13 +16,13 @@ wrapper_function(args)
if (successful) {
callfortran
- if (succesful) {
+ if (successful) {
put_a_to_python
- if (succesful) {
+ if (successful) {
put_b_to_python
- if (succesful) {
+ if (successful) {
buildvalue = ...
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index dad1f4764..c30ef6bf5 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1337,7 +1337,7 @@ def pad(array, pad_width, mode, **kwargs):
'reflect_type': 'even',
}
- if isinstance(mode, str):
+ if isinstance(mode, np.compat.basestring):
# Make sure have allowed kwargs appropriate for mode
for key in kwargs:
if key not in allowedkwargs[mode]:
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 844c069c0..788807086 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -85,9 +85,6 @@ def _hist_optim_numbins_estimator(a, estimator):
will choose the appropriate estimator and return it's estimate for the optimal
number of bins.
"""
- assert isinstance(estimator, basestring)
- # private function should not be called otherwise
-
if a.size == 0:
return 1
@@ -124,7 +121,7 @@ def _hist_optim_numbins_estimator(a, estimator):
def fd(x):
"""
- Freedman Diaconis rule using Inter Quartile Range (IQR) for binwidth
+ Freedman Diaconis rule using interquartile range (IQR) for binwidth
Considered a variation of the Scott rule with more robustness as the IQR
is less affected by outliers than the standard deviation. However the IQR depends on
fewer points than the sd so it is less accurate, especially for long tailed distributions.
@@ -944,11 +941,16 @@ def piecewise(x, condlist, funclist, *args, **kw):
condlist = condlist.T
if n == n2 - 1: # compute the "otherwise" condition.
totlist = np.logical_or.reduce(condlist, axis=0)
- condlist = np.vstack([condlist, ~totlist])
+ # Only able to stack vertically if the array is 1d or less
+ if x.ndim <= 1:
+ condlist = np.vstack([condlist, ~totlist])
+ else:
+ condlist = [asarray(c, dtype=bool) for c in condlist]
+ totlist = condlist[0]
+ for k in range(1, n):
+ totlist |= condlist[k]
+ condlist.append(~totlist)
n += 1
- if (n != n2):
- raise ValueError(
- "function list and condition list must be the same")
y = zeros(x.shape, x.dtype)
for k in range(n):
@@ -1022,7 +1024,7 @@ def select(condlist, choicelist, default=0):
dtype = np.result_type(*choicelist)
# Convert conditions to arrays and broadcast conditions and choices
- # as the shape is needed for the result. Doing it seperatly optimizes
+ # as the shape is needed for the result. Doing it separately optimizes
# for example when all choices are scalars.
condlist = np.broadcast_arrays(*condlist)
choicelist = np.broadcast_arrays(*choicelist)
@@ -1244,7 +1246,7 @@ def gradient(f, *varargs, **kwargs):
# Convert datetime64 data into ints. Make dummy variable `y`
# that is a view of ints if the data is datetime64, otherwise
- # just set y equal to the the array `f`.
+ # just set y equal to the array `f`.
if f.dtype.char in ["M", "m"]:
y = f.view('int64')
else:
@@ -3228,22 +3230,22 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
----------
a : array_like
Input array or object that can be converted to an array.
- axis : int or sequence of int, optional
- Axis along which the medians are computed. The default (axis=None)
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
- Alternative output array in which to place the result. It must have
- the same shape and buffer length as the expected output, but the
- type (of the output) will be cast if necessary.
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
- If True, then allow use of memory of input array (a) for
+ If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
- median. This will save memory when you do not need to preserve the
- contents of the input array. Treat the input as undefined, but it
- will probably be fully or partially sorted. Default is False. Note
- that, if `overwrite_input` is True and the input is not already an
- ndarray, an error will be raised.
+ `median`. This will save memory when you do not need to preserve
+ the contents of the input array. Treat the input as undefined,
+ but it will probably be fully or partially sorted. Default is
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -3251,15 +3253,14 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
.. versionadded:: 1.9.0
-
Returns
-------
median : ndarray
- A new array holding the result (unless `out` is specified, in which
- case that array is returned instead). If the input contains
- integers, or floats of smaller precision than 64, then the output
- data-type is float64. Otherwise, the output data-type is the same
- as that of the input.
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
See Also
--------
@@ -3267,10 +3268,10 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
Notes
-----
- Given a vector V of length N, the median of V is the middle value of
- a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
- odd. When N is even, it is the average of the two middle values of
- ``V_sorted``.
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+    middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+    ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the
+ two middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
@@ -3383,73 +3384,79 @@ def percentile(a, q, axis=None, out=None,
"""
Compute the qth percentile of the data along the specified axis.
- Returns the qth percentile of the array elements.
+ Returns the qth percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
- Percentile to compute which must be between 0 and 100 inclusive.
- axis : int or sequence of int, optional
- Axis along which the percentiles are computed. The default (None)
- is to compute the percentiles along a flattened version of the array.
- A sequence of axes is supported since version 1.9.0.
+ Percentile to compute, which must be between 0 and 100 inclusive.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the percentiles are computed. The
+ default is to compute the percentile(s) along a flattened
+ version of the array. A sequence of axes is supported since
+ version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
- If True, then allow use of memory of input array `a` for
+ If True, then allow use of memory of input array `a`
calculations. The input array will be modified by the call to
- percentile. This will save memory when you do not need to preserve
- the contents of the input array. In this case you should not make
- any assumptions about the content of the passed in array `a` after
- this function completes -- treat it as undefined. Default is False.
- Note that, if the `a` input is not already an array this parameter
- will have no effect, `a` will be converted to an array internally
- regardless of the value of this parameter.
+ `percentile`. This will save memory when you do not need to
+ preserve the contents of the input array. In this case you
+ should not make any assumptions about the contents of the input
+ `a` after this function completes -- treat it as undefined.
+ Default is False. If `a` is not already an array, this parameter
+ will have no effect as `a` will be converted to an array
+ internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
- This optional parameter specifies the interpolation method to use,
- when the desired quantile lies between two data points `i` and `j`:
- * linear: `i + (j - i) * fraction`, where `fraction` is the
- fractional part of the index surrounded by `i` and `j`.
- * lower: `i`.
- * higher: `j`.
- * nearest: `i` or `j` whichever is nearest.
- * midpoint: (`i` + `j`) / 2.
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+ * linear: ``i + (j - i) * fraction``, where ``fraction``
+ is the fractional part of the index surrounded by ``i``
+ and ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
.. versionadded:: 1.9.0
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left
- in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original array `a`.
+ If this is set to True, the axes which are reduced are left in
+ the result as dimensions with size one. With this option, the
+ result will broadcast correctly against the original array `a`.
.. versionadded:: 1.9.0
Returns
-------
percentile : scalar or ndarray
- If a single percentile `q` is given and axis=None a scalar is
- returned. If multiple percentiles `q` are given an array holding
- the result is returned. The results are listed in the first axis.
- (If `out` is specified, in which case that array is returned
- instead). If the input contains integers, or floats of smaller
- precision than 64, then the output data-type is float64. Otherwise,
- the output data-type is the same as that of the input.
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
See Also
--------
- mean, median
+ mean, median, nanpercentile
Notes
-----
- Given a vector V of length N, the q-th percentile of V is the q-th ranked
- value in a sorted copy of V. The values and distances of the two
- nearest neighbors as well as the `interpolation` parameter will
- determine the percentile if the normalized ranking does not match q
- exactly. This function is the same as the median if ``q=50``, the same
- as the minimum if ``q=0`` and the same as the maximum if ``q=100``.
+ Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+    ``V`` is the value ``q/100`` of the way from the minimum to the
+    maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the percentile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=50``, the same as the minimum if ``q=0`` and the
+ same as the maximum if ``q=100``.
Examples
--------
@@ -3458,28 +3465,26 @@ def percentile(a, q, axis=None, out=None,
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
- array([ 3.5])
+ 3.5
>>> np.percentile(a, 50, axis=0)
array([[ 6.5, 4.5, 2.5]])
>>> np.percentile(a, 50, axis=1)
+ array([ 7., 2.])
+ >>> np.percentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
- >>> np.percentile(a, 50, axis=0, out=m)
+ >>> np.percentile(a, 50, axis=0, out=out)
array([[ 6.5, 4.5, 2.5]])
>>> m
array([[ 6.5, 4.5, 2.5]])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
- array([[ 7.],
- [ 2.]])
- >>> assert not np.all(a==b)
- >>> b = a.copy()
- >>> np.percentile(b, 50, axis=None, overwrite_input=True)
- array([ 3.5])
+ array([ 7., 2.])
+ >>> assert not np.all(a == b)
"""
q = array(q, dtype=np.float64, copy=True)
@@ -3541,7 +3546,7 @@ def _percentile(a, q, axis=None, out=None,
elif interpolation == 'higher':
indices = ceil(indices).astype(intp)
elif interpolation == 'midpoint':
- indices = floor(indices) + 0.5
+ indices = 0.5 * (floor(indices) + ceil(indices))
elif interpolation == 'nearest':
indices = around(indices).astype(intp)
elif interpolation == 'linear':
@@ -3619,7 +3624,7 @@ def _percentile(a, q, axis=None, out=None,
r = add(x1, x2)
if np.any(n):
- warnings.warn("Invalid value encountered in median",
+ warnings.warn("Invalid value encountered in percentile",
RuntimeWarning)
if zerod:
if ap.ndim == 1:
@@ -3731,7 +3736,8 @@ def trapz(y, x=None, dx=1.0, axis=-1):
#always succeed
def add_newdoc(place, obj, doc):
- """Adds documentation to obj which is in module place.
+ """
+ Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
@@ -3749,7 +3755,7 @@ def add_newdoc(place, obj, doc):
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
- """
+ """
try:
new = getattr(__import__(place, globals(), {}, [obj]), obj)
if isinstance(doc, str):
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 6b28b4a35..56f0010af 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -23,6 +23,7 @@ import warnings
import numpy as np
from numpy.lib.function_base import _ureduce as _ureduce
+
__all__ = [
'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
@@ -141,7 +142,7 @@ def _divide_by_count(a, b, out=None):
return np.divide(a, b, out=out, casting='unsafe')
-def nanmin(a, axis=None, out=None, keepdims=False):
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
@@ -163,9 +164,14 @@ def nanmin(a, axis=None, out=None, keepdims=False):
.. versionadded:: 1.8.0
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result
- will broadcast correctly against the original `a`.
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `min` method
+        of sub-classes of `ndarray`. If the sub-class's method does
+        not implement `keepdims`, an exception will be raised.
.. versionadded:: 1.8.0
@@ -220,27 +226,30 @@ def nanmin(a, axis=None, out=None, keepdims=False):
-inf
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
- res = np.fmin.reduce(a, axis=axis, out=out, keepdims=keepdims)
+ res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN axis encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
- res = np.amin(a, axis=axis, out=out, keepdims=keepdims)
+ res = np.amin(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
- mask = np.all(mask, axis=axis, keepdims=keepdims)
+ mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
return res
-def nanmax(a, axis=None, out=None, keepdims=False):
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
@@ -262,9 +271,14 @@ def nanmax(a, axis=None, out=None, keepdims=False):
.. versionadded:: 1.8.0
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result
- will broadcast correctly against the original `a`.
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-class's method does
+        not implement `keepdims`, an exception will be raised.
.. versionadded:: 1.8.0
@@ -319,20 +333,23 @@ def nanmax(a, axis=None, out=None, keepdims=False):
inf
"""
+ kwargs = {}
+ if keepdims is not np._NoValue:
+ kwargs['keepdims'] = keepdims
if not isinstance(a, np.ndarray) or type(a) is np.ndarray:
# Fast, but not safe for subclasses of ndarray
- res = np.fmax.reduce(a, axis=axis, out=out, keepdims=keepdims)
+ res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
warnings.warn("All-NaN slice encountered", RuntimeWarning)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
- res = np.amax(a, axis=axis, out=out, keepdims=keepdims)
+ res = np.amax(a, axis=axis, out=out, **kwargs)
if mask is None:
return res
# Check for all-NaN axis
- mask = np.all(mask, axis=axis, keepdims=keepdims)
+ mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
warnings.warn("All-NaN axis encountered", RuntimeWarning)
@@ -428,7 +445,7 @@ def nanargmax(a, axis=None):
return res
-def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
+def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
@@ -462,9 +479,15 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
.. versionadded:: 1.8.0
keepdims : bool, optional
- If True, the axes which are reduced are left in the result as
- dimensions with size one. With this option, the result will
- broadcast correctly against the original `arr`.
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class's methods do
+        not implement `keepdims`, an exception will be raised.
.. versionadded:: 1.8.0
@@ -513,7 +536,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=0):
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
-def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
+def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
Numbers (NaNs) as zero.
@@ -583,7 +606,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=0):
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
-def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
+def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
@@ -613,9 +636,14 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
expected output, but the type will be cast if necessary. See
`doc.ufuncs` for details.
keepdims : bool, optional
- If this is set to True, the axes which are reduced are left in the
- result as dimensions with size one. With this option, the result
- will broadcast correctly against the original `arr`.
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+
+ If the value is anything but the default, then
+ `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class's methods do
+        not implement `keepdims`, an exception will be raised.
Returns
-------
@@ -727,10 +755,12 @@ def _nanmedian(a, axis=None, out=None, overwrite_input=False):
out[...] = result
return result
+
def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
"""
- sort + indexing median, faster for small medians along multiple dimensions
- due to the high overhead of apply_along_axis
+ sort + indexing median, faster for small medians along multiple
+ dimensions due to the high overhead of apply_along_axis
+
see nanmedian for parameter usage
"""
a = np.ma.masked_array(a, np.isnan(a))
@@ -742,7 +772,8 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
return out
return m.filled(np.nan)
-def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
+
+def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
@@ -754,36 +785,41 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
----------
a : array_like
Input array or object that can be converted to an array.
- axis : int, optional
- Axis along which the medians are computed. The default (axis=None)
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
- Alternative output array in which to place the result. It must have
- the same shape and buffer length as the expected output, but the
- type (of the output) will be cast if necessary.
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output,
+ but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
- If True, then allow use of memory of input array (a) for
+ If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
- median. This will save memory when you do not need to preserve
+ `median`. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
- False. Note that, if `overwrite_input` is True and the input
- is not already an ndarray, an error will be raised.
+ False. If `overwrite_input` is ``True`` and `a` is not already an
+ `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
-
+ the result will broadcast correctly against the original `a`.
+ If this is anything but the default value it will be passed
+ through (in the special case of an empty array) to the
+ `mean` function of the underlying array. If the array is
+ a sub-class and `mean` does not have the kwarg `keepdims` this
+ will raise a RuntimeError.
Returns
-------
median : ndarray
- A new array holding the result. If the input contains integers, or
- floats of smaller precision than 64, then the output data-type is
- float64. Otherwise, the output data-type is the same as that of the
- input.
+ A new array holding the result. If the input contains integers
+ or floats smaller than ``float64``, then the output data-type is
+ ``np.float64``. Otherwise, the data-type of the output is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
See Also
--------
@@ -791,10 +827,10 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
Notes
-----
- Given a vector V of length N, the median of V is the middle value of
- a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
- odd. When N is even, it is the average of the two middle values of
- ``V_sorted``.
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
+ middle values of ``V_sorted`` when ``N`` is even.
Examples
--------
@@ -829,19 +865,19 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=False):
r, k = _ureduce(a, func=_nanmedian, axis=axis, out=out,
overwrite_input=overwrite_input)
- if keepdims:
+ if keepdims and keepdims is not np._NoValue:
return r.reshape(k)
else:
return r
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
- interpolation='linear', keepdims=False):
+ interpolation='linear', keepdims=np._NoValue):
"""
- Compute the qth percentile of the data along the specified axis, while
- ignoring nan values.
+ Compute the qth percentile of the data along the specified axis,
+ while ignoring nan values.
- Returns the qth percentile of the array elements.
+ Returns the qth percentile(s) of the array elements.
.. versionadded:: 1.9.0
@@ -850,11 +886,13 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
- Percentile to compute which must be between 0 and 100 inclusive.
- axis : int or sequence of int, optional
- Axis along which the percentiles are computed. The default (None)
- is to compute the percentiles along a flattened version of the array.
- A sequence of axes is supported since version 1.9.0.
+ Percentile to compute, which must be between 0 and 100
+ inclusive.
+ axis : {int, sequence of int, None}, optional
+ Axis or axes along which the percentiles are computed. The
+ default is to compute the percentile(s) along a flattened
+ version of the array. A sequence of axes is supported since
+ version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
@@ -862,39 +900,52 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
- percentile. This will save memory when you do not need to preserve
- the contents of the input array. In this case you should not make
- any assumptions about the content of the passed in array `a` after
- this function completes -- treat it as undefined. Default is False.
- Note that, if the `a` input is not already an array this parameter
- will have no effect, `a` will be converted to an array internally
- regardless of the value of this parameter.
+ `percentile`. This will save memory when you do not need to
+ preserve the contents of the input array. In this case you
+ should not make any assumptions about the contents of the input
+ `a` after this function completes -- treat it as undefined.
+ Default is False. If `a` is not already an array, this parameter
+ will have no effect as `a` will be converted to an array
+ internally regardless of the value of this parameter.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
- This optional parameter specifies the interpolation method to use,
- when the desired quantile lies between two data points `i` and `j`:
- * linear: `i + (j - i) * fraction`, where `fraction` is the
- fractional part of the index surrounded by `i` and `j`.
- * lower: `i`.
- * higher: `j`.
- * nearest: `i` or `j` whichever is nearest.
- * midpoint: (`i` + `j`) / 2.
-
+ This optional parameter specifies the interpolation method to
+ use when the desired quantile lies between two data points
+ ``i < j``:
+ * linear: ``i + (j - i) * fraction``, where ``fraction`` is
+ the fractional part of the index surrounded by ``i`` and
+ ``j``.
+ * lower: ``i``.
+ * higher: ``j``.
+ * nearest: ``i`` or ``j``, whichever is nearest.
+ * midpoint: ``(i + j) / 2``.
keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
-        the result will broadcast correctly against the original `arr`.
+        the result will broadcast correctly against the original `a`.
+        If this is anything but the default value it will be passed
+        through (in the special case of an empty array) to the
+        `mean` function of the underlying array. If the array is
+        a sub-class and `mean` does not have the kwarg `keepdims` this
+        will raise a RuntimeError.
Returns
-------
- nanpercentile : scalar or ndarray
- If a single percentile `q` is given and axis=None a scalar is
- returned. If multiple percentiles `q` are given an array holding
- the result is returned. The results are listed in the first axis.
- (If `out` is specified, in which case that array is returned
- instead). If the input contains integers, or floats of smaller
- precision than 64, then the output data-type is float64. Otherwise,
- the output data-type is the same as that of the input.
+ percentile : scalar or ndarray
+ If `q` is a single percentile and `axis=None`, then the result
+ is a scalar. If multiple percentiles are given, first axis of
+ the result corresponds to the percentiles. The other axes are
+ the axes that remain after the reduction of `a`. If the input
+ contains integers or floats smaller than ``float64``, the output
+ data-type is ``float64``. Otherwise, the output data-type is the
+ same as that of the input. If `out` is specified, that array is
+ returned instead.
See Also
--------
@@ -902,12 +953,14 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
Notes
-----
- Given a vector V of length N, the q-th percentile of V is the q-th ranked
- value in a sorted copy of V. The values and distances of the two
- nearest neighbors as well as the `interpolation` parameter will
- determine the percentile if the normalized ranking does not match q
- exactly. This function is the same as the median if ``q=50``, the same
- as the minimum if ``q=0``and the same as the maximum if ``q=100``.
+ Given a vector ``V`` of length ``N``, the ``q``-th percentile of
+    ``V`` is the value ``q/100`` of the way from the minimum to the
+    maximum in a sorted copy of ``V``. The values and distances of
+ the two nearest neighbors as well as the `interpolation` parameter
+ will determine the percentile if the normalized ranking does not
+ match the location of ``q`` exactly. This function is the same as
+ the median if ``q=50``, the same as the minimum if ``q=0`` and the
+ same as the maximum if ``q=100``.
Examples
--------
@@ -921,24 +974,21 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
>>> np.nanpercentile(a, 50)
3.5
>>> np.nanpercentile(a, 50, axis=0)
- array([[ 6.5, 4.5, 2.5]])
- >>> np.nanpercentile(a, 50, axis=1)
+ array([ 6.5, 2., 2.5])
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
array([[ 7.],
[ 2.]])
>>> m = np.nanpercentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
- >>> np.nanpercentile(a, 50, axis=0, out=m)
- array([[ 6.5, 4.5, 2.5]])
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
+ array([ 6.5, 2., 2.5])
>>> m
- array([[ 6.5, 4.5, 2.5]])
+ array([ 6.5, 2. , 2.5])
+
>>> b = a.copy()
>>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
- array([[ 7.],
- [ 2.]])
+ array([ 7., 2.])
>>> assert not np.all(a==b)
- >>> b = a.copy()
- >>> np.nanpercentile(b, 50, axis=None, overwrite_input=True)
- array([ 3.5])
"""
@@ -952,7 +1002,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
r, k = _ureduce(a, func=_nanpercentile, q=q, axis=axis, out=out,
overwrite_input=overwrite_input,
interpolation=interpolation)
- if keepdims:
+ if keepdims and keepdims is not np._NoValue:
if q.ndim == 0:
return r.reshape(k)
else:
@@ -962,7 +1012,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
- interpolation='linear', keepdims=False):
+ interpolation='linear'):
"""
Private function that doesn't support extended axis or keepdims.
These methods are extended to this function using _ureduce
@@ -979,7 +1029,8 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
- result = np.swapaxes(result, 0, axis)
+ result = np.rollaxis(result, axis)
+
if out is not None:
out[...] = result
return result
@@ -987,9 +1038,10 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
- Private function for rank 1 arrays. Compute percentile ignoring NaNs.
- See nanpercentile for parameter usage
+ Private function for rank 1 arrays. Compute percentile ignoring
+ NaNs.
+ See nanpercentile for parameter usage
"""
c = np.isnan(arr1d)
s = np.where(c)[0]
@@ -1016,7 +1068,7 @@ def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
interpolation=interpolation)
-def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
@@ -1052,7 +1104,8 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ the result will broadcast correctly against the original `a`.
+
Returns
-------
@@ -1091,6 +1144,9 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
below). Specifying a higher-accuracy accumulator using the ``dtype``
keyword can alleviate this issue.
+ For this function to work on sub-classes of ndarray, they must define
+ `sum` with the kwarg `keepdims`
+
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
@@ -1118,8 +1174,17 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
warnings.simplefilter('ignore')
# Compute mean
- cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=True)
- avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=True)
+ if type(arr) is np.matrix:
+ _keepdims = np._NoValue
+ else:
+ _keepdims = True
+ # we need to special case matrix for reverse compatibility
+ # in order for this to work, these sums need to be called with
+ # keepdims=True, however matrix now raises an error in this case, but
+ # the reason that it drops the keepdims kwarg is to force keepdims=True
+ # so this used to work by serendipity.
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims)
+ avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims)
avg = _divide_by_count(avg, cnt)
# Compute squared deviation from mean.
@@ -1147,7 +1212,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
return var
-def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
ignoring NaNs.
@@ -1181,10 +1246,16 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of non-NaN
elements. By default `ddof` is zero.
+
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
- the result will broadcast correctly against the original `arr`.
+ the result will broadcast correctly against the original `a`.
+
+ If this value is anything but the default it is passed through
+ as-is to the relevant functions of the sub-classes. If these
+ functions do not have a `keepdims` kwarg, a RuntimeError will
+ be raised.
Returns
-------
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index ebf43f3e4..a6e4a8dac 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -6,7 +6,7 @@ import re
import itertools
import warnings
import weakref
-from operator import itemgetter
+from operator import itemgetter, index as opindex
import numpy as np
from . import format
@@ -720,10 +720,18 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
- usecols : sequence, optional
- Which columns to read, with 0 being the first. For example,
- ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+
+ usecols : int or sequence, optional
+ Which columns to read, with 0 being the first. For example,
+ usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
+
+ .. versionadded:: 1.11.0
+
+ Also when a single column has to be read it is possible to use
+        an integer instead of a tuple. E.g. ``usecols = 3`` reads the
+        fourth column the same way as ``usecols = (3,)`` would.
+
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
@@ -792,8 +800,25 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
+
if usecols is not None:
- usecols = list(usecols)
+ # Allow usecols to be a single int or a sequence of ints
+ try:
+ usecols_as_list = list(usecols)
+ except TypeError:
+ usecols_as_list = [usecols]
+ for col_idx in usecols_as_list:
+ try:
+ opindex(col_idx)
+ except TypeError as e:
+ e.args = (
+ "usecols must be an int or a sequence of ints but "
+ "it contains at least one element of type %s" %
+ type(col_idx),
+ )
+ raise
+ # Fall back to existing code
+ usecols = usecols_as_list
fown = False
try:
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 30ea35d55..f19a0b13a 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -953,6 +953,17 @@ class TestNdarrayPadWidth(TestCase):
assert_array_equal(a, b)
+class TestUnicodeInput(TestCase):
+ def test_unicode_mode(self):
+ try:
+ constant_mode = unicode('constant')
+ except NameError:
+ constant_mode = 'constant'
+ a = np.pad([1], 2, mode=constant_mode)
+ b = np.array([0, 0, 1, 0, 0])
+ assert_array_equal(a, b)
+
+
class ValueError1(TestCase):
def test_check_simple(self):
arr = np.arange(30)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index d6a838f3a..ba2448815 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1862,6 +1862,10 @@ class TestPiecewise(TestCase):
x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
assert_array_equal(x, [3, 4])
+ def test_scalar_domains_three_conditions(self):
+ x = piecewise(3, [True, False, False], [4, 2, 0])
+ assert_equal(x, 4)
+
def test_default(self):
# No value specified for x[1], should be 0
x = piecewise([1, 2], [True, False], [2])
@@ -1886,6 +1890,13 @@ class TestPiecewise(TestCase):
x = 3
piecewise(x, [x <= 3, x > 3], [4, 0]) # Should succeed.
+ def test_multidimensional_extrafunc(self):
+ x = np.array([[-2.5, -1.5, -0.5],
+ [0.5, 1.5, 2.5]])
+ y = piecewise(x, [x < 0, x >= 2], [-1, 1, 3])
+ assert_array_equal(y, np.array([[-1., -1., -1.],
+ [3., 3., 1.]]))
+
class TestBincount(TestCase):
@@ -2055,7 +2066,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestScoreatpercentile(TestCase):
+class TestPercentile(TestCase):
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2089,7 +2100,7 @@ class TestScoreatpercentile(TestCase):
# Test defaults
assert_equal(np.percentile(range(10), 50), 4.5)
- # explicitly specify interpolation_method 'fraction' (the default)
+ # explicitly specify interpolation_method 'linear' (the default)
assert_equal(np.percentile(range(10), 50,
interpolation='linear'), 4.5)
@@ -2104,6 +2115,10 @@ class TestScoreatpercentile(TestCase):
def test_midpoint(self):
assert_equal(np.percentile(range(10), 51,
interpolation='midpoint'), 4.5)
+ assert_equal(np.percentile(range(11), 51,
+ interpolation='midpoint'), 5.5)
+ assert_equal(np.percentile(range(11), 50,
+ interpolation='midpoint'), 5)
def test_nearest(self):
assert_equal(np.percentile(range(10), 51,
@@ -2406,6 +2421,7 @@ class TestScoreatpercentile(TestCase):
np.array([np.nan] * 2))
assert_(w[0].category is RuntimeWarning)
assert_(w[1].category is RuntimeWarning)
+ assert_(w[2].category is RuntimeWarning)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 32e0c32de..c0f8c1953 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -194,7 +194,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
def test_big_arrays(self):
L = (1 << 31) + 100000
a = np.empty(L, dtype=np.uint8)
- with temppath(prefix="numpy_test_big_arrays_") as tmp:
+ with temppath(prefix="numpy_test_big_arrays_", suffix=".npz") as tmp:
np.savez(tmp, a=a)
del a
npfile = np.load(tmp)
@@ -608,6 +608,29 @@ class TestLoadTxt(TestCase):
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
+ # Testing with an integer instead of a sequence
+ for int_type in [int, np.int8, np.int16,
+ np.int32, np.int64, np.uint8, np.uint16,
+ np.uint32, np.uint64]:
+ to_read = int_type(1)
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=to_read)
+ assert_array_equal(x, a[:, 1])
+
+ # Testing with some crazy custom integer type
+ class CrazyInt(object):
+ def __index__(self):
+ return 1
+
+ crazy_int = CrazyInt()
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=crazy_int)
+ assert_array_equal(x, a[:, 1])
+
+ c.seek(0)
+ x = np.loadtxt(c, dtype=float, usecols=(crazy_int,))
+ assert_array_equal(x, a[:, 1])
+
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
@@ -619,6 +642,21 @@ class TestLoadTxt(TestCase):
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
+ # Testing non-ints in usecols
+ c.seek(0)
+ bogus_idx = 1.5
+ assert_raises_regex(
+ TypeError,
+ '^usecols must be.*%s' % type(bogus_idx),
+ np.loadtxt, c, usecols=bogus_idx
+ )
+
+ assert_raises_regex(
+ TypeError,
+ '^usecols must be.*%s' % type(bogus_idx),
+ np.loadtxt, c, usecols=[0, bogus_idx, 0]
+ )
+
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index ac88c4ea5..989c563d9 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -711,7 +711,7 @@ class TestNanFunctions_Percentile(TestCase):
# For checking consistency in higher dimensional case
large_mat = np.ones((3, 4, 5))
large_mat[:, 0:2:4, :] = 0
- large_mat[:, :, 3:] = 2*large_mat[:, :, 3:]
+ large_mat[:, :, 3:] *= 2
for axis in [None, 0, 1]:
for keepdim in [False, True]:
with warnings.catch_warnings(record=True) as w:
@@ -727,6 +727,9 @@ class TestNanFunctions_Percentile(TestCase):
keepdims=keepdim)
assert_equal(nan_val, val)
+ megamat = np.ones((3, 4, 5, 6))
+ assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/linalg/lapack_lite/dlapack_lite.c b/numpy/linalg/lapack_lite/dlapack_lite.c
index 6b65397bd..9f864d7ce 100644
--- a/numpy/linalg/lapack_lite/dlapack_lite.c
+++ b/numpy/linalg/lapack_lite/dlapack_lite.c
@@ -22678,7 +22678,7 @@ L160:
===============
The algorithm used in this program is basically backward (forward)
- substitution, with scaling to make the the code robust against
+ substitution, with scaling to make the code robust against
possible overflow.
Each eigenvector is normalized so that the element of largest
@@ -61663,7 +61663,7 @@ L180:
===============
The algorithm used in this program is basically backward (forward)
- substitution, with scaling to make the the code robust against
+ substitution, with scaling to make the code robust against
possible overflow.
Each eigenvector is normalized so that the element of largest
@@ -99417,7 +99417,7 @@ L180:
===============
The algorithm used in this program is basically backward (forward)
- substitution, with scaling to make the the code robust against
+ substitution, with scaling to make the code robust against
possible overflow.
Each eigenvector is normalized so that the element of largest
diff --git a/numpy/linalg/lapack_lite/zlapack_lite.c b/numpy/linalg/lapack_lite/zlapack_lite.c
index e6b03429b..9ce05ec1f 100644
--- a/numpy/linalg/lapack_lite/zlapack_lite.c
+++ b/numpy/linalg/lapack_lite/zlapack_lite.c
@@ -22760,7 +22760,7 @@ L160:
===============
The algorithm used in this program is basically backward (forward)
- substitution, with scaling to make the the code robust against
+ substitution, with scaling to make the code robust against
possible overflow.
Each eigenvector is normalized so that the element of largest
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 690655b36..24d41bcaa 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3146,11 +3146,11 @@ class MaskedArray(ndarray):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
- # If we're indexing a multidimensional field in a
+ # If we're indexing a multidimensional field in a
# structured array (such as dtype("(2,)i2,(2,)i1")),
# dimensionality goes up (M[field].ndim == M.ndim +
- # len(M.dtype[field].shape)). That's fine for
- # M[field] but problematic for M[field].fill_value
+ # len(M.dtype[field].shape)). That's fine for
+ # M[field] but problematic for M[field].fill_value
# which should have shape () to avoid breaking several
# methods. There is no great way out, so set to
# first element. See issue #6723.
@@ -5583,7 +5583,7 @@ class MaskedArray(ndarray):
def ptp(self, axis=None, out=None, fill_value=None):
"""
- Return (maximum - minimum) along the the given dimension
+ Return (maximum - minimum) along the given dimension
(i.e. peak-to-peak value).
Parameters
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index cf8d28cb0..e5998c001 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1496,6 +1496,12 @@ cdef class RandomState:
anywhere within the interval ``[a, b)``, and zero elsewhere.
+ When ``high`` == ``low``, values of ``low`` will be returned.
+ If ``high`` < ``low``, the results are officially undefined
+ and may eventually raise an error, i.e. do not rely on this
+ function to behave when passed arguments satisfying that
+ inequality condition.
+
Examples
--------
Draw samples from the distribution:
@@ -1985,9 +1991,9 @@ cdef class RandomState:
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
Random Signal Principles", 4th ed, 2001, p. 57.
- .. [2] "Poisson Process", Wikipedia,
+ .. [2] Wikipedia, "Poisson process",
http://en.wikipedia.org/wiki/Poisson_process
- .. [3] "Exponential Distribution, Wikipedia,
+ .. [3] Wikipedia, "Exponential distribution",
http://en.wikipedia.org/wiki/Exponential_distribution
"""
@@ -2087,8 +2093,8 @@ cdef class RandomState:
.. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma-distribution",
- http://en.wikipedia.org/wiki/Gamma-distribution
+ .. [2] Wikipedia, "Gamma distribution",
+ http://en.wikipedia.org/wiki/Gamma_distribution
Examples
--------
@@ -2178,8 +2184,8 @@ cdef class RandomState:
.. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma-distribution",
- http://en.wikipedia.org/wiki/Gamma-distribution
+ .. [2] Wikipedia, "Gamma distribution",
+ http://en.wikipedia.org/wiki/Gamma_distribution
Examples
--------
@@ -2375,7 +2381,7 @@ cdef class RandomState:
.. [1] Weisstein, Eric W. "Noncentral F-Distribution."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/NoncentralF-Distribution.html
- .. [2] Wikipedia, "Noncentral F distribution",
+ .. [2] Wikipedia, "Noncentral F-distribution",
http://en.wikipedia.org/wiki/Noncentral_F-distribution
Examples
@@ -3267,7 +3273,7 @@ cdef class RandomState:
.. [3] Weisstein, Eric W. "Laplace Distribution."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LaplaceDistribution.html
- .. [4] Wikipedia, "Laplace Distribution",
+ .. [4] Wikipedia, "Laplace distribution",
http://en.wikipedia.org/wiki/Laplace_distribution
Examples
@@ -3988,7 +3994,7 @@ cdef class RandomState:
.. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/BinomialDistribution.html
- .. [5] Wikipedia, "Binomial-distribution",
+ .. [5] Wikipedia, "Binomial distribution",
http://en.wikipedia.org/wiki/Binomial_distribution
Examples
@@ -4453,7 +4459,7 @@ cdef class RandomState:
.. [2] Weisstein, Eric W. "Hypergeometric Distribution." From
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/HypergeometricDistribution.html
- .. [3] Wikipedia, "Hypergeometric-distribution",
+ .. [3] Wikipedia, "Hypergeometric distribution",
http://en.wikipedia.org/wiki/Hypergeometric_distribution
Examples
@@ -4563,8 +4569,8 @@ cdef class RandomState:
Journal of Animal Ecology, 12:42-58.
.. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
Data Sets, CRC Press, 1994.
- .. [4] Wikipedia, "Logarithmic-distribution",
- http://en.wikipedia.org/wiki/Logarithmic-distribution
+ .. [4] Wikipedia, "Logarithmic distribution",
+ http://en.wikipedia.org/wiki/Logarithmic_distribution
Examples
--------
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 0fc7f879f..1c108ddb1 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -74,14 +74,9 @@ def test_f2py():
success = stdout.strip() == asbytes('2')
assert_(success, "Warning: f2py not found in path")
else:
- # unclear what f2py cmd was installed as, check plain (f2py),
- # with major version (f2py3), or major/minor version (f2py3.4)
- code, stdout, stderr = run_command([sys.executable, '-V'])
-
- # for some reason, 'python -V' returns version in 'stderr' for
- # Python 2.x but in 'stdout' for Python 3.x
- version = (stdout or stderr)[7:].strip()
- major, minor, revision = version.decode('utf-8').split('.')
+ version = sys.version_info
+ major = str(version.major)
+ minor = str(version.minor)
f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor)
success = False