-rw-r--r--  .travis.yml  5
-rw-r--r--  INSTALL.txt  32
-rw-r--r--  LICENSE.txt  2
-rw-r--r--  doc/TESTS.rst.txt  10
-rw-r--r--  doc/release/1.10.0-notes.rst  65
-rw-r--r--  doc/source/reference/routines.array-manipulation.rst  1
-rw-r--r--  doc/source/reference/routines.io.rst  1
-rw-r--r--  doc/source/reference/routines.linalg.rst  2
-rw-r--r--  doc/source/reference/ufuncs.rst  24
-rw-r--r--  numpy/__init__.py  11
-rw-r--r--  numpy/add_newdocs.py  21
-rw-r--r--  numpy/core/arrayprint.py  4
-rw-r--r--  numpy/core/fromnumeric.py  30
-rw-r--r--  numpy/core/getlimits.py  2
-rw-r--r--  numpy/core/include/numpy/ndarraytypes.h  4
-rw-r--r--  numpy/core/include/numpy/npy_3kcompat.h  13
-rw-r--r--  numpy/core/include/numpy/npy_common.h  13
-rw-r--r--  numpy/core/setup.py  1
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src  16
-rw-r--r--  numpy/core/src/multiarray/calculation.c  4
-rw-r--r--  numpy/core/src/multiarray/common.c  11
-rw-r--r--  numpy/core/src/multiarray/compiled_base.c  57
-rw-r--r--  numpy/core/src/multiarray/convert.c  33
-rw-r--r--  numpy/core/src/multiarray/ctors.c  72
-rw-r--r--  numpy/core/src/multiarray/descriptor.c  5
-rw-r--r--  numpy/core/src/multiarray/dtype_transfer.c  7
-rw-r--r--  numpy/core/src/multiarray/hashdescr.c  93
-rw-r--r--  numpy/core/src/multiarray/item_selection.c  669
-rw-r--r--  numpy/core/src/multiarray/mapping.c  39
-rw-r--r--  numpy/core/src/multiarray/multiarray_tests.c.src  8
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c  9
-rw-r--r--  numpy/core/src/npysort/heapsort.c.src  96
-rw-r--r--  numpy/core/src/npysort/mergesort.c.src  144
-rw-r--r--  numpy/core/src/npysort/npysort_common.h  7
-rw-r--r--  numpy/core/src/npysort/quicksort.c.src  167
-rw-r--r--  numpy/core/src/npysort/selection.c.src  48
-rw-r--r--  numpy/core/src/private/npy_config.h  5
-rw-r--r--  numpy/core/src/private/npy_partition.h.src  16
-rw-r--r--  numpy/core/src/private/npy_sort.h  10
-rw-r--r--  numpy/core/src/private/ufunc_override.h  9
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src  18
-rw-r--r--  numpy/core/src/umath/simd.inc.src  36
-rw-r--r--  numpy/core/src/umath/ufunc_object.c  272
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c  2
-rw-r--r--  numpy/core/src/umath/umathmodule.c  7
-rw-r--r--  numpy/core/tests/test_dtype.py  22
-rw-r--r--  numpy/core/tests/test_indexing.py  19
-rw-r--r--  numpy/core/tests/test_multiarray.py  160
-rw-r--r--  numpy/core/tests/test_regression.py  15
-rw-r--r--  numpy/core/tests/test_scalarmath.py  24
-rw-r--r--  numpy/core/tests/test_ufunc.py  28
-rw-r--r--  numpy/core/tests/test_umath.py  148
-rw-r--r--  numpy/distutils/__init__.py  2
-rw-r--r--  numpy/distutils/ccompiler.py  14
-rw-r--r--  numpy/distutils/fcompiler/gnu.py  142
-rw-r--r--  numpy/distutils/intelccompiler.py  50
-rw-r--r--  numpy/distutils/mingw32ccompiler.py  163
-rw-r--r--  numpy/distutils/system_info.py  17
-rw-r--r--  numpy/distutils/tests/test_fcompiler_gnu.py  15
-rw-r--r--  numpy/f2py/__main__.py  23
-rw-r--r--  numpy/f2py/capi_maps.py  8
-rw-r--r--  numpy/f2py/setup.py  30
-rw-r--r--  numpy/lib/_iotools.py  39
-rw-r--r--  numpy/lib/arraysetops.py  7
-rw-r--r--  numpy/lib/format.py  68
-rw-r--r--  numpy/lib/function_base.py  54
-rw-r--r--  numpy/lib/index_tricks.py  2
-rw-r--r--  numpy/lib/npyio.py  133
-rw-r--r--  numpy/lib/recfunctions.py  4
-rw-r--r--  numpy/lib/shape_base.py  7
-rw-r--r--  numpy/lib/stride_tricks.py  165
-rw-r--r--  numpy/lib/tests/data/py2-objarr.npy  bin  0 -> 258 bytes
-rw-r--r--  numpy/lib/tests/data/py2-objarr.npz  bin  0 -> 366 bytes
-rw-r--r--  numpy/lib/tests/data/py3-objarr.npy  bin  0 -> 341 bytes
-rw-r--r--  numpy/lib/tests/data/py3-objarr.npz  bin  0 -> 449 bytes
-rw-r--r--  numpy/lib/tests/test__iotools.py  18
-rw-r--r--  numpy/lib/tests/test_format.py  127
-rw-r--r--  numpy/lib/tests/test_function_base.py  40
-rw-r--r--  numpy/lib/tests/test_io.py  77
-rw-r--r--  numpy/lib/tests/test_shape_base.py  6
-rw-r--r--  numpy/lib/tests/test_stride_tricks.py  123
-rw-r--r--  numpy/lib/tests/test_type_check.py  2
-rw-r--r--  numpy/lib/type_check.py  56
-rw-r--r--  numpy/linalg/linalg.py  65
-rw-r--r--  numpy/linalg/tests/test_linalg.py  43
-rw-r--r--  numpy/ma/core.py  39
-rw-r--r--  numpy/ma/extras.py  68
-rw-r--r--  numpy/ma/mrecords.py  2
-rw-r--r--  numpy/ma/tests/test_core.py  30
-rw-r--r--  numpy/ma/tests/test_extras.py  82
-rw-r--r--  numpy/ma/tests/test_regression.py  15
-rw-r--r--  numpy/matrixlib/defmatrix.py  2
-rw-r--r--  numpy/polynomial/polyutils.py  2
-rw-r--r--  numpy/random/mtrand/distributions.c  7
-rw-r--r--  numpy/random/mtrand/mtrand.pyx  521
-rw-r--r--  numpy/random/tests/test_regression.py  11
-rw-r--r--  numpy/testing/nosetester.py  4
-rw-r--r--  numpy/testing/tests/test_utils.py  70
-rw-r--r--  numpy/testing/utils.py  66
-rwxr-xr-x  setup.py  1
-rw-r--r--  tools/swig/numpy.i  34
101 files changed, 3325 insertions, 1646 deletions
diff --git a/.travis.yml b/.travis.yml
index b39d335ad..0bf2ab447 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -16,8 +16,9 @@ matrix:
env: NPY_SEPARATE_COMPILATION=0 PYTHON_OO=1
- python: 3.4
env: NPY_RELAXED_STRIDES_CHECKING=0
- - python: 2.7
- env: USE_BENTO=1
+# disable bento test until waf issue is resolved.
+# - python: 2.7
+# env: USE_BENTO=1
- python: 2.7
env: USE_WHEEL=1
before_install:
diff --git a/INSTALL.txt b/INSTALL.txt
index 01726df04..7a0a57ee5 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -130,6 +130,38 @@ is broken). gcc 4.4 will hopefully be able to run natively.
This is the only tested way to get a numpy with a FULL blas/lapack (scipy
does not work because of C++).
+Carl Kleffner's mingw-w64 toolchain
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Carl Kleffner has been working on mingw-w64 / OpenBLAS support and has put
+together toolchains for that option. The toolchains are available at
+https://bitbucket.org/carlkl/mingw-w64-for-python/downloads. The site.cfg
+should be configured like so:
+
+[openblas]
+libraries = openblaspy
+library_dirs = <openblaspath>/lib
+include_dirs = <openblaspath>/include
+
+The libopenblaspy.dll from <openblaspath>/bin must be copied to numpy/core
+before the build. For this mingw-w64 toolchain, manual creation of the python
+import libs is necessary, i.e.:
+
+gendef python27.dll
+dlltool -D python27.dll -d python27.def -l libpython27.dll.a
+move libpython27.dll.a libs\libpython27.dll.a
+
+For python-2.6 up to python 3.2 use
+https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_win32_vc90.tar.xz
+or
+https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_amd64_vc90.tar.xz
+
+For python-3.3 and python-3.4 use
+https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_win32_vc100.tar.xz
+or
+https://bitbucket.org/carlkl/mingw-w64-for-python/downloads/mingwpy_amd64_vc100.tar.xz
+
+
MS compilers
------------
diff --git a/LICENSE.txt b/LICENSE.txt
index 7e972cff8..b4139af86 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2005-2011, NumPy Developers.
+Copyright (c) 2005-2015, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt
index bfea0e117..690baca3d 100644
--- a/doc/TESTS.rst.txt
+++ b/doc/TESTS.rst.txt
@@ -374,3 +374,13 @@ at the end of the test run. Skipped tests are marked as ``'S'`` in
the test results (or ``'SKIPPED'`` for ``verbose > 1``), and known
failing tests are marked as ``'K'`` (or ``'KNOWN'`` if ``verbose >
1``).
+
+Tests on random data
+--------------------
+
+Tests on random data are good, but since test failures are meant to expose
+new bugs or regressions, a test that passes most of the time but fails
+occasionally with no code changes is not helpful. Make the random data
+deterministic by setting the random number seed before generating it. Use
+either Python's ``random.seed(some_number)`` or Numpy's
+``numpy.random.seed(some_number)``, depending on the source of random numbers.
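+
+As a minimal sketch of this guidance (the test name and bound are made up),
+a seeded test looks like::
+
+    import numpy as np
+
+    def test_bounded_random_data():
+        # Seed first so the "random" data is identical on every run.
+        np.random.seed(1234)
+        data = np.random.rand(100)
+        assert data.max() < 1.0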
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 9ed8653f5..6a6bbd4c6 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -64,6 +64,9 @@ C API
The changes to *swapaxes* also apply to the *PyArray_SwapAxes* C function,
which now returns a view in all cases.
+The dtype structure (PyArray_Descr) has a new member at the end to cache
+its hash value. This shouldn't affect any well-written applications.
+
recarray field return types
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Previously the returned types for recarray fields accessed by attribute and by
@@ -74,6 +77,15 @@ Notably, this affects recarrays containing strings with whitespace, as trailing
whitespace is trimmed from chararrays but kept in ndarrays of string type.
Also, the dtype.type of nested structured fields is now inherited.
+'out' keyword argument of ufuncs now accepts tuples of arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When using the 'out' keyword argument of a ufunc, a tuple of arrays, one per
+ufunc output, can be provided. For ufuncs with a single output a single array
+is also a valid 'out' keyword argument. Previously, a single array passed as
+the 'out' keyword argument would be used as the first output of a ufunc with
+multiple outputs. That usage is now deprecated and will result in a
+`DeprecationWarning` now and an error in the future.
+
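+For illustration, a sketch of the new convention using *np.modf*, a ufunc
+with two outputs (array names are made up)::
+
+    x = np.linspace(0, 2.5, 6)
+    frac, intg = np.empty_like(x), np.empty_like(x)
+    np.modf(x, out=(frac, intg))   # one array per output
+    np.negative(x, out=(frac,))    # a 1-tuple is fine for single-output ufuncs
+    np.negative(x, out=frac)       # so is a plain array
+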
New Features
============
@@ -111,6 +123,24 @@ number of rows read in a single call. Using this functionality, it is
possible to read in multiple arrays stored in a single file by making
repeated calls to the function.
+New function *np.broadcast_to* for invoking array broadcasting
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*np.broadcast_to* manually broadcasts an array to a given shape according to
+numpy's broadcasting rules. The functionality is similar to broadcast_arrays,
+which in fact has been rewritten to use broadcast_to internally, but only a
+single array is necessary.
+
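+A brief example of the new function::
+
+    >>> x = np.array([1, 2, 3])
+    >>> np.broadcast_to(x, (3, 3))
+    array([[1, 2, 3],
+           [1, 2, 3],
+           [1, 2, 3]])
+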
+New context manager *clear_and_catch_warnings* for testing warnings
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When Python emits a warning, it records that this warning has been emitted in
+the module that caused the warning, in a module attribute
+``__warningregistry__``. Once this has happened, it is not possible to emit
+the warning again, unless you clear the relevant entry in
+``__warningregistry__``. This makes it hard and fragile to test warnings,
+because if your test comes after another that has already caused the warning,
+you will not be able to emit the warning or test it. The context manager
+``clear_and_catch_warnings`` clears warnings from the module registry on entry
+and resets them on exit, meaning that warnings can be re-raised.
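+
+A sketch of the intended usage, assuming the warning originates in a
+hypothetical module ``my_mod``::
+
+    import warnings
+    from numpy.testing import clear_and_catch_warnings
+    import my_mod
+
+    with clear_and_catch_warnings(record=True, modules=[my_mod]) as w:
+        warnings.simplefilter('always')
+        my_mod.function_that_warns()   # warns even if already raised before
+        assert len(w) == 1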
Improvements
============
@@ -143,6 +173,12 @@ interpolation behavior.
NumPy arrays are supported as input for ``pad_width``, and an exception is
raised if its values are not of integral type.
+*np.argmax* and *np.argmin* now support an ``out`` argument
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``out`` parameter was added to *np.argmax* and *np.argmin* for consistency
+with *ndarray.argmax* and *ndarray.argmin*. The new parameter behaves exactly
+as it does in those methods.
+
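+For example (a sketch; the ``out`` array must have the matching shape and an
+integer dtype)::
+
+    >>> a = np.arange(6).reshape(2, 3)
+    >>> out = np.zeros(3, dtype=np.intp)
+    >>> np.argmax(a, axis=0, out=out)
+    array([1, 1, 1])
+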
More system C99 complex functions detected and used
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All of the functions in ``complex.h`` are now detected. There are new
@@ -173,6 +209,20 @@ what was provided by *np.allclose*.
compare NaNs as equal by setting ``equal_nan=True``. Subclasses, such as
*np.ma.MaskedArray*, are also preserved now.
+*np.genfromtxt* now handles large integers correctly
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*np.genfromtxt* now correctly handles integers larger than ``2**31-1`` on
+32-bit systems and larger than ``2**63-1`` on 64-bit systems (it previously
+crashed with an ``OverflowError`` in these cases). Integers larger than
+``2**63-1`` are converted to floating-point values.
+
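+A sketch of the fallback behaviour (the printed value is indicative)::
+
+    >>> from io import BytesIO
+    >>> np.genfromtxt(BytesIO(b"73786976294838206464"), dtype=None)  # 2**66
+    array(7.378697629483821e+19)
+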
+*np.load*, *np.save* have pickle backward compatibility flags
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The functions *np.load* and *np.save* have additional keyword
+arguments for controlling backward compatibility of pickled Python
+objects. This enables Numpy on Python 3 to load npy files containing
+object arrays that were generated on Python 2.
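+
+A sketch of reading such a file on Python 3 (the file name refers to the test
+data added in this change; keyword defaults may differ)::
+
+    arr = np.load('py2-objarr.npy', encoding='latin1', fix_imports=True)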
Changes
=======
@@ -213,3 +263,18 @@ deprecated.
pkgload, PackageLoader
~~~~~~~~~~~~~~~~~~~~~~
These ways of loading packages are now deprecated.
+
+bias, ddof arguments to corrcoef
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The values for the ``bias`` and ``ddof`` arguments to the ``corrcoef``
+function canceled in the division implied by the correlation coefficient and
+so had no effect on the returned values.
+
+We now deprecate these arguments to ``corrcoef`` and the masked array version
+``ma.corrcoef``.
+
+Because we are deprecating the ``bias`` argument to ``ma.corrcoef``, we also
+deprecate the use of the ``allow_masked`` argument as a positional argument,
+as its position will change with the removal of ``bias``. ``allow_masked``
+will in due course become a keyword-only argument.
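+
+The cancellation is easy to check numerically; ``bias`` and ``ddof`` rescale
+the covariance and the variances by the same factor, so the quotient is
+unchanged (the deprecated keyword now also emits a ``DeprecationWarning``)::
+
+    >>> x = np.array([1., 2., 4.])
+    >>> y = np.array([1., 3., 2.])
+    >>> np.corrcoef(x, y)[0, 1] == np.corrcoef(x, y, ddof=1)[0, 1]
+    True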
diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst
index 81af0a315..2b3ba342a 100644
--- a/doc/source/reference/routines.array-manipulation.rst
+++ b/doc/source/reference/routines.array-manipulation.rst
@@ -40,6 +40,7 @@ Changing number of dimensions
atleast_2d
atleast_3d
broadcast
+ broadcast_to
broadcast_arrays
expand_dims
squeeze
diff --git a/doc/source/reference/routines.io.rst b/doc/source/reference/routines.io.rst
index b99754912..ff8c05c16 100644
--- a/doc/source/reference/routines.io.rst
+++ b/doc/source/reference/routines.io.rst
@@ -42,6 +42,7 @@ String formatting
.. autosummary::
:toctree: generated/
+ array2string
array_repr
array_str
diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst
index 52f42a66c..94533aaa9 100644
--- a/doc/source/reference/routines.linalg.rst
+++ b/doc/source/reference/routines.linalg.rst
@@ -72,6 +72,8 @@ Exceptions
Linear algebra on several matrices at once
------------------------------------------
+.. versionadded:: 1.8.0
+
Several of the linear algebra routines listed above are able to
compute results for several matrices at once, if they are stacked into
the same array.
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index 3d6112058..a97534612 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -299,7 +299,20 @@ advanced usage and will not typically be used.
.. versionadded:: 1.6
- The first output can provided as either a positional or a keyword parameter.
+ The first output can be provided as either a positional or a keyword
+ parameter. Keyword 'out' arguments are incompatible with positional
+ ones.
+
+ .. versionadded:: 1.10
+
+ The 'out' keyword argument is expected to be a tuple with one entry per
+ output (which can be `None` for arrays to be allocated by the ufunc).
+ For ufuncs with a single output, passing a single array (instead of a
+ tuple holding a single array) is also valid.
+
+ Passing a single array in the 'out' keyword argument to a ufunc with
+ multiple outputs is deprecated; it will raise a warning in numpy 1.10
+ and an error in a future release.
*where*
@@ -337,7 +350,8 @@ advanced usage and will not typically be used.
.. versionadded:: 1.6
- Overrides the dtype of the calculation and output arrays. Similar to *sig*.
+ Overrides the dtype of the calculation and output arrays. Similar to
+ *signature*.
*subok*
@@ -346,7 +360,7 @@ advanced usage and will not typically be used.
Defaults to true. If set to false, the output will always be a strict
array, not a subtype.
-*sig*
+*signature*
Either a data-type, a tuple of data-types, or a special signature
string indicating the input and output types of a ufunc. This argument
@@ -357,7 +371,9 @@ advanced usage and will not typically be used.
available and searching for a loop with data-types to which all inputs
can be cast safely. This keyword argument lets you bypass that
search and choose a particular loop. A list of available signatures is
- provided by the **types** attribute of the ufunc object.
+ provided by the **types** attribute of the ufunc object. For backwards
+ compatibility this argument can also be provided as *sig*, although
+ the long form is preferred.
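+
+ For instance, a sketch of forcing the double-precision loop of *np.add*::
+
+     >>> np.add.types[0]        # one entry per available loop
+     '??->?'
+     >>> np.add(np.arange(3), np.arange(3), signature='dd->d')
+     array([ 0.,  2.,  4.])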
*extobj*
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 772c75b63..d4ef54d83 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -132,6 +132,16 @@ class VisibleDeprecationWarning(UserWarning):
pass
+class _NoValue:
+ """Special keyword value.
+
+ This class may be used as the default value assigned to a
+ deprecated keyword in order to check whether it has been given a
+ user-defined value.
+ """
+ pass
+
+
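+A sketch of how such a sentinel is typically consumed (``old_kw`` and the
+function are made up)::
+
+    import warnings
+    import numpy as np
+
+    def some_function(data, old_kw=np._NoValue):
+        # Warn only if the caller actually passed the deprecated keyword;
+        # unlike None, the sentinel cannot be a legitimate user value.
+        if old_kw is not np._NoValue:
+            warnings.warn("old_kw is deprecated", DeprecationWarning)
+        return np.asarray(data)
+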
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
@@ -214,3 +224,4 @@ else:
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
+ warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 7dd8c5649..4cc626ca9 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -2982,7 +2982,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
- a.all(axis=None, out=None)
+ a.all(axis=None, out=None, keepdims=False)
Returns True if all elements evaluate to True.
@@ -2997,7 +2997,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
- a.any(axis=None, out=None)
+ a.any(axis=None, out=None, keepdims=False)
Returns True if any of the elements of `a` evaluate to True.
@@ -3198,9 +3198,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
- a.clip(a_min, a_max, out=None)
+ a.clip(min=None, max=None, out=None)
- Return an array whose values are limited to ``[a_min, a_max]``.
+ Return an array whose values are limited to ``[min, max]``.
+ One of max or min must be given.
Refer to `numpy.clip` for full documentation.
@@ -3656,7 +3657,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
- a.mean(axis=None, dtype=None, out=None)
+ a.mean(axis=None, dtype=None, out=None, keepdims=False)
Returns the average of the array elements along given axis.
@@ -3671,7 +3672,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
- a.min(axis=None, out=None)
+ a.min(axis=None, out=None, keepdims=False)
Return the minimum along a given axis.
@@ -3769,7 +3770,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
- a.prod(axis=None, dtype=None, out=None)
+ a.prod(axis=None, dtype=None, out=None, keepdims=False)
Return the product of the array elements over the given axis
@@ -4300,7 +4301,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
- a.std(axis=None, dtype=None, out=None, ddof=0)
+ a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the standard deviation of the array elements along given axis.
@@ -4315,7 +4316,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
- a.sum(axis=None, dtype=None, out=None)
+ a.sum(axis=None, dtype=None, out=None, keepdims=False)
Return the sum of the array elements over the given axis.
@@ -4547,7 +4548,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
- a.var(axis=None, dtype=None, out=None, ddof=0)
+ a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
Returns the variance of the array elements, along given axis.
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 125d57672..2dc56928c 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -417,6 +417,10 @@ def array2string(a, max_line_width=None, precision=None,
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
+ This is a very flexible function; `array_repr` and `array_str` use
+ `array2string` internally, so keywords with the same name should work
+ identically in all three functions.
+
Examples
--------
>>> x = np.array([1e-16,1,2,3])
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index aef09411a..549647df2 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -526,6 +526,8 @@ def transpose(a, axes=None):
Use `transpose(a, argsort(axes))` to invert the transposition of tensors
when using the `axes` keyword argument.
+ Transposing a 1-D array returns an unchanged view of the original array.
+
Examples
--------
>>> x = np.arange(4).reshape((2,2))
@@ -691,8 +693,16 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
>>> x[np.argpartition(x, (1, 3))]
array([1, 2, 3, 4])
+ >>> x = [3, 4, 2, 1]
+ >>> np.array(x)[np.argpartition(x, 3)]
+ array([2, 1, 3, 4])
+
"""
- return a.argpartition(kth, axis, kind=kind, order=order)
+ try:
+ argpartition = a.argpartition
+ except AttributeError:
+ return _wrapit(a, 'argpartition', kth, axis, kind, order)
+ return argpartition(kth, axis, kind=kind, order=order)
def sort(a, axis=-1, kind='quicksort', order=None):
@@ -890,7 +900,7 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
return argsort(axis, kind, order)
-def argmax(a, axis=None):
+def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
@@ -901,6 +911,9 @@ def argmax(a, axis=None):
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
Returns
-------
@@ -943,11 +956,11 @@ def argmax(a, axis=None):
try:
argmax = a.argmax
except AttributeError:
- return _wrapit(a, 'argmax', axis)
- return argmax(axis)
+ return _wrapit(a, 'argmax', axis, out)
+ return argmax(axis, out)
-def argmin(a, axis=None):
+def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
@@ -958,6 +971,9 @@ def argmin(a, axis=None):
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
+ out : array, optional
+ If provided, the result will be inserted into this array. It should
+ be of the appropriate shape and dtype.
Returns
-------
@@ -1000,8 +1016,8 @@ def argmin(a, axis=None):
try:
argmin = a.argmin
except AttributeError:
- return _wrapit(a, 'argmin', axis)
- return argmin(axis)
+ return _wrapit(a, 'argmin', axis, out)
+ return argmin(axis, out)
def searchsorted(a, v, side='left', sorter=None):
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 165ea6860..bd1c4571b 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -206,7 +206,7 @@ class iinfo(object):
Parameters
----------
- type : integer type, dtype, or instance
+ int_type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 78f79d5fe..edae27c72 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -619,6 +619,10 @@ typedef struct _PyArray_Descr {
* for NumPy 1.7.0.
*/
NpyAuxData *c_metadata;
+ /* Cached hash value (-1 if not yet computed).
+ * This was added for NumPy 1.10.0.
+ */
+ npy_hash_t hash;
} PyArray_Descr;
typedef struct _arr_descr {
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 8a9109c5c..ef5b5694c 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -486,19 +486,6 @@ NpyCapsule_Check(PyObject *ptr)
#endif
-/*
- * Hash value compatibility.
- * As of Python 3.2 hash values are of type Py_hash_t.
- * Previous versions use C long.
- */
-#if PY_VERSION_HEX < 0x03020000
-typedef long npy_hash_t;
-#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
-#else
-typedef Py_hash_t npy_hash_t;
-#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 92b03d20c..eff5dd339 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -317,6 +317,19 @@ typedef float npy_float;
typedef double npy_double;
/*
+ * Hash value compatibility.
+ * As of Python 3.2 hash values are of type Py_hash_t.
+ * Previous versions use C long.
+ */
+#if PY_VERSION_HEX < 0x03020000
+typedef long npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
+#else
+typedef Py_hash_t npy_hash_t;
+#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
+#endif
+
+/*
* Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being
* able to do .real/.imag. Will have to convert code first.
*/
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 7f0649158..11b443cf8 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -758,6 +758,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
+ join('src', 'private', 'npy_config.h'),
join('src', 'private', 'templ_common.h.src'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 685aba542..8287c2268 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -3680,11 +3680,10 @@ static void
@name@_fastputmask(@type@ *in, npy_bool *mask, npy_intp ni, @type@ *vals,
npy_intp nv)
{
- npy_intp i;
- @type@ s_val;
+ npy_intp i, j;
if (nv == 1) {
- s_val = *vals;
+ @type@ s_val = *vals;
for (i = 0; i < ni; i++) {
if (mask[i]) {
in[i] = s_val;
@@ -3692,9 +3691,12 @@ static void
}
}
else {
- for (i = 0; i < ni; i++) {
+ for (i = 0, j = 0; i < ni; i++, j++) {
+ if (j >= nv) {
+ j = 0;
+ }
if (mask[i]) {
- in[i] = vals[i%nv];
+ in[i] = vals[j];
}
}
}
@@ -4029,6 +4031,8 @@ static PyArray_Descr @from@_Descr = {
NULL,
/* c_metadata */
NULL,
+ /* hash */
+ -1,
};
/**end repeat**/
@@ -4170,6 +4174,8 @@ NPY_NO_EXPORT PyArray_Descr @from@_Descr = {
NULL,
/* c_metadata */
NULL,
+ /* hash */
+ -1,
};
/**end repeat**/
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index edcca9857..e3cec21b1 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -618,7 +618,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
}
/* arr.real = a.real.round(decimals) */
- part = PyObject_GetAttrString(arr, "real");
+ part = PyObject_GetAttrString((PyObject *)a, "real");
if (part == NULL) {
Py_DECREF(arr);
return NULL;
@@ -639,7 +639,7 @@ PyArray_Round(PyArrayObject *a, int decimals, PyArrayObject *out)
}
/* arr.imag = a.imag.round(decimals) */
- part = PyObject_GetAttrString(arr, "imag");
+ part = PyObject_GetAttrString((PyObject *)a, "imag");
if (part == NULL) {
Py_DECREF(arr);
return NULL;
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 816778b91..a5f3b3d55 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -684,7 +684,16 @@ _IsAligned(PyArrayObject *ap)
/* alignment 1 types should have an efficient alignment for copy loops */
if (PyArray_ISFLEXIBLE(ap) || PyArray_ISSTRING(ap)) {
- alignment = NPY_MAX_COPY_ALIGNMENT;
+ npy_intp itemsize = PyArray_ITEMSIZE(ap);
+ /* power of two sizes may be loaded in larger moves */
+ if (((itemsize & (itemsize - 1)) == 0)) {
+ alignment = itemsize > NPY_MAX_COPY_ALIGNMENT ?
+ NPY_MAX_COPY_ALIGNMENT : itemsize;
+ }
+ else {
+ /* if not power of two it will be accessed bytewise */
+ alignment = 1;
+ }
}
if (alignment == 1) {
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index d258d1ad8..baf405d1e 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -477,10 +477,14 @@ fail:
return NULL;
}
-/** @brief Use bisection on a sorted array to find first entry > key.
+/** @brief Find the index into a sorted array such that arr[i] <= key < arr[i + 1].
*
- * Use bisection to find an index i s.t. arr[i] <= key < arr[i + 1]. If there is
- * no such i the error returns are:
+ * If a starting index guess is in range, the array values around this
+ * index are first checked. This allows repeated calls for well-ordered
+ * keys (a very common case) to use the previous index as a very good guess.
+ *
+ * If the guess value is not useful, bisection of the array is used to
+ * find the index. If there is no such index, the return values are:
* key < arr[0] -- -1
* key == arr[len - 1] -- len - 1
* key > arr[len - 1] -- len
@@ -489,10 +493,12 @@ fail:
* @param key key value.
* @param arr contiguous sorted array to be searched.
* @param len length of the array.
+ * @param guess initial guess of index
* @return index
*/
static npy_intp
-binary_search(double key, double arr [], npy_intp len)
+binary_search_with_guess(double key, double arr [], npy_intp len,
+ npy_intp guess)
{
npy_intp imin = 0;
npy_intp imax = len;
@@ -500,6 +506,37 @@ binary_search(double key, double arr [], npy_intp len)
if (key > arr[len - 1]) {
return len;
}
+ else if (key < arr[0]) {
+ return -1;
+ }
+
+ if (guess < 0) {
+ guess = 0;
+ }
+ else if (guess >= len - 1) {
+ guess = len - 2;
+ }
+
+ /* check most likely values: guess, guess + 1, guess - 1 */
+ if ((key > arr[guess]) && (key <= arr[guess + 1])) {
+ return guess;
+ }
+ else if ((guess < len - 2) && (key > arr[guess + 1]) &&
+ (key <= arr[guess + 2])) {
+ return guess + 1;
+ }
+ else if ((guess > 1) && (key > arr[guess - 1]) &&
+ (key <= arr[guess])) {
+ return guess - 1;
+ }
+ /* may be able to restrict bounds to range likely to be in memory */
+ if ((guess > 8) && (key > arr[guess - 8])) {
+ imin = guess - 8;
+ }
+ if ((guess < len - 9) && (key <= arr[guess + 8])) {
+ imax = guess + 8;
+ }
+ /* finally, find index by bisection */
while (imin < imax) {
npy_intp imid = imin + ((imax - imin) >> 1);
if (key >= arr[imid]) {
@@ -519,7 +556,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
PyObject *fp, *xp, *x;
PyObject *left = NULL, *right = NULL;
PyArrayObject *afp = NULL, *axp = NULL, *ax = NULL, *af = NULL;
- npy_intp i, lenx, lenxp;
+ npy_intp i, lenx, lenxp, j, jprev;
double lval, rval;
double *dy, *dx, *dz, *dres, *slopes;
@@ -565,7 +602,6 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
dx = (double *)PyArray_DATA(axp);
dz = (double *)PyArray_DATA(ax);
dres = (double *)PyArray_DATA(af);
-
/* Get left and right fill values. */
if ((left == NULL) || (left == Py_None)) {
lval = dy[0];
@@ -587,6 +623,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
/* only pre-calculate slopes if there are relatively few of them. */
+ j = jprev = 0;
if (lenxp <= lenx) {
slopes = (double *) PyArray_malloc((lenxp - 1)*sizeof(double));
if (! slopes) {
@@ -598,14 +635,14 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
for (i = 0; i < lenx; i++) {
const double x = dz[i];
- npy_intp j;
if (npy_isnan(x)) {
dres[i] = x;
continue;
}
- j = binary_search(x, dx, lenxp);
+ j = binary_search_with_guess(x, dx, lenxp, jprev);
+ jprev = j;
if (j == -1) {
dres[i] = lval;
}
@@ -626,14 +663,14 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
NPY_BEGIN_ALLOW_THREADS;
for (i = 0; i < lenx; i++) {
const double x = dz[i];
- npy_intp j;
if (npy_isnan(x)) {
dres[i] = x;
continue;
}
- j = binary_search(x, dx, lenxp);
+ j = binary_search_with_guess(x, dx, lenxp, jprev);
+ jprev = j;
if (j == -1) {
dres[i] = lval;
}
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index b610343cc..e1c2cb8d9 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -350,16 +350,33 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
}
/* Python integer */
else if (PyLong_Check(obj) || PyInt_Check(obj)) {
- npy_longlong v = PyLong_AsLongLong(obj);
- if (v == -1 && PyErr_Occurred()) {
- return -1;
+ /* Try long long before unsigned long long */
+ npy_longlong ll_v = PyLong_AsLongLong(obj);
+ if (ll_v == -1 && PyErr_Occurred()) {
+ /* Long long failed, try unsigned long long */
+ npy_ulonglong ull_v;
+ PyErr_Clear();
+ ull_v = PyLong_AsUnsignedLongLong(obj);
+ if (ull_v == (unsigned long long)-1 && PyErr_Occurred()) {
+ return -1;
+ }
+ value = (char *)value_buffer;
+ *(npy_ulonglong *)value = ull_v;
+
+ dtype = PyArray_DescrFromType(NPY_ULONGLONG);
+ if (dtype == NULL) {
+ return -1;
+ }
}
- value = (char *)value_buffer;
- *(npy_longlong *)value = v;
+ else {
+ /* Long long succeeded */
+ value = (char *)value_buffer;
+ *(npy_longlong *)value = ll_v;
- dtype = PyArray_DescrFromType(NPY_LONGLONG);
- if (dtype == NULL) {
- return -1;
+ dtype = PyArray_DescrFromType(NPY_LONGLONG);
+ if (dtype == NULL) {
+ return -1;
+ }
}
}
/* Python float */
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 010420826..70161bf74 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -568,7 +568,8 @@ PyArray_AssignFromSequence(PyArrayObject *self, PyObject *v)
static int
discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
{
- int n, r, i;
+ int r;
+ npy_intp n, i;
if (PyArray_Check(s)) {
*itemsize = PyArray_MAX(*itemsize, PyArray_ITEMSIZE((PyArrayObject *)s));
@@ -645,7 +646,8 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
int *out_is_object)
{
PyObject *e;
- int r, n, i;
+ int r;
+ npy_intp n, i;
Py_buffer buffer_view;
PyObject * seq;
@@ -2069,6 +2071,51 @@ PyArray_FromStructInterface(PyObject *input)
return NULL;
}
+/*
+ * Checks if the object in descr is the default 'descr' member for the
+ * __array_interface__ dictionary with 'typestr' member typestr.
+ */
+NPY_NO_EXPORT int
+_is_default_descr(PyObject *descr, PyObject *typestr) {
+ PyObject *tuple, *name, *typestr2;
+#if defined(NPY_PY3K)
+ PyObject *tmp = NULL;
+#endif
+ int ret = 0;
+
+ if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
+ return 0;
+ }
+ tuple = PyList_GET_ITEM(descr, 0);
+ if (!(PyTuple_Check(tuple) && PyTuple_GET_SIZE(tuple) == 2)) {
+ return 0;
+ }
+ name = PyTuple_GET_ITEM(tuple, 0);
+ if (!(PyUString_Check(name) && PyUString_GET_SIZE(name) == 0)) {
+ return 0;
+ }
+ typestr2 = PyTuple_GET_ITEM(tuple, 1);
+#if defined(NPY_PY3K)
+ /* Allow unicode type strings */
+ if (PyUnicode_Check(typestr2)) {
+ tmp = PyUnicode_AsASCIIString(typestr2);
+ if (tmp == NULL) {
+ return 0;
+ }
+ typestr2 = tmp;
+ }
+#endif
+ if (PyBytes_Check(typestr2) &&
+ PyObject_RichCompareBool(typestr, typestr2, Py_EQ)) {
+ ret = 1;
+ }
+#if defined(NPY_PY3K)
+ Py_XDECREF(tmp);
+#endif
+
+ return ret;
+}
+
#define PyIntOrLong_Check(obj) (PyInt_Check(obj) || PyLong_Check(obj))
/*NUMPY_API*/
@@ -2087,11 +2134,6 @@ PyArray_FromInterface(PyObject *origin)
npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
int dataflags = NPY_ARRAY_BEHAVED;
- /* Get the typestring -- ignore array_descr */
- /* Get the shape */
- /* Get the memory from __array_data__ and __array_offset__ */
- /* Get the strides */
-
iface = PyArray_GetAttrString_SuppressException(origin,
"__array_interface__");
if (iface == NULL) {
@@ -2135,6 +2177,22 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
+ /*
+ * If the dtype is NPY_VOID, see if there is extra information in
+ * the 'descr' attribute.
+ */
+ if (dtype->type_num == NPY_VOID) {
+ PyObject *descr = PyDict_GetItemString(iface, "descr");
+ PyArray_Descr *new_dtype = NULL;
+
+ if (descr != NULL && !_is_default_descr(descr, attr) &&
+ PyArray_DescrConverter2(descr, &new_dtype) == NPY_SUCCEED &&
+ new_dtype != NULL) {
+ Py_DECREF(dtype);
+ dtype = new_dtype;
+ }
+ }
+
/* Get shape tuple from interface specification */
attr = PyDict_GetItemString(iface, "shape");
if (attr == NULL) {
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 0993190b7..bbcd5da36 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1591,6 +1591,7 @@ PyArray_DescrNew(PyArray_Descr *base)
}
Py_XINCREF(newdescr->typeobj);
Py_XINCREF(newdescr->metadata);
+ newdescr->hash = -1;
return newdescr;
}
@@ -1994,6 +1995,8 @@ arraydescr_names_set(PyArray_Descr *self, PyObject *val)
return -1;
}
}
+ /* Invalidate cached hash value */
+ self->hash = -1;
/* Update dictionary keys in fields */
new_names = PySequence_Tuple(val);
new_fields = PyDict_New();
@@ -2443,6 +2446,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
version);
return NULL;
}
+ /* Invalidate cached hash value */
+ self->hash = -1;
if (version == 1 || version == 0) {
if (fields != Py_None) {
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 7a7379ad5..f11ea395f 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -1362,6 +1362,13 @@ get_nbo_cast_transfer_function(int aligned,
break;
}
+ if (PyDataType_FLAGCHK(src_dtype, NPY_NEEDS_PYAPI) ||
+ PyDataType_FLAGCHK(dst_dtype, NPY_NEEDS_PYAPI)) {
+ if (out_needs_api) {
+ *out_needs_api = 1;
+ }
+ }
+
/* Get the cast function */
castfunc = PyArray_GetCastFunc(src_dtype, dst_dtype->type_num);
if (!castfunc) {
diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c
index 29d69fddb..6ed4f7905 100644
--- a/numpy/core/src/multiarray/hashdescr.c
+++ b/numpy/core/src/multiarray/hashdescr.c
@@ -28,7 +28,7 @@
static int _is_array_descr_builtin(PyArray_Descr* descr);
static int _array_descr_walk(PyArray_Descr* descr, PyObject *l);
-static int _array_descr_walk_fields(PyObject* fields, PyObject* l);
+static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* l);
static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l);
/*
@@ -86,7 +86,6 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l)
"(Hash) Error while computing builting hash");
goto clean_t;
}
- Py_INCREF(item);
PyList_Append(l, item);
}
@@ -104,18 +103,35 @@ clean_t:
*
* Return 0 on success
*/
-static int _array_descr_walk_fields(PyObject* fields, PyObject* l)
+static int _array_descr_walk_fields(PyObject *names, PyObject* fields, PyObject* l)
{
- PyObject *key, *value, *foffset, *fdescr;
+ PyObject *key, *value, *foffset, *fdescr, *ftitle;
Py_ssize_t pos = 0;
int st;
- while (PyDict_Next(fields, &pos, &key, &value)) {
+ if (!PyTuple_Check(names)) {
+ PyErr_SetString(PyExc_SystemError,
+ "(Hash) names is not a tuple ???");
+ return -1;
+ }
+ if (!PyDict_Check(fields)) {
+ PyErr_SetString(PyExc_SystemError,
+ "(Hash) fields is not a dict ???");
+ return -1;
+ }
+
+ for (pos = 0; pos < PyTuple_GET_SIZE(names); pos++) {
/*
* For each field, add the key + descr + offset to l
*/
-
+ key = PyTuple_GET_ITEM(names, pos);
+ value = PyDict_GetItem(fields, key);
/* XXX: are those checks necessary ? */
+ if (value == NULL) {
+ PyErr_SetString(PyExc_SystemError,
+ "(Hash) names and fields inconsistent ???");
+ return -1;
+ }
if (!PyUString_Check(key)) {
PyErr_SetString(PyExc_SystemError,
"(Hash) key of dtype dict not a string ???");
@@ -126,15 +142,14 @@ static int _array_descr_walk_fields(PyObject* fields, PyObject* l)
"(Hash) value of dtype dict not a dtype ???");
return -1;
}
- if (PyTuple_Size(value) < 2) {
+ if (PyTuple_GET_SIZE(value) < 2) {
PyErr_SetString(PyExc_SystemError,
"(Hash) Less than 2 items in dtype dict ???");
return -1;
}
- Py_INCREF(key);
PyList_Append(l, key);
- fdescr = PyTuple_GetItem(value, 0);
+ fdescr = PyTuple_GET_ITEM(value, 0);
if (!PyArray_DescrCheck(fdescr)) {
PyErr_SetString(PyExc_SystemError,
"(Hash) First item in compound dtype tuple not a descr ???");
@@ -149,16 +164,20 @@ static int _array_descr_walk_fields(PyObject* fields, PyObject* l)
}
}
- foffset = PyTuple_GetItem(value, 1);
+ foffset = PyTuple_GET_ITEM(value, 1);
if (!PyInt_Check(foffset)) {
PyErr_SetString(PyExc_SystemError,
"(Hash) Second item in compound dtype tuple not an int ???");
return -1;
}
else {
- Py_INCREF(foffset);
PyList_Append(l, foffset);
}
+
+ if (PyTuple_GET_SIZE(value) > 2) {
+ ftitle = PyTuple_GET_ITEM(value, 2);
+ PyList_Append(l, ftitle);
+ }
}
return 0;
@@ -186,12 +205,10 @@ static int _array_descr_walk_subarray(PyArray_ArrayDescr* adescr, PyObject *l)
"(Hash) Error while getting shape item of subarray dtype ???");
return -1;
}
- Py_INCREF(item);
PyList_Append(l, item);
}
}
else if (PyInt_Check(adescr->shape)) {
- Py_INCREF(adescr->shape);
PyList_Append(l, adescr->shape);
}
else {
@@ -219,12 +236,7 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l)
}
else {
if(descr->fields != NULL && descr->fields != Py_None) {
- if (!PyDict_Check(descr->fields)) {
- PyErr_SetString(PyExc_SystemError,
- "(Hash) fields is not a dict ???");
- return -1;
- }
- st = _array_descr_walk_fields(descr->fields, l);
+ st = _array_descr_walk_fields(descr->names, descr->fields, l);
if (st) {
return -1;
}
@@ -245,8 +257,7 @@ static int _array_descr_walk(PyArray_Descr* descr, PyObject *l)
*/
static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash)
{
- PyObject *l, *tl, *item;
- Py_ssize_t i;
+ PyObject *l, *tl;
int st;
l = PyList_New(0);
@@ -256,44 +267,31 @@ static int _PyArray_DescrHashImp(PyArray_Descr *descr, npy_hash_t *hash)
st = _array_descr_walk(descr, l);
if (st) {
- goto clean_l;
+ Py_DECREF(l);
+ return -1;
}
/*
* Convert the list to tuple and compute the tuple hash using python
* builtin function
*/
- tl = PyTuple_New(PyList_Size(l));
- for(i = 0; i < PyList_Size(l); ++i) {
- item = PyList_GetItem(l, i);
- if (item == NULL) {
- PyErr_SetString(PyExc_SystemError,
- "(Hash) Error while translating the list into a tuple " \
- "(NULL item)");
- goto clean_tl;
- }
- PyTuple_SetItem(tl, i, item);
- }
+ tl = PyList_AsTuple(l);
+ Py_DECREF(l);
+ if (tl == NULL)
+ return -1;
*hash = PyObject_Hash(tl);
+ Py_DECREF(tl);
if (*hash == -1) {
/* XXX: does PyObject_Hash set an exception on failure ? */
#if 0
PyErr_SetString(PyExc_SystemError,
"(Hash) Error while hashing final tuple");
#endif
- goto clean_tl;
+ return -1;
}
- Py_DECREF(tl);
- Py_DECREF(l);
return 0;
-
-clean_tl:
- Py_DECREF(tl);
-clean_l:
- Py_DECREF(l);
- return -1;
}
NPY_NO_EXPORT npy_hash_t
@@ -301,7 +299,6 @@ PyArray_DescrHash(PyObject* odescr)
{
PyArray_Descr *descr;
int st;
- npy_hash_t hash;
if (!PyArray_DescrCheck(odescr)) {
PyErr_SetString(PyExc_ValueError,
@@ -310,10 +307,12 @@ PyArray_DescrHash(PyObject* odescr)
}
descr = (PyArray_Descr*)odescr;
- st = _PyArray_DescrHashImp(descr, &hash);
- if (st) {
- return -1;
+ if (descr->hash == -1) {
+ st = _PyArray_DescrHashImp(descr, &descr->hash);
+ if (st) {
+ return -1;
+ }
}
- return hash;
+ return descr->hash;
}
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 7016760b3..877887109 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -427,10 +427,11 @@ NPY_NO_EXPORT PyObject *
PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
{
PyArray_FastPutmaskFunc *func;
- PyArrayObject *mask, *values;
+ PyArrayObject *mask, *values;
PyArray_Descr *dtype;
- npy_intp i, chunk, ni, max_item, nv, tmp;
+ npy_intp i, j, chunk, ni, max_item, nv;
char *src, *dest;
+ npy_bool *mask_data;
int copied = 0;
mask = NULL;
@@ -469,6 +470,7 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
"the same size");
goto fail;
}
+ mask_data = PyArray_DATA(mask);
dtype = PyArray_DESCR(self);
Py_INCREF(dtype);
values = (PyArrayObject *)PyArray_FromAny(values0, dtype,
@@ -483,14 +485,20 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
Py_INCREF(Py_None);
return Py_None;
}
+ src = PyArray_DATA(values);
+
if (PyDataType_REFCHK(PyArray_DESCR(self))) {
- for (i = 0; i < ni; i++) {
- tmp = ((npy_bool *)(PyArray_DATA(mask)))[i];
- if (tmp) {
- src = PyArray_BYTES(values) + chunk * (i % nv);
- PyArray_Item_INCREF(src, PyArray_DESCR(self));
- PyArray_Item_XDECREF(dest+i*chunk, PyArray_DESCR(self));
- memmove(dest + i * chunk, src, chunk);
+ for (i = 0, j = 0; i < ni; i++, j++) {
+ if (j >= nv) {
+ j = 0;
+ }
+ if (mask_data[i]) {
+ char *src_ptr = src + j*chunk;
+ char *dest_ptr = dest + i*chunk;
+
+ PyArray_Item_INCREF(src_ptr, PyArray_DESCR(self));
+ PyArray_Item_XDECREF(dest_ptr, PyArray_DESCR(self));
+ memmove(dest_ptr, src_ptr, chunk);
}
}
}
@@ -499,16 +507,17 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(self));
func = PyArray_DESCR(self)->f->fastputmask;
if (func == NULL) {
- for (i = 0; i < ni; i++) {
- tmp = ((npy_bool *)(PyArray_DATA(mask)))[i];
- if (tmp) {
- src = PyArray_BYTES(values) + chunk*(i % nv);
- memmove(dest + i*chunk, src, chunk);
+ for (i = 0, j = 0; i < ni; i++, j++) {
+ if (j >= nv) {
+ j = 0;
+ }
+ if (mask_data[i]) {
+ memmove(dest + i*chunk, src + j*chunk, chunk);
}
}
}
else {
- func(dest, PyArray_DATA(mask), ni, PyArray_DATA(values), nv);
+ func(dest, mask_data, ni, src, nv);
}
NPY_END_THREADS;
}
@@ -537,9 +546,9 @@ NPY_NO_EXPORT PyObject *
PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
{
npy_intp *counts;
- npy_intp n, n_outer, i, j, k, chunk, total;
- npy_intp tmp;
- int nd;
+ npy_intp n, n_outer, i, j, k, chunk;
+ npy_intp total = 0;
+ npy_bool broadcast = NPY_FALSE;
PyArrayObject *repeats = NULL;
PyObject *ap = NULL;
PyArrayObject *ret = NULL;
@@ -549,34 +558,35 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
if (repeats == NULL) {
return NULL;
}
- nd = PyArray_NDIM(repeats);
+
+ /*
+ * Scalar and size 1 'repeat' arrays broadcast to any shape; for all
+ * other inputs the dimension must match exactly.
+ */
+ if (PyArray_NDIM(repeats) == 0 || PyArray_SIZE(repeats) == 1) {
+ broadcast = NPY_TRUE;
+ }
+
counts = (npy_intp *)PyArray_DATA(repeats);
- if ((ap=PyArray_CheckAxis(aop, &axis, NPY_ARRAY_CARRAY))==NULL) {
+ if ((ap = PyArray_CheckAxis(aop, &axis, NPY_ARRAY_CARRAY)) == NULL) {
Py_DECREF(repeats);
return NULL;
}
aop = (PyArrayObject *)ap;
- if (nd == 1) {
- n = PyArray_DIMS(repeats)[0];
- }
- else {
- /* nd == 0 */
- n = PyArray_DIMS(aop)[axis];
- }
- if (PyArray_DIMS(aop)[axis] != n) {
- PyErr_SetString(PyExc_ValueError,
- "a.shape[axis] != len(repeats)");
+ n = PyArray_DIM(aop, axis);
+
+ if (!broadcast && PyArray_SIZE(repeats) != n) {
+ PyErr_Format(PyExc_ValueError,
+ "operands could not be broadcast together "
+ "with shape (%zd,) (%zd,)", n, PyArray_DIM(repeats, 0));
goto fail;
}
-
- if (nd == 0) {
- total = counts[0]*n;
+ if (broadcast) {
+ total = counts[0] * n;
}
else {
-
- total = 0;
for (j = 0; j < n; j++) {
if (counts[j] < 0) {
PyErr_SetString(PyExc_ValueError, "count < 0");
@@ -586,7 +596,6 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
}
}
-
/* Construct new array */
PyArray_DIMS(aop)[axis] = total;
Py_INCREF(PyArray_DESCR(aop));
@@ -614,7 +623,7 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
}
for (i = 0; i < n_outer; i++) {
for (j = 0; j < n; j++) {
- tmp = nd ? counts[j] : counts[0];
+ npy_intp tmp = broadcast ? counts[0] : counts[j];
for (k = 0; k < tmp; k++) {
memcpy(new_data, old_data, chunk);
new_data += chunk;
@@ -808,7 +817,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
NPY_BEGIN_THREADS_DEF;
/* Check if there is any sorting to do */
- if (N <= 1) {
+ if (N <= 1 || PyArray_SIZE(op) == 0) {
return 0;
}
@@ -955,7 +964,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
needidxbuffer = rstride != sizeof(npy_intp);
/* Check if there is any argsorting to do */
- if (N <= 1) {
+ if (N <= 1 || PyArray_SIZE(op) == 0) {
memset(PyArray_DATA(rop), 0, PyArray_NBYTES(rop));
return (PyObject *)rop;
}
@@ -1084,87 +1093,20 @@ fail:
}
-/* Be sure to save this global_compare when necessary */
-static PyArrayObject *global_obj;
-
-static int
-sortCompare (const void *a, const void *b)
-{
- return PyArray_DESCR(global_obj)->f->compare(a,b,global_obj);
-}
-
-/*
- * Consumes reference to ap (op gets it) op contains a version of
- * the array with axes swapped if local variable axis is not the
- * last dimension. Origin must be defined locally.
- */
-#define SWAPAXES(op, ap) { \
- orign = PyArray_NDIM(ap)-1; \
- if (axis != orign) { \
- (op) = (PyArrayObject *)PyArray_SwapAxes((ap), axis, orign); \
- Py_DECREF((ap)); \
- if ((op) == NULL) return NULL; \
- } \
- else (op) = (ap); \
- }
-
-/*
- * Consumes reference to ap (op gets it) origin must be previously
- * defined locally. SWAPAXES must have been called previously.
- * op contains the swapped version of the array.
- */
-#define SWAPBACK(op, ap) { \
- if (axis != orign) { \
- (op) = (PyArrayObject *)PyArray_SwapAxes((ap), axis, orign); \
- Py_DECREF((ap)); \
- if ((op) == NULL) return NULL; \
- } \
- else (op) = (ap); \
- }
-
-/* These swap axes in-place if necessary */
-#define SWAPINTP(a,b) {npy_intp c; c=(a); (a) = (b); (b) = c;}
-#define SWAPAXES2(ap) { \
- orign = PyArray_NDIM(ap)-1; \
- if (axis != orign) { \
- SWAPINTP(PyArray_DIMS(ap)[axis], PyArray_DIMS(ap)[orign]); \
- SWAPINTP(PyArray_STRIDES(ap)[axis], PyArray_STRIDES(ap)[orign]); \
- PyArray_UpdateFlags(ap, NPY_ARRAY_C_CONTIGUOUS | \
- NPY_ARRAY_F_CONTIGUOUS); \
- } \
- }
-
-#define SWAPBACK2(ap) { \
- if (axis != orign) { \
- SWAPINTP(PyArray_DIMS(ap)[axis], PyArray_DIMS(ap)[orign]); \
- SWAPINTP(PyArray_STRIDES(ap)[axis], PyArray_STRIDES(ap)[orign]); \
- PyArray_UpdateFlags(ap, NPY_ARRAY_C_CONTIGUOUS | \
- NPY_ARRAY_F_CONTIGUOUS); \
- } \
- }
-
/*NUMPY_API
* Sort an array in-place
*/
NPY_NO_EXPORT int
PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
- PyArrayObject *ap = NULL, *store_arr = NULL;
- char *ip;
- npy_intp i, n, m;
- int elsize, orign;
- int res = 0;
+ PyArray_SortFunc *sort;
int axis_orig = axis;
- int (*sort)(void *, size_t, size_t, npy_comparator);
+ int n = PyArray_NDIM(op);
- n = PyArray_NDIM(op);
- if ((n == 0) || (PyArray_SIZE(op) == 1)) {
- return 0;
- }
if (axis < 0) {
axis += n;
}
- if ((axis < 0) || (axis >= n)) {
+ if (axis < 0 || axis >= n) {
PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis_orig);
return -1;
}
@@ -1172,83 +1114,35 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
return -1;
}
- /* Determine if we should use type-specific algorithm or not */
- if (PyArray_DESCR(op)->f->sort[which] != NULL) {
- return _new_sortlike(op, axis, PyArray_DESCR(op)->f->sort[which],
- NULL, NULL, 0);
- }
-
- if (PyArray_DESCR(op)->f->compare == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "type does not have compare function");
+ if (which < 0 || which >= NPY_NSORTS) {
+ PyErr_SetString(PyExc_ValueError, "not a valid sort kind");
return -1;
}
- SWAPAXES2(op);
-
- switch (which) {
- case NPY_QUICKSORT :
- sort = npy_quicksort;
- break;
- case NPY_HEAPSORT :
- sort = npy_heapsort;
- break;
- case NPY_MERGESORT :
- sort = npy_mergesort;
- break;
- default:
+ sort = PyArray_DESCR(op)->f->sort[which];
+ if (sort == NULL) {
+ if (PyArray_DESCR(op)->f->compare) {
+ switch (which) {
+ default:
+ case NPY_QUICKSORT:
+ sort = npy_quicksort;
+ break;
+ case NPY_HEAPSORT:
+ sort = npy_heapsort;
+ break;
+ case NPY_MERGESORT:
+ sort = npy_mergesort;
+ break;
+ }
+ }
+ else {
PyErr_SetString(PyExc_TypeError,
- "requested sort kind is not supported");
- goto fail;
- }
-
- ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op,
- NULL, 1, 0,
- NPY_ARRAY_DEFAULT | NPY_ARRAY_UPDATEIFCOPY, NULL);
- if (ap == NULL) {
- goto fail;
- }
- elsize = PyArray_DESCR(ap)->elsize;
- m = PyArray_DIMS(ap)[PyArray_NDIM(ap)-1];
- if (m == 0) {
- goto finish;
- }
- n = PyArray_SIZE(ap)/m;
-
- /* Store global -- allows re-entry -- restore before leaving*/
- store_arr = global_obj;
- global_obj = ap;
- for (ip = PyArray_DATA(ap), i = 0; i < n; i++, ip += elsize*m) {
- res = sort(ip, m, elsize, sortCompare);
- if (res < 0) {
- break;
+ "type does not have compare function");
+ return -1;
}
}
- global_obj = store_arr;
-
- if (PyErr_Occurred()) {
- goto fail;
- }
- else if (res == -NPY_ENOMEM) {
- PyErr_NoMemory();
- goto fail;
- }
- else if (res == -NPY_ECOMP) {
- PyErr_SetString(PyExc_TypeError,
- "sort comparison failed");
- goto fail;
- }
-
- finish:
- Py_DECREF(ap); /* Should update op if needed */
- SWAPBACK2(op);
- return 0;
-
- fail:
- Py_XDECREF(ap);
- SWAPBACK2(op);
- return -1;
+ return _new_sortlike(op, axis, sort, NULL, NULL, 0);
}
@@ -1288,7 +1182,8 @@ partition_prep_kth_array(PyArrayObject * ktharray,
if (kth[i] < 0) {
kth[i] += shape[axis];
}
- if (PyArray_SIZE(op) != 0 && ((kth[i] < 0) || (kth[i] >= shape[axis]))) {
+ if (PyArray_SIZE(op) != 0 &&
+ (kth[i] < 0 || kth[i] >= shape[axis])) {
PyErr_Format(PyExc_ValueError, "kth(=%zd) out of bounds (%zd)",
kth[i], shape[axis]);
Py_XDECREF(kthrvl);
@@ -1300,267 +1195,119 @@ partition_prep_kth_array(PyArrayObject * ktharray,
* sort the array of kths so the partitions will
* not trample on each other
*/
- PyArray_Sort(kthrvl, -1, NPY_QUICKSORT);
+ if (PyArray_SIZE(kthrvl) > 1) {
+ PyArray_Sort(kthrvl, -1, NPY_QUICKSORT);
+ }
return kthrvl;
}
-
/*NUMPY_API
* Partition an array in-place
*/
NPY_NO_EXPORT int
-PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis, NPY_SELECTKIND which)
+PyArray_Partition(PyArrayObject *op, PyArrayObject * ktharray, int axis,
+ NPY_SELECTKIND which)
{
- PyArrayObject *ap = NULL, *store_arr = NULL;
- char *ip;
- npy_intp i, n, m;
- int elsize, orign;
- int res = 0;
+ PyArrayObject *kthrvl;
+ PyArray_PartitionFunc *part;
+ PyArray_SortFunc *sort;
int axis_orig = axis;
- int (*sort)(void *, size_t, size_t, npy_comparator);
- PyArray_PartitionFunc * part = get_partition_func(PyArray_TYPE(op), which);
+ int n = PyArray_NDIM(op);
+ int ret;
- n = PyArray_NDIM(op);
- if (n == 0) {
- return 0;
- }
if (axis < 0) {
axis += n;
}
- if ((axis < 0) || (axis >= n)) {
+ if (axis < 0 || axis >= n) {
PyErr_Format(PyExc_ValueError, "axis(=%d) out of bounds", axis_orig);
return -1;
}
- if (PyArray_FailUnlessWriteable(op, "sort array") < 0) {
+ if (PyArray_FailUnlessWriteable(op, "partition array") < 0) {
return -1;
}
- if (part) {
- PyArrayObject * kthrvl = partition_prep_kth_array(ktharray, op, axis);
- if (kthrvl == NULL)
- return -1;
-
- res = _new_sortlike(op, axis, NULL,
- part,
- PyArray_DATA(kthrvl),
- PyArray_SIZE(kthrvl));
- Py_DECREF(kthrvl);
- return res;
- }
-
-
- if (PyArray_DESCR(op)->f->compare == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "type does not have compare function");
+ if (which < 0 || which >= NPY_NSELECTS) {
+ PyErr_SetString(PyExc_ValueError, "not a valid partition kind");
return -1;
}
-
- SWAPAXES2(op);
-
- /* select not implemented, use quicksort, slower but equivalent */
- switch (which) {
- case NPY_INTROSELECT :
+ part = get_partition_func(PyArray_TYPE(op), which);
+ if (part == NULL) {
+ /* Use sorting, slower but equivalent */
+ if (PyArray_DESCR(op)->f->compare) {
sort = npy_quicksort;
- break;
- default:
+ }
+ else {
PyErr_SetString(PyExc_TypeError,
- "requested sort kind is not supported");
- goto fail;
- }
-
- ap = (PyArrayObject *)PyArray_FromAny((PyObject *)op,
- NULL, 1, 0,
- NPY_ARRAY_DEFAULT | NPY_ARRAY_UPDATEIFCOPY, NULL);
- if (ap == NULL) {
- goto fail;
- }
- elsize = PyArray_DESCR(ap)->elsize;
- m = PyArray_DIMS(ap)[PyArray_NDIM(ap)-1];
- if (m == 0) {
- goto finish;
- }
- n = PyArray_SIZE(ap)/m;
-
- /* Store global -- allows re-entry -- restore before leaving*/
- store_arr = global_obj;
- global_obj = ap;
- /* we don't need to care about kth here as we are using a full sort */
- for (ip = PyArray_DATA(ap), i = 0; i < n; i++, ip += elsize*m) {
- res = sort(ip, m, elsize, sortCompare);
- if (res < 0) {
- break;
+ "type does not have compare function");
+ return -1;
}
}
- global_obj = store_arr;
- if (PyErr_Occurred()) {
- goto fail;
- }
- else if (res == -NPY_ENOMEM) {
- PyErr_NoMemory();
- goto fail;
- }
- else if (res == -NPY_ECOMP) {
- PyErr_SetString(PyExc_TypeError,
- "sort comparison failed");
- goto fail;
+ /* Process ktharray even if using sorting to do bounds checking */
+ kthrvl = partition_prep_kth_array(ktharray, op, axis);
+ if (kthrvl == NULL) {
+ return -1;
}
+ ret = _new_sortlike(op, axis, sort, part,
+ PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl));
- finish:
- Py_DECREF(ap); /* Should update op if needed */
- SWAPBACK2(op);
- return 0;
+ Py_DECREF(kthrvl);
- fail:
- Py_XDECREF(ap);
- SWAPBACK2(op);
- return -1;
+ return ret;
}
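
A rough illustration of the behavior the rewrite pins down: kth is validated up front (gh-5469), even on the fallback path where a type has no partition function and a full sort is used instead.

    import numpy as np

    d = np.arange(10)
    try:
        np.partition(d, 10)                  # type-specific path
    except ValueError as e:
        print(e)                             # e.g. kth(=10) out of bounds (10)
    try:
        np.partition(d.astype(object), -11)  # sorting fallback, same check
    except ValueError as e:
        print(e)
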
-static char *global_data;
-
-static int
-argsort_static_compare(const void *ip1, const void *ip2)
-{
- int isize = PyArray_DESCR(global_obj)->elsize;
- const npy_intp *ipa = ip1;
- const npy_intp *ipb = ip2;
- return PyArray_DESCR(global_obj)->f->compare(global_data + (isize * *ipa),
- global_data + (isize * *ipb),
- global_obj);
-}
-
/*NUMPY_API
* ArgSort an array
*/
NPY_NO_EXPORT PyObject *
PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
- PyArrayObject *ap = NULL, *ret = NULL, *store, *op2;
- npy_intp *ip;
- npy_intp i, j, n, m, orign;
- int argsort_elsize;
- char *store_ptr;
- int res = 0;
- int (*sort)(void *, size_t, size_t, npy_comparator);
-
- n = PyArray_NDIM(op);
- if ((n == 0) || (PyArray_SIZE(op) == 1)) {
- ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
- PyArray_DIMS(op),
- NPY_INTP,
- NULL, NULL, 0, 0,
- (PyObject *)op);
- if (ret == NULL) {
- return NULL;
- }
- *((npy_intp *)PyArray_DATA(ret)) = 0;
- return (PyObject *)ret;
- }
+ PyArrayObject *op2;
+ PyArray_ArgSortFunc *argsort;
+ PyObject *ret;
- /* Creates new reference op2 */
- if ((op2=(PyArrayObject *)PyArray_CheckAxis(op, &axis, 0)) == NULL) {
+ if (which < 0 || which >= NPY_NSORTS) {
+ PyErr_SetString(PyExc_ValueError,
+ "not a valid sort kind");
return NULL;
}
- /* Determine if we should use new algorithm or not */
- if (PyArray_DESCR(op2)->f->argsort[which] != NULL) {
- ret = (PyArrayObject *)_new_argsortlike(op2, axis,
- PyArray_DESCR(op2)->f->argsort[which],
- NULL, NULL, 0);
- Py_DECREF(op2);
- return (PyObject *)ret;
- }
- if (PyArray_DESCR(op2)->f->compare == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "type does not have compare function");
- Py_DECREF(op2);
- op = NULL;
- goto fail;
- }
-
- switch (which) {
- case NPY_QUICKSORT :
- sort = npy_quicksort;
- break;
- case NPY_HEAPSORT :
- sort = npy_heapsort;
- break;
- case NPY_MERGESORT :
- sort = npy_mergesort;
- break;
- default:
+ argsort = PyArray_DESCR(op)->f->argsort[which];
+ if (argsort == NULL) {
+ if (PyArray_DESCR(op)->f->compare) {
+ switch (which) {
+ default:
+ case NPY_QUICKSORT:
+ argsort = npy_aquicksort;
+ break;
+ case NPY_HEAPSORT:
+ argsort = npy_aheapsort;
+ break;
+ case NPY_MERGESORT:
+ argsort = npy_amergesort;
+ break;
+ }
+ }
+ else {
PyErr_SetString(PyExc_TypeError,
- "requested sort kind is not supported");
- Py_DECREF(op2);
- op = NULL;
- goto fail;
- }
-
- /* ap will contain the reference to op2 */
- SWAPAXES(ap, op2);
- op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap,
- NPY_NOTYPE,
- 1, 0);
- Py_DECREF(ap);
- if (op == NULL) {
- return NULL;
- }
- ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
- PyArray_DIMS(op), NPY_INTP,
- NULL, NULL, 0, 0, (PyObject *)op);
- if (ret == NULL) {
- goto fail;
- }
- ip = (npy_intp *)PyArray_DATA(ret);
- argsort_elsize = PyArray_DESCR(op)->elsize;
- m = PyArray_DIMS(op)[PyArray_NDIM(op)-1];
- if (m == 0) {
- goto finish;
- }
- n = PyArray_SIZE(op)/m;
- store_ptr = global_data;
- global_data = PyArray_DATA(op);
- store = global_obj;
- global_obj = op;
- for (i = 0; i < n; i++, ip += m, global_data += m*argsort_elsize) {
- for (j = 0; j < m; j++) {
- ip[j] = j;
- }
- res = sort((char *)ip, m, sizeof(npy_intp), argsort_static_compare);
- if (res < 0) {
- break;
+ "type does not have compare function");
+ return NULL;
}
}
- global_data = store_ptr;
- global_obj = store;
- if (PyErr_Occurred()) {
- goto fail;
- }
- else if (res == -NPY_ENOMEM) {
- PyErr_NoMemory();
- goto fail;
- }
- else if (res == -NPY_ECOMP) {
- PyErr_SetString(PyExc_TypeError,
- "sort comparison failed");
- goto fail;
+ op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0);
+ if (op2 == NULL) {
+ return NULL;
}
- finish:
- Py_DECREF(op);
- SWAPBACK(op, ret);
- return (PyObject *)op;
+ ret = _new_argsortlike(op2, axis, argsort, NULL, NULL, 0);
- fail:
- Py_XDECREF(op);
- Py_XDECREF(ret);
- return NULL;
+ Py_DECREF(op2);
+ return ret;
}
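
A small check of the new fallback: a dtype that only supplies a compare function (object arrays are the usual case) now goes through npy_aquicksort, npy_aheapsort, or npy_amergesort, so every requested kind works instead of everything funneling through a libc qsort on globals.

    import numpy as np

    a = np.array([3, 1, 2], dtype=object)
    for kind in ('quicksort', 'heapsort', 'mergesort'):
        assert np.argsort(a, kind=kind).tolist() == [1, 2, 0]
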
@@ -1568,136 +1315,52 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
* ArgPartition an array
*/
NPY_NO_EXPORT PyObject *
-PyArray_ArgPartition(PyArrayObject *op, PyArrayObject * ktharray, int axis, NPY_SELECTKIND which)
+PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis,
+ NPY_SELECTKIND which)
{
- PyArrayObject *ap = NULL, *ret = NULL, *store, *op2;
- npy_intp *ip;
- npy_intp i, j, n, m, orign;
- int argsort_elsize;
- char *store_ptr;
- int res = 0;
- int (*sort)(void *, size_t, size_t, npy_comparator);
- PyArray_ArgPartitionFunc * argpart =
- get_argpartition_func(PyArray_TYPE(op), which);
-
- n = PyArray_NDIM(op);
- if ((n == 0) || (PyArray_SIZE(op) == 1)) {
- ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
- PyArray_DIMS(op),
- NPY_INTP,
- NULL, NULL, 0, 0,
- (PyObject *)op);
- if (ret == NULL) {
- return NULL;
- }
- *((npy_intp *)PyArray_DATA(ret)) = 0;
- return (PyObject *)ret;
- }
+ PyArrayObject *op2, *kthrvl;
+ PyArray_ArgPartitionFunc *argpart;
+ PyArray_ArgSortFunc *argsort = NULL;
+ PyObject *ret;
- /* Creates new reference op2 */
- if ((op2=(PyArrayObject *)PyArray_CheckAxis(op, &axis, 0)) == NULL) {
+ if (which < 0 || which >= NPY_NSELECTS) {
+ PyErr_SetString(PyExc_ValueError,
+ "not a valid partition kind");
return NULL;
}
- /* Determine if we should use new algorithm or not */
- if (argpart) {
- PyArrayObject * kthrvl = partition_prep_kth_array(ktharray, op2, axis);
- if (kthrvl == NULL) {
- Py_DECREF(op2);
+ argpart = get_argpartition_func(PyArray_TYPE(op), which);
+ if (argpart == NULL) {
+ /* Use sorting, slower but equivalent */
+ if (PyArray_DESCR(op)->f->compare) {
+ argsort = npy_aquicksort;
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ "type does not have compare function");
return NULL;
}
-
- ret = (PyArrayObject *)_new_argsortlike(op2, axis, NULL,
- argpart,
- PyArray_DATA(kthrvl),
- PyArray_SIZE(kthrvl));
- Py_DECREF(kthrvl);
- Py_DECREF(op2);
- return (PyObject *)ret;
}
- if (PyArray_DESCR(op2)->f->compare == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "type does not have compare function");
- Py_DECREF(op2);
- op = NULL;
- goto fail;
- }
-
- /* select not implemented, use quicksort, slower but equivalent */
- switch (which) {
- case NPY_INTROSELECT :
- sort = npy_quicksort;
- break;
- default:
- PyErr_SetString(PyExc_TypeError,
- "requested sort kind is not supported");
- Py_DECREF(op2);
- op = NULL;
- goto fail;
+ op2 = (PyArrayObject *)PyArray_CheckAxis(op, &axis, 0);
+ if (op2 == NULL) {
+ return NULL;
}
- /* ap will contain the reference to op2 */
- SWAPAXES(ap, op2);
- op = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)ap,
- NPY_NOTYPE,
- 1, 0);
- Py_DECREF(ap);
- if (op == NULL) {
+ /* Process ktharray even when sorting is used, so kth is still bounds-checked */
+ kthrvl = partition_prep_kth_array(ktharray, op2, axis);
+ if (kthrvl == NULL) {
+ Py_DECREF(op2);
return NULL;
}
- ret = (PyArrayObject *)PyArray_New(Py_TYPE(op), PyArray_NDIM(op),
- PyArray_DIMS(op), NPY_INTP,
- NULL, NULL, 0, 0, (PyObject *)op);
- if (ret == NULL) {
- goto fail;
- }
- ip = (npy_intp *)PyArray_DATA(ret);
- argsort_elsize = PyArray_DESCR(op)->elsize;
- m = PyArray_DIMS(op)[PyArray_NDIM(op)-1];
- if (m == 0) {
- goto finish;
- }
- n = PyArray_SIZE(op)/m;
- store_ptr = global_data;
- global_data = PyArray_DATA(op);
- store = global_obj;
- global_obj = op;
- /* we don't need to care about kth here as we are using a full sort */
- for (i = 0; i < n; i++, ip += m, global_data += m*argsort_elsize) {
- for (j = 0; j < m; j++) {
- ip[j] = j;
- }
- res = sort((char *)ip, m, sizeof(npy_intp), argsort_static_compare);
- if (res < 0) {
- break;
- }
- }
- global_data = store_ptr;
- global_obj = store;
- if (PyErr_Occurred()) {
- goto fail;
- }
- else if (res == -NPY_ENOMEM) {
- PyErr_NoMemory();
- goto fail;
- }
- else if (res == -NPY_ECOMP) {
- PyErr_SetString(PyExc_TypeError,
- "sort comparison failed");
- goto fail;
- }
+ ret = _new_argsortlike(op2, axis, argsort, argpart,
+ PyArray_DATA(kthrvl), PyArray_SIZE(kthrvl));
- finish:
- Py_DECREF(op);
- SWAPBACK(op, ret);
- return (PyObject *)op;
+ Py_DECREF(kthrvl);
+ Py_DECREF(op2);
- fail:
- Py_XDECREF(op);
- Py_XDECREF(ret);
- return NULL;
+ return ret;
}
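
And the matching sketch for argpartition on a compare-only type: the generic path runs a full indirect sort, so the kth positions come out correct by construction, while kth itself is still bounds-checked.

    import numpy as np

    d = np.array([6, 7, 3, 2, 9, 0], dtype=object)
    idx = np.argpartition(d, 1)     # falls back to a full argsort internally
    assert d[idx][1] == sorted(d)[1]
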
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 40622ca61..a510c7b0c 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1981,21 +1981,28 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
}
if (tmp_arr == NULL) {
- /* Fill extra op */
-
- if (PyArray_CopyObject(mit->extra_op, op) < 0) {
- /*
- * This is a deprecated special case to allow non-matching shapes
- * for the index and value arrays.
- */
- if (index_type != HAS_FANCY || index_num != 1) {
- /* This is not a "flat like" 1-d special case */
- goto fail;
- }
- if (attempt_1d_fallback(self, indices[0].object, op) < 0) {
+ /* Fill extra op, need to swap first */
+ tmp_arr = mit->extra_op;
+ Py_INCREF(tmp_arr);
+ if (mit->consec) {
+ PyArray_MapIterSwapAxes(mit, &tmp_arr, 1);
+ if (tmp_arr == NULL) {
goto fail;
}
- goto success;
+ }
+ if (PyArray_CopyObject(tmp_arr, op) < 0) {
+ /*
+ * This is a deprecated special case to allow non-matching shapes
+ * for the index and value arrays.
+ */
+ if (index_type != HAS_FANCY || index_num != 1) {
+ /* This is not a "flat like" 1-d special case */
+ goto fail;
+ }
+ if (attempt_1d_fallback(self, indices[0].object, op) < 0) {
+ goto fail;
+ }
+ goto success;
}
}
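
A minimal repro of the bug this hunk fixes (gh-5714): when the fancy index is not on the first axis, the extra operand has to be axis-swapped before it is filled, otherwise the copy runs against a transposed temporary and fails.

    import numpy as np

    b = np.zeros((2, 3), dtype=object).T   # shape (3, 2); fancy index on axis 1
    b[:3, [0]] = [[1], [(1, 2)], [3]]      # raised ValueError before the fix
    assert b[1, 0] == (1, 2)
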
@@ -2557,8 +2564,10 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
* Non-zero if an extra operand should be used, otherwise it must be 0.
* Should be at least READONLY, WRITEONLY or READWRITE.
* @param Extra operand. For getmap, this would be the result, for setmap
- * this would be the arrays to get from. Can be NULL, and will be
- * allocated in that case.
+ * this would be the arrays to get from.
+ * Can be NULL, and will be allocated in that case. However,
+ * it matches the mapiter iteration, so you have to call
+ * MapIterSwapAxes(mit, &extra_op, 1) on it.
* The operand has no effect on the shape.
* @param Dtype for the extra operand, borrows the reference and must not
* be NULL (if extra_op_flags is not 0).
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index a80d3af19..9a2cd0c1b 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -155,7 +155,7 @@ test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
bound = PySequence_GetItem(b, i);
- if (bounds == NULL) {
+ if (bound == NULL) {
goto clean_itx;
}
if (!PyInt_Check(bound)) {
@@ -314,7 +314,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
bound = PySequence_GetItem(b1, i);
- if (bounds == NULL) {
+ if (bound == NULL) {
goto clean_itx;
}
if (!PyInt_Check(bound)) {
@@ -338,7 +338,7 @@ test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
for (i = 0; i < 2 * PyArray_NDIM(ax); ++i) {
PyObject* bound;
bound = PySequence_GetItem(b2, i);
- if (bounds == NULL) {
+ if (bound == NULL) {
goto clean_itx;
}
if (!PyInt_Check(bound)) {
@@ -762,7 +762,7 @@ array_indexing(PyObject *NPY_UNUSED(self), PyObject *args)
if (mode == 1) {
if (PySequence_SetItem(arr, i, op) < 0) {
return NULL;
- }
+ }
Py_RETURN_NONE;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 79e471cee..1a8fda94d 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4316,15 +4316,6 @@ PyMODINIT_FUNC initmultiarray(void) {
goto err;
}
-#if defined(MS_WIN64) && defined(__GNUC__)
- PyErr_WarnEx(PyExc_Warning,
- "Numpy built with MINGW-W64 on Windows 64 bits is experimental, " \
- "and only available for \n" \
- "testing. You are advised not to use it for production. \n\n" \
- "CRASHES ARE TO BE EXPECTED - PLEASE REPORT THEM TO NUMPY DEVELOPERS",
- 1);
-#endif
-
/* Initialize access to the PyDateTime API */
numpy_pydatetime_import();
diff --git a/numpy/core/src/npysort/heapsort.c.src b/numpy/core/src/npysort/heapsort.c.src
index cfdd3fd2a..88f7978cc 100644
--- a/numpy/core/src/npysort/heapsort.c.src
+++ b/numpy/core/src/npysort/heapsort.c.src
@@ -287,30 +287,27 @@ aheapsort_@suff@(@type@ *v, npy_intp *tosort, npy_intp n, PyArrayObject *arr)
*/
-/*
- * This sort has almost the same signature as libc qsort and is intended to
- * provide a heapsort for array types that don't have type specific sorts.
- * The difference in the signature is an error return, as it might be the
- * case that a memory allocation fails.
- */
int
-npy_heapsort(void *base, size_t num, size_t size, npy_comparator cmp)
+npy_heapsort(char *start, npy_intp num, PyArrayObject *arr)
{
- char *tmp = malloc(size);
- char *a = (char *) base - size;
- size_t i, j, l;
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ char *tmp = malloc(elsize);
+ char *a = start - elsize;
+ npy_intp i, j, l;
if (tmp == NULL) {
return -NPY_ENOMEM;
}
for (l = num >> 1; l > 0; --l) {
- GENERIC_COPY(tmp, a + l*size, size);
+ GENERIC_COPY(tmp, a + l*elsize, elsize);
for (i = l, j = l << 1; j <= num;) {
- if (j < num && GENERIC_LT(a + j*size, a + (j+1)*size, cmp))
- j += 1;
- if (GENERIC_LT(tmp, a + j*size, cmp)) {
- GENERIC_COPY(a + i*size, a + j*size, size);
+ if (j < num && cmp(a + j*elsize, a + (j+1)*elsize, arr) < 0) {
+ ++j;
+ }
+ if (cmp(tmp, a + j*elsize, arr) < 0) {
+ GENERIC_COPY(a + i*elsize, a + j*elsize, elsize);
i = j;
j += j;
}
@@ -318,18 +315,19 @@ npy_heapsort(void *base, size_t num, size_t size, npy_comparator cmp)
break;
}
}
- GENERIC_COPY(a + i*size, tmp, size);
+ GENERIC_COPY(a + i*elsize, tmp, elsize);
}
for (; num > 1;) {
- GENERIC_COPY(tmp, a + num*size, size);
- GENERIC_COPY(a + num*size, a + size, size);
+ GENERIC_COPY(tmp, a + num*elsize, elsize);
+ GENERIC_COPY(a + num*elsize, a + elsize, elsize);
num -= 1;
for (i = 1, j = 2; j <= num;) {
- if (j < num && GENERIC_LT(a + j*size, a + (j+1)*size, cmp))
- j++;
- if (GENERIC_LT(tmp, a + j*size, cmp)) {
- GENERIC_COPY(a + i*size, a + j*size, size);
+ if (j < num && cmp(a + j*elsize, a + (j+1)*elsize, arr) < 0) {
+ ++j;
+ }
+ if (cmp(tmp, a + j*elsize, arr) < 0) {
+ GENERIC_COPY(a + i*elsize, a + j*elsize, elsize);
i = j;
j += j;
}
@@ -337,9 +335,61 @@ npy_heapsort(void *base, size_t num, size_t size, npy_comparator cmp)
break;
}
}
- GENERIC_COPY(a + i*size, tmp, size);
+ GENERIC_COPY(a + i*elsize, tmp, elsize);
}
free(tmp);
return 0;
}
+
+
+int
+npy_aheapsort(char *v, npy_intp *tosort, npy_intp n, PyArrayObject *arr)
+{
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ npy_intp *a, i, j, l, tmp;
+
+ /* The array needs to be offset by one for heapsort indexing */
+ a = tosort - 1;
+
+ for (l = n >> 1; l > 0; --l) {
+ tmp = a[l];
+ for (i = l, j = l<<1; j <= n;) {
+ if (j < n && cmp(v + a[j]*elsize, v + a[j+1]*elsize, arr) < 0) {
+ ++j;
+ }
+ if (cmp(v + tmp*elsize, v + a[j]*elsize, arr) < 0) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }
+ else {
+ break;
+ }
+ }
+ a[i] = tmp;
+ }
+
+ for (; n > 1;) {
+ tmp = a[n];
+ a[n] = a[1];
+ n -= 1;
+ for (i = 1, j = 2; j <= n;) {
+ if (j < n && cmp(v + a[j]*elsize, v + a[j+1]*elsize, arr) < 0) {
+ ++j;
+ }
+ if (cmp(v + tmp*elsize, v + a[j]*elsize, arr) < 0) {
+ a[i] = a[j];
+ i = j;
+ j += j;
+ }
+ else {
+ break;
+ }
+ }
+ a[i] = tmp;
+ }
+
+ return 0;
+}
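
For reference, a pure-Python rendering of npy_aheapsort's control flow (illustrative only, not the shipped code). The `[None] +` prefix plays the role of `a = tosort - 1`, giving the one-based indexing the sift-down arithmetic relies on.

    def aheapsort(v, tosort):
        """Reorder `tosort` in place so v[tosort] is ascending (illustration)."""
        n = len(tosort)
        a = [None] + list(tosort)          # 1-based view of the index array
        for l in range(n >> 1, 0, -1):     # heapify
            tmp = a[l]
            i, j = l, l << 1
            while j <= n:
                if j < n and v[a[j]] < v[a[j + 1]]:
                    j += 1
                if v[tmp] < v[a[j]]:
                    a[i] = a[j]
                    i, j = j, j + j
                else:
                    break
            a[i] = tmp
        while n > 1:                       # pop the maximum repeatedly
            tmp, a[n] = a[n], a[1]
            n -= 1
            i, j = 1, 2
            while j <= n:
                if j < n and v[a[j]] < v[a[j + 1]]:
                    j += 1
                if v[tmp] < v[a[j]]:
                    a[i] = a[j]
                    i, j = j, j + j
                else:
                    break
            a[i] = tmp
        tosort[:] = a[1:]

    order = [0, 1, 2, 3]
    aheapsort([3, 1, 2, 0], order)
    assert order == [3, 1, 2, 0]
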
diff --git a/numpy/core/src/npysort/mergesort.c.src b/numpy/core/src/npysort/mergesort.c.src
index c99c0e614..406be66d0 100644
--- a/numpy/core/src/npysort/mergesort.c.src
+++ b/numpy/core/src/npysort/mergesort.c.src
@@ -350,79 +350,135 @@ amergesort_@suff@(@type@ *v, npy_intp *tosort, npy_intp num, PyArrayObject *arr)
static void
-npy_mergesort0(char *pl, char *pr, char *pw, char *vp, size_t size, npy_comparator cmp)
+npy_mergesort0(char *pl, char *pr, char *pw, char *vp, npy_intp elsize,
+ PyArray_CompareFunc *cmp, PyArrayObject *arr)
{
char *pi, *pj, *pk, *pm;
- if ((size_t)(pr - pl) > SMALL_MERGESORT*size) {
+ if (pr - pl > SMALL_MERGESORT*elsize) {
/* merge sort */
- pm = pl + (((pr - pl)/size) >> 1)*size;
- npy_mergesort0(pl, pm, pw, vp, size, cmp);
- npy_mergesort0(pm, pr, pw, vp, size, cmp);
+ pm = pl + (((pr - pl)/elsize) >> 1)*elsize;
+ npy_mergesort0(pl, pm, pw, vp, elsize, cmp, arr);
+ npy_mergesort0(pm, pr, pw, vp, elsize, cmp, arr);
GENERIC_COPY(pw, pl, pm - pl);
pi = pw + (pm - pl);
pj = pw;
pk = pl;
while (pj < pi && pm < pr) {
- if (GENERIC_LT(pm, pj, cmp)) {
- GENERIC_COPY(pk, pm, size);
- pm += size;
- pk += size;
+ if (cmp(pm, pj, arr) < 0) {
+ GENERIC_COPY(pk, pm, elsize);
+ pm += elsize;
+ pk += elsize;
}
else {
- GENERIC_COPY(pk, pj, size);
- pj += size;
- pk += size;
+ GENERIC_COPY(pk, pj, elsize);
+ pj += elsize;
+ pk += elsize;
}
}
GENERIC_COPY(pk, pj, pi - pj);
}
else {
/* insertion sort */
- for (pi = pl + size; pi < pr; pi += size) {
- GENERIC_COPY(vp, pi, size);
+ for (pi = pl + elsize; pi < pr; pi += elsize) {
+ GENERIC_COPY(vp, pi, elsize);
pj = pi;
- pk = pi - size;
- while (pj > pl && GENERIC_LT(vp, pk, cmp)) {
- GENERIC_COPY(pj, pk, size);
- pj -= size;
- pk -= size;
+ pk = pi - elsize;
+ while (pj > pl && cmp(vp, pk, arr) < 0) {
+ GENERIC_COPY(pj, pk, elsize);
+ pj -= elsize;
+ pk -= elsize;
}
- GENERIC_COPY(pj, vp, size);
+ GENERIC_COPY(pj, vp, elsize);
}
}
}
-/*
- * This sort has almost the same signature as libc qsort and is intended to
- * provide a mergesort for array types that don't have type specific sorts.
- * The difference in the signature is an error return, as it might be the
- * case that a memory allocation fails.
- */
int
-npy_mergesort(void *base, size_t num, size_t size, npy_comparator cmp)
+npy_mergesort(char *start, npy_intp num, PyArrayObject *arr)
{
- char *pl, *pr, *pw, *vp;
- int err = 0;
-
- pl = base;
- pr = pl + num*size;
- pw = malloc((num/2) * size);
- if (pw == NULL) {
- err = -NPY_ENOMEM;
- goto fail_0;
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ char *pl = start;
+ char *pr = pl + num*elsize;
+ char *pw = malloc((num >> 1) * elsize);
+ char *vp = malloc(elsize);
+ int err = -NPY_ENOMEM;
+
+ if (pw != NULL && vp != NULL) {
+ npy_mergesort0(pl, pr, pw, vp, elsize, cmp, arr);
+ err = 0;
}
- vp = malloc(size);
- if (vp == NULL) {
- err = -NPY_ENOMEM;
- goto fail_1;
- }
- npy_mergesort0(pl, pr, pw, vp, size, cmp);
free(vp);
-fail_1:
free(pw);
-fail_0:
+
return err;
}
+
+
+static void
+npy_amergesort0(npy_intp *pl, npy_intp *pr, char *v, npy_intp *pw,
+ npy_intp elsize, PyArray_CompareFunc *cmp, PyArrayObject *arr)
+{
+ char *vp;
+ npy_intp vi, *pi, *pj, *pk, *pm;
+
+ if (pr - pl > SMALL_MERGESORT) {
+ /* merge sort */
+ pm = pl + ((pr - pl) >> 1);
+ npy_amergesort0(pl, pm, v, pw, elsize, cmp, arr);
+ npy_amergesort0(pm, pr, v, pw, elsize, cmp, arr);
+ for (pi = pw, pj = pl; pj < pm;) {
+ *pi++ = *pj++;
+ }
+ pi = pw + (pm - pl);
+ pj = pw;
+ pk = pl;
+ while (pj < pi && pm < pr) {
+ if (cmp(v + (*pm)*elsize, v + (*pj)*elsize, arr) < 0) {
+ *pk++ = *pm++;
+ }
+ else {
+ *pk++ = *pj++;
+ }
+ }
+ while (pj < pi) {
+ *pk++ = *pj++;
+ }
+ }
+ else {
+ /* insertion sort */
+ for (pi = pl + 1; pi < pr; ++pi) {
+ vi = *pi;
+ vp = v + vi*elsize;
+ pj = pi;
+ pk = pi - 1;
+ while (pj > pl && cmp(vp, v + (*pk)*elsize, arr) < 0) {
+ *pj-- = *pk--;
+ }
+ *pj = vi;
+ }
+ }
+}
+
+
+int
+npy_amergesort(char *v, npy_intp *tosort, npy_intp num, PyArrayObject *arr)
+{
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ npy_intp *pl, *pr, *pw;
+
+ pl = tosort;
+ pr = pl + num;
+ pw = malloc((num >> 1) * sizeof(npy_intp));
+ if (pw == NULL) {
+ return -NPY_ENOMEM;
+ }
+ npy_amergesort0(pl, pr, v, pw, elsize, cmp, arr);
+ free(pw);
+
+ return 0;
+}
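
npy_amergesort is what now backs kind='mergesort' for compare-only dtypes, and mergesort is NumPy's stable kind. A quick stability check, assuming the object dtype's compare delegates to Python's < and > as usual:

    import numpy as np

    class Rec(object):
        def __init__(self, key, tag):
            self.key, self.tag = key, tag
        def __lt__(self, other):
            return self.key < other.key
        def __gt__(self, other):
            return self.key > other.key

    a = np.array([Rec(1, 'a'), Rec(0, 'b'), Rec(1, 'c'), Rec(0, 'd')],
                 dtype=object)
    idx = np.argsort(a, kind='mergesort')  # equal keys keep their input order
    assert [a[i].tag for i in idx] == ['b', 'd', 'a', 'c']
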
diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h
index a3ce7664d..a22045b41 100644
--- a/numpy/core/src/npysort/npysort_common.h
+++ b/numpy/core/src/npysort/npysort_common.h
@@ -357,11 +357,4 @@ GENERIC_SWAP(char *a, char *b, size_t len)
}
}
-
-NPY_INLINE static int
-GENERIC_LT(char *a, char *b, int (*cmp)(const void *, const void *))
-{
- return cmp(a, b) < 0;
-}
-
#endif
diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src
index a27530eb4..5334aca76 100644
--- a/numpy/core/src/npysort/quicksort.c.src
+++ b/numpy/core/src/npysort/quicksort.c.src
@@ -216,6 +216,10 @@ quicksort_@suff@(@type@ *start, npy_intp num, PyArrayObject *arr)
@type@ *pr = start + (num - 1)*len;
@type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk;
+ if (vp == NULL) {
+ return -NPY_ENOMEM;
+ }
+
for (;;) {
while ((size_t)(pr - pl) > SMALL_QUICKSORT*len) {
/* quicksort partition */
@@ -350,14 +354,163 @@ aquicksort_@suff@(@type@ *v, npy_intp* tosort, npy_intp num, PyArrayObject *arr)
*/
-/*
- * This sort has almost the same signature as libc qsort and is intended to
- * supply an error return for compatibility with the other generic sort
- * kinds.
- */
int
-npy_quicksort(void *base, size_t num, size_t size, npy_comparator cmp)
+npy_quicksort(char *start, npy_intp num, PyArrayObject *arr)
{
- qsort(base, num, size, cmp);
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ char *vp = malloc(elsize);
+ char *pl = start;
+ char *pr = start + (num - 1)*elsize;
+ char *stack[PYA_QS_STACK];
+ char **sptr = stack;
+ char *pm, *pi, *pj, *pk;
+
+ if (vp == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ for (;;) {
+ while (pr - pl > SMALL_QUICKSORT*elsize) {
+ /* quicksort partition */
+ pm = pl + (((pr - pl) / elsize) >> 1) * elsize;
+ if (cmp(pm, pl, arr) < 0) {
+ GENERIC_SWAP(pm, pl, elsize);
+ }
+ if (cmp(pr, pm, arr) < 0) {
+ GENERIC_SWAP(pr, pm, elsize);
+ }
+ if (cmp(pm, pl, arr) < 0) {
+ GENERIC_SWAP(pm, pl, elsize);
+ }
+ GENERIC_COPY(vp, pm, elsize);
+ pi = pl;
+ pj = pr - elsize;
+ GENERIC_SWAP(pm, pj, elsize);
+ for (;;) {
+ do {
+ pi += elsize;
+ } while (cmp(pi, vp, arr) < 0);
+ do {
+ pj -= elsize;
+ } while (cmp(vp, pj, arr) < 0);
+ if (pi >= pj) {
+ break;
+ }
+ GENERIC_SWAP(pi, pj, elsize);
+ }
+ pk = pr - elsize;
+ GENERIC_SWAP(pi, pk, elsize);
+ /* push largest partition on stack */
+ if (pi - pl < pr - pi) {
+ *sptr++ = pi + elsize;
+ *sptr++ = pr;
+ pr = pi - elsize;
+ }
+ else {
+ *sptr++ = pl;
+ *sptr++ = pi - elsize;
+ pl = pi + elsize;
+ }
+ }
+
+ /* insertion sort */
+ for (pi = pl + elsize; pi <= pr; pi += elsize) {
+ GENERIC_COPY(vp, pi, elsize);
+ pj = pi;
+ pk = pi - elsize;
+ while (pj > pl && cmp(vp, pk, arr) < 0) {
+ GENERIC_COPY(pj, pk, elsize);
+ pj -= elsize;
+ pk -= elsize;
+ }
+ GENERIC_COPY(pj, vp, elsize);
+ }
+ if (sptr == stack) {
+ break;
+ }
+ pr = *(--sptr);
+ pl = *(--sptr);
+ }
+
+ free(vp);
+ return 0;
+}
+
+
+int
+npy_aquicksort(char *v, npy_intp* tosort, npy_intp num, PyArrayObject *arr)
+{
+ npy_intp elsize = PyArray_ITEMSIZE(arr);
+ PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+ char *vp;
+ npy_intp *pl = tosort;
+ npy_intp *pr = tosort + num - 1;
+ npy_intp *stack[PYA_QS_STACK];
+ npy_intp **sptr = stack;
+ npy_intp *pm, *pi, *pj, *pk, vi;
+
+ for (;;) {
+ while ((pr - pl) > SMALL_QUICKSORT) {
+ /* quicksort partition */
+ pm = pl + ((pr - pl) >> 1);
+ if (cmp(v + (*pm)*elsize, v + (*pl)*elsize, arr) < 0) {
+ INTP_SWAP(*pm, *pl);
+ }
+ if (cmp(v + (*pr)*elsize, v + (*pm)*elsize, arr) < 0) {
+ INTP_SWAP(*pr, *pm);
+ }
+ if (cmp(v + (*pm)*elsize, v + (*pl)*elsize, arr) < 0) {
+ INTP_SWAP(*pm, *pl);
+ }
+ vp = v + (*pm)*elsize;
+ pi = pl;
+ pj = pr - 1;
+ INTP_SWAP(*pm, *pj);
+ for (;;) {
+ do {
+ ++pi;
+ } while (cmp(v + (*pi)*elsize, vp, arr) < 0);
+ do {
+ --pj;
+ } while (cmp(vp, v + (*pj)*elsize, arr) < 0);
+ if (pi >= pj) {
+ break;
+ }
+ INTP_SWAP(*pi, *pj);
+ }
+ pk = pr - 1;
+ INTP_SWAP(*pi, *pk);
+ /* push largest partition on stack */
+ if (pi - pl < pr - pi) {
+ *sptr++ = pi + 1;
+ *sptr++ = pr;
+ pr = pi - 1;
+ }
+ else {
+ *sptr++ = pl;
+ *sptr++ = pi - 1;
+ pl = pi + 1;
+ }
+ }
+
+ /* insertion sort */
+ for (pi = pl + 1; pi <= pr; ++pi) {
+ vi = *pi;
+ vp = v + vi*elsize;
+ pj = pi;
+ pk = pi - 1;
+ while (pj > pl && cmp(vp, v + (*pk)*elsize, arr) < 0) {
+ *pj-- = *pk--;
+ }
+ *pj = vi;
+ }
+ if (sptr == stack) {
+ break;
+ }
+ pr = *(--sptr);
+ pl = *(--sptr);
+ }
+
return 0;
}
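
A pure-Python sketch of the structure npy_quicksort now shares with the type-specific sorts: median-of-three pivoting, an explicit stack instead of recursion, and an insertion-sort finish for small runs (illustrative, not the shipped code; SMALL_QUICKSORT mirrors the C constant):

    SMALL_QUICKSORT = 15

    def quicksort(a):
        """Sort list `a` in place, mirroring npy_quicksort's control flow."""
        stack = []
        pl, pr = 0, len(a) - 1
        while True:
            while pr - pl > SMALL_QUICKSORT:
                pm = pl + ((pr - pl) >> 1)     # median-of-three pivot
                if a[pm] < a[pl]:
                    a[pm], a[pl] = a[pl], a[pm]
                if a[pr] < a[pm]:
                    a[pr], a[pm] = a[pm], a[pr]
                if a[pm] < a[pl]:
                    a[pm], a[pl] = a[pl], a[pm]
                vp = a[pm]
                pi, pj = pl, pr - 1
                a[pm], a[pj] = a[pj], a[pm]    # stash the pivot at pr - 1
                while True:
                    pi += 1
                    while a[pi] < vp:
                        pi += 1
                    pj -= 1
                    while vp < a[pj]:
                        pj -= 1
                    if pi >= pj:
                        break
                    a[pi], a[pj] = a[pj], a[pi]
                a[pi], a[pr - 1] = a[pr - 1], a[pi]
                if pi - pl < pr - pi:          # push the larger partition
                    stack.append((pi + 1, pr))
                    pr = pi - 1
                else:
                    stack.append((pl, pi - 1))
                    pl = pi + 1
            for i in range(pl + 1, pr + 1):    # insertion sort for small runs
                v, j = a[i], i
                while j > pl and v < a[j - 1]:
                    a[j] = a[j - 1]
                    j -= 1
                a[j] = v
            if not stack:
                break
            pl, pr = stack.pop()

    data = [5, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5, 8, 9, 7, 9, 3, 2, 3, 8, 4]
    quicksort(data)
    assert data == sorted(data)
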
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index 4167b2694..51c7d6721 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -424,51 +424,3 @@ int
/**end repeat1**/
/**end repeat**/
-
-
-/*
- *****************************************************************************
- ** STRING SORTS **
- *****************************************************************************
- */
-
-
-/**begin repeat
- *
- * #TYPE = STRING, UNICODE#
- * #suff = string, unicode#
- * #type = npy_char, npy_ucs4#
- */
-
-int
-introselect_@suff@(@type@ *start, npy_intp num, npy_intp kth, PyArrayObject *arr)
-{
- return quicksort_@suff@(start, num, arr);
-}
-
-int
-aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_intp num, npy_intp kth, void *null)
-{
- return aquicksort_@suff@(v, tosort, num, null);
-}
-
-/**end repeat**/
-
-
-/*
- *****************************************************************************
- ** GENERIC SORT **
- *****************************************************************************
- */
-
-
-/*
- * This sort has almost the same signature as libc qsort and is intended to
- * supply an error return for compatibility with the other generic sort
- * kinds.
- */
-int
-npy_introselect(void *base, size_t num, size_t size, size_t kth, npy_comparator cmp)
-{
- return npy_quicksort(base, num, size, cmp);
-}
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 6e98dc7e9..580b00706 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -3,6 +3,7 @@
#include "config.h"
#include "numpy/numpyconfig.h"
+#include "numpy/npy_cpu.h"
/*
* largest alignment the copy loops might require
@@ -13,7 +14,11 @@
* amd64 is not harmed much by the bloat as the system provides 16 byte
* alignment by default.
*/
+#if (defined NPY_CPU_X86 || defined _WIN32)
+#define NPY_MAX_COPY_ALIGNMENT 8
+#else
#define NPY_MAX_COPY_ALIGNMENT 16
+#endif
/* blacklist */
diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/private/npy_partition.h.src
index fd79068f7..07aecd4f8 100644
--- a/numpy/core/src/private/npy_partition.h.src
+++ b/numpy/core/src/private/npy_partition.h.src
@@ -55,22 +55,6 @@ NPY_VISIBILITY_HIDDEN int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_i
/**end repeat**/
-NPY_VISIBILITY_HIDDEN int introselect_string(npy_char *vec, npy_intp cnt,
- npy_intp kth, PyArrayObject *arr);
-NPY_VISIBILITY_HIDDEN int aintroselect_string(npy_char *vec, npy_intp *ind,
- npy_intp cnt, npy_intp kth,
- void *null);
-
-
-NPY_VISIBILITY_HIDDEN int introselect_unicode(npy_ucs4 *vec, npy_intp cnt,
- npy_intp kth, PyArrayObject *arr);
-NPY_VISIBILITY_HIDDEN int aintroselect_unicode(npy_ucs4 *vec, npy_intp *ind,
- npy_intp cnt, npy_intp kth,
- void *null);
-
-NPY_VISIBILITY_HIDDEN int npy_introselect(void *base, size_t num, size_t size,
- size_t kth, npy_comparator cmp);
-
typedef struct {
enum NPY_TYPES typenum;
PyArray_PartitionFunc * part[NPY_NSELECTS];
diff --git a/numpy/core/src/private/npy_sort.h b/numpy/core/src/private/npy_sort.h
index 46825a0c5..85630b2df 100644
--- a/numpy/core/src/private/npy_sort.h
+++ b/numpy/core/src/private/npy_sort.h
@@ -9,7 +9,6 @@
#define NPY_ENOMEM 1
#define NPY_ECOMP 2
-typedef int (*npy_comparator)(const void *, const void *);
int quicksort_bool(npy_bool *vec, npy_intp cnt, void *null);
int heapsort_bool(npy_bool *vec, npy_intp cnt, void *null);
@@ -187,8 +186,11 @@ int aheapsort_timedelta(npy_timedelta *vec, npy_intp *ind, npy_intp cnt, void *n
int amergesort_timedelta(npy_timedelta *vec, npy_intp *ind, npy_intp cnt, void *null);
-int npy_quicksort(void *base, size_t num, size_t size, npy_comparator cmp);
-int npy_heapsort(void *base, size_t num, size_t size, npy_comparator cmp);
-int npy_mergesort(void *base, size_t num, size_t size, npy_comparator cmp);
+int npy_quicksort(char *vec, npy_intp cnt, PyArrayObject *arr);
+int npy_heapsort(char *vec, npy_intp cnt, PyArrayObject *arr);
+int npy_mergesort(char *vec, npy_intp cnt, PyArrayObject *arr);
+int npy_aquicksort(char *vec, npy_intp *ind, npy_intp cnt, PyArrayObject *arr);
+int npy_aheapsort(char *vec, npy_intp *ind, npy_intp cnt, PyArrayObject *arr);
+int npy_amergesort(char *vec, npy_intp *ind, npy_intp cnt, PyArrayObject *arr);
#endif
diff --git a/numpy/core/src/private/ufunc_override.h b/numpy/core/src/private/ufunc_override.h
index c47c46a66..c3f9f601e 100644
--- a/numpy/core/src/private/ufunc_override.h
+++ b/numpy/core/src/private/ufunc_override.h
@@ -13,7 +13,14 @@ normalize___call___args(PyUFuncObject *ufunc, PyObject *args,
{
/* ufunc.__call__(*args, **kwds) */
int nargs = PyTuple_GET_SIZE(args);
- PyObject *obj;
+ PyObject *obj = PyDict_GetItemString(*normal_kwds, "sig");
+
+ /* ufuncs accept 'sig' or 'signature'; normalize to 'signature' */
+ if (obj != NULL) {
+ /* PyDict_SetItemString adds its own reference; hold one across the delete */
+ Py_INCREF(obj);
+ PyDict_SetItemString(*normal_kwds, "signature", obj);
+ PyDict_DelItemString(*normal_kwds, "sig");
+ Py_DECREF(obj);
+ }
*normal_args = PyTuple_GetSlice(args, 0, nin);
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index e2c8137b3..e4fc617a5 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -485,10 +485,8 @@ static void
/**begin repeat
- * #name = byte, short, int, long, longlong,
- * float, double, longdouble#
- * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong,
- * npy_float, npy_double, npy_longdouble#
+ * #name = byte, short, int, long, longlong#
+ * #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong#
*/
static void
@name@_ctype_absolute(@type@ a, @type@ *out)
@@ -497,6 +495,18 @@ static void
}
/**end repeat**/
+/**begin repeat
+ * #name = float, double, longdouble#
+ * #type = npy_float, npy_double, npy_longdouble#
+ * #c = f,,l#
+ */
+static void
+@name@_ctype_absolute(@type@ a, @type@ *out)
+{
+ *out = npy_fabs@c@(a);
+}
+/**end repeat**/
+
static void
half_ctype_absolute(npy_half a, npy_half *out)
{
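
The practical effect of switching to npy_fabs: the old `a < 0 ? -a : a` pattern returns -0.0 unchanged, because -0.0 < 0 is false, while fabs clears the sign bit. A quick check:

    import numpy as np

    x = np.float64(-0.0)
    assert np.signbit(x)
    assert not np.signbit(np.abs(x))
    assert not np.signbit(abs(x))   # the builtin goes through the same scalarmath
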
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 5b111eb0d..55a638d6c 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -27,6 +27,19 @@
#include <stdlib.h>
#include <string.h> /* for memcpy */
+/* Figure out the right abs function for pointer differences */
+static NPY_INLINE npy_intp
+abs_intp(npy_intp x)
+{
+#if (NPY_SIZEOF_INTP <= NPY_SIZEOF_INT)
+ return abs(x);
+#elif (NPY_SIZEOF_INTP <= NPY_SIZEOF_LONG)
+ return labs(x);
+#else
+ return llabs(x);
+#endif
+}
+
/*
* stride is equal to element size and input and destination are equal or
* don't overlap within one register
@@ -34,10 +47,11 @@
#define IS_BLOCKABLE_UNARY(esize, vsize) \
(steps[0] == (esize) && steps[0] == steps[1] && \
(npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \
- ((abs(args[1] - args[0]) >= (vsize)) || ((abs(args[1] - args[0]) == 0))))
+ ((abs_intp(args[1] - args[0]) >= (vsize)) || \
+ ((abs_intp(args[1] - args[0]) == 0))))
#define IS_BLOCKABLE_REDUCE(esize, vsize) \
- (steps[1] == (esize) && abs(args[1] - args[0]) >= (vsize) && \
+ (steps[1] == (esize) && abs_intp(args[1] - args[0]) >= (vsize) && \
npy_is_aligned(args[1], (esize)) && \
npy_is_aligned(args[0], (esize)))
@@ -45,20 +59,26 @@
(steps[0] == steps[1] && steps[1] == steps[2] && steps[2] == (esize) && \
npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \
npy_is_aligned(args[0], (esize)) && \
- (abs(args[2] - args[0]) >= (vsize) || abs(args[2] - args[0]) == 0) && \
- (abs(args[2] - args[1]) >= (vsize) || abs(args[2] - args[1]) >= 0))
+ (abs_intp(args[2] - args[0]) >= (vsize) || \
+ abs_intp(args[2] - args[0]) == 0) && \
+ (abs_intp(args[2] - args[1]) >= (vsize) || \
+ abs_intp(args[2] - args[1]) == 0))
#define IS_BLOCKABLE_BINARY_SCALAR1(esize, vsize) \
(steps[0] == 0 && steps[1] == steps[2] && steps[2] == (esize) && \
npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[1], (esize)) && \
- ((abs(args[2] - args[1]) >= (vsize)) || (abs(args[2] - args[1]) == 0)) && \
- abs(args[2] - args[0]) >= (esize))
+ ((abs_intp(args[2] - args[1]) >= (vsize)) || \
+ (abs_intp(args[2] - args[1]) == 0)) && \
+ abs_intp(args[2] - args[0]) >= (esize))
#define IS_BLOCKABLE_BINARY_SCALAR2(esize, vsize) \
(steps[1] == 0 && steps[0] == steps[2] && steps[2] == (esize) && \
npy_is_aligned(args[2], (esize)) && npy_is_aligned(args[0], (esize)) && \
- ((abs(args[2] - args[0]) >= (vsize)) || (abs(args[2] - args[0]) == 0)) && \
- abs(args[2] - args[1]) >= (esize))
+ ((abs_intp(args[2] - args[0]) >= (vsize)) || \
+ (abs_intp(args[2] - args[0]) == 0)) && \
+ abs_intp(args[2] - args[1]) >= (esize))
+
+#undef abs_intp
#define IS_BLOCKABLE_BINARY_BOOL(esize, vsize) \
(steps[0] == (esize) && steps[0] == steps[1] && steps[2] == (1) && \
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index dc5065f14..9f89d71c2 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -750,6 +750,35 @@ fail:
return -1;
}
+/*
+ * Checks if 'obj' is a valid output array for a ufunc, i.e. it is
+ * either None or a writeable array, increments its reference count
+ * and stores a pointer to it in 'store'. Returns 0 on success, sets
+ * an exception and returns -1 on failure.
+ */
+static int
+_set_out_array(PyObject *obj, PyArrayObject **store)
+{
+ if (obj == Py_None) {
+ /* Translate None to NULL */
+ return 0;
+ }
+ if (PyArray_Check(obj)) {
+ /* If it's an array, store it */
+ if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
+ "output array") < 0) {
+ return -1;
+ }
+ Py_INCREF(obj);
+ *store = (PyArrayObject *)obj;
+
+ return 0;
+ }
+ PyErr_SetString(PyExc_TypeError, "return arrays must be of ArrayType");
+
+ return -1;
+}
+
/********* GENERIC UFUNC USING ITERATOR *********/
/*
@@ -759,23 +788,27 @@ fail:
* non-zero references in out_op. This
* function does not do its own clean-up.
*/
-static int get_ufunc_arguments(PyUFuncObject *ufunc,
- PyObject *args, PyObject *kwds,
- PyArrayObject **out_op,
- NPY_ORDER *out_order,
- NPY_CASTING *out_casting,
- PyObject **out_extobj,
- PyObject **out_typetup,
- int *out_subok,
- PyArrayObject **out_wheremask)
+static int
+get_ufunc_arguments(PyUFuncObject *ufunc,
+ PyObject *args, PyObject *kwds,
+ PyArrayObject **out_op,
+ NPY_ORDER *out_order,
+ NPY_CASTING *out_casting,
+ PyObject **out_extobj,
+ PyObject **out_typetup,
+ int *out_subok,
+ PyArrayObject **out_wheremask)
{
- int i, nargs, nin = ufunc->nin;
+ int i, nargs;
+ int nin = ufunc->nin;
+ int nout = ufunc->nout;
PyObject *obj, *context;
PyObject *str_key_obj = NULL;
const char *ufunc_name;
int type_num;
int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
+ int has_sig = 0;
ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
@@ -878,23 +911,7 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
/* Get positional output arguments */
for (i = nin; i < nargs; ++i) {
obj = PyTuple_GET_ITEM(args, i);
- /* Translate None to NULL */
- if (obj == Py_None) {
- continue;
- }
- /* If it's an array, can use it */
- if (PyArray_Check(obj)) {
- if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
- "output array") < 0) {
- return -1;
- }
- Py_INCREF(obj);
- out_op[i] = (PyArrayObject *)obj;
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "return arrays must be "
- "of ArrayType");
+ if (_set_out_array(obj, out_op + i) < 0) {
return -1;
}
}
@@ -929,7 +946,7 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
switch (str[0]) {
case 'c':
/* Provides a policy for allowed casting */
- if (strncmp(str,"casting",7) == 0) {
+ if (strcmp(str, "casting") == 0) {
if (!PyArray_CastingConverter(value, out_casting)) {
goto fail;
}
@@ -938,7 +955,7 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
break;
case 'd':
/* Another way to specify 'sig' */
- if (strncmp(str,"dtype",5) == 0) {
+ if (strcmp(str, "dtype") == 0) {
/* Allow this parameter to be None */
PyArray_Descr *dtype;
if (!PyArray_DescrConverter2(value, &dtype)) {
@@ -960,40 +977,79 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
* Overrides the global parameters buffer size,
* error mask, and error object
*/
- if (strncmp(str,"extobj",6) == 0) {
+ if (strcmp(str, "extobj") == 0) {
*out_extobj = value;
bad_arg = 0;
}
break;
case 'o':
- /* First output may be specified as a keyword parameter */
- if (strncmp(str,"out",3) == 0) {
- if (out_op[nin] != NULL) {
+ /*
+ * Output arrays may be specified as a keyword argument,
+ * either as a single array or None for single output
+ * ufuncs, or as a tuple of arrays and Nones.
+ */
+ if (strcmp(str, "out") == 0) {
+ if (nargs > nin) {
PyErr_SetString(PyExc_ValueError,
"cannot specify 'out' as both a "
"positional and keyword argument");
goto fail;
}
-
- if (PyArray_Check(value)) {
- const char *name = "output array";
- PyArrayObject *value_arr = (PyArrayObject *)value;
- if (PyArray_FailUnlessWriteable(value_arr, name) < 0) {
+ if (PyTuple_Check(value)) {
+ if (PyTuple_GET_SIZE(value) != nout) {
+ PyErr_SetString(PyExc_ValueError,
+ "The 'out' tuple must have exactly "
+ "one entry per ufunc output");
+ goto fail;
+ }
+ /* 'out' must be a tuple of arrays and Nones */
+ for (i = 0; i < nout; ++i) {
+ PyObject *val = PyTuple_GET_ITEM(value, i);
+ if (_set_out_array(val, out_op+nin+i) < 0) {
+ goto fail;
+ }
+ }
+ }
+ else if (nout == 1) {
+ /* Can be an array if it only has one output */
+ if (_set_out_array(value, out_op + nin) < 0) {
goto fail;
}
- Py_INCREF(value);
- out_op[nin] = (PyArrayObject *)value;
}
else {
- PyErr_SetString(PyExc_TypeError,
- "return arrays must be "
- "of ArrayType");
- goto fail;
+ /*
+ * If the deprecated behavior is ever removed,
+ * keep only the else branch of this if-else
+ */
+ if (PyArray_Check(value) || value == Py_None) {
+ if (DEPRECATE("passing a single array to the "
+ "'out' keyword argument of a "
+ "ufunc with\n"
+ "more than one output will "
+ "result in an error in the "
+ "future") < 0) {
+ /* The future error message */
+ PyErr_SetString(PyExc_TypeError,
+ "'out' must be a tuple of arrays");
+ goto fail;
+ }
+ if (_set_out_array(value, out_op+nin) < 0) {
+ goto fail;
+ }
+ }
+ else {
+ PyErr_SetString(PyExc_TypeError,
+ nout > 1 ? "'out' must be a tuple "
+ "of arrays" :
+ "'out' must be an array or a "
+ "tuple of a single array");
+ goto fail;
+ }
}
bad_arg = 0;
}
/* Allows the default output layout to be overridden */
- else if (strncmp(str,"order",5) == 0) {
+ else if (strcmp(str, "order") == 0) {
if (!PyArray_OrderConverter(value, out_order)) {
goto fail;
}
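
A sketch of the keyword forms the out= handling above accepts, using np.modf as a two-output example:

    import numpy as np

    x = np.linspace(0, 2, 5)
    frac, whole = np.empty_like(x), np.empty_like(x)
    res = np.modf(x, out=(frac, whole))  # one tuple entry per ufunc output
    assert res[0] is frac and res[1] is whole
    np.modf(x, out=(None, whole))        # None entries let the ufunc allocate
    np.add(x, 1, out=np.empty_like(x))   # single-output ufuncs may pass a bare array
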
@@ -1002,7 +1058,13 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
break;
case 's':
/* Allows a specific function inner loop to be selected */
- if (strncmp(str,"sig",3) == 0) {
+ if (strcmp(str, "sig") == 0 ||
+ strcmp(str, "signature") == 0) {
+ if (has_sig == 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot specify both 'sig' and 'signature'");
+ goto fail;
+ }
if (*out_typetup != NULL) {
PyErr_SetString(PyExc_RuntimeError,
"cannot specify both 'sig' and 'dtype'");
@@ -1011,8 +1073,9 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
*out_typetup = value;
Py_INCREF(value);
bad_arg = 0;
+ has_sig = 1;
}
- else if (strncmp(str,"subok",5) == 0) {
+ else if (strcmp(str, "subok") == 0) {
if (!PyBool_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"'subok' must be a boolean");
@@ -1027,8 +1090,7 @@ static int get_ufunc_arguments(PyUFuncObject *ufunc,
* Provides a boolean array 'where=' mask if
* out_wheremask is supplied.
*/
- if (out_wheremask != NULL &&
- strncmp(str,"where",5) == 0) {
+ if (out_wheremask != NULL && strcmp(str, "where") == 0) {
PyArray_Descr *dtype;
dtype = PyArray_DescrFromType(NPY_BOOL);
if (dtype == NULL) {
@@ -3946,6 +4008,38 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
/*
+ * Returns an incref'ed pointer to the proper wrapping object for a
+ * ufunc output argument, given the output argument 'out', and the
+ * input's wrapping function, 'wrap'.
+ */
+static PyObject*
+_get_out_wrap(PyObject *out, PyObject *wrap) {
+ PyObject *owrap;
+
+ if (out == Py_None) {
+ /* Iterator allocated outputs get the input's wrapping */
+ Py_XINCREF(wrap);
+ return wrap;
+ }
+ if (PyArray_CheckExact(out)) {
+ /* None signals to not call any wrapping */
+ Py_RETURN_NONE;
+ }
+ /*
+ * For array subclasses use their __array_wrap__ method, or the
+ * input's wrapping if not available
+ */
+ owrap = PyObject_GetAttr(out, npy_um_str_array_wrap);
+ if (owrap == NULL || !PyCallable_Check(owrap)) {
+ Py_XDECREF(owrap);
+ owrap = wrap;
+ Py_XINCREF(wrap);
+ PyErr_Clear();
+ }
+ return owrap;
+}
+
+/*
* This function analyzes the input arguments
* and determines an appropriate __array_wrap__ function to call
* for the outputs.
@@ -3966,7 +4060,7 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
PyObject **output_wrap, int nin, int nout)
{
Py_ssize_t nargs;
- int i;
+ int i, idx_offset, start_idx;
int np = 0;
PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS];
PyObject *obj, *wrap = NULL;
@@ -4043,45 +4137,45 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
*/
handle_out:
nargs = PyTuple_GET_SIZE(args);
- for (i = 0; i < nout; i++) {
- int j = nin + i;
- int incref = 1;
- output_wrap[i] = wrap;
- obj = NULL;
- if (j < nargs) {
- obj = PyTuple_GET_ITEM(args, j);
- /* Output argument one may also be in a keyword argument */
- if (i == 0 && obj == Py_None && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
+ /* Default is using positional arguments */
+ obj = args;
+ idx_offset = nin;
+ start_idx = 0;
+ if (nin == nargs && kwds != NULL) {
+ /* There may be a keyword argument we can use instead */
+ obj = PyDict_GetItem(kwds, npy_um_str_out);
+ if (obj == NULL) {
+ /* No, go back to positional (even though there aren't any) */
+ obj = args;
}
- /* Output argument one may also be in a keyword argument */
- else if (i == 0 && kwds != NULL) {
- obj = PyDict_GetItem(kwds, npy_um_str_out);
- }
-
- if (obj != Py_None && obj != NULL) {
- if (PyArray_CheckExact(obj)) {
- /* None signals to not call any wrapping */
- output_wrap[i] = Py_None;
+ else {
+ idx_offset = 0;
+ if (PyTuple_Check(obj)) {
+ /* If a tuple, must have all nout items */
+ nargs = nout;
}
else {
- PyObject *owrap = PyObject_GetAttr(obj, npy_um_str_array_wrap);
- incref = 0;
- if (!(owrap) || !(PyCallable_Check(owrap))) {
- Py_XDECREF(owrap);
- owrap = wrap;
- incref = 1;
- PyErr_Clear();
- }
- output_wrap[i] = owrap;
+ /* If the kwarg is not a tuple then it is an array (or None) */
+ output_wrap[0] = _get_out_wrap(obj, wrap);
+ start_idx = 1;
+ nargs = 1;
}
}
+ }
- if (incref) {
- Py_XINCREF(output_wrap[i]);
+ for (i = start_idx; i < nout; ++i) {
+ int j = idx_offset + i;
+
+ if (j < nargs) {
+ output_wrap[i] = _get_out_wrap(PyTuple_GET_ITEM(obj, j),
+ wrap);
+ }
+ else {
+ output_wrap[i] = wrap;
+ Py_XINCREF(wrap);
}
}
+
Py_XDECREF(wrap);
return;
}
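
How the wrap resolution plays out at the Python level, as a sketch: an iterator-allocated output inherits the input's wrapping, while passing an exact base-class ndarray as out suppresses wrapping for that output.

    import numpy as np

    class Tagged(np.ndarray):
        pass

    a = np.arange(3).view(Tagged)
    assert type(np.add(a, 1)) is Tagged                 # input's wrap used
    out = np.empty(3, dtype=a.dtype)
    assert type(np.add(a, 1, out=out)) is np.ndarray    # exact ndarray: no wrap
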
@@ -4363,6 +4457,14 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
{
PyUFuncObject *ufunc;
+ if (nin + nout > NPY_MAXARGS) {
+ PyErr_Format(PyExc_ValueError,
+ "Cannot construct a ufunc with more than %d operands "
+ "(requested number were: inputs = %d and outputs = %d)",
+ NPY_MAXARGS, nin, nout);
+ return NULL;
+ }
+
ufunc = PyArray_malloc(sizeof(PyUFuncObject));
if (ufunc == NULL) {
return NULL;
@@ -5007,6 +5109,12 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
return NULL;
}
+ if (ufunc->nout != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Only single output ufuncs supported at this time");
+ return NULL;
+ }
+
if (!PyArg_ParseTuple(args, "OO|O", &op1, &idx, &op2)) {
return NULL;
}
@@ -5439,7 +5547,7 @@ ufunc_get_identity(PyUFuncObject *ufunc)
case PyUFunc_Zero:
return PyInt_FromLong(0);
}
- return Py_None;
+ Py_RETURN_NONE;
}
static PyObject *
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index ec28bb9e4..fe2e8cac3 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1594,7 +1594,6 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
else if (op[i] != NULL &&
PyArray_DESCR(op[i])->type_num == type_nums[i]) {
out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[i]));
- Py_XINCREF(out_dtypes[i]);
}
/*
* For outputs, copy the dtype from op[0] if the type_num
@@ -1603,7 +1602,6 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
else if (i >= nin && op[0] != NULL &&
PyArray_DESCR(op[0])->type_num == type_nums[i]) {
out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[0]));
- Py_XINCREF(out_dtypes[i]);
}
/* Otherwise create a plain descr from the type number */
else {
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index d792e8b24..624588410 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -103,6 +103,13 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
PyErr_SetString(PyExc_TypeError, "function must be callable");
return NULL;
}
+ if (nin + nout > NPY_MAXARGS) {
+ PyErr_Format(PyExc_ValueError,
+ "Cannot construct a ufunc with more than %d operands "
+ "(requested number were: inputs = %d and outputs = %d)",
+ NPY_MAXARGS, nin, nout);
+ return NULL;
+ }
self = PyArray_malloc(sizeof(PyUFuncObject));
if (self == NULL) {
return NULL;
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 2621c8696..852660432 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
+from numpy.core.test_rational import rational
def assert_dtype_equal(a, b):
assert_equal(a, b)
@@ -124,6 +125,21 @@ class TestRecord(TestCase):
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
+ def test_mutate(self):
+ # Mutating a dtype should reset the cached hash value
+ a = np.dtype([('yo', np.int)])
+ b = np.dtype([('yo', np.int)])
+ c = np.dtype([('ye', np.int)])
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+ a.names = ['ye']
+ assert_dtype_equal(a, c)
+ assert_dtype_not_equal(a, b)
+ state = b.__reduce__()[2]
+ a.__setstate__(state)
+ assert_dtype_equal(a, b)
+ assert_dtype_not_equal(a, c)
+
def test_not_lists(self):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
@@ -533,5 +549,11 @@ class TestDtypeAttributes(TestCase):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+def test_rational_dtype():
+ # test for bug gh-5719
+ a = np.array([1111], dtype=rational).astype
+ assert_raises(OverflowError, a, 'int8')
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index e59bb257b..3beef71fb 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -444,7 +444,7 @@ class TestBroadcastedAssignments(TestCase):
# Too large and not only ones.
assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1)))
-
+
with warnings.catch_warnings():
# Will be a ValueError as well.
warnings.simplefilter("error", DeprecationWarning)
@@ -547,6 +547,23 @@ class TestFancyIndexingEquivalence(TestCase):
b[[0], :3] = [[1, (1,2), 3]]
assert_array_equal(a, b[0])
+ # Check that swapping of axes works.
+ # There was a bug that made the following assignment raise a ValueError
+ # due to an incorrectly transposed temporary right-hand side (gh-5714)
+ b = b.T
+ b[:3, [0]] = [[1], [(1,2)], [3]]
+ assert_array_equal(a, b[:, 0])
+
+ # Another test for the memory order of the subspace
+ arr = np.ones((3, 4, 5), dtype=object)
+ # Equivalent slicing assignment for comparison
+ cmp_arr = arr.copy()
+ cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
+ arr = arr.copy('F')
+ arr[[0], ...] = [[[1], [2], [3], [4]]]
+ assert_array_equal(arr, cmp_arr)
def test_cast_equivalence(self):
# Yes, normal slicing uses unsafe casting.
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b0d677052..314adf4d1 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -69,6 +69,17 @@ class TestFlags(TestCase):
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
+ def test_string_align(self):
+ a = np.zeros(4, dtype=np.dtype('|S4'))
+ assert_(a.flags.aligned)
+ # itemsizes that are not a power of two are accessed bytewise and
+ # thus considered aligned
+ a = np.zeros(5, dtype=np.dtype('|S5'))
+ assert_(a.flags.aligned)
+
+ def test_void_align(self):
+ a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
+ assert_(a.flags.aligned)
+
class TestHash(TestCase):
# see #3793
def test_int(self):
@@ -194,6 +205,14 @@ class TestAttributes(TestCase):
y[...] = 1
assert_equal(x, y)
+ def test_fill_max_uint64(self):
+ x = empty((3, 2, 1), dtype=uint64)
+ y = empty((3, 2, 1), dtype=uint64)
+ value = 2**64 - 1
+ y[...] = value
+ x.fill(value)
+ assert_array_equal(x, y)
+
def test_fill_struct_array(self):
# Filling from a scalar
x = array([(0, 0.0), (1, 1.0)], dtype='i4,f8')
@@ -849,11 +868,22 @@ class TestBool(TestCase):
self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
class TestMethods(TestCase):
- def test_test_round(self):
- assert_equal(array([1.2, 1.5]).round(), [1, 2])
- assert_equal(array(1.5).round(), 2)
- assert_equal(array([12.2, 15.5]).round(-1), [10, 20])
- assert_equal(array([12.15, 15.51]).round(1), [12.2, 15.5])
+ def test_round(self):
+ def check_round(arr, expected, *round_args):
+ assert_equal(arr.round(*round_args), expected)
+ # With output array
+ out = np.zeros_like(arr)
+ res = arr.round(*round_args, out=out)
+ assert_equal(out, expected)
+ assert_equal(out, res)
+
+ check_round(array([1.2, 1.5]), [1, 2])
+ check_round(array(1.5), 2)
+ check_round(array([12.2, 15.5]), [10, 20], -1)
+ check_round(array([12.15, 15.51]), [12.2, 15.5], 1)
+ # Complex rounding
+ check_round(array([4.5 + 1.5j]), [4 + 2j])
+ check_round(array([12.5 + 15.5j]), [10 + 20j], -1)
def test_transpose(self):
a = array([[1, 2], [3, 4]])
@@ -1021,6 +1051,15 @@ class TestMethods(TestCase):
d.sort()
assert_equal(d, c, "test sort with default axis")
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array sort with axis={0}'.format(axis)
+ assert_equal(np.sort(a, axis=axis), a, msg)
+ msg = 'test empty array sort with axis=None'
+ assert_equal(np.sort(a, axis=None), a.ravel(), msg)
+
def test_copy(self):
def assert_fortran(arr):
assert_(arr.flags.fortran)
@@ -1181,7 +1220,6 @@ class TestMethods(TestCase):
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
-
# check axis handling. This should be the same for all type
# specific argsorts, so we only check it for one type and one kind
a = np.array([[3, 2], [1, 0]])
@@ -1193,6 +1231,18 @@ class TestMethods(TestCase):
# using None is known fail at this point
#assert_equal(a.copy().argsort(axis=None, c)
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array argsort with axis={0}'.format(axis)
+ assert_equal(np.argsort(a, axis=axis),
+ np.zeros_like(a, dtype=np.intp), msg)
+ msg = 'test empty array argsort with axis=None'
+ assert_equal(np.argsort(a, axis=None),
+ np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
# check that stable argsorts are stable
r = np.arange(100)
# scalars
@@ -1417,6 +1467,50 @@ class TestMethods(TestCase):
b = a.searchsorted(a, 'r', s)
assert_equal(b, out + 1)
+ def test_argpartition_out_of_range(self):
+ # Test out of range values in kth raise an error, gh-5469
+ d = np.arange(10)
+ assert_raises(ValueError, d.argpartition, 10)
+ assert_raises(ValueError, d.argpartition, -11)
+ # Test also for generic type argpartition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(ValueError, d_obj.argpartition, 10)
+ assert_raises(ValueError, d_obj.argpartition, -11)
+
+ def test_partition_out_of_range(self):
+ # Test out of range values in kth raise an error, gh-5469
+ d = np.arange(10)
+ assert_raises(ValueError, d.partition, 10)
+ assert_raises(ValueError, d.partition, -11)
+ # Test also for generic type partition, which uses sorting
+ # and used to not bound check kth
+ d_obj = np.arange(10, dtype=object)
+ assert_raises(ValueError, d_obj.partition, 10)
+ assert_raises(ValueError, d_obj.partition, -11)
+
+ def test_partition_empty_array(self):
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array partition with axis={0}'.format(axis)
+ assert_equal(np.partition(a, 0, axis=axis), a, msg)
+ msg = 'test empty array partition with axis=None'
+ assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
+
+ def test_argpartition_empty_array(self):
+ # check axis handling for multidimensional empty arrays
+ a = np.array([])
+ a.shape = (3, 2, 1, 0)
+ for axis in range(-a.ndim, a.ndim):
+ msg = 'test empty array argpartition with axis={0}'.format(axis)
+ assert_equal(np.argpartition(a, 0, axis=axis),
+ np.zeros_like(a, dtype=np.intp), msg)
+ msg = 'test empty array argpartition with axis=None'
+ assert_equal(np.argpartition(a, 0, axis=None),
+ np.zeros_like(a.ravel(), dtype=np.intp), msg)
+
def test_partition(self):
d = np.arange(10)
assert_raises(TypeError, np.partition, d, 2, kind=1)
@@ -1748,6 +1842,12 @@ class TestMethods(TestCase):
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
+ def test_argpartition_gh5524(self):
+ # A test for functionality of argpartition on lists.
+ d = [6, 7, 3, 2, 9, 0]
+ p = np.argpartition(d, 1)
+ self.assert_partitioned(np.array(d)[p], [1])
+
def test_flatten(self):
x0 = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
x1 = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], np.int32)
@@ -2276,6 +2376,22 @@ class TestBinop(object):
assert_equal(obj2.sum(), 42)
assert_(isinstance(obj2, SomeClass2))
+ def test_ufunc_override_normalize_signature(self):
+ # gh-5674
+ class SomeClass(object):
+ def __numpy_ufunc__(self, ufunc, method, i, inputs, **kw):
+ return kw
+
+ a = SomeClass()
+ kw = np.add(a, [1])
+ assert_('sig' not in kw and 'signature' not in kw)
+ kw = np.add(a, [1], sig='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+ kw = np.add(a, [1], signature='ii->i')
+ assert_('sig' not in kw and 'signature' in kw)
+ assert_equal(kw['signature'], 'ii->i')
+
class TestCAPI(TestCase):
def test_IsPythonScalar(self):
@@ -2544,6 +2660,22 @@ class TestArgmax(TestCase):
d[5942] = "as"
assert_equal(d.argmax(), 5942)
+ def test_np_vs_ndarray(self):
+ # make sure both ndarray.argmax and numpy.argmax support out/axis args
+ a = np.random.normal(size=(2,3))
+
+ #check positional args
+ out1 = zeros(2, dtype=int)
+ out2 = zeros(2, dtype=int)
+ assert_equal(a.argmax(1, out1), np.argmax(a, 1, out2))
+ assert_equal(out1, out2)
+
+ #check keyword args
+ out1 = zeros(3, dtype=int)
+ out2 = zeros(3, dtype=int)
+ assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
+ assert_equal(out1, out2)
+
class TestArgmin(TestCase):
@@ -2654,6 +2786,22 @@ class TestArgmin(TestCase):
d[6001] = "0"
assert_equal(d.argmin(), 6001)
+ def test_np_vs_ndarray(self):
+ # make sure both ndarray.argmin and numpy.argmin support out/axis args
+ a = np.random.normal(size=(2,3))
+
+ #check positional args
+ out1 = zeros(2, dtype=int)
+ out2 = ones(2, dtype=int)
+ assert_equal(a.argmin(1, out1), np.argmin(a, 1, out2))
+ assert_equal(out1, out2)
+
+ #check keyword args
+ out1 = zeros(3, dtype=int)
+ out2 = ones(3, dtype=int)
+ assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
+ assert_equal(out1, out2)
+
class TestMinMax(TestCase):
def test_scalar(self):
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 431f80534..fa2f52a23 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -10,6 +10,7 @@ import warnings
import tempfile
from os import path
from io import BytesIO
+from itertools import chain
import numpy as np
from numpy.testing import (
@@ -2110,6 +2111,20 @@ class TestRegression(TestCase):
test_string = np.string_('')
assert_equal(pickle.loads(pickle.dumps(test_string)), test_string)
+ def test_frompyfunc_many_args(self):
+ # gh-5672
+
+ def passer(*args):
+ pass
+
+ assert_raises(ValueError, np.frompyfunc, passer, 32, 1)
+
+ def test_repeat_broadcasting(self):
+ # gh-5743
+ a = np.arange(60).reshape(3, 4, 5)
+ for axis in chain(range(-a.ndim, a.ndim), [None]):
+ assert_equal(a.repeat(2, axis=axis), a.repeat([2], axis=axis))
+
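
A quick illustration of the gh-5743 behavior the new test locks in, assuming
a build with this fix applied::

    >>> import numpy as np
    >>> a = np.arange(60).reshape(3, 4, 5)
    >>> # a scalar count and a length-1 sequence now broadcast identically
    >>> np.array_equal(a.repeat(2, axis=1), a.repeat([2], axis=1))
    True
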
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 3ba3beff9..8b6816958 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -11,6 +11,9 @@ types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
np.single, np.double, np.longdouble, np.csingle,
np.cdouble, np.clongdouble]
+floating_types = np.floating.__subclasses__()
+
+
# This compares scalarmath against ufuncs.
class TestTypes(TestCase):
@@ -284,5 +287,26 @@ class TestSizeOf(TestCase):
assert_raises(TypeError, d.__sizeof__, "a")
+class TestAbs(TestCase):
+
+ def _test_abs_func(self, absfunc):
+ for tp in floating_types:
+ x = tp(-1.5)
+ assert_equal(absfunc(x), 1.5)
+ x = tp(0.0)
+ res = absfunc(x)
+ # assert_equal() checks zero signedness
+ assert_equal(res, 0.0)
+ x = tp(-0.0)
+ res = absfunc(x)
+ assert_equal(res, 0.0)
+
+ def test_builtin_abs(self):
+ self._test_abs_func(abs)
+
+ def test_numpy_abs(self):
+ self._test_abs_func(np.abs)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index a285d5334..699a1b2ea 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -7,7 +7,30 @@ from numpy.testing import *
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.compat import asbytes
-from numpy.core.test_rational import *
+from numpy.core.test_rational import rational, test_add, test_add_rationals
+
+
+class TestUfuncKwargs(TestCase):
+ def test_kwarg_exact(self):
+ assert_raises(TypeError, np.add, 1, 2, castingx='safe')
+ assert_raises(TypeError, np.add, 1, 2, dtypex=np.int)
+ assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
+ assert_raises(TypeError, np.add, 1, 2, outx=None)
+ assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, signaturex='ii->i')
+ assert_raises(TypeError, np.add, 1, 2, subokx=False)
+ assert_raises(TypeError, np.add, 1, 2, wherex=[True])
+
+ def test_sig_signature(self):
+ assert_raises(ValueError, np.add, 1, 2, sig='ii->i',
+ signature='ii->i')
+
+ def test_sig_dtype(self):
+ assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
+ dtype=np.int)
+ assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
+ dtype=np.int)
+
class TestUfunc(TestCase):
def test_pickle(self):
@@ -1072,6 +1095,9 @@ class TestUfunc(TestCase):
self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=np.object))
+ # Test multiple output ufuncs raise error, gh-5665
+ assert_raises(ValueError, np.modf.at, np.arange(10), [1])
+
def test_reduce_arguments(self):
f = np.add.reduce
d = np.ones((5,2), dtype=int)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 092953872..e8eee8090 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -36,35 +36,137 @@ class TestConstants(TestCase):
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+
class TestOut(TestCase):
def test_out_subok(self):
- for b in (True, False):
- aout = np.array(0.5)
-
- r = np.add(aout, 2, out=aout)
- assert_(r is aout)
- assert_array_equal(r, aout)
-
- r = np.add(aout, 2, out=aout, subok=b)
- assert_(r is aout)
- assert_array_equal(r, aout)
-
- r = np.add(aout, 2, aout, subok=False)
- assert_(r is aout)
- assert_array_equal(r, aout)
-
- d = np.ones(5)
- o1 = np.zeros(5)
- o2 = np.zeros(5, dtype=np.int32)
- r1, r2 = np.frexp(d, o1, o2, subok=b)
+ for subok in (True, False):
+ a = np.array(0.5)
+ o = np.empty(())
+
+ r = np.add(a, 2, o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=o, subok=subok)
+ assert_(r is o)
+ r = np.add(a, 2, out=(o,), subok=subok)
+ assert_(r is o)
+
+ d = np.array(5.7)
+ o1 = np.empty(())
+ o2 = np.empty((), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ assert_(r1 is o1)
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, o1, o2, subok=subok)
assert_(r1 is o1)
- assert_array_equal(r1, o1)
assert_(r2 is o2)
- assert_array_equal(r2, o2)
- r1, r2 = np.frexp(d, out=o1, subok=b)
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
assert_(r1 is o1)
- assert_array_equal(r1, o1)
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ assert_(r2 is o2)
+ r1, r2 = np.frexp(d, out=(o1, o2), subok=subok)
+ assert_(r1 is o1)
+ assert_(r2 is o2)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+ assert_(r1 is o1)
+ assert_(w[0].category is DeprecationWarning)
+
+ assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, None, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o, o), subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(), subok=subok)
+ assert_raises(TypeError, np.add, a, 2, [], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=[], subok=subok)
+ assert_raises(TypeError, np.add, a, 2, out=([],), subok=subok)
+ o.flags.writeable = False
+ assert_raises(ValueError, np.add, a, 2, o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=o, subok=subok)
+ assert_raises(ValueError, np.add, a, 2, out=(o,), subok=subok)
+
+
+ def test_out_wrap_subok(self):
+ class ArrayWrap(np.ndarray):
+ __array_priority__ = 10
+ def __new__(cls, arr):
+ return np.asarray(arr).view(cls).copy()
+ def __array_wrap__(self, arr, context):
+ return arr.view(type(self))
+
+ for subok in (True, False):
+ a = ArrayWrap([0.5])
+
+ r = np.add(a, 2, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=None, subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ r = np.add(a, 2, out=(None,), subok=subok)
+ if subok:
+ assert_(isinstance(r, ArrayWrap))
+ else:
+ assert_(type(r) == np.ndarray)
+
+ d = ArrayWrap([5.7])
+ o1 = np.empty((1,))
+ o2 = np.empty((1,), dtype=np.int32)
+
+ r1, r2 = np.frexp(d, o1, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, o1, None, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, None, o2, subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(o1, None), subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+
+ r1, r2 = np.frexp(d, out=(None, o2), subok=subok)
+ if subok:
+ assert_(isinstance(r1, ArrayWrap))
+ else:
+ assert_(type(r1) == np.ndarray)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ r1, r2 = np.frexp(d, out=o1, subok=subok)
+ if subok:
+ assert_(isinstance(r2, ArrayWrap))
+ else:
+ assert_(type(r2) == np.ndarray)
+ assert_(w[0].category is DeprecationWarning)
class TestDivision(TestCase):
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index b43e08b05..c16b10ae5 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -13,7 +13,7 @@ if sys.version_info[0] < 3:
from .npy_pkg_config import *
try:
- import __config__
+ from . import __config__
_INSTALLED = True
except ImportError:
_INSTALLED = False
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index d7fe702a6..1148c58c1 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -509,7 +509,9 @@ def CCompiler_cxx_compiler(self):
The C++ compiler, as a `CCompiler` instance.
"""
- if self.compiler_type=='msvc': return self
+ if self.compiler_type in ('msvc', 'intelw', 'intelemw'):
+ return self
+
cxx = copy(self)
cxx.compiler_so = [cxx.compiler_cxx[0]] + cxx.compiler_so[1:]
if sys.platform.startswith('aix') and 'ld_so_aix' in cxx.linker_so[0]:
@@ -525,15 +527,21 @@ replace_method(CCompiler, 'cxx_compiler', CCompiler_cxx_compiler)
compiler_class['intel'] = ('intelccompiler', 'IntelCCompiler',
"Intel C Compiler for 32-bit applications")
compiler_class['intele'] = ('intelccompiler', 'IntelItaniumCCompiler',
- "Intel C Itanium Compiler for Itanium-based applications")
+ "Intel C Itanium Compiler for Itanium-based applications")
compiler_class['intelem'] = ('intelccompiler', 'IntelEM64TCCompiler',
"Intel C Compiler for 64-bit applications")
+compiler_class['intelw'] = ('intelccompiler', 'IntelCCompilerW',
+ "Intel C Compiler for 32-bit applications on Windows")
+compiler_class['intelemw'] = ('intelccompiler', 'IntelEM64TCCompilerW',
+ "Intel C Compiler for 64-bit applications on Windows")
compiler_class['pathcc'] = ('pathccompiler', 'PathScaleCCompiler',
"PathScale Compiler for SiCortex-based applications")
ccompiler._default_compilers += (('linux.*', 'intel'),
('linux.*', 'intele'),
('linux.*', 'intelem'),
- ('linux.*', 'pathcc'))
+ ('linux.*', 'pathcc'),
+ ('nt', 'intelw'),
+ ('nt', 'intelemw'))
if sys.platform == 'win32':
compiler_class['mingw32'] = ('mingw32ccompiler', 'Mingw32CCompiler',
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index ccd5e8d48..f568135c0 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -8,7 +8,6 @@ import platform
import tempfile
from subprocess import Popen, PIPE, STDOUT
-from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import msvc_runtime_library
@@ -21,6 +20,8 @@ TARGET_R = re.compile("Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+def is_win32():
+ return sys.platform == "win32" and platform.architecture()[0] == "32bit"
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
@@ -35,22 +36,45 @@ class GnuFCompiler(FCompiler):
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
- # Try to find a valid version string
- m = re.search(r'([0-9.]+)', version_string)
- if m:
- # g77 provides a longer version string that starts with GNU
- # Fortran
- if version_string.startswith('GNU Fortran'):
- return ('g77', m.group(1))
-
- # gfortran only outputs a version string such as #.#.#, so check
- # if the match is at the start of the string
- elif m.start() == 0:
+ # Strip warning(s) that may be emitted by gfortran
+ while version_string.startswith('gfortran: warning'):
+ version_string = version_string[version_string.find('\n')+1:]
+
+ # Gfortran versions from after 2010 will output a simple string
+ # (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
+ # gfortrans may still return long version strings (``-dumpversion`` was
+ # an alias for ``--version``)
+ if len(version_string) <= 20:
+ # Try to find a valid version string
+ m = re.search(r'([0-9.]+)', version_string)
+ if m:
+ # g77 provides a longer version string that starts with GNU
+ # Fortran
+ if version_string.startswith('GNU Fortran'):
+ return ('g77', m.group(1))
+
+ # gfortran only outputs a version string such as #.#.#, so check
+ # if the match is at the start of the string
+ elif m.start() == 0:
+ return ('gfortran', m.group(1))
+ else:
+ # Output probably from --version, try harder:
+ m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
+ if m:
return ('gfortran', m.group(1))
+ m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
+ if m:
+ v = m.group(1)
+ if v.startswith('0') or v.startswith('2') or v.startswith('3'):
+ # the '0' is for early g77's
+ return ('g77', v)
+ else:
+ # at some point in the 4.x series, the ' 95' was dropped
+ # from the version string
+ return ('gfortran', v)
- # If these checks fail, then raise an error to make the problem easy
- # to find.
- err = 'A valid Fortran verison was not found in this string:\n'
+ # If still nothing, raise an error to make the problem easy to find.
+ err = 'A valid Fortran version was not found in this string:\n'
raise ValueError(err + version_string)
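
For reference, a sketch of the inputs the rewritten matcher handles (the
sample strings come from test_fcompiler_gnu.py further down in this patch)::

    >>> from numpy.distutils.fcompiler.gnu import GnuFCompiler
    >>> fc = GnuFCompiler()
    >>> fc.gnu_version_match('4.8.0')              # short -dumpversion output
    ('gfortran', '4.8.0')
    >>> fc.gnu_version_match('GNU Fortran 95 (GCC) 4.1.0')   # old long form
    ('gfortran', '4.1.0')
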
def version_match(self, version_string):
@@ -59,19 +83,11 @@ class GnuFCompiler(FCompiler):
return None
return v[1]
- # 'g77 --version' results
- # SunOS: GNU Fortran (GCC 3.2) 3.2 20020814 (release)
- # Debian: GNU Fortran (GCC) 3.3.3 20040110 (prerelease) (Debian)
- # GNU Fortran (GCC) 3.3.3 (Debian 20040401)
- # GNU Fortran 0.5.25 20010319 (prerelease)
- # Redhat: GNU Fortran (GCC 3.2.2 20030222 (Red Hat Linux 3.2.2-5)) 3.2.2 20030222 (Red Hat Linux 3.2.2-5)
- # GNU Fortran (GCC) 3.4.2 (mingw-special)
-
possible_executables = ['g77', 'f77']
executables = {
'version_cmd' : [None, "-dumpversion"],
'compiler_f77' : [None, "-g", "-Wall", "-fno-second-underscore"],
- 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
+ 'compiler_f90' : None, # Use --fcompiler=gnu95 for f90 codes
'compiler_fix' : None,
'linker_so' : [None, "-g", "-Wall"],
'archiver' : ["ar", "-cr"],
@@ -92,19 +108,11 @@ class GnuFCompiler(FCompiler):
executables[key].append('-mno-cygwin')
g2c = 'g2c'
-
suggested_f90_compiler = 'gnu95'
- #def get_linker_so(self):
- # # win32 linking should be handled by standard linker
- # # Darwin g77 cannot be used as a linker.
- # #if re.match(r'(darwin)', sys.platform):
- # # return
- # return FCompiler.get_linker_so(self)
-
def get_flags_linker_so(self):
opt = self.linker_so[1:]
- if sys.platform=='darwin':
+ if sys.platform == 'darwin':
target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', None)
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
@@ -130,7 +138,7 @@ class GnuFCompiler(FCompiler):
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
- opt.append("-shared")
+ opt.append("-shared -Wl,-gc-sections -Wl,-s")
if sys.platform.startswith('sunos'):
# SunOS often has dynamically loaded symbols defined in the
# static library libg2c.a The linker doesn't like this. To
@@ -157,10 +165,12 @@ class GnuFCompiler(FCompiler):
# if windows and not cygwin, libg2c lies in a different folder
if sys.platform == 'win32' and not d.startswith('/usr/lib'):
d = os.path.normpath(d)
- if not os.path.exists(os.path.join(d, "lib%s.a" % self.g2c)):
- d2 = os.path.abspath(os.path.join(d,
- '../../../../lib'))
- if os.path.exists(os.path.join(d2, "lib%s.a" % self.g2c)):
+ path = os.path.join(d, "lib%s.a" % self.g2c)
+ if not os.path.exists(path):
+ root = os.path.join(d, *((os.pardir,)*4))
+ d2 = os.path.abspath(os.path.join(root, 'lib'))
+ path = os.path.join(d2, "lib%s.a" % self.g2c)
+ if os.path.exists(path):
opt.append(d2)
opt.append(d)
return opt
@@ -180,7 +190,7 @@ class GnuFCompiler(FCompiler):
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
- c_compiler.compiler_type=='msvc':
+ c_compiler.compiler_type == 'msvc':
# the following code is not needed (read: breaks) when using MinGW
# in case want to link F77 compiled code with MSVC
opt.append('gcc')
@@ -196,13 +206,22 @@ class GnuFCompiler(FCompiler):
def get_flags_opt(self):
v = self.get_version()
- if v and v<='3.3.3':
+ if v and v <= '3.3.3':
# With this compiler version building Fortran BLAS/LAPACK
# with -O3 caused failures in lib.lapack heevr,syevr tests.
opt = ['-O2']
+ elif v and v >= '4.6.0':
+ if is_win32():
+ # use -mincoming-stack-boundary=2
+ # due to the change to 16 byte stack alignment since GCC 4.6
+ # but 32 bit Windows ABI defines 4 bytes stack alignment
+ opt = ['-O2 -march=core2 -mtune=generic -mfpmath=sse -msse2'
+                       ' -mincoming-stack-boundary=2']
+ else:
+ opt = ['-O2 -march=x86-64 -DMS_WIN64 -mtune=generic -msse2']
else:
- opt = ['-O3']
- opt.append('-funroll-loops')
+ opt = ['-O2']
+
return opt
def _c_arch_flags(self):
@@ -234,25 +253,18 @@ class Gnu95FCompiler(GnuFCompiler):
if not v or v[0] != 'gfortran':
return None
v = v[1]
- if v>='4.':
+ if v >= '4.':
# gcc-4 series releases do not support -mno-cygwin option
pass
else:
- # use -mno-cygwin flag for gfortran when Python is not Cygwin-Python
+ # use -mno-cygwin flag for gfortran when Python is not
+ # Cygwin-Python
if sys.platform == 'win32':
for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
'compiler_fix', 'linker_so', 'linker_exe']:
self.executables[key].append('-mno-cygwin')
return v
- # 'gfortran --version' results:
- # XXX is the below right?
- # Debian: GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))
- # GNU Fortran 95 (GCC) 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)
- # OS X: GNU Fortran 95 (GCC) 4.1.0
- # GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)
- # GNU Fortran (GCC) 4.3.0 20070316 (experimental)
-
possible_executables = ['gfortran', 'f95']
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
@@ -311,10 +323,10 @@ class Gnu95FCompiler(GnuFCompiler):
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
- root = os.path.join(d, os.pardir, os.pardir, os.pardir, os.pardir)
- mingwdir = os.path.normpath(os.path.join(root, target, "lib"))
- full = os.path.join(mingwdir, "libmingwex.a")
- if os.path.exists(full):
+ root = os.path.join(d, *((os.pardir,)*4))
+ path = os.path.join(root, target, "lib")
+ mingwdir = os.path.normpath(path)
+ if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
opt.append(mingwdir)
return opt
@@ -349,14 +361,10 @@ class Gnu95FCompiler(GnuFCompiler):
return ""
def get_flags_opt(self):
- if is_win64():
- return ['-O0']
- else:
- return GnuFCompiler.get_flags_opt(self)
+ return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
- """Return true is the command supports the -arch flag for the given
- architecture."""
+    """Return true if the command supports the -arch flag for the given
+    architecture."""
newcmd = cmd[:]
fid, filename = tempfile.mkstemp(suffix=".f")
try:
@@ -378,9 +386,13 @@ if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
- compiler = GnuFCompiler()
- compiler.customize()
- print(compiler.get_version())
+ try:
+ compiler = GnuFCompiler()
+ compiler.customize()
+ print(compiler.get_version())
+ except Exception:
+ msg = get_exception()
+ print(msg)
try:
compiler = Gnu95FCompiler()
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 1d8dcd9fd..aed652ee6 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -2,15 +2,18 @@ from __future__ import division, absolute_import, print_function
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
+from numpy.distutils.msvc9compiler import MSVCCompiler
+from numpy.distutils.ccompiler import simple_version_match
+
class IntelCCompiler(UnixCCompiler):
- """ A modified Intel compiler compatible with an gcc built Python."""
+ """A modified Intel compiler compatible with a GCC-built Python."""
compiler_type = 'intel'
cc_exe = 'icc'
cc_args = 'fPIC'
- def __init__ (self, verbose=0, dry_run=0, force=0):
- UnixCCompiler.__init__ (self, verbose, dry_run, force)
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
@@ -19,6 +22,7 @@ class IntelCCompiler(UnixCCompiler):
linker_exe=compiler,
linker_so=compiler + ' -shared')
+
class IntelItaniumCCompiler(IntelCCompiler):
compiler_type = 'intele'
@@ -28,14 +32,16 @@ class IntelItaniumCCompiler(IntelCCompiler):
if cc_exe:
break
+
class IntelEM64TCCompiler(UnixCCompiler):
- """ A modified Intel x86_64 compiler compatible with a 64bit gcc built Python.
+ """
+ A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
"""
compiler_type = 'intelem'
cc_exe = 'icc -m64 -fPIC'
cc_args = "-fPIC"
- def __init__ (self, verbose=0, dry_run=0, force=0):
- UnixCCompiler.__init__ (self, verbose, dry_run, force)
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
self.cc_exe = 'icc -m64 -fPIC'
compiler = self.cc_exe
self.set_executables(compiler=compiler,
@@ -43,3 +49,35 @@ class IntelEM64TCCompiler(UnixCCompiler):
compiler_cxx=compiler,
linker_exe=compiler,
linker_so=compiler + ' -shared')
+
+
+class IntelCCompilerW(MSVCCompiler):
+ """
+ A modified Intel compiler on Windows compatible with an MSVC-built Python.
+ """
+ compiler_type = 'intelw'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+        version_match = simple_version_match(start=r'Intel\(R\).*?32,')
+ self.__version = version_match
+
+ def initialize(self, plat_name=None):
+ MSVCCompiler.initialize(self, plat_name)
+ self.cc = self.find_exe("icl.exe")
+ self.linker = self.find_exe("xilink")
+ self.compile_options = ['/nologo', '/O3', '/MD', '/W3']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/Z7',
+ '/D_DEBUG']
+
+
+class IntelEM64TCCompilerW(IntelCCompilerW):
+ """
+ A modified Intel x86_64 compiler compatible with a 64bit MSVC-built Python.
+ """
+ compiler_type = 'intelemw'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ MSVCCompiler.__init__(self, verbose, dry_run, force)
+        version_match = simple_version_match(start=r'Intel\(R\).*?64,')
+ self.__version = version_match
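
With these classes in place, numpy.distutils builds on Windows can request
the Intel compilers via ``--compiler=intelw`` or ``--compiler=intelemw``
(per the compiler_class registrations added to ccompiler.py above); a sketch
of the lookup that relies on::

    >>> from numpy.distutils import ccompiler
    >>> ccompiler.compiler_class['intelemw']
    ('intelccompiler', 'IntelEM64TCCompilerW', 'Intel C Compiler for 64-bit applications on Windows')
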
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index c720d142a..f72c3bbbb 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -30,11 +30,12 @@ else:
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
-from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
-
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
-from numpy.distutils.misc_util import msvc_runtime_library, get_build_architecture
+from distutils.errors import (DistutilsExecError, CompileError,
+ UnknownFileError)
+from numpy.distutils.misc_util import (msvc_runtime_library,
+ get_build_architecture)
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
@@ -53,8 +54,8 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
dry_run=0,
force=0):
- distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
- verbose, dry_run, force)
+ distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
+ dry_run, force)
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
@@ -80,79 +81,99 @@ class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
- # then make the inclusion of this part specific to that environment.
+ # then make the inclusion of this part specific to that
+ # environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
- # **changes: eric jones 4/11/01
- # 1. Check for import library on Windows. Build if it doesn't exist.
+ p = subprocess.Popen(['gcc', '--version'], shell=True,
+ stdout=subprocess.PIPE)
+ out_string = p.stdout.read()
+ p.stdout.close()
+
+        # Before building with MinGW-W64, generate the python import
+        # library with gendef and dlltool according to the MinGW-W64 FAQ.
+        # Use the msvc runtime import libraries provided by MinGW-W64.
+        # Don't call build_import_library() or build_msvcr_library().
+
+ if 'MinGW-W64' not in str(out_string):
-        build_import_library()
+ # **changes: eric jones 4/11/01
+ # 1. Check for import library on Windows. Build if it doesn't
+ # exist.
+ build_import_library()
- # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
- msvcr_success = build_msvcr_library()
- msvcr_dbg_success = build_msvcr_library(debug=True)
- if msvcr_success or msvcr_dbg_success:
- # add preprocessor statement for using customized msvcr lib
- self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
+ # Check for custom msvc runtime library on Windows. Build if it
+ # doesn't exist.
+ msvcr_success = build_msvcr_library()
+ msvcr_dbg_success = build_msvcr_library(debug=True)
+ if msvcr_success or msvcr_dbg_success:
+ # add preprocessor statement for using customized msvcr lib
+ self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')
# Define the MSVC version as hint for MinGW
msvcr_version = '0x%03i0' % int(msvc_runtime_library().lstrip('msvcr'))
self.define_macro('__MSVCRT_VERSION__', msvcr_version)
- # **changes: eric jones 4/11/01
- # 2. increased optimization and turned off all warnings
- # 3. also added --driver-name g++
- #self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
- # compiler_so='gcc -mno-cygwin -mdll -O2 -w',
- # linker_exe='gcc -mno-cygwin',
- # linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
- # % (self.linker, entry_point))
-
- # MS_WIN64 should be defined when building for amd64 on windows, but
- # python headers define it only for MS compilers, which has all kind of
- # bad consequences, like using Py_ModuleInit4 instead of
+ # MS_WIN64 should be defined when building for amd64 on windows,
+ # but python headers define it only for MS compilers, which has all
+ # kind of bad consequences, like using Py_ModuleInit4 instead of
# Py_ModuleInit4_64, etc... So we add it here
if get_build_architecture() == 'AMD64':
if self.gcc_version < "4.0":
self.set_executables(
compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
- compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall -Wstrict-prototypes',
+ compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'
+ ' -Wall -Wstrict-prototypes',
linker_exe='gcc -g -mno-cygwin',
linker_so='gcc -g -mno-cygwin -shared')
else:
# gcc-4 series releases do not support -mno-cygwin option
self.set_executables(
- compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
- compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
- linker_exe='gcc -g',
- linker_so='gcc -g -shared')
+ compiler='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
+ ' -O2 -msse2 -Wall',
+ compiler_so='gcc -march=x86-64 -mtune=generic -DMS_WIN64'
+ ' -O2 -msse2 -Wall -Wstrict-prototypes',
+ linker_exe='gcc',
+ linker_so='gcc -shared -Wl,-gc-sections -Wl,-s')
else:
if self.gcc_version <= "3.0.0":
- self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
- compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
- linker_exe='g++ -mno-cygwin',
- linker_so='%s -mno-cygwin -mdll -static %s'
- % (self.linker, entry_point))
+ self.set_executables(
+ compiler='gcc -mno-cygwin -O2 -w',
+ compiler_so='gcc -mno-cygwin -mdll -O2 -w'
+ ' -Wstrict-prototypes',
+ linker_exe='g++ -mno-cygwin',
+ linker_so='%s -mno-cygwin -mdll -static %s' %
+ (self.linker, entry_point))
elif self.gcc_version < "4.0":
- self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
- compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes',
- linker_exe='g++ -mno-cygwin',
- linker_so='g++ -mno-cygwin -shared')
+ self.set_executables(
+ compiler='gcc -mno-cygwin -O2 -Wall',
+ compiler_so='gcc -mno-cygwin -O2 -Wall'
+ ' -Wstrict-prototypes',
+ linker_exe='g++ -mno-cygwin',
+ linker_so='g++ -mno-cygwin -shared')
else:
- # gcc-4 series releases do not support -mno-cygwin option
- self.set_executables(compiler='gcc -O2 -Wall',
- compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
- linker_exe='g++ ',
- linker_so='g++ -shared')
- # added for python2.3 support
- # we can't pass it through set_executables because pre 2.2 would fail
+            # gcc-4 series releases do not support the -mno-cygwin option.
+            # The i686 build needs '-mincoming-stack-boundary=2' due to the
+            # ABI incompatibility with the Win32 ABI
+ self.set_executables(
+ compiler='gcc -O2 -march=core2 -mtune=generic'
+ ' -mfpmath=sse -msse2'
+ ' -mincoming-stack-boundary=2 -Wall',
+ compiler_so='gcc -O2 -march=core2 -mtune=generic'
+ ' -mfpmath=sse -msse2'
+ ' -mincoming-stack-boundary=2 -Wall'
+ ' -Wstrict-prototypes',
+ linker_exe='g++ ',
+ linker_so='g++ -shared -Wl,-gc-sections -Wl,-s')
+        # added for python2.3 support; we can't pass it through
+        # set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
- # Maybe we should also append -mthreads, but then the finished
- # dlls need another dll (mingwm10.dll see Mingw32 docs)
- # (-mthreads: Support thread-safe exception handling on `Mingw32')
+ # Maybe we should also append -mthreads, but then the finished dlls
+ # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
+ # thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
@@ -331,7 +352,8 @@ def build_msvcr_library(debug=False):
# Skip using a custom library for versions < MSVC 8.0
if int(msvcr_name.lstrip('msvcr')) < 80:
- log.debug('Skip building msvcr library: custom functionality not present')
+ log.debug('Skip building msvcr library:'
+ ' custom functionality not present')
return False
if debug:
@@ -341,14 +363,16 @@ def build_msvcr_library(debug=False):
out_name = "lib%s.a" % msvcr_name
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
- log.debug('Skip building msvcr library: "%s" exists' % (out_file))
+ log.debug('Skip building msvcr library: "%s" exists' %
+ (out_file,))
return True
# Find the msvcr dll
msvcr_dll_name = msvcr_name + '.dll'
dll_file = find_dll(msvcr_dll_name)
if not dll_file:
- log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name)
+ log.warn('Cannot build msvcr library: "%s" not found' %
+ msvcr_dll_name)
return False
def_name = "lib%s.def" % msvcr_name
@@ -387,14 +411,15 @@ def _build_import_library_amd64():
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix, 'libs', out_name)
if os.path.isfile(out_file):
- log.debug('Skip building import library: "%s" exists' % (out_file))
+ log.debug('Skip building import library: "%s" exists' %
+                  (out_file,))
return
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix, 'libs', def_name)
- log.info('Building import library (arch=AMD64): "%s" (from %s)' \
- % (out_file, dll_file))
+ log.info('Building import library (arch=AMD64): "%s" (from %s)' %
+ (out_file, dll_file))
generate_def(dll_file, def_file)
@@ -432,9 +457,6 @@ def _build_import_library_x86():
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
- #if not success:
- # msg = "Couldn't find import library, and failed to build it."
- # raise DistutilsPlatformError(msg)
return
#=====================================
@@ -462,16 +484,17 @@ if sys.platform == 'win32':
# one, and we can't retrieve it from python
_MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
_MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
- # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 on Windows XP:
+ # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
+ # on Windows XP:
_MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
_MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
del major, minor, rest
except ImportError:
- # If we are here, means python was not built with MSVC. Not sure what to do
- # in that case: manifest building will fail, but it should not be used in
- # that case anyway
+    # If we are here, it means Python was not built with MSVC. Not sure
+    # what to do in that case: manifest building will fail, but it should
+    # not be used in that case anyway
log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
@@ -480,8 +503,8 @@ def msvc_manifest_xml(maj, min):
try:
fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)]
except KeyError:
- raise ValueError("Version %d,%d of MSVCRT not supported yet" \
- % (maj, min))
+ raise ValueError("Version %d,%d of MSVCRT not supported yet" %
+ (maj, min))
# Don't be fooled, it looks like an XML, but it is not. In particular, it
# should not have any space before starting, and its size should be
# divisible by 4, most likely for alignement constraints when the xml is
@@ -557,7 +580,7 @@ def manifest_name(config):
return root + exext + ".manifest"
def rc_name(config):
- # Get configest name (including suffix)
+ # Get configtest name (including suffix)
root = configtest_name(config)
return root + ".rc"
@@ -574,9 +597,3 @@ def generate_manifest(config):
config.temp_files.append(manifest_name(config))
man.write(manxml)
man.close()
- # # Write the rc file
- # manrc = manifest_rc(manifest_name(self), "exe")
- # rc = open(rc_name(self), "w")
- # self.temp_files.append(manrc)
- # rc.write(manrc)
- # rc.close()
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index b2a49843a..3459f67f2 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -1766,6 +1766,23 @@ class openblas_lapack_info(openblas_info):
res = False
finally:
shutil.rmtree(tmpdir)
+ if sys.platform == 'win32' and not res:
+ c = distutils.ccompiler.new_compiler(compiler='mingw32')
+ tmpdir = tempfile.mkdtemp()
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'])
+ res = True
+ except distutils.ccompiler.LinkError:
+ res = False
+ finally:
+ shutil.rmtree(tmpdir)
return res
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index ecbd85e76..7ca99db22 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -1,6 +1,6 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import *
+from numpy.testing import TestCase, assert_, run_module_suite
import numpy.distutils.fcompiler
@@ -14,8 +14,19 @@ g77_version_strings = [
]
gfortran_version_strings = [
+ ('GNU Fortran 95 (GCC 4.0.3 20051023 (prerelease) (Debian 4.0.2-3))',
+ '4.0.3'),
+ ('GNU Fortran 95 (GCC) 4.1.0', '4.1.0'),
+ ('GNU Fortran 95 (GCC) 4.2.0 20060218 (experimental)', '4.2.0'),
+ ('GNU Fortran (GCC) 4.3.0 20070316 (experimental)', '4.3.0'),
+ ('GNU Fortran (rubenvb-4.8.0) 4.8.0', '4.8.0'),
('4.8.0', '4.8.0'),
('4.0.3-7', '4.0.3'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n4.9.1",
+ '4.9.1'),
+ ("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
+ "gfortran: warning: yet another warning\n4.9.1",
+ '4.9.1')
]
class TestG77Versions(TestCase):
@@ -31,7 +42,7 @@ class TestG77Versions(TestCase):
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-class TestGortranVersions(TestCase):
+class TestGFortranVersions(TestCase):
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
new file mode 100644
index 000000000..11dbf5f52
--- /dev/null
+++ b/numpy/f2py/__main__.py
@@ -0,0 +1,23 @@
+# See http://cens.ioc.ee/projects/f2py2e/
+import os, sys
+for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
+ try:
+ i=sys.argv.index("--"+mode)
+ del sys.argv[i]
+ break
+ except ValueError: pass
+os.environ["NO_SCIPY_IMPORT"]="f2py"
+if mode=="g3-numpy":
+    sys.stderr.write("G3 f2py support is not implemented, yet.\n")
+ sys.exit(1)
+elif mode=="2e-numeric":
+ from f2py2e import main
+elif mode=="2e-numarray":
+ sys.argv.append("-DNUMARRAY")
+ from f2py2e import main
+elif mode=="2e-numpy":
+ from numpy.f2py import main
+else:
+    sys.stderr.write("Unknown mode: " + repr(mode) + "\n")
+ sys.exit(1)
+main()
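
This makes the f2py entry point importable, so ``python -m numpy.f2py``
works without the generated script. A rough standalone equivalent of the
mode-selection loop above (illustrative values)::

    >>> argv = ["f2py", "--2e-numpy", "-m", "demo", "demo.f90"]
    >>> mode = "2e-numpy"
    >>> for m in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
    ...     if "--" + m in argv:
    ...         argv.remove("--" + m)
    ...         mode = m
    ...         break
    ...
    >>> mode, argv
    ('2e-numpy', ['f2py', '-m', 'demo', 'demo.f90'])
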
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 536a576dd..6155165ba 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -178,14 +178,14 @@ if os.path.isfile('.f2py_f2cmap'):
f = open('.f2py_f2cmap', 'r')
d = eval(f.read(), {}, {})
f.close()
- for k, d1 in d.items():
- for k1 in d1.keys():
+ for k, d1 in list(d.items()):
+ for k1 in list(d1.keys()):
d1[k1.lower()] = d1[k1]
d[k.lower()] = d[k]
- for k in d.keys():
+ for k in list(d.keys()):
if k not in f2cmap_all:
f2cmap_all[k]={}
- for k1 in d[k].keys():
+ for k1 in list(d[k].keys()):
if d[k][k1] in c2py_map:
if k1 in f2cmap_all[k]:
outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1]))
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index a27a001a9..3e2cf6867 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -50,32 +50,10 @@ def configuration(parent_package='',top_path=None):
if newer(__file__, target):
log.info('Creating %s', target)
f = open(target, 'w')
- f.write('''\
-#!%s
-# See http://cens.ioc.ee/projects/f2py2e/
-import os, sys
-for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
- try:
- i=sys.argv.index("--"+mode)
- del sys.argv[i]
- break
- except ValueError: pass
-os.environ["NO_SCIPY_IMPORT"]="f2py"
-if mode=="g3-numpy":
- sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
- sys.exit(1)
-elif mode=="2e-numeric":
- from f2py2e import main
-elif mode=="2e-numarray":
- sys.argv.append("-DNUMARRAY")
- from f2py2e import main
-elif mode=="2e-numpy":
- from numpy.f2py import main
-else:
- sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
- sys.exit(1)
-main()
-'''%(sys.executable))
+ f.write('#!%s\n' % (sys.executable))
+ mainloc = os.path.join(os.path.dirname(__file__), "__main__.py")
+ with open(mainloc) as mf:
+ f.write(mf.read())
f.close()
return target
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 316704b42..44bd48df7 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -160,7 +160,7 @@ class LineSplitter(object):
delimiter : str, int, or sequence of ints, optional
If a string, character used to delimit consecutive fields.
If an integer or a sequence of integers, width(s) of each field.
- comment : str, optional
+ comments : str, optional
Character used to mark the beginning of a comment. Default is '#'.
autostrip : bool, optional
Whether to strip each individual field. Default is True.
@@ -271,7 +271,7 @@ class NameValidator(object):
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
- casesensitive : {True, False, 'upper', 'lower'}, optional
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
* If True, field names are case-sensitive.
* If False or 'upper', field names are converted to upper case.
* If 'lower', field names are converted to lower case.
@@ -341,7 +341,7 @@ class NameValidator(object):
defaultfmt : str, optional
Default format string, used if validating a given string
reduces its length to zero.
- nboutput : integer, optional
+ nbfields : integer, optional
Final number of validated names, used to expand or shrink the
initial list of names.
@@ -518,12 +518,18 @@ class StringConverter(object):
"""
#
_mapper = [(nx.bool_, str2bool, False),
- (nx.integer, int, -1),
- (nx.floating, float, nx.nan),
- (complex, _bytes_to_complex, nx.nan + 0j),
- (nx.string_, bytes, asbytes('???'))]
+ (nx.integer, int, -1)]
+
+ # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.integer is nx.int32.
+ if nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize:
+ _mapper.append((nx.int64, int, -1))
+
+ _mapper.extend([(nx.floating, float, nx.nan),
+ (complex, _bytes_to_complex, nx.nan + 0j),
+ (nx.string_, bytes, asbytes('???'))])
+
(_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
- #
@classmethod
def _getdtype(cls, val):
@@ -677,7 +683,22 @@ class StringConverter(object):
def _strict_call(self, value):
try:
- return self.func(value)
+
+ # We check if we can convert the value using the current function
+ new_value = self.func(value)
+
+ # In addition to having to check whether func can convert the
+ # value, we also have to make sure that we don't get overflow
+ # errors for integers.
+ if self.func is int:
+ try:
+ np.array(value, dtype=self.type)
+ except OverflowError:
+ raise ValueError
+
+ # We're still here so we can now return the new value
+ return new_value
+
except ValueError:
if value.strip() in self.missing_values:
if not self._status:
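
A sketch of the overflow handling this adds (hypothetical value; assuming a
build with this patch, where the converter operates on bytes)::

    >>> from numpy.lib._iotools import StringConverter
    >>> conv = StringConverter(int)
    >>> # 2**100 parses fine with int(), but writing it into the converter's
    >>> # integer dtype overflows; the new check turns that into ValueError,
    >>> # so upgrade() can promote to a wider (floating) type instead of
    >>> # silently wrapping around.
    >>> conv.upgrade(b'1267650600228229401496703205376')  # doctest: +SKIP
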
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index cb24eb24e..7776d7e76 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -97,10 +97,11 @@ def unique(ar, return_index=False, return_inverse=False, return_counts=False):
"""
Find the unique elements of an array.
- Returns the sorted unique elements of an array. There are two optional
+ Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements: the indices of the input array
- that give the unique values, and the indices of the unique array that
- reconstruct the input array.
+ that give the unique values, the indices of the unique array that
+ reconstruct the input array, and the number of times each unique value
+ comes up in the input array.
Parameters
----------
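
The third optional output referred to here is ``return_counts`` (added in
numpy 1.9); for instance::

    >>> import numpy as np
    >>> vals, counts = np.unique([1, 1, 2, 2, 2, 3], return_counts=True)
    >>> vals
    array([1, 2, 3])
    >>> counts
    array([2, 3, 1])
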
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 4ff0a660f..66a1b356c 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -314,21 +314,19 @@ def _write_array_header(fp, d, version=None):
header = header + ' '*topad + '\n'
header = asbytes(_filter_header(header))
- if len(header) >= (256*256) and version == (1, 0):
- raise ValueError("header does not fit inside %s bytes required by the"
- " 1.0 format" % (256*256))
- if len(header) < (256*256):
- header_len_str = struct.pack('<H', len(header))
+ hlen = len(header)
+ if hlen < 256*256 and version in (None, (1, 0)):
version = (1, 0)
- elif len(header) < (2**32):
- header_len_str = struct.pack('<I', len(header))
+ header_prefix = magic(1, 0) + struct.pack('<H', hlen)
+ elif hlen < 2**32 and version in (None, (2, 0)):
version = (2, 0)
+ header_prefix = magic(2, 0) + struct.pack('<I', hlen)
else:
- raise ValueError("header does not fit inside 4 GiB required by "
- "the 2.0 format")
+ msg = "Header length %s too big for version=%s"
+ msg %= (hlen, version)
+ raise ValueError(msg)
- fp.write(magic(*version))
- fp.write(header_len_str)
+ fp.write(header_prefix)
fp.write(header)
return version
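
The two prefixes the rewritten branch can emit, spelled out (b'\x93NUMPY' is
format.MAGIC_PREFIX; the header lengths are hypothetical, Python 3 reprs
shown)::

    >>> import struct
    >>> magic = b'\x93NUMPY'
    >>> magic + bytes(bytearray([1, 0])) + struct.pack('<H', 118)
    b'\x93NUMPY\x01\x00v\x00'
    >>> magic + bytes(bytearray([2, 0])) + struct.pack('<I', 70000)
    b'\x93NUMPY\x02\x00p\x11\x01\x00'
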
@@ -389,7 +387,7 @@ def read_array_header_1_0(fp):
If the data is invalid.
"""
- _read_array_header(fp, version=(1, 0))
+ return _read_array_header(fp, version=(1, 0))
def read_array_header_2_0(fp):
"""
@@ -422,7 +420,7 @@ def read_array_header_2_0(fp):
If the data is invalid.
"""
- _read_array_header(fp, version=(2, 0))
+ return _read_array_header(fp, version=(2, 0))
def _filter_header(s):
@@ -517,7 +515,7 @@ def _read_array_header(fp, version):
return d['shape'], d['fortran_order'], dtype
-def write_array(fp, array, version=None):
+def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
Write an array to an NPY file, including a header.
@@ -535,11 +533,18 @@ def write_array(fp, array, version=None):
version : (int, int) or None, optional
The version number of the format. None means use the oldest
supported version that is able to store the data. Default: None
+ allow_pickle : bool, optional
+ Whether to allow writing pickled data. Default: True
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass to pickle.dump, excluding
+ 'protocol'. These are only useful when pickling objects in object
+        arrays on Python 3 in a Python 2 compatible format.
Raises
------
ValueError
- If the array cannot be persisted.
+ If the array cannot be persisted. This includes the case of
+ allow_pickle=False and array being an object array.
Various other errors
If the array contains Python objects as part of its dtype, the
process of pickling them may raise various errors if the objects
@@ -561,7 +566,12 @@ def write_array(fp, array, version=None):
# We contain Python objects so we cannot write out the data
# directly. Instead, we will pickle it out with version 2 of the
# pickle protocol.
- pickle.dump(array, fp, protocol=2)
+ if not allow_pickle:
+ raise ValueError("Object arrays cannot be saved when "
+ "allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ pickle.dump(array, fp, protocol=2, **pickle_kwargs)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
@@ -580,7 +590,7 @@ def write_array(fp, array, version=None):
fp.write(chunk.tobytes('C'))
-def read_array(fp):
+def read_array(fp, allow_pickle=True, pickle_kwargs=None):
"""
Read an array from an NPY file.
@@ -589,6 +599,12 @@ def read_array(fp):
fp : file_like object
If this is not a real file object, then this may take extra memory
and time.
+ allow_pickle : bool, optional
+ Whether to allow reading pickled data. Default: True
+ pickle_kwargs : dict
+ Additional keyword arguments to pass to pickle.load. These are only
+ useful when loading object arrays saved on Python 2 when using
+ Python 3.
Returns
-------
@@ -598,7 +614,8 @@ def read_array(fp):
Raises
------
ValueError
- If the data is invalid.
+ If the data is invalid, or allow_pickle=False and the file contains
+ an object array.
"""
version = read_magic(fp)
@@ -612,7 +629,20 @@ def read_array(fp):
# Now read the actual data.
if dtype.hasobject:
# The array contained Python objects. We need to unpickle the data.
- array = pickle.load(fp)
+ if not allow_pickle:
+ raise ValueError("Object arrays cannot be loaded when "
+ "allow_pickle=False")
+ if pickle_kwargs is None:
+ pickle_kwargs = {}
+ try:
+ array = pickle.load(fp, **pickle_kwargs)
+ except UnicodeError as err:
+ if sys.version_info[0] >= 3:
+ # Friendlier error message
+ raise UnicodeError("Unpickling a python object failed: %r\n"
+ "You may need to pass the encoding= option "
+ "to numpy.load" % (err,))
+ raise
else:
if isfileobj(fp):
# We can use the fast fromfile() function.
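
A sketch of the failure mode the new flag introduces, as an in-memory round
trip (assuming a build with this patch)::

    >>> from io import BytesIO
    >>> import numpy as np
    >>> from numpy.lib import format
    >>> buf = BytesIO()
    >>> format.write_array(buf, np.array([{'answer': 42}], dtype=object))
    >>> _ = buf.seek(0)
    >>> format.read_array(buf, allow_pickle=False)
    Traceback (most recent call last):
        ...
    ValueError: Object arrays cannot be loaded when allow_pickle=False
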
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 2baf83830..d22e8c047 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -906,9 +906,9 @@ def gradient(f, *varargs, **kwargs):
Returns
-------
- gradient : ndarray
- N arrays of the same shape as `f` giving the derivative of `f` with
- respect to each dimension.
+    gradient : list of ndarray
+        Each element of the list has the same shape as `f`, giving the
+        derivative of `f` with respect to the corresponding dimension.
Examples
--------
@@ -918,6 +918,10 @@ def gradient(f, *varargs, **kwargs):
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+    For two dimensional arrays, the return value is a list of two arrays,
+    ordered by axis. In this example the first array is the gradient along
+    the rows and the second along the columns:
+
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
@@ -1949,54 +1953,59 @@ def cov(m, y=None, rowvar=1, bias=0, ddof=None):
return (dot(X, X.T.conj()) / fact).squeeze()
-def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
+def corrcoef(x, y=None, rowvar=1, bias=np._NoValue, ddof=np._NoValue):
"""
- Return correlation coefficients.
+ Return Pearson product-moment correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
- relationship between the correlation coefficient matrix, `P`, and the
+ relationship between the correlation coefficient matrix, `R`, and the
covariance matrix, `C`, is
- .. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
+ .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
- The values of `P` are between -1 and 1, inclusive.
+ The values of `R` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
- Each row of `m` represents a variable, and each column a single
+ Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
- shape as `m`.
+ shape as `x`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
- bias : int, optional
- Default normalization is by ``(N - 1)``, where ``N`` is the number of
- observations (unbiased estimate). If `bias` is 1, then
- normalization is by ``N``. These values can be overridden by using
- the keyword ``ddof`` in numpy versions >= 1.5.
- ddof : int, optional
- .. versionadded:: 1.5
- If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
- the number of observations; this overrides the value implied by
- ``bias``. The default value is ``None``.
+    bias : _NoValue, optional
+        .. deprecated:: 1.10.0
+           Has no effect, do not use.
+    ddof : _NoValue, optional
+        .. deprecated:: 1.10.0
+           Has no effect, do not use.
Returns
-------
- out : ndarray
+ R : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
+ Notes
+ -----
+ This function accepts but discards arguments `bias` and `ddof`. This is
+ for backwards compatibility with previous versions of this function. These
+ arguments had no effect on the return values of the function and can be
+ safely ignored in this and previous versions of numpy.
"""
- c = cov(x, y, rowvar, bias, ddof)
+ if bias is not np._NoValue or ddof is not np._NoValue:
+        warnings.warn('bias and ddof have no effect and are deprecated',
+ DeprecationWarning)
+ c = cov(x, y, rowvar)
try:
d = diag(c)
except ValueError: # scalar covariance
@@ -3730,6 +3739,7 @@ def insert(arr, obj, values, axis=None):
[3, 5, 3]])
Difference between sequence and scalars:
+
>>> np.insert(a, [1], [[1],[2],[3]], axis=1)
array([[1, 1, 1],
[2, 2, 2],
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index eb9aad6ad..e97338106 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -480,7 +480,7 @@ class ndenumerate(object):
Parameters
----------
- a : ndarray
+ arr : ndarray
Input array.
See Also
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 0632ba1f8..ec89397a0 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -164,6 +164,12 @@ class NpzFile(object):
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
+ allow_pickle : bool, optional
+ Allow loading pickled data. Default: True
+ pickle_kwargs : dict, optional
+ Additional keyword arguments to pass on to pickle.load.
+ These are only useful when loading object arrays saved on
+ Python 2 when using Python 3.
Parameters
----------
@@ -195,12 +201,15 @@ class NpzFile(object):
"""
- def __init__(self, fid, own_fid=False):
+ def __init__(self, fid, own_fid=False, allow_pickle=True,
+ pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
+ self.allow_pickle = allow_pickle
+ self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
@@ -256,7 +265,9 @@ class NpzFile(object):
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
- return format.read_array(bytes)
+ return format.read_array(bytes,
+ allow_pickle=self.allow_pickle,
+ pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
@@ -289,7 +300,8 @@ class NpzFile(object):
return self.files.__contains__(key)
-def load(file, mmap_mode=None):
+def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
+ encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
@@ -306,6 +318,23 @@ def load(file, mmap_mode=None):
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
+ allow_pickle : bool, optional
+ Allow loading pickled object arrays stored in npy files. Reasons for
+ disallowing pickles include security, as loading pickled data can
+ execute arbitrary code. If pickles are disallowed, loading object
+ arrays will fail.
+ Default: True
+ fix_imports : bool, optional
+ Only useful when loading Python 2 generated pickled files on Python 3,
+ which includes npy/npz files containing object arrays. If `fix_imports`
+ is True, pickle will try to map the old Python 2 names to the new names
+ used in Python 3.
+ encoding : str, optional
+ What encoding to use when reading Python 2 strings. Only useful when
+ loading Python 2 generated pickled files on Python 3, which includes
+ npy/npz files containing object arrays. Values other than 'latin1',
+ 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+ data. Default: 'ASCII'
Returns
-------
@@ -317,6 +346,8 @@ def load(file, mmap_mode=None):
------
IOError
If the input file does not exist or cannot be read.
+ ValueError
+        The file contains an object array, but allow_pickle=False was given.
See Also
--------
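
Usage sketch of the new keywords (the file name matches the py2-objarr.npy
test data added elsewhere in this patch; the path is illustrative)::

    >>> import numpy as np
    >>> np.load('py2-objarr.npy', encoding='latin1')     # doctest: +SKIP
    >>> np.load('py2-objarr.npy', allow_pickle=False)    # doctest: +SKIP
    Traceback (most recent call last):
        ...
    ValueError: Object arrays cannot be loaded when allow_pickle=False
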
@@ -381,6 +412,26 @@ def load(file, mmap_mode=None):
else:
fid = file
+ if encoding not in ('ASCII', 'latin1', 'bytes'):
+ # The 'encoding' value for pickle also affects what encoding
+ # the serialized binary data of Numpy arrays is loaded
+ # in. Pickle does not pass on the encoding information to
+ # Numpy. The unpickling code in numpy.core.multiarray is
+ # written to assume that unicode data appearing where binary
+ # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+ #
+ # Other encoding values can corrupt binary data, and we
+ # purposefully disallow them. For the same reason, the errors=
+ # argument is not exposed, as values other than 'strict'
+    # can similarly silently corrupt numerical data.
+ raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+ if sys.version_info[0] >= 3:
+ pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+ else:
+ # Nothing to do on Python 2
+ pickle_kwargs = {}
+
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
@@ -392,17 +443,22 @@ def load(file, mmap_mode=None):
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
- return NpzFile(fid, own_fid=tmp)
+ return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
- return format.read_array(fid)
+ return format.read_array(fid, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
+ if not allow_pickle:
+            raise ValueError("allow_pickle=False, but file appears to "
+                             "contain pickled data")
try:
- return pickle.load(fid)
+ return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
@@ -411,7 +467,7 @@ def load(file, mmap_mode=None):
fid.close()
-def save(file, arr):
+def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
@@ -422,6 +478,19 @@ def save(file, arr):
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
+ allow_pickle : bool, optional
+ Allow saving object arrays using Python pickles. Reasons for disallowing
+ pickles include security (loading pickled data can execute arbitrary
+ code) and portability (pickled objects may not be loadable on different
+ Python installations, for example if the stored objects require libraries
+ that are not available, and not all pickled data is compatible between
+ Python 2 and Python 3).
+ Default: True
+ fix_imports : bool, optional
+ Only useful when object arrays pickled on Python 3 must be readable
+ by Python 2. If `fix_imports` is True, pickle
+ will try to map the new Python 3 names to the old module names used in
+ Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
@@ -458,9 +527,16 @@ def save(file, arr):
else:
fid = file
+ if sys.version_info[0] >= 3:
+ pickle_kwargs = dict(fix_imports=fix_imports)
+ else:
+ # Nothing to do on Python 2
+ pickle_kwargs = None
+
try:
arr = np.asanyarray(arr)
- format.write_array(fid, arr)
+ format.write_array(fid, arr, allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
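The save-side counterpart, as a minimal sketch (again with hypothetical file names): plain numeric arrays never need pickling, so ``allow_pickle=False`` only rejects object arrays.

    import numpy as np

    np.save('plain.npy', np.arange(4), allow_pickle=False)   # fine

    try:
        np.save('objarr.npy', np.array([None], dtype=object),
                allow_pickle=False)
    except ValueError:
        pass   # object arrays require pickling and are refused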
@@ -572,7 +648,7 @@ def savez_compressed(file, *args, **kwds):
_savez(file, args, kwds, True)
-def _savez(file, args, kwds, compress):
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
@@ -606,7 +682,9 @@ def _savez(file, args, kwds, compress):
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
- format.write_array(fid, np.asanyarray(val))
+ format.write_array(fid, np.asanyarray(val),
+ allow_pickle=allow_pickle,
+ pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
@@ -640,7 +718,7 @@ def _getconv(dtype):
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
- return complex
+ return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
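With the converter now decoding bytes via ``asstr`` before calling ``complex``, complex columns written by Python itself round-trip through ``loadtxt`` on Python 3; a small sketch:

    import numpy as np
    from io import StringIO

    s = StringIO("(1+1j) (1-1j)")
    np.loadtxt(s, dtype=complex)    # -> array([ 1.+1.j,  1.-1.j])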
@@ -667,8 +745,9 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
- comments : str, optional
- The character used to indicate the start of a comment;
+ comments : str or sequence, optional
+ The characters or list of characters used to indicate the start of a
+ comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
@@ -741,7 +820,14 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
"""
# Type conversions for Py3 convenience
if comments is not None:
- comments = asbytes(comments)
+ if isinstance(comments, (basestring, bytes)):
+ comments = [asbytes(comments)]
+ else:
+ comments = [asbytes(comment) for comment in comments]
+
+ # Compile regex for comments beforehand
+ comments = (re.escape(comment) for comment in comments)
+ regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
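Because each comment token is regex-escaped and the tokens are joined with ``|``, any of several (possibly multi-character) markers can start a comment; a sketch:

    import numpy as np
    from io import StringIO

    s = StringIO("# header\n1,2,3\n@ note\n4,5,6 // trailing\n")
    np.loadtxt(s, dtype=int, delimiter=',', comments=['#', '@', '//'])
    # -> array([[1, 2, 3], [4, 5, 6]])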
@@ -813,11 +899,16 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return tuple(ret)
def split_line(line):
- """Chop off comments, strip, and split at delimiter."""
- if comments is None:
- line = asbytes(line).strip(asbytes('\r\n'))
- else:
- line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
+ """Chop off comments, strip, and split at delimiter.
+
+ Note that although the file is opened as text, this function
+ returns bytes.
+
+ """
+ line = asbytes(line)
+ if comments is not None:
+ line = regex_comments.split(line, maxsplit=1)[0]
+ line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
@@ -1240,8 +1331,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
- skip_rows : int, optional
- `skip_rows` was deprecated in numpy 1.5, and will be removed in
+ skiprows : int, optional
+ `skiprows` was deprecated in numpy 1.5, and will be removed in
numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
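For reference, a minimal sketch of the replacement keyword:

    import numpy as np
    from io import BytesIO

    s = BytesIO(b"header line\n1 2\n3 4\n")
    np.genfromtxt(s, skip_header=1)   # -> array([[ 1.,  2.], [ 3.,  4.]])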
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index a61b1749b..4ae1079d2 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -268,7 +268,7 @@ def izip_records(seqarrays, fill_value=None, flatten=True):
Parameters
----------
- seqarray : sequence of arrays
+ seqarrays : sequence of arrays
Sequence of arrays.
fill_value : {None, integer}
Value used to pad shorter iterables.
@@ -683,7 +683,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
Parameters
----------
- seqarrays : array or sequence
+ arrays : array or sequence
Sequence of input arrays.
defaults : dictionary, optional
Dictionary mapping field names to the corresponding default values.
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 2d18c5bc8..011434dda 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -850,7 +850,12 @@ def tile(A, reps):
except TypeError:
tup = (reps,)
d = len(tup)
- c = _nx.array(A, copy=False, subok=True, ndmin=d)
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
+ # Fixes the problem that the function does not make a copy if A is a
+ # numpy array and the repetitions are 1 in all dimensions
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
+ else:
+ c = _nx.array(A, copy=False, subok=True, ndmin=d)
shape = list(c.shape)
n = max(c.size, 1)
if (d < c.ndim):
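The behavior the gh-4679 fix guarantees, sketched: when every repetition is 1, ``tile`` now returns an independent copy rather than the input array itself.

    import numpy as np

    a = np.arange(5)
    b = np.tile(a, 1)   # all reps == 1: a copy is returned
    b += 2
    a                   # unchanged -> array([0, 1, 2, 3, 4])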
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index b81307a65..e7649cb60 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -9,7 +9,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
-__all__ = ['broadcast_arrays']
+__all__ = ['broadcast_to', 'broadcast_arrays']
+
class DummyArray(object):
"""Dummy object that just exists to hang __array_interface__ dictionaries
@@ -20,6 +21,20 @@ class DummyArray(object):
self.__array_interface__ = interface
self.base = base
+
+def _maybe_view_as_subclass(original_array, new_array):
+ if type(original_array) is not type(new_array):
+ # if input was an ndarray subclass and subclasses were OK,
+ # then view the result as that subclass.
+ new_array = new_array.view(type=type(original_array))
+ # Since we have done something akin to a view from original_array, we
+ # should let the subclass finalize (if it has it implemented, i.e., is
+ # not None).
+ if new_array.__array_finalize__:
+ new_array.__array_finalize__(original_array)
+ return new_array
+
+
def as_strided(x, shape=None, strides=None, subok=False):
""" Make an ndarray from the given array with the given shape and strides.
"""
@@ -31,18 +46,85 @@ def as_strided(x, shape=None, strides=None, subok=False):
if strides is not None:
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
- # Make sure dtype is correct in case of custom dtype
- if array.dtype.kind == 'V':
+
+ if array.dtype.fields is None and x.dtype.fields is not None:
+ # This should only happen if x.dtype is [('', 'Vx')]
array.dtype = x.dtype
- if type(x) is not type(array):
- # if input was an ndarray subclass and subclasses were OK,
- # then view the result as that subclass.
- array = array.view(type=type(x))
- # Since we have done something akin to a view from x, we should let
- # the subclass finalize (if it has it implemented, i.e., is not None).
- if array.__array_finalize__:
- array.__array_finalize__(x)
- return array
+
+ return _maybe_view_as_subclass(x, array)
+
+
+def _broadcast_to(array, shape, subok, readonly):
+ shape = tuple(shape) if np.iterable(shape) else (shape,)
+ array = np.array(array, copy=False, subok=subok)
+ if not shape and array.shape:
+ raise ValueError('cannot broadcast a non-scalar to a scalar array')
+ if any(size < 0 for size in shape):
+ raise ValueError('all elements of broadcast shape must be non-'
+ 'negative')
+ broadcast = np.nditer(
+ (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
+ op_flags=['readonly'], itershape=shape, order='C').itviews[0]
+ result = _maybe_view_as_subclass(array, broadcast)
+ if not readonly and array.flags.writeable:
+ result.flags.writeable = True
+ return result
+
+
+def broadcast_to(array, shape, subok=False):
+ """Broadcast an array to a new shape.
+
+ Parameters
+ ----------
+ array : array_like
+ The array to broadcast.
+ shape : tuple
+ The shape of the desired array.
+ subok : bool, optional
+ If True, then sub-classes will be passed-through, otherwise
+ the returned array will be forced to be a base-class array (default).
+
+ Returns
+ -------
+ broadcast : array
+ A readonly view on the original array with the given shape. It is
+ typically not contiguous. Furthermore, more than one element of a
+ broadcasted array may refer to a single memory location.
+
+ Raises
+ ------
+ ValueError
+ If the array is not compatible with the new shape according to NumPy's
+ broadcasting rules.
+
+ Examples
+ --------
+ >>> x = np.array([1, 2, 3])
+ >>> np.broadcast_to(x, (3, 3))
+ array([[1, 2, 3],
+ [1, 2, 3],
+ [1, 2, 3]])
+ """
+ return _broadcast_to(array, shape, subok=subok, readonly=True)
+
+
+def _broadcast_shape(*args):
+ """Returns the shape of the ararys that would result from broadcasting the
+ supplied arrays against each other.
+ """
+ if not args:
+ raise ValueError('must provide at least one argument')
+ if len(args) == 1:
+ # a single argument does not work with np.broadcast
+ return np.asarray(args[0]).shape
+ # use the old-iterator because np.nditer does not handle size 0 arrays
+ # consistently
+ b = np.broadcast(*args[:32])
+ # unfortunately, it cannot handle 32 or more arguments directly
+ for pos in range(32, len(args), 31):
+ b = np.broadcast(b, *args[pos:(pos + 31)])
+ return b.shape
+
def broadcast_arrays(*args, **kwargs):
"""
@@ -87,55 +169,24 @@ def broadcast_arrays(*args, **kwargs):
[3, 3, 3]])]
"""
+ # nditer is not used here to avoid the limit of 32 arrays.
+ # Otherwise, something like the following one-liner would suffice:
+ # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
+ # order='C').itviews
+
subok = kwargs.pop('subok', False)
if kwargs:
raise TypeError('broadcast_arrays() got an unexpected keyword '
'argument {}'.format(list(kwargs.keys())[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
- shapes = [x.shape for x in args]
- if len(set(shapes)) == 1:
+
+ shape = _broadcast_shape(*args)
+
+ if all(array.shape == shape for array in args):
# Common case where nothing needs to be broadcasted.
return args
- shapes = [list(s) for s in shapes]
- strides = [list(x.strides) for x in args]
- nds = [len(s) for s in shapes]
- biggest = max(nds)
- # Go through each array and prepend dimensions of length 1 to each of
- # the shapes in order to make the number of dimensions equal.
- for i in range(len(args)):
- diff = biggest - nds[i]
- if diff > 0:
- shapes[i] = [1] * diff + shapes[i]
- strides[i] = [0] * diff + strides[i]
- # Chech each dimension for compatibility. A dimension length of 1 is
- # accepted as compatible with any other length.
- common_shape = []
- for axis in range(biggest):
- lengths = [s[axis] for s in shapes]
- unique = set(lengths + [1])
- if len(unique) > 2:
- # There must be at least two non-1 lengths for this axis.
- raise ValueError("shape mismatch: two or more arrays have "
- "incompatible dimensions on axis %r." % (axis,))
- elif len(unique) == 2:
- # There is exactly one non-1 length. The common shape will take
- # this value.
- unique.remove(1)
- new_length = unique.pop()
- common_shape.append(new_length)
- # For each array, if this axis is being broadcasted from a
- # length of 1, then set its stride to 0 so that it repeats its
- # data.
- for i in range(len(args)):
- if shapes[i][axis] == 1:
- shapes[i][axis] = new_length
- strides[i][axis] = 0
- else:
- # Every array has a length of 1 on this axis. Strides can be
- # left alone as nothing is broadcasted.
- common_shape.append(1)
-
- # Construct the new arrays.
- broadcasted = [as_strided(x, shape=sh, strides=st, subok=subok)
- for (x, sh, st) in zip(args, shapes, strides)]
- return broadcasted
+
+ # TODO: consider making the results of broadcast_arrays readonly to match
+ # broadcast_to. This will require a deprecation cycle.
+ return [_broadcast_to(array, shape, subok=subok, readonly=False)
+ for array in args]
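The new functions side by side, as a sketch: ``broadcast_to`` returns a readonly view, while ``broadcast_arrays`` keeps returning writeable results for backwards compatibility.

    import numpy as np

    x = np.array([1, 2, 3])
    y = np.broadcast_to(x, (2, 3))   # readonly view; no data is copied
    y.flags.writeable                # -> False

    a, b = np.broadcast_arrays(x, np.ones((2, 1)))
    a.shape, b.shape                 # -> ((2, 3), (2, 3)), both writeable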
diff --git a/numpy/lib/tests/data/py2-objarr.npy b/numpy/lib/tests/data/py2-objarr.npy
new file mode 100644
index 000000000..12936c92d
--- /dev/null
+++ b/numpy/lib/tests/data/py2-objarr.npy
Binary files differ
diff --git a/numpy/lib/tests/data/py2-objarr.npz b/numpy/lib/tests/data/py2-objarr.npz
new file mode 100644
index 000000000..68a3b53a1
--- /dev/null
+++ b/numpy/lib/tests/data/py2-objarr.npz
Binary files differ
diff --git a/numpy/lib/tests/data/py3-objarr.npy b/numpy/lib/tests/data/py3-objarr.npy
new file mode 100644
index 000000000..6776074b4
--- /dev/null
+++ b/numpy/lib/tests/data/py3-objarr.npy
Binary files differ
diff --git a/numpy/lib/tests/data/py3-objarr.npz b/numpy/lib/tests/data/py3-objarr.npz
new file mode 100644
index 000000000..05eac0b76
--- /dev/null
+++ b/numpy/lib/tests/data/py3-objarr.npz
Binary files differ
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 060f815d5..e0a917a21 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -152,17 +152,31 @@ class TestStringConverter(TestCase):
def test_upgrade(self):
"Tests the upgrade method."
+
converter = StringConverter()
assert_equal(converter._status, 0)
+
# test int
assert_equal(converter.upgrade(asbytes('0')), 0)
assert_equal(converter._status, 1)
+
+ # On systems where integer defaults to 32-bit, the statuses will be
+ # offset by one, so we check for this here.
+ import numpy.core.numeric as nx
+ status_offset = int(nx.dtype(nx.integer).itemsize < nx.dtype(nx.int64).itemsize)
+
+ # test int > 2**32
+ assert_equal(converter.upgrade(asbytes('17179869184')), 17179869184)
+ assert_equal(converter._status, 1 + status_offset)
+
# test float
assert_allclose(converter.upgrade(asbytes('0.')), 0.0)
- assert_equal(converter._status, 2)
+ assert_equal(converter._status, 2 + status_offset)
+
# test complex
assert_equal(converter.upgrade(asbytes('0j')), complex('0j'))
- assert_equal(converter._status, 3)
+ assert_equal(converter._status, 3 + status_offset)
+
# test str
assert_equal(converter.upgrade(asbytes('a')), asbytes('a'))
assert_equal(converter._status, len(converter._mapper) - 1)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index ee77386bc..4f8a65148 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -284,7 +284,7 @@ import warnings
from io import BytesIO
import numpy as np
-from numpy.compat import asbytes, asbytes_nested
+from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
run_module_suite, assert_, assert_array_equal, assert_raises, raises,
dec
@@ -534,6 +534,87 @@ def test_python2_python3_interoperability():
assert_array_equal(data, np.ones(2))
+def test_pickle_python2_python3():
+ # Test that loading object arrays saved on Python 2 works both on
+ # Python 2 and Python 3 and vice versa
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ if sys.version_info[0] >= 3:
+ xrange = range
+ else:
+ import __builtin__
+ xrange = __builtin__.xrange
+
+ expected = np.array([None, xrange, sixu('\u512a\u826f'),
+ asbytes('\xe4\xb8\x8d\xe8\x89\xaf')],
+ dtype=object)
+
+ for fname in ['py2-objarr.npy', 'py2-objarr.npz',
+ 'py3-objarr.npy', 'py3-objarr.npz']:
+ path = os.path.join(data_dir, fname)
+
+ if (fname.endswith('.npz') and sys.version_info[0] == 2 and
+ sys.version_info[1] < 7):
+ # Reading object arrays directly from zipfile appears to fail
+ # on Py2.6, see cfae0143b4
+ continue
+
+ for encoding in ['bytes', 'latin1']:
+ if (sys.version_info[0] >= 3 and sys.version_info[1] < 4 and
+ encoding == 'bytes'):
+ # The bytes encoding is available starting from Python 3.4
+ continue
+
+ data_f = np.load(path, encoding=encoding)
+ if fname.endswith('.npz'):
+ data = data_f['x']
+ data_f.close()
+ else:
+ data = data_f
+
+ if sys.version_info[0] >= 3:
+ if encoding == 'latin1' and fname.startswith('py2'):
+ assert_(isinstance(data[3], str))
+ assert_array_equal(data[:-1], expected[:-1])
+ # mojibake occurs
+ assert_array_equal(data[-1].encode(encoding), expected[-1])
+ else:
+ assert_(isinstance(data[3], bytes))
+ assert_array_equal(data, expected)
+ else:
+ assert_array_equal(data, expected)
+
+ if sys.version_info[0] >= 3:
+ if fname.startswith('py2'):
+ if fname.endswith('.npz'):
+ data = np.load(path)
+ assert_raises(UnicodeError, data.__getitem__, 'x')
+ data.close()
+ data = np.load(path, fix_imports=False, encoding='latin1')
+ assert_raises(ImportError, data.__getitem__, 'x')
+ data.close()
+ else:
+ assert_raises(UnicodeError, np.load, path)
+ assert_raises(ImportError, np.load, path,
+ encoding='latin1', fix_imports=False)
+
+
+def test_pickle_disallow():
+ data_dir = os.path.join(os.path.dirname(__file__), 'data')
+
+ path = os.path.join(data_dir, 'py2-objarr.npy')
+ assert_raises(ValueError, np.load, path,
+ allow_pickle=False, encoding='latin1')
+
+ path = os.path.join(data_dir, 'py2-objarr.npz')
+ f = np.load(path, allow_pickle=False, encoding='latin1')
+ assert_raises(ValueError, f.__getitem__, 'x')
+
+ path = os.path.join(tempdir, 'pickle-disabled.npy')
+ assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
+ allow_pickle=False)
+
+
def test_version_2_0():
f = BytesIO()
# requires more than 2 byte for header
@@ -629,6 +710,26 @@ malformed_magic = asbytes_nested([
'',
])
+def test_read_magic():
+ s1 = BytesIO()
+ s2 = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+
+ format.write_array(s1, arr, version=(1, 0))
+ format.write_array(s2, arr, version=(2, 0))
+
+ s1.seek(0)
+ s2.seek(0)
+
+ version1 = format.read_magic(s1)
+ version2 = format.read_magic(s2)
+
+ assert_(version1 == (1, 0))
+ assert_(version2 == (2, 0))
+
+ assert_(s1.tell() == format.MAGIC_LEN)
+ assert_(s2.tell() == format.MAGIC_LEN)
def test_read_magic_bad_magic():
for magic in malformed_magic:
@@ -659,6 +760,30 @@ def test_large_header():
assert_raises(ValueError, format.write_array_header_1_0, s, d)
+def test_read_array_header_1_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(1, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_1_0(s)
+
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
+def test_read_array_header_2_0():
+ s = BytesIO()
+
+ arr = np.ones((3, 6), dtype=float)
+ format.write_array(s, arr, version=(2, 0))
+
+ s.seek(format.MAGIC_LEN)
+ shape, fortran, dtype = format.read_array_header_2_0(s)
+
+ assert_((shape, fortran, dtype) == ((3, 6), False, float))
+
+
def test_bad_header():
# header of length less than 2 should fail
s = BytesIO()
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 03521ca4c..cf9fcf5e2 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -8,8 +8,9 @@ from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises,
assert_allclose, assert_array_max_ulp, assert_warns,
- assert_raises_regex, dec
+ assert_raises_regex, dec, clear_and_catch_warnings
)
+import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import *
from numpy.compat import long
@@ -668,7 +669,7 @@ class TestVectorize(TestCase):
args = np.array([0, 0.5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
r1 = f(args)
r2 = np.cos(args)
- assert_array_equal(r1, r2)
+ assert_array_almost_equal(r1, r2)
def test_keywords(self):
import math
@@ -1305,6 +1306,12 @@ class TestCheckFinite(TestCase):
assert_(a.dtype == np.float64)
+class catch_warn_nfb(clear_and_catch_warnings):
+ """ Context manager to catch, reset warnings in function_base module
+ """
+ class_modules = (nfb,)
+
+
class TestCorrCoef(TestCase):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
@@ -1335,8 +1342,26 @@ class TestCorrCoef(TestCase):
assert_almost_equal(corrcoef(self.A, self.B), self.res2)
def test_ddof(self):
- assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
- assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
+ # ddof raises DeprecationWarning
+ with catch_warn_nfb():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, ddof=-1)
+ warnings.simplefilter("ignore")
+ # ddof has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)
+ assert_almost_equal(corrcoef(self.A, ddof=3), self.res1)
+ assert_almost_equal(corrcoef(self.A, self.B, ddof=3), self.res2)
+
+ def test_bias(self):
+ # bias raises DeprecationWarning
+ with catch_warn_nfb():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, self.A, self.B, 1, 0)
+ assert_warns(DeprecationWarning, corrcoef, self.A, bias=0)
+ warnings.simplefilter("ignore")
+ # bias has no or negligible effect on the function
+ assert_almost_equal(corrcoef(self.A, bias=1), self.res1)
def test_complex(self):
x = np.array([[1, 2, 3], [1j, 2j, 3j]])
@@ -1356,13 +1381,6 @@ class TestCorrCoef(TestCase):
assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
np.array([[np.nan, np.nan], [np.nan, np.nan]]))
- def test_wrong_ddof(self):
- x = np.array([[0, 2], [1, 1], [2, 0]]).T
- with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
- assert_array_equal(corrcoef(x, ddof=5),
- np.array([[np.nan, np.nan], [np.nan, np.nan]]))
-
class TestCov(TestCase):
def test_basic(self):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 7054ab1fe..8a939f85e 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -15,10 +15,10 @@ import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
-from numpy.compat import asbytes, asbytes_nested, bytes, asstr
+from numpy.compat import asbytes, bytes, unicode
from nose import SkipTest
from numpy.ma.testutils import (
- TestCase, assert_equal, assert_array_equal,
+ TestCase, assert_equal, assert_array_equal, assert_allclose,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
@@ -216,7 +216,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
-
+
def test_BagObj(self):
a = np.array([[1, 2], [3, 4]], float)
b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
@@ -553,15 +553,49 @@ class TestLoadTxt(TestCase):
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
- def test_comments(self):
+ def test_comments_unicode(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=unicode('#'))
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
- comments='#')
+ comments=b'#')
+ a = np.array([1, 2, 3, 5], int)
+ assert_array_equal(x, a)
+
+ def test_comments_multiple(self):
+ c = TextIO()
+ c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments=['#', '@', '//'])
+ a = np.array([[1, 2, 3], [4, 5, 6]], int)
+ assert_array_equal(x, a)
+
+ def test_comments_multi_chars(self):
+ c = TextIO()
+ c.write('/* comment\n1,2,3,5\n')
+ c.seek(0)
+ x = np.loadtxt(c, dtype=int, delimiter=',',
+ comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
+ # Check that '/*' is not transformed to ['/', '*']
+ c = TextIO()
+ c.write('*/ comment\n1,2,3,5\n')
+ c.seek(0)
+ assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
+ comments='/*')
+
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
@@ -707,6 +741,14 @@ class TestLoadTxt(TestCase):
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
+ def test_from_complex(self):
+ tgt = (complex(1, 1), complex(1, -1))
+ c = TextIO()
+ c.write("%s %s" % tgt)
+ c.seek(0)
+ res = np.loadtxt(c, dtype=np.complex)
+ assert_equal(res, tgt)
+
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
@@ -1762,6 +1804,31 @@ M 33 21.99
res = np.genfromtxt(count())
assert_array_equal(res, np.arange(10))
+ def test_auto_dtype_largeint(self):
+ """
+ Regression test for numpy/numpy#5635 whereby large integers could
+ cause OverflowErrors.
+ """
+ "Test the automatic definition of the output dtype"
+
+ # 2**66 = 73786976294838206464 => should convert to float
+ # 2**34 = 17179869184 => should convert to int64
+ # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
+ # int64 on 64-bit systems)
+
+ data = TextIO('73786976294838206464 17179869184 1024')
+
+ test = np.ndfromtxt(data, dtype=None)
+
+ assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
+
+ assert test.dtype['f0'] == np.float
+ assert test.dtype['f1'] == np.int64
+ assert test.dtype['f2'] == np.integer
+
+ assert_allclose(test['f0'], 73786976294838206464.)
+ assert_equal(test['f1'], 17179869184)
+ assert_equal(test['f2'], 1024)
def test_gzip_load():
a = np.random.random((5, 5))
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 23f3edfbe..fb9d7f364 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -324,6 +324,12 @@ class TestTile(TestCase):
assert_equal(tile(b, (2, 2)), [[1, 2, 1, 2], [3, 4, 3, 4],
[1, 2, 1, 2], [3, 4, 3, 4]])
+ def test_tile_one_repetition_on_array_gh4679(self):
+ a = np.arange(5)
+ b = tile(a, 1)
+ b += 2
+ assert_equal(a, np.arange(5))
+
def test_empty(self):
a = np.array([[[]]])
d = tile(a, (3, 2, 5)).shape
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index bc7e30ca4..e079e0bf4 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -5,8 +5,9 @@ from numpy.testing import (
run_module_suite, assert_equal, assert_array_equal,
assert_raises, assert_
)
-from numpy.lib.stride_tricks import as_strided, broadcast_arrays
-
+from numpy.lib.stride_tricks import (
+ as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
+)
def assert_shapes_correct(input_shapes, expected_shape):
# Broadcast a list of arrays with the given input shapes and check the
@@ -217,6 +218,62 @@ def test_same_as_ufunc():
assert_same_as_ufunc(input_shapes[0], input_shapes[1], False, True)
assert_same_as_ufunc(input_shapes[0], input_shapes[1], True, True)
+
+def test_broadcast_to_succeeds():
+ data = [
+ [np.array(0), (0,), np.array(0)],
+ [np.array(0), (1,), np.zeros(1)],
+ [np.array(0), (3,), np.zeros(3)],
+ [np.ones(1), (1,), np.ones(1)],
+ [np.ones(1), (2,), np.ones(2)],
+ [np.ones(1), (1, 2, 3), np.ones((1, 2, 3))],
+ [np.arange(3), (3,), np.arange(3)],
+ [np.arange(3), (1, 3), np.arange(3).reshape(1, -1)],
+ [np.arange(3), (2, 3), np.array([[0, 1, 2], [0, 1, 2]])],
+ # test if shape is not a tuple
+ [np.ones(0), 0, np.ones(0)],
+ [np.ones(1), 1, np.ones(1)],
+ [np.ones(1), 2, np.ones(2)],
+ # these cases with size 0 are strange, but they reproduce the behavior
+ # of broadcasting with ufuncs (see test_same_as_ufunc above)
+ [np.ones(1), (0,), np.ones(0)],
+ [np.ones((1, 2)), (0, 2), np.ones((0, 2))],
+ [np.ones((2, 1)), (2, 0), np.ones((2, 0))],
+ ]
+ for input_array, shape, expected in data:
+ actual = broadcast_to(input_array, shape)
+ assert_array_equal(expected, actual)
+
+
+def test_broadcast_to_raises():
+ data = [
+ [(0,), ()],
+ [(1,), ()],
+ [(3,), ()],
+ [(3,), (1,)],
+ [(3,), (2,)],
+ [(3,), (4,)],
+ [(1, 2), (2, 1)],
+ [(1, 1), (1,)],
+ [(1,), -1],
+ [(1,), (-1,)],
+ [(1, 2), (-1, 2)],
+ ]
+ for orig_shape, target_shape in data:
+ arr = np.zeros(orig_shape)
+ assert_raises(ValueError, lambda: broadcast_to(arr, target_shape))
+
+
+def test_broadcast_shape():
+ # _broadcast_shape is already exercised indirectly by broadcast_arrays
+ assert_raises(ValueError, _broadcast_shape)
+ assert_equal(_broadcast_shape([1, 2]), (2,))
+ assert_equal(_broadcast_shape(np.ones((1, 1))), (1, 1))
+ assert_equal(_broadcast_shape(np.ones((1, 1)), np.ones((3, 4))), (3, 4))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 32)), (1, 2))
+ assert_equal(_broadcast_shape(*([np.ones((1, 2))] * 100)), (1, 2))
+
+
def test_as_strided():
a = np.array([None])
a_view = as_strided(a)
@@ -233,6 +290,29 @@ def test_as_strided():
expected = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
assert_array_equal(a_view, expected)
+ # Regression test for gh-5081
+ dt = np.dtype([('num', 'i4'), ('obj', 'O')])
+ a = np.empty((4,), dtype=dt)
+ a['num'] = np.arange(1, 5)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ expected_num = [[1, 2, 3, 4]] * 3
+ expected_obj = [[None]*4]*3
+ assert_equal(a_view.dtype, dt)
+ assert_array_equal(expected_num, a_view['num'])
+ assert_array_equal(expected_obj, a_view['obj'])
+
+ # Make sure that void types without fields are kept unchanged
+ a = np.empty((4,), dtype='V4')
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+ # Make sure that the only type that could fail is properly handled
+ dt = np.dtype({'names': [''], 'formats': ['V4']})
+ a = np.empty((4,), dtype=dt)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+
+
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
@@ -277,6 +357,45 @@ def test_subclasses():
assert_(type(b_view) is np.ndarray)
assert_(a_view.shape == b_view.shape)
+ # and for broadcast_to
+ shape = (2, 4)
+ a_view = broadcast_to(a, shape)
+ assert_(type(a_view) is np.ndarray)
+ assert_(a_view.shape == shape)
+ a_view = broadcast_to(a, shape, subok=True)
+ assert_(type(a_view) is SimpleSubClass)
+ assert_(a_view.info == 'simple finalized')
+ assert_(a_view.shape == shape)
+
+
+def test_writeable():
+ # broadcast_to should return a readonly array
+ original = np.array([1, 2, 3])
+ result = broadcast_to(original, (2, 3))
+ assert_equal(result.flags.writeable, False)
+ assert_raises(ValueError, result.__setitem__, slice(None), 0)
+
+ # but the result of broadcast_arrays needs to be writeable (for now), to
+ # preserve backwards compatibility
+ for results in [broadcast_arrays(original),
+ broadcast_arrays(0, original)]:
+ for result in results:
+ assert_equal(result.flags.writeable, True)
+ # keep readonly input readonly
+ original.flags.writeable = False
+ _, result = broadcast_arrays(0, original)
+ assert_equal(result.flags.writeable, False)
+
+
+def test_reference_types():
+ input_array = np.array('a', dtype=object)
+ expected = np.array(['a'] * 3, dtype=object)
+ actual = broadcast_to(input_array, (3,))
+ assert_array_equal(expected, actual)
+
+ actual, _ = broadcast_arrays(input_array, np.ones(3))
+ assert_array_equal(expected, actual)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 3931f95e5..7afd1206c 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -277,6 +277,8 @@ class TestNanToNum(TestCase):
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
+ vals = nan_to_num([1])
+ assert_array_equal(vals, np.array([1], np.int))
def test_complex_good(self):
vals = nan_to_num(1+1j)
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index a45d0bd86..99677b394 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -324,12 +324,13 @@ def nan_to_num(x):
Returns
-------
- out : ndarray, float
- Array with the same shape as `x` and dtype of the element in `x` with
- the greatest precision. NaN is replaced by zero, and infinity
- (-infinity) is replaced by the largest (smallest or most negative)
- floating point value that fits in the output dtype. All finite numbers
- are upcast to the output dtype (default float64).
+ out : ndarray
+ A new array with the same shape as `x` and dtype of the element in
+ `x` with the greatest precision. If `x` is inexact, then NaN is
+ replaced by zero, and infinity (-infinity) is replaced by the
+ largest (smallest or most negative) floating point value that fits
+ in the output dtype. If `x` is not inexact, then a copy of `x` is
+ returned.
See Also
--------
@@ -354,33 +355,22 @@ def nan_to_num(x):
-1.28000000e+002, 1.28000000e+002])
"""
- try:
- t = x.dtype.type
- except AttributeError:
- t = obj2sctype(type(x))
- if issubclass(t, _nx.complexfloating):
- return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
- else:
- try:
- y = x.copy()
- except AttributeError:
- y = array(x)
- if not issubclass(t, _nx.integer):
- if not y.shape:
- y = array([x])
- scalar = True
- else:
- scalar = False
- are_inf = isposinf(y)
- are_neg_inf = isneginf(y)
- are_nan = isnan(y)
- maxf, minf = _getmaxmin(y.dtype.type)
- y[are_nan] = 0
- y[are_inf] = maxf
- y[are_neg_inf] = minf
- if scalar:
- y = y[0]
- return y
+ x = _nx.array(x, subok=True)
+ xtype = x.dtype.type
+ if not issubclass(xtype, _nx.inexact):
+ return x
+
+ iscomplex = issubclass(xtype, _nx.complexfloating)
+ isscalar = (x.ndim == 0)
+
+ x = x[None] if isscalar else x
+ dest = (x.real, x.imag) if iscomplex else (x,)
+ maxf, minf = _getmaxmin(x.real.dtype)
+ for d in dest:
+ _nx.copyto(d, 0.0, where=isnan(d))
+ _nx.copyto(d, maxf, where=isposinf(d))
+ _nx.copyto(d, minf, where=isneginf(d))
+ return x[0] if isscalar else x
#-----------------------------------------------------------------------------
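The rewritten ``nan_to_num`` in action, sketched: inexact input has its non-finite entries replaced in a copy, and integer input now comes back as an unmodified copy instead of being upcast to float.

    import numpy as np

    np.nan_to_num(np.array([np.nan, np.inf, -np.inf]))
    # -> array([ 0.0e+000,  1.8e+308, -1.8e+308])  (float64 limits)

    np.nan_to_num(np.array([1, 2]))     # integer dtype preserved in the copy
    np.nan_to_num(np.inf + 1j*np.nan)   # real and imag parts fixed separately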
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index e70227e5a..30180f24a 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -319,6 +319,8 @@ def solve(a, b):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -476,6 +478,8 @@ def inv(a):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -553,6 +557,8 @@ def cholesky(a):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -849,6 +855,8 @@ def eigvals(a):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -937,6 +945,8 @@ def eigvalsh(a, UPLO='L'):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -1010,13 +1020,18 @@ def eig(a):
See Also
--------
+ eigvals : eigenvalues of a non-symmetric array.
+
+ eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
+ (conjugate symmetric) array.
+
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
- eigvals : eigenvalues of a non-symmetric array.
-
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -1123,7 +1138,7 @@ def eigh(a, UPLO='L'):
Parameters
----------
- A : (..., M, M) array
+ a : (..., M, M) array
Hermitian/Symmetric matrices whose eigenvalues and
eigenvectors are to be computed.
UPLO : {'L', 'U'}, optional
@@ -1152,6 +1167,8 @@ def eigh(a, UPLO='L'):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -1259,6 +1276,8 @@ def svd(a, full_matrices=1, compute_uv=1):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -1628,13 +1647,15 @@ def slogdet(a):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
+ .. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
- .. versionadded:: 1.6.0.
Examples
--------
@@ -1697,6 +1718,8 @@ def det(a):
Notes
-----
+
+ .. versionadded:: 1.8.0
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
@@ -2059,18 +2082,22 @@ def norm(x, ord=None, axis=None, keepdims=False):
"""
x = asarray(x)
- # Check the default case first and handle it immediately.
- if ord is None and axis is None:
+ # Immediately handle some default, simple, fast, and common cases.
+ if axis is None:
ndim = x.ndim
- x = x.ravel(order='K')
- if isComplexType(x.dtype.type):
- sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
- else:
- sqnorm = dot(x, x)
- ret = sqrt(sqnorm)
- if keepdims:
- ret = ret.reshape(ndim*[1])
- return ret
+ if ((ord is None) or
+ (ord in ('f', 'fro') and ndim == 2) or
+ (ord == 2 and ndim == 1)):
+
+ x = x.ravel(order='K')
+ if isComplexType(x.dtype.type):
+ sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
+ else:
+ sqnorm = dot(x, x)
+ ret = sqrt(sqnorm)
+ if keepdims:
+ ret = ret.reshape(ndim*[1])
+ return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
@@ -2119,10 +2146,14 @@ def norm(x, ord=None, axis=None, keepdims=False):
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
- if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
+ if row_axis < 0:
+ row_axis += nd
+ if col_axis < 0:
+ col_axis += nd
+ if not (0 <= row_axis < nd and 0 <= col_axis < nd):
raise ValueError('Invalid axis %r for an array with shape %r' %
(axis, x.shape))
- if row_axis % nd == col_axis % nd:
+ if row_axis == col_axis:
raise ValueError('Duplicate axes given.')
if ord == 2:
ret = _multi_svd_norm(x, row_axis, col_axis, amax)
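A sketch of the normalized-axis behavior: negative and positive axis pairs now agree, and duplicate axes raise regardless of sign.

    import numpy as np

    B = np.arange(1, 25, dtype=float).reshape(2, 3, 4)
    n1 = np.linalg.norm(B, ord=1, axis=(-2, -1))
    n2 = np.linalg.norm(B, ord=1, axis=(1, 2))
    np.allclose(n1, n2)                  # -> True
    # np.linalg.norm(B, axis=(1, -2))    raises ValueError: Duplicate axes given.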
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 29e1f3480..ca59aa566 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -882,26 +882,29 @@ class _TestNorm(object):
# Matrix norms.
B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
-
- for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
- assert_almost_equal(norm(A, ord=order), norm(A, ord=order,
- axis=(0, 1)))
-
- n = norm(B, ord=order, axis=(1, 2))
- expected = [norm(B[k], ord=order) for k in range(B.shape[0])]
- assert_almost_equal(n, expected)
-
- n = norm(B, ord=order, axis=(2, 1))
- expected = [norm(B[k].T, ord=order) for k in range(B.shape[0])]
- assert_almost_equal(n, expected)
-
- n = norm(B, ord=order, axis=(0, 2))
- expected = [norm(B[:, k,:], ord=order) for k in range(B.shape[1])]
- assert_almost_equal(n, expected)
-
- n = norm(B, ord=order, axis=(0, 1))
- expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])]
- assert_almost_equal(n, expected)
+ nd = B.ndim
+ for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
+ for axis in itertools.combinations(range(-nd, nd), 2):
+ row_axis, col_axis = axis
+ if row_axis < 0:
+ row_axis += nd
+ if col_axis < 0:
+ col_axis += nd
+ if row_axis == col_axis:
+ assert_raises(ValueError, norm, B, ord=order, axis=axis)
+ else:
+ n = norm(B, ord=order, axis=axis)
+
+ # The logic using k_index only works for nd = 3.
+ # This has to be changed if nd is increased.
+ k_index = nd - (row_axis + col_axis)
+ if row_axis < col_axis:
+ expected = [norm(B[:].take(k, axis=k_index), ord=order)
+ for k in range(B.shape[k_index])]
+ else:
+ expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
+ for k in range(B.shape[k_index])]
+ assert_almost_equal(n, expected)
def test_keepdims(self):
A = np.arange(1,25, dtype=self.dt).reshape(2,3,4)
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 78f5fbc4d..51e9f0f28 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -681,6 +681,9 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
----------
a : array_like
Input array, a (subclass of) ndarray.
+ mask : sequence, optional
+ Mask. Must be convertible to an array of booleans with the same
+ shape as `a`. True indicates a masked (i.e. invalid) entry.
copy : bool, optional
Whether to use a copy of `a` (True) or to fix `a` in place (False).
Default is True.
@@ -4519,6 +4522,26 @@ class MaskedArray(ndarray):
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
+ def dot(self, other, out=None):
+ am = ~getmaskarray(self)
+ bm = ~getmaskarray(other)
+ if out is None:
+ d = np.dot(filled(self, 0), filled(other, 0))
+ m = ~np.dot(am, bm)
+ if d.ndim == 0:
+ d = np.asarray(d)
+ r = d.view(get_masked_subclass(self, other))
+ r.__setmask__(m)
+ return r
+ d = self.filled(0).dot(other.filled(0), out._data)
+ if out.mask.shape != d.shape:
+ out._mask = np.empty(d.shape, MaskType)
+ np.dot(am, bm, out._mask)
+ np.logical_not(out._mask, out._mask)
+ return out
+ dot.__doc__ = ndarray.dot.__doc__
+
+
def sum(self, axis=None, dtype=None, out=None):
"""
Return the sum of the array elements over the given axis.
@@ -7378,21 +7401,21 @@ def append(a, b, axis=None):
Parameters
----------
- arr : array_like
+ a : array_like
Values are appended to a copy of this array.
- values : array_like
- These values are appended to a copy of `arr`. It must be of the
- correct shape (the same shape as `arr`, excluding `axis`). If `axis`
- is not specified, `values` can be any shape and will be flattened
+ b : array_like
+ These values are appended to a copy of `a`. It must be of the
+ correct shape (the same shape as `a`, excluding `axis`). If `axis`
+ is not specified, `b` can be any shape and will be flattened
before use.
axis : int, optional
- The axis along which `values` are appended. If `axis` is not given,
- both `arr` and `values` are flattened before use.
+ The axis along which `b` is appended. If `axis` is not given,
+ both `a` and `b` are flattened before use.
Returns
-------
append : MaskedArray
- A copy of `arr` with `values` appended to `axis`. Note that `append`
+ A copy of `a` with `b` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, the result is a flattened array.
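How the new ``MaskedArray.dot`` propagates masks, sketched: masked entries contribute 0 to the product, and an output cell is masked whenever it touched a masked input.

    import numpy as np
    import numpy.ma as ma

    x = ma.array([[1., 2.], [3., 4.]], mask=[[0, 1], [0, 0]])
    r = x.dot(x)
    r.mask    # -> [[False,  True], [False, False]]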
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index b6082180a..d389099ae 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -48,7 +48,6 @@ import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.lib.index_tricks import AxisConcatenator
-from numpy.linalg import lstsq
#...............................................................................
@@ -730,6 +729,10 @@ def compress_rowcols(x, axis=None):
Parameters
----------
+ x : array_like, MaskedArray
+ The array to operate on. If not a MaskedArray instance (or if no array
+ elements are masked), `x` is interpreted as a MaskedArray with
+ `mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
@@ -1044,13 +1047,7 @@ def dot(a, b, strict=False):
if strict and (a.ndim == 2) and (b.ndim == 2):
a = mask_rows(a)
b = mask_cols(b)
- #
- d = np.dot(filled(a, 0), filled(b, 0))
- #
- am = (~getmaskarray(a))
- bm = (~getmaskarray(b))
- m = ~np.dot(am, bm)
- return masked_array(d, mask=m)
+ return a.dot(b)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
@@ -1373,9 +1370,10 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
return result
-def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
+def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
+ ddof=np._NoValue):
"""
- Return correlation coefficients of the input array.
+ Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
@@ -1394,45 +1392,41 @@ def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
- bias : bool, optional
- Default normalization (False) is by ``(N-1)``, where ``N`` is the
- number of observations given (unbiased estimate). If `bias` is 1,
- then normalization is by ``N``. This keyword can be overridden by
- the keyword ``ddof`` in numpy versions >= 1.5.
+ bias : _NoValue, optional
+ .. deprecated:: 1.10.0
+ Has no effect, do not use.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
- If False, raises an exception.
- ddof : {None, int}, optional
- .. versionadded:: 1.5
- If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
- the number of observations; this overrides the value implied by
- ``bias``. The default value is ``None``.
+ If False, raises an exception. Because `bias` is deprecated, this
+ argument needs to be treated as keyword only to avoid a warning.
+ ddof : _NoValue, optional
+ .. deprecated:: 1.10.0
+ Has no effect, do not use.
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
- """
- # Check inputs
- if ddof is not None and ddof != int(ddof):
- raise ValueError("ddof must be an integer")
- # Set up ddof
- if ddof is None:
- if bias:
- ddof = 0
- else:
- ddof = 1
-
+ Notes
+ -----
+ This function accepts but discards arguments `bias` and `ddof`. This is
+ for backwards compatibility with previous versions of this function. These
+ arguments had no effect on the return values of the function and can be
+ safely ignored in this and previous versions of numpy.
+ """
+ msg = 'bias and ddof have no effect and are deprecated'
+ if bias is not np._NoValue or ddof is not np._NoValue:
+ warnings.warn(msg, DeprecationWarning)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
- fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
+ fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
- fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
+ fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
@@ -1448,13 +1442,13 @@ def corrcoef(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
- _x = mask_cols(vstack((x[i], x[j]))).var(axis=1, ddof=ddof)
+ _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
- vstack((x[:, i], x[:, j]))).var(axis=1, ddof=ddof)
+ vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
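What callers see after the deprecation, sketched: passing ``bias`` or ``ddof`` still returns the same result but now emits a DeprecationWarning.

    import warnings
    import numpy as np

    x = np.ma.masked_equal([1., 2., 3., 4.], 4)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        np.ma.corrcoef(x, ddof=0)                    # value unaffected
    issubclass(w[-1].category, DeprecationWarning)   # -> True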
@@ -1561,7 +1555,7 @@ def flatnotmasked_edges(a):
Parameters
----------
- arr : array_like
+ a : array_like
Input 1-D `MaskedArray`
Returns
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index e66596509..644383925 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -639,7 +639,7 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
Parameters
----------
- filename : {file name/handle}
+ fname : {file name/handle}
Handle of an opened file.
delimitor : {None, string}, optional
Alphanumeric character used to separate columns in the file.
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 1d4462306..f0d5d6788 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -2953,6 +2953,12 @@ class TestMaskedArrayMathMethods(TestCase):
assert_equal(mX.ptp(0), cols)
assert_equal(mX.ptp(1), rows)
+ def test_add_object(self):
+ x = masked_array(['a', 'b'], mask=[1, 0], dtype=object)
+ y = x + 'x'
+ assert_equal(y[1], 'bx')
+ assert_(y.mask[0])
+
def test_sum_object(self):
# Test sum on object dtype
a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
@@ -2982,6 +2988,30 @@ class TestMaskedArrayMathMethods(TestCase):
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0))
+ def test_dot(self):
+ # Tests dot on MaskedArrays.
+ (x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
+ fx = mx.filled(0)
+ r = mx.dot(mx)
+ assert_almost_equal(r.filled(0), fx.dot(fx))
+ assert_(r.mask is nomask)
+
+ fX = mX.filled(0)
+ r = mX.dot(mX)
+ assert_almost_equal(r.filled(0), fX.dot(fX))
+ assert_(r.mask[1,3])
+ r1 = empty_like(r)
+ mX.dot(mX, r1)
+ assert_almost_equal(r, r1)
+
+ mYY = mXX.swapaxes(-1, -2)
+ fXX, fYY = mXX.filled(0), mYY.filled(0)
+ r = mXX.dot(mYY)
+ assert_almost_equal(r.filled(0), fXX.dot(fYY))
+ r1 = empty_like(r)
+ mXX.dot(mYY, r1)
+ assert_almost_equal(r, r1)
+
def test_varstd(self):
# Tests var & std on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 9e1e8ba38..ee8e6bc18 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -14,12 +14,16 @@ __version__ = '1.0'
__revision__ = "$Revision: 3473 $"
__date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
+import warnings
+
import numpy as np
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import (TestCase, run_module_suite, assert_warns,
+ assert_raises, clear_and_catch_warnings)
from numpy.ma.testutils import (rand, assert_, assert_array_equal,
assert_equal, assert_almost_equal)
from numpy.ma.core import (array, arange, masked, MaskedArray, masked_array,
getmaskarray, shape, nomask, ones, zeros, count)
+import numpy.ma.extras as mae
from numpy.ma.extras import (
atleast_2d, mr_, dot, polyfit,
cov, corrcoef, median, average,
@@ -629,15 +633,46 @@ class TestCov(TestCase):
x.shape[0] / frac))
+class catch_warn_mae(clear_and_catch_warnings):
+ """ Context manager to catch, reset warnings in ma.extras module
+ """
+ class_modules = (mae,)
+
+
class TestCorrcoef(TestCase):
def setUp(self):
self.data = array(np.random.rand(12))
+ self.data2 = array(np.random.rand(12))
def test_ddof(self):
- # Test ddof keyword
- x = self.data
- assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
+ # ddof raises DeprecationWarning
+ x, y = self.data, self.data2
+ expected = np.corrcoef(x)
+ expected2 = np.corrcoef(x, y)
+ with catch_warn_mae():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
+ warnings.simplefilter("ignore")
+ # ddof has no or negligible effect on the function
+ assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
+ assert_almost_equal(corrcoef(x, ddof=-1), expected)
+ assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
+ assert_almost_equal(corrcoef(x, ddof=3), expected)
+ assert_almost_equal(corrcoef(x, y, ddof=3), expected2)
+
+ def test_bias(self):
+ x, y = self.data, self.data2
+ expected = np.corrcoef(x)
+ # bias raises DeprecationWarning
+ with catch_warn_mae():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
+ assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
+ assert_warns(DeprecationWarning, corrcoef, x, bias=False)
+ warnings.simplefilter("ignore")
+ # bias has no or negligible effect on the function
+ assert_almost_equal(corrcoef(x, bias=1), expected)
def test_1d_wo_missing(self):
# Test cov on 1D variable w/o missing values
@@ -645,8 +680,10 @@ class TestCorrcoef(TestCase):
assert_almost_equal(np.corrcoef(x), corrcoef(x))
assert_almost_equal(np.corrcoef(x, rowvar=False),
corrcoef(x, rowvar=False))
- assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
- corrcoef(x, rowvar=False, bias=True))
+ with catch_warn_mae():
+ warnings.simplefilter("ignore")
+ assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
def test_2d_wo_missing(self):
# Test corrcoef on 1 2D variable w/o missing values
@@ -654,8 +691,10 @@ class TestCorrcoef(TestCase):
assert_almost_equal(np.corrcoef(x), corrcoef(x))
assert_almost_equal(np.corrcoef(x, rowvar=False),
corrcoef(x, rowvar=False))
- assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
- corrcoef(x, rowvar=False, bias=True))
+ with catch_warn_mae():
+ warnings.simplefilter("ignore")
+ assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
def test_1d_w_missing(self):
# Test corrcoef 1 1D variable w/missing values
@@ -666,21 +705,26 @@ class TestCorrcoef(TestCase):
assert_almost_equal(np.corrcoef(nx), corrcoef(x))
assert_almost_equal(np.corrcoef(nx, rowvar=False),
corrcoef(x, rowvar=False))
- assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
- corrcoef(x, rowvar=False, bias=True))
- #
+ with catch_warn_mae():
+ warnings.simplefilter("ignore")
+ assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
+ corrcoef(x, rowvar=False, bias=True))
try:
corrcoef(x, allow_masked=False)
except ValueError:
pass
- #
# 2 1D variables w/ missing values
nx = x[1:-1]
assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
corrcoef(x, x[::-1], rowvar=False))
- assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False, bias=True),
- corrcoef(x, x[::-1], rowvar=False, bias=True))
+ with catch_warn_mae():
+ warnings.simplefilter("ignore")
+ # ddof and bias have no or negligible effect on the function
+ assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+ corrcoef(x, x[::-1], bias=1))
+ assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+ corrcoef(x, x[::-1], ddof=2))
def test_2d_w_missing(self):
# Test corrcoef on 2D variable w/ missing value
@@ -691,6 +735,15 @@ class TestCorrcoef(TestCase):
test = corrcoef(x)
control = np.corrcoef(x)
assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
+ with catch_warn_mae():
+ warnings.simplefilter("ignore")
+ # ddof and bias have no or negligible effect on the function
+ assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
+ control[:-1, :-1])
+ assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
+ control[:-1, :-1])
+ assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
+ control[:-1, :-1])
class TestPolynomial(TestCase):
@@ -750,6 +803,7 @@ class TestPolynomial(TestCase):
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
+
class TestArraySetOps(TestCase):
def test_unique_onlist(self):
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 7b32199ea..dba74d357 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,7 +1,10 @@
from __future__ import division, absolute_import, print_function
+import warnings
+
import numpy as np
-from numpy.testing import *
+from numpy.testing import (assert_, TestCase, assert_array_equal,
+ assert_allclose, run_module_suite)
from numpy.compat import sixu
rlevel = 1
@@ -66,10 +69,12 @@ class TestRegression(TestCase):
# See gh-3336
x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
y = np.array([2, 2.5, 3.1, 3, 5])
- r0 = np.ma.corrcoef(x, y, ddof=0)
- r1 = np.ma.corrcoef(x, y, ddof=1)
- # ddof should not have an effect (it gets cancelled out)
- assert_allclose(r0.data, r1.data)
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ r0 = np.ma.corrcoef(x, y, ddof=0)
+ r1 = np.ma.corrcoef(x, y, ddof=1)
+ # ddof should not have an effect (it gets cancelled out)
+ assert_allclose(r0.data, r1.data)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index d0b39ad9f..ffd4578ba 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -74,6 +74,8 @@ def asmatrix(data, dtype=None):
----------
data : array_like
Input data.
+ dtype : data-type
+ Data-type of the output matrix.
Returns
-------
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 9348559ed..c1b7528d5 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -134,7 +134,7 @@ def as_series(alist, trim=True):
Parameters
----------
- a : array_like
+ alist : array_like
A 1- or 2-d array_like
trim : boolean, optional
When True, trailing zeros are removed from the inputs.
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index 001e2f6a1..84174e105 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -42,6 +42,7 @@
*/
#include <math.h>
+#include <stdlib.h>
#include "distributions.h"
#include <stdio.h>
@@ -315,7 +316,7 @@ long rk_binomial_btpe(rk_state *state, long n, double p)
v = v*(u-p3)*lamr;
Step50:
- k = fabs(y - m);
+ k = labs(y - m);
if ((k > 20) && (k < ((nrq)/2.0 - 1))) goto Step52;
s = r/q;
@@ -788,9 +789,9 @@ long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample)
d4 = ((double)mingoodbad) / popsize;
d5 = 1.0 - d4;
d6 = m*d4 + 0.5;
- d7 = sqrt((popsize - m) * sample * d4 *d5 / (popsize-1) + 0.5);
+ d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5);
d8 = D1*d7 + D2;
- d9 = (long)floor((double)((m+1)*(mingoodbad+1))/(popsize+2));
+ d9 = (long)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2));
d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) +
loggam(maxgoodbad-m+d9+1));
d11 = min(min(m, mingoodbad)+1.0, floor(d6+16*d7));
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 703e9ec28..c4927a3f3 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -634,7 +634,7 @@ cdef class RandomState:
----------
seed : int or array_like, optional
Seed for `RandomState`.
- Must be convertable to 32 bit unsigned integers.
+ Must be convertible to 32 bit unsigned integers.
See Also
--------
@@ -1252,8 +1252,8 @@ cdef class RandomState:
olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
temp = np.subtract(ohigh, olow)
- Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
- # rules because EnsureArray steals a reference
+ Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
odiff = <ndarray>PyArray_EnsureArray(temp)
return cont2_array(self.internal_state, rk_uniform, size, olow, odiff,
self.lock)
@@ -1443,7 +1443,7 @@ cdef class RandomState:
"""
standard_normal(size=None)
- Returns samples from a Standard Normal distribution (mean=0, stdev=1).
+ Draw samples from a standard Normal distribution (mean=0, stdev=1).
Parameters
----------
@@ -1511,22 +1511,22 @@ cdef class RandomState:
.. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
- where :math:`\\mu` is the mean and :math:`\\sigma` the standard deviation.
- The square of the standard deviation, :math:`\\sigma^2`, is called the
- variance.
+ where :math:`\\mu` is the mean and :math:`\\sigma` the standard
+ deviation. The square of the standard deviation, :math:`\\sigma^2`,
+ is called the variance.
The function has its peak at the mean, and its "spread" increases with
the standard deviation (the function reaches 0.607 times its maximum at
:math:`x + \\sigma` and :math:`x - \\sigma` [2]_). This implies that
- `numpy.random.normal` is more likely to return samples lying close to the
- mean, rather than those far away.
+ `numpy.random.normal` is more likely to return samples lying close to
+ the mean, rather than those far away.
References
----------
.. [1] Wikipedia, "Normal distribution",
http://en.wikipedia.org/wiki/Normal_distribution
- .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability, Random
- Variables and Random Signal Principles", 4th ed., 2001,
+ .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
+ Random Variables and Random Signal Principles", 4th ed., 2001,
pp. 51, 51, 125.
Examples
@@ -1579,7 +1579,7 @@ cdef class RandomState:
"""
beta(a, b, size=None)
- The Beta distribution over ``[0, 1]``.
+ Draw samples from a Beta distribution.
The Beta distribution is a special case of the Dirichlet distribution,
and is related to the Gamma distribution. It has the probability
@@ -1641,7 +1641,7 @@ cdef class RandomState:
"""
exponential(scale=1.0, size=None)
- Exponential distribution.
+ Draw samples from an exponential distribution.
Its probability density function is
@@ -1688,7 +1688,8 @@ cdef class RandomState:
PyErr_Clear()
- oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
+ oscale = <ndarray> PyArray_FROM_OTF(scale, NPY_DOUBLE,
+ NPY_ARRAY_ALIGNED)
if np.any(np.less_equal(oscale, 0.0)):
raise ValueError("scale <= 0")
return cont1_array(self.internal_state, rk_exponential, size, oscale,
@@ -1729,7 +1730,7 @@ cdef class RandomState:
"""
standard_gamma(shape, size=None)
- Draw samples from a Standard Gamma distribution.
+ Draw samples from a standard Gamma distribution.
Samples are drawn from a Gamma distribution with specified parameters,
shape (sometimes designated "k") and scale=1.
@@ -1800,10 +1801,12 @@ cdef class RandomState:
if not PyErr_Occurred():
if fshape <= 0:
raise ValueError("shape <= 0")
- return cont1_array_sc(self.internal_state, rk_standard_gamma, size, fshape, self.lock)
+ return cont1_array_sc(self.internal_state, rk_standard_gamma,
+ size, fshape, self.lock)
PyErr_Clear()
- oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
+ oshape = <ndarray> PyArray_FROM_OTF(shape, NPY_DOUBLE,
+ NPY_ARRAY_ALIGNED)
if np.any(np.less_equal(oshape, 0.0)):
raise ValueError("shape <= 0")
return cont1_array(self.internal_state, rk_standard_gamma, size,
@@ -1907,11 +1910,12 @@ cdef class RandomState:
"""
f(dfnum, dfden, size=None)
- Draw samples from a F distribution.
+ Draw samples from an F distribution.
Samples are drawn from an F distribution with specified parameters,
- `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of freedom
- in denominator), where both parameters should be greater than zero.
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters should be greater than
+ zero.
The random variate of the F distribution (also known as the
Fisher distribution) is a continuous probability distribution
@@ -1958,7 +1962,8 @@ cdef class RandomState:
Examples
--------
- An example from Glantz[1], pp 47-40.
+ An example from Glantz[1], pp 47-40:
+
Two groups, children of diabetics (25 people) and children from people
without diabetes (25 controls). Fasting blood glucose was measured,
case group had a mean value of 86.1, controls had a mean value of
@@ -2046,11 +2051,11 @@ cdef class RandomState:
References
----------
- Weisstein, Eric W. "Noncentral F-Distribution." From MathWorld--A Wolfram
- Web Resource. http://mathworld.wolfram.com/NoncentralF-Distribution.html
-
- Wikipedia, "Noncentral F distribution",
- http://en.wikipedia.org/wiki/Noncentral_F-distribution
+ .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ .. [2] Wikipedia, "Noncentral F distribution",
+ http://en.wikipedia.org/wiki/Noncentral_F-distribution
Examples
--------
@@ -2157,8 +2162,8 @@ cdef class RandomState:
References
----------
- `NIST/SEMATECH e-Handbook of Statistical Methods
- <http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm>`_
+ .. [1] NIST "Engineering Statistics Handbook"
+ http://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
@@ -2206,23 +2211,25 @@ cdef class RandomState:
Notes
-----
- The probability density function for the noncentral Chi-square distribution
- is
+ The probability density function for the noncentral Chi-square
+ distribution is
.. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
- \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}P_{Y_{df+2i}}(x),
+ \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
+                   P_{Y_{df+2i}}(x),
where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
- In Delhi (2007), it is noted that the noncentral chi-square is useful in
- bombing and coverage problems, the probability of killing the point target
- given by the noncentral chi-squared distribution.
+        In Delhi (2007), it is noted that the noncentral chi-square is
+        useful in bombing and coverage problems, with the probability
+        of killing a point target given by the noncentral chi-squared
+        distribution.
References
----------
- .. [1] Delhi, M.S. Holla, "On a noncentral chi-square distribution in the
- analysis of weapon systems effectiveness", Metrika, Volume 15,
- Number 1 / December, 1970.
+ .. [1] Delhi, M.S. Holla, "On a noncentral chi-square distribution in
+ the analysis of weapon systems effectiveness", Metrika,
+ Volume 15, Number 1 / December, 1970.
.. [2] Wikipedia, "Noncentral chi-square distribution"
http://en.wikipedia.org/wiki/Noncentral_chi-square_distribution
@@ -2282,7 +2289,7 @@ cdef class RandomState:
"""
standard_cauchy(size=None)
- Standard Cauchy distribution with mode = 0.
+ Draw samples from a standard Cauchy distribution with mode = 0.
Also known as the Lorentz distribution.
@@ -2346,10 +2353,11 @@ cdef class RandomState:
"""
standard_t(df, size=None)
- Standard Student's t distribution with df degrees of freedom.
+ Draw samples from a standard Student's t distribution with `df` degrees
+ of freedom.
- A special case of the hyperbolic distribution.
- As `df` gets large, the result resembles that of the standard normal
+ A special case of the hyperbolic distribution. As `df` gets
+ large, the result resembles that of the standard normal
distribution (`standard_normal`).
Parameters
@@ -2373,15 +2381,15 @@ cdef class RandomState:
.. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
\\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
- The t test is based on an assumption that the data come from a Normal
- distribution. The t test provides a way to test whether the sample mean
- (that is the mean calculated from the data) is a good estimate of the true
- mean.
+ The t test is based on an assumption that the data come from a
+ Normal distribution. The t test provides a way to test whether
+ the sample mean (that is the mean calculated from the data) is
+ a good estimate of the true mean.
- The derivation of the t-distribution was forst published in 1908 by William
- Gisset while working for the Guinness Brewery in Dublin. Due to proprietary
- issues, he had to publish under a pseudonym, and so he used the name
- Student.
+ The derivation of the t-distribution was first published in
+        1908 by William Gosset while working for the Guinness Brewery
+ in Dublin. Due to proprietary issues, he had to publish under
+ a pseudonym, and so he used the name Student.
References
----------
@@ -2498,11 +2506,11 @@ cdef class RandomState:
References
----------
- Abramowitz, M. and Stegun, I. A. (ed.), *Handbook of Mathematical
- Functions*, New York: Dover, 1965.
-
- von Mises, R., *Mathematical Theory of Probability and Statistics*,
- New York: Academic Press, 1964.
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] von Mises, R., "Mathematical Theory of Probability
+ and Statistics", New York: Academic Press, 1964.
Examples
--------
@@ -2537,7 +2545,8 @@ cdef class RandomState:
PyErr_Clear()
omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
+ okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE,
+ NPY_ARRAY_ALIGNED)
if np.any(np.less(okappa, 0.0)):
raise ValueError("kappa < 0")
return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa,
@@ -2547,21 +2556,25 @@ cdef class RandomState:
"""
pareto(a, size=None)
- Draw samples from a Pareto II or Lomax distribution with specified shape.
-
- The Lomax or Pareto II distribution is a shifted Pareto distribution. The
- classical Pareto distribution can be obtained from the Lomax distribution
- by adding 1 and multiplying by the scale parameter ``m`` (see Notes).
- The smallest value of the Lomax distribution is zero while for the
- classical Pareto distribution it is ``mu``, where the standard Pareto
- distribution has location ``mu = 1``. Lomax can also be considered as a
- simplified version of the Generalized Pareto distribution (available in
- SciPy), with the scale set to one and the location set to zero.
-
- The Pareto distribution must be greater than zero, and is unbounded above.
- It is also known as the "80-20 rule". In this distribution, 80 percent of
- the weights are in the lowest 20 percent of the range, while the other 20
- percent fill the remaining 80 percent of the range.
+ Draw samples from a Pareto II or Lomax distribution with
+ specified shape.
+
+ The Lomax or Pareto II distribution is a shifted Pareto
+ distribution. The classical Pareto distribution can be
+ obtained from the Lomax distribution by adding 1 and
+ multiplying by the scale parameter ``m`` (see Notes). The
+ smallest value of the Lomax distribution is zero while for the
+ classical Pareto distribution it is ``mu``, where the standard
+ Pareto distribution has location ``mu = 1``. Lomax can also
+ be considered as a simplified version of the Generalized
+ Pareto distribution (available in SciPy), with the scale set
+ to one and the location set to zero.
+
+ The Pareto distribution must be greater than zero, and is
+ unbounded above. It is also known as the "80-20 rule". In
+ this distribution, 80 percent of the weights are in the lowest
+ 20 percent of the range, while the other 20 percent fill the
+ remaining 80 percent of the range.
Parameters
----------
@@ -2587,14 +2600,16 @@ cdef class RandomState:
where :math:`a` is the shape and :math:`m` the scale.
- The Pareto distribution, named after the Italian economist Vilfredo Pareto,
- is a power law probability distribution useful in many real world problems.
- Outside the field of economics it is generally referred to as the Bradford
- distribution. Pareto developed the distribution to describe the
- distribution of wealth in an economy. It has also found use in insurance,
- web page access statistics, oil field sizes, and many other problems,
- including the download frequency for projects in Sourceforge [1]_. It is
- one of the so-called "fat-tailed" distributions.
+ The Pareto distribution, named after the Italian economist
+ Vilfredo Pareto, is a power law probability distribution
+ useful in many real world problems. Outside the field of
+ economics it is generally referred to as the Bradford
+ distribution. Pareto developed the distribution to describe
+ the distribution of wealth in an economy. It has also found
+ use in insurance, web page access statistics, oil field sizes,
+ and many other problems, including the download frequency for
+ projects in Sourceforge [1]_. It is one of the so-called
+ "fat-tailed" distributions.
References
@@ -2645,7 +2660,7 @@ cdef class RandomState:
"""
weibull(a, size=None)
- Weibull distribution.
+ Draw samples from a Weibull distribution.
Draw samples from a 1-parameter Weibull distribution with the given
shape parameter `a`.
@@ -2666,6 +2681,10 @@ cdef class RandomState:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
+ Returns
+ -------
+ samples : ndarray
+
See Also
--------
scipy.stats.distributions.weibull_max
@@ -2675,10 +2694,11 @@ cdef class RandomState:
Notes
-----
- The Weibull (or Type III asymptotic extreme value distribution for smallest
- values, SEV Type III, or Rosin-Rammler distribution) is one of a class of
- Generalized Extreme Value (GEV) distributions used in modeling extreme
- value problems. This class includes the Gumbel and Frechet distributions.
+ The Weibull (or Type III asymptotic extreme value distribution
+ for smallest values, SEV Type III, or Rosin-Rammler
+ distribution) is one of a class of Generalized Extreme Value
+ (GEV) distributions used in modeling extreme value problems.
+ This class includes the Gumbel and Frechet distributions.
The probability density for the Weibull distribution is
@@ -2695,12 +2715,13 @@ cdef class RandomState:
References
----------
- .. [1] Waloddi Weibull, Professor, Royal Technical University, Stockholm,
+ .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
1939 "A Statistical Theory Of The Strength Of Materials",
Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
Generalstabens Litografiska Anstalts Forlag, Stockholm.
- .. [2] Waloddi Weibull, 1951 "A Statistical Distribution Function of Wide
- Applicability", Journal Of Applied Mechanics ASME Paper.
+ .. [2] Waloddi Weibull, "A Statistical Distribution Function of
+ Wide Applicability", Journal Of Applied Mechanics ASME Paper
+ 1951.
.. [3] Wikipedia, "Weibull distribution",
http://en.wikipedia.org/wiki/Weibull_distribution
@@ -2770,7 +2791,7 @@ cdef class RandomState:
Raises
------
ValueError
- If a<1.
+ If a < 1.
Notes
-----
@@ -2789,10 +2810,10 @@ cdef class RandomState:
----------
.. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
in economics and actuarial sciences", Wiley, 2003.
- .. [2] Heckert, N. A. and Filliben, James J. (2003). NIST Handbook 148:
+ .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
Dataplot Reference Manual, Volume 2: Let Subcommands and Library
- Functions", National Institute of Standards and Technology Handbook
- Series, June 2003.
+ Functions", National Institute of Standards and Technology
+ Handbook Series, June 2003.
http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
Examples
@@ -2869,15 +2890,19 @@ cdef class RandomState:
Parameters
----------
- loc : float
+ loc : float, optional
The position, :math:`\\mu`, of the distribution peak.
- scale : float
+ scale : float, optional
:math:`\\lambda`, the exponential decay.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
+ Returns
+ -------
+ samples : ndarray or float
+
Notes
-----
It has the probability density function
@@ -2885,28 +2910,24 @@ cdef class RandomState:
.. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}
\\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).
- The first law of Laplace, from 1774, states that the frequency of an error
- can be expressed as an exponential function of the absolute magnitude of
- the error, which leads to the Laplace distribution. For many problems in
- Economics and Health sciences, this distribution seems to model the data
- better than the standard Gaussian distribution
-
+ The first law of Laplace, from 1774, states that the frequency
+ of an error can be expressed as an exponential function of the
+ absolute magnitude of the error, which leads to the Laplace
+ distribution. For many problems in economics and health
+ sciences, this distribution seems to model the data better
+ than the standard Gaussian distribution.
References
----------
- .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). Handbook of Mathematical
- Functions with Formulas, Graphs, and Mathematical Tables, 9th
- printing. New York: Dover, 1972.
-
- .. [2] The Laplace distribution and generalizations
- By Samuel Kotz, Tomasz J. Kozubowski, Krzysztof Podgorski,
- Birkhauser, 2001.
-
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+        .. [2] Kotz, Samuel, et al. "The Laplace Distribution and
+               Generalizations," Birkhauser, 2001.
.. [3] Weisstein, Eric W. "Laplace Distribution."
From MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LaplaceDistribution.html
-
- .. [4] Wikipedia, "Laplace distribution",
+ .. [4] Wikipedia, "Laplace Distribution",
http://en.wikipedia.org/wiki/Laplace_distribution
Examples
@@ -2955,11 +2976,11 @@ cdef class RandomState:
"""
gumbel(loc=0.0, scale=1.0, size=None)
- Gumbel distribution.
+ Draw samples from a Gumbel distribution.
- Draw samples from a Gumbel distribution with specified location and scale.
- For more information on the Gumbel distribution, see Notes and References
- below.
+ Draw samples from a Gumbel distribution with specified location and
+ scale. For more information on the Gumbel distribution, see
+ Notes and References below.
Parameters
----------
@@ -2974,59 +2995,54 @@ cdef class RandomState:
Returns
-------
- out : ndarray
- The samples
+ samples : ndarray or scalar
See Also
--------
scipy.stats.gumbel_l
scipy.stats.gumbel_r
scipy.stats.genextreme
- probability density function, distribution, or cumulative density
- function, etc. for each of the above
weibull
Notes
-----
- The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme Value
- Type I) distribution is one of a class of Generalized Extreme Value (GEV)
- distributions used in modeling extreme value problems. The Gumbel is a
- special case of the Extreme Value Type I distribution for maximums from
- distributions with "exponential-like" tails.
+ The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
+ Value Type I) distribution is one of a class of Generalized Extreme
+ Value (GEV) distributions used in modeling extreme value problems.
+ The Gumbel is a special case of the Extreme Value Type I distribution
+        for maxima from distributions with "exponential-like" tails.
The probability density for the Gumbel distribution is
.. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/
\\beta}},
- where :math:`\\mu` is the mode, a location parameter, and :math:`\\beta` is
- the scale parameter.
+ where :math:`\\mu` is the mode, a location parameter, and
+ :math:`\\beta` is the scale parameter.
The Gumbel (named for German mathematician Emil Julius Gumbel) was used
very early in the hydrology literature, for modeling the occurrence of
- flood events. It is also used for modeling maximum wind speed and rainfall
- rates. It is a "fat-tailed" distribution - the probability of an event in
- the tail of the distribution is larger than if one used a Gaussian, hence
- the surprisingly frequent occurrence of 100-year floods. Floods were
- initially modeled as a Gaussian process, which underestimated the frequency
- of extreme events.
-
+ flood events. It is also used for modeling maximum wind speed and
+ rainfall rates. It is a "fat-tailed" distribution - the probability of
+ an event in the tail of the distribution is larger than if one used a
+ Gaussian, hence the surprisingly frequent occurrence of 100-year
+ floods. Floods were initially modeled as a Gaussian process, which
+ underestimated the frequency of extreme events.
It is one of a class of extreme value distributions, the Generalized
Extreme Value (GEV) distributions, which also includes the Weibull and
Frechet.
- The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance of
- :math:`\\frac{\\pi^2}{6}\\beta^2`.
+ The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance
+ of :math:`\\frac{\\pi^2}{6}\\beta^2`.
References
----------
- Gumbel, E. J., *Statistics of Extremes*, New York: Columbia University
- Press, 1958.
-
- Reiss, R.-D. and Thomas, M., *Statistical Analysis of Extreme Values from
- Insurance, Finance, Hydrology and Other Fields*, Basel: Birkhauser Verlag,
- 2001.
+ .. [1] Gumbel, E. J., "Statistics of Extremes,"
+ New York: Columbia University Press, 1958.
+ .. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
+ Values from Insurance, Finance, Hydrology and Other Fields,"
+ Basel: Birkhauser Verlag, 2001.
Examples
--------
@@ -3089,9 +3105,9 @@ cdef class RandomState:
"""
logistic(loc=0.0, scale=1.0, size=None)
- Draw samples from a Logistic distribution.
+ Draw samples from a logistic distribution.
- Samples are drawn from a Logistic distribution with specified
+ Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
Parameters
@@ -3131,14 +3147,14 @@ cdef class RandomState:
References
----------
- .. [1] Reiss, R.-D. and Thomas M. (2001), Statistical Analysis of Extreme
- Values, from Insurance, Finance, Hydrology and Other Fields,
- Birkhauser Verlag, Basel, pp 132-133.
+ .. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
+ Extreme Values, from Insurance, Finance, Hydrology and Other
+ Fields," Birkhauser Verlag, Basel, pp 132-133.
.. [2] Weisstein, Eric W. "Logistic Distribution." From
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/LogisticDistribution.html
.. [3] Wikipedia, "Logistic-distribution",
- http://en.wikipedia.org/wiki/Logistic-distribution
+ http://en.wikipedia.org/wiki/Logistic_distribution
Examples
--------
@@ -3180,7 +3196,7 @@ cdef class RandomState:
"""
lognormal(mean=0.0, sigma=1.0, size=None)
- Return samples drawn from a log-normal distribution.
+ Draw samples from a log-normal distribution.
Draw samples from a log-normal distribution with specified mean,
standard deviation, and array shape. Note that the mean and standard
@@ -3228,12 +3244,12 @@ cdef class RandomState:
References
----------
- Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal Distributions
- across the Sciences: Keys and Clues," *BioScience*, Vol. 51, No. 5,
- May, 2001. http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
-
- Reiss, R.D. and Thomas, M., *Statistical Analysis of Extreme Values*,
- Basel: Birkhauser Verlag, 2001, pp. 31-32.
+ .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
+ Distributions across the Sciences: Keys and Clues,"
+ BioScience, Vol. 51, No. 5, May, 2001.
+ http://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
+ .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
+ Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
Examples
--------
@@ -3257,7 +3273,8 @@ cdef class RandomState:
>>> plt.show()
Demonstrate that taking the products of random samples from a uniform
- distribution can be fit well by a log-normal probability density function.
+ distribution can be fit well by a log-normal probability density
+ function.
>>> # Generate a thousand samples: each is the product of 100 random
>>> # values, drawn from a normal distribution.
@@ -3331,10 +3348,10 @@ cdef class RandomState:
References
----------
- .. [1] Brighton Webs Ltd., Rayleigh Distribution,
- http://www.brighton-webs.co.uk/distributions/rayleigh.asp
+ .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
+ http://www.brighton-webs.co.uk/distributions/rayleigh.asp
.. [2] Wikipedia, "Rayleigh distribution"
- http://en.wikipedia.org/wiki/Rayleigh_distribution
+ http://en.wikipedia.org/wiki/Rayleigh_distribution
Examples
--------
@@ -3379,18 +3396,16 @@ cdef class RandomState:
"""
wald(mean, scale, size=None)
- Draw samples from a Wald, or Inverse Gaussian, distribution.
+ Draw samples from a Wald, or inverse Gaussian, distribution.
As the scale approaches infinity, the distribution becomes more like a
- Gaussian.
-
- Some references claim that the Wald is an Inverse Gaussian with mean=1, but
- this is by no means universal.
+ Gaussian. Some references claim that the Wald is an inverse Gaussian
+ with mean equal to 1, but this is by no means universal.
- The Inverse Gaussian distribution was first studied in relationship to
- Brownian motion. In 1956 M.C.K. Tweedie used the name Inverse Gaussian
- because there is an inverse relationship between the time to cover a unit
- distance and distance covered in unit time.
+ The inverse Gaussian distribution was first studied in relationship to
+ Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
+ because there is an inverse relationship between the time to cover a
+ unit distance and distance covered in unit time.
Parameters
----------
@@ -3415,20 +3430,20 @@ cdef class RandomState:
.. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
\\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}
- As noted above the Inverse Gaussian distribution first arise from attempts
- to model Brownian Motion. It is also a competitor to the Weibull for use in
- reliability modeling and modeling stock returns and interest rate
- processes.
+        As noted above, the inverse Gaussian distribution first arose
+ from attempts to model Brownian motion. It is also a
+ competitor to the Weibull for use in reliability modeling and
+ modeling stock returns and interest rate processes.
References
----------
.. [1] Brighton Webs Ltd., Wald Distribution,
- http://www.brighton-webs.co.uk/distributions/wald.asp
+ http://www.brighton-webs.co.uk/distributions/wald.asp
.. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
- Distribution: Theory : Methodology, and Applications", CRC Press,
- 1988.
+           Distribution: Theory, Methodology, and Applications", CRC Press,
+ 1988.
.. [3] Wikipedia, "Wald distribution"
- http://en.wikipedia.org/wiki/Wald_distribution
+ http://en.wikipedia.org/wiki/Wald_distribution
Examples
--------
@@ -3468,9 +3483,10 @@ cdef class RandomState:
Draw samples from the triangular distribution.
- The triangular distribution is a continuous probability distribution with
- lower limit left, peak at mode, and upper limit right. Unlike the other
- distributions, these parameters directly define the shape of the pdf.
+ The triangular distribution is a continuous probability
+ distribution with lower limit left, peak at mode, and upper
+ limit right. Unlike the other distributions, these parameters
+ directly define the shape of the pdf.
Parameters
----------
@@ -3493,7 +3509,7 @@ cdef class RandomState:
Notes
-----
- The probability density function for the Triangular distribution is
+ The probability density function for the triangular distribution is
.. math:: P(x;l, m, r) = \\begin{cases}
\\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
@@ -3501,14 +3517,15 @@ cdef class RandomState:
0& \\text{otherwise}.
\\end{cases}
- The triangular distribution is often used in ill-defined problems where the
- underlying distribution is not known, but some knowledge of the limits and
- mode exists. Often it is used in simulations.
+ The triangular distribution is often used in ill-defined
+ problems where the underlying distribution is not known, but
+ some knowledge of the limits and mode exists. Often it is used
+ in simulations.
References
----------
.. [1] Wikipedia, "Triangular distribution"
- http://en.wikipedia.org/wiki/Triangular_distribution
+ http://en.wikipedia.org/wiki/Triangular_distribution
Examples
--------
@@ -3533,8 +3550,8 @@ cdef class RandomState:
raise ValueError("mode > right")
if fleft == fright:
raise ValueError("left == right")
- return cont3_array_sc(self.internal_state, rk_triangular, size, fleft,
- fmode, fright, self.lock)
+ return cont3_array_sc(self.internal_state, rk_triangular, size,
+ fleft, fmode, fright, self.lock)
PyErr_Clear()
oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
@@ -3557,7 +3574,7 @@ cdef class RandomState:
Draw samples from a binomial distribution.
- Samples are drawn from a Binomial distribution with specified
+ Samples are drawn from a binomial distribution with specified
parameters, n trials and p probability of success where
n an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
@@ -3585,7 +3602,7 @@ cdef class RandomState:
Notes
-----
- The probability density for the Binomial distribution is
+ The probability density for the binomial distribution is
.. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},
@@ -3618,7 +3635,7 @@ cdef class RandomState:
--------
Draw samples from the distribution:
- >>> n, p = 10, .5 # number of trials, probability of each trial
+ >>> n, p = 10, .5 # number of trials, probability of each trial
>>> s = np.random.binomial(n, p, 1000)
# result of flipping a coin 10 times, tested 1000 times.
@@ -3629,8 +3646,8 @@ cdef class RandomState:
Let's do 20,000 trials of the model, and count the number that
generate zero positive results.
- >>> sum(np.random.binomial(9,0.1,20000)==0)/20000.
- answer = 0.38885, or 38%.
+ >>> sum(np.random.binomial(9, 0.1, 20000) == 0)/20000.
+        # answer = 0.38885, or about 39%.
"""
cdef ndarray on, op
@@ -3668,9 +3685,9 @@ cdef class RandomState:
"""
negative_binomial(n, p, size=None)
- Draw samples from a negative_binomial distribution.
+ Draw samples from a negative binomial distribution.
- Samples are drawn from a negative_Binomial distribution with specified
+ Samples are drawn from a negative binomial distribution with specified
parameters, `n` trials and `p` probability of success where `n` is an
integer > 0 and `p` is in the interval [0, 1].
@@ -3692,19 +3709,19 @@ cdef class RandomState:
Notes
-----
- The probability density for the Negative Binomial distribution is
+ The probability density for the negative binomial distribution is
.. math:: P(N;n,p) = \\binom{N+n-1}{n-1}p^{n}(1-p)^{N},
- where :math:`n-1` is the number of successes, :math:`p` is the probability
- of success, and :math:`N+n-1` is the number of trials.
+ where :math:`n-1` is the number of successes, :math:`p` is the
+ probability of success, and :math:`N+n-1` is the number of trials.
+ The negative binomial distribution gives the probability of n-1
+ successes and N failures in N+n-1 trials, and success on the (N+n)th
+ trial.
- The negative binomial distribution gives the probability of n-1 successes
- and N failures in N+n-1 trials, and success on the (N+n)th trial.
-
- If one throws a die repeatedly until the third time a "1" appears, then the
- probability distribution of the number of non-"1"s that appear before the
- third "1" is a negative binomial distribution.
+ If one throws a die repeatedly until the third time a "1" appears,
+ then the probability distribution of the number of non-"1"s that
+ appear before the third "1" is a negative binomial distribution.
References
----------
@@ -3718,11 +3735,11 @@ cdef class RandomState:
--------
Draw samples from the distribution:
- A real world example. A company drills wild-cat oil exploration wells, each
- with an estimated probability of success of 0.1. What is the probability
- of having one success for each successive well, that is what is the
- probability of a single success after drilling 5 wells, after 6 wells,
- etc.?
+        A real-world example. A company drills wildcat oil
+        exploration wells, each with an estimated probability of
+        success of 0.1. What is the probability of having one success
+        for each successive well, that is, what is the probability of a
+        single success after drilling 5 wells, after 6 wells, etc.?
>>> s = np.random.negative_binomial(1, 0.1, 100000)
>>> for i in range(1, 11):
@@ -3766,8 +3783,8 @@ cdef class RandomState:
Draw samples from a Poisson distribution.
- The Poisson distribution is the limit of the Binomial
- distribution for large N.
+ The Poisson distribution is the limit of the binomial distribution
+ for large N.
Parameters
----------
@@ -3779,6 +3796,11 @@ cdef class RandomState:
``m * n * k`` samples are drawn. Default is None, in which case a
single value is returned.
+ Returns
+ -------
+ samples : ndarray or scalar
+ The drawn samples, of shape *size*, if it was provided.
+
Notes
-----
The Poisson distribution
@@ -3787,7 +3809,8 @@ cdef class RandomState:
For events with an expected separation :math:`\\lambda` the Poisson
distribution :math:`f(k; \\lambda)` describes the probability of
- :math:`k` events occurring within the observed interval :math:`\\lambda`.
+ :math:`k` events occurring within the observed
+ interval :math:`\\lambda`.
Because the output is limited to the range of the C long type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
@@ -3795,10 +3818,11 @@ cdef class RandomState:
References
----------
- .. [1] Weisstein, Eric W. "Poisson Distribution." From MathWorld--A Wolfram
- Web Resource. http://mathworld.wolfram.com/PoissonDistribution.html
+ .. [1] Weisstein, Eric W. "Poisson Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/PoissonDistribution.html
.. [2] Wikipedia, "Poisson distribution",
- http://en.wikipedia.org/wiki/Poisson_distribution
+ http://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
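[Editor's note: a quick check of the Returns block added above (a sketch, not part of the patch):

    import numpy as np

    print(np.random.poisson(5, size=(2, 3)).shape)  # (2, 3): one draw per entry of `size`
    print(np.random.poisson(5))                     # a single scalar when size is None]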
@@ -3836,7 +3860,8 @@ cdef class RandomState:
raise ValueError("lam < 0")
if np.any(np.greater(olam, self.poisson_lam_max)):
raise ValueError("lam value too large.")
- return discd_array(self.internal_state, rk_poisson, size, olam, self.lock)
+ return discd_array(self.internal_state, rk_poisson, size, olam,
+ self.lock)
def zipf(self, a, size=None):
"""
@@ -3885,8 +3910,9 @@ cdef class RandomState:
References
----------
- Zipf, G. K., *Selected Studies of the Principle of Relative Frequency
- in Language*, Cambridge, MA: Harvard Univ. Press, 1932.
+ .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
+ Frequency in Language," Cambridge, MA: Harvard Univ. Press,
+ 1932.
Examples
--------
@@ -3991,7 +4017,8 @@ cdef class RandomState:
raise ValueError("p < 0.0")
if np.any(np.greater(op, 1.0)):
raise ValueError("p > 1.0")
- return discd_array(self.internal_state, rk_geometric, size, op, self.lock)
+ return discd_array(self.internal_state, rk_geometric, size, op,
+ self.lock)
def hypergeometric(self, ngood, nbad, nsample, size=None):
"""
@@ -3999,7 +4026,7 @@ cdef class RandomState:
Draw samples from a Hypergeometric distribution.
- Samples are drawn from a Hypergeometric distribution with specified
+ Samples are drawn from a hypergeometric distribution with specified
parameters, ngood (ways to make a good selection), nbad (ways to make
a bad selection), and nsample = number of items sampled, which is less
than or equal to the sum ngood + nbad.
@@ -4041,14 +4068,14 @@ cdef class RandomState:
Consider an urn with black and white marbles in it, ngood of them
black and nbad are white. If you draw nsample balls without
- replacement, then the Hypergeometric distribution describes the
+ replacement, then the hypergeometric distribution describes the
distribution of black balls in the drawn sample.
- Note that this distribution is very similar to the Binomial
+ Note that this distribution is very similar to the binomial
distribution, except that in this case, samples are drawn without
replacement, whereas in the Binomial case samples are drawn with
replacement (or the sample space is infinite). As the sample space
- becomes large, this distribution approaches the Binomial.
+ becomes large, this distribution approaches the binomial.
References
----------
@@ -4058,7 +4085,7 @@ cdef class RandomState:
MathWorld--A Wolfram Web Resource.
http://mathworld.wolfram.com/HypergeometricDistribution.html
.. [3] Wikipedia, "Hypergeometric-distribution",
- http://en.wikipedia.org/wiki/Hypergeometric-distribution
+ http://en.wikipedia.org/wiki/Hypergeometric_distribution
Examples
--------
@@ -4101,7 +4128,8 @@ cdef class RandomState:
ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED)
onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
- onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
+ onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG,
+ NPY_ARRAY_ALIGNED)
if np.any(np.less(ongood, 0)):
raise ValueError("ngood < 0")
if np.any(np.less(onbad, 0)):
@@ -4117,10 +4145,10 @@ cdef class RandomState:
"""
logseries(p, size=None)
- Draw samples from a Logarithmic Series distribution.
+ Draw samples from a logarithmic series distribution.
- Samples are drawn from a Log Series distribution with specified
- parameter, p (probability, 0 < p < 1).
+ Samples are drawn from a log series distribution with specified
+ shape parameter, 0 < ``p`` < 1.
Parameters
----------
@@ -4151,7 +4179,7 @@ cdef class RandomState:
where p = probability.
- The Log Series distribution is frequently used to represent species
+ The log series distribution is frequently used to represent species
richness and occurrence, first proposed by Fisher, Corbet, and
Williams in 1943 [2]. It may also be used to model the numbers of
occupants seen in cars [3].
@@ -4207,7 +4235,8 @@ cdef class RandomState:
raise ValueError("p <= 0.0")
if np.any(np.greater_equal(op, 1.0)):
raise ValueError("p >= 1.0")
- return discd_array(self.internal_state, rk_logseries, size, op, self.lock)
+ return discd_array(self.internal_state, rk_logseries, size, op,
+ self.lock)
# Multivariate distributions:
def multivariate_normal(self, mean, cov, size=None):
@@ -4286,16 +4315,15 @@ cdef class RandomState:
References
----------
- Papoulis, A., *Probability, Random Variables, and Stochastic Processes*,
- 3rd ed., New York: McGraw-Hill, 1991.
-
- Duda, R. O., Hart, P. E., and Stork, D. G., *Pattern Classification*,
- 2nd ed., New York: Wiley, 2001.
+ .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
+ Processes," 3rd ed., New York: McGraw-Hill, 1991.
+ .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
+ Classification," 2nd ed., New York: Wiley, 2001.
Examples
--------
>>> mean = (1, 2)
- >>> cov = [[1, 0], [1, 0]]
+ >>> cov = [[1, 0], [0, 1]]
>>> x = np.random.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
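[Editor's note: the corrected example matters; ``[[1, 0], [1, 0]]`` is not symmetric, so it is not a valid covariance matrix. A quick sanity check (a sketch, not part of the patch):

    import numpy as np

    bad = np.array([[1., 0.], [1., 0.]])
    print(np.allclose(bad, bad.T))        # False: not a covariance matrix

    cov = np.array([[1., 0.], [0., 1.]])  # the corrected, symmetric PSD matrix
    x = np.random.multivariate_normal((1, 2), cov, (3, 3))
    print(x.shape)                        # (3, 3, 2)]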
@@ -4370,8 +4398,8 @@ cdef class RandomState:
possible outcomes. An example of such an experiment is throwing a dice,
where the outcome can be 1 through 6. Each sample drawn from the
distribution represents `n` such experiments. Its values,
- ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the outcome
- was ``i``.
+ ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
+ outcome was ``i``.
Parameters
----------
@@ -4382,10 +4410,19 @@ cdef class RandomState:
should sum to 1 (however, the last element is always assumed to
account for the remaining probability, as long as
            ``sum(pvals[:-1]) <= 1``).
- size : tuple of ints
- Given a `size` of ``(M, N, K)``, then ``M*N*K`` samples are drawn,
- and the output shape becomes ``(M, N, K, p)``, since each sample
- has shape ``(p,)``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
Examples
--------
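[Editor's note: a small sketch of the shapes the rewritten `size` description implies (not part of the patch):

    import numpy as np

    out = np.random.multinomial(20, [1/6.] * 6, size=(2, 3))
    print(out.shape)         # (2, 3, 6): `size` plus one axis of length len(pvals)
    print(out.sum(axis=-1))  # every draw distributes all 20 trials]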
@@ -4465,13 +4502,15 @@ cdef class RandomState:
alpha : array
Parameter of the distribution (k dimension for sample of
dimension k).
- size : array
- Number of samples to draw.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
Returns
-------
        samples : ndarray
- The drawn samples, of shape (alpha.ndim, size).
+ The drawn samples, of shape (size, alpha.ndim).
Notes
-----
@@ -4496,8 +4535,8 @@ cdef class RandomState:
Taking an example cited in Wikipedia, this distribution can be used if
one wanted to cut strings (each of initial length 1.0) into K pieces
with different lengths, where each piece had, on average, a designated
- average length, but allowing some variation in the relative sizes of the
- pieces.
+ average length, but allowing some variation in the relative sizes of
+ the pieces.
>>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
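[Editor's note: the corrected sample shape is easy to confirm (a sketch, not part of the patch):

    import numpy as np

    s = np.random.dirichlet((10, 5, 3), 20)
    print(s.shape)        # (20, 3), i.e. (size, len(alpha)), as corrected above
    print(s.sum(axis=1))  # each sample lies on the simplex, so every row sums to 1]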
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index ccffd033e..1a5854e82 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import sys
from numpy.testing import (TestCase, run_module_suite, assert_,
assert_array_equal)
from numpy import random
@@ -21,6 +22,16 @@ class TestRegression(TestCase):
assert_(np.all(np.random.hypergeometric(3, 18, 11, size=10) < 4))
assert_(np.all(np.random.hypergeometric(18, 3, 11, size=10) > 0))
+ # Test for ticket #5623
+ args = [
+ (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
+ ]
+ is_64bits = sys.maxsize > 2**32
+ if is_64bits:
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
+ for arg in args:
+ assert_(np.random.hypergeometric(*arg) > 0)
+
def test_logseries_convergence(self):
# Test for ticket #923
N = 1000
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index b802f4472..fb1f507f0 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -84,10 +84,10 @@ def run_module_suite(file_to_run=None, argv=None):
Parameters
----------
- file_to_run: str, optional
+ file_to_run : str, optional
Path to test module, or None.
By default, run the module from which this function is called.
- argv: list of strings
+ argv : list of strings
Arguments to be passed to the nose test runner. ``argv[0]`` is
ignored. All command line arguments accepted by ``nosetests``
will work. If it is the default value None, sys.argv is used.
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index 756ea997e..68075fc3d 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -4,7 +4,12 @@ import warnings
import sys
import numpy as np
-from numpy.testing import *
+from numpy.testing import (
+ assert_equal, assert_array_equal, assert_almost_equal,
+ assert_array_almost_equal, build_err_msg, raises, assert_raises,
+ assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal,
+ assert_array_almost_equal_nulp, assert_array_max_ulp,
+ clear_and_catch_warnings, run_module_suite)
import unittest
class _GenericTest(object):
@@ -252,6 +257,7 @@ class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
assert_array_almost_equal(b, a)
assert_array_almost_equal(b, b)
+
class TestAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_almost_equal
@@ -688,5 +694,67 @@ class TestULP(unittest.TestCase):
self.assertRaises(AssertionError,
lambda: assert_array_max_ulp(nan, nzero,
maxulp=maxulp))
+
+def assert_warn_len_equal(mod, n_in_context):
+ mod_warns = mod.__warningregistry__
+ # Python 3.4 appears to clear any pre-existing warnings of the same type,
+ # when raising warnings inside a catch_warnings block. So, there is a
+ # warning generated by the tests within the context manager, but no
+ # previous warnings.
+ if 'version' in mod_warns:
+ assert_equal(len(mod_warns), 2) # including 'version'
+ else:
+ assert_equal(len(mod_warns), n_in_context)
+
+
+def _get_fresh_mod():
+ # Get this module, with warning registry empty
+ my_mod = sys.modules[__name__]
+ try:
+ my_mod.__warningregistry__.clear()
+ except AttributeError:
+ pass
+ return my_mod
+
+
+def test_clear_and_catch_warnings():
+ # Initial state of module, no warnings
+ my_mod = _get_fresh_mod()
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+ with clear_and_catch_warnings(modules=[my_mod]):
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_equal(my_mod.__warningregistry__, {})
+ # Without specified modules, don't clear warnings during context
+ with clear_and_catch_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_warn_len_equal(my_mod, 1)
+ # Confirm that specifying module keeps old warning, does not add new
+ with clear_and_catch_warnings(modules=[my_mod]):
+ warnings.simplefilter('ignore')
+ warnings.warn('Another warning')
+ assert_warn_len_equal(my_mod, 1)
+ # Another warning, no module spec does add to warnings dict, except on
+ # Python 3.4 (see comments in `assert_warn_len_equal`)
+ with clear_and_catch_warnings():
+ warnings.simplefilter('ignore')
+ warnings.warn('Another warning')
+ assert_warn_len_equal(my_mod, 2)
+
+
+class my_cacw(clear_and_catch_warnings):
+ class_modules = (sys.modules[__name__],)
+
+
+def test_clear_and_catch_warnings_inherit():
+ # Test can subclass and add default modules
+ my_mod = _get_fresh_mod()
+ with my_cacw():
+ warnings.simplefilter('ignore')
+ warnings.warn('Some warning')
+ assert_equal(my_mod.__warningregistry__, {})
+
+
if __name__ == '__main__':
run_module_suite()
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 1051288c2..4527a51d9 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -28,7 +28,7 @@ __all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose', 'IgnoreException']
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings']
verbose = 0
@@ -1413,6 +1413,8 @@ def nulp_diff(x, y, dtype=None):
first input array
y : array_like
second input array
+ dtype : dtype, optional
+ Data-type to convert `x` and `y` to if given. Default is None.
Returns
-------
@@ -1716,3 +1718,65 @@ def tempdir(*args, **kwargs):
tmpdir = mkdtemp(*args, **kwargs)
yield tmpdir
shutil.rmtree(tmpdir)
+
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+ """ Context manager that resets warning registry for catching warnings
+
+ Warnings can be slippery, because, whenever a warning is triggered, Python
+ adds a ``__warningregistry__`` member to the *calling* module. This makes
+ it impossible to retrigger the warning in this module, whatever you put in
+ the warnings filters. This context manager accepts a sequence of `modules`
+ as a keyword argument to its constructor and:
+
+ * stores and removes any ``__warningregistry__`` entries in given `modules`
+ on entry;
+ * resets ``__warningregistry__`` to its previous state on exit.
+
+ This makes it possible to trigger any warning afresh inside the context
+ manager without disturbing the state of warnings outside.
+
+ For compatibility with Python 3.0, please consider all arguments to be
+ keyword-only.
+
+ Parameters
+ ----------
+ record : bool, optional
+ Specifies whether warnings should be captured by a custom
+ implementation of ``warnings.showwarning()`` and be appended to a list
+ returned by the context manager. Otherwise None is returned by the
+ context manager. The objects appended to the list are arguments whose
+ attributes mirror the arguments to ``showwarning()``.
+ modules : sequence, optional
+ Sequence of modules for which to reset warnings registry on entry and
+ restore on exit
+
+ Examples
+ --------
+ >>> import warnings
+ >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
+ ... warnings.simplefilter('always')
+ ... # do something that raises a warning in np.core.fromnumeric
+ """
+ class_modules = ()
+
+ def __init__(self, record=False, modules=()):
+ self.modules = set(modules).union(self.class_modules)
+ self._warnreg_copies = {}
+ super(clear_and_catch_warnings, self).__init__(record=record)
+
+ def __enter__(self):
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod_reg = mod.__warningregistry__
+ self._warnreg_copies[mod] = mod_reg.copy()
+ mod_reg.clear()
+ return super(clear_and_catch_warnings, self).__enter__()
+
+ def __exit__(self, *exc_info):
+ super(clear_and_catch_warnings, self).__exit__(*exc_info)
+ for mod in self.modules:
+ if hasattr(mod, '__warningregistry__'):
+ mod.__warningregistry__.clear()
+ if mod in self._warnreg_copies:
+ mod.__warningregistry__.update(self._warnreg_copies[mod])
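[Editor's note: a minimal usage sketch, assuming only what this diff adds (the class and its export from `numpy.testing`):

    import sys
    import warnings
    from numpy.testing import clear_and_catch_warnings

    this_mod = sys.modules[__name__]

    with clear_and_catch_warnings(record=True, modules=[this_mod]) as caught:
        warnings.simplefilter('always')
        warnings.warn('demo')  # recorded; this module's __warningregistry__
                               # is restored when the context exits
    print(len(caught))         # 1]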
diff --git a/setup.py b/setup.py
index 7079c4cc4..7eed56e5c 100755
--- a/setup.py
+++ b/setup.py
@@ -213,6 +213,7 @@ def setup_package():
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
cmdclass={"sdist": sdist_checked},
+ package_data={'numpy.core': ['libopenblaspy.dll']},
)
# Run build
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index d9f1d48f1..b9a7ce7f4 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -1,4 +1,38 @@
/* -*- C -*- (not really, but good for syntax highlighting) */
+
+/*
+ * Copyright (c) 2005-2015, NumPy Developers.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ *
+ * * Neither the name of the NumPy Developers nor the names of any
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
#ifdef SWIGPYTHON
%{