-rw-r--r--  .gitattributes  3
-rw-r--r--  .travis.yml  3
-rw-r--r--  INSTALL.txt  63
-rw-r--r--  doc/release/1.10.0-notes.rst  57
-rw-r--r--  doc/release/1.9.1-notes.rst  33
-rw-r--r--  doc/source/reference/c-api.generalized-ufuncs.rst  66
-rw-r--r--  doc/source/reference/c-api.iterator.rst  2
-rw-r--r--  doc/source/reference/c-api.ufunc.rst  16
-rw-r--r--  doc/source/release.rst  1
-rw-r--r--  doc/source/user/c-info.python-as-glue.rst  619
-rw-r--r--  numpy/_import_tools.py  6
-rw-r--r--  numpy/core/bscript  28
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py  22
-rw-r--r--  numpy/core/fromnumeric.py  62
-rw-r--r--  numpy/core/include/numpy/ndarrayobject.h  21
-rw-r--r--  numpy/core/include/numpy/npy_common.h  12
-rw-r--r--  numpy/core/include/numpy/npy_cpu.h  34
-rw-r--r--  numpy/core/memmap.py  5
-rw-r--r--  numpy/core/numerictypes.py  2
-rw-r--r--  numpy/core/records.py  12
-rw-r--r--  numpy/core/setup.py  37
-rw-r--r--  numpy/core/setup_common.py  16
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src  6
-rw-r--r--  numpy/core/src/multiarray/buffer.c  29
-rw-r--r--  numpy/core/src/multiarray/datetime.c  5
-rw-r--r--  numpy/core/src/multiarray/item_selection.c  5
-rw-r--r--  numpy/core/src/multiarray/multiarray_tests.c.src  19
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c  9
-rw-r--r--  numpy/core/src/npymath/npy_math.c.src  6
-rw-r--r--  numpy/core/src/npymath/npy_math_complex.c.src  12
-rw-r--r--  numpy/core/src/npymath/npy_math_private.h  24
-rw-r--r--  numpy/core/src/private/npy_config.h  10
-rw-r--r--  numpy/core/src/umath/loops.c.src  182
-rw-r--r--  numpy/core/src/umath/ufunc_object.c  130
-rw-r--r--  numpy/core/src/umath/umath_tests.c.src  136
-rw-r--r--  numpy/core/tests/test_datetime.py  10
-rw-r--r--  numpy/core/tests/test_dtype.py  7
-rw-r--r--  numpy/core/tests/test_multiarray.py  71
-rw-r--r--  numpy/core/tests/test_nditer.py  7
-rw-r--r--  numpy/core/tests/test_numeric.py  14
-rw-r--r--  numpy/core/tests/test_records.py  9
-rw-r--r--  numpy/core/tests/test_ufunc.py  29
-rw-r--r--  numpy/core/tests/test_umath.py  41
-rw-r--r--  numpy/distutils/ccompiler.py  42
-rw-r--r--  numpy/distutils/command/build.py  8
-rw-r--r--  numpy/distutils/command/build_clib.py  13
-rw-r--r--  numpy/distutils/command/build_ext.py  17
-rw-r--r--  numpy/distutils/misc_util.py  49
-rw-r--r--  numpy/f2py/cb_rules.py  6
-rw-r--r--  numpy/f2py/cfuncs.py  126
-rwxr-xr-x  numpy/f2py/crackfortran.py  10
-rw-r--r--  numpy/f2py/rules.py  13
-rw-r--r--  numpy/f2py/setup.py  32
-rw-r--r--  numpy/f2py/src/fortranobject.c  84
-rw-r--r--  numpy/f2py/src/fortranobject.h  2
-rw-r--r--  numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c  62
-rw-r--r--  numpy/f2py/tests/src/regression/inout.f90  9
-rw-r--r--  numpy/f2py/tests/test_regression.py  32
-rw-r--r--  numpy/fft/fftpack_litemodule.c  32
-rw-r--r--  numpy/lib/arraysetops.py  10
-rw-r--r--  numpy/lib/format.py  45
-rw-r--r--  numpy/lib/function_base.py  120
-rw-r--r--  numpy/lib/npyio.py  8
-rw-r--r--  numpy/lib/tests/data/python3.npy  bin 0 -> 96 bytes
-rw-r--r--  numpy/lib/tests/data/win64python2.npy  bin 0 -> 96 bytes
-rw-r--r--  numpy/lib/tests/test_format.py  10
-rw-r--r--  numpy/lib/tests/test_function_base.py  38
-rw-r--r--  numpy/lib/tests/test_io.py  11
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py  16
-rw-r--r--  numpy/lib/utils.py  158
-rw-r--r--  numpy/linalg/linalg.py  236
-rw-r--r--  numpy/linalg/tests/test_linalg.py  133
-rw-r--r--  numpy/ma/core.py  58
-rw-r--r--  numpy/ma/extras.py  13
-rw-r--r--  numpy/ma/tests/test_core.py  7
-rw-r--r--  numpy/ma/tests/test_extras.py  3
-rw-r--r--  numpy/polynomial/hermite.py  62
-rw-r--r--  numpy/polynomial/hermite_e.py  60
-rw-r--r--  numpy/random/mtrand/mtrand.pyx  10
-rw-r--r--  numpy/testing/__init__.py  3
-rw-r--r--  numpy/testing/utils.py  2
-rwxr-xr-x  tools/travis-test.sh  3
82 files changed, 2130 insertions, 1294 deletions
diff --git a/.gitattributes b/.gitattributes
index 8f3332044..82162cb8d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,2 +1,5 @@
* text=auto
tools/win32build/nsis_scripts/*.nsi.in eol=crlf
+
+# Numerical data files
+numpy/lib/tests/data/*.npy binary
diff --git a/.travis.yml b/.travis.yml
index e0355aa6a..b39d335ad 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -31,7 +31,8 @@ before_install:
# pip install coverage
- python -V
- pip install --upgrade pip setuptools
- - pip install --find-links http://wheels.astropy.org/ --find-links http://wheels2.astropy.org/ --use-wheel Cython
+ # Speed up install by not compiling Cython
+ - pip install --install-option="--no-cython-compile" Cython
- sudo apt-get install -qq libatlas-dev libatlas-base-dev gfortran
- popd
diff --git a/INSTALL.txt b/INSTALL.txt
index 278aab9ef..9c12ba602 100644
--- a/INSTALL.txt
+++ b/INSTALL.txt
@@ -35,6 +35,21 @@ Building NumPy requires the following software installed:
Python__ http://www.python.org
nose__ http://somethingaboutorange.com/mrl/projects/nose/
+Basic Installation
+==================
+
+To install numpy, run:
+
+ python setup.py build -j 4 install --prefix $HOME/.local
+
+This will compile numpy with 4 parallel build jobs and install it into the
+specified prefix. To perform an in-place build that can be run from the
+source folder, run:
+
+ python setup.py build_ext --inplace -j 4
+
+The number of build jobs can also be specified via the environment variable
+NPY_NUM_BUILD_JOBS.
+
Fortran ABI mismatch
====================
@@ -65,40 +80,34 @@ this means that g77 has been used. If libgfortran.so is a dependency,
gfortran has been used. If both are dependencies, this means both have been
used, which is almost always a very bad idea.
-Building with ATLAS support
-===========================
-
-Ubuntu 8.10 (Intrepid)
-----------------------
-
-You can install the necessary packages for optimized ATLAS with this
-command:
-
- sudo apt-get install libatlas-base-dev
-
-If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should
-also install the corresponding package for optimal performance. For
-example, for SSE2:
+Building with optimized BLAS support
+====================================
- sudo apt-get install libatlas3gf-sse2
+Ubuntu/Debian
+-------------
-*NOTE*: if you build your own atlas, Intrepid changed its default fortran
-compiler to gfortran. So you should rebuild everything from scratch,
-including lapack, to use it on Intrepid.
+In order to build with optimized BLAS support, a development package
+providing BLAS must be installed. Options include, for example:
-Ubuntu 8.04 and lower
----------------------
+ - libblas-dev
+   reference BLAS, not very optimized
+ - libatlas-base-dev
+   generic tuned ATLAS; it is recommended to tune it to the available hardware,
+   see /usr/share/doc/libatlas3-base/README.Debian for instructions
+ - libopenblas-base
+   fast and runtime detected, so no tuning is required; but as of version 2.11
+   it still suffers from correctness issues on some CPUs, so test your
+   applications thoroughly.
-You can install the necessary packages for optimized ATLAS with this
-command:
+The actual implementation can also be exchanged after installation via the
+alternatives mechanism:
- sudo apt-get install atlas3-base-dev
+ update-alternatives --config libblas.so.3
+ update-alternatives --config liblapack.so.3
-If you have a recent CPU with SIMD support (SSE, SSE2, etc...), you should
-also install the corresponding package for optimal performance. For
-example, for SSE2:
+Or by preloading a specific BLAS library with
+ LD_PRELOAD=/usr/lib/atlas-base/atlas/libblas.so.3 python ...
- sudo apt-get install atlas3-sse2
Windows 64 bits notes
=====================
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 35eeab1cb..7b473f3b9 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -6,6 +6,11 @@ This release supports Python 2.6 - 2.7 and 3.2 - 3.4.
Highlights
==========
+* numpy.distutils now supports parallel compilation via the --jobs/-j argument
+  passed to setup.py build.
+* Addition of *np.linalg.multi_dot*: compute the dot product of two or more
+ arrays in a single function call, while automatically selecting the fastest
+ evaluation order.
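
A quick sketch of the new function (assuming NumPy 1.10; the shapes are
chosen only so that the evaluation order matters):

.. code-block:: python

    import numpy as np

    A = np.random.random((10000, 100))
    B = np.random.random((100, 1000))
    C = np.random.random((1000, 5))
    D = np.random.random((5, 333))
    # multi_dot picks the cheapest parenthesization automatically;
    # the result equals A.dot(B).dot(C).dot(D)
    result = np.linalg.multi_dot([A, B, C, D])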
Dropped Support
@@ -17,6 +22,8 @@ Dropped Support
Future Changes
==============
+* The SafeEval class will be removed.
+* The alterdot and restoredot functions will be removed.
Compatibility notes
@@ -27,30 +34,45 @@ NPY_RELAXED_STRIDE_CHECKING is now true by default.
New Features
============
-`np.cbrt` to compute cube root for real floats
+*np.cbrt* to compute cube root for real floats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`np.cbrt` wraps the C99 cube root function `cbrt`.
-Compared to `np.power(x, 1./3.)` it is well defined for negative real floats
+*np.cbrt* wraps the C99 cube root function *cbrt*.
+Compared to *np.power(x, 1./3.)* it is well defined for negative real floats
and a bit faster.
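
A minimal sketch of the difference (assuming NumPy 1.10):

.. code-block:: python

    import numpy as np

    np.cbrt([-8., 27.])   # array([-2.,  3.]) -- defined for negative reals
    np.power(-8., 1./3.)  # nan, with an invalid-value warning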
+numpy.distutils now allows parallel compilation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+By passing *--jobs=n* or *-j n* to *setup.py build*, the compilation of
+extensions is now performed in *n* parallel processes.
+The parallelization is limited to the files within one extension, so projects
+using Cython will not benefit, because Cython builds each extension from a
+single file.
+
Improvements
============
-`np.digitize` using binary search
+*np.digitize* using binary search
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`np.digitize` is now implemented in terms of `np.searchsorted`. This means
+*np.digitize* is now implemented in terms of *np.searchsorted*. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
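
A short sketch of the lifted 1-d restriction (assuming NumPy 1.10):

.. code-block:: python

    import numpy as np

    bins = np.array([0., 1., 2.5, 4., 10.])
    x = np.array([[1.2, 9.9],
                  [0.2, 3.0]])  # 2-d input is now accepted
    np.digitize(x, bins)        # array([[2, 4], [1, 3]]), same shape as x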
-`np.poly` now casts integer inputs to float
+*np.poly* now casts integer inputs to float
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-`np.poly` will now cast 1-dimensional input arrays of integer type to double
+*np.poly* will now cast 1-dimensional input arrays of integer type to double
precision floating point, to prevent integer overflow when computing the monic
polynomial. It is still possible to obtain higher precision results by
passing in an array of object type, filled e.g. with Python ints.
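
For instance (a sketch; the Wilkinson-style roots are arbitrary):

.. code-block:: python

    import numpy as np

    # integer roots are cast to float64 internally, avoiding overflow
    np.poly(np.arange(1, 21))
    # an object array keeps exact Python-int arithmetic instead
    np.poly(np.arange(1, 21).astype(object))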
+*np.interp* can now be used with periodic functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+*np.interp* now has a new parameter *period* that supplies the period of the
+input data *xp*. In that case, the input data are properly normalized to the
+given period, and one end point is added at each extremity of *xp* in order
+to close the previous and the next period cycles, resulting in the correct
+interpolation behavior.
+
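A brief sketch of the new keyword (the sample values are made up):

.. code-block:: python

    import numpy as np

    x = [-180, -170, -185, 185, -10, -5, 0, 365]  # query points (degrees)
    xp = [190, -190, 350, -350]                   # unordered sample points
    fp = [5, 10, 3, 4]
    np.interp(x, xp, fp, period=360)  # xp is wrapped onto one 360-degree cycle
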
Changes
=======
@@ -61,11 +83,28 @@ The cblas versions of dot, inner, and vdot have been integrated into
the multiarray module. In particular, vdot is now a multiarray function,
which it was not before.
+stricter check of gufunc signature compliance
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Inputs to generalized universal functions are now more strictly checked
+against the function's signature: all core dimensions are now required to
+be present in input arrays; core dimensions with the same label must have
+the exact same size; and output core dimensions must be specified, either
+by an input core dimension with the same label or by a passed-in output array.
+
Deprecations
============
-alterdot, restoredot Deprecated
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+SafeEval
+~~~~~~~~
+The SafeEval class in numpy/lib/utils.py is deprecated and will be removed
+in the next release.
+
+alterdot, restoredot
+~~~~~~~~~~~~~~~~~~~~
The alterdot and restoredot functions no longer do anything, and are
deprecated.
+
+pkgload, PackageLoader
+~~~~~~~~~~~~~~~~~~~~~~
+These ways of loading packages are now deprecated.
diff --git a/doc/release/1.9.1-notes.rst b/doc/release/1.9.1-notes.rst
new file mode 100644
index 000000000..a72e71aae
--- /dev/null
+++ b/doc/release/1.9.1-notes.rst
@@ -0,0 +1,33 @@
+NumPy 1.9.1 Release Notes
+*************************
+
+This is a bugfix only release in the 1.9.x series.
+
+Issues fixed
+============
+
+* gh-5184: restore linear edge behaviour of gradient to what it was in < 1.9.
+  The second order behaviour is available via the `edge_order` keyword.
+* gh-4007: workaround Accelerate sgemv crash on OSX 10.9
+* gh-5100: restore object dtype inference from iterable objects without `len()`
+* gh-5163: avoid gcc-4.1.2 (red hat 5) miscompilation causing a crash
+* gh-5138: fix nanmedian on arrays containing inf
+* gh-5240: fix not returning out array from ufuncs with subok=False set
+* gh-5203: copy inherited masks in MaskedArray.__array_finalize__
+* gh-2317: genfromtxt did not handle filling_values=0 correctly
+* gh-5067: restore api of npy_PyFile_DupClose in python2
+* gh-5063: cannot convert invalid sequence index to tuple
+* gh-5082: Segmentation fault with argmin() on unicode arrays
+* gh-5095: don't propagate subtypes from np.where
+* gh-5104: np.inner segfaults with SciPy's sparse matrices
+* gh-5251: Issue with fromarrays not using correct format for unicode arrays
+* gh-5136: Import dummy_threading if importing threading fails
+* gh-5148: Make numpy import when run with Python flag '-OO'
+* gh-5147: Einsum double contraction in particular order causes ValueError
+* gh-479: Make f2py work with intent(in out)
+* gh-5170: Make python2 .npy files readable in python3
+* gh-5027: Use 'll' as the default length specifier for long long
+* gh-4896: fix build error with MSVC 2013 caused by C99 complex support
+* gh-4465: Make PyArray_PutTo respect writeable flag
+* gh-5225: fix crash when using arange on datetime without dtype set
+* gh-5231: fix build in c99 mode
diff --git a/doc/source/reference/c-api.generalized-ufuncs.rst b/doc/source/reference/c-api.generalized-ufuncs.rst
index 14f33efcb..92dc8aec0 100644
--- a/doc/source/reference/c-api.generalized-ufuncs.rst
+++ b/doc/source/reference/c-api.generalized-ufuncs.rst
@@ -18,30 +18,52 @@ arguments is called the "signature" of a ufunc. For example, the
ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs
and one scalar output.
-Another example is the function ``inner1d(a,b)`` with a signature of
-``(i),(i)->()``. This applies the inner product along the last axis of
+Another example is the function ``inner1d(a, b)`` with a signature of
+``(i),(i)->()``. This applies the inner product along the last axis of
each input, but keeps the remaining indices intact.
-For example, where ``a`` is of shape ``(3,5,N)``
-and ``b`` is of shape ``(5,N)``, this will return an output of shape ``(3,5)``.
+For example, where ``a`` is of shape ``(3, 5, N)`` and ``b`` is of shape
+``(5, N)``, this will return an output of shape ``(3,5)``.
The underlying elementary function is called ``3 * 5`` times. In the
signature, we specify one core dimension ``(i)`` for each input and zero core
dimensions ``()`` for the output, since it takes two 1-d arrays and
returns a scalar. By using the same name ``i``, we specify that the two
-corresponding dimensions should be of the same size (or one of them is
-of size 1 and will be broadcasted).
+corresponding dimensions should be of the same size.
The dimensions beyond the core dimensions are called "loop" dimensions. In
-the above example, this corresponds to ``(3,5)``.
-
-The usual numpy "broadcasting" rules apply, where the signature
-determines how the dimensions of each input/output object are split
-into core and loop dimensions:
-
-#. While an input array has a smaller dimensionality than the corresponding
- number of core dimensions, 1's are pre-pended to its shape.
+the above example, this corresponds to ``(3, 5)``.
+
+The signature determines how the dimensions of each input/output array are
+split into core and loop dimensions:
+
+#. Each dimension in the signature is matched to a dimension of the
+ corresponding passed-in array, starting from the end of the shape tuple.
+ These are the core dimensions, and they must be present in the arrays, or
+ an error will be raised.
+#. Core dimensions assigned to the same label in the signature (e.g. the
+   ``i`` in ``inner1d``'s ``(i),(i)->()``) must have exactly matching sizes;
+   no broadcasting is performed.
#. The core dimensions are removed from all inputs and the remaining
- dimensions are broadcasted; defining the loop dimensions.
-#. The output is given by the loop dimensions plus the output core dimensions.
+ dimensions are broadcast together, defining the loop dimensions.
+#. The shape of each output is determined from the loop dimensions plus the
+   output's core dimensions.
+
+Typically, the size of all core dimensions in an output will be determined by
+the size of a core dimension with the same label in an input array. This is
+not a requirement, and it is possible to define a signature where a label
+comes up for the first time in an output, although some precautions must be
+taken when calling such a function. An example would be the function
+``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of
+``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean
+distances among them. The output dimension ``p`` must therefore be equal to
+``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an
+output array of the right size. If the size of a core dimension of an output
+cannot be determined from a passed-in input or output array, an error will be
+raised.
+
+Note: Prior to Numpy 1.10.0, less strict checks were in place: missing core
+dimensions were created by prepending 1's to the shape as necessary, core
+dimensions with the same label were broadcast together, and undetermined
+dimensions were created with size 1.
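
To make the matching rules concrete, here is a sketch using the ``inner1d``
test gufunc (its availability in ``numpy.core.umath_tests`` is an assumption
of this example):

.. code-block:: python

    import numpy as np
    from numpy.core.umath_tests import inner1d  # signature (i),(i)->()

    a = np.random.rand(3, 5, 10)
    b = np.random.rand(5, 10)
    # core dimension i=10 is consumed; loop dims (3, 5) and (5,) broadcast
    inner1d(a, b).shape  # (3, 5)
    # inner1d(a, np.random.rand(5, 1))  # would now raise: core sizes 10 != 1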
Definitions
@@ -70,7 +92,7 @@ Core Dimension
Dimension Name
A dimension name represents a core dimension in the signature.
Different dimensions may share a name, indicating that they are of
- the same size (or are broadcastable).
+ the same size.
Dimension Index
A dimension index is an integer representing a dimension name. It
@@ -93,8 +115,7 @@ following format:
* Dimension lists for different arguments are separated by ``","``.
Input/output arguments are separated by ``"->"``.
* If one uses the same dimension name in multiple locations, this
- enforces the same size (or broadcastable size) of the corresponding
- dimensions.
+ enforces the same size of the corresponding dimensions.
The formal syntax of signatures is as follows::
@@ -111,10 +132,9 @@ The formal syntax of signatures is as follows::
Notes:
#. All quotes are for clarity.
-#. Core dimensions that share the same name must be broadcastable, as
- the two ``i`` in our example above. Each dimension name typically
- corresponding to one level of looping in the elementary function's
- implementation.
+#. Core dimensions that share the same name must have the exact same size.
+ Each dimension name typically corresponds to one level of looping in the
+ elementary function's implementation.
#. White spaces are ignored.
Here are some examples of signatures:
diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api.iterator.rst
index 084fdcbce..e6a21c1b3 100644
--- a/doc/source/reference/c-api.iterator.rst
+++ b/doc/source/reference/c-api.iterator.rst
@@ -18,8 +18,6 @@ preservation of memory layouts, and buffering of data with the wrong
alignment or type, without requiring difficult coding.
This page documents the API for the iterator.
-The C-API naming convention chosen is based on the one in the numpy-refactor
-branch, so will integrate naturally into the refactored code base.
The iterator is named ``NpyIter`` and functions are
named ``NpyIter_*``.
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api.ufunc.rst
index 71abffd04..3673958d9 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api.ufunc.rst
@@ -114,7 +114,6 @@ Functions
data type, it will be internally upcast to the int_ (or uint)
data type.
-
:param doc:
Allows passing in a documentation string to be stored with the
ufunc. The documentation string should not contain the name
@@ -128,6 +127,21 @@ Functions
structure and it does get set with this value when the ufunc
object is created.
+.. cfunction:: PyObject* PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction* func,
+ void** data, char* types, int ntypes, int nin, int nout, int identity,
+ char* name, char* doc, int check_return, char *signature)
+
+ This function is very similar to PyUFunc_FromFuncAndData above, but has
+ an extra *signature* argument, to define generalized universal functions.
+   Just as ufuncs are built around an element-by-element operation, gufuncs
+   are built around subarray-by-subarray operations, with the signature
+   defining the subarrays to operate on.
+
+ :param signature:
+ The signature for the new gufunc. Setting it to NULL is equivalent
+ to calling PyUFunc_FromFuncAndData. A copy of the string is made,
+       so the passed-in buffer can be freed.
+
.. cfunction:: int PyUFunc_RegisterLoopForType(PyUFuncObject* ufunc,
int usertype, PyUFuncGenericFunction function, int* arg_types, void* data)
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 657eb9a5d..c853fc0f0 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,6 +3,7 @@ Release Notes
*************
.. include:: ../release/1.10.0-notes.rst
+.. include:: ../release/1.9.1-notes.rst
.. include:: ../release/1.9.0-notes.rst
.. include:: ../release/1.8.2-notes.rst
.. include:: ../release/1.8.1-notes.rst
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 8dfd39beb..f4ef87cd0 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -356,18 +356,15 @@ version of the input.
Calling f2py from Python
------------------------
-The f2py program is written in Python and can be run from inside your
-module. This provides a facility that is somewhat similar to the use
-of weave.ext_tools described below. An example of the final interface
-executed using Python code is:
+The f2py program is written in Python and can be run from inside your code
+to compile Fortran code at runtime, as follows:
.. code-block:: python
- import numpy.f2py as f2py
- fid = open('add.f')
- source = fid.read()
- fid.close()
- f2py.compile(source, modulename='add')
+ from numpy import f2py
+ with open("add.f") as sourcefile:
+ sourcecode = sourcefile.read()
+ f2py.compile(sourcecode, modulename='add')
import add
The source string can be any valid Fortran code. If you want to save
@@ -438,465 +435,193 @@ written C-code.
single: f2py
-weave
-=====
-
-Weave is a scipy package that can be used to automate the process of
-extending Python with C/C++ code. It can be used to speed up
-evaluation of an array expression that would otherwise create
-temporary variables, to directly "inline" C/C++ code into Python, or
-to create a fully-named extension module. You must either install
-scipy or get the weave package separately and install it using the
-standard python setup.py install. You must also have a C/C++-compiler
-installed and useable by Python distutils in order to use weave.
-
-.. index::
- single: weave
-
-Somewhat dated, but still useful documentation for weave can be found
-at the link http://www.scipy/Weave. There are also many examples found
-in the examples directory which is installed under the weave directory
-in the place where weave is installed on your system.
-
-
-Speed up code involving arrays (also see scipy.numexpr)
--------------------------------------------------------
-
-This is the easiest way to use weave and requires minimal changes to
-your Python code. It involves placing quotes around the expression of
-interest and calling weave.blitz. Weave will parse the code and
-generate C++ code using Blitz C++ arrays. It will then compile the
-code and catalog the shared library so that the next time this exact
-string is asked for (and the array types are the same), the already-
-compiled shared library will be loaded and used. Because Blitz makes
-extensive use of C++ templating, it can take a long time to compile
-the first time. After that, however, the code should evaluate more
-quickly than the equivalent NumPy expression. This is especially true
-if your array sizes are large and the expression would require NumPy
-to create several temporaries. Only expressions involving basic
-arithmetic operations and basic array slicing can be converted to
-Blitz C++ code.
-
-For example, consider the expression::
-
- d = 4*a + 5*a*b + 6*b*c
-
-where a, b, and c are all arrays of the same type and shape. When the
-data-type is double-precision and the size is 1000x1000, this
-expression takes about 0.5 seconds to compute on an 1.1Ghz AMD Athlon
-machine. When this expression is executed instead using blitz:
-
-.. code-block:: python
-
- d = empty(a.shape, 'd'); weave.blitz(expr)
-
-execution time is only about 0.20 seconds (about 0.14 seconds spent in
-weave and the rest in allocating space for d). Thus, we've sped up the
-code by a factor of 2 using only a simnple command (weave.blitz). Your
-mileage may vary, but factors of 2-8 speed-ups are possible with this
-very simple technique.
-
-If you are interested in using weave in this way, then you should also
-look at scipy.numexpr which is another similar way to speed up
-expressions by eliminating the need for temporary variables. Using
-numexpr does not require a C/C++ compiler.
-
-
-Inline C-code
--------------
-
-Probably the most widely-used method of employing weave is to
-"in-line" C/C++ code into Python in order to speed up a time-critical
-section of Python code. In this method of using weave, you define a
-string containing useful C-code and then pass it to the function
-**weave.inline** ( ``code_string``, ``variables`` ), where
-code_string is a string of valid C/C++ code and variables is a list of
-variables that should be passed in from Python. The C/C++ code should
-refer to the variables with the same names as they are defined with in
-Python. If weave.line should return anything the the special value
-return_val should be set to whatever object should be returned. The
-following example shows how to use weave on basic Python objects:
-
-.. code-block:: python
-
- code = r"""
- int i;
- py::tuple results(2);
- for (i=0; i<a.length(); i++) {
- a[i] = i;
- }
- results[0] = 3.0;
- results[1] = 4.0;
- return_val = results;
- """
- a = [None]*10
- res = weave.inline(code,['a'])
-
-The C++ code shown in the code string uses the name 'a' to refer to
-the Python list that is passed in. Because the Python List is a
-mutable type, the elements of the list itself are modified by the C++
-code. A set of C++ classes are used to access Python objects using
-simple syntax.
-
-The main advantage of using C-code, however, is to speed up processing
-on an array of data. Accessing a NumPy array in C++ code using weave,
-depends on what kind of type converter is chosen in going from NumPy
-arrays to C++ code. The default converter creates 5 variables for the
-C-code for every NumPy array passed in to weave.inline. The following
-table shows these variables which can all be used in the C++ code. The
-table assumes that ``myvar`` is the name of the array in Python with
-data-type {dtype} (i.e. float64, float32, int8, etc.)
-
-=========== ============== =========================================
-Variable Type Contents
-=========== ============== =========================================
-myvar {dtype}* Pointer to the first element of the array
-Nmyvar npy_intp* A pointer to the dimensions array
-Smyvar npy_intp* A pointer to the strides array
-Dmyvar int The number of dimensions
-myvar_array PyArrayObject* The entire structure for the array
-=========== ============== =========================================
-
-The in-lined code can contain references to any of these variables as
-well as to the standard macros MYVAR1(i), MYVAR2(i,j), MYVAR3(i,j,k),
-and MYVAR4(i,j,k,l). These name-based macros (they are the Python name
-capitalized followed by the number of dimensions needed) will de-
-reference the memory for the array at the given location with no error
-checking (be-sure to use the correct macro and ensure the array is
-aligned and in correct byte-swap order in order to get useful
-results). The following code shows how you might use these variables
-and macros to code a loop in C that computes a simple 2-d weighted
-averaging filter.
-
-.. code-block:: c++
-
- int i,j;
- for(i=1;i<Na[0]-1;i++) {
- for(j=1;j<Na[1]-1;j++) {
- B2(i,j) = A2(i,j) + (A2(i-1,j) +
- A2(i+1,j)+A2(i,j-1)
- + A2(i,j+1))*0.5
- + (A2(i-1,j-1)
- + A2(i-1,j+1)
- + A2(i+1,j-1)
- + A2(i+1,j+1))*0.25
- }
- }
-
-The above code doesn't have any error checking and so could fail with
-a Python crash if, ``a`` had the wrong number of dimensions, or ``b``
-did not have the same shape as ``a``. However, it could be placed
-inside a standard Python function with the necessary error checking to
-produce a robust but fast subroutine.
-
-One final note about weave.inline: if you have additional code you
-want to include in the final extension module such as supporting
-function calls, include statements, etc. you can pass this code in as a
-string using the keyword support_code: ``weave.inline(code, variables,
-support_code=support)``. If you need the extension module to link
-against an additional library then you can also pass in
-distutils-style keyword arguments such as library_dirs, libraries,
-and/or runtime_library_dirs which point to the appropriate libraries
-and directories.
-
-Simplify creation of an extension module
-----------------------------------------
-
-The inline function creates one extension module for each function to-
-be inlined. It also generates a lot of intermediate code that is
-duplicated for each extension module. If you have several related
-codes to execute in C, it would be better to make them all separate
-functions in a single extension module with multiple functions. You
-can also use the tools weave provides to produce this larger extension
-module. In fact, the weave.inline function just uses these more
-general tools to do its work.
-
-The approach is to:
-
-1. construct a extension module object using
- ext_tools.ext_module(``module_name``);
-
-2. create function objects using ext_tools.ext_function(``func_name``,
- ``code``, ``variables``);
-
-3. (optional) add support code to the function using the
- .customize.add_support_code( ``support_code`` ) method of the
- function object;
-
-4. add the functions to the extension module object using the
- .add_function(``func``) method;
-
-5. when all the functions are added, compile the extension with its
- .compile() method.
-
-Several examples are available in the examples directory where weave
-is installed on your system. Look particularly at ramp2.py,
-increment_example.py and fibonacii.py
-
-
-Conclusion
-----------
-
-Weave is a useful tool for quickly routines in C/C++ and linking them
-into Python. It's caching-mechanism allows for on-the-fly compilation
-which makes it particularly attractive for in-house code. Because of
-the requirement that the user have a C++-compiler, it can be difficult
-(but not impossible) to distribute a package that uses weave to other
-users who don't have a compiler installed. Of course, weave could be
-used to construct an extension module which is then distributed in the
-normal way *(* using a setup.py file). While you can use weave to
-build larger extension modules with many methods, creating methods
-with a variable- number of arguments is not possible. Thus, for a more
-sophisticated module, you will still probably want a Python-layer that
-calls the weave-produced extension.
-
-.. index::
- single: weave
-
+Cython
+======
-Pyrex
-=====
+`Cython <http://cython.org>`_ is a compiler for a Python dialect that adds
+(optional) static typing for speed, and allows mixing C or C++ code
+into your modules. It produces C or C++ extensions that can be compiled
+and imported in Python code.
-Pyrex is a way to write C-extension modules using Python-like syntax.
-It is an interesting way to generate extension modules that is growing
-in popularity, particularly among people who have rusty or non-
-existent C-skills. It does require the user to write the "interface"
-code and so is more time-consuming than SWIG or f2py if you are trying
-to interface to a large library of code. However, if you are writing
-an extension module that will include quite a bit of your own
-algorithmic code, as well, then Pyrex is a good match. A big weakness
-perhaps is the inability to easily and quickly access the elements of
-a multidimensional array.
+If you are writing an extension module that will include quite a bit of your
+own algorithmic code as well, then Cython is a good match. Among its
+features is the ability to easily and quickly
+work with multidimensional arrays.
.. index::
- single: pyrex
+ single: cython
-Notice that Pyrex is an extension-module generator only. Unlike weave
-or f2py, it includes no automatic facility for compiling and linking
+Notice that Cython is an extension-module generator only. Unlike f2py,
+it includes no automatic facility for compiling and linking
the extension module (which must be done in the usual fashion). It
-does provide a modified distutils class called build_ext which lets
-you build an extension module from a .pyx source. Thus, you could
-write in a setup.py file:
+does provide a modified distutils class called ``build_ext`` which lets
+you build an extension module from a ``.pyx`` source. Thus, you could
+write in a ``setup.py`` file:
.. code-block:: python
- from Pyrex.Distutils import build_ext
+ from Cython.Distutils import build_ext
from distutils.extension import Extension
from distutils.core import setup
-
import numpy
- py_ext = Extension('mine', ['mine.pyx'],
- include_dirs=[numpy.get_include()])
setup(name='mine', description='Nothing',
- ext_modules=[pyx_ext],
+ ext_modules=[Extension('filter', ['filter.pyx'],
+ include_dirs=[numpy.get_include()])],
cmdclass = {'build_ext':build_ext})
Adding the NumPy include directory is, of course, only necessary if
-you are using NumPy arrays in the extension module (which is what I
-assume you are using Pyrex for). The distutils extensions in NumPy
+you are using NumPy arrays in the extension module (which is what we
+assume you are using Cython for). The distutils extensions in NumPy
also include support for automatically producing the extension-module
and linking it from a ``.pyx`` file. It works so that if the user does
-not have Pyrex installed, then it looks for a file with the same
+not have Cython installed, then it looks for a file with the same
file-name but a ``.c`` extension which it then uses instead of trying
to produce the ``.c`` file again.
-Pyrex does not natively understand NumPy arrays. However, it is not
-difficult to include information that lets Pyrex deal with them
-usefully. In fact, the numpy.random.mtrand module was written using
-Pyrex so an example of Pyrex usage is already included in the NumPy
-source distribution. That experience led to the creation of a standard
-c_numpy.pxd file that you can use to simplify interacting with NumPy
-array objects in a Pyrex-written extension. The file may not be
-complete (it wasn't at the time of this writing). If you have
-additions you'd like to contribute, please send them. The file is
-located in the .../site-packages/numpy/doc/pyrex directory where you
-have Python installed. There is also an example in that directory of
-using Pyrex to construct a simple extension module. It shows that
-Pyrex looks a lot like Python but also contains some new syntax that
-is necessary in order to get C-like speed.
-
-If you just use Pyrex to compile a standard Python module, then you
-will get a C-extension module that runs either as fast or, possibly,
-more slowly than the equivalent Python module. Speed increases are
-possible only when you use cdef to statically define C variables and
-use a special construct to create for loops:
+If you just use Cython to compile a standard Python module, then you
+will get a C extension module that typically runs a bit faster than the
+equivalent Python module. Further speed increases can be gained by using
+the ``cdef`` keyword to statically define C variables.
+
+Let's look at two examples we've seen before to see how they might be
+implemented using Cython. These examples were compiled into extension
+modules using Cython 0.21.1.
+
+
+Complex addition in Cython
+--------------------------
+
+Here is part of a Cython module named ``add.pyx`` which implements the
+complex addition functions we previously implemented using f2py:
.. code-block:: none
- cdef int i
- for i from start <= i < stop
+ cimport cython
+ cimport numpy as np
+ import numpy as np
-Let's look at two examples we've seen before to see how they might be
-implemented using Pyrex. These examples were compiled into extension
-modules using Pyrex-0.9.3.1.
+ # We need to initialize NumPy.
+ np.import_array()
+ #@cython.boundscheck(False)
+ def zadd(in1, in2):
+ cdef double complex[:] a = in1.ravel()
+ cdef double complex[:] b = in2.ravel()
-Pyrex-add
----------
+       out = np.empty(a.shape[0], np.complex128)  # must match ``double complex``
+ cdef double complex[:] c = out.ravel()
-Here is part of a Pyrex-file I named add.pyx which implements the add
-functions we previously implemented using f2py:
+ for i in range(c.shape[0]):
+ c[i].real = a[i].real + b[i].real
+ c[i].imag = a[i].imag + b[i].imag
-.. code-block:: none
+ return out
- cimport c_numpy
- from c_numpy cimport import_array, ndarray, npy_intp, npy_cdouble, \
- npy_cfloat, NPY_DOUBLE, NPY_CDOUBLE, NPY_FLOAT, \
- NPY_CFLOAT
-
- #We need to initialize NumPy
- import_array()
-
- def zadd(object ao, object bo):
- cdef ndarray c, a, b
- cdef npy_intp i
- a = c_numpy.PyArray_ContiguousFromAny(ao,
- NPY_CDOUBLE, 1, 1)
- b = c_numpy.PyArray_ContiguousFromAny(bo,
- NPY_CDOUBLE, 1, 1)
- c = c_numpy.PyArray_SimpleNew(a.nd, a.dimensions,
- a.descr.type_num)
- for i from 0 <= i < a.dimensions[0]:
- (<npy_cdouble *>c.data)[i].real = \
- (<npy_cdouble *>a.data)[i].real + \
- (<npy_cdouble *>b.data)[i].real
- (<npy_cdouble *>c.data)[i].imag = \
- (<npy_cdouble *>a.data)[i].imag + \
- (<npy_cdouble *>b.data)[i].imag
- return c
+This module shows use of the ``cimport`` statement to load the definitions
+from the ``numpy.pxd`` header that ships with Cython. It looks like NumPy is
+imported twice; ``cimport`` only makes the NumPy C-API available, while the
+regular ``import`` causes a Python-style import at runtime and makes it
+possible to call into the familiar NumPy Python API.
-This module shows use of the ``cimport`` statement to load the
-definitions from the c_numpy.pxd file. As shown, both versions of the
-import statement are supported. It also shows use of the NumPy C-API
-to construct NumPy arrays from arbitrary input objects. The array c is
-created using PyArray_SimpleNew. Then the c-array is filled by
-addition. Casting to a particiular data-type is accomplished using
-<cast \*>. Pointers are de-referenced with bracket notation and
-members of structures are accessed using '.' notation even if the
-object is techinically a pointer to a structure. The use of the
-special for loop construct ensures that the underlying code will have
-a similar C-loop so the addition calculation will proceed quickly.
-Notice that we have not checked for NULL after calling to the C-API
---- a cardinal sin when writing C-code. For routines that return
-Python objects, Pyrex inserts the checks for NULL into the C-code for
-you and returns with failure if need be. There is also a way to get
-Pyrex to automatically check for exceptions when you call functions
-that don't return Python objects. See the documentation of Pyrex for
-details.
-
-
-Pyrex-filter
-------------
+The example also demonstrates Cython's "typed memoryviews", which are like
+NumPy arrays at the C level, in the sense that they are shaped and strided
+arrays that know their own extent (unlike a C array addressed through a bare
+pointer). The syntax ``double complex[:]`` denotes a one-dimensional array
+(vector) of double-precision complex values, with arbitrary strides. A contiguous array of ints would
+be ``int[::1]``, while a matrix of floats would be ``float[:, :]``.
+
+Shown commented is the ``cython.boundscheck`` decorator, which turns
+bounds-checking for memory view accesses on or off on a per-function basis.
+We can use this to further speed up our code, at the expense of safety
+(or a manual check prior to entering the loop).
+
+Other than the view syntax, the function is immediately readable to a Python
+programmer. Static typing of the variable ``i`` is implicit. Instead of the
+view syntax, we could also have used Cython's special NumPy array syntax,
+but the view syntax is preferred.
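
Once the module is compiled (the build step via a ``setup.py`` like the one
above is assumed here), it can be used from Python like any other extension:

.. code-block:: python

    import numpy as np
    import add  # the compiled Cython module sketched above

    a = np.array([1 + 2j, 3 + 4j])  # complex128 matches double complex
    b = np.array([5 + 6j, 7 + 8j])
    add.zadd(a, b)  # array([  6. +8.j,  10.+12.j])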
-The two-dimensional example we created using weave is a bit uglier to
-implement in Pyrex because two-dimensional indexing using Pyrex is not
-as simple. But, it is straightforward (and possibly faster because of
-pre-computed indices). Here is the Pyrex-file I named image.pyx.
+
+Image filter in Cython
+----------------------
+
+The two-dimensional example we created using Fortran is just as easy to write
+in Cython:
.. code-block:: none
- cimport c_numpy
- from c_numpy cimport import_array, ndarray, npy_intp,\
- NPY_DOUBLE, NPY_CDOUBLE, \
- NPY_FLOAT, NPY_CFLOAT, NPY_ALIGNED \
-
- #We need to initialize NumPy
- import_array()
- def filter(object ao):
- cdef ndarray a, b
- cdef npy_intp i, j, M, N, oS
- cdef npy_intp r,rm1,rp1,c,cm1,cp1
- cdef double value
- # Require an ALIGNED array
- # (but not necessarily contiguous)
- # We will use strides to access the elements.
- a = c_numpy.PyArray_FROMANY(ao, NPY_DOUBLE, \
- 2, 2, NPY_ALIGNED)
- b = c_numpy.PyArray_SimpleNew(a.nd,a.dimensions, \
- a.descr.type_num)
- M = a.dimensions[0]
- N = a.dimensions[1]
- S0 = a.strides[0]
- S1 = a.strides[1]
- for i from 1 <= i < M-1:
- r = i*S0
- rm1 = r-S0
- rp1 = r+S0
- oS = i*N
- for j from 1 <= j < N-1:
- c = j*S1
- cm1 = c-S1
- cp1 = c+S1
- (<double *>b.data)[oS+j] = \
- (<double *>(a.data+r+c))[0] + \
- ((<double *>(a.data+rm1+c))[0] + \
- (<double *>(a.data+rp1+c))[0] + \
- (<double *>(a.data+r+cm1))[0] + \
- (<double *>(a.data+r+cp1))[0])*0.5 + \
- ((<double *>(a.data+rm1+cm1))[0] + \
- (<double *>(a.data+rp1+cm1))[0] + \
- (<double *>(a.data+rp1+cp1))[0] + \
- (<double *>(a.data+rm1+cp1))[0])*0.25
- return b
+ cimport numpy as np
+ import numpy as np
+
+ np.import_array()
+
+ def filter(img):
+ cdef double[:, :] a = np.asarray(img, dtype=np.double)
+ out = np.zeros(img.shape, dtype=np.double)
+ cdef double[:, ::1] b = out
+
+ cdef np.npy_intp i, j
+
+ for i in range(1, a.shape[0] - 1):
+ for j in range(1, a.shape[1] - 1):
+ b[i, j] = (a[i, j]
+ + .5 * ( a[i-1, j] + a[i+1, j]
+ + a[i, j-1] + a[i, j+1])
+ + .25 * ( a[i-1, j-1] + a[i-1, j+1]
+ + a[i+1, j-1] + a[i+1, j+1]))
+
+ return out
This 2-d averaging filter runs quickly because the loop is in C and
-the pointer computations are done only as needed. However, it is not
-particularly easy to understand what is happening. A 2-d image, ``in``
-, can be filtered using this code very quickly using:
+the pointer computations are done only as needed. If the code above is
+compiled as a module ``image``, then a 2-d image, ``img``, can be filtered
+very quickly with:
.. code-block:: python
import image
- out = image.filter(in)
+ out = image.filter(img)
+
+Regarding the code, two things are of note: firstly, it is impossible to
+return a memory view to Python. Instead, a NumPy array ``out`` is first
+created, and then a view ``b`` onto this array is used for the computation.
+Secondly, the view ``b`` is typed ``double[:, ::1]``. This denotes a 2-d
+array with contiguous rows, i.e., C (row-major) order. Specifying the order
+explicitly can speed up some algorithms, since they can skip stride
+computations.
Conclusion
----------
-There are several disadvantages of using Pyrex:
-
-1. The syntax for Pyrex can get a bit bulky, and it can be confusing at
- first to understand what kind of objects you are getting and how to
- interface them with C-like constructs.
+Cython is the extension mechanism of choice for several scientific Python
+libraries, including Pandas, SAGE, scikit-image and scikit-learn,
+as well as the XML processing library LXML.
+The language and compiler are well-maintained.
-2. Inappropriate Pyrex syntax or incorrect calls to C-code or type-
- mismatches can result in failures such as
+There are several disadvantages of using Cython:
- 1. Pyrex failing to generate the extension module source code,
+1. When coding custom algorithms, and sometimes when wrapping existing C
+ libraries, some familiarity with C is required. In particular, when using
+ C memory management (``malloc`` and friends), it's easy to introduce
+ memory leaks. However, just compiling a Python module renamed to ``.pyx``
+ can already speed it up, and adding a few type declarations can give
+ dramatic speedups in some code.
- 2. Compiler failure while generating the extension module binary due to
- incorrect C syntax,
-
- 3. Python failure when trying to use the module.
-
-
-3. It is easy to lose a clean separation between Python and C which makes
+2. It is easy to lose a clean separation between Python and C which makes
re-using your C-code for other non-Python-related projects more
difficult.
-4. Multi-dimensional arrays are "bulky" to index (appropriate macros
- may be able to fix this).
-
-5. The C-code generated by Pyrex is hard to read and modify (and typically
+3. The C-code generated by Cython is hard to read and modify (and typically
compiles with annoying but harmless warnings).
-Writing a good Pyrex extension module still takes a bit of effort
-because not only does it require (a little) familiarity with C, but
-also with Pyrex's brand of Python-mixed-with C. One big advantage of
-Pyrex-generated extension modules is that they are easy to distribute
-using distutils. In summary, Pyrex is a very capable tool for either
-gluing C-code or generating an extension module quickly and should not
-be over-looked. It is especially useful for people that can't or won't
-write C-code or Fortran code. But, if you are already able to write
-simple subroutines in C or Fortran, then I would use one of the other
-approaches such as f2py (for Fortran), ctypes (for C shared-
-libraries), or weave (for inline C-code).
+One big advantage of Cython-generated extension modules is that they are
+easy to distribute. In summary, Cython is a very capable tool for either
+gluing C code or generating an extension module quickly and should not be
+over-looked. It is especially useful for people that can't or won't write
+C or Fortran code.
.. index::
- single: pyrex
-
-
+ single: cython
ctypes
@@ -1367,10 +1092,10 @@ Additional tools you may find useful
====================================
These tools have been found useful by others using Python and so are
-included here. They are discussed separately because I see them as
-either older ways to do things more modernly handled by f2py, weave,
-Pyrex, or ctypes (SWIG, PyFort, PyInline) or because I don't know much
-about them (SIP, Boost, Instant). I have not added links to these
+included here. They are discussed separately because they are
+either older ways to do things now handled by f2py, Cython, or ctypes
+(SWIG, PyFort) or because I don't know much about them (SIP, Boost).
+I have not added links to these
methods because my experience is that you can find the most relevant
link faster using Google or some other search engine, and any links
provided here would be quickly dated. Do not assume that just because
@@ -1452,70 +1177,6 @@ have a set of C++ classes that need to be integrated cleanly into
Python, consider learning about and using Boost.Python.
-Instant
--------
-
-.. index::
- single: Instant
-
-This is a relatively new package (called pyinstant at sourceforge)
-that builds on top of SWIG to make it easy to inline C and C++ code in
-Python very much like weave. However, Instant builds extension modules
-on the fly with specific module names and specific method names. In
-this repsect it is more more like f2py in its behavior. The extension
-modules are built on-the fly (as long as the SWIG is installed). They
-can then be imported. Here is an example of using Instant with NumPy
-arrays (adapted from the test2 included in the Instant distribution):
-
-.. code-block:: python
-
- code="""
- PyObject* add(PyObject* a_, PyObject* b_){
- /*
- various checks
- */
- PyArrayObject* a=(PyArrayObject*) a_;
- PyArrayObject* b=(PyArrayObject*) b_;
- int n = a->dimensions[0];
- int dims[1];
- dims[0] = n;
- PyArrayObject* ret;
- ret = (PyArrayObject*) PyArray_FromDims(1, dims, NPY_DOUBLE);
- int i;
- char *aj=a->data;
- char *bj=b->data;
- double *retj = (double *)ret->data;
- for (i=0; i < n; i++) {
- *retj++ = *((double *)aj) + *((double *)bj);
- aj += a->strides[0];
- bj += b->strides[0];
- }
- return (PyObject *)ret;
- }
- """
- import Instant, numpy
- ext = Instant.Instant()
- ext.create_extension(code=s, headers=["numpy/arrayobject.h"],
- include_dirs=[numpy.get_include()],
- init_code='import_array();', module="test2b_ext")
- import test2b_ext
- a = numpy.arange(1000)
- b = numpy.arange(1000)
- d = test2b_ext.add(a,b)
-
-Except perhaps for the dependence on SWIG, Instant is a
-straightforward utility for writing extension modules.
-
-
-PyInline
---------
-
-This is a much older module that allows automatic building of
-extension modules so that C-code can be included with Python code.
-It's latest release (version 0.03) was in 2001, and it appears that it
-is not being updated.
-
-
PyFort
------
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
index 526217359..9b1942e8d 100644
--- a/numpy/_import_tools.py
+++ b/numpy/_import_tools.py
@@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import warnings
__all__ = ['PackageLoader']
@@ -162,7 +163,10 @@ class PackageLoader(object):
postpone= : bool
when True, don't load packages [default: False]
- """
+ """
+ warnings.warn('pkgload and PackageLoader are obsolete '
+ 'and will be removed in a future version of numpy',
+ DeprecationWarning)
frame = self.parent_frame
self.info_modules = {}
if options.get('force', False):
diff --git a/numpy/core/bscript b/numpy/core/bscript
index 682f35e8e..5df5a3f8a 100644
--- a/numpy/core/bscript
+++ b/numpy/core/bscript
@@ -60,27 +60,13 @@ def define_no_smp():
#--------------------------------
# Checking SMP and thread options
#--------------------------------
- # Python 2.3 causes a segfault when
- # trying to re-acquire the thread-state
- # which is done in error-handling
- # ufunc code. NPY_ALLOW_C_API and friends
- # cause the segfault. So, we disable threading
- # for now.
- if sys.version[:5] < '2.4.2':
- nosmp = 1
- else:
- # Perhaps a fancier check is in order here.
- # so that threads are only enabled if there
- # are actually multiple CPUS? -- but
- # threaded code can be nice even on a single
- # CPU so that long-calculating code doesn't
- # block.
- try:
- nosmp = os.environ['NPY_NOSMP']
- nosmp = 1
- except KeyError:
- nosmp = 0
- return nosmp == 1
+ # Perhaps a fancier check is in order here.
+ # so that threads are only enabled if there
+ # are actually multiple CPUS? -- but
+ # threaded code can be nice even on a single
+ # CPU so that long-calculating code doesn't
+ # block.
+ return 'NPY_NOSMP' in os.environ
def write_numpy_config(conf):
subst_dict = {}
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 67c01cc67..1abbb666b 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -932,8 +932,8 @@ add_newdoc('numpy.core.umath', 'divide',
Returns
-------
y : {ndarray, scalar}
- The quotient `x1/x2`, element-wise. Returns a scalar if
- both `x1` and `x2` are scalars.
+ The quotient ``x1/x2``, element-wise. Returns a scalar if
+ both ``x1`` and ``x2`` are scalars.
See Also
--------
@@ -942,13 +942,13 @@ add_newdoc('numpy.core.umath', 'divide',
Notes
-----
- Equivalent to `x1` / `x2` in terms of array-broadcasting.
+ Equivalent to ``x1`` / ``x2`` in terms of array-broadcasting.
- Behavior on division by zero can be changed using `seterr`.
+ Behavior on division by zero can be changed using ``seterr``.
- When both `x1` and `x2` are of an integer type, `divide` will return
- integers and throw away the fractional part. Moreover, division by zero
- always yields zero in integer arithmetic.
+ In Python 2, when both ``x1`` and ``x2`` are of an integer type,
+ ``divide`` will behave like ``floor_divide``. In Python 3, it behaves
+ like ``true_divide``.
Examples
--------
@@ -961,20 +961,20 @@ add_newdoc('numpy.core.umath', 'divide',
[ Inf, 4. , 2.5],
[ Inf, 7. , 4. ]])
- Note the behavior with integer types:
+ Note the behavior with integer types (Python 2 only):
>>> np.divide(2, 4)
0
>>> np.divide(2, 4.)
0.5
- Division by zero always yields zero in integer arithmetic, and does not
- raise an exception or a warning:
+ Division by zero always yields zero in integer arithmetic (again,
+ Python 2 only), and does not raise an exception or a warning:
>>> np.divide(np.array([0, 1], dtype=int), np.array([0, 0], dtype=int))
array([0, 0])
- Division by zero can, however, be caught using `seterr`:
+ Division by zero can, however, be caught using ``seterr``:
>>> old_err_state = np.seterr(divide='raise')
>>> np.divide(1, 0)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 1cb1ed8bc..84a10bf04 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -513,6 +513,12 @@ def transpose(a, axes=None):
See Also
--------
rollaxis
+ argsort
+
+ Notes
+ -----
+ Use `transpose(a, argsort(axes))` to invert the transposition of tensors
+ when using the `axes` keyword argument.
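
    A quick sketch of that inversion trick:

    .. code-block:: python

        import numpy as np

        a = np.ones((1, 2, 3))
        axes = (1, 2, 0)
        t = np.transpose(a, axes)                # shape (2, 3, 1)
        np.transpose(t, np.argsort(axes)).shape  # (1, 2, 3), transposition undone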
Examples
--------
@@ -1082,6 +1088,9 @@ def resize(a, new_shape):
Examples
--------
>>> a=np.array([[0,1],[2,3]])
+ >>> np.resize(a,(2,3))
+ array([[0, 1, 2],
+ [3, 0, 1]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
@@ -2095,8 +2104,14 @@ def amax(a, axis=None, out=None, keepdims=False):
----------
a : array_like
Input data.
- axis : int, optional
- Axis along which to operate. By default, flattened input is used.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded: 1.7.0
+
+ If this is a tuple of ints, the maximum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
@@ -2179,8 +2194,14 @@ def amin(a, axis=None, out=None, keepdims=False):
----------
a : array_like
Input data.
- axis : int, optional
- Axis along which to operate. By default, flattened input is used.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which to operate. By default, flattened input is
+ used.
+
+ .. versionadded: 1.7.0
+
+ If this is a tuple of ints, the minimum is selected over multiple axes,
+ instead of a single axis or all the axes as before.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
@@ -2693,9 +2714,14 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the means are computed. The default is to compute
- the mean of the flattened array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the means are computed. The default is to
+ compute the mean of the flattened array.
+
+ .. versionadded: 1.7.0
+
+ If this is a tuple of ints, a mean is performed over multiple axes,
+ instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
is `float64`; for floating point inputs, it is the same as the
@@ -2778,9 +2804,14 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
----------
a : array_like
Calculate the standard deviation of these values.
- axis : int, optional
- Axis along which the standard deviation is computed. The default is
- to compute the standard deviation of the flattened array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the standard deviation is computed. The
+ default is to compute the standard deviation of the flattened array.
+
+ .. versionadded: 1.7.0
+
+ If this is a tuple of ints, a standard deviation is performed over
+ multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
@@ -2881,9 +2912,14 @@ def var(a, axis=None, dtype=None, out=None, ddof=0,
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the variance is computed. The default is to compute
- the variance of the flattened array.
+ axis : None or int or tuple of ints, optional
+ Axis or axes along which the variance is computed. The default is to
+ compute the variance of the flattened array.
+
+        .. versionadded:: 1.7.0
+
+ If this is a tuple of ints, a variance is performed over multiple axes,
+ instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
        the default is `float64`; for arrays of float types it is the same as
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index b8c7c3a2d..fbaaeacea 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -14,6 +14,7 @@ extern "C" CONFUSE_EMACS
everything when you're typing */
#endif
+#include <Python.h>
#include "ndarraytypes.h"
/* Includes the "function" C-API -- these are all stored in a
@@ -50,14 +51,26 @@ extern "C" CONFUSE_EMACS
#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \
PyArray_IsZeroDim(m))
-
+#if PY_MAJOR_VERSION >= 3
+#define PyArray_IsPythonNumber(obj) \
+ (PyFloat_Check(obj) || PyComplex_Check(obj) || \
+ PyLong_Check(obj) || PyBool_Check(obj))
+#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
+#define PyArray_IsPythonScalar(obj) \
+ (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \
+ PyUnicode_Check(obj))
+#else
#define PyArray_IsPythonNumber(obj) \
(PyInt_Check(obj) || PyFloat_Check(obj) || PyComplex_Check(obj) || \
PyLong_Check(obj) || PyBool_Check(obj))
-
+#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
+ || PyLong_Check(obj) \
+ || PyArray_IsScalar((obj), Integer))
#define PyArray_IsPythonScalar(obj) \
(PyArray_IsPythonNumber(obj) || PyString_Check(obj) || \
PyUnicode_Check(obj))
+#endif
#define PyArray_IsAnyScalar(obj) \
(PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))
@@ -65,10 +78,6 @@ extern "C" CONFUSE_EMACS
#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \
PyArray_CheckScalar(obj))
-#define PyArray_IsIntegerScalar(obj) (PyInt_Check(obj) \
- || PyLong_Check(obj) \
- || PyArray_IsScalar((obj), Integer))
-
#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \
Py_INCREF(m), (m) : \
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 5cba8c9d2..92b03d20c 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -133,6 +133,7 @@ extern long long __cdecl _ftelli64(FILE *);
#else
#define npy_ftell ftell
#endif
+ #include <sys/types.h>
#define npy_lseek lseek
#define npy_off_t off_t
@@ -264,18 +265,9 @@ typedef unsigned PY_LONG_LONG npy_ulonglong;
# ifdef _MSC_VER
# define NPY_LONGLONG_FMT "I64d"
# define NPY_ULONGLONG_FMT "I64u"
-# elif defined(__APPLE__) || defined(__FreeBSD__)
-/* "%Ld" only parses 4 bytes -- "L" is floating modifier on MacOS X/BSD */
+# else
# define NPY_LONGLONG_FMT "lld"
# define NPY_ULONGLONG_FMT "llu"
-/*
- another possible variant -- *quad_t works on *BSD, but is deprecated:
- #define LONGLONG_FMT "qd"
- #define ULONGLONG_FMT "qu"
-*/
-# else
-# define NPY_LONGLONG_FMT "Ld"
-# define NPY_ULONGLONG_FMT "Lu"
# endif
# ifdef _MSC_VER
# define NPY_LONGLONG_SUFFIX(x) (x##i64)
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 24d4ce1fc..60abae4e0 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -20,6 +20,7 @@
#define _NPY_CPUARCH_H_
#include "numpyconfig.h"
+#include <string.h> /* for memcpy */
#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
/*
@@ -80,38 +81,7 @@
information about your platform (OS, CPU and compiler)
#endif
-/*
- This "white-lists" the architectures that we know don't require
- pointer alignment. We white-list, since the memcpy version will
- work everywhere, whereas assignment will only work where pointer
- dereferencing doesn't require alignment.
-
- TODO: There may be more architectures we can white list.
-*/
-#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)
- #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src)))
-#else
- #if NPY_SIZEOF_PY_INTPTR_T == 4
- #define NPY_COPY_PYOBJECT_PTR(dst, src) \
- ((char*)(dst))[0] = ((char*)(src))[0]; \
- ((char*)(dst))[1] = ((char*)(src))[1]; \
- ((char*)(dst))[2] = ((char*)(src))[2]; \
- ((char*)(dst))[3] = ((char*)(src))[3];
- #elif NPY_SIZEOF_PY_INTPTR_T == 8
- #define NPY_COPY_PYOBJECT_PTR(dst, src) \
- ((char*)(dst))[0] = ((char*)(src))[0]; \
- ((char*)(dst))[1] = ((char*)(src))[1]; \
- ((char*)(dst))[2] = ((char*)(src))[2]; \
- ((char*)(dst))[3] = ((char*)(src))[3]; \
- ((char*)(dst))[4] = ((char*)(src))[4]; \
- ((char*)(dst))[5] = ((char*)(src))[5]; \
- ((char*)(dst))[6] = ((char*)(src))[6]; \
- ((char*)(dst))[7] = ((char*)(src))[7];
- #else
- #error Unknown architecture, please report this to numpy maintainers with \
- information about your platform (OS, CPU and compiler)
- #endif
-#endif
+#define NPY_COPY_PYOBJECT_PTR(dst, src) memcpy(dst, src, sizeof(PyObject *))
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
#define NPY_CPU_HAVE_UNALIGNED_ACCESS 1
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index b1c96ee29..4b10f361c 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -111,6 +111,11 @@ class memmap(ndarray):
certain size depending on the platform. This size is always < 2GB
even on 64-bit systems.
+ When a memmap causes a file to be created or extended beyond its
+ current size in the filesystem, the contents of the new part are
+ unspecified. On systems with POSIX filesystem semantics, the extended
+ part will be filled with zero bytes.
+
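+    For instance, a memmap over a freshly created file reads back as zeros
+    on POSIX systems (illustrative sketch using a hypothetical path):
+
+    >>> fp = np.memmap('/tmp/newfile.dat', dtype='float32', mode='w+',
+    ...                shape=(4,))
+    >>> fp
+    memmap([ 0.,  0.,  0.,  0.], dtype=float32)
+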
Examples
--------
>>> data = np.arange(12, dtype='float32')
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 1545bc734..0c03cce89 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -670,7 +670,7 @@ def issubclass_(arg1, arg2):
Determine if a class is a subclass of a second class.
`issubclass_` is equivalent to the Python built-in ``issubclass``,
- except that it returns False instead of raising a TypeError is one
+ except that it returns False instead of raising a TypeError if one
of the arguments is not a class.
Parameters
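A short sketch of the corrected sentence in action (illustrative doctest, not
part of the patch):

    >>> np.issubclass_(np.int32, np.integer)
    True
    >>> np.issubclass_(1, int)    # non-class argument: returns False
    False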
diff --git a/numpy/core/records.py b/numpy/core/records.py
index d0f82a25c..bf4d835ea 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -71,7 +71,6 @@ _byteorderconv = {'b':'>',
# are equally allowed
numfmt = nt.typeDict
-_typestr = nt._typestr
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
@@ -268,7 +267,7 @@ class record(nt.void):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
- maxlen = max([len(name) for name in names])
+ maxlen = max(len(name) for name in names)
rows = []
fmt = '%% %ds: %%s' % maxlen
for name in names:
@@ -527,15 +526,12 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
- formats = ''
+ formats = []
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError("item in the array list must be an ndarray.")
- formats += _typestr[obj.dtype.type]
- if issubclass(obj.dtype.type, nt.flexible):
- formats += repr(obj.itemsize)
- formats += ','
- formats = formats[:-1]
+ formats.append(obj.dtype.str)
+ formats = ','.join(formats)
if dtype is not None:
descr = sb.dtype(dtype)
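A hedged sketch of what the rewritten branch now builds (the exact type
strings, e.g. '<i8', are platform dependent):

    >>> x = np.array([1, 2])
    >>> y = np.array([1.0, 2.0])
    >>> np.rec.fromarrays([x, y]).dtype.descr   # per-column dtype.str, joined
    [('f0', '<i8'), ('f1', '<f8')]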
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 9cb9d7361..a51eb690b 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -78,27 +78,13 @@ def is_npy_no_signal():
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
- # Python 2.3 causes a segfault when
- # trying to re-acquire the thread-state
- # which is done in error-handling
- # ufunc code. NPY_ALLOW_C_API and friends
- # cause the segfault. So, we disable threading
- # for now.
- if sys.version[:5] < '2.4.2':
- nosmp = 1
- else:
- # Perhaps a fancier check is in order here.
- # so that threads are only enabled if there
- # are actually multiple CPUS? -- but
- # threaded code can be nice even on a single
- # CPU so that long-calculating code doesn't
- # block.
- try:
- nosmp = os.environ['NPY_NOSMP']
- nosmp = 1
- except KeyError:
- nosmp = 0
- return nosmp == 1
+    # Perhaps a fancier check is in order here,
+    # so that threads are only enabled if there
+    # are actually multiple CPUs? -- but
+    # threaded code can be nice even on a single
+    # CPU so that long-running code doesn't
+    # block.
+ return 'NPY_NOSMP' in os.environ
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
@@ -279,11 +265,11 @@ def check_types(config_cmd, ext, build_dir):
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
- expected['long double'] = [8, 12, 16]
- expected['Py_intptr_t'] = [4, 8]
+ expected['long double'] = [16, 12, 8]
+ expected['Py_intptr_t'] = [8, 4]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
- expected['off_t'] = [4, 8]
+ expected['off_t'] = [8, 4]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
@@ -323,7 +309,8 @@ def check_types(config_cmd, ext, build_dir):
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
- res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
+ res = config_cmd.check_type_size(complex_def,
+ expected=[2 * x for x in expected[type]])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 3dc1cecf5..e51797c03 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -172,10 +172,20 @@ def check_long_double_representation(cmd):
body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
# We need to use _compile because we need the object filename
- src, object = cmd._compile(body, None, None, 'c')
+ src, obj = cmd._compile(body, None, None, 'c')
try:
- type = long_double_representation(pyod(object))
- return type
+ ltype = long_double_representation(pyod(obj))
+ return ltype
+ except ValueError:
+ # try linking to support CC="gcc -flto" or icc -ipo
+ # struct needs to be volatile so it isn't optimized away
+ body = body.replace('struct', 'volatile struct')
+ body += "int main(void) { return 0; }\n"
+ src, obj = cmd._compile(body, None, None, 'c')
+ cmd.temp_files.append("_configtest")
+ cmd.compiler.link_executable([obj], "_configtest")
+ ltype = long_double_representation(pyod("_configtest"))
+ return ltype
finally:
cmd._clean()
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 1b9bc9a8c..89b5404b4 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -4259,8 +4259,12 @@ datetime_dtype_metadata_clone(NpyAuxData *data)
/*
* Initializes the c_metadata field for the _builtin_descrs DATETIME
* and TIMEDELTA.
+ *
+ * must not be static, gcc 4.1.2 on redhat 5 then miscompiles this function
+ * see gh-5163
+ *
*/
-static int
+NPY_NO_EXPORT int
initialize_builtin_datetime_metadata(void)
{
PyArray_DatetimeDTypeMetaData *data1, *data2;
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 6ba98d1cd..7f7607e1f 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -629,6 +629,8 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
{
PyArrayObject *self;
_buffer_info_t *info = NULL;
+ int i;
+ Py_ssize_t sd;
self = (PyArrayObject*)obj;
@@ -703,6 +705,31 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
}
if ((flags & PyBUF_STRIDES) == PyBUF_STRIDES) {
view->strides = info->strides;
+
+#ifdef NPY_RELAXED_STRIDES_CHECKING
+ /*
+ * If NPY_RELAXED_STRIDES_CHECKING is on, the array may be
+ * contiguous, but it won't look that way to Python when it
+ * tries to determine contiguity by looking at the strides
+ * (since one of the elements may be -1). In that case, just
+ * regenerate strides from shape.
+ */
+ if (PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS) &&
+ !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) {
+ sd = view->itemsize;
+ for (i = view->ndim-1; i >= 0; --i) {
+ view->strides[i] = sd;
+ sd *= view->shape[i];
+ }
+ }
+ else if (PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)) {
+ sd = view->itemsize;
+ for (i = 0; i < view->ndim; ++i) {
+ view->strides[i] = sd;
+ sd *= view->shape[i];
+ }
+ }
+#endif
}
else {
view->strides = NULL;
@@ -922,7 +949,7 @@ _descriptor_from_pep3118_format_fast(char *s, PyObject **result)
*result = (PyObject*)PyArray_DescrNewByteorder(descr, byte_order);
Py_DECREF(descr);
}
-
+
return 1;
}
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index a5be9f670..a870650fc 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -3047,7 +3047,7 @@ cast_timedelta_to_timedelta(PyArray_DatetimeMetaData *src_meta,
* Returns true if the object is something that is best considered
* a Datetime, false otherwise.
*/
-static npy_bool
+static NPY_GCC_NONNULL(1) npy_bool
is_any_numpy_datetime(PyObject *obj)
{
return (PyArray_IsScalar(obj, Datetime) ||
@@ -3296,7 +3296,8 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
}
}
else {
- if (is_any_numpy_datetime(start) || is_any_numpy_datetime(stop)) {
+ if ((start && is_any_numpy_datetime(start)) ||
+ is_any_numpy_datetime(stop)) {
type_nums[0] = NPY_DATETIME;
}
else {
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index b2bf17f4c..cd0ae1680 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -265,6 +265,11 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
"put: first argument must be an array");
return NULL;
}
+
+ if (PyArray_FailUnlessWriteable(self, "put: output array") < 0) {
+ return NULL;
+ }
+
if (!PyArray_ISCONTIGUOUS(self)) {
PyArrayObject *obj;
int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_UPDATEIFCOPY;
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index a22319cfe..77e699562 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -2,6 +2,22 @@
#include <Python.h>
#include "numpy/arrayobject.h"
+/* test PyArray_IsPythonScalar, before including private py3 compat header */
+static PyObject *
+IsPythonScalar(PyObject * dummy, PyObject *args)
+{
+ PyObject *arg = NULL;
+ if (!PyArg_ParseTuple(args, "O", &arg)) {
+ return NULL;
+ }
+ if (PyArray_IsPythonScalar(arg)) {
+ Py_RETURN_TRUE;
+ }
+ else {
+ Py_RETURN_FALSE;
+ }
+}
+
#include "npy_pycompat.h"
/*
@@ -860,6 +876,9 @@ test_nditer_too_large(PyObject *NPY_UNUSED(self), PyObject *args) {
static PyMethodDef Multiarray_TestsMethods[] = {
+ {"IsPythonScalar",
+ IsPythonScalar,
+ METH_VARARGS, NULL},
{"test_neighborhood_iterator",
test_neighborhood_iterator,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 51c594781..1bf0613e4 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -253,7 +253,7 @@ PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
{
npy_intp newd1;
PyArray_Descr *descr;
- char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
+ static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
if (DEPRECATE(msg) < 0) {
return -1;
@@ -274,7 +274,7 @@ PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode)
{
npy_intp newdims[2];
PyArray_Descr *descr;
- char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
+ static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
if (DEPRECATE(msg) < 0) {
return -1;
@@ -346,8 +346,9 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
}
if (ndim == 1 && axis != 0) {
- char msg[] = "axis != 0 for ndim == 1; this will raise an error in "
- "future versions of numpy";
+ static const char msg[] = "axis != 0 for ndim == 1; "
+ "this will raise an error in "
+ "future versions of numpy";
if (DEPRECATE(msg) < 0) {
return NULL;
}
diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src
index ba5c58fb4..b7f28bb39 100644
--- a/numpy/core/src/npymath/npy_math.c.src
+++ b/numpy/core/src/npymath/npy_math.c.src
@@ -491,7 +491,11 @@ double npy_log2(double x)
#ifndef HAVE_CBRT@C@
@type@ npy_cbrt@c@(@type@ x)
{
- if (x < 0) {
+ /* don't set invalid flag */
+ if (npy_isnan(x)) {
+ return NPY_NAN;
+ }
+ else if (x < 0) {
return -npy_pow@c@(-x, 1. / 3.);
}
else {
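A sketch of the intended behaviour from Python, assuming this npymath
fallback is compiled in (``np.cbrt`` is new in 1.10):

    >>> np.cbrt(-8.)
    -2.0
    >>> np.cbrt(np.nan)    # nan passes through without the invalid flag
    nan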
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index 920f107b8..9cbfd32ae 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -247,7 +247,8 @@
#ifdef HAVE_@KIND@@C@
@type@ npy_@kind@@c@(@ctype@ z)
{
- __@ctype@_to_c99_cast z1 = {z};
+ __@ctype@_to_c99_cast z1;
+ z1.npy_z = z;
return @kind@@c@(z1.c99_z);
}
#endif
@@ -260,8 +261,9 @@
#ifdef HAVE_@KIND@@C@
@ctype@ npy_@kind@@c@(@ctype@ z)
{
- __@ctype@_to_c99_cast z1 = {z};
+ __@ctype@_to_c99_cast z1;
__@ctype@_to_c99_cast ret;
+ z1.npy_z = z;
ret.c99_z = @kind@@c@(z1.c99_z);
return ret.npy_z;
}
@@ -275,9 +277,11 @@
#ifdef HAVE_@KIND@@C@
@ctype@ npy_@kind@@c@(@ctype@ x, @ctype@ y)
{
- __@ctype@_to_c99_cast xcast = {x};
- __@ctype@_to_c99_cast ycast = {y};
+ __@ctype@_to_c99_cast xcast;
+ __@ctype@_to_c99_cast ycast;
__@ctype@_to_c99_cast ret;
+ xcast.npy_z = x;
+ ycast.npy_z = y;
ret.c99_z = @kind@@c@(xcast.c99_z, ycast.c99_z);
return ret.npy_z;
}
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index b3b1690be..284d203bf 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -485,6 +485,24 @@ do { \
* support is available
*/
#ifdef NPY_USE_C99_COMPLEX
+
+/* Microsoft C defines _MSC_VER */
+#ifdef _MSC_VER
+typedef union {
+ npy_cdouble npy_z;
+ _Dcomplex c99_z;
+} __npy_cdouble_to_c99_cast;
+
+typedef union {
+ npy_cfloat npy_z;
+ _Fcomplex c99_z;
+} __npy_cfloat_to_c99_cast;
+
+typedef union {
+ npy_clongdouble npy_z;
+ _Lcomplex c99_z;
+} __npy_clongdouble_to_c99_cast;
+#else /* !_MSC_VER */
typedef union {
npy_cdouble npy_z;
complex double c99_z;
@@ -499,7 +517,9 @@ typedef union {
npy_clongdouble npy_z;
complex long double c99_z;
} __npy_clongdouble_to_c99_cast;
-#else
+#endif /* !_MSC_VER */
+
+#else /* !NPY_USE_C99_COMPLEX */
typedef union {
npy_cdouble npy_z;
npy_cdouble c99_z;
@@ -514,6 +534,6 @@ typedef union {
npy_clongdouble npy_z;
npy_clongdouble c99_z;
} __npy_clongdouble_to_c99_cast;
-#endif
+#endif /* !NPY_USE_C99_COMPLEX */
#endif /* !_NPY_MATH_PRIVATE_H_ */
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 71d448ee9..882913e2f 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -21,16 +21,6 @@
*/
#define NPY_MAX_COPY_ALIGNMENT 16
-/* Safe to use ldexp and frexp for long double for MSVC builds */
-#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE) || defined(_MSC_VER)
- #ifdef HAVE_LDEXP
- #define HAVE_LDEXPL 1
- #endif
- #ifdef HAVE_FREXP
- #define HAVE_FREXPL 1
- #endif
-#endif
-
/* Disable broken Sun Workshop Pro math functions */
#ifdef __SUNPRO_C
#undef HAVE_ATAN2
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 14076f3fa..a69fc8147 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -47,20 +47,23 @@
*****************************************************************************
*/
+/* unary loop input and output contiguous */
+#define IS_UNARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
+ steps[1] == sizeof(tout))
#define IS_BINARY_REDUCE ((args[0] == args[2])\
&& (steps[0] == steps[2])\
&& (steps[0] == 0))
-/* binary loop input and output continous */
+/* binary loop input and output contiguous */
#define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
steps[1] == sizeof(tin) && \
steps[2] == sizeof(tout))
-/* binary loop input and output continous with first scalar */
+/* binary loop input and output contiguous with first scalar */
#define IS_BINARY_CONT_S1(tin, tout) (steps[0] == 0 && \
steps[1] == sizeof(tin) && \
steps[2] == sizeof(tout))
-/* binary loop input and output continous with second scalar */
+/* binary loop input and output contiguous with second scalar */
#define IS_BINARY_CONT_S2(tin, tout) (steps[0] == sizeof(tin) && \
steps[1] == 0 && \
steps[2] == sizeof(tout))
@@ -79,6 +82,33 @@
npy_intp i;\
for(i = 0; i < n; i++, ip1 += is1, op1 += os1)
+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in` and
+ * storing the result in `tout * out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where it's worthwhile to avoid code bloat
+ */
+#define UNARY_LOOP_FAST(tin, tout, op) \
+ do { \
+ /* condition allows compiler to optimize the generic macro */ \
+ if (IS_UNARY_CONT(tin, tout)) { \
+ UNARY_LOOP { \
+ const tin in = *(tin *)ip1; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ else { \
+ UNARY_LOOP { \
+ const tin in = *(tin *)ip1; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ } \
+ while (0)
+
#define UNARY_LOOP_TWO_OUT\
char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\
npy_intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\
@@ -93,6 +123,51 @@
npy_intp i;\
for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1)
+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in1`, `tin in2` and
+ * storing the result in `tout * out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where it's worthwhile to avoid code bloat
+ */
+#define BINARY_LOOP_FAST(tin, tout, op) \
+ do { \
+ /* condition allows compiler to optimize the generic macro */ \
+ if (IS_BINARY_CONT(tin, tout)) { \
+ BINARY_LOOP { \
+ const tin in1 = *(tin *)ip1; \
+ const tin in2 = *(tin *)ip2; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ else if (IS_BINARY_CONT_S1(tin, tout)) { \
+ const tin in1 = *(tin *)args[0]; \
+ BINARY_LOOP { \
+ const tin in2 = *(tin *)ip2; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ else if (IS_BINARY_CONT_S2(tin, tout)) { \
+ const tin in2 = *(tin *)args[1]; \
+ BINARY_LOOP { \
+ const tin in1 = *(tin *)ip1; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ else { \
+ BINARY_LOOP { \
+ const tin in1 = *(tin *)ip1; \
+ const tin in2 = *(tin *)ip2; \
+ tout * out = (tout *)op1; \
+ op; \
+ } \
+ } \
+ } \
+ while (0)
+
#define BINARY_REDUCE_LOOP_INNER\
char *ip2 = args[1]; \
npy_intp is2 = steps[1]; \
@@ -703,58 +778,40 @@ NPY_NO_EXPORT void
}
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_square(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1*in1;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = in * in);
}
NPY_NO_EXPORT void
@TYPE@_reciprocal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = (@type@)(1.0/in1);
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = 1.0 / in);
}
NPY_NO_EXPORT void
@TYPE@_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = in);
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_negative(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = (@type@)(-(@type@)in1);
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = -in);
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_logical_not(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((npy_bool *)op1) = !in1;
- }
+ UNARY_LOOP_FAST(@type@, npy_bool, *out = !in);
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_invert(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = ~in1;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = ~in);
}
/**begin repeat1
@@ -764,7 +821,7 @@ NPY_NO_EXPORT void
* #OP = +, -,*, &, |, ^, <<, >>#
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
if(IS_BINARY_REDUCE) {
@@ -774,11 +831,7 @@ NPY_NO_EXPORT void
*((@type@ *)iop1) = io1;
}
else {
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- *((@type@ *)op1) = in1 @OP@ in2;
- }
+ BINARY_LOOP_FAST(@type@, @type@, *out = in1 @OP@ in2);
}
}
@@ -797,39 +850,7 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 void
* gcc vectorization of this is not good (PR60575) but manual integer
* vectorization is too tedious to be worthwhile
*/
- if (IS_BINARY_CONT(@type@, npy_bool)) {
- npy_intp i, n = dimensions[0];
- @type@ * a = (@type@ *)args[0], * b = (@type@ *)args[1];
- npy_bool * o = (npy_bool *)args[2];
- for (i = 0; i < n; i++) {
- o[i] = a[i] @OP@ b[i];
- }
- }
- else if (IS_BINARY_CONT_S1(@type@, npy_bool)) {
- npy_intp i, n = dimensions[0];
- @type@ a = *(@type@ *)args[0];
- @type@ * b = (@type@ *)args[1];
- npy_bool * o = (npy_bool *)args[2];
- for (i = 0; i < n; i++) {
- o[i] = a @OP@ b[i];
- }
- }
- else if (IS_BINARY_CONT_S2(@type@, npy_bool)) {
- npy_intp i, n = dimensions[0];
- @type@ * a = (@type@ *)args[0];
- @type@ b = *(@type@*)args[1];
- npy_bool * o = (npy_bool *)args[2];
- for (i = 0; i < n; i++) {
- o[i] = a[i] @OP@ b;
- }
- }
- else {
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- *((npy_bool *)op1) = in1 @OP@ in2;
- }
- }
+ BINARY_LOOP_FAST(@type@, npy_bool, *out = in1 @OP@ in2);
}
/**end repeat1**/
@@ -914,22 +935,16 @@ NPY_NO_EXPORT void
* #type = npy_byte, npy_short, npy_int, npy_long, npy_longlong#
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = (in1 >= 0) ? in1 : -in1;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = (in >= 0) ? in : -in);
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : 0);
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : (in < 0 ? -1 : 0));
}
NPY_NO_EXPORT void
@@ -997,13 +1012,10 @@ NPY_NO_EXPORT void
}
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1 > 0 ? 1 : 0;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = in > 0 ? 1 : 0);
}
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index b12fbf57f..dc5065f14 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1901,60 +1901,67 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/*
- * Validate the core dimensions of all the operands,
- * and collect all of the labeled core dimension sizes
- * into the array 'core_dim_sizes'. Initialize them to
- * 1, for example in the case where the operand broadcasts
- * to a core dimension, it won't be visited.
+ * Validate the core dimensions of all the operands, and collect all of
+ * the labelled core dimensions into 'core_dim_sizes'.
+ *
+ * The behavior has been changed in Numpy 1.10.0, and the following
+ * requirements must be fulfilled or an error will be raised:
+ * * Arguments, both input and output, must have at least as many
+ * dimensions as the corresponding number of core dimensions. In
+ * previous versions, 1's were prepended to the shape as needed.
+ * * Core dimensions with same labels must have exactly matching sizes.
+ * In previous versions, core dimensions of size 1 would broadcast
+ * against other core dimensions with the same label.
+ * * All core dimensions must have their size specified by a passed in
+ * input or output argument. In previous versions, core dimensions in
+ * an output argument that were not specified in an input argument,
+ * and whose size could not be inferred from a passed in output
+ * argument, would have their size set to 1.
*/
for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
- core_dim_sizes[i] = 1;
+ core_dim_sizes[i] = -1;
}
for (i = 0; i < nop; ++i) {
if (op[i] != NULL) {
int dim_offset = ufunc->core_offsets[i];
int num_dims = ufunc->core_num_dims[i];
int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
- /* Make sure any output operand has enough dimensions */
- if (i >= nin && core_start_dim < 0) {
+
+ /* Check if operands have enough dimensions */
+ if (core_start_dim < 0) {
PyErr_Format(PyExc_ValueError,
- "%s: Output operand %d does not have enough dimensions "
- "(has %d, gufunc core with signature %s "
- "requires %d)",
- ufunc_name, i - nin, PyArray_NDIM(op[i]),
+ "%s: %s operand %d does not have enough "
+ "dimensions (has %d, gufunc core with "
+ "signature %s requires %d)",
+ ufunc_name, i < nin ? "Input" : "Output",
+ i < nin ? i : i - nin, PyArray_NDIM(op[i]),
ufunc->core_signature, num_dims);
retval = -1;
goto fail;
}
/*
- * Make sure each core dimension matches all other core
- * dimensions with the same label
- *
- * NOTE: For input operands, core_start_dim may be negative.
- * In that case, the operand is being broadcast onto
- * core dimensions. For example, a scalar will broadcast
- * to fit any core signature.
+ * Make sure every core dimension exactly matches all other core
+ * dimensions with the same label.
*/
- if (core_start_dim >= 0) {
- idim = 0;
- } else {
- idim = -core_start_dim;
- }
- for (; idim < num_dims; ++idim) {
- int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim];
+ for (idim = 0; idim < num_dims; ++idim) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset+idim];
npy_intp op_dim_size =
- PyArray_SHAPE(op[i])[core_start_dim + idim];
- if (core_dim_sizes[core_dim_index] == 1) {
+ PyArray_DIM(op[i], core_start_dim+idim);
+
+ if (core_dim_sizes[core_dim_index] == -1) {
core_dim_sizes[core_dim_index] = op_dim_size;
- } else if ((i >= nin || op_dim_size != 1) &&
- core_dim_sizes[core_dim_index] != op_dim_size) {
+ }
+ else if (op_dim_size != core_dim_sizes[core_dim_index]) {
PyErr_Format(PyExc_ValueError,
- "%s: Operand %d has a mismatch in its core "
- "dimension %d, with gufunc signature %s "
- "(size %zd is different from %zd)",
- ufunc_name, i, idim, ufunc->core_signature,
- op_dim_size, core_dim_sizes[core_dim_index]);
+ "%s: %s operand %d has a mismatch in its "
+ "core dimension %d, with gufunc "
+ "signature %s (size %zd is different "
+ "from %zd)",
+ ufunc_name, i < nin ? "Input" : "Output",
+ i < nin ? i : i - nin, idim,
+ ufunc->core_signature, op_dim_size,
+ core_dim_sizes[core_dim_index]);
retval = -1;
goto fail;
}
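To make the 1.10.0 behaviour change concrete, a sketch using the `inner1d`
test gufunc with signature "(i),(i)->()" (assuming numpy.core.umath_tests is
built):

    >>> from numpy.core.umath_tests import inner1d
    >>> a = np.arange(8).reshape(4, 2)
    >>> b = np.arange(4).reshape(4, 1)
    >>> inner1d(a, b)    # core dimensions 2 and 1 no longer broadcast
    Traceback (most recent call last):
        ...
    ValueError: inner1d: Input operand 1 has a mismatch in its core dimension 0, with gufunc signature (i),(i)->() (size 1 is different from 2)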
@@ -1962,6 +1969,44 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
}
+ /*
+ * Make sure no core dimension is unspecified.
+ */
+ for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
+ if (core_dim_sizes[i] == -1) {
+ break;
+ }
+ }
+ if (i != ufunc->core_num_dim_ix) {
+ /*
+ * There is at least one core dimension missing, find in which
+ * operand it comes up first (it has to be an output operand).
+ */
+ const int missing_core_dim = i;
+ int out_op;
+ for (out_op = nin; out_op < nop; ++out_op) {
+ int first_idx = ufunc->core_offsets[out_op];
+ int last_idx = first_idx + ufunc->core_num_dims[out_op];
+ for (i = first_idx; i < last_idx; ++i) {
+ if (ufunc->core_dim_ixs[i] == missing_core_dim) {
+ break;
+ }
+ }
+ if (i < last_idx) {
+ /* Change index offsets for error message */
+ out_op -= nin;
+ i -= first_idx;
+ break;
+ }
+ }
+ PyErr_Format(PyExc_ValueError,
+ "%s: Output operand %d has core dimension %d "
+ "unspecified, with gufunc signature %s",
+ ufunc_name, out_op, i, ufunc->core_signature);
+ retval = -1;
+ goto fail;
+ }
+
/* Fill in the initial part of 'iter_shape' */
for (idim = 0; idim < broadcast_ndim; ++idim) {
iter_shape[idim] = -1;
@@ -3926,18 +3971,19 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS];
PyObject *obj, *wrap = NULL;
- /* If a 'subok' parameter is passed and isn't True, don't wrap */
+ /*
+     * If a 'subok' parameter is passed and isn't True, don't wrap, but put
+     * None into the slots with out arguments, which means the out argument
+     * is returned
+ */
if (kwds != NULL && (obj = PyDict_GetItem(kwds,
npy_um_str_subok)) != NULL) {
if (obj != Py_True) {
- for (i = 0; i < nout; i++) {
- output_wrap[i] = NULL;
- }
- return;
+ /* skip search for wrap members */
+ goto handle_out;
}
}
- nargs = PyTuple_GET_SIZE(args);
+
for (i = 0; i < nin; i++) {
obj = PyTuple_GET_ITEM(args, i);
if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
@@ -3995,6 +4041,8 @@ _find_array_wrap(PyObject *args, PyObject *kwds,
* exact ndarray so that no PyArray_Return is
* done in that case.
*/
+handle_out:
+ nargs = PyTuple_GET_SIZE(args);
for (i = 0; i < nout; i++) {
int j = nin + i;
int incref = 1;
diff --git a/numpy/core/src/umath/umath_tests.c.src b/numpy/core/src/umath/umath_tests.c.src
index 46d06288d..33d9846bd 100644
--- a/numpy/core/src/umath/umath_tests.c.src
+++ b/numpy/core/src/umath/umath_tests.c.src
@@ -38,6 +38,9 @@
INIT_OUTER_LOOP_3 \
npy_intp s3 = *steps++;
+#define BEGIN_OUTER_LOOP_2 \
+ for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1) {
+
#define BEGIN_OUTER_LOOP_3 \
for (N_ = 0; N_ < dN; N_++, args[0] += s0, args[1] += s1, args[2] += s2) {
@@ -58,13 +61,14 @@ char *inner1d_signature = "(i),(i)->()";
/**begin repeat
#TYPE=LONG,DOUBLE#
- #typ=npy_long, npy_double#
+ #typ=npy_long,npy_double#
*/
/*
* This implements the function
* out[n] = sum_i { in1[n, i] * in2[n, i] }.
*/
+
static void
@TYPE@_inner1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
@@ -91,7 +95,7 @@ char *innerwt_signature = "(i),(i),(i)->()";
/**begin repeat
#TYPE=LONG,DOUBLE#
- #typ=npy_long, npy_double#
+ #typ=npy_long,npy_double#
*/
@@ -127,7 +131,7 @@ char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)";
/**begin repeat
#TYPE=FLOAT,DOUBLE,LONG#
- #typ=npy_float, npy_double,npy_long#
+ #typ=npy_float,npy_double,npy_long#
*/
/*
@@ -135,7 +139,6 @@ char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)";
* out[k, m, p] = sum_n { in1[k, m, n] * in2[k, n, p] }.
*/
-
static void
@TYPE@_matrix_multiply(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
@@ -177,27 +180,66 @@ static void
/**end repeat**/
-/* The following lines were generated using a slightly modified
- version of code_generators/generate_umath.py and adding these
- lines to defdict:
-
-defdict = {
-'inner1d' :
- Ufunc(2, 1, None_,
- r'''inner on the last dimension and broadcast on the rest \n"
- " \"(i),(i)->()\" \n''',
- TD('ld'),
- ),
-'innerwt' :
- Ufunc(3, 1, None_,
- r'''inner1d with a weight argument \n"
- " \"(i),(i),(i)->()\" \n''',
- TD('ld'),
- ),
-}
+char *euclidean_pdist_signature = "(n,d)->(p)";
+/**begin repeat
+
+ #TYPE=FLOAT,DOUBLE#
+ #typ=npy_float,npy_double#
+ #sqrt_func=sqrtf,sqrt#
*/
+/*
+ * This implements the function
+ *        out[j*(2*n-3-j)+k-1] = sqrt( sum_d { (in1[j, d] - in1[k, d])^2 } )
+ * with 0 < k < j < n, i.e. computes all unique pairwise Euclidean distances.
+ */
+
+static void
+@TYPE@_euclidean_pdist(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_2
+ npy_intp len_n = *dimensions++;
+ npy_intp len_d = *dimensions++;
+ npy_intp len_p = *dimensions;
+ npy_intp stride_n = *steps++;
+ npy_intp stride_d = *steps++;
+ npy_intp stride_p = *steps;
+
+ assert(len_n * (len_n - 1) / 2 == len_p);
+
+ BEGIN_OUTER_LOOP_2
+ const char *data_this = (const char *)args[0];
+ char *data_out = args[1];
+ npy_intp n;
+ for (n = 0; n < len_n; ++n) {
+ const char *data_that = data_this + stride_n;
+ npy_intp nn;
+ for (nn = n + 1; nn < len_n; ++nn) {
+ const char *ptr_this = data_this;
+ const char *ptr_that = data_that;
+ @typ@ out = 0;
+ npy_intp d;
+ for (d = 0; d < len_d; ++d) {
+ const @typ@ delta = *(const @typ@ *)ptr_this -
+ *(const @typ@ *)ptr_that;
+ out += delta * delta;
+ ptr_this += stride_d;
+ ptr_that += stride_d;
+ }
+ *(@typ@ *)data_out = @sqrt_func@(out);
+ data_that += stride_n;
+ data_out += stride_p;
+ }
+ data_this += stride_n;
+ }
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
+
static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d };
static void * inner1d_data[] = { (void *)NULL, (void *)NULL };
static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
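Because the gufunc machinery cannot infer p = n*(n-1)/2 from the inputs alone
(see the unspecified-core-dimension check added to ufunc_object.c above), an
output array must be supplied. A usage sketch, assuming the built test module:

    >>> from numpy.core.umath_tests import euclidean_pdist
    >>> a = np.zeros((4, 3))
    >>> out = np.empty(6)    # p = 4*(4-1)/2 = 6
    >>> euclidean_pdist(a, out)
    array([ 0.,  0.,  0.,  0.,  0.,  0.])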
@@ -208,39 +250,49 @@ static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multip
static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL };
static char matrix_multiply_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
+static PyUFuncGenericFunction euclidean_pdist_functions[] =
+ { FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist };
+static void *euclidean_pdist_data[] = { (void *)NULL, (void *)NULL };
+static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT,
+ NPY_DOUBLE, NPY_DOUBLE };
+
+
static void
addUfuncs(PyObject *dictionary) {
PyObject *f;
- f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data, inner1d_signatures, 2,
- 2, 1, PyUFunc_None, "inner1d",
- "inner on the last dimension and broadcast on the rest \n"\
- " \"(i),(i)->()\" \n",
- 0, inner1d_signature);
+ f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data,
+ inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d",
+ "inner on the last dimension and broadcast on the rest \n"
+ " \"(i),(i)->()\" \n",
+ 0, inner1d_signature);
PyDict_SetItemString(dictionary, "inner1d", f);
Py_DECREF(f);
- f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data, innerwt_signatures, 2,
- 3, 1, PyUFunc_None, "innerwt",
- "inner1d with a weight argument \n"\
- " \"(i),(i),(i)->()\" \n",
- 0, innerwt_signature);
+ f = PyUFunc_FromFuncAndDataAndSignature(innerwt_functions, innerwt_data,
+ innerwt_signatures, 2, 3, 1, PyUFunc_None, "innerwt",
+ "inner1d with a weight argument \n"
+ " \"(i),(i),(i)->()\" \n",
+ 0, innerwt_signature);
PyDict_SetItemString(dictionary, "innerwt", f);
Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions,
- matrix_multiply_data, matrix_multiply_signatures,
- 3, 2, 1, PyUFunc_None, "matrix_multiply",
- "matrix multiplication on last two dimensions \n"\
- " \"(m,n),(n,p)->(m,p)\" \n",
- 0, matrix_multiply_signature);
+ matrix_multiply_data, matrix_multiply_signatures,
+ 3, 2, 1, PyUFunc_None, "matrix_multiply",
+ "matrix multiplication on last two dimensions \n"
+ " \"(m,n),(n,p)->(m,p)\" \n",
+ 0, matrix_multiply_signature);
PyDict_SetItemString(dictionary, "matrix_multiply", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(euclidean_pdist_functions,
+                    euclidean_pdist_data, euclidean_pdist_signatures,
+ 2, 1, 1, PyUFunc_None, "euclidean_pdist",
+ "pairwise euclidean distance on last two dimensions \n"
+ " \"(n,d)->(p)\" \n",
+ 0, euclidean_pdist_signature);
+ PyDict_SetItemString(dictionary, "euclidean_pdist", f);
+ Py_DECREF(f);
}
-/*
- End of auto-generated code.
-*/
-
-
static PyObject *
UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index bf0ba6807..4e432f885 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1412,6 +1412,11 @@ class TestDateTime(TestCase):
np.datetime64('2012-02-03T14Z', 's'),
np.timedelta64(5, 'Y'))
+ def test_datetime_arange_no_dtype(self):
+ d = np.array('2010-01-04', dtype="M8[D]")
+ assert_equal(np.arange(d, d + 1), d)
+ assert_raises(ValueError, np.arange, d)
+
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
@@ -1430,6 +1435,11 @@ class TestDateTime(TestCase):
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
+ def test_timedelta_arange_no_dtype(self):
+ d = np.array(5, dtype="m8[D]")
+ assert_equal(np.arange(d, d + 1), d)
+ assert_raises(ValueError, np.arange, d)
+
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 18660351c..2621c8696 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -508,13 +508,8 @@ class TestDtypeAttributeDeletion(object):
"isbuiltin", "isnative", "isalignedstruct", "fields",
"metadata", "hasobject"]
- if sys.version[:3] == '2.4':
- error = TypeError
- else:
- error = AttributeError
-
for s in attr:
- assert_raises(error, delattr, dt, s)
+ assert_raises(AttributeError, delattr, dt, s)
def test_dtype_writable_attributes_deletion(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index f5a8bc4d2..1e47a2297 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1757,6 +1757,40 @@ class TestMethods(TestCase):
a.diagonal()
assert_(sys.getrefcount(a) < 50)
+ def test_put(self):
+ icodes = np.typecodes['AllInteger']
+ fcodes = np.typecodes['AllFloat']
+ for dt in icodes + fcodes + 'O':
+ tgt = np.array([0, 1, 0, 3, 0, 5], dtype=dt)
+
+ # test 1-d
+ a = np.zeros(6, dtype=dt)
+ a.put([1, 3, 5], [1, 3, 5])
+ assert_equal(a, tgt)
+
+ # test 2-d
+ a = np.zeros((2, 3), dtype=dt)
+ a.put([1, 3, 5], [1, 3, 5])
+ assert_equal(a, tgt.reshape(2, 3))
+
+ for dt in '?':
+ tgt = np.array([False, True, False, True, False, True], dtype=dt)
+
+ # test 1-d
+ a = np.zeros(6, dtype=dt)
+ a.put([1, 3, 5], [True]*3)
+ assert_equal(a, tgt)
+
+ # test 2-d
+ a = np.zeros((2, 3), dtype=dt)
+ a.put([1, 3, 5], [True]*3)
+ assert_equal(a, tgt.reshape(2, 3))
+
+ # check must be writeable
+ a = np.zeros(6)
+ a.flags.writeable = False
+ assert_raises(ValueError, a.put, [1, 3, 5], [1, 3, 5])
+
def test_ravel(self):
a = np.array([[0, 1], [2, 3]])
assert_equal(a.ravel(), [0, 1, 2, 3])
@@ -1887,7 +1921,7 @@ class TestMethods(TestCase):
a = np.array([1-1j, 1, 2.0, 'f'], object)
assert_raises(AttributeError, lambda: a.conj())
- assert_raises(AttributeError, lambda: a.conjugate())
+ assert_raises(AttributeError, lambda: a.conjugate())
class TestBinop(object):
@@ -2128,6 +2162,16 @@ class TestBinop(object):
assert_(isinstance(obj2, SomeClass2))
+class TestCAPI(TestCase):
+ def test_IsPythonScalar(self):
+ from numpy.core.multiarray_tests import IsPythonScalar
+ assert_(IsPythonScalar(b'foobar'))
+ assert_(IsPythonScalar(1))
+ assert_(IsPythonScalar(2**80))
+ assert_(IsPythonScalar(2.))
+ assert_(IsPythonScalar("a"))
+
+
class TestSubscripting(TestCase):
def test_test_zero_rank(self):
x = array([1, 2, 3])
@@ -4282,6 +4326,31 @@ class TestNewBufferProtocol(object):
x3 = np.arange(dt3.itemsize, dtype=np.int8).view(dt3)
self._check_roundtrip(x3)
+ def test_relaxed_strides(self):
+ # Test that relaxed strides are converted to non-relaxed
+ c = np.ones((1, 10, 10), dtype='i8')
+
+ # Check for NPY_RELAXED_STRIDES_CHECKING:
+ if np.ones((10, 1), order="C").flags.f_contiguous:
+ c.strides = (-1, 80, 8)
+
+ assert memoryview(c).strides == (800, 80, 8)
+
+ # Writing C-contiguous data to a BytesIO buffer should work
+ fd = io.BytesIO()
+ fd.write(c.data)
+
+ fortran = c.T
+ assert memoryview(fortran).strides == (8, 80, 800)
+
+ arr = np.ones((1, 10))
+ if arr.flags.f_contiguous:
+ shape, strides = get_buffer_info(arr, ['F_CONTIGUOUS'])
+ assert_(strides[0] == 8)
+ arr = np.ones((10, 1), order='F')
+ shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
+ assert_(strides[-1] == 8)
+
class TestArrayAttributeDeletion(object):
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 0055c038b..dbf2cfaae 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2481,13 +2481,8 @@ def test_iter_non_writable_attribute_deletion():
"iterationneedsapi", "has_multi_index", "has_index", "dtypes",
"ndim", "nop", "itersize", "finished"]
- if sys.version[:3] == '2.4':
- error = TypeError
- else:
- error = AttributeError
-
for s in attr:
- assert_raises(error, delattr, it, s)
+ assert_raises(AttributeError, delattr, it, s)
def test_iter_writable_attribute_deletion():
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 46e864495..ea145ef81 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1697,6 +1697,20 @@ class TestStdVar(TestCase):
assert_almost_equal(std(self.A, ddof=2)**2,
self.real_var*len(self.A)/float(len(self.A)-2))
+ def test_out_scalar(self):
+ d = np.arange(10)
+ out = np.array(0.)
+ r = np.std(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.var(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+ r = np.mean(d, out=out)
+ assert_(r is out)
+ assert_array_equal(r, out)
+
+
class TestStdVarComplex(TestCase):
def test_basic(self):
A = array([1, 1.j, -1, -1.j])
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 8c9ce5c70..355e5480a 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import sys
from os import path
import numpy as np
from numpy.testing import *
@@ -15,6 +16,14 @@ class TestFromrecords(TestCase):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
assert_equal(r[0].item(), (456, 'dbe', 1.2))
+ assert_equal(r['col1'].dtype.kind, 'i')
+ if sys.version_info[0] >= 3:
+ assert_equal(r['col2'].dtype.kind, 'U')
+ assert_equal(r['col2'].dtype.itemsize, 12)
+ else:
+ assert_equal(r['col2'].dtype.kind, 'S')
+ assert_equal(r['col2'].dtype.itemsize, 3)
+ assert_equal(r['col3'].dtype.kind, 'f')
def test_method_array(self):
r = np.rec.array(asbytes('abcdefg') * 100, formats='i2,a3,i4', shape=3, byteorder='big')
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index eacc266be..a285d5334 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -388,21 +388,18 @@ class TestUfunc(TestCase):
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
- msg = "broadcast in core dimensions"
+ # Broadcast in core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
- assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
- msg = "extend & broadcast core and loop dimensions"
+ assert_raises(ValueError, umt.inner1d, a, b)
+        # Extending core dimensions should fail
a = np.arange(8).reshape((4, 2))
b = np.array(7)
- assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
- msg = "broadcast should fail"
+ assert_raises(ValueError, umt.inner1d, a, b)
+ # Broadcast should fail
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
- try:
- ret = umt.inner1d(a, b)
- assert_equal(ret, None, err_msg=msg)
- except ValueError: None
+ assert_raises(ValueError, umt.inner1d, a, b)
def test_type_cast(self):
msg = "type cast"
@@ -542,8 +539,8 @@ class TestUfunc(TestCase):
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base != None
ref = ref and a2.base != None
- if broadcastable(a1.shape[-1], a2.shape[-2]) and \
- broadcastable(a1.shape[0], a2.shape[0]):
+ if (a1.shape[-1] == a2.shape[-2] and
+ broadcastable(a1.shape[0], a2.shape[0])):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
@@ -553,6 +550,16 @@ class TestUfunc(TestCase):
assert_equal(ref, True, err_msg="reference check")
+ def test_euclidean_pdist(self):
+ a = np.arange(12, dtype=np.float).reshape(4, 3)
+ out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
+ umt.euclidean_pdist(a, out)
+ b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
+ b = b[~np.tri(a.shape[0], dtype=bool)]
+ assert_almost_equal(out, b)
+ # An output array is required to determine p with signature (n,d)->(p)
+ assert_raises(ValueError, umt.euclidean_pdist, a)
+
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index f8d6520d7..c71b7b658 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -36,6 +36,36 @@ class TestConstants(TestCase):
def test_euler_gamma(self):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
+class TestOut(TestCase):
+ def test_out_subok(self):
+ for b in (True, False):
+ aout = np.array(0.5)
+
+ r = np.add(aout, 2, out=aout)
+ assert_(r is aout)
+ assert_array_equal(r, aout)
+
+ r = np.add(aout, 2, out=aout, subok=b)
+ assert_(r is aout)
+ assert_array_equal(r, aout)
+
+ r = np.add(aout, 2, aout, subok=False)
+ assert_(r is aout)
+ assert_array_equal(r, aout)
+
+ d = np.ones(5)
+ o1 = np.zeros(5)
+ o2 = np.zeros(5, dtype=np.int32)
+ r1, r2 = np.frexp(d, o1, o2, subok=b)
+ assert_(r1 is o1)
+ assert_array_equal(r1, o1)
+ assert_(r2 is o2)
+ assert_array_equal(r2, o2)
+
+ r1, r2 = np.frexp(d, out=o1, subok=b)
+ assert_(r1 is o1)
+ assert_array_equal(r1, o1)
+
class TestDivision(TestCase):
def test_division_int(self):
@@ -759,6 +789,17 @@ class TestBool(TestCase):
assert_equal(np.bitwise_xor(arg1, arg2), out)
+class TestInt(TestCase):
+ def test_logical_not(self):
+ x = np.ones(10, dtype=np.int16)
+ o = np.ones(10 * 2, dtype=np.bool)
+ tgt = o.copy()
+ tgt[::2] = False
+ os = o[::2]
+ assert_array_equal(np.logical_not(x, out=os), False)
+ assert_array_equal(o, tgt)
+
+
class TestFloatingPoint(TestCase):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index 8484685c0..d7fe702a6 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -16,7 +16,7 @@ from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
- quote_args
+ quote_args, get_num_build_jobs
from numpy.distutils.compat import get_exception
@@ -165,9 +165,10 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
return []
# FIXME:RELATIVE_IMPORT
if sys.version_info[0] < 3:
- from .fcompiler import FCompiler
+ from .fcompiler import FCompiler, is_f_file, has_f90_header
else:
- from numpy.distutils.fcompiler import FCompiler
+ from numpy.distutils.fcompiler import (FCompiler, is_f_file,
+ has_f90_header)
if isinstance(self, FCompiler):
display = []
for fc in ['f77', 'f90', 'fix']:
@@ -189,20 +190,45 @@ def CCompiler_compile(self, sources, output_dir=None, macros=None,
display += "\nextra options: '%s'" % (' '.join(extra_postargs))
log.info(display)
- # build any sources in same order as they were originally specified
- # especially important for fortran .f90 files using modules
+ def single_compile(args):
+ obj, (src, ext) = args
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
if isinstance(self, FCompiler):
objects_to_build = list(build.keys())
+ f77_objects, other_objects = [], []
for obj in objects:
if obj in objects_to_build:
src, ext = build[obj]
if self.compiler_type=='absoft':
obj = cyg2win32(obj)
src = cyg2win32(src)
- self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+ if is_f_file(src) and not has_f90_header(src):
+ f77_objects.append((obj, (src, ext)))
+ else:
+ other_objects.append((obj, (src, ext)))
+
+ # f77 objects can be built in parallel
+ build_items = f77_objects
+ # build f90 modules serial, module files are generated during
+ # compilation and may be used by files later in the list so the
+ # ordering is important
+ for o in other_objects:
+ single_compile(o)
+ else:
+ build_items = build.items()
+
+ jobs = get_num_build_jobs()
+ if len(build) > 1 and jobs > 1:
+        # build in parallel
+ import multiprocessing.pool
+ pool = multiprocessing.pool.ThreadPool(jobs)
+ pool.map(single_compile, build_items)
+ pool.close()
else:
- for obj, (src, ext) in build.items():
- self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+        # build serially
+ for o in build_items:
+ single_compile(o)
# Return *all* object filenames, not just the ones we just built.
return objects
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index b6912be15..f7249ae81 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -16,6 +16,8 @@ class build(old_build):
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
+ ('jobs=', 'j',
+ "number of parallel jobs"),
]
help_options = old_build.help_options + [
@@ -26,8 +28,14 @@ class build(old_build):
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
+ self.jobs = None
def finalize_options(self):
+ if self.jobs:
+ try:
+ self.jobs = int(self.jobs)
+ except ValueError:
+ raise ValueError("--jobs/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 84ca87250..6e65a3bfb 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -30,6 +30,8 @@ class build_clib(old_build_clib):
('fcompiler=', None,
"specify the Fortran compiler type"),
('inplace', 'i', 'Build in-place'),
+ ('jobs=', 'j',
+ "number of parallel jobs"),
]
boolean_options = old_build_clib.boolean_options + ['inplace']
@@ -38,7 +40,16 @@ class build_clib(old_build_clib):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
- return
+ self.jobs = None
+
+ def finalize_options(self):
+ if self.jobs:
+ try:
+ self.jobs = int(self.jobs)
+ except ValueError:
+ raise ValueError("--jobs/-j argument must be an integer")
+ old_build_clib.finalize_options(self)
+ self.set_undefined_options('build', ('jobs', 'jobs'))
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index b48e4227a..59c453607 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -34,6 +34,8 @@ class build_ext (old_build_ext):
user_options = old_build_ext.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
+ ('jobs=', 'j',
+ "number of parallel jobs"),
]
help_options = old_build_ext.help_options + [
@@ -44,12 +46,19 @@ class build_ext (old_build_ext):
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
+ self.jobs = None
def finalize_options(self):
+ if self.jobs:
+ try:
+ self.jobs = int(self.jobs)
+ except ValueError:
+ raise ValueError("--jobs/-j argument must be an integer")
incl_dirs = self.include_dirs
old_build_ext.finalize_options(self)
if incl_dirs is not None:
self.include_dirs.extend(self.distribution.include_dirs or [])
+ self.set_undefined_options('build', ('jobs', 'jobs'))
def run(self):
if not self.extensions:
@@ -407,11 +416,6 @@ class build_ext (old_build_ext):
if ext.language=='c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
- if sys.version[:3]>='2.3':
- kws = {'target_lang':ext.language}
- else:
- kws = {}
-
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
@@ -419,7 +423,8 @@ class build_ext (old_build_ext):
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
- build_temp=self.build_temp,**kws)
+ build_temp=self.build_temp,
+ target_lang=ext.language)
def _add_dummy_mingwex_sym(self, c_sources):
build_src = self.get_finalized_command("build_src").build_src
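
Besides dropping the long-dead Python 2.3 branch around target_lang, the hunks above connect build_ext's jobs option to the parent build command through set_undefined_options, so a single `setup.py build -j 4` reaches the sub-commands. A simplified sketch of that propagation mechanism (the classes are stand-ins, not distutils itself):

    class Build(object):
        jobs = 4  # as set by `setup.py build -j 4`

    class BuildExt(object):
        jobs = None  # --jobs not given on the build_ext command line

        def set_undefined_options(self, parent):
            # distutils copies options that are still None from the
            # named parent command; shown here for a single attribute
            if self.jobs is None:
                self.jobs = parent.jobs

    build_ext = BuildExt()
    build_ext.set_undefined_options(Build())
    print(build_ext.jobs)  # 4
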
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index c146178f0..19103f2c1 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -13,6 +13,10 @@ import shutil
import distutils
from distutils.errors import DistutilsError
+try:
+ from threading import local as tlocal
+except ImportError:
+ from dummy_threading import local as tlocal
try:
set
@@ -31,7 +35,8 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath', 'njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
- 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']
+ 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
+ 'get_num_build_jobs']
class InstallableLib(object):
"""
@@ -60,6 +65,36 @@ class InstallableLib(object):
self.build_info = build_info
self.target_dir = target_dir
+
+def get_num_build_jobs():
+ """
+ Get the number of parallel build jobs set by the --jobs command line
+ argument of setup.py.
+ If the command did not receive a setting, the environment variable
+ NPY_NUM_BUILD_JOBS is checked; if that is also unset, 1 is returned.
+
+ Returns
+ -------
+ out : int
+ number of parallel jobs that can be run
+
+ """
+ from numpy.distutils.core import get_distribution
+ envjobs = int(os.environ.get("NPY_NUM_BUILD_JOBS", 1))
+ dist = get_distribution()
+ # may be None during configuration
+ if dist is None:
+ return envjobs
+
+ # any of these three commands may have jobs set; take the largest
+ cmdattr = (getattr(dist.get_command_obj('build'), 'jobs', None),
+ getattr(dist.get_command_obj('build_ext'), 'jobs', None),
+ getattr(dist.get_command_obj('build_clib'), 'jobs', None))
+ if all(x is None for x in cmdattr):
+ return envjobs
+ else:
+ return max(x for x in cmdattr if x is not None)
+
def quote_args(args):
# don't used _nt_quote_args as it does not check if
# args items already have quotes or not.
@@ -249,9 +284,9 @@ def gpaths(paths, local_path='', include_non_existing=True):
return _fix_paths(paths, local_path, include_non_existing)
-_temporary_directory = None
+_tdata = tlocal()
def clean_up_temporary_directory():
- global _temporary_directory
+ _temporary_directory = getattr(_tdata, 'tempdir', None)
if not _temporary_directory:
return
try:
@@ -261,13 +296,13 @@ def clean_up_temporary_directory():
_temporary_directory = None
def make_temp_file(suffix='', prefix='', text=True):
- global _temporary_directory
- if not _temporary_directory:
- _temporary_directory = tempfile.mkdtemp()
+ # use the module-level thread-local so each thread reuses one tempdir
+ if not hasattr(_tdata, 'tempdir'):
+ _tdata.tempdir = tempfile.mkdtemp()
atexit.register(clean_up_temporary_directory)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
- dir=_temporary_directory,
+ dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
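
get_num_build_jobs() above establishes the precedence for the job count: an explicit --jobs on any of the three build commands wins (the largest value is taken), otherwise the NPY_NUM_BUILD_JOBS environment variable applies, otherwise the default is 1. The same logic as a self-contained sketch:

    def num_jobs(cmd_jobs, env):
        # cmd_jobs holds the `jobs` attribute of the build, build_ext
        # and build_clib commands; None means --jobs was not given
        envjobs = int(env.get("NPY_NUM_BUILD_JOBS", 1))
        if all(j is None for j in cmd_jobs):
            return envjobs
        return max(j for j in cmd_jobs if j is not None)

    print(num_jobs([None, None, None], {"NPY_NUM_BUILD_JOBS": "8"}))  # 8
    print(num_jobs([2, None, 4], {}))  # 4; --jobs beats the environment
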
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index f3bf848a7..5cf6b3010 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -357,11 +357,11 @@ cb_arg_rules=[
'pyobjfrom': [{debugcapi:'\tfprintf(stderr,"debug-capi:cb:#varname#\\n");'},
{isintent_c: """\
\tif (#name#_nofargs>capi_i) {
-\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
+\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_CARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
l_not(isintent_c): """\
\tif (#name#_nofargs>capi_i) {
-\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
+\t\tPyArrayObject *tmp_arr = (PyArrayObject *)PyArray_New(&PyArray_Type,#rank#,#varname_i#_Dims,#atype#,NULL,(char*)#varname_i#,0,NPY_ARRAY_FARRAY,NULL); /*XXX: Hmm, what will destroy this array??? */
""",
},
"""
@@ -384,7 +384,7 @@ cb_arg_rules=[
\t\t\tfprintf(stderr,\"rv_cb_arr is NULL\\n\");
\t\t\tgoto capi_fail;
\t\t}
-\t\tMEMCOPY(#varname_i#,rv_cb_arr->data,PyArray_NBYTES(rv_cb_arr));
+\t\tMEMCOPY(#varname_i#,PyArray_DATA(rv_cb_arr),PyArray_NBYTES(rv_cb_arr));
\t\tif (capi_tmp != (PyObject *)rv_cb_arr) {
\t\t\tPy_DECREF(rv_cb_arr);
\t\t}
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 7fb630697..01e189dc1 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -223,7 +223,7 @@ cppmacros['SWAP']="""\
\ta = b;\\
\tb = c;}
"""
-#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)'
+#cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR']="""\
#define PRINTPYOBJERR(obj)\\
\tfprintf(stderr,\"#modulename#.error is related to \");\\
@@ -248,8 +248,8 @@ needs['len..']=['f2py_size']
cppmacros['len..']="""\
#define rank(var) var ## _Rank
#define shape(var,dim) var ## _Dims[dim]
-#define old_rank(var) (((PyArrayObject *)(capi_ ## var ## _tmp))->nd)
-#define old_shape(var,dim) (((PyArrayObject *)(capi_ ## var ## _tmp))->dimensions[dim])
+#define old_rank(var) (PyArray_NDIM((PyArrayObject *)(capi_ ## var ## _tmp)))
+#define old_shape(var,dim) PyArray_DIM(((PyArrayObject *)(capi_ ## var ## _tmp)),dim)
#define fshape(var,dim) shape(var,rank(var)-dim-1)
#define len(var) shape(var,0)
#define flen(var) fshape(var,0)
@@ -316,35 +316,35 @@ cppmacros['pyobj_from_string1size']='#define pyobj_from_string1size(v,len) (PyUS
needs['TRYPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYPYARRAYTEMPLATE']="""\
/* New SciPy */
-#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(arr->data)=*v; break;
-#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(arr->data)=*v; break;
-#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data); break;
+#define TRYPYARRAYTEMPLATECHAR case NPY_STRING: *(char *)(PyArray_DATA(arr))=*v; break;
+#define TRYPYARRAYTEMPLATELONG case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;
+#define TRYPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr)); break;
#define TRYPYARRAYTEMPLATE(ctype,typecode) \\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
- if (arr->descr->type==typecode) {*(ctype *)(arr->data)=*v; return 1;}\\
- switch (arr->descr->type_num) {\\
- case NPY_DOUBLE: *(double *)(arr->data)=*v; break;\\
- case NPY_INT: *(int *)(arr->data)=*v; break;\\
- case NPY_LONG: *(long *)(arr->data)=*v; break;\\
- case NPY_FLOAT: *(float *)(arr->data)=*v; break;\\
- case NPY_CDOUBLE: *(double *)(arr->data)=*v; break;\\
- case NPY_CFLOAT: *(float *)(arr->data)=*v; break;\\
- case NPY_BOOL: *(npy_bool *)(arr->data)=(*v!=0); break;\\
- case NPY_UBYTE: *(unsigned char *)(arr->data)=*v; break;\\
- case NPY_BYTE: *(signed char *)(arr->data)=*v; break;\\
- case NPY_SHORT: *(short *)(arr->data)=*v; break;\\
- case NPY_USHORT: *(npy_ushort *)(arr->data)=*v; break;\\
- case NPY_UINT: *(npy_uint *)(arr->data)=*v; break;\\
- case NPY_ULONG: *(npy_ulong *)(arr->data)=*v; break;\\
- case NPY_LONGLONG: *(npy_longlong *)(arr->data)=*v; break;\\
- case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=*v; break;\\
- case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
- case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=*v; break;\\
- case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_ ## ctype ## 1(*v),arr->data, arr); break;\\
+ if (PyArray_DESCR(arr)->type==typecode) {*(ctype *)(PyArray_DATA(arr))=*v; return 1;}\\
+ switch (PyArray_TYPE(arr)) {\\
+ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_INT: *(int *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONG: *(long *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=(*v!=0); break;\\
+ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=*v; break;\\
+ case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_ ## ctype ## 1(*v),PyArray_DATA(arr), arr); break;\\
default: return -2;\\
};\\
return 1
@@ -352,36 +352,36 @@ cppmacros['TRYPYARRAYTEMPLATE']="""\
needs['TRYCOMPLEXPYARRAYTEMPLATE']=['PRINTPYOBJERR']
cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
-#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;
+#define TRYCOMPLEXPYARRAYTEMPLATEOBJECT case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break;
#define TRYCOMPLEXPYARRAYTEMPLATE(ctype,typecode)\\
PyArrayObject *arr = NULL;\\
if (!obj) return -2;\\
if (!PyArray_Check(obj)) return -1;\\
if (!(arr=(PyArrayObject *)obj)) {fprintf(stderr,\"TRYCOMPLEXPYARRAYTEMPLATE:\");PRINTPYOBJERR(obj);return 0;}\\
- if (arr->descr->type==typecode) {\\
- *(ctype *)(arr->data)=(*v).r;\\
- *(ctype *)(arr->data+sizeof(ctype))=(*v).i;\\
+ if (PyArray_DESCR(arr)->type==typecode) {\\
+ *(ctype *)(PyArray_DATA(arr))=(*v).r;\\
+ *(ctype *)(PyArray_DATA(arr)+sizeof(ctype))=(*v).i;\\
return 1;\\
}\\
- switch (arr->descr->type_num) {\\
- case NPY_CDOUBLE: *(double *)(arr->data)=(*v).r;*(double *)(arr->data+sizeof(double))=(*v).i;break;\\
- case NPY_CFLOAT: *(float *)(arr->data)=(*v).r;*(float *)(arr->data+sizeof(float))=(*v).i;break;\\
- case NPY_DOUBLE: *(double *)(arr->data)=(*v).r; break;\\
- case NPY_LONG: *(long *)(arr->data)=(*v).r; break;\\
- case NPY_FLOAT: *(float *)(arr->data)=(*v).r; break;\\
- case NPY_INT: *(int *)(arr->data)=(*v).r; break;\\
- case NPY_SHORT: *(short *)(arr->data)=(*v).r; break;\\
- case NPY_UBYTE: *(unsigned char *)(arr->data)=(*v).r; break;\\
- case NPY_BYTE: *(signed char *)(arr->data)=(*v).r; break;\\
- case NPY_BOOL: *(npy_bool *)(arr->data)=((*v).r!=0 && (*v).i!=0); break;\\
- case NPY_USHORT: *(npy_ushort *)(arr->data)=(*v).r; break;\\
- case NPY_UINT: *(npy_uint *)(arr->data)=(*v).r; break;\\
- case NPY_ULONG: *(npy_ulong *)(arr->data)=(*v).r; break;\\
- case NPY_LONGLONG: *(npy_longlong *)(arr->data)=(*v).r; break;\\
- case NPY_ULONGLONG: *(npy_ulonglong *)(arr->data)=(*v).r; break;\\
- case NPY_LONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r; break;\\
- case NPY_CLONGDOUBLE: *(npy_longdouble *)(arr->data)=(*v).r;*(npy_longdouble *)(arr->data+sizeof(npy_longdouble))=(*v).i;break;\\
- case NPY_OBJECT: (arr->descr->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),arr->data, arr); break;\\
+ switch (PyArray_TYPE(arr)) {\\
+ case NPY_CDOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r;*(double *)(PyArray_DATA(arr)+sizeof(double))=(*v).i;break;\\
+ case NPY_CFLOAT: *(float *)(PyArray_DATA(arr))=(*v).r;*(float *)(PyArray_DATA(arr)+sizeof(float))=(*v).i;break;\\
+ case NPY_DOUBLE: *(double *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONG: *(long *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_FLOAT: *(float *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_INT: *(int *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_SHORT: *(short *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_UBYTE: *(unsigned char *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_BYTE: *(signed char *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_BOOL: *(npy_bool *)(PyArray_DATA(arr))=((*v).r!=0 && (*v).i!=0); break;\\
+ case NPY_USHORT: *(npy_ushort *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_UINT: *(npy_uint *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_ULONG: *(npy_ulong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONGLONG: *(npy_longlong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_ULONGLONG: *(npy_ulonglong *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_LONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r; break;\\
+ case NPY_CLONGDOUBLE: *(npy_longdouble *)(PyArray_DATA(arr))=(*v).r;*(npy_longdouble *)(PyArray_DATA(arr)+sizeof(npy_longdouble))=(*v).i;break;\\
+ case NPY_OBJECT: (PyArray_DESCR(arr)->f->setitem)(pyobj_from_complex_ ## ctype ## 1((*v)),PyArray_DATA(arr), arr); break;\\
default: return -2;\\
};\\
return -1;
@@ -391,11 +391,11 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
-## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\
-## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
+## \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+## \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
-## \t\t\t(arr->descr->cast[typenum])(arr->data,1,(char*)v,1,1);\\
+## \t\t\t(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
@@ -407,11 +407,11 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE']="""\
## \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
## \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
## \tif (arr) {\\
-## \t\tif (arr->descr->type_num==NPY_OBJECT) {\\
-## \t\t\tif (!ctype ## _from_pyobj(v,(arr->descr->getitem)(arr->data),\"\"))\\
+## \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+## \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
## \t\t\tgoto capi_fail;\\
## \t\t} else {\\
-## \t\t\t(arr->descr->cast[typenum])((void *)(arr->data),1,(void *)(v),1,1);\\
+## \t\t\t(PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
## \t\t}\\
## \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
## \t\treturn 1;\\
@@ -536,15 +536,15 @@ cppmacros['OLDPYNUM']="""\
cfuncs['calcarrindex']="""\
static int calcarrindex(int *i,PyArrayObject *arr) {
\tint k,ii = i[0];
-\tfor (k=1; k < arr->nd; k++)
-\t\tii += (ii*(arr->dimensions[k] - 1)+i[k]); /* assuming contiguous arr */
+\tfor (k=1; k < PyArray_NDIM(arr); k++)
+\t\tii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['calcarrindextr']="""\
static int calcarrindextr(int *i,PyArrayObject *arr) {
-\tint k,ii = i[arr->nd-1];
-\tfor (k=1; k < arr->nd; k++)
-\t\tii += (ii*(arr->dimensions[arr->nd-k-1] - 1)+i[arr->nd-k-1]); /* assuming contiguous arr */
+\tint k,ii = i[PyArray_NDIM(arr)-1];
+\tfor (k=1; k < PyArray_NDIM(arr); k++)
+\t\tii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
\treturn ii;
}"""
cfuncs['forcomb']="""\
@@ -592,7 +592,7 @@ cfuncs['try_pyarr_from_string']="""\
static int try_pyarr_from_string(PyObject *obj,const string str) {
\tPyArrayObject *arr = NULL;
\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
-\t\t{ STRINGCOPYN(arr->data,str,PyArray_NBYTES(arr)); }
+\t\t{ STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
\treturn 1;
capi_fail:
\tPRINTPYOBJERR(obj);
@@ -623,9 +623,9 @@ fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(cha
\t\t\tgoto capi_fail;
\t\t}
\t\tif (*len == -1)
-\t\t\t*len = (arr->descr->elsize)*PyArray_SIZE(arr);
+\t\t\t*len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
\t\tSTRINGMALLOC(*str,*len);
-\t\tSTRINGCOPYN(*str,arr->data,*len+1);
+\t\tSTRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
\t\treturn 1;
\t}
\tif (PyString_Check(obj)) {
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 893081126..0fde37bcf 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1274,7 +1274,7 @@ def markinnerspaces(line):
cb=''
for c in line:
if cb=='\\' and c in ['\\', '\'', '"']:
- l=l+c;
+ l=l+c
cb=c
continue
if f==0 and c in ['\'', '"']: cc=c; cc1={'\'':'"','"':'\''}[c]
@@ -2198,8 +2198,10 @@ def analyzevars(block):
if 'intent' not in vars[n]:
vars[n]['intent']=[]
for c in [x.strip() for x in markoutercomma(intent).split('@,@')]:
- if not c in vars[n]['intent']:
- vars[n]['intent'].append(c)
+ # Remove spaces so that 'in out' becomes 'inout'
+ tmp = c.replace(' ', '')
+ if tmp not in vars[n]['intent']:
+ vars[n]['intent'].append(tmp)
intent=None
if note:
note=note.replace('\\n\\n', '\n\n')
@@ -2220,7 +2222,7 @@ def analyzevars(block):
if 'check' not in vars[n]:
vars[n]['check']=[]
for c in [x.strip() for x in markoutercomma(check).split('@,@')]:
- if not c in vars[n]['check']:
+ if c not in vars[n]['check']:
vars[n]['check'].append(c)
check=None
if dim and 'dimension' not in vars[n]:
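
The intent change above is small but load-bearing: Fortran allows a space inside the specifier, so without the replace() an `intent(in out)` argument was recorded as 'in out' and never matched 'inout' downstream. The normalization in isolation:

    intents = []
    for c in ['in out', 'inout', 'c']:
        tmp = c.replace(' ', '')  # 'in out' -> 'inout'
        if tmp not in intents:
            intents.append(tmp)
    print(intents)  # ['inout', 'c']; the duplicate collapses away
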
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 4c186712c..b46776777 100644
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -109,6 +109,8 @@ module_rules={
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
+
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#ifdef __cplusplus
extern \"C\" {
#endif
@@ -232,6 +234,7 @@ PyMODINIT_FUNC init#modulename#(void) {
#ifdef __cplusplus
}
#endif
+#undef NPY_NO_DEPRECATED_API
""",
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
@@ -1027,9 +1030,9 @@ if (#varname#_capi==Py_None) {
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
-# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
-# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
-# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
+# \t\tif (PyArray_NDIM((PyArrayObject *)#varname#_capi) != PyArray_NDIM(capi_#varname#_tmp)) {
+# \t\t\tif (#varname#_capi != PyArray_BASE(capi_#varname#_tmp))
+# \t\t\t\tcopy_ND_array(PyArray_BASE((PyArrayObject *)capi_#varname#_tmp),(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
@@ -1047,7 +1050,7 @@ if (#varname#_capi==Py_None) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
-\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
+\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
@@ -1056,7 +1059,7 @@ if (#varname#_capi==Py_None) {
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
-\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
+\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index 3c6140b53..a27a001a9 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -88,26 +88,25 @@ main()
if __name__ == "__main__":
config = configuration(top_path='')
version = config.get_version()
print('F2PY Version', version)
config = config.todict()
- if sys.version[:3]>='2.3':
- config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
- "/F2PY-2-latest.tar.gz"
- config['classifiers'] = [
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'Intended Audience :: Science/Research',
- 'License :: OSI Approved :: NumPy License',
- 'Natural Language :: English',
- 'Operating System :: OS Independent',
- 'Programming Language :: C',
- 'Programming Language :: Fortran',
- 'Programming Language :: Python',
- 'Topic :: Scientific/Engineering',
- 'Topic :: Software Development :: Code Generators',
- ]
+ config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
+ "/F2PY-2-latest.tar.gz"
+ config['classifiers'] = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Science/Research',
+ 'License :: OSI Approved :: NumPy License',
+ 'Natural Language :: English',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: C',
+ 'Programming Language :: Fortran',
+ 'Programming Language :: Python',
+ 'Topic :: Scientific/Engineering',
+ 'Topic :: Software Development :: Code Generators',
+ ]
setup(version=version,
description = "F2PY - Fortran to Python Interface Generaton",
author = "Pearu Peterson",
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 001a4c7de..e88e83f0e 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -1,4 +1,5 @@
#define FORTRANOBJECT_C
+#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
#include "fortranobject.h"
#ifdef __cplusplus
@@ -58,11 +59,11 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
int n = fp->defs[i].rank-1;
v = PyArray_New(&PyArray_Type, n, fp->defs[i].dims.d,
NPY_STRING, NULL, fp->defs[i].data, fp->defs[i].dims.d[n],
- NPY_FARRAY, NULL);
+ NPY_ARRAY_FARRAY, NULL);
}
else {
v = PyArray_New(&PyArray_Type, fp->defs[i].rank, fp->defs[i].dims.d,
- fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_FARRAY,
+ fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
NULL);
}
if (v==NULL) return NULL;
@@ -273,7 +274,7 @@ fortran_getattr(PyFortranObject *fp, char *name) {
k = fp->defs[i].rank;
if (fp->defs[i].data !=NULL) { /* array is allocated */
PyObject *v = PyArray_New(&PyArray_Type, k, fp->defs[i].dims.d,
- fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_FARRAY,
+ fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
NULL);
if (v==NULL) return NULL;
/* Py_INCREF(v); */
@@ -345,7 +346,7 @@ fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) {
for(k=0;k<fp->defs[i].rank;k++) dims[k]=-1;
if ((arr = array_from_pyobj(fp->defs[i].type,dims,fp->defs[i].rank,F2PY_INTENT_IN,v))==NULL)
return -1;
- (*(fp->defs[i].func))(&fp->defs[i].rank,arr->dimensions,set_data,&flag);
+ (*(fp->defs[i].func))(&fp->defs[i].rank,PyArray_DIMS(arr),set_data,&flag);
} else { /* deallocate */
for(k=0;k<fp->defs[i].rank;k++) dims[k]=0;
(*(fp->defs[i].func))(&fp->defs[i].rank,dims,set_data,&flag);
@@ -357,11 +358,11 @@ fortran_setattr(PyFortranObject *fp, char *name, PyObject *v) {
return -1;
}
if (fp->defs[i].data!=NULL) { /* copy Python object to Fortran array */
- npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,arr->nd);
+ npy_intp s = PyArray_MultiplyList(fp->defs[i].dims.d,PyArray_NDIM(arr));
if (s==-1)
- s = PyArray_MultiplyList(arr->dimensions,arr->nd);
+ s = PyArray_MultiplyList(PyArray_DIMS(arr),PyArray_NDIM(arr));
if (s<0 ||
- (memcpy(fp->defs[i].data,arr->data,s*PyArray_ITEMSIZE(arr)))==NULL) {
+ (memcpy(fp->defs[i].data,PyArray_DATA(arr),s*PyArray_ITEMSIZE(arr)))==NULL) {
if ((PyObject*)arr!=v) {
Py_DECREF(arr);
}
@@ -616,8 +617,9 @@ void dump_dims(int rank, npy_intp* dims) {
}
printf("]\n");
}
-void dump_attrs(const PyArrayObject* arr) {
- int rank = arr->nd;
+void dump_attrs(const PyArrayObject* obj) {
+ const PyArrayObject_fields *arr = (const PyArrayObject_fields*) obj;
+ int rank = PyArray_NDIM(arr);
npy_intp size = PyArray_Size((PyObject *)arr);
printf("\trank = %d, flags = %d, size = %" NPY_INTP_FMT "\n",
rank,arr->flags,size);
@@ -630,7 +632,9 @@ void dump_attrs(const PyArrayObject* arr) {
#define SWAPTYPE(a,b,t) {t c; c = (a); (a) = (b); (b) = c; }
-static int swap_arrays(PyArrayObject* arr1, PyArrayObject* arr2) {
+static int swap_arrays(PyArrayObject* obj1, PyArrayObject* obj2) {
+ PyArrayObject_fields *arr1 = (PyArrayObject_fields*) obj1,
+ *arr2 = (PyArrayObject_fields*) obj2;
SWAPTYPE(arr1->data,arr2->data,char*);
SWAPTYPE(arr1->nd,arr2->nd,int);
SWAPTYPE(arr1->dimensions,arr2->dimensions,npy_intp*);
@@ -707,17 +711,17 @@ PyArrayObject* array_from_pyobj(const int type_num,
if (intent & F2PY_INTENT_CACHE) {
/* intent(cache) */
- if (PyArray_ISONESEGMENT(obj)
- && PyArray_ITEMSIZE((PyArrayObject *)obj)>=elsize) {
- if (check_and_fix_dimensions((PyArrayObject *)obj,rank,dims)) {
+ if (PyArray_ISONESEGMENT(arr)
+ && PyArray_ITEMSIZE(arr)>=elsize) {
+ if (check_and_fix_dimensions(arr,rank,dims)) {
return NULL; /*XXX: set exception */
}
if (intent & F2PY_INTENT_OUT)
- Py_INCREF(obj);
- return (PyArrayObject *)obj;
+ Py_INCREF(arr);
+ return arr;
}
strcpy(mess, "failed to initialize intent(cache) array");
- if (!PyArray_ISONESEGMENT(obj))
+ if (!PyArray_ISONESEGMENT(arr))
strcat(mess, " -- input must be in one segment");
if (PyArray_ITEMSIZE(arr)<elsize)
sprintf(mess+strlen(mess)," -- expected at least elsize=%d but got %d",
@@ -766,7 +770,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
);
if (!(ARRAY_ISCOMPATIBLE(arr,type_num)))
sprintf(mess+strlen(mess)," -- input '%c' not compatible to '%c'",
- arr->descr->type,typechar);
+ PyArray_DESCR(arr)->type,typechar);
if (!(F2PY_CHECK_ALIGNMENT(arr, intent)))
sprintf(mess+strlen(mess)," -- input not %d-aligned", F2PY_GET_ALIGNMENT(intent));
PyErr_SetString(PyExc_ValueError,mess);
@@ -777,7 +781,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
{
PyArrayObject *retarr = (PyArrayObject *) \
- PyArray_New(&PyArray_Type, arr->nd, arr->dimensions, type_num,
+ PyArray_New(&PyArray_Type, PyArray_NDIM(arr), PyArray_DIMS(arr), type_num,
NULL,NULL,0,
!(intent&F2PY_INTENT_C),
NULL);
@@ -814,8 +818,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
F2PY_REPORT_ON_ARRAY_COPY_FROMANY;
arr = (PyArrayObject *) \
PyArray_FromAny(obj,PyArray_DescrFromType(type_num), 0,0,
- ((intent & F2PY_INTENT_C)?NPY_CARRAY:NPY_FARRAY) \
- | NPY_FORCECAST, NULL);
+ ((intent & F2PY_INTENT_C)?NPY_ARRAY_CARRAY:NPY_ARRAY_FARRAY) \
+ | NPY_ARRAY_FORCECAST, NULL);
if (arr==NULL)
return NULL;
if (check_and_fix_dimensions(arr,rank,dims))
@@ -836,20 +840,20 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
the dimensions from arr. It also checks that non-blank dims will
match with the corresponding values in arr dimensions.
*/
- const npy_intp arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1;
+ const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1;
#ifdef DEBUG_COPY_ND_ARRAY
dump_attrs(arr);
printf("check_and_fix_dimensions:init: dims=");
dump_dims(rank,dims);
#endif
- if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
+ if (rank > PyArray_NDIM(arr)) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
npy_intp new_size = 1;
int free_axe = -1;
int i;
npy_intp d;
/* Fill dims where -1 or 0; check dimensions; calc new_size; */
- for(i=0;i<arr->nd;++i) {
- d = arr->dimensions[i];
+ for(i=0;i<PyArray_NDIM(arr);++i) {
+ d = PyArray_DIM(arr,i);
if (dims[i] >= 0) {
if (d>1 && dims[i]!=d) {
fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
@@ -863,7 +867,7 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
}
new_size *= dims[i];
}
- for(i=arr->nd;i<rank;++i)
+ for(i=PyArray_NDIM(arr);i<rank;++i)
if (dims[i]>1) {
fprintf(stderr,"%d-th dimension must be %" NPY_INTP_FMT
" but got 0 (not defined).\n",
@@ -883,12 +887,12 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
" indices)\n", new_size,arr_size);
return 1;
}
- } else if (rank==arr->nd) {
+ } else if (rank==PyArray_NDIM(arr)) {
npy_intp new_size = 1;
int i;
npy_intp d;
for (i=0; i<rank; ++i) {
- d = arr->dimensions[i];
+ d = PyArray_DIM(arr,i);
if (dims[i]>=0) {
if (d > 1 && d!=dims[i]) {
fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
@@ -910,19 +914,19 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
npy_intp d;
int effrank;
npy_intp size;
- for (i=0,effrank=0;i<arr->nd;++i)
- if (arr->dimensions[i]>1) ++effrank;
+ for (i=0,effrank=0;i<PyArray_NDIM(arr);++i)
+ if (PyArray_DIM(arr,i)>1) ++effrank;
if (dims[rank-1]>=0)
if (effrank>rank) {
fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n",
- arr->nd,effrank,rank);
+ PyArray_NDIM(arr),effrank,rank);
return 1;
}
for (i=0,j=0;i<rank;++i) {
- while (j<arr->nd && arr->dimensions[j]<2) ++j;
- if (j>=arr->nd) d = 1;
- else d = arr->dimensions[j++];
+ while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j;
+ if (j>=PyArray_NDIM(arr)) d = 1;
+ else d = PyArray_DIM(arr,j++);
if (dims[i]>=0) {
if (d>1 && d!=dims[i]) {
fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
@@ -935,20 +939,20 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
dims[i] = d;
}
- for (i=rank;i<arr->nd;++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */
- while (j<arr->nd && arr->dimensions[j]<2) ++j;
- if (j>=arr->nd) d = 1;
- else d = arr->dimensions[j++];
+ for (i=rank;i<PyArray_NDIM(arr);++i) { /* [[1,2],[3,4]] -> [1,2,3,4] */
+ while (j<PyArray_NDIM(arr) && PyArray_DIM(arr,j)<2) ++j;
+ if (j>=PyArray_NDIM(arr)) d = 1;
+ else d = PyArray_DIM(arr,j++);
dims[rank-1] *= d;
}
for (i=0,size=1;i<rank;++i) size *= dims[i];
if (size != arr_size) {
fprintf(stderr,"unexpected array size: size=%" NPY_INTP_FMT ", arr_size=%" NPY_INTP_FMT
", rank=%d, effrank=%d, arr.nd=%d, dims=[",
- size,arr_size,rank,effrank,arr->nd);
+ size,arr_size,rank,effrank,PyArray_NDIM(arr));
for (i=0;i<rank;++i) fprintf(stderr," %" NPY_INTP_FMT,dims[i]);
fprintf(stderr," ], arr.dims=[");
- for (i=0;i<arr->nd;++i) fprintf(stderr," %" NPY_INTP_FMT,arr->dimensions[i]);
+ for (i=0;i<PyArray_NDIM(arr);++i) fprintf(stderr," %" NPY_INTP_FMT,PyArray_DIM(arr,i));
fprintf(stderr," ]\n");
return 1;
}
@@ -1029,4 +1033,6 @@ F2PyCapsule_Check(PyObject *ptr)
#ifdef __cplusplus
}
#endif
+
+#undef NPY_NO_DEPRECATED_API
/************************* EOF fortranobject.c *******************************/
diff --git a/numpy/f2py/src/fortranobject.h b/numpy/f2py/src/fortranobject.h
index 689f78c92..c9b54e259 100644
--- a/numpy/f2py/src/fortranobject.h
+++ b/numpy/f2py/src/fortranobject.h
@@ -119,7 +119,7 @@ int F2PyCapsule_Check(PyObject *ptr);
#endif
-#define ISCONTIGUOUS(m) ((m)->flags & NPY_CONTIGUOUS)
+#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) & NPY_ARRAY_C_CONTIGUOUS)
#define F2PY_INTENT_IN 1
#define F2PY_INTENT_INOUT 2
#define F2PY_INTENT_OUT 4
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 9a21f2420..2da6a2c5d 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -90,22 +90,22 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
&PyArray_Type,&arr_capi))
return NULL;
arr = (PyArrayObject *)arr_capi;
- sprintf(s,"%p",arr->data);
- dimensions = PyTuple_New(arr->nd);
- strides = PyTuple_New(arr->nd);
- for (i=0;i<arr->nd;++i) {
- PyTuple_SetItem(dimensions,i,PyInt_FromLong(arr->dimensions[i]));
- PyTuple_SetItem(strides,i,PyInt_FromLong(arr->strides[i]));
+ sprintf(s,"%p",PyArray_DATA(arr));
+ dimensions = PyTuple_New(PyArray_NDIM(arr));
+ strides = PyTuple_New(PyArray_NDIM(arr));
+ for (i=0;i<PyArray_NDIM(arr);++i) {
+ PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i)));
+ PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i)));
}
- return Py_BuildValue("siOOO(cciii)ii",s,arr->nd,
+ return Py_BuildValue("siOOO(cciii)ii",s,PyArray_NDIM(arr),
dimensions,strides,
- (arr->base==NULL?Py_None:arr->base),
- arr->descr->kind,
- arr->descr->type,
- arr->descr->type_num,
- arr->descr->elsize,
- arr->descr->alignment,
- arr->flags,
+ (PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)),
+ PyArray_DESCR(arr)->kind,
+ PyArray_DESCR(arr)->type,
+ PyArray_TYPE(arr),
+ PyArray_ITEMSIZE(arr),
+ PyArray_DESCR(arr)->alignment,
+ PyArray_FLAGS(arr),
PyArray_ITEMSIZE(arr));
}
@@ -190,24 +190,24 @@ PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) {
PyDict_SetItemString(d, "NPY_NOTYPE", PyInt_FromLong(NPY_NOTYPE));
PyDict_SetItemString(d, "NPY_USERDEF", PyInt_FromLong(NPY_USERDEF));
- PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_CONTIGUOUS));
- PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_FORTRAN));
- PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_OWNDATA));
- PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_FORCECAST));
- PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ENSURECOPY));
- PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ENSUREARRAY));
- PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ALIGNED));
- PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_WRITEABLE));
- PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_UPDATEIFCOPY));
+ PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_ARRAY_C_CONTIGUOUS));
+ PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_ARRAY_F_CONTIGUOUS));
+ PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_ARRAY_OWNDATA));
+ PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_ARRAY_FORCECAST));
+ PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ARRAY_ENSURECOPY));
+ PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ARRAY_ENSUREARRAY));
+ PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ARRAY_ALIGNED));
+ PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_ARRAY_WRITEABLE));
+ PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_ARRAY_UPDATEIFCOPY));
- PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_BEHAVED));
- PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_BEHAVED_NS));
- PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_CARRAY));
- PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_FARRAY));
- PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_CARRAY_RO));
- PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_FARRAY_RO));
- PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_DEFAULT));
- PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_UPDATE_ALL));
+ PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_ARRAY_BEHAVED));
+ PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_ARRAY_BEHAVED_NS));
+ PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_ARRAY_CARRAY));
+ PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_ARRAY_FARRAY));
+ PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_ARRAY_CARRAY_RO));
+ PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_ARRAY_FARRAY_RO));
+ PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_ARRAY_DEFAULT));
+ PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_ARRAY_UPDATE_ALL));
if (PyErr_Occurred())
Py_FatalError("can't initialize module wrap");
diff --git a/numpy/f2py/tests/src/regression/inout.f90 b/numpy/f2py/tests/src/regression/inout.f90
new file mode 100644
index 000000000..80cdad90c
--- /dev/null
+++ b/numpy/f2py/tests/src/regression/inout.f90
@@ -0,0 +1,9 @@
+! Check that intent(in out) translates as intent(inout).
+! The space-separated form is in common use.
+ subroutine foo(x)
+ implicit none
+ real(4), intent(in out) :: x
+ dimension x(3)
+ x(1) = x(1) + x(2) + x(3)
+ return
+ end
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
new file mode 100644
index 000000000..9bd3f3fe3
--- /dev/null
+++ b/numpy/f2py/tests/test_regression.py
@@ -0,0 +1,32 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+import math
+
+import numpy as np
+from numpy.testing import dec, assert_raises, assert_equal
+
+import util
+
+def _path(*a):
+ return os.path.join(*((os.path.dirname(__file__),) + a))
+
+class TestIntentInOut(util.F2PyTest):
+ # Check that intent(in out) translates as intent(inout)
+ sources = [_path('src', 'regression', 'inout.f90')]
+
+ @dec.slow
+ def test_inout(self):
+ # non-contiguous should raise error
+ x = np.arange(6, dtype=np.float32)[::2]
+ assert_raises(ValueError, self.module.foo, x)
+
+ # check values with contiguous array
+ x = np.arange(3, dtype=np.float32)
+ self.module.foo(x)
+ assert_equal(x, [3, 1, 2])
+
+
+if __name__ == "__main__":
+ import nose
+ nose.runmodule()
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
index 95da3194f..9753047dc 100644
--- a/numpy/fft/fftpack_litemodule.c
+++ b/numpy/fft/fftpack_litemodule.c
@@ -6,11 +6,9 @@
static PyObject *ErrorObject;
-/* ----------------------------------------------------- */
+static const char fftpack_cfftf__doc__[] = "";
-static char fftpack_cfftf__doc__[] = "";
-
-PyObject *
+static PyObject *
fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args)
{
PyObject *op1, *op2;
@@ -61,9 +59,9 @@ fail:
return NULL;
}
-static char fftpack_cfftb__doc__[] = "";
+static const char fftpack_cfftb__doc__[] = "";
-PyObject *
+static PyObject *
fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args)
{
PyObject *op1, *op2;
@@ -114,7 +112,7 @@ fail:
return NULL;
}
-static char fftpack_cffti__doc__[] ="";
+static const char fftpack_cffti__doc__[] = "";
static PyObject *
fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args)
@@ -143,9 +141,9 @@ fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args)
return (PyObject *)op;
}
-static char fftpack_rfftf__doc__[] ="";
+static const char fftpack_rfftf__doc__[] = "";
-PyObject *
+static PyObject *
fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
{
PyObject *op1, *op2;
@@ -187,7 +185,6 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
rptr = (double *)PyArray_DATA(ret);
dptr = (double *)PyArray_DATA(data);
-
Py_BEGIN_ALLOW_THREADS;
NPY_SIGINT_ON;
for (i = 0; i < nrepeats; i++) {
@@ -211,10 +208,9 @@ fail:
return NULL;
}
-static char fftpack_rfftb__doc__[] ="";
-
+static const char fftpack_rfftb__doc__[] = "";
-PyObject *
+static PyObject *
fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
{
PyObject *op1, *op2;
@@ -274,8 +270,7 @@ fail:
return NULL;
}
-
-static char fftpack_rffti__doc__[] ="";
+static const char fftpack_rffti__doc__[] = "";
static PyObject *
fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args)
@@ -316,11 +311,6 @@ static struct PyMethodDef fftpack_methods[] = {
{NULL, NULL, 0, NULL} /* sentinel */
};
-
-/* Initialization function for the module (*must* be called initfftpack) */
-
-static char fftpack_module_documentation[] = "" ;
-
#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
@@ -349,6 +339,8 @@ initfftpack_lite(void)
#if PY_MAJOR_VERSION >= 3
m = PyModule_Create(&moduledef);
#else
+ static const char fftpack_module_documentation[] = "";
+
m = Py_InitModule4("fftpack_lite", fftpack_methods,
fftpack_module_documentation,
(PyObject*)NULL,PYTHON_API_VERSION);
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 5c3b504de..d3b6119f4 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -241,6 +241,11 @@ def intersect1d(ar1, ar2, assume_unique=False):
>>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
array([1, 3])
+ To intersect more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([3])
"""
if not assume_unique:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
@@ -421,6 +426,11 @@ def union1d(ar1, ar2):
>>> np.union1d([-1, 0, 1], [-2, 0, 2])
array([-2, -1, 0, 1, 2])
+ To find the union of more than two arrays, use functools.reduce:
+
+ >>> from functools import reduce
+ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+ array([1, 2, 3, 4, 6])
"""
return unique(np.concatenate((ar1, ar2)))
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 98743b6ad..b93f86ca3 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -141,7 +141,7 @@ import sys
import io
import warnings
from numpy.lib.utils import safe_eval
-from numpy.compat import asbytes, isfileobj, long, basestring
+from numpy.compat import asbytes, asstr, isfileobj, long, basestring
if sys.version_info[0] >= 3:
import pickle
@@ -298,7 +298,8 @@ def _write_array_header(fp, d, version=None):
# can take advantage of our premature optimization.
current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
topad = 16 - (current_header_len % 16)
- header = asbytes(header + ' '*topad + '\n')
+ header = header + ' '*topad + '\n'
+ header = asbytes(_filter_header(header))
if len(header) >= (256*256) and version == (1, 0):
raise ValueError("header does not fit inside %s bytes required by the"
@@ -410,6 +411,45 @@ def read_array_header_2_0(fp):
"""
_read_array_header(fp, version=(2, 0))
+
+def _filter_header(s):
+ """Clean up 'L' in npz header ints.
+
+ Cleans up the 'L' in strings representing integers. Needed to allow npz
+ headers produced in Python2 to be read in Python3.
+
+ Parameters
+ ----------
+ s : byte string
+ Npy file header.
+
+ Returns
+ -------
+ header : str
+ Cleaned up header.
+
+ """
+ import tokenize
+ if sys.version_info[0] >= 3:
+ from io import StringIO
+ else:
+ from StringIO import StringIO
+
+ tokens = []
+ last_token_was_number = False
+ for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
+ token_type = token[0]
+ token_string = token[1]
+ if (last_token_was_number and
+ token_type == tokenize.NAME and
+ token_string == "L"):
+ continue
+ else:
+ tokens.append(token)
+ last_token_was_number = (token_type == tokenize.NUMBER)
+ return tokenize.untokenize(tokens)
+
+
def _read_array_header(fp, version):
"""
see read_array_header_1_0
@@ -434,6 +474,7 @@ def _read_array_header(fp, version):
# "shape" : tuple of int
# "fortran_order" : bool
# "descr" : dtype.descr
+ header = _filter_header(header)
try:
d = safe_eval(header)
except SyntaxError as e:
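
Python 2 writes dict headers containing long-integer literals such as (2L, 3L), which Python 3's parser rejects; _filter_header above therefore tokenizes the header and drops any NAME token 'L' that directly follows a NUMBER. A standalone demonstration of the same technique, using the standard readline-based tokenizer interface:

    import tokenize
    from io import StringIO

    def strip_L_suffix(s):
        tokens, last_was_number = [], False
        for tok in tokenize.generate_tokens(StringIO(s).readline):
            if last_was_number and tok[0] == tokenize.NAME and tok[1] == "L":
                continue  # drop the Python 2 long suffix
            tokens.append(tok)
            last_was_number = tok[0] == tokenize.NUMBER
        return tokenize.untokenize(tokens)

    header = "{'descr': '<f8', 'fortran_order': False, 'shape': (2L, 3L)}"
    print(strip_L_suffix(header))  # now valid under Python 3
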
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 8b384bfaa..36ce94bad 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -515,8 +515,7 @@ def average(a, axis=None, weights=None, returned=False):
scl = avg.dtype.type(a.size/avg.size)
else:
a = a + 0.0
- wgt = np.array(weights, dtype=a.dtype, copy=0)
-
+ wgt = np.asarray(weights)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
@@ -533,7 +532,7 @@ def average(a, axis=None, weights=None, returned=False):
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
- scl = wgt.sum(axis=axis)
+ scl = wgt.sum(axis=axis, dtype=np.result_type(a.dtype, wgt.dtype))
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
@@ -883,28 +882,33 @@ def copy(a, order='K'):
# Basic operations
-def gradient(f, *varargs):
+def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
-
+
The gradient is computed using second order accurate central differences
- in the interior and second order accurate one-sides (forward or backwards)
- differences at the boundaries. The returned gradient hence has the same
- shape as the input array.
+ in the interior and either first differences or second order accurate
+ one-sided (forward or backward) differences at the boundaries. The
+ returned gradient hence has the same shape as the input array.
Parameters
----------
f : array_like
- An N-dimensional array containing samples of a scalar function.
- `*varargs` : scalars
- 0, 1, or N scalars specifying the sample distances in each direction,
- that is: `dx`, `dy`, `dz`, ... The default distance is 1.
+ An N-dimensional array containing samples of a scalar function.
+ varargs : list of scalar, optional
+ N scalars specifying the sample distances for each dimension,
+ i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
+ edge_order : {1, 2}, optional
+ Gradient is calculated using N\ :sup:`th` order accurate differences
+ at the boundaries. Default: 1.
+
+ .. versionadded:: 1.9.1
Returns
-------
gradient : ndarray
- N arrays of the same shape as `f` giving the derivative of `f` with
- respect to each dimension.
+ N arrays of the same shape as `f` giving the derivative of `f` with
+ respect to each dimension.
Examples
--------
@@ -916,15 +920,14 @@ def gradient(f, *varargs):
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
- [ 2., 2., -1.]]),
- array([[ 1. , 2.5, 4. ],
- [ 1. , 1. , 1. ]])]
+ [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
+ [ 1. , 1. , 1. ]])]
- >>> x = np.array([0,1,2,3,4])
- >>> dx = gradient(x)
+ >>> x = np.array([0, 1, 2, 3, 4])
+ >>> dx = np.gradient(x)
>>> y = x**2
- >>> gradient(y,dx)
- array([0., 2., 4., 6., 8.])
+ >>> np.gradient(y, dx, edge_order=2)
+ array([-0., 2., 4., 6., 8.])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
@@ -939,6 +942,13 @@ def gradient(f, *varargs):
raise SyntaxError(
"invalid number of arguments")
+ edge_order = kwargs.pop('edge_order', 1)
+ if kwargs:
+ raise TypeError('"{}" are not valid keyword arguments.'.format(
+ '", "'.join(kwargs.keys())))
+ if edge_order > 2:
+ raise ValueError("'edge_order' greater than 2 not supported")
+
# use central differences on interior and one-sided differences on the
# endpoints. This preserves second order-accuracy over the full domain.
@@ -978,7 +988,7 @@ def gradient(f, *varargs):
"at least two elements are required.")
# Numerical differentiation: 1st order edges, 2nd order interior
- if y.shape[axis] == 2:
+ if y.shape[axis] == 2 or edge_order == 1:
# Use first order differences for time data
out = np.empty_like(y, dtype=otype)
@@ -1026,7 +1036,8 @@ def gradient(f, *varargs):
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
- outvals.append(out / dx[axis])
+ out /= dx[axis]
+ outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
@@ -1102,7 +1113,7 @@ def diff(a, n=1, axis=-1):
return a[slice1]-a[slice2]
-def interp(x, xp, fp, left=None, right=None):
+def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
@@ -1115,7 +1126,9 @@ def interp(x, xp, fp, left=None, right=None):
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
- The x-coordinates of the data points, must be increasing.
+ The x-coordinates of the data points, must be increasing if argument
+ `period` is not specified. Otherwise, `xp` is internally sorted after
+ normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
@@ -1126,6 +1139,12 @@ def interp(x, xp, fp, left=None, right=None):
right : float, optional
Value to return for `x > xp[-1]`, default is `fp[-1]`.
+ period : None or float, optional
+ .. versionadded:: 1.10.0
+ A period for the x-coordinates. This parameter allows the proper
+ interpolation of angular x-coordinates. Parameters `left` and `right`
+ are ignored if `period` is specified.
+
Returns
-------
y : {float, ndarray}
@@ -1135,6 +1154,8 @@ def interp(x, xp, fp, left=None, right=None):
------
ValueError
If `xp` and `fp` have different length
+ If `xp` or `fp` are not 1-D sequences
+ If `period == 0`
Notes
-----
@@ -1144,7 +1165,6 @@ def interp(x, xp, fp, left=None, right=None):
np.all(np.diff(xp) > 0)
-
Examples
--------
>>> xp = [1, 2, 3]
@@ -1170,13 +1190,51 @@ def interp(x, xp, fp, left=None, right=None):
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
+ Interpolation with periodic x-coordinates:
+
+ >>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ >>> xp = [190, -190, 350, -350]
+ >>> fp = [5, 10, 3, 4]
+ >>> np.interp(x, xp, fp, period=360)
+ array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
+
"""
- if isinstance(x, (float, int, number)):
- return compiled_interp([x], xp, fp, left, right).item()
- elif isinstance(x, np.ndarray) and x.ndim == 0:
- return compiled_interp([x], xp, fp, left, right).item()
+ if period is None:
+ if isinstance(x, (float, int, number)):
+ return compiled_interp([x], xp, fp, left, right).item()
+ elif isinstance(x, np.ndarray) and x.ndim == 0:
+ return compiled_interp([x], xp, fp, left, right).item()
+ else:
+ return compiled_interp(x, xp, fp, left, right)
else:
- return compiled_interp(x, xp, fp, left, right)
+ if period == 0:
+ raise ValueError("period must be a non-zero value")
+ period = abs(period)
+ left = None
+ right = None
+ return_array = True
+ if isinstance(x, (float, int, number)):
+ return_array = False
+ x = [x]
+ x = np.asarray(x, dtype=np.float64)
+ xp = np.asarray(xp, dtype=np.float64)
+ fp = np.asarray(fp, dtype=np.float64)
+ if xp.ndim != 1 or fp.ndim != 1:
+ raise ValueError("Data points must be 1-D sequences")
+ if xp.shape[0] != fp.shape[0]:
+ raise ValueError("fp and xp are not of the same length")
+ # normalizing periodic boundaries
+ x = x % period
+ xp = xp % period
+ asort_xp = np.argsort(xp)
+ xp = xp[asort_xp]
+ fp = fp[asort_xp]
+ xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
+ fp = np.concatenate((fp[-1:], fp, fp[0:1]))
+ if return_array:
+ return compiled_interp(x, xp, fp, left, right)
+ else:
+ return compiled_interp(x, xp, fp, left, right).item()
def angle(z, deg=0):
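
The period= branch above implements periodic interpolation by remapping x and xp into a single period, sorting, and padding xp/fp with one period-shifted point on each side so that queries near the seam see both neighbours. The same steps spelled out by hand, reproducing two of the docstring's values:

    import numpy as np

    period = 360.0
    xp = np.asarray([190, -190, 350, -350], dtype=np.float64) % period
    fp = np.asarray([5, 10, 3, 4], dtype=np.float64)

    order = np.argsort(xp)  # interp requires increasing xp
    xp, fp = xp[order], fp[order]
    xp = np.concatenate((xp[-1:] - period, xp, xp[:1] + period))
    fp = np.concatenate((fp[-1:], fp, fp[:1]))

    x = np.asarray([-180.0, -5.0]) % period
    print(np.interp(x, xp, fp))  # [ 7.5   3.25], matching the docstring
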
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 641203f34..cbef1a6e2 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -122,6 +122,14 @@ class BagObj(object):
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
+
+ def __dir__(self):
+ """
+ Enables dir(bagobj) to list the files in an NpzFile.
+
+ This also enables tab-completion in an interpreter or IPython.
+ """
+ return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
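
With __dir__ defined on BagObj, the .f attribute of a loaded NpzFile now lists its member arrays, which is what makes tab-completion work in an interpreter. A quick round trip showing the effect (it mirrors the test_BagObj case added later in this patch):

    import io
    import numpy as np

    buf = io.BytesIO()
    np.savez(buf, file_a=np.eye(2), file_b=np.zeros(3))
    buf.seek(0)
    npz = np.load(buf)
    print(sorted(dir(npz.f)))  # ['file_a', 'file_b']
    print(npz.f.file_a)        # the stored 2x2 identity matrix
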
diff --git a/numpy/lib/tests/data/python3.npy b/numpy/lib/tests/data/python3.npy
new file mode 100644
index 000000000..7c6997dd6
--- /dev/null
+++ b/numpy/lib/tests/data/python3.npy
Binary files differ
diff --git a/numpy/lib/tests/data/win64python2.npy b/numpy/lib/tests/data/win64python2.npy
new file mode 100644
index 000000000..d9bc36af7
--- /dev/null
+++ b/numpy/lib/tests/data/win64python2.npy
Binary files differ
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index c09386789..ee77386bc 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -524,6 +524,16 @@ def test_compressed_roundtrip():
assert_array_equal(arr, arr1)
+def test_python2_python3_interoperability():
+ if sys.version_info[0] >= 3:
+ fname = 'win64python2.npy'
+ else:
+ fname = 'python3.npy'
+ path = os.path.join(os.path.dirname(__file__), 'data', fname)
+ data = np.load(path)
+ assert_array_equal(data, np.ones(2))
+
+
def test_version_2_0():
f = BytesIO()
# requires more than 2 byte for header
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index c800f8347..80faf85a6 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -124,6 +124,11 @@ class TestAverage(TestCase):
assert_array_equal(average(y1, weights=w2, axis=1), desired)
assert_equal(average(y1, weights=w2), 5.)
+ y3 = rand(5).astype(np.float32)
+ w3 = rand(5).astype(np.float64)
+
+ assert_(np.average(y3, weights=w3).dtype == np.result_type(y3, w3))
+
def test_returned(self):
y = np.array([[1, 2, 3], [4, 5, 6]])
@@ -526,8 +531,18 @@ class TestGradient(TestCase):
def test_masked(self):
# Make sure that gradient supports subclasses like masked arrays
- x = np.ma.array([[1, 1], [3, 4]])
- assert_equal(type(gradient(x)[0]), type(x))
+ x = np.ma.array([[1, 1], [3, 4]],
+ mask=[[False, False], [False, False]])
+ out = gradient(x)[0]
+ assert_equal(type(out), type(x))
+ # And make sure that the output and input don't have aliased mask
+ # arrays
+ assert_(x.mask is not out.mask)
+ # Also check that edge_order=2 doesn't alter the original mask
+ x2 = np.ma.arange(5)
+ x2[2] = np.ma.masked
+ np.gradient(x2, edge_order=2)
+ assert_array_equal(x2.mask, [False, False, True, False, False])
def test_datetime64(self):
# Make sure gradient() can handle special types like datetime64
@@ -536,7 +551,7 @@ class TestGradient(TestCase):
'1910-10-12', '1910-12-12', '1912-12-12'],
dtype='datetime64[D]')
dx = np.array(
- [-7, -3, 0, 31, 61, 396, 1066],
+ [-5, -3, 0, 31, 61, 396, 731],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
@@ -547,7 +562,7 @@ class TestGradient(TestCase):
[-5, -3, 10, 12, 61, 321, 300],
dtype='timedelta64[D]')
dx = np.array(
- [-3, 7, 7, 25, 154, 119, -161],
+ [2, 7, 7, 25, 154, 119, -21],
dtype='timedelta64[D]')
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
@@ -561,7 +576,7 @@ class TestGradient(TestCase):
dx = x[1] - x[0]
y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
analytical = 6 * x ** 2 + 8 * x + 2
- num_error = np.abs((np.gradient(y, dx) / analytical) - 1)
+ num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
@@ -1604,6 +1619,9 @@ class TestInterp(TestCase):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
assert_raises(ValueError, interp, 0, [0], [1, 2])
+ assert_raises(ValueError, interp, 0, [0, 1], [1, 2], period=0)
+ assert_raises(ValueError, interp, 0, [], [], period=360)
+ assert_raises(ValueError, interp, 0, [0], [1, 2], period=360)
def test_basic(self):
x = np.linspace(0, 1, 5)
@@ -1644,6 +1662,16 @@ class TestInterp(TestCase):
fp = np.sin(xp)
assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)
+ def test_period(self):
+ x = [-180, -170, -185, 185, -10, -5, 0, 365]
+ xp = [190, -190, 350, -350]
+ fp = [5, 10, 3, 4]
+ y = [7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+ x = np.array(x, order='F').reshape(2, -1)
+ y = np.array(y, order='C').reshape(2, -1)
+ assert_almost_equal(np.interp(x, xp, fp, period=360), y)
+
def compare_results(res, desired):
for i in range(len(desired)):
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 4038d6a7f..68b2018cd 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -216,6 +216,17 @@ class TestSavezLoad(RoundtripTest, TestCase):
l = np.load(c)
assert_equal(a, l['file_a'])
assert_equal(b, l['file_b'])
+
+ def test_BagObj(self):
+ a = np.array([[1, 2], [3, 4]], float)
+ b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
+ c = BytesIO()
+ np.savez(c, file_a=a, file_b=b)
+ c.seek(0)
+ l = np.load(c)
+ assert_equal(sorted(dir(l.f)), ['file_a', 'file_b'])
+ assert_equal(a, l.f.file_a)
+ assert_equal(b, l.f.file_b)
def test_savez_filename_clashes(self):
# Test that issue #852 is fixed
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 3da6b5149..35ae86c20 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -645,6 +645,22 @@ class TestNanFunctions_Median(TestCase):
assert_raises(IndexError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
+ def test_float_special(self):
+ with warnings.catch_warnings(record=True):
+ warnings.simplefilter('ignore', RuntimeWarning)
+ a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
+ assert_equal(np.nanmedian(a, axis=0), [np.inf, np.nan])
+ assert_equal(np.nanmedian(a, axis=1), [np.inf, np.nan])
+ assert_equal(np.nanmedian(a), np.inf)
+
+ # minimum fill value check
+ a = np.array([[np.nan, np.nan, np.inf], [np.nan, np.nan, np.inf]])
+ assert_equal(np.nanmedian(a, axis=1), np.inf)
+
+ # no mask path
+ a = np.array([[np.inf, np.inf], [np.inf, np.inf]])
+ assert_equal(np.nanmedian(a, axis=1), np.inf)
+
class TestNanFunctions_Percentile(TestCase):
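
The new test cases pin down nanmedian's handling of infinities: inf must survive the nan-masking path instead of being clobbered by a fill value, while an all-nan slice still yields nan. The asserted behaviour, run directly:

    import warnings
    import numpy as np

    with warnings.catch_warnings():
        warnings.simplefilter('ignore', RuntimeWarning)
        a = np.array([[np.inf, np.nan], [np.nan, np.nan]])
        print(np.nanmedian(a, axis=0))  # [ inf  nan]
        print(np.nanmedian(a, axis=1))  # [ inf  nan]
        print(np.nanmedian(a))          # inf
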
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index df0052493..519d0e9b9 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -4,6 +4,7 @@ import os
import sys
import types
import re
+import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import ndarray, ufunc, asarray
@@ -1002,111 +1003,70 @@ class SafeEval(object):
This includes strings with lists, dicts and tuples using the abstract
syntax tree created by ``compiler.parse``.
- For an example of usage, see `safe_eval`.
+ .. deprecated:: 1.10.0
See Also
--------
safe_eval
"""
+ def __init__(self):
+ warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
+ DeprecationWarning)
- if sys.version_info[0] < 3:
- def visit(self, node, **kw):
- cls = node.__class__
- meth = getattr(self, 'visit'+cls.__name__, self.default)
- return meth(node, **kw)
+ def visit(self, node):
+ cls = node.__class__
+ meth = getattr(self, 'visit' + cls.__name__, self.default)
+ return meth(node)
- def default(self, node, **kw):
- raise SyntaxError("Unsupported source construct: %s"
- % node.__class__)
+ def default(self, node):
+ raise SyntaxError("Unsupported source construct: %s"
+ % node.__class__)
- def visitExpression(self, node, **kw):
- for child in node.getChildNodes():
- return self.visit(child, **kw)
+ def visitExpression(self, node):
+ return self.visit(node.body)
- def visitConst(self, node, **kw):
- return node.value
+ def visitNum(self, node):
+ return node.n
- def visitDict(self, node,**kw):
- return dict(
- [(self.visit(k), self.visit(v)) for k, v in node.items]
- )
-
- def visitTuple(self, node, **kw):
- return tuple([self.visit(i) for i in node.nodes])
-
- def visitList(self, node, **kw):
- return [self.visit(i) for i in node.nodes]
-
- def visitUnaryAdd(self, node, **kw):
- return +self.visit(node.getChildNodes()[0])
-
- def visitUnarySub(self, node, **kw):
- return -self.visit(node.getChildNodes()[0])
-
- def visitName(self, node, **kw):
- if node.name == 'False':
- return False
- elif node.name == 'True':
- return True
- elif node.name == 'None':
- return None
- else:
- raise SyntaxError("Unknown name: %s" % node.name)
- else:
-
- def visit(self, node):
- cls = node.__class__
- meth = getattr(self, 'visit' + cls.__name__, self.default)
- return meth(node)
-
- def default(self, node):
- raise SyntaxError("Unsupported source construct: %s"
- % node.__class__)
-
- def visitExpression(self, node):
- return self.visit(node.body)
-
- def visitNum(self, node):
- return node.n
+ def visitStr(self, node):
+ return node.s
- def visitStr(self, node):
- return node.s
+ def visitBytes(self, node):
+ return node.s
- def visitBytes(self, node):
- return node.s
+ def visitDict(self, node, **kw):
+ return dict([(self.visit(k), self.visit(v))
+ for k, v in zip(node.keys, node.values)])
- def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v))
- for k, v in zip(node.keys, node.values)])
+ def visitTuple(self, node):
+ return tuple([self.visit(i) for i in node.elts])
- def visitTuple(self, node):
- return tuple([self.visit(i) for i in node.elts])
+ def visitList(self, node):
+ return [self.visit(i) for i in node.elts]
- def visitList(self, node):
- return [self.visit(i) for i in node.elts]
+ def visitUnaryOp(self, node):
+ import ast
+ if isinstance(node.op, ast.UAdd):
+ return +self.visit(node.operand)
+ elif isinstance(node.op, ast.USub):
+ return -self.visit(node.operand)
+ else:
+ raise SyntaxError("Unknown unary op: %r" % node.op)
+
+ def visitName(self, node):
+ if node.id == 'False':
+ return False
+ elif node.id == 'True':
+ return True
+ elif node.id == 'None':
+ return None
+ else:
+ raise SyntaxError("Unknown name: %s" % node.id)
- def visitUnaryOp(self, node):
- import ast
- if isinstance(node.op, ast.UAdd):
- return +self.visit(node.operand)
- elif isinstance(node.op, ast.USub):
- return -self.visit(node.operand)
- else:
- raise SyntaxError("Unknown unary op: %r" % node.op)
-
- def visitName(self, node):
- if node.id == 'False':
- return False
- elif node.id == 'True':
- return True
- elif node.id == 'None':
- return None
- else:
- raise SyntaxError("Unknown name: %s" % node.id)
+ def visitNameConstant(self, node):
+ return node.value
- def visitNameConstant(self, node):
- return node.value
def safe_eval(source):
"""
@@ -1151,26 +1111,8 @@ def safe_eval(source):
SyntaxError: Unsupported source construct: compiler.ast.CallFunc
"""
- # Local imports to speed up numpy's import time.
- import warnings
-
- with warnings.catch_warnings():
- # compiler package is deprecated for 3.x, which is already solved
- # here
- warnings.simplefilter('ignore', DeprecationWarning)
- try:
- import compiler
- except ImportError:
- import ast as compiler
-
- walker = SafeEval()
- try:
- ast = compiler.parse(source, mode="eval")
- except SyntaxError:
- raise
- try:
- return walker.visit(ast)
- except SyntaxError:
- raise
+ # Local import to speed up numpy's import time.
+ import ast
+ return ast.literal_eval(source)
#-----------------------------------------------------------------------------
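The rewritten ``safe_eval`` is now a thin wrapper over the standard library; a minimal sketch of the behavior it preserves (editorial, not part of the patch):

import ast

assert ast.literal_eval("[1, 2, (3, 4)]") == [1, 2, (3, 4)]
assert ast.literal_eval("{'a': 1.5, 'b': None}") == {'a': 1.5, 'b': None}
try:
    ast.literal_eval("__import__('os')")   # calls are rejected, not executed
except ValueError:
    pass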
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 6b2299fe7..43b2dc4dc 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -14,7 +14,7 @@ from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
- 'LinAlgError']
+ 'LinAlgError', 'multi_dot']
import warnings
@@ -23,7 +23,7 @@ from numpy.core import (
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast
+ broadcast, atleast_2d, intp, asanyarray
)
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -1921,7 +1921,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
return result
-def norm(x, ord=None, axis=None):
+def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
@@ -1942,6 +1942,11 @@ def norm(x, ord=None, axis=None):
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+ keepdims : bool, optional
+ .. versionadded:: 1.10.0
+ If this is set to True, the axes which are normed over are left in the
+ result as dimensions with size one. With this option the result will
+ broadcast correctly against the original `x`.
Returns
-------
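A short sketch of the broadcasting pattern the new ``keepdims`` text describes (editorial example; assumes a NumPy build with this patch applied):

import numpy as np

A = np.arange(12.0).reshape(3, 4)
row_norms = np.linalg.norm(A, axis=1, keepdims=True)   # shape (3, 1)
unit_rows = A / row_norms                              # broadcasts against A
assert np.allclose(np.linalg.norm(unit_rows, axis=1), 1.0)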
@@ -2053,35 +2058,43 @@ def norm(x, ord=None, axis=None):
# Check the default case first and handle it immediately.
if ord is None and axis is None:
+ ndim = x.ndim
x = x.ravel(order='K')
if isComplexType(x.dtype.type):
sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
else:
sqnorm = dot(x, x)
- return sqrt(sqnorm)
+ ret = sqrt(sqnorm)
+ if keepdims:
+ ret = ret.reshape(ndim*[1])
+ return ret
# Normalize the `axis` argument to a tuple.
nd = x.ndim
if axis is None:
axis = tuple(range(nd))
elif not isinstance(axis, tuple):
+ try:
+ axis = int(axis)
+ except Exception:
+ raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
if len(axis) == 1:
if ord == Inf:
- return abs(x).max(axis=axis)
+ return abs(x).max(axis=axis, keepdims=keepdims)
elif ord == -Inf:
- return abs(x).min(axis=axis)
+ return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
- return (x != 0).sum(axis=axis)
+ return (x != 0).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
- return add.reduce(abs(x), axis=axis)
+ return add.reduce(abs(x), axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
s = (x.conj() * x).real
- return sqrt(add.reduce(s, axis=axis))
+ return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
else:
try:
ord + 1
@@ -2100,7 +2113,7 @@ def norm(x, ord=None, axis=None):
# if the type changed, we can safely overwrite absx
abs(absx, out=absx)
absx **= ord
- return add.reduce(absx, axis=axis) ** (1.0 / ord)
+ return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
row_axis, col_axis = axis
if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
@@ -2109,28 +2122,217 @@ def norm(x, ord=None, axis=None):
if row_axis % nd == col_axis % nd:
raise ValueError('Duplicate axes given.')
if ord == 2:
- return _multi_svd_norm(x, row_axis, col_axis, amax)
+ ret = _multi_svd_norm(x, row_axis, col_axis, amax)
elif ord == -2:
- return _multi_svd_norm(x, row_axis, col_axis, amin)
+ ret = _multi_svd_norm(x, row_axis, col_axis, amin)
elif ord == 1:
if col_axis > row_axis:
col_axis -= 1
- return add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
+ ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
elif ord == Inf:
if row_axis > col_axis:
row_axis -= 1
- return add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
+ ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
elif ord == -1:
if col_axis > row_axis:
col_axis -= 1
- return add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
+ ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
elif ord == -Inf:
if row_axis > col_axis:
row_axis -= 1
- return add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
+ ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
- return sqrt(add.reduce((x.conj() * x).real, axis=axis))
+ ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
+ if keepdims:
+ ret_shape = list(x.shape)
+ ret_shape[axis[0]] = 1
+ ret_shape[axis[1]] = 1
+ ret = ret.reshape(ret_shape)
+ return ret
else:
raise ValueError("Improper number of dimensions to norm.")
+
+
+# multi_dot
+
+def multi_dot(arrays):
+ """
+ Compute the dot product of two or more arrays in a single function call,
+ while automatically selecting the fastest evaluation order.
+
+ `multi_dot` chains `numpy.dot` and uses optimal parenthesization
+ of the matrices [1]_ [2]_. Depending on the shapes of the matrices,
+ this can speed up the multiplication a lot.
+
+ The first and last arguments can be 1-D and are treated as a row and
+ a column vector, respectively. The other arguments must be 2-D.
+
+ Think of `multi_dot` as::
+
+ def multi_dot(arrays): return functools.reduce(np.dot, arrays)
+
+
+ Parameters
+ ----------
+ arrays : sequence of array_like
+ The first and last arguments can be 1-D and are treated as a row and
+ a column vector, respectively; the other arguments must be 2-D.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of the supplied arrays.
+
+ See Also
+ --------
+ dot : dot multiplication with two arguments.
+
+ References
+ ----------
+
+ .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
+ .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication
+
+ Examples
+ --------
+ `multi_dot` allows you to write::
+
+ >>> import numpy as np
+ >>> # Prepare some data
+ >>> A = np.random.random((10000, 100))
+ >>> B = np.random.random((100, 1000))
+ >>> C = np.random.random((1000, 5))
+ >>> D = np.random.random((5, 333))
+ >>> # the actual dot multiplication
+ >>> multi_dot([A, B, C, D])
+
+ instead of::
+
+ >>> np.dot(np.dot(np.dot(A, B), C), D)
+ >>> # or
+ >>> A.dot(B).dot(C).dot(D)
+
+
+ Example: multiplication costs of different parenthesizations
+ ------------------------------------------------------------
+
+ The cost for a matrix multiplication can be calculated with the
+ following function::
+
+ def cost(A, B): return A.shape[0] * A.shape[1] * B.shape[1]
+
+ Let's assume we have three matrices
+ :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
+
+ The costs for the two different parenthesizations are as follows::
+
+ cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 7500
+ cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
+
+ """
+ n = len(arrays)
+ # optimization only makes sense for len(arrays) > 2
+ if n < 2:
+ raise ValueError("Expecting at least two arrays.")
+ elif n == 2:
+ return dot(arrays[0], arrays[1])
+
+ arrays = [asanyarray(a) for a in arrays]
+
+ # save original ndim to reshape the result array into the proper form later
+ ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim
+ # Explicitly convert vectors to 2D arrays to keep the logic of the internal
+ # _multi_dot_* functions as simple as possible.
+ if arrays[0].ndim == 1:
+ arrays[0] = atleast_2d(arrays[0])
+ if arrays[-1].ndim == 1:
+ arrays[-1] = atleast_2d(arrays[-1]).T
+ _assertRank2(*arrays)
+
+ # _multi_dot_three is much faster than _multi_dot_matrix_chain_order
+ if n == 3:
+ result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
+ else:
+ order = _multi_dot_matrix_chain_order(arrays)
+ result = _multi_dot(arrays, order, 0, n - 1)
+
+ # return proper shape
+ if ndim_first == 1 and ndim_last == 1:
+ return result[0, 0] # scalar
+ elif ndim_first == 1 or ndim_last == 1:
+ return result.ravel() # 1-D
+ else:
+ return result
+
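A usage sketch of ``multi_dot`` (editorial; assumes a build that includes this patch), showing its equivalence with the naive left-to-right chain:

import functools
import numpy as np
from numpy.linalg import multi_dot

A = np.random.random((10, 100))
B = np.random.random((100, 5))
C = np.random.random((5, 50))
# Same result as reduce(np.dot, ...), but evaluated in the cheaper order.
assert np.allclose(multi_dot([A, B, C]),
                   functools.reduce(np.dot, [A, B, C]))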
+
+def _multi_dot_three(A, B, C):
+ """
+ Find best ordering for three arrays and do the multiplication.
+
+ Doing it manually instead of using dynamic programming is
+ approximately 15 times faster.
+
+ """
+ # cost1 = cost((AB)C)
+ cost1 = (A.shape[0] * A.shape[1] * B.shape[1] + # (AB)
+ A.shape[0] * B.shape[1] * C.shape[1]) # (--)C
+ # cost2 = cost(A(BC))
+ cost2 = (B.shape[0] * B.shape[1] * C.shape[1] + # (BC)
+ A.shape[0] * A.shape[1] * C.shape[1]) # A(--)
+
+ if cost1 < cost2:
+ return dot(dot(A, B), C)
+ else:
+ return dot(A, dot(B, C))
+
+
+def _multi_dot_matrix_chain_order(arrays, return_costs=False):
+ """
+ Return a np.array that encodes the optimal order of multiplications.
+
+ The optimal order array is then used by `_multi_dot()` to do the
+ multiplication.
+
+ Also return the cost matrix if `return_costs` is `True`.
+
+ The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
+ Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
+
+ cost[i, j] = min([
+ cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
+ for k in range(i, j)])
+
+ """
+ n = len(arrays)
+ # p stores the dimensions of the matrices
+ # Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
+ p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
+ # m is a matrix of costs of the subproblems
+ # m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
+ m = zeros((n, n), dtype=double)
+ # s is the actual ordering
+ # s[i, j] is the value of k at which we split the product A_i..A_j
+ s = empty((n, n), dtype=intp)
+
+ for l in range(1, n):
+ for i in range(n - l):
+ j = i + l
+ m[i, j] = Inf
+ for k in range(i, j):
+ q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
+ if q < m[i, j]:
+ m[i, j] = q
+ s[i, j] = k # Note that Cormen uses 1-based index
+
+ return (s, m) if return_costs else s
+
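A worked restatement of the cost recurrence over the dimension list ``p`` (editorial sketch, mirroring the docstring example above):

# For A_{10x100}, B_{100x5}, C_{5x50}: p = [10, 100, 5, 50].
p = [10, 100, 5, 50]
cost_AB_then_C = p[0]*p[1]*p[2] + p[0]*p[2]*p[3]   # 5000 + 2500   = 7500
cost_A_then_BC = p[1]*p[2]*p[3] + p[0]*p[1]*p[3]   # 25000 + 50000 = 75000
assert cost_AB_then_C < cost_A_then_BC             # hence (AB)C is cheaper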
+
+def _multi_dot(arrays, order, i, j):
+ """Actually do the multiplication with the given order."""
+ if i == j:
+ return arrays[i]
+ else:
+ return dot(_multi_dot(arrays, order, i, order[i, j]),
+ _multi_dot(arrays, order, order[i, j] + 1, j))
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 8edf36aa6..63baa590d 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -12,7 +12,8 @@ import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
-from numpy.linalg import matrix_power, norm, matrix_rank
+from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot
+from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
@@ -207,7 +208,7 @@ for tgt, src in ((GENERALIZED_SQUARE_CASES, SQUARE_CASES),
for case in src:
if not isinstance(case.a, np.ndarray):
continue
-
+
a = np.array([case.a, 2*case.a, 3*case.a])
if case.b is None:
b = None
@@ -885,6 +886,48 @@ class _TestNorm(object):
expected = [norm(B[:,:, k], ord=order) for k in range(B.shape[2])]
assert_almost_equal(n, expected)
+ def test_keepdims(self):
+ A = np.arange(1,25, dtype=self.dt).reshape(2,3,4)
+
+ allclose_err = 'order {0}, axis = {1}'
+ shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'
+
+ # check the order=None, axis=None case
+ expected = norm(A, ord=None, axis=None)
+ found = norm(A, ord=None, axis=None, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(None,None))
+ expected_shape = (1,1,1)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, None, None))
+
+ # Vector norms.
+ for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
+ for k in range(A.ndim):
+ expected = norm(A, ord=order, axis=k)
+ found = norm(A, ord=order, axis=k, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(order,k))
+ expected_shape = list(A.shape)
+ expected_shape[k] = 1
+ expected_shape = tuple(expected_shape)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, order, k))
+
+ # Matrix norms.
+ for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
+ for k in itertools.permutations(range(A.ndim), 2):
+ expected = norm(A, ord=order, axis=k)
+ found = norm(A, ord=order, axis=k, keepdims=True)
+ assert_allclose(np.squeeze(found), expected,
+ err_msg=allclose_err.format(order,k))
+ expected_shape = list(A.shape)
+ expected_shape[k[0]] = 1
+ expected_shape[k[1]] = 1
+ expected_shape = tuple(expected_shape)
+ assert_(found.shape == expected_shape,
+ shape_err.format(found.shape, expected_shape, order, k))
+
def test_bad_args(self):
# Check that bad arguments raise the appropriate exceptions.
@@ -909,6 +952,7 @@ class _TestNorm(object):
assert_raises(ValueError, norm, B, None, (2, 3))
assert_raises(ValueError, norm, B, None, (0, 1, 2))
+class TestNorm_NonSystematic(object):
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
# UnboundLocalError.
@@ -1149,5 +1193,90 @@ def test_xerbla_override():
raise SkipTest('Numpy xerbla not linked in.')
+class TestMultiDot(object):
+ def test_basic_function_with_three_arguments(self):
+ # multi_dot with three arguments uses a fast hand-coded algorithm to
+ # determine the optimal order. Therefore test it separately.
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+
+ assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
+ assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+
+ def test_basic_function_with_dynamic_programing_optimization(self):
+ # multi_dot with four or more arguments uses the dynamic programming
+ # optimization and therefore deserves a separate test.
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D = np.random.random((2, 1))
+ assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
+
+ def test_vector_as_first_argument(self):
+ # The first argument can be 1-D
+ A1d = np.random.random(2) # 1-D
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D = np.random.random((2, 2))
+
+ # the result should be 1-D
+ assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
+
+ def test_vector_as_last_argument(self):
+ # The last argument can be 1-D
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D1d = np.random.random(2) # 1-D
+
+ # the result should be 1-D
+ assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
+
+ def test_vector_as_first_and_last_argument(self):
+ # The first and last arguments can be 1-D
+ A1d = np.random.random(2) # 1-D
+ B = np.random.random((2, 6))
+ C = np.random.random((6, 2))
+ D1d = np.random.random(2) # 1-D
+
+ # the result should be a scalar
+ assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
+
+ def test_dynamic_programming_logic(self):
+ # Test for the dynamic programming part
+ # This test is directly taken from Cormen page 376.
+ arrays = [np.random.random((30, 35)),
+ np.random.random((35, 15)),
+ np.random.random((15, 5)),
+ np.random.random((5, 10)),
+ np.random.random((10, 20)),
+ np.random.random((20, 25))]
+ m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
+ [0., 0., 2625., 4375., 7125., 10500.],
+ [0., 0., 0., 750., 2500., 5375.],
+ [0., 0., 0., 0., 1000., 3500.],
+ [0., 0., 0., 0., 0., 5000.],
+ [0., 0., 0., 0., 0., 0.]])
+ s_expected = np.array([[0, 1, 1, 3, 3, 3],
+ [0, 0, 2, 3, 3, 3],
+ [0, 0, 0, 3, 3, 3],
+ [0, 0, 0, 0, 4, 5],
+ [0, 0, 0, 0, 0, 5],
+ [0, 0, 0, 0, 0, 0]], dtype=np.int)
+ s_expected -= 1 # Cormen uses 1-based index, python does not.
+
+ s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
+
+ # Only the upper triangular part (without the diagonal) is interesting.
+ assert_almost_equal(np.triu(s[:-1, 1:]),
+ np.triu(s_expected[:-1, 1:]))
+ assert_almost_equal(np.triu(m), np.triu(m_expected))
+
+ def test_too_few_input_arrays(self):
+ assert_raises(ValueError, multi_dot, [])
+ assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 00164b851..34e52d86e 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -780,6 +780,8 @@ class _DomainSafeDivide:
# component of numpy's import time.
if self.tolerance is None:
self.tolerance = np.finfo(float).tiny
+ # don't call ma ufuncs from __array_wrap__ which would fail for scalars
+ a, b = np.asarray(a), np.asarray(b)
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
@@ -2783,12 +2785,50 @@ class MaskedArray(ndarray):
"""
# Get main attributes .........
self._update_from(obj)
+ # We have to decide how to initialize self.mask, based on
+ # obj.mask. This is very difficult. There might be some
+ # correspondence between the elements in the array we are being
+ # created from (= obj) and us. Or... there might not. This method can
+ # be called in all kinds of places for all kinds of reasons -- could
+ # be empty_like, could be slicing, could be a ufunc, could be a view,
+ # ... The numpy subclassing interface simply doesn't give us any way
+ # to know, which means that at best this method will be based on
+ # guesswork and heuristics. To make things worse, there isn't even any
+ # clear consensus about what the desired behavior is. For instance,
+ # most users think that np.empty_like(marr) -- which goes via this
+ # method -- should return a masked array with an empty mask (see
+ # gh-3404 and linked discussions), but others disagree, and they have
+ # existing code which depends on empty_like returning an array that
+ # matches the input mask.
+ #
+ # Historically our algorithm was: if the template object mask had the
+ # same *number of elements* as us, then we used *its mask object
+ # itself* as our mask, so that writes to us would also write to the
+ # original array. This is horribly broken in multiple ways.
+ #
+ # Now what we do instead is, if the template object mask has the same
+ # number of elements as us, and we do not have the same base pointer
+ # as the template object (b/c views like arr[...] should keep the same
+ # mask), then we make a copy of the template object mask and use
+ # that. This is also horribly broken but somewhat less so. Maybe.
if isinstance(obj, ndarray):
- odtype = obj.dtype
- if odtype.names:
- _mask = getattr(obj, '_mask', make_mask_none(obj.shape, odtype))
+ # XX: This looks like a bug -- shouldn't it check self.dtype
+ # instead?
+ if obj.dtype.names:
+ _mask = getattr(obj, '_mask',
+ make_mask_none(obj.shape, obj.dtype))
else:
_mask = getattr(obj, '_mask', nomask)
+ # If self and obj point to exactly the same data, then probably
+ # self is a simple view of obj (e.g., self = obj[...]), so they
+ # should share the same mask. (This isn't 100% reliable, e.g. self
+ # could be the first row of obj, or have strange strides, but as a
+ # heuristic it's not bad.) In all other cases, we make a copy of
+ # the mask, so that future modifications to 'self' do not end up
+ # side-effecting 'obj' as well.
+ if (obj.__array_interface__["data"][0]
+ != self.__array_interface__["data"][0]):
+ _mask = _mask.copy()
else:
_mask = nomask
self._mask = _mask
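A sketch of the observable effect of the heuristic described above (editorial example; assumes a build with this patch):

import numpy as np
import numpy.ma as ma

a = ma.masked_array([1, 2, 3], mask=[False, True, False])
v = a[...]                       # same data pointer: the mask is shared
assert np.may_share_memory(a.mask, v.mask)
b = np.ma.empty_like(a)          # new buffer: the mask is copied
assert not np.may_share_memory(a.mask, b.mask)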
@@ -5073,7 +5113,11 @@ class MaskedArray(ndarray):
return self
if fill_value is None:
if endwith:
- filler = minimum_fill_value(self)
+ # nan > inf
+ if np.issubdtype(self.dtype, np.floating):
+ filler = np.nan
+ else:
+ filler = minimum_fill_value(self)
else:
filler = maximum_fill_value(self)
else:
@@ -6182,7 +6226,11 @@ def sort(a, axis= -1, kind='quicksort', order=None, endwith=True, fill_value=Non
axis = 0
if fill_value is None:
if endwith:
- filler = minimum_fill_value(a)
+ # nan > inf
+ if np.issubdtype(a.dtype, np.floating):
+ filler = np.nan
+ else:
+ filler = minimum_fill_value(a)
else:
filler = maximum_fill_value(a)
else:
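The ``nan > inf`` comment captures why NaN is the right ``endwith`` filler for floats: ``np.sort`` places NaN after ``inf``, so masked values still land at the end even when the data contains infinities. A one-line editorial illustration:

import numpy as np

print(np.sort(np.array([np.inf, 1.0, np.nan])))   # -> [  1.  inf  nan]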
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index a993fd05d..1849df72b 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -671,8 +671,8 @@ def median(a, axis=None, out=None, overwrite_input=False):
"""
if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0:
- return masked_array(np.median(a, axis=axis, out=out,
- overwrite_input=overwrite_input), copy=False)
+ return masked_array(np.median(getattr(a, 'data', a), axis=axis,
+ out=out, overwrite_input=overwrite_input), copy=False)
if overwrite_input:
if axis is None:
asorted = a.ravel()
@@ -705,7 +705,14 @@ def median(a, axis=None, out=None, overwrite_input=False):
low = high
else:
low[odd] = high[odd]
- return np.ma.mean([low, high], axis=0, out=out)
+
+ if np.issubdtype(asorted.dtype, np.inexact):
+ # avoid inf / x = masked
+ s = np.ma.sum([low, high], axis=0, out=out)
+ np.true_divide(s.data, 2., casting='unsafe', out=s.data)
+ else:
+ s = np.ma.mean([low, high], axis=0, out=out)
+ return s
#..............................................................................
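The division on ``.data`` matters because ``numpy.ma``'s safe-divide domain masks ``inf / x`` outright; a small editorial sketch of the difference (assumes this patch's behavior):

import numpy as np

print(np.ma.true_divide(np.inf, 2.0))   # masked: |inf|*tiny >= |2| trips the domain
s = np.ma.array([np.inf, 4.0])
np.true_divide(s.data, 2.0, casting='unsafe', out=s.data)
print(s)                                # [inf 2.0] -- the inf survives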
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 34951875d..4ac3465aa 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -2226,6 +2226,13 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(b.shape, a.shape)
assert_equal(b.fill_value, a.fill_value)
+ # check empty_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = empty_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view(masked_array)
+ assert_(np.may_share_memory(a.mask, b.mask))
+
def test_put(self):
# Tests put.
d = arange(5)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 6ce1dc346..d7bc765a9 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -504,6 +504,9 @@ class TestApplyOverAxes(TestCase):
class TestMedian(TestCase):
+ def test_pytype(self):
+ r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
+ assert_equal(r, np.inf)
def test_2d(self):
# Tests median w/ 2D
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 1fd49d774..1d3bef390 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -1577,13 +1577,13 @@ def hermcompanion(c):
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
- scl = np.hstack((1., np.sqrt(2.*np.arange(1, n))))
- scl = np.multiply.accumulate(scl)
+ scl = np.hstack((1., 1./np.sqrt(2.*np.arange(n - 1, 0, -1))))
+ scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(.5*np.arange(1, n))
bot[...] = top
- mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
+ mat[:, -1] -= scl*c[:-1]/(2.0*c[-1])
return mat
@@ -1646,6 +1646,49 @@ def hermroots(c):
return r
+def _normed_hermite_n(x, n):
+ """
+ Evaluate a normalized Hermite polynomial.
+
+ Compute the value of the normalized Hermite polynomial of degree ``n``
+ at the points ``x``.
+
+
+ Parameters
+ ----------
+ x : ndarray of double
+ Points at which to evaluate the function
+ n : int
+ Degree of the normalized Hermite function to be evaluated.
+
+ Returns
+ -------
+ values : ndarray
+ The values of the polynomial, with the same shape as ``x``.
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ This function is needed for finding the Gauss points and integration
+ weights for high degrees. The values of the standard Hermite functions
+ overflow when n >= 207.
+
+ """
+ if n == 0:
+ return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
+
+ c0 = 0.
+ c1 = 1./np.sqrt(np.sqrt(np.pi))
+ nd = float(n)
+ for i in range(n - 1):
+ tmp = c0
+ c0 = -c1*np.sqrt((nd - 1.)/nd)
+ c1 = tmp + c1*x*np.sqrt(2./nd)
+ nd = nd - 1.0
+ return c0 + c1*x*np.sqrt(2)
+
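A cross-check of the recurrence at modest degree, where the standard polynomials are still finite (editorial sketch; ``_normed_hermite_n`` is the private helper added above):

import math
import numpy as np
from numpy.polynomial.hermite import hermval, _normed_hermite_n

x = np.linspace(-1.0, 1.0, 5)
n = 6
c = np.zeros(n + 1)
c[-1] = 1.0                      # coefficients of H_n alone
# H_n / sqrt(2**n * n! * sqrt(pi)) is the orthonormal version.
scale = np.sqrt(2.0**n * math.factorial(n) * np.sqrt(np.pi))
assert np.allclose(hermval(x, c) / scale, _normed_hermite_n(x, n))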
+
def hermgauss(deg):
"""
Gauss-Hermite quadrature.
@@ -1688,22 +1731,21 @@ def hermgauss(deg):
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
- c = np.array([0]*deg + [1])
+ c = np.array([0]*deg + [1], dtype=np.float64)
m = hermcompanion(c)
- x = la.eigvals(m)
+ x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
- dy = hermval(x, c)
- df = hermval(x, hermder(c))
+ dy = _normed_hermite_n(x, ideg)
+ df = _normed_hermite_n(x, ideg - 1) * np.sqrt(2*ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
- fm = hermval(x, c[1:])
+ fm = _normed_hermite_n(x, ideg - 1)
fm /= np.abs(fm).max()
- df /= np.abs(df).max()
- w = 1/(fm * df)
+ w = 1/(fm * fm)
# for Hermite we can also symmetrize
w = (w + w[::-1])/2
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 6e33dc0bc..fce13a84e 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -1575,13 +1575,13 @@ def hermecompanion(c):
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
- scl = np.hstack((1., np.sqrt(np.arange(1, n))))
- scl = np.multiply.accumulate(scl)
+ scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
+ scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
- mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])
+ mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
@@ -1644,6 +1644,49 @@ def hermeroots(c):
return r
+def _normed_hermite_e_n(x, n):
+ """
+ Evaluate a normalized HermiteE polynomial.
+
+ Compute the value of the normalized HermiteE polynomial of degree ``n``
+ at the points ``x``.
+
+
+ Parameters
+ ----------
+ x : ndarray of double
+ Points at which to evaluate the function
+ n : int
+ Degree of the normalized HermiteE function to be evaluated.
+
+ Returns
+ -------
+ values : ndarray
+ The values of the polynomial, with the same shape as ``x``.
+
+ Notes
+ -----
+ .. versionadded:: 1.10.0
+
+ This function is needed for finding the Gauss points and integration
+ weights for high degrees. The values of the standard HermiteE functions
+ overflow when n >= 207.
+
+ """
+ if n == 0:
+ return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
+
+ c0 = 0.
+ c1 = 1./np.sqrt(np.sqrt(2*np.pi))
+ nd = float(n)
+ for i in range(n - 1):
+ tmp = c0
+ c0 = -c1*np.sqrt((nd - 1.)/nd)
+ c1 = tmp + c1*x*np.sqrt(1./nd)
+ nd = nd - 1.0
+ return c0 + c1*x
+
+
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
@@ -1688,20 +1731,19 @@ def hermegauss(deg):
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
- x = la.eigvals(m)
+ x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
- dy = hermeval(x, c)
- df = hermeval(x, hermeder(c))
+ dy = _normed_hermite_e_n(x, ideg)
+ df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
- fm = hermeval(x, c[1:])
+ fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
- df /= np.abs(df).max()
- w = 1/(fm * df)
+ w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index b089d7742..c03666527 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -2919,7 +2919,7 @@ cdef class RandomState:
Plot Gaussian for comparison:
- >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
+ >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
... np.exp(-(x - loc)**2 / (2 * scale**2)))
>>> plt.plot(x,g)
@@ -4220,8 +4220,8 @@ cdef class RandomState:
mean : 1-D array_like, of length N
Mean of the N-dimensional distribution.
cov : 2-D array_like, of shape (N, N)
- Covariance matrix of the distribution. Must be symmetric and
- positive-semidefinite for "physically meaningful" results.
+ Covariance matrix of the distribution. It must be symmetric and
+ positive-semidefinite for proper sampling.
size : int or tuple of ints, optional
Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
@@ -4268,7 +4268,9 @@ cdef class RandomState:
>>> x,y = np.random.multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
- Note that the covariance matrix must be non-negative definite.
+ Note that the covariance matrix must be positive semidefinite (a.k.a.
+ nonnegative-definite). Otherwise, the behavior of this method is
+ undefined and backwards compatibility is not guaranteed.
References
----------
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 258cbe928..dcc02ad57 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -10,7 +10,6 @@ from __future__ import division, absolute_import, print_function
from unittest import TestCase
from . import decorators as dec
+from .nosetester import run_module_suite, NoseTester as Tester
from .utils import *
-from .nosetester import NoseTester as Tester
-from .nosetester import run_module_suite
test = Tester().test
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index dc5929753..3b20f9238 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -159,7 +159,7 @@ else:
""" Return memory usage of running python. [Not implemented]"""
raise NotImplementedError
-if os.name=='nt' and sys.version[:3] > '2.3':
+if os.name=='nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance = None,
inum=-1, format = None, machine=None):
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 17d520891..3981c3b58 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -1,6 +1,9 @@
#!/bin/sh
set -ex
+# travis boxes give you 1.5 cpus
+export NPY_NUM_BUILD_JOBS=2
+
# setup env
if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then
# much faster package installation