summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--COMPATIBILITY59
-rw-r--r--DEV_README.txt18
-rw-r--r--MANIFEST.in2
-rw-r--r--TEST_COMMIT18
-rw-r--r--benchmarks/benchmarks/bench_app.py2
-rw-r--r--benchmarks/benchmarks/common.py12
-rw-r--r--doc/HOWTO_MERGE_WIKI_DOCS.rst.txt49
-rw-r--r--doc/Makefile2
-rw-r--r--doc/release/1.10.0-notes.rst22
-rw-r--r--doc/release/1.10.2-notes.rst153
-rw-r--r--doc/release/1.11.0-notes.rst45
-rw-r--r--doc/source/reference/arrays.indexing.rst2
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst70
-rw-r--r--numpy/_build_utils/apple_accelerate.py2
-rw-r--r--numpy/_build_utils/waf.py531
-rw-r--r--numpy/add_newdocs.py39
-rw-r--r--numpy/compat/tests/test_compat.py2
-rw-r--r--numpy/core/code_generators/generate_umath.py2
-rw-r--r--numpy/core/fromnumeric.py2
-rw-r--r--numpy/core/function_base.py45
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h2
-rw-r--r--numpy/core/include/numpy/npy_common.h15
-rw-r--r--numpy/core/numeric.py15
-rw-r--r--numpy/core/records.py1
-rw-r--r--numpy/core/setup.py15
-rw-r--r--numpy/core/setup_common.py3
-rw-r--r--numpy/core/src/multiarray/buffer.c12
-rw-r--r--numpy/core/src/multiarray/compiled_base.c37
-rw-r--r--numpy/core/src/multiarray/descriptor.c39
-rw-r--r--numpy/core/src/multiarray/getset.c20
-rw-r--r--numpy/core/src/multiarray/mapping.c25
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c84
-rw-r--r--numpy/core/src/private/mem_overlap.c25
-rw-r--r--numpy/core/src/umath/loops.c.src4
-rw-r--r--numpy/core/tests/test_deprecations.py16
-rw-r--r--numpy/core/tests/test_dtype.py4
-rw-r--r--numpy/core/tests/test_item_selection.py7
-rw-r--r--numpy/core/tests/test_mem_overlap.py28
-rw-r--r--numpy/core/tests/test_multiarray.py3
-rw-r--r--numpy/core/tests/test_numeric.py20
-rw-r--r--numpy/core/tests/test_print.py8
-rw-r--r--numpy/core/tests/test_records.py8
-rw-r--r--numpy/core/tests/test_scalarinherit.py1
-rw-r--r--numpy/core/tests/test_umath.py28
-rw-r--r--numpy/distutils/mingw32ccompiler.py4
-rw-r--r--numpy/distutils/misc_util.py35
-rw-r--r--numpy/distutils/msvc9compiler.py2
-rw-r--r--numpy/distutils/msvccompiler.py2
-rw-r--r--numpy/distutils/npy_pkg_config.py3
-rw-r--r--numpy/distutils/system_info.py73
-rw-r--r--numpy/distutils/tests/test_system_info.py17
-rw-r--r--numpy/f2py/__main__.py2
-rw-r--r--numpy/f2py/capi_maps.py6
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py6
-rw-r--r--numpy/f2py/tests/util.py9
-rw-r--r--numpy/lib/format.py3
-rw-r--r--numpy/lib/function_base.py13
-rw-r--r--numpy/lib/polynomial.py3
-rw-r--r--numpy/lib/shape_base.py21
-rw-r--r--numpy/lib/tests/test__datasource.py8
-rw-r--r--numpy/lib/tests/test_format.py3
-rw-r--r--numpy/lib/tests/test_function_base.py57
-rw-r--r--numpy/lib/tests/test_io.py17
-rw-r--r--numpy/lib/tests/test_packbits.py3
-rw-r--r--numpy/lib/tests/test_shape_base.py30
-rw-r--r--numpy/linalg/linalg.py2
-rw-r--r--numpy/linalg/tests/test_deprecations.py2
-rw-r--r--numpy/linalg/tests/test_linalg.py3
-rw-r--r--numpy/ma/core.py372
-rw-r--r--numpy/ma/extras.py145
-rw-r--r--numpy/ma/tests/test_core.py59
-rw-r--r--numpy/ma/tests/test_extras.py54
-rw-r--r--numpy/random/info.py4
-rw-r--r--numpy/random/mtrand/distributions.c3
-rw-r--r--numpy/random/mtrand/mtrand.pyx11
-rw-r--r--numpy/random/tests/test_random.py7
-rw-r--r--numpy/testing/decorators.py19
-rw-r--r--numpy/testing/noseclasses.py19
-rw-r--r--numpy/testing/nosetester.py11
-rw-r--r--numpy/testing/tests/test_decorators.py20
-rw-r--r--numpy/testing/tests/test_utils.py19
-rw-r--r--numpy/testing/utils.py44
-rw-r--r--numpy/tests/test_scripts.py18
-rw-r--r--pavement.py2
-rwxr-xr-xruntests.py1
-rw-r--r--tools/swig/numpy.i4
-rwxr-xr-xtools/travis-test.sh10
-rw-r--r--tools/win32build/build-cpucaps.py2
89 files changed, 1361 insertions, 1285 deletions
diff --git a/COMPATIBILITY b/COMPATIBILITY
deleted file mode 100644
index d2cd3cd27..000000000
--- a/COMPATIBILITY
+++ /dev/null
@@ -1,59 +0,0 @@
-
-
-X.flat returns an indexable 1-D iterator (mostly similar to an array
-but always 1-d) --- only has .copy and .__array__ attributes of an array!!!
-
-.typecode() --> .dtype.char
-
-.iscontiguous() --> .flags['CONTIGUOUS'] or .flags.contiguous
-
-.byteswapped() -> .byteswap()
-
-.itemsize() -> .itemsize
-
-.toscalar() -> .item()
-
-If you used typecode characters:
-
-'c' -> 'S1' or 'c'
-'b' -> 'B'
-'1' -> 'b'
-'s' -> 'h'
-'w' -> 'H'
-'u' -> 'I'
-
-
-C -level
-
-some API calls that used to take PyObject * now take PyArrayObject *
-(this should only cause warnings during compile and not actual problems).
- PyArray_Take
-
-These commands now return a buffer that must be freed once it is used
-using PyMemData_FREE(ptr);
-
-a->descr->zero --> PyArray_Zero(a)
-a->descr->one --> PyArray_One(a)
-
-Numeric/arrayobject.h --> numpy/oldnumeric.h
-
-
-# These will actually work and are defines for PyArray_BYTE,
-# but you really should change it in your code
-PyArray_CHAR --> PyArray_CHAR
- (or PyArray_STRING which is more flexible)
-PyArray_SBYTE --> PyArray_BYTE
-
-Any uses of character codes will need adjusting....
-use PyArray_XXXLTR where XXX is the name of the type.
-
-
-If you used function pointers directly (why did you do that?),
-the arguments have changed. Everything that was an int is now an intp.
-Also, arrayobjects should be passed in at the end.
-
-a->descr->cast[i](fromdata, fromstep, todata, tostep, n)
-a->descr->cast[i](fromdata, todata, n, PyArrayObject *in, PyArrayObject *out)
- anything but single-stepping is not supported by this function
- use the PyArray_CastXXXX functions.
-
diff --git a/DEV_README.txt b/DEV_README.txt
deleted file mode 100644
index 7dc8bceed..000000000
--- a/DEV_README.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-Thank you for your willingness to help make NumPy the best array system
-available.
-
-We have a few simple rules:
-
- * try hard to keep the Git repository in a buildable state and to not
- indiscriminately muck with what others have contributed.
-
- * Simple changes (including bug fixes) and obvious improvements are
- always welcome. Changes that fundamentally change behavior need
- discussion on numpy-discussions@scipy.org before anything is
- done.
-
- * Please add meaningful comments when you check changes in. These
- comments form the basis of the change-log.
-
- * Add unit tests to exercise new code, and regression tests
- whenever you fix a bug.
diff --git a/MANIFEST.in b/MANIFEST.in
index 6f4826478..3695dfe57 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,7 +4,6 @@
# data, etc files to distribution. Avoid using MANIFEST.in for that.
#
include MANIFEST.in
-include COMPATIBILITY
include *.txt
include setupegg.py
include site.cfg.example
@@ -12,6 +11,7 @@ include numpy/random/mtrand/generate_mtrand_c.py
recursive-include numpy/random/mtrand *.pyx *.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
recursive-include numpy/_build_utils *
+recursive-include numpy/linalg/lapack_lite *.c *.h
# Add sdist files whose use depends on local configuration.
include numpy/core/src/multiarray/cblasfuncs.c
include numpy/core/src/multiarray/python_xerbla.c
diff --git a/TEST_COMMIT b/TEST_COMMIT
deleted file mode 100644
index ca662401b..000000000
--- a/TEST_COMMIT
+++ /dev/null
@@ -1,18 +0,0 @@
-oliphant: yes
-stefanv: yes
-rkern: yes
-pearu: yes
-fperez: yes
-chanley: yes
-cookedm: yes
-swalton: yes
-eric: yes
-charris: no
-fonnesbeck: no
-afayolle: no
-dubois: no
-sasha: yes
-tim_hochberg: yes
-jarrod.millman: yes
-ariver: 2010-01-14 20:02:18
-rgommers: test build bot v3
diff --git a/benchmarks/benchmarks/bench_app.py b/benchmarks/benchmarks/bench_app.py
index 0e2aca64b..ccf6e4c4a 100644
--- a/benchmarks/benchmarks/bench_app.py
+++ b/benchmarks/benchmarks/bench_app.py
@@ -4,6 +4,8 @@ from .common import Benchmark
import numpy as np
+from six.moves import xrange
+
class LaplaceInplace(Benchmark):
params = ['inplace', 'normal']
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index c99b0afb8..e98396bed 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import, division, print_function
+
import numpy
import random
@@ -26,7 +28,7 @@ TYPES1 = [
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
-values = [random.uniform(0, 100) for x in range(nx*ny/10)]*10
+values = [random.uniform(0, 100) for x in range(nx*ny//10)]*10
squares = {t: numpy.array(values,
dtype=getattr(numpy, t)).reshape((nx, ny))
@@ -34,16 +36,16 @@ squares = {t: numpy.array(values,
# adjust complex ones to have non-degenerated imagery part -- use
# original data transposed for that
-for t, v in squares.iteritems():
+for t, v in squares.items():
if t.startswith('complex'):
v += v.T*1j
# smaller squares
-squares_ = {t: s[:nxs, :nys] for t, s in squares.iteritems()}
+squares_ = {t: s[:nxs, :nys] for t, s in squares.items()}
# vectors
-vectors = {t: s[0] for t, s in squares.iteritems()}
+vectors = {t: s[0] for t, s in squares.items()}
-indexes = range(nx)
+indexes = list(range(nx))
# so we do not have all items
indexes.pop(5)
indexes.pop(95)
diff --git a/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt b/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt
deleted file mode 100644
index 3431d28b0..000000000
--- a/doc/HOWTO_MERGE_WIKI_DOCS.rst.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-========================================
-Merging documentation back from Doc-Wiki
-========================================
-
-This document describes how to merge back docstring edits from the pydocweb
-wiki (at http://docs.scipy.org/doc/) to NumPy/SciPy trunk.
-
-Basic steps
------------
-It works like this, both for NumPy and SciPy:
-
-1. Go to http://docs.scipy.org/scipy/patch/ and log in.
-2. Click on "Select OK to apply"
-3. Click on "Generate patch"
-4. Select all the text in the browser and save as a patch.
-5. Check the patch file for errors etc., edit if necessary.
- Especially browse through the changes in example codes.
-
- .. warning::
-
- The examples in the documentation will be run eg. on user's computers
- eventually, and we do a very limited screening of the edits on the wiki.
- Hence, before committing things to SVN, you absolutely **MUST** read
- through all changes to the examples (``>>>`` lines, ``plot::``, and
- ``doctest::``) and check that they don't try to do anything silly and
- dangerous.
-
-6. Apply patch (typically ``patch -p1 < newdocs.patch`` from base numpy dir).
- This may ask you to specify location of a few files by hand, though.
-7. Run tests to see if something is broken
-8. Commit
-
-Errors in patch file
---------------------
-
-Note that it is necessary to check the generated patch before trying
-to apply. If there are errors they are noted at the top of the
-file. There are two known reasons for errors:
-
-* If the error message is "source location for docstring is not
- known", then the function usually needs to get handled with
- ``add_newdoc()`` in numpy/add_newdocs.py.
-
- This may also be a sign that the docstring is generated and assigned
- by some automatic means, in which case the generation system may
- need to be revised.
-
-* If there are other messages, this may indicate a bug in the
- patch generation itself.
diff --git a/doc/Makefile b/doc/Makefile
index 47f191374..063ab0db8 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -101,7 +101,7 @@ upload:
ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \
$(UPLOAD_DIR)/numpy-html-$(RELEASE).zip
ssh $(USERNAME)@new.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
- ssh $(USERNAME)@new.scipy.org cp -r $(UPLOAD_DIR)/* /srv/docs_scipy_org/doc/numpy
+ ssh $(USERNAME)@new.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh
#------------------------------------------------------------------------------
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 0341d2a6a..35e967f44 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -20,7 +20,8 @@ Highlights
* Addition of `nanprod` to the set of nanfunctions.
* Support for the '@' operator in Python 3.5.
-Dropped Support:
+Dropped Support
+===============
* The _dotblas module has been removed. CBLAS Support is now in
Multiarray.
@@ -35,15 +36,22 @@ Dropped Support:
* Keywords ``skiprows`` and ``missing`` removed from np.genfromtxt.
* Keyword ``old_behavior`` removed from np.correlate.
-Future Changes:
+Future Changes
+==============
* In array comparisons like ``arr1 == arr2``, many corner cases
involving strings or structured dtypes that used to return scalars
now issue ``FutureWarning`` or ``DeprecationWarning``, and in the
future will be change to either perform elementwise comparisons or
raise an error.
-* The SafeEval class will be removed.
-* The alterdot and restoredot functions will be removed.
+* In ``np.lib.split`` an empty array in the result always had dimension
+ ``(0,)`` no matter the dimensions of the array being split. In Numpy 1.11
+ that behavior will be changed so that the dimensions will be preserved. A
+ ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
+ due to a bug, sometimes no warning was raised and the dimensions were
+ already preserved.
+* The SafeEval class will be removed in Numpy 1.11.
+* The alterdot and restoredot functions will be removed in Numpy 1.11.
See below for more details on these changes.
@@ -70,6 +78,12 @@ relaxed stride checking
~~~~~~~~~~~~~~~~~~~~~~~
NPY_RELAXED_STRIDE_CHECKING is now true by default.
+UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was
+changed to false for back compatibility reasons. More time is needed before
+it can be made the default. As part of the roadmap a deprecation of
+dimension changing views of f_contiguous not c_contiguous arrays was also
+added.
+
Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Using axis != 0 has raised a DeprecationWarning since NumPy 1.7, it now
diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
new file mode 100644
index 000000000..a597a817c
--- /dev/null
+++ b/doc/release/1.10.2-notes.rst
@@ -0,0 +1,153 @@
+NumPy 1.10.2 Release Notes
+**************************
+
+This release deals with a number of bugs that turned up in 1.10.1 and
+adds various build and release improvements.
+
+Numpy 1.10.1 supports Python 2.6 - 2.7 and 3.2 - 3.5.
+
+
+Compatibility notes
+===================
+
+Relaxed stride checking is no longer the default
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+There were back compatibility problems involving views changing the dtype of
+multidimensional Fortran arrays that need to be dealt with over a longer
+timeframe.
+
+Fix swig bug in ``numpy.i``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
+using PyArray_ISFORTRAN to check for Fortran contiguity instead of
+PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
+updated numpy.i
+
+Deprecate views changing dimensions in fortran order
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+This deprecates assignment of a new descriptor to the dtype attribute of
+a non-C-contiguous array if it result in changing the shape. This
+effectively bars viewing a multidimensional Fortran array using a dtype
+that changes the element size along the first axis.
+
+The reason for the deprecation is that, when relaxed strides checking is
+enabled, arrays that are both C and Fortran contiguous are always treated
+as C contiguous which breaks some code that depended the two being mutually
+exclusive for non-scalar arrays of ndim > 1. This deprecation prepares the
+way to always enable relaxed stride checking.
+
+
+Issues Fixed
+============
+
+* gh-6019 Masked array repr fails for structured array with multi-dimensional column.
+* gh-6462 Median of empty array produces IndexError.
+* gh-6467 Performance regression for record array access.
+* gh-6468 numpy.interp uses 'left' value even when x[0]==xp[0].
+* gh-6475 np.allclose returns a memmap when one of its arguments is a memmap.
+* gh-6491 Error in broadcasting stride_tricks array.
+* gh-6495 Unrecognized command line option '-ffpe-summary' in gfortran.
+* gh-6497 Failure of reduce operation on recarrays.
+* gh-6498 Mention change in default casting rule in 1.10 release notes.
+* gh-6530 The partition function errors out on empty input.
+* gh-6532 numpy.inner return wrong inaccurate value sometimes.
+* gh-6563 Intent(out) broken in recent versions of f2py.
+* gh-6569 Cannot run tests after 'python setup.py build_ext -i'
+* gh-6572 Error in broadcasting stride_tricks array component.
+* gh-6575 BUG: Split produces empty arrays with wrong number of dimensions
+* gh-6590 Fortran Array problem in numpy 1.10.
+* gh-6602 Random __all__ missing choice and dirichlet.
+* gh-6611 ma.dot no longer always returns a masked array in 1.10.
+* gh-6618 NPY_FORTRANORDER in make_fortran() in numpy.i
+* gh-6636 Memory leak in nested dtypes in numpy.recarray
+* gh-6641 Subsetting recarray by fields yields a structured array.
+* gh-6667 ma.make_mask handles ma.nomask input incorrectly.
+* gh-6675 Optimized blas detection broken in master and 1.10.
+* gh-6678 Getting unexpected error from: X.dtype = complex (or Y = X.view(complex))
+* gh-6718 f2py test fail in pip installed numpy-1.10.1 in virtualenv.
+* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil.
+* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1
+* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing.
+
+
+Merged PRs
+==========
+
+The following PRs have been merged into 1.10.2. When the PR is a backport,
+the PR number for the original PR against master is listed.
+
+* gh-5773 MAINT: Hide testing helper tracebacks when using them with pytest.
+* gh-6094 BUG: Fixed a bug with string representation of masked structured arrays.
+* gh-6208 MAINT: Speedup field access by removing unneeded safety checks.
+* gh-6460 BUG: Replacing the os.environ.clear by less invasive procedure.
+* gh-6470 BUG: Fix AttributeError in numpy distutils.
+* gh-6472 MAINT: Use Python 3.5 instead of 3.5-dev for travis 3.5 testing.
+* gh-6474 REL: Update Paver script for sdist and auto-switch test warnings.
+* gh-6478 BUG: Fix Intel compiler flags for OS X build.
+* gh-6481 MAINT: LIBPATH with spaces is now supported Python 2.7+ and Win32.
+* gh-6487 BUG: Allow nested use of parameters in definition of arrays in f2py.
+* gh-6488 BUG: Extend common blocks rather than overwriting in f2py.
+* gh-6499 DOC: Mention that default casting for inplace operations has changed.
+* gh-6500 BUG: Recarrays viewed as subarrays don't convert to np.record type.
+* gh-6501 REL: Add "make upload" command for built docs, update "make dist".
+* gh-6526 BUG: Fix use of __doc__ in setup.py for -OO mode.
+* gh-6527 BUG: Fix the IndexError when taking the median of an empty array.
+* gh-6537 BUG: Make ma.atleast_* with scalar argument return arrays.
+* gh-6538 BUG: Fix ma.masked_values does not shrink mask if requested.
+* gh-6546 BUG: Fix inner product regression for non-contiguous arrays.
+* gh-6553 BUG: Fix partition and argpartition error for empty input.
+* gh-6556 BUG: Error in broadcast_arrays with as_strided array.
+* gh-6558 MAINT: Minor update to "make upload" doc build command.
+* gh-6562 BUG: Disable view safety checks in recarray.
+* gh-6567 BUG: Revert some import * fixes in f2py.
+* gh-6574 DOC: Release notes for Numpy 1.10.2.
+* gh-6577 BUG: Fix for #6569, allowing build_ext --inplace
+* gh-6579 MAINT: Fix mistake in doc upload rule.
+* gh-6596 BUG: Fix swig for relaxed stride checking.
+* gh-6606 DOC: Update 1.10.2 release notes.
+* gh-6614 BUG: Add choice and dirichlet to numpy.random.__all__.
+* gh-6621 BUG: Fix swig make_fortran function.
+* gh-6628 BUG: Make allclose return python bool.
+* gh-6642 BUG: Fix memleak in _convert_from_dict.
+* gh-6643 ENH: make recarray.getitem return a recarray.
+* gh-6653 BUG: Fix ma dot to always return masked array.
+* gh-6668 BUG: ma.make_mask should always return nomask for nomask argument.
+* gh-6686 BUG: Fix a bug in assert_string_equal.
+* gh-6695 BUG: Fix removing tempdirs created during build.
+* gh-6697 MAINT: Fix spurious semicolon in macro definition of PyArray_FROM_OT.
+* gh-6698 TST: test np.rint bug for large integers.
+* gh-6717 BUG: Readd fallback CBLAS detection on linux.
+* gh-6721 BUG: Fix for #6719.
+* gh-6726 BUG: Fix bugs exposed by relaxed stride rollback.
+* gh-6757 BUG: link cblas library if cblas is detected.
+* gh-6756 TST: only test f2py, not f2py2.7 etc, fixes #6718.
+* gh-6747 DEP: Deprecate changing shape of non-C-contiguous array via descr.
+* gh-6775 MAINT: Include from __future__ boilerplate in some files missing it.
+* gh-6780 BUG: metadata is not copied to base_dtype.
+* gh-6783 BUG: Fix travis ci testing for new google infrastructure.
+* gh-6785 BUG: Quick and dirty fix for interp.
+
+Initial support for mingwpy was reverted as it was causing problems for
+non-windows builds.
+
+* gh-6536 BUG: Revert gh-5614 to fix non-windows build problems
+
+A fix for np.lib.split was reverted because it resulted in "fixing"
+behavior that will be present in the Numpy 1.11 and that was already
+present in Numpy 1.9. See the discussion of the issue at gh-6575 for
+clarification.
+
+* gh-6576 BUG: Revert gh-6376 to fix split behavior for empty arrays.
+
+Relaxed stride checking was reverted. There were back compatibility
+problems involving views changing the dtype of multidimensional Fortran
+arrays that need to be dealt with over a longer timeframe.
+
+* gh-6735 MAINT: Make no relaxed stride checking the default for 1.10.
+
+
+Notes
+=====
+A bug in the Numpy 1.10.1 release resulted in exceptions being raised for
+``RuntimeWarning`` and ``DeprecationWarning`` in projects depending on Numpy.
+That has been fixed.
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index 68ee370ee..fac868ca3 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -8,20 +8,24 @@ Highlights
==========
-Dropped Support:
+Dropped Support
+===============
* Bento build support and related files have been removed.
* Single file build support and related files have been removed.
-Future Changes:
+Future Changes
+==============
+
+* Relaxed stride checking will become the default.
Compatibility notes
===================
-Deprecated to error
-~~~~~~~~~~~~~~~~~~~
+DeprecationWarning to error
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
* Indexing with floats raises IndexError,
e.g., a[0, 0.0].
@@ -34,6 +38,15 @@ Deprecated to error
* Non-integers used as index values raise TypeError,
e.g., in reshape, take, and specifying reduce axis.
+FutureWarning to changed behavior
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+* In ``np.lib.split`` an empty array in the result always had dimension
+ ``(0,)`` no matter the dimensions of the array being split. This
+ has been changed so that the dimensions will be preserved. A
+ ``FutureWarning`` for this change has been in place since Numpy 1.9 but,
+ due to a bug, sometimes no warning was raised and the dimensions were
+ already preserved.
C API
~~~~~
@@ -60,6 +73,10 @@ via ``python runtests.py --bench``. For more details, see ``benchmarks/README.rs
arrays have memory overlap is added. ``np.may_share_memory`` also now
has an option to spend more effort to reduce false positives.
+* ``SkipTest`` and ``KnownFailureException`` exception classes are exposed in the
+``numpy.testing`` namespace. Raise them in a test function to mark the test to
+be skipped or mark it as a known failure, respectively.
+
Improvements
============
@@ -74,6 +91,13 @@ The function now internally calls the generic ``npy_amergesort``
when the type does not implement a merge-sort kind of ``argsort``
method.
+Memory and speed improvements for masked arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
+``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and avoid
+a big memory peak. Another optimization was done to avoid a memory peak and
+useless computations when printing a masked array.
+
Changes
=======
@@ -81,3 +105,16 @@ Changes
Deprecations
============
+Views of arrays in Fortran order
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The f_contiguous flag was used to signal that views as a dtypes that
+changed the element size would change the first index. This was always a
+bit problematical for arrays that were both f_contiguous and c_contiguous
+because c_contiguous took precendence. Relaxed stride checking results in
+more such dual contiguous arrays and breaks some existing code as a result.
+Note that this also affects changing the dtype by assigning to the dtype
+attribute of an array. The aim of this deprecation is to restrict views to
+c_contiguous arrays at some future time. A work around that is backward
+compatible is to use `a.T.view(...).T` instead. A parameter will also be
+added to the view method to explicitly ask for Fortran order views, but
+that will not be backward compatible.
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index 228e9a8d4..50b2492d2 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -193,7 +193,7 @@ basic slicing that returns a :term:`view`).
fundamentally different than ``x[(1,2,3)]``. The latter is
equivalent to ``x[1,2,3]`` which will trigger basic selection while
the former will trigger advanced indexing. Be sure to understand
- why this is occurs.
+ why this occurs.
Also recognize that ``x[[1,2,3]]`` will trigger advanced indexing,
whereas ``x[[1,2,slice(None)]]`` will trigger basic slicing.
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 201d3e77f..9e908dd98 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -3,6 +3,7 @@ Release Notes
*************
.. include:: ../release/1.11.0-notes.rst
+.. include:: ../release/1.10.2-notes.rst
.. include:: ../release/1.10.1-notes.rst
.. include:: ../release/1.10.0-notes.rst
.. include:: ../release/1.9.2-notes.rst
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 2b8f43749..d94233a2e 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -42,7 +42,7 @@ Some Key Differences
'array' or 'matrix'? Which should I use?
========================================
-Numpy provides, in addition to `np.ndarray`` an additional matrix type
+Numpy provides, in addition to ``np.ndarray``, an additional matrix type
that you may see used in some existing code. Which one to use?
Short answer
@@ -51,7 +51,7 @@ Short answer
**Use arrays**.
- They are the standard vector/matrix/tensor type of numpy. Many numpy
- function return arrays, not matrices.
+ functions return arrays, not matrices.
- There is a clear distinction between element-wise operations and
linear algebra operations.
- You can have standard vectors or row/column vectors if you like.
@@ -123,7 +123,7 @@ There are pros and cons to using both:
- ``:)`` Is quite at home handling data of any rank.
- ``:)`` Closer in semantics to tensor algebra, if you are familiar
with that.
- - ``:)`` *All* operations (``*``, ``/``, ``+``, ```` etc.) are
+ - ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are
elementwise
- ``matrix``
@@ -159,7 +159,7 @@ which hopefully make things easier for Matlab converts.
return ``array``\ s, but the ``matlib`` versions return ``matrix``
objects.
- ``mat`` has been changed to be a synonym for ``asmatrix``, rather
- than ``matrix``, thus making it concise way to convert an ``array``
+ than ``matrix``, thus making it a concise way to convert an ``array``
to a ``matrix`` without copying the data.
- Some top-level functions have been removed. For example
``numpy.rand()`` now needs to be accessed as ``numpy.random.rand()``.
@@ -237,7 +237,7 @@ General Purpose Equivalents
- ``scipy.integrate.ode(f).set_integrator('dopri5')``
- integrate an ODE with Runge-Kutta 4,5
* - ``ode15s``
- - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=15)``
+ - ``scipy.integrate.ode(f).set_integrator('vode', method='bdf', order=5)``
- integrate an ODE with BDF method
Linear Algebra Equivalents
@@ -252,7 +252,7 @@ Linear Algebra Equivalents
* - ``ndims(a)``
- ``ndim(a)`` or ``a.ndim``
- - get the number of dimensions of a (tensor rank)
+ - get the number of dimensions of ``a`` (tensor rank)
* - ``numel(a)``
- ``size(a)`` or ``a.size``
@@ -264,7 +264,7 @@ Linear Algebra Equivalents
* - ``size(a,n)``
- ``a.shape[n-1]``
- - get the number of elements of the n-th dimension of array a. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING <numpy-for-matlab-users.notes>`)
+ - get the number of elements of the n-th dimension of array ``a``. (Note that MATLAB® uses 1 based indexing while Python uses 0 based indexing, See note :ref:`INDEXING <numpy-for-matlab-users.notes>`)
* - ``[ 1 2 3; 4 5 6 ]``
- ``array([[1.,2.,3.], [4.,5.,6.]])``
@@ -273,7 +273,7 @@ Linear Algebra Equivalents
* - ``[ a b; c d ]``
- ``vstack([hstack([a,b]), hstack([c,d])])`` or
``bmat('a b; c d').A``
- - construct a matrix from blocks a,b,c, and d
+ - construct a matrix from blocks ``a``, ``b``, ``c``, and ``d``
* - ``a(end)``
- ``a[-1]``
@@ -345,27 +345,29 @@ Linear Algebra Equivalents
* - ``(a>0.5)``
- ``(a>0.5)``
- - matrix whose i,jth element is (a_ij > 0.5)
+ - matrix whose i,jth element is (a_ij > 0.5). The Matlab result is
+ an array of 0s and 1s. The NumPy result is an array of the boolean
+ values ``False`` and ``True``.
* - ``find(a>0.5)``
- ``nonzero(a>0.5)``
- - find the indices where (a > 0.5)
+ - find the indices where (``a`` > 0.5)
* - ``a(:,find(v>0.5))``
- ``a[:,nonzero(v>0.5)[0]]``
- - extract the columms of a where vector v > 0.5
+ - extract the columms of ``a`` where vector v > 0.5
* - ``a(:,find(v>0.5))``
- ``a[:,v.T>0.5]``
- - extract the columms of a where column vector v > 0.5
+ - extract the columms of ``a`` where column vector v > 0.5
* - ``a(a<0.5)=0``
- ``a[a<0.5]=0``
- - a with elements less than 0.5 zeroed out
+ - ``a`` with elements less than 0.5 zeroed out
* - ``a .* (a>0.5)``
- ``a * (a>0.5)``
- - a with elements less than 0.5 zeroed out
+ - ``a`` with elements less than 0.5 zeroed out
* - ``a(:) = 3``
- ``a[:] = 3``
@@ -380,7 +382,7 @@ Linear Algebra Equivalents
- numpy slices are by reference
* - ``y=x(:)``
- - ``y = x.flatten(1)``
+ - ``y = x.flatten()``
- turn array into vector (note that this forces a copy)
* - ``1:10``
@@ -413,11 +415,11 @@ Linear Algebra Equivalents
* - ``diag(a)``
- ``diag(a)``
- - vector of diagonal elements of a
+ - vector of diagonal elements of ``a``
* - ``diag(a,0)``
- ``diag(a,0)``
- - square diagonal matrix whose nonzero values are the elements of a
+ - square diagonal matrix whose nonzero values are the elements of ``a``
* - ``rand(3,4)``
- ``random.rand(3,4)``
@@ -445,7 +447,7 @@ Linear Algebra Equivalents
* - ``repmat(a, m, n)``
- ``tile(a, (m, n))``
- - create m by n copies of a
+ - create m by n copies of ``a``
* - ``[a b]``
- ``concatenate((a,b),1)`` or ``hstack((a,b))`` or ``column_stack((a,b))`` or ``c_[a,b]``
@@ -453,27 +455,27 @@ Linear Algebra Equivalents
* - ``[a; b]``
- ``concatenate((a,b))`` or ``vstack((a,b))`` or ``r_[a,b]``
- - concatenate rows of a and b
+ - concatenate rows of ``a`` and ``b``
* - ``max(max(a))``
- ``a.max()``
- - maximum element of a (with ndims(a)<=2 for matlab)
+ - maximum element of ``a`` (with ndims(a)<=2 for matlab)
* - ``max(a)``
- ``a.max(0)``
- - maximum element of each column of matrix a
+ - maximum element of each column of matrix ``a``
* - ``max(a,[],2)``
- ``a.max(1)``
- - maximum element of each row of matrix a
+ - maximum element of each row of matrix ``a``
* - ``max(a,b)``
- ``maximum(a, b)``
- - compares a and b element-wise, and returns the maximum value from each pair
+ - compares ``a`` and ``b`` element-wise, and returns the maximum value from each pair
* - ``norm(v)``
- ``sqrt(dot(v,v))`` or ``np.linalg.norm(v)``
- - L2 norm of vector v
+ - L2 norm of vector ``v``
* - ``a & b``
- ``logical_and(a,b)``
@@ -493,15 +495,15 @@ Linear Algebra Equivalents
* - ``inv(a)``
- ``linalg.inv(a)``
- - inverse of square matrix a
+ - inverse of square matrix ``a``
* - ``pinv(a)``
- ``linalg.pinv(a)``
- - pseudo-inverse of matrix a
+ - pseudo-inverse of matrix ``a``
* - ``rank(a)``
- ``linalg.matrix_rank(a)``
- - rank of a matrix a
+ - rank of a matrix ``a``
* - ``a\b``
- ``linalg.solve(a,b)`` if ``a`` is square; ``linalg.lstsq(a,b)`` otherwise
@@ -513,23 +515,23 @@ Linear Algebra Equivalents
* - ``[U,S,V]=svd(a)``
- ``U, S, Vh = linalg.svd(a), V = Vh.T``
- - singular value decomposition of a
+ - singular value decomposition of ``a``
* - ``chol(a)``
- ``linalg.cholesky(a).T``
- - cholesky factorization of a matrix (chol(a) in matlab returns an upper triangular matrix, but linalg.cholesky(a) returns a lower triangular matrix)
+ - cholesky factorization of a matrix (``chol(a)`` in matlab returns an upper triangular matrix, but ``linalg.cholesky(a)`` returns a lower triangular matrix)
* - ``[V,D]=eig(a)``
- ``D,V = linalg.eig(a)``
- - eigenvalues and eigenvectors of a
+ - eigenvalues and eigenvectors of ``a``
* - ``[V,D]=eig(a,b)``
- ``V,D = np.linalg.eig(a,b)``
- - eigenvalues and eigenvectors of a,b
+ - eigenvalues and eigenvectors of ``a``, ``b``
* - ``[V,D]=eigs(a,k)``
-
- - find the k largest eigenvalues and eigenvectors of a
+ - find the ``k`` largest eigenvalues and eigenvectors of ``a``
* - ``[Q,R,P]=qr(a,0)``
- ``Q,R = scipy.linalg.qr(a)``
@@ -545,11 +547,11 @@ Linear Algebra Equivalents
* - ``fft(a)``
- ``fft(a)``
- - Fourier transform of a
+ - Fourier transform of ``a``
* - ``ifft(a)``
- ``ifft(a)``
- - inverse Fourier transform of a
+ - inverse Fourier transform of ``a``
* - ``sort(a)``
- ``sort(a)`` or ``a.sort()``
diff --git a/numpy/_build_utils/apple_accelerate.py b/numpy/_build_utils/apple_accelerate.py
index d7351f4c5..2d5bbab5e 100644
--- a/numpy/_build_utils/apple_accelerate.py
+++ b/numpy/_build_utils/apple_accelerate.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import sys
import re
diff --git a/numpy/_build_utils/waf.py b/numpy/_build_utils/waf.py
deleted file mode 100644
index 263640d9e..000000000
--- a/numpy/_build_utils/waf.py
+++ /dev/null
@@ -1,531 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import os
-import re
-
-import waflib.Configure
-import waflib.Tools.c_config
-from waflib import Logs, Utils
-
-from .common \
- import \
- LONG_DOUBLE_REPRESENTATION_SRC, pyod, \
- long_double_representation
-
-DEFKEYS = waflib.Tools.c_config.DEFKEYS
-DEFINE_COMMENTS = "define_commentz"
-
-def to_header(dct):
- if 'header_name' in dct:
- dct = Utils.to_list(dct['header_name'])
- return ''.join(['#include <%s>\n' % x for x in dct])
- return ''
-
-# Make the given string safe to be used as a CPP macro
-def sanitize_string(s):
- key_up = s.upper()
- return re.sub('[^A-Z0-9_]', '_', key_up)
-
-def validate_arguments(self, kw):
- if not 'env' in kw:
- kw['env'] = self.env.derive()
- if not "compile_mode" in kw:
- kw["compile_mode"] = "c"
- if not 'compile_filename' in kw:
- kw['compile_filename'] = 'test.c' + \
- ((kw['compile_mode'] == 'cxx') and 'pp' or '')
- if not 'features' in kw:
- kw['features'] = [kw['compile_mode']]
- if not 'execute' in kw:
- kw['execute'] = False
- if not 'okmsg' in kw:
- kw['okmsg'] = 'yes'
- if not 'errmsg' in kw:
- kw['errmsg'] = 'no !'
-
- if 'define_name' in kw:
- comment = kw.get('define_comment', None)
- self.undefine_with_comment(kw['define_name'], comment)
-
-def try_compile(self, kw):
- self.start_msg(kw["msg"])
- ret = None
- try:
- ret = self.run_c_code(**kw)
- except self.errors.ConfigurationError as e:
- self.end_msg(kw['errmsg'], 'YELLOW')
- if Logs.verbose > 1:
- raise
- else:
- self.fatal('The configuration failed')
- else:
- kw['success'] = ret
- self.end_msg(self.ret_msg(kw['okmsg'], kw))
-
-@waflib.Configure.conf
-def check_header(self, header_name, **kw):
- code = """
-%s
-
-int main()
-{
-}
-""" % to_header({"header_name": header_name})
-
- kw["code"] = code
- kw["define_comment"] = "/* Define to 1 if you have the <%s> header file. */" % header_name
- kw["define_name"] = "HAVE_%s" % sanitize_string(header_name)
- if not "features" in kw:
- kw["features"] = ["c"]
- kw["msg"] = "Checking for header %r" % header_name
-
- validate_arguments(self, kw)
- try_compile(self, kw)
- ret = kw["success"]
- if ret == 0:
- kw["define_value"] = 1
- else:
- kw["define_value"] = 0
-
- self.post_check(**kw)
- if not kw.get('execute', False):
- return ret == 0
- return ret
-
-@waflib.Configure.conf
-def check_declaration(self, symbol, **kw):
- code = r"""
-int main()
-{
-#ifndef %s
- (void) %s;
-#endif
- ;
- return 0;
-}
-""" % (symbol, symbol)
-
- kw["code"] = to_header(kw) + code
- kw["msg"] = "Checking for macro %r" % symbol
- kw["errmsg"] = "not found"
- kw["okmsg"] = "yes"
-
- validate_arguments(self, kw)
- try_compile(self, kw)
- ret = kw["success"]
-
- kw["define_name"] = "HAVE_DECL_%s" % sanitize_string(symbol)
- kw["define_comment"] = "/* Set to 1 if %s is defined. */" % symbol
- self.post_check(**kw)
- if not kw.get('execute', False):
- return ret == 0
- return ret
-
-@waflib.Configure.conf
-def check_type(self, type_name, **kw):
- code = r"""
-int main() {
- if ((%(type_name)s *) 0)
- return 0;
- if (sizeof (%(type_name)s))
- return 0;
-}
-""" % {"type_name": type_name}
-
- kw["code"] = to_header(kw) + code
- kw["msg"] = "Checking for type %r" % type_name
- kw["errmsg"] = "not found"
- kw["okmsg"] = "yes"
-
- validate_arguments(self, kw)
- try_compile(self, kw)
- ret = kw["success"]
- if ret == 0:
- kw["define_value"] = 1
- else:
- kw["define_value"] = 0
-
- kw["define_name"] = "HAVE_%s" % sanitize_string(type_name)
- kw["define_comment"] = "/* Define to 1 if the system has the type `%s'. */" % type_name
- self.post_check(**kw)
- if not kw.get('execute', False):
- return ret == 0
- return ret
-
-def do_binary_search(conf, type_name, kw):
- code = """\
-typedef %(type)s waf_check_sizeof_type;
-int main ()
-{
- static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) >= 0)];
- test_array [0] = 0
-
- ;
- return 0;
-}
-""" % {"type": type_name}
- kw["code"] = to_header(kw) + code
-
- try:
- conf.run_c_code(**kw)
- except conf.errors.ConfigurationError as e:
- conf.end_msg("failed !")
- if waflib.Logs.verbose > 1:
- raise
- else:
- conf.fatal("The configuration failed !")
-
- body = r"""
-typedef %(type)s waf_check_sizeof_type;
-int main ()
-{
- static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) <= %(size)s)];
- test_array [0] = 0
-
- ;
- return 0;
-}
-"""
- # The principle is simple: we first find low and high bounds
- # of size for the type, where low/high are looked up on a log
- # scale. Then, we do a binary search to find the exact size
- # between low and high
- low = 0
- mid = 0
- while True:
- try:
- kw["code"] = to_header(kw) + body % {"type": type_name, "size": mid}
- validate_arguments(conf, kw)
- conf.run_c_code(**kw)
- break
- except conf.errors.ConfigurationError:
- #log.info("failure to test for bound %d" % mid)
- low = mid + 1
- mid = 2 * mid + 1
-
- high = mid
- ret = None
- # Binary search:
- while low != high:
- mid = (high - low) / 2 + low
- try:
- kw["code"] = to_header(kw) + body % {"type": type_name, "size": mid}
- validate_arguments(conf, kw)
- ret = conf.run_c_code(**kw)
- high = mid
- except conf.errors.ConfigurationError:
- low = mid + 1
-
- return low
-
-@waflib.Configure.conf
-def check_type_size(conf, type_name, expected_sizes=None, **kw):
- kw["define_name"] = "SIZEOF_%s" % sanitize_string(type_name)
- kw["define_comment"] = "/* The size of `%s', as computed by sizeof. */" % type_name
- kw["msg"] = "Checking sizeof(%s)" % type_name
-
- validate_arguments(conf, kw)
- conf.start_msg(kw["msg"])
-
- if expected_sizes is not None:
- try:
- val = int(expected_sizes)
- except TypeError:
- values = expected_sizes
- else:
- values = [val]
-
- size = None
- for value in values:
- code = """\
- typedef %(type)s waf_check_sizeof_type;
- int main ()
- {
- static int test_array [1 - 2 * !(((long) (sizeof (waf_check_sizeof_type))) == %(size)d)];
- test_array [0] = 0
-
- ;
- return 0;
- }
- """ % {"type": type_name, "size": value}
- kw["code"] = to_header(kw) + code
- try:
- conf.run_c_code(**kw)
- size = value
- break
- except conf.errors.ConfigurationError:
- pass
- if size is None:
- size = do_binary_search(conf, type_name, kw)
- else:
- size = do_binary_search(conf, type_name, kw)
-
- kw["define_value"] = size
- kw["success"] = 0
- conf.end_msg(size)
- conf.post_check(**kw)
- return size
-
-@waflib.Configure.conf
-def check_functions_at_once(self, funcs, **kw):
- header = []
- header = ['#ifdef __cplusplus']
- header.append('extern "C" {')
- header.append('#endif')
- for f in funcs:
- header.append("\tchar %s();" % f)
- # Handle MSVC intrinsics: force MS compiler to make a function
- # call. Useful to test for some functions when built with
- # optimization on, to avoid build error because the intrinsic
- # and our 'fake' test declaration do not match.
- header.append("#ifdef _MSC_VER")
- header.append("#pragma function(%s)" % f)
- header.append("#endif")
- header.append('#ifdef __cplusplus')
- header.append('};')
- header.append('#endif')
- funcs_decl = "\n".join(header)
-
- tmp = []
- for f in funcs:
- tmp.append("\t%s();" % f)
- tmp = "\n".join(tmp)
-
- code = r"""
-%(include)s
-%(funcs_decl)s
-
-int main (void)
-{
- %(tmp)s
- return 0;
-}
-""" % {"tmp": tmp, "include": to_header(kw), "funcs_decl": funcs_decl}
- kw["code"] = code
- if not "features" in kw:
- kw["features"] = ["c", "cprogram"]
-
- msg = ", ".join(funcs)
- if len(msg) > 30:
- _funcs = list(funcs)
- msg = []
- while len(", ".join(msg)) < 30 and _funcs:
- msg.append(_funcs.pop(0))
- msg = ", ".join(msg) + ",..."
- if "lib" in kw:
- kw["msg"] = "Checking for functions %s in library %r" % (msg, kw["lib"])
- else:
- kw["msg"] = "Checking for functions %s" % msg
-
- validate_arguments(self, kw)
- try_compile(self, kw)
- ret = kw["success"]
-
- # We set the config.h define here because we need to define several of them
- # in one shot
- if ret == 0:
- for f in funcs:
- self.define_with_comment("HAVE_%s" % sanitize_string(f), 1,
- "/* Define to 1 if you have the `%s' function. */" % f)
-
- self.post_check(**kw)
- if not kw.get('execute', False):
- return ret == 0
- return ret
-
-@waflib.Configure.conf
-def check_inline(conf, **kw):
- validate_arguments(conf, kw)
-
- code = """
-#ifndef __cplusplus
-static %(inline)s int static_func (void)
-{
- return 0;
-}
-%(inline)s int nostatic_func (void)
-{
- return 0;
-}
-#endif"""
-
- conf.start_msg("Checking for inline support")
- inline = None
- for k in ['inline', '__inline__', '__inline']:
- try:
- kw["code"] = code % {"inline": k}
- ret = conf.run_c_code(**kw)
- inline = k
- break
- except conf.errors.ConfigurationError:
- pass
-
- if inline is None:
- conf.end_msg("failed", 'YELLOW')
- if Logs.verbose > 1:
- raise
- else:
- conf.fatal('The configuration failed')
- else:
- kw['success'] = ret
- conf.end_msg(inline)
- return inline
-
-@waflib.Configure.conf
-def check_ldouble_representation(conf, **kw):
- msg = {
- 'INTEL_EXTENDED_12_BYTES_LE': "Intel extended, little endian",
- 'INTEL_EXTENDED_16_BYTES_LE': "Intel extended, little endian",
- 'IEEE_QUAD_BE': "IEEE Quad precision, big endian",
- 'IEEE_QUAD_LE': "IEEE Quad precision, little endian",
- 'IEEE_DOUBLE_LE': "IEEE Double precision, little endian",
- 'IEEE_DOUBLE_BE': "IEEE Double precision, big endian"
- }
-
- code = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
- validate_arguments(conf, kw)
-
- conf.start_msg("Checking for long double representation... ")
- try:
- kw["code"] = code
- ret = conf.run_c_code(**kw)
- except conf.errors.ConfigurationError as e:
- conf.end_msg(kw['errmsg'], 'YELLOW')
- if Logs.verbose > 1:
- raise
- else:
- conf.fatal('The configuration failed')
- else:
- task_gen = conf.test_bld.groups[0][0]
- obj_filename = task_gen.tasks[0].outputs[0].abspath()
- tp = long_double_representation(pyod(obj_filename))
- kw['success'] = ret
- conf.end_msg(msg[tp])
- kw["define_name"] = "HAVE_LDOUBLE_%s" % tp
- kw["define_comment"] = "/* Define for arch-specific long double representation */"
- ret = kw["success"]
-
- conf.post_check(**kw)
- if not kw.get('execute', False):
- return ret == 0
- return ret
-
-@waflib.Configure.conf
-def post_check(self, *k, **kw):
- "set the variables after a test was run successfully"
-
- is_success = False
- if kw['execute']:
- if kw['success'] is not None:
- if kw.get('define_ret', False):
- is_success = kw['success']
- else:
- is_success = (kw['success'] == 0)
- else:
- is_success = (kw['success'] == 0)
-
- def define_or_stuff():
- nm = kw['define_name']
- cmt = kw.get('define_comment', None)
- value = kw.get("define_value", is_success)
- if kw['execute'] and kw.get('define_ret', None) and isinstance(is_success, str):
- self.define_with_comment(kw['define_name'], value, cmt, quote=kw.get('quote', 1))
- else:
- self.define_cond(kw['define_name'], value, cmt)
-
- if 'define_name' in kw:
- define_or_stuff()
-
- if is_success and 'uselib_store' in kw:
- from waflib.Tools import ccroot
-
- # TODO see get_uselib_vars from ccroot.py
- _vars = set([])
- for x in kw['features']:
- if x in ccroot.USELIB_VARS:
- _vars |= ccroot.USELIB_VARS[x]
-
- for k in _vars:
- lk = k.lower()
- if k == 'INCLUDES': lk = 'includes'
- if k == 'DEFKEYS': lk = 'defines'
- if lk in kw:
- val = kw[lk]
- # remove trailing slash
- if isinstance(val, str):
- val = val.rstrip(os.path.sep)
- self.env.append_unique(k + '_' + kw['uselib_store'], val)
- return is_success
-
-@waflib.Configure.conf
-def define_with_comment(conf, define, value, comment=None, quote=True):
- if comment is None:
- return conf.define(define, value, quote)
-
- assert define and isinstance(define, str)
-
- comment_tbl = conf.env[DEFINE_COMMENTS] or {}
- comment_tbl[define] = comment
- conf.env[DEFINE_COMMENTS] = comment_tbl
-
- return conf.define(define, value, quote)
-
-@waflib.Configure.conf
-def undefine_with_comment(conf, define, comment=None):
- if comment is None:
- return conf.undefine(define)
-
- comment_tbl = conf.env[DEFINE_COMMENTS] or {}
- comment_tbl[define] = comment
- conf.env[DEFINE_COMMENTS] = comment_tbl
-
- conf.undefine(define)
-
-@waflib.Configure.conf
-def get_comment(self, key):
- assert key and isinstance(key, str)
-
- if key in self.env[DEFINE_COMMENTS]:
- return self.env[DEFINE_COMMENTS][key]
- return None
-
-@waflib.Configure.conf
-def define_cond(self, name, value, comment):
- """Conditionally define a name.
- Formally equivalent to: if value: define(name, 1) else: undefine(name)"""
- if value:
- self.define_with_comment(name, value, comment)
- else:
- self.undefine(name)
-
-@waflib.Configure.conf
-def get_config_header(self, defines=True, headers=False, define_prefix=None):
- """
- Create the contents of a ``config.h`` file from the defines and includes
- set in conf.env.define_key / conf.env.include_key. No include guards are added.
-
- :param defines: write the defines values
- :type defines: bool
- :param headers: write the headers
- :type headers: bool
- :return: the contents of a ``config.h`` file
- :rtype: string
- """
- tpl = self.env["CONFIG_HEADER_TEMPLATE"] or "%(content)s"
-
- lst = []
- if headers:
- for x in self.env[INCKEYS]:
- lst.append('#include <%s>' % x)
-
- if defines:
- for x in self.env[DEFKEYS]:
- cmt = self.get_comment(x)
- if cmt is not None:
- lst.append(cmt)
- if self.is_defined(x):
- val = self.get_define(x)
- lst.append('#define %s %s\n' % (x, val))
- else:
- lst.append('/* #undef %s */\n' % x)
- return tpl % {"content": "\n".join(lst)}
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index b00e229c3..c14036089 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -3826,6 +3826,45 @@ add_newdoc('numpy.core.multiarray', 'shares_memory',
""")
+add_newdoc('numpy.core.multiarray', 'may_share_memory',
+ """
+ may_share_memory(a, b, max_work=None)
+
+ Determine if two arrays might share memory
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """)
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 9822ab374..1ac24401a 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
from os.path import join
from numpy.compat import isfileobj
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index e3c4fbaa2..b3d8f43ae 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -1006,7 +1006,7 @@ InitOperators(PyObject *dictionary) {
%s
}
""" % (filename, code1, code2, code3)
- return code;
+ return code
if __name__ == "__main__":
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 0fc572cb6..197513294 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1134,7 +1134,7 @@ def resize(a, new_shape):
a = ravel(a)
Na = len(a)
if not Na:
- return mu.zeros(new_shape, a.dtype.char)
+ return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 05fea557a..c82c9bb6b 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -1,6 +1,6 @@
from __future__ import division, absolute_import, print_function
-__all__ = ['logspace', 'linspace', 'may_share_memory']
+__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
@@ -201,46 +201,3 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
-
-
-def may_share_memory(a, b, max_work=None):
- """Determine if two arrays can share memory
-
- A return of True does not necessarily mean that the two arrays
- share any element. It just means that they *might*.
-
- Only the memory bounds of a and b are checked by default.
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem. See
- `shares_memory` for details. Default for ``may_share_memory``
- is to do a bounds check.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- shares_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
- >>> x = np.zeros([3, 4])
- >>> np.may_share_memory(x[:,0], x[:,1])
- True
-
- """
- if max_work is None:
- max_work = MAY_SHARE_BOUNDS
- try:
- return shares_memory(a, b, max_work=max_work)
- except (TooHardError, OverflowError):
- # Unable to determine, assume yes
- return True
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index fbaaeacea..c97a3a797 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -96,7 +96,7 @@ extern "C" CONFUSE_EMACS
NULL)
#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \
- PyArray_DescrFromType(type), 0, 0, 0, NULL);
+ PyArray_DescrFromType(type), 0, 0, 0, NULL)
#define PyArray_FROM_OTF(m, type, flags) \
PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index eff5dd339..47ef94c92 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -61,6 +61,21 @@
#define NPY_UNLIKELY(x) (x)
#endif
+#ifdef HAVE___BUILTIN_PREFETCH
+/* unlike _mm_prefetch also works on non-x86 */
+#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))
+#else
+#ifdef HAVE__MM_PREFETCH
+/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */
+#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \
+ (loc == 1 ? _MM_HINT_T2 : \
+ (loc == 2 ? _MM_HINT_T1 : \
+ (loc == 3 ? _MM_HINT_T0 : -1))))
+#else
+#define NPY_PREFETCH(x, rw,loc)
+#endif
+#endif
+
#if defined(_MSC_VER)
#define NPY_INLINE __inline
#elif defined(__GNUC__)
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 5c0e27239..3b442ea78 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -41,7 +41,8 @@ __all__ = [
'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
- 'shares_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'TooHardError',
+ 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
+ 'TooHardError',
]
if sys.version_info[0] < 3:
@@ -384,6 +385,7 @@ fromiter = multiarray.fromiter
fromfile = multiarray.fromfile
frombuffer = multiarray.frombuffer
shares_memory = multiarray.shares_memory
+may_share_memory = multiarray.may_share_memory
if sys.version_info[0] < 3:
newbuffer = multiarray.newbuffer
getbuffer = multiarray.getbuffer
@@ -696,8 +698,12 @@ def require(a, dtype=None, requirements=None):
def isfortran(a):
"""
- Returns True if array is arranged in Fortran-order in memory
- and not C-order.
+ Returns True if the array is Fortran contiguous but *not* C contiguous.
+
+ This function is obsolete and, because of changes due to relaxed stride
+ checking, its return value for the same array may differ for versions
+ of Numpy >= 1.10 and previous versions. If you only want to check if an
+ array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
@@ -2274,7 +2280,8 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
True
"""
- return all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
+ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
+ return bool(res)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 4ce3fe98a..b07755384 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -502,6 +502,7 @@ class recarray(ndarray):
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.fields:
+ obj = obj.view(recarray)
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index aa9e03e06..2e9e277af 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -257,17 +257,10 @@ def check_types(config_cmd, ext, build_dir):
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
- expected = {}
- expected['short'] = [2]
- expected['int'] = [4]
- expected['long'] = [8, 4]
- expected['float'] = [4]
- expected['double'] = [8]
- expected['long double'] = [16, 12, 8]
- expected['Py_intptr_t'] = [8, 4]
- expected['PY_LONG_LONG'] = [8]
- expected['long long'] = [8]
- expected['off_t'] = [8, 4]
+ expected = {'short': [2], 'int': [4], 'long': [8, 4],
+ 'float': [4], 'double': [8], 'long double': [16, 12, 8],
+ 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],
+ 'off_t': [8, 4]}
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 68efd1791..d93e475e3 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -125,7 +125,10 @@ OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
+ ("_mm_prefetch", '(float*)0, _MM_HINT_NTA',
+ "xmmintrin.h"), # SSE
("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
+ ("__builtin_prefetch", "(float*)0, 0, 3"),
]
# function attributes
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 7f7607e1f..5fa3ba95b 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -629,8 +629,6 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
{
PyArrayObject *self;
_buffer_info_t *info = NULL;
- int i;
- Py_ssize_t sd;
self = (PyArrayObject*)obj;
@@ -715,15 +713,19 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
* regenerate strides from shape.
*/
if (PyArray_CHKFLAGS(self, NPY_ARRAY_C_CONTIGUOUS) &&
- !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) {
- sd = view->itemsize;
+ !((flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS)) {
+ Py_ssize_t sd = view->itemsize;
+ int i;
+
for (i = view->ndim-1; i >= 0; --i) {
view->strides[i] = sd;
sd *= view->shape[i];
}
}
else if (PyArray_CHKFLAGS(self, NPY_ARRAY_F_CONTIGUOUS)) {
- sd = view->itemsize;
+ Py_ssize_t sd = view->itemsize;
+ int i;
+
for (i = 0; i < view->ndim; ++i) {
view->strides[i] = sd;
sd *= view->shape[i];
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 8ffeedac2..b9db3bb8f 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -529,14 +529,15 @@ binary_search_with_guess(const npy_double key, const npy_double *arr,
}
/*
- * It would seem that for the following code to work, 'len' should
- * at least be 4. But because of the way 'guess' is normalized, it
- * will always be set to 1 if len <= 4. Given that, and that keys
- * outside of the 'arr' bounds have already been handled, and the
- * order in which comparisons happen below, it should become obvious
- * that it will work with any array of at least 2 items.
+ * If len <= 4 use linear search.
+ * From above we know key >= arr[0] when we start.
*/
- assert (len >= 2);
+ if (len <= 4) {
+ npy_intp i;
+
+ for (i = 1; i < len && key >= arr[i]; ++i);
+ return i - 1;
+ }
if (guess > len - 3) {
guess = len - 3;
@@ -546,36 +547,36 @@ binary_search_with_guess(const npy_double key, const npy_double *arr,
}
/* check most likely values: guess - 1, guess, guess + 1 */
- if (key <= arr[guess]) {
- if (key <= arr[guess - 1]) {
+ if (key < arr[guess]) {
+ if (key < arr[guess - 1]) {
imax = guess - 1;
/* last attempt to restrict search to items in cache */
if (guess > LIKELY_IN_CACHE_SIZE &&
- key > arr[guess - LIKELY_IN_CACHE_SIZE]) {
+ key >= arr[guess - LIKELY_IN_CACHE_SIZE]) {
imin = guess - LIKELY_IN_CACHE_SIZE;
}
}
else {
- /* key > arr[guess - 1] */
+ /* key >= arr[guess - 1] */
return guess - 1;
}
}
else {
- /* key > arr[guess] */
- if (key <= arr[guess + 1]) {
+ /* key >= arr[guess] */
+ if (key < arr[guess + 1]) {
return guess;
}
else {
- /* key > arr[guess + 1] */
- if (key <= arr[guess + 2]) {
+ /* key >= arr[guess + 1] */
+ if (key < arr[guess + 2]) {
return guess + 1;
}
else {
- /* key > arr[guess + 2] */
+ /* key >= arr[guess + 2] */
imin = guess + 2;
/* last attempt to restrict search to items in cache */
if (guess < len - LIKELY_IN_CACHE_SIZE - 1 &&
- key <= arr[guess + LIKELY_IN_CACHE_SIZE]) {
+ key < arr[guess + LIKELY_IN_CACHE_SIZE]) {
imax = guess + LIKELY_IN_CACHE_SIZE;
}
}
@@ -673,7 +674,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
}
- /* binary_search_with_guess needs at least a 2 item long array */
+ /* binary_search_with_guess needs at least a 3 item long array */
if (lenxp == 1) {
const npy_double xp_val = dx[0];
const npy_double fp_val = dy[0];
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 05397228e..03a4654a0 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -36,6 +36,19 @@ static PyObject *typeDict = NULL; /* Must be explicitly loaded */
static PyArray_Descr *
_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag);
+
+/*
+ * Returns value of PyMapping_GetItemString but as a borrowed reference instead
+ * of a new reference.
+ */
+static PyObject *
+Borrowed_PyMapping_GetItemString(PyObject *o, char *key)
+{
+ PyObject *ret = PyMapping_GetItemString(o, key);
+ Py_XDECREF(ret);
+ return ret;
+}
+
/*
* Creates a dtype object from ctypes inputs.
*
@@ -793,11 +806,19 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
}
new->elsize = conv->elsize;
if (PyDataType_HASFIELDS(conv)) {
+ Py_XDECREF(new->fields);
new->fields = conv->fields;
Py_XINCREF(new->fields);
+
+ Py_XDECREF(new->names);
new->names = conv->names;
Py_XINCREF(new->names);
}
+ if (conv->metadata != NULL) {
+ Py_XDECREF(new->metadata);
+ new->metadata = conv->metadata;
+ Py_XINCREF(new->metadata);
+ }
new->flags = conv->flags;
Py_DECREF(conv);
*errflag = 0;
@@ -952,17 +973,19 @@ _convert_from_dict(PyObject *obj, int align)
if (fields == NULL) {
return (PyArray_Descr *)PyErr_NoMemory();
}
- /* Use PyMapping_GetItemString to support dictproxy objects as well */
- names = PyMapping_GetItemString(obj, "names");
- descrs = PyMapping_GetItemString(obj, "formats");
+ /*
+ * Use PyMapping_GetItemString to support dictproxy objects as well.
+ */
+ names = Borrowed_PyMapping_GetItemString(obj, "names");
+ descrs = Borrowed_PyMapping_GetItemString(obj, "formats");
if (!names || !descrs) {
Py_DECREF(fields);
PyErr_Clear();
return _use_fields_dict(obj, align);
}
n = PyObject_Length(names);
- offsets = PyMapping_GetItemString(obj, "offsets");
- titles = PyMapping_GetItemString(obj, "titles");
+ offsets = Borrowed_PyMapping_GetItemString(obj, "offsets");
+ titles = Borrowed_PyMapping_GetItemString(obj, "titles");
if (!offsets || !titles) {
PyErr_Clear();
}
@@ -980,7 +1003,7 @@ _convert_from_dict(PyObject *obj, int align)
* If a property 'aligned' is in the dict, it overrides the align flag
* to be True if it not already true.
*/
- tmp = PyMapping_GetItemString(obj, "aligned");
+ tmp = Borrowed_PyMapping_GetItemString(obj, "aligned");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1154,7 +1177,7 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Override the itemsize if provided */
- tmp = PyMapping_GetItemString(obj, "itemsize");
+ tmp = Borrowed_PyMapping_GetItemString(obj, "itemsize");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1186,7 +1209,7 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Add the metadata if provided */
- metadata = PyMapping_GetItemString(obj, "metadata");
+ metadata = Borrowed_PyMapping_GetItemString(obj, "metadata");
if (metadata == NULL) {
PyErr_Clear();
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 549ea333a..c2a88e3b9 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -488,11 +488,25 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
if ((newtype->elsize != PyArray_DESCR(self)->elsize) &&
- (PyArray_NDIM(self) == 0 || !PyArray_ISONESEGMENT(self) ||
- PyDataType_HASSUBARRAY(newtype))) {
+ (PyArray_NDIM(self) == 0 ||
+ !PyArray_ISONESEGMENT(self) ||
+ PyDataType_HASSUBARRAY(newtype))) {
goto fail;
}
- if (PyArray_ISCONTIGUOUS(self)) {
+
+ /* Deprecate not C contiguous and a dimension changes */
+ if (newtype->elsize != PyArray_DESCR(self)->elsize &&
+ !PyArray_IS_C_CONTIGUOUS(self)) {
+ /* 11/27/2015 1.11.0 */
+ if (DEPRECATE("Changing the shape of non-C contiguous array by\n"
+ "descriptor assignment is deprecated. To maintain\n"
+ "the Fortran contiguity of a multidimensional Fortran\n"
+ "array, use 'a.T.view(...).T' instead") < 0) {
+ return -1;
+ }
+ }
+
+ if (PyArray_IS_C_CONTIGUOUS(self)) {
i = PyArray_NDIM(self) - 1;
}
else {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 44de1cbf2..6c56d77bb 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -169,7 +169,8 @@ prepare_index(PyArrayObject *self, PyObject *index,
int new_ndim, fancy_ndim, used_ndim, index_ndim;
int curr_idx, get_idx;
- npy_intp i, n;
+ int i;
+ npy_intp n;
npy_bool make_tuple = 0;
PyObject *obj = NULL;
@@ -348,14 +349,15 @@ prepare_index(PyArrayObject *self, PyObject *index,
#else
if (PyLong_CheckExact(obj) || !PyArray_Check(obj)) {
#endif
- i = PyArray_PyIntAsIntp(obj);
- if ((i == -1) && PyErr_Occurred()) {
+ npy_intp ind = PyArray_PyIntAsIntp(obj);
+
+ if ((ind == -1) && PyErr_Occurred()) {
PyErr_Clear();
}
else {
index_type |= HAS_INTEGER;
indices[curr_idx].object = NULL;
- indices[curr_idx].value = i;
+ indices[curr_idx].value = ind;
indices[curr_idx].type = HAS_INTEGER;
used_ndim += 1;
new_ndim += 0;
@@ -527,15 +529,16 @@ prepare_index(PyArrayObject *self, PyObject *index,
* sure that array-likes or odder arrays are always
* handled right.
*/
- i = PyArray_PyIntAsIntp((PyObject *)arr);
+ npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr);
+
Py_DECREF(arr);
- if ((i == -1) && PyErr_Occurred()) {
+ if ((ind == -1) && PyErr_Occurred()) {
goto failed_building_indices;
}
else {
index_type |= (HAS_INTEGER | HAS_SCALAR_ARRAY);
indices[curr_idx].object = NULL;
- indices[curr_idx].value = i;
+ indices[curr_idx].value = ind;
indices[curr_idx].type = HAS_INTEGER;
used_ndim += 1;
new_ndim += 0;
@@ -1293,7 +1296,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyArray_NDIM(arr),
PyArray_SHAPE(arr),
PyArray_STRIDES(arr),
- PyArray_DATA(arr) + offset,
+ PyArray_BYTES(arr) + offset,
PyArray_FLAGS(arr),
(PyObject *)arr);
if (*view == NULL) {
@@ -2445,8 +2448,8 @@ mapiter_fill_info(PyArrayMapIterObject *mit, npy_index_info *indices,
/* advance curr_dim for non-fancy indices */
else if (indices[i].type == HAS_ELLIPSIS) {
- curr_dim += indices[i].value;
- result_dim += indices[i].value;
+ curr_dim += (int)indices[i].value;
+ result_dim += (int)indices[i].value;
}
else if (indices[i].type != HAS_NEWAXIS){
curr_dim += 1;
@@ -2891,7 +2894,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
stride = extra_op_dtype->elsize;
for (i=PyArray_NDIM(subspace) - 1; i >= 0; i--) {
strides[mit->nd_fancy + strideperm[i].perm] = stride;
- stride *= PyArray_DIM(subspace, strideperm[i].perm);
+ stride *= PyArray_DIM(subspace, (int)strideperm[i].perm);
}
/*
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 10c22ae5a..b9d79029e 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -3989,8 +3989,11 @@ test_interrupt(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
-array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
+array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_work,
+ int raise_exceptions)
{
+ PyObject * self_obj = NULL;
+ PyObject * other_obj = NULL;
PyArrayObject * self = NULL;
PyArrayObject * other = NULL;
PyObject *max_work_obj = NULL;
@@ -3998,16 +4001,40 @@ array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwd
mem_overlap_t result;
static PyObject *too_hard_cls = NULL;
- Py_ssize_t max_work = NPY_MAY_SHARE_EXACT;
+ Py_ssize_t max_work;
NPY_BEGIN_THREADS_DEF;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O&|O", kwlist,
- PyArray_Converter, &self,
- PyArray_Converter, &other,
- &max_work_obj)) {
+ max_work = default_max_work;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O", kwlist,
+ &self_obj, &other_obj, &max_work_obj)) {
return NULL;
}
+ if (PyArray_Check(self_obj)) {
+ self = (PyArrayObject*)self_obj;
+ Py_INCREF(self);
+ }
+ else {
+ /* Use FromAny to enable checking overlap for objects exposing array
+ interfaces etc. */
+ self = (PyArrayObject*)PyArray_FromAny(self_obj, NULL, 0, 0, 0, NULL);
+ if (self == NULL) {
+ goto fail;
+ }
+ }
+
+ if (PyArray_Check(other_obj)) {
+ other = (PyArrayObject*)other_obj;
+ Py_INCREF(other);
+ }
+ else {
+ other = (PyArrayObject*)PyArray_FromAny(other_obj, NULL, 0, 0, 0, NULL);
+ if (other == NULL) {
+ goto fail;
+ }
+ }
+
if (max_work_obj == NULL || max_work_obj == Py_None) {
/* noop */
}
@@ -4043,17 +4070,29 @@ array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwd
Py_RETURN_TRUE;
}
else if (result == MEM_OVERLAP_OVERFLOW) {
- PyErr_SetString(PyExc_OverflowError,
- "Integer overflow in computing overlap");
- return NULL;
+ if (raise_exceptions) {
+ PyErr_SetString(PyExc_OverflowError,
+ "Integer overflow in computing overlap");
+ return NULL;
+ }
+ else {
+ /* Don't know, so say yes */
+ Py_RETURN_TRUE;
+ }
}
else if (result == MEM_OVERLAP_TOO_HARD) {
- npy_cache_import("numpy.core._internal", "TooHardError",
- &too_hard_cls);
- if (too_hard_cls) {
- PyErr_SetString(too_hard_cls, "Exceeded max_work");
+ if (raise_exceptions) {
+ npy_cache_import("numpy.core._internal", "TooHardError",
+ &too_hard_cls);
+ if (too_hard_cls) {
+ PyErr_SetString(too_hard_cls, "Exceeded max_work");
+ }
+ return NULL;
+ }
+ else {
+ /* Don't know, so say yes */
+ Py_RETURN_TRUE;
}
- return NULL;
}
else {
/* Doesn't happen usually */
@@ -4069,6 +4108,20 @@ fail:
}
+static PyObject *
+array_shares_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
+{
+ return array_shares_memory_impl(args, kwds, NPY_MAY_SHARE_EXACT, 1);
+}
+
+
+static PyObject *
+array_may_share_memory(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
+{
+ return array_shares_memory_impl(args, kwds, NPY_MAY_SHARE_BOUNDS, 0);
+}
+
+
static struct PyMethodDef array_module_methods[] = {
{"_get_ndarray_c_version",
(PyCFunction)array__get_ndarray_c_version,
@@ -4178,6 +4231,9 @@ static struct PyMethodDef array_module_methods[] = {
{"shares_memory",
(PyCFunction)array_shares_memory,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"may_share_memory",
+ (PyCFunction)array_may_share_memory,
+ METH_VARARGS | METH_KEYWORDS, NULL},
/* Datetime-related functions */
{"datetime_data",
(PyCFunction)array_datetime_data,
diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/private/mem_overlap.c
index 3cab83497..b2b80b4e6 100644
--- a/numpy/core/src/private/mem_overlap.c
+++ b/numpy/core/src/private/mem_overlap.c
@@ -479,6 +479,7 @@ NPY_VISIBILITY_HIDDEN mem_overlap_t
solve_diophantine(unsigned int n, diophantine_term_t *E, npy_int64 b,
Py_ssize_t max_work, int require_ub_nontrivial, npy_int64 *x)
{
+ mem_overlap_t res;
unsigned int j;
for (j = 0; j < n; ++j) {
@@ -535,15 +536,27 @@ solve_diophantine(unsigned int n, diophantine_term_t *E, npy_int64 b,
return MEM_OVERLAP_NO;
}
else {
- diophantine_term_t Ep[n];
- npy_int64 Epsilon[n], Gamma[n];
Py_ssize_t count = 0;
+ diophantine_term_t *Ep = NULL;
+ npy_int64 *Epsilon = NULL, *Gamma = NULL;
- if (diophantine_precompute(n, E, Ep, Gamma, Epsilon)) {
- return MEM_OVERLAP_OVERFLOW;
+ Ep = malloc(n * sizeof(diophantine_term_t));
+ Epsilon = malloc(n * sizeof(npy_int64));
+ Gamma = malloc(n * sizeof(npy_int64));
+ if (Ep == NULL || Epsilon == NULL || Gamma == NULL) {
+ res = MEM_OVERLAP_ERROR;
+ }
+ else if (diophantine_precompute(n, E, Ep, Gamma, Epsilon)) {
+ res = MEM_OVERLAP_OVERFLOW;
+ }
+ else {
+ res = diophantine_dfs(n, n-1, E, Ep, Gamma, Epsilon, b, max_work,
+ require_ub_nontrivial, x, &count);
}
- return diophantine_dfs(n, n-1, E, Ep, Gamma, Epsilon, b, max_work,
- require_ub_nontrivial, x, &count);
+ free(Ep);
+ free(Gamma);
+ free(Epsilon);
+ return res;
}
}
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 854c1e17a..aff6180c7 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1444,6 +1444,8 @@ pairwise_sum_@TYPE@(@dtype@ *a, npy_uintp n, npy_intp stride)
r[7] = @trf@(a[7 * stride]);
for (i = 8; i < n - (n % 8); i += 8) {
+ /* small blocksizes seem to mess with hardware prefetch */
+ NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3);
r[0] += @trf@(a[(i + 0) * stride]);
r[1] += @trf@(a[(i + 1) * stride]);
r[2] += @trf@(a[(i + 2) * stride]);
@@ -2190,6 +2192,8 @@ pairwise_sum_@TYPE@(@ftype@ *rr, @ftype@ * ri, @ftype@ * a, npy_uintp n,
r[7] = a[6 * stride + 1];
for (i = 8; i < n - (n % 8); i += 8) {
+ /* small blocksizes seem to mess with hardware prefetch */
+ NPY_PREFETCH(&a[(i + 512 / sizeof(a[0])) * stride], 0, 3);
r[0] += a[(i + 0) * stride];
r[1] += a[(i + 0) * stride + 1];
r[2] += a[(i + 2) * stride];
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index e2542195f..8f7e55d91 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -375,7 +375,7 @@ class TestBooleanIndexShapeMismatchDeprecation():
arr.__getitem__, (slice(None), index))
-class TestFullDefaultDtype:
+class TestFullDefaultDtype(object):
"""np.full defaults to float when dtype is not set. In the future, it will
use the fill value's dtype.
"""
@@ -386,5 +386,19 @@ class TestFullDefaultDtype:
assert_no_warnings(np.full, 1, 1, float)
+class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
+ """View of non-C-contiguous arrays deprecated in 1.11.0.
+
+ The deprecation will not be raised for arrays that are both C and F
+ contiguous, as C contiguous is dominant. There are more such arrays
+ with relaxed stride checking than without so the deprecation is not
+ as visible with relaxed stride checking in force.
+ """
+
+ def test_fortran_contiguous(self):
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 29f2ee7bd..6d898eaa1 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -408,6 +408,10 @@ class TestMetadata(TestCase):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
self.assertEqual(d['a'].metadata, {'datum': 1})
+ def test_base_metadata_copied(self):
+ d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
+ assert_equal(d.metadata, {'datum': 1})
+
class TestString(TestCase):
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index 5e9cadd8f..ddce20fe9 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -18,10 +18,9 @@ class TestTake(TestCase):
index_arrays = [np.empty(0, dtype=np.intp),
np.empty(tuple(), dtype=np.intp),
np.empty((1, 1), dtype=np.intp)]
- real_indices = {}
- real_indices['raise'] = {-1:1, 4:IndexError}
- real_indices['wrap'] = {-1:1, 4:0}
- real_indices['clip'] = {-1:0, 4:1}
+ real_indices = {'raise': {-1: 1, 4: IndexError},
+ 'wrap': {-1: 1, 4: 0},
+ 'clip': {-1: 0, 4: 1}}
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
# refcounted struct on top of object.
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 728cc675d..8d39fa4c0 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -482,5 +482,33 @@ def test_internal_overlap_fuzz():
no_overlap += 1
+def test_non_ndarray_inputs():
+ # Regression check for gh-5604
+
+ class MyArray(object):
+ def __init__(self, data):
+ self.data = data
+
+ @property
+ def __array_interface__(self):
+ return self.data.__array_interface__
+
+ class MyArray2(object):
+ def __init__(self, data):
+ self.data = data
+
+ def __array__(self):
+ return self.data
+
+ for cls in [MyArray, MyArray2]:
+ x = np.arange(5)
+
+ assert_(np.may_share_memory(cls(x[::2]), x[1::2]))
+ assert_(not np.shares_memory(cls(x[::2]), x[1::2]))
+
+ assert_(np.shares_memory(cls(x[1::3]), x[::2]))
+ assert_(np.may_share_memory(cls(x[1::3]), x[::2]))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 15dd9302c..693847273 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -17,7 +17,6 @@ from decimal import Decimal
import numpy as np
-from nose import SkipTest
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
@@ -29,7 +28,7 @@ from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose,
- assert_array_less, runstring, dec
+ assert_array_less, runstring, dec, SkipTest
)
# Need to test an object that does not fully implement math interface
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 2fa8593b9..43dad42f1 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -30,7 +30,15 @@ class TestResize(TestCase):
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
- assert_equal(Ar, np.array([]))
+ assert_array_equal(Ar, np.array([]))
+ assert_equal(A.dtype, Ar.dtype)
+
+ def test_reshape_from_zero(self):
+ # See also gh-6740
+ A = np.zeros(0, dtype=[('a', np.float32, 1)])
+ Ar = np.resize(A, (2, 1))
+ assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
+ assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(TestCase):
@@ -1471,6 +1479,16 @@ class TestAllclose(object):
x = np.array([1.0, np.nan])
assert_(np.allclose(x, x, equal_nan=True))
+ def test_return_class_is_ndarray(self):
+ # Issue gh-6475
+ # Check that allclose does not preserve subtypes
+ class Foo(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ return np.array(*args, **kwargs).view(cls)
+
+ a = Foo([1])
+ assert_(type(np.allclose(a, a)) is bool)
+
class TestIsclose(object):
rtol = 1e-5
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index f595cbe44..6234b641e 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -6,7 +6,7 @@ import nose
import numpy as np
from numpy.testing import (
- run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal, SkipTest
)
@@ -207,7 +207,7 @@ def test_scalar_format():
def in_foreign_locale(func):
"""
Swap LC_NUMERIC locale to one in which the decimal point is ',' and not '.'
- If not possible, raise nose.SkipTest
+ If not possible, raise SkipTest
"""
if sys.platform == 'win32':
@@ -225,8 +225,8 @@ def in_foreign_locale(func):
except locale.Error:
pass
else:
- raise nose.SkipTest("Skipping locale test, because "
- "French locale not found")
+ raise SkipTest("Skipping locale test, because "
+ "French locale not found")
return func(*args, **kwargs)
finally:
locale.setlocale(locale.LC_NUMERIC, locale=curloc)
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 290bc4fa7..e0f0a3a8f 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -121,6 +121,14 @@ class TestFromrecords(TestCase):
assert_equal(type(rv), np.recarray)
assert_equal(rv.dtype.type, np.record)
+ # check that getitem also preserves np.recarray and np.record
+ r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
+ ('c', 'i4,i4')]))
+ assert_equal(r['c'].dtype.type, np.record)
+ assert_equal(type(r['c']), np.recarray)
+ assert_equal(r[['a', 'b']].dtype.type, np.record)
+ assert_equal(type(r[['a', 'b']]), np.recarray)
+
# check that accessing nested structures keep record type, but
# not for subarrays, non-void structures, non-structured voids
test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index a2ca3e458..d8fd0acc3 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -2,6 +2,7 @@
""" Test printing of scalar types.
"""
+from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import TestCase, run_module_suite
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index ebf8e0380..2ba988b87 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1836,16 +1836,14 @@ def test_spacing_gfortran():
# WRITE(*,*) spacing(1000._SGL)
# WRITE(*,*) spacing(10500._SGL)
# END PROGRAM
- ref = {}
- ref[np.float64] = [1.69406589450860068E-021,
- 2.22044604925031308E-016,
- 1.13686837721616030E-013,
- 1.81898940354585648E-012]
- ref[np.float32] = [
- 9.09494702E-13,
- 1.19209290E-07,
- 6.10351563E-05,
- 9.76562500E-04]
+ ref = {np.float64: [1.69406589450860068E-021,
+ 2.22044604925031308E-016,
+ 1.13686837721616030E-013,
+ 1.81898940354585648E-012],
+ np.float32: [9.09494702E-13,
+ 1.19209290E-07,
+ 6.10351563E-05,
+ 9.76562500E-04]}
for dt, dec_ in zip([np.float32, np.float64], (10, 20)):
x = np.array([1e-5, 1, 1000, 10500], dtype=dt)
@@ -1928,5 +1926,15 @@ def test_complex_nan_comparisons():
assert_equal(x == y, False, err_msg="%r == %r" % (x, y))
+def test_rint_big_int():
+ # np.rint bug for large integer values on Windows 32-bit and MKL
+ # https://github.com/numpy/numpy/issues/6685
+ val = 4607998452777363968
+ # This is exactly representable in floating point
+ assert_equal(val, int(float(val)))
+ # Rint should not change the value
+ assert_equal(val, np.rint(val))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index d22a2818e..111653a82 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -244,9 +244,7 @@ def find_python_dll():
# - find it in python main dir
# - in system32,
# - ortherwise (Sxs), I don't know how to get it.
- lib_dirs = []
- lib_dirs.append(sys.prefix)
- lib_dirs.append(os.path.join(sys.prefix, 'lib'))
+ lib_dirs = [sys.prefix, os.path.join(sys.prefix, 'lib')]
try:
lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'system32'))
except KeyError:
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 75d864c5a..345e60f26 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -18,6 +18,20 @@ try:
except ImportError:
from dummy_threading import local as tlocal
+# stores temporary directory of each thread to only create one per thread
+_tdata = tlocal()
+
+# store all created temporary directories so they can be deleted on exit
+_tmpdirs = []
+def clean_up_temporary_directory():
+ for d in _tmpdirs:
+ try:
+ shutil.rmtree(d)
+ except OSError:
+ pass
+
+atexit.register(clean_up_temporary_directory)
+
try:
set
except NameError:
@@ -283,26 +297,13 @@ def gpaths(paths, local_path='', include_non_existing=True):
paths = (paths,)
return _fix_paths(paths, local_path, include_non_existing)
-
-def clean_up_temporary_directory():
- tdata = tlocal()
- _temporary_directory = getattr(tdata, 'tempdir', None)
- if not _temporary_directory:
- return
- try:
- shutil.rmtree(_temporary_directory)
- except OSError:
- pass
- _temporary_directory = None
-
def make_temp_file(suffix='', prefix='', text=True):
- tdata = tlocal()
- if not hasattr(tdata, 'tempdir'):
- tdata.tempdir = tempfile.mkdtemp()
- atexit.register(clean_up_temporary_directory)
+ if not hasattr(_tdata, 'tempdir'):
+ _tdata.tempdir = tempfile.mkdtemp()
+ _tmpdirs.append(_tdata.tempdir)
fid, name = tempfile.mkstemp(suffix=suffix,
prefix=prefix,
- dir=tdata.tempdir,
+ dir=_tdata.tempdir,
text=text)
fo = os.fdopen(fid, 'w')
return fo, name
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index 636165bd5..c53f45531 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import distutils.msvc9compiler
from distutils.msvc9compiler import *
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
index 4c3658d5c..78a386d5d 100644
--- a/numpy/distutils/msvccompiler.py
+++ b/numpy/distutils/msvccompiler.py
@@ -1,3 +1,5 @@
+from __future__ import division, absolute_import, print_function
+
import os
import distutils.msvccompiler
from distutils.msvccompiler import *
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 6156439e1..1c801fd9c 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -141,8 +141,7 @@ class LibraryInfo(object):
return _escape_backslash(val)
def __str__(self):
- m = ['Name: %s' % self.name]
- m.append('Description: %s' % self.description)
+ m = ['Name: %s' % self.name, 'Description: %s' % self.description]
if self.requires:
m.append('Requires:')
else:
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 0da13a7df..94436243e 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -470,15 +470,13 @@ class system_info(object):
):
self.__class__.info = {}
self.local_prefixes = []
- defaults = {}
- defaults['library_dirs'] = os.pathsep.join(default_lib_dirs)
- defaults['include_dirs'] = os.pathsep.join(default_include_dirs)
- defaults['runtime_library_dirs'] = os.pathsep.join(default_runtime_dirs)
- defaults['rpath'] = ''
- defaults['src_dirs'] = os.pathsep.join(default_src_dirs)
- defaults['search_static_first'] = str(self.search_static_first)
- defaults['extra_compile_args'] = ''
- defaults['extra_link_args'] = ''
+ defaults = {'library_dirs': os.pathsep.join(default_lib_dirs),
+ 'include_dirs': os.pathsep.join(default_include_dirs),
+ 'runtime_library_dirs': os.pathsep.join(default_runtime_dirs),
+ 'rpath': '',
+ 'src_dirs': os.pathsep.join(default_src_dirs),
+ 'search_static_first': str(self.search_static_first),
+ 'extra_compile_args': '', 'extra_link_args': ''}
self.cp = ConfigParser(defaults)
self.files = []
self.files.extend(get_standard_file('.numpy-site.cfg'))
@@ -1680,9 +1678,64 @@ class blas_info(system_info):
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
- info['language'] = 'f77' # XXX: is it generally true?
+ if platform.system() == 'Windows':
+ # The check for windows is needed because has_cblas uses the
+ # same compiler that was used to compile Python and msvc is
+ # often not installed when mingw is being used. This rough
+ # treatment is not desirable, but windows is tricky.
+ info['language'] = 'f77' # XXX: is it generally true?
+ else:
+ lib = self.has_cblas(info)
+ if lib is not None:
+ info['language'] = 'c'
+ info['libraries'] = [lib]
+ info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
+ def has_cblas(self, info):
+ # primitive cblas check by looking for the header and trying to link
+ # cblas or blas
+ res = False
+ c = distutils.ccompiler.new_compiler()
+ tmpdir = tempfile.mkdtemp()
+ s = """#include <cblas.h>
+ int main(int argc, const char *argv[])
+ {
+ double a[4] = {1,2,3,4};
+ double b[4] = {5,6,7,8};
+ return cblas_ddot(4, a, 1, b, 1) > 10;
+ }"""
+ src = os.path.join(tmpdir, 'source.c')
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+
+ try:
+ # check we can compile (find headers)
+ obj = c.compile([src], output_dir=tmpdir,
+ include_dirs=self.get_include_dirs())
+
+ # check we can link (find library)
+ # some systems have separate cblas and blas libs. First
+ # check for cblas lib, and if not present check for blas lib.
+ try:
+ c.link_executable(obj, os.path.join(tmpdir, "a.out"),
+ libraries=["cblas"],
+ library_dirs=info['library_dirs'],
+ extra_postargs=info.get('extra_link_args', []))
+ res = "cblas"
+ except distutils.ccompiler.LinkError:
+ c.link_executable(obj, os.path.join(tmpdir, "a.out"),
+ libraries=["blas"],
+ library_dirs=info['library_dirs'],
+ extra_postargs=info.get('extra_link_args', []))
+ res = "blas"
+ except distutils.ccompiler.CompileError:
+ res = None
+ finally:
+ shutil.rmtree(tmpdir)
+ return res
+
class openblas_info(blas_info):
section = 'openblas'
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index f9d45319e..58ad05a59 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -64,15 +64,14 @@ class test_system_info(system_info):
):
self.__class__.info = {}
self.local_prefixes = []
- defaults = {}
- defaults['library_dirs'] = ''
- defaults['include_dirs'] = ''
- defaults['runtime_library_dirs'] = ''
- defaults['rpath'] = ''
- defaults['src_dirs'] = ''
- defaults['search_static_first'] = "0"
- defaults['extra_compile_args'] = ''
- defaults['extra_link_args'] = ''
+ defaults = {'library_dirs': '',
+ 'include_dirs': '',
+ 'runtime_library_dirs': '',
+ 'rpath': '',
+ 'src_dirs': '',
+ 'search_static_first': "0",
+ 'extra_compile_args': '',
+ 'extra_link_args': ''}
self.cp = ConfigParser(defaults)
# We have to parse the config files afterwards
# to have a consistent temporary filepath
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
index 8f6d25619..cb8f261c1 100644
--- a/numpy/f2py/__main__.py
+++ b/numpy/f2py/__main__.py
@@ -1,4 +1,6 @@
# See http://cens.ioc.ee/projects/f2py2e/
+from __future__ import division, print_function
+
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 6e5293cc8..5270cabb5 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -519,8 +519,7 @@ def sign2map(a, var):
if k[:4] == 'out=':
out_a = k[4:]
break
- ret = {'varname': a, 'outvarname': out_a}
- ret['ctype'] = getctype(var)
+ ret = {'varname': a, 'outvarname': out_a, 'ctype': getctype(var)}
intent_flags = []
for f, s in isintent_dict.items():
if f(var):
@@ -823,8 +822,7 @@ void
def common_sign2map(a, var): # obsolute
- ret = {'varname': a}
- ret['ctype'] = getctype(var)
+ ret = {'varname': a, 'ctype': getctype(var)}
if isstringarray(var):
ret['ctype'] = 'char'
if ret['ctype'] in c2capi_map:
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 9551c099e..48bb7c0f4 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -5,13 +5,11 @@ import os
import sys
import copy
-import nose
-
from numpy import (
array, alltrue, ndarray, zeros, dtype, intp, clongdouble
)
from numpy.testing import (
- run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal, SkipTest
)
from numpy.core.multiarray import typeinfo
import util
@@ -28,7 +26,7 @@ def setup():
# Check compiler availability first
if not util.has_c_compiler():
- raise nose.SkipTest("No C compiler available")
+ raise SkipTest("No C compiler available")
if wrap is None:
config_code = """
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 5b4e072e7..8d06d9680 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -17,10 +17,9 @@ import textwrap
import re
import random
-import nose
-
from numpy.compat import asbytes, asstr
import numpy.f2py
+from numpy.testing import SkipTest
try:
from hashlib import md5
@@ -334,7 +333,7 @@ class F2PyTest(object):
# Check compiler availability first
if not has_c_compiler():
- raise nose.SkipTest("No C compiler available")
+ raise SkipTest("No C compiler available")
codes = []
if self.sources:
@@ -350,9 +349,9 @@ class F2PyTest(object):
elif fn.endswith('.f90'):
needs_f90 = True
if needs_f77 and not has_f77_compiler():
- raise nose.SkipTest("No Fortran 77 compiler available")
+ raise SkipTest("No Fortran 77 compiler available")
if needs_f90 and not has_f90_compiler():
- raise nose.SkipTest("No Fortran 90 compiler available")
+ raise SkipTest("No Fortran 90 compiler available")
# Build the module
if self.code is not None:
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 66a1b356c..a0f2c5497 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -265,8 +265,7 @@ def header_data_from_array_1_0(array):
This has the appropriate entries for writing its string representation
to the header of the file.
"""
- d = {}
- d['shape'] = array.shape
+ d = {'shape': array.shape}
if array.flags.c_contiguous:
d['fortran_order'] = False
elif array.flags.f_contiguous:
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index fef69dff3..9261dba22 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -336,8 +336,12 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
if (range is not None):
mn, mx = range
if (mn > mx):
- raise AttributeError(
+ raise ValueError(
'max must be larger than min in range parameter.')
+ if not np.all(np.isfinite([mn, mx])):
+ raise ValueError(
+ 'range parameter must be finite.')
+
if isinstance(bins, basestring):
bins = _hist_optim_numbins_estimator(a, bins)
@@ -422,7 +426,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
- raise AttributeError(
+ raise ValueError(
'bins must increase monotonically.')
# Initialize empty histogram
@@ -533,7 +537,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
try:
M = len(bins)
if M != D:
- raise AttributeError(
+ raise ValueError(
'The dimension of bins must be equal to the dimension of the '
' sample x.')
except TypeError:
@@ -551,6 +555,9 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
+ if not np.all(np.isfinite(range)):
+ raise ValueError(
+ 'range parameter must be finite.')
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index de9376300..2f677438b 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -427,7 +427,8 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
- weights to apply to the y-coordinates of the sample points.
+ Weights to apply to the y-coordinates of the sample points. For
+ gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index b2beef0a8..ffbe56721 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -421,18 +421,9 @@ def array_split(ary, indices_or_sections, axis=0):
end = div_points[i + 1]
sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
- # This "kludge" was introduced here to replace arrays shaped (0, 10)
- # or similar with an array shaped (0,).
- # There seems no need for this, so give a FutureWarning to remove later.
- if any(arr.size == 0 and arr.ndim != 1 for arr in sub_arys):
- warnings.warn("in the future np.array_split will retain the shape of "
- "arrays with a zero size, instead of replacing them by "
- "`array([])`, which always has a shape of (0,).",
- FutureWarning)
- sub_arys = _replace_zero_by_x_arrays(sub_arys)
-
return sub_arys
+
def split(ary,indices_or_sections,axis=0):
"""
Split an array into multiple sub-arrays.
@@ -808,6 +799,9 @@ def tile(A, reps):
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
+ Note : Although tile may be used for broadcasting, it is strongly
+ recommended to use numpy's broadcasting operations and functions.
+
Parameters
----------
A : array_like
@@ -823,6 +817,7 @@ def tile(A, reps):
See Also
--------
repeat : Repeat elements of an array.
+ broadcast_to : Broadcast an array to a new shape
Examples
--------
@@ -846,6 +841,12 @@ def tile(A, reps):
[1, 2],
[3, 4]])
+ >>> c = np.array([1,2,3,4])
+ >>> np.tile(c,(4,1))
+ array([[1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4],
+ [1, 2, 3, 4]])
"""
try:
tup = tuple(reps)
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 090f71f67..f4bece352 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -7,7 +7,7 @@ from shutil import rmtree
from numpy.compat import asbytes
from numpy.testing import (
- run_module_suite, TestCase, assert_
+ run_module_suite, TestCase, assert_, SkipTest
)
import numpy.lib._datasource as datasource
@@ -137,8 +137,7 @@ class TestDataSourceOpen(TestCase):
import gzip
except ImportError:
# We don't have the gzip capabilities to test.
- import nose
- raise nose.SkipTest
+ raise SkipTest
# Test datasource's internal file_opener for Gzip files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.gz')
fp = gzip.open(filepath, 'w')
@@ -154,8 +153,7 @@ class TestDataSourceOpen(TestCase):
import bz2
except ImportError:
# We don't have the bz2 capabilities to test.
- import nose
- raise nose.SkipTest
+ raise SkipTest
# Test datasource's internal file_opener for BZip2 files.
filepath = os.path.join(self.tmpdir, 'foobar.txt.bz2')
fp = bz2.BZ2File(filepath, 'w')
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 4f8a65148..1bf65fa61 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -287,7 +287,7 @@ import numpy as np
from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
run_module_suite, assert_, assert_array_equal, assert_raises, raises,
- dec
+ dec, SkipTest
)
from numpy.lib import format
@@ -812,7 +812,6 @@ def test_bad_header():
def test_large_file_support():
- from nose import SkipTest
if (sys.platform == 'win32' or sys.platform == 'cygwin'):
raise SkipTest("Unknown if Windows has sparse filesystems")
# try creating a large sparse file
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index cc53c2b8e..a5ac78e33 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1267,6 +1267,13 @@ class TestHistogram(TestCase):
assert_array_equal(a, np.array([0]))
assert_array_equal(b, np.array([0, 1]))
+ def test_finite_range(self):
+ # Normal ranges should be fine
+ vals = np.linspace(0.0, 1.0, num=100)
+ histogram(vals, range=[0.25,0.75])
+ assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
+ assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+
class TestHistogramOptimBinNums(TestCase):
"""
@@ -1489,6 +1496,16 @@ class TestHistogramdd(TestCase):
assert_(hist[0] == 0.0)
assert_(hist[1] == 0.0)
+ def test_finite_range(self):
+ vals = np.random.random((100,3))
+ histogramdd(vals, range=[[0.0,1.0],[0.25,0.75],[0.25,0.5]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0,1.0],[0.25,0.75],[0.25,np.inf]])
+ assert_raises(ValueError, histogramdd, vals,
+ range=[[0.0,1.0],[np.nan,0.75],[0.25,0.5]])
+
+
+
class TestUnique(TestCase):
@@ -1957,10 +1974,42 @@ class TestInterp(TestCase):
assert_almost_equal(np.interp(x0, x, y), x0)
def test_right_left_behavior(self):
- assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1])
- assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0])
- assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0), [0, 1, 0])
+ # Needs range of sizes to test different code paths.
+ # size ==1 is special cased, 1 < size < 5 is linear search, and
+ # size >= 5 goes through local search and possibly binary search.
+ for size in range(1, 10):
+ xp = np.arange(size, dtype=np.double)
+ yp = np.ones(size, dtype=np.double)
+ incpts = np.array([-1, 0, size - 1, size], dtype=np.double)
+ decpts = incpts[::-1]
+
+ incres = interp(incpts, xp, yp)
+ decres = interp(decpts, xp, yp)
+ inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0)
+ decres = interp(decpts, xp, yp, left=0)
+ inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, right=2)
+ decres = interp(decpts, xp, yp, right=2)
+ inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
+
+ incres = interp(incpts, xp, yp, left=0, right=2)
+ decres = interp(decpts, xp, yp, left=0, right=2)
+ inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+ dectgt = inctgt[::-1]
+ assert_equal(incres, inctgt)
+ assert_equal(decres, dectgt)
def test_scalar_interpolation_point(self):
x = np.linspace(0, 1, 5)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index f4ce67805..af904e96a 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1905,12 +1905,17 @@ def test_load_refcount():
np.savez(f, [1, 2, 3])
f.seek(0)
- gc.collect()
- n_before = len(gc.get_objects())
- np.load(f)
- n_after = len(gc.get_objects())
-
- assert_equal(n_before, n_after)
+ assert_(gc.isenabled())
+ gc.disable()
+ try:
+ gc.collect()
+ np.load(f)
+ # gc.collect returns the number of unreachable objects in cycles that
+ # were found -- we are checking that no cycles were created by np.load
+ n_objects_in_cycles = gc.collect()
+ finally:
+ gc.enable()
+ assert_equal(n_objects_in_cycles, 0)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 186e8960d..0de084ef9 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -1,5 +1,6 @@
-import numpy as np
+from __future__ import division, absolute_import, print_function
+import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 8ab72b9f9..3f05f80c0 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -103,21 +103,17 @@ class TestArraySplit(TestCase):
def test_integer_split_2D_rows(self):
a = np.array([np.arange(10), np.arange(10)])
- res = assert_warns(FutureWarning, array_split, a, 3, axis=0)
-
- # After removing the FutureWarning, the last should be zeros((0, 10))
- desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
- np.array([])]
- compare_results(res, desired)
+ res = array_split(a, 3, axis=0)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
# Same thing for manual splits:
- res = assert_warns(FutureWarning, array_split, a, [0, 1, 2], axis=0)
-
- # After removing the FutureWarning, the last should be zeros((0, 10))
- desired = [np.array([]), np.array([np.arange(10)]),
- np.array([np.arange(10)])]
- compare_results(res, desired)
+ res = array_split(a, [0, 1, 2], axis=0)
+ tgt = [np.zeros((0, 10)), np.array([np.arange(10)]),
+ np.array([np.arange(10)])]
+ compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
def test_integer_split_2D_cols(self):
@@ -132,12 +128,10 @@ class TestArraySplit(TestCase):
""" This will fail if we change default axis
"""
a = np.array([np.arange(10), np.arange(10)])
- res = assert_warns(FutureWarning, array_split, a, 3)
-
- # After removing the FutureWarning, the last should be zeros((0, 10))
- desired = [np.array([np.arange(10)]), np.array([np.arange(10)]),
- np.array([])]
- compare_results(res, desired)
+ res = array_split(a, 3)
+ tgt = [np.array([np.arange(10)]), np.array([np.arange(10)]),
+ np.zeros((0, 10))]
+ compare_results(res, tgt)
assert_(a.dtype.type is res[-1].dtype.type)
# perhaps should check higher dimensions
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index f5cb3cb77..2e969727b 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1622,7 +1622,7 @@ def pinv(a, rcond=1e-15 ):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
- s[i] = 0.;
+ s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
return wrap(res)
diff --git a/numpy/linalg/tests/test_deprecations.py b/numpy/linalg/tests/test_deprecations.py
index 13d244199..9b6fe343f 100644
--- a/numpy/linalg/tests/test_deprecations.py
+++ b/numpy/linalg/tests/test_deprecations.py
@@ -1,6 +1,8 @@
"""Test deprecation and future warnings.
"""
+from __future__ import division, absolute_import, print_function
+
import numpy as np
from numpy.testing import assert_warns, run_module_suite
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 7c577d86f..afa098f12 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -17,7 +17,7 @@ from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
- dec
+ dec, SkipTest
)
@@ -1215,7 +1215,6 @@ def test_xerbla_override():
# Check that our xerbla has been successfully linked in. If it is not,
# the default xerbla routine is called, which prints a message to stdout
# and may, or may not, abort the process depending on the LAPACK package.
- from nose import SkipTest
XERBLA_OK = 255
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 4ea52d0ab..25e542cd6 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -32,9 +32,12 @@ import numpy.core.numerictypes as ntypes
from numpy import ndarray, amax, amin, iscomplexobj, bool_, _NoValue
from numpy import array as narray
from numpy.lib.function_base import angle
-from numpy.compat import getargspec, formatargspec, long, basestring, unicode, bytes, sixu
+from numpy.compat import (
+ getargspec, formatargspec, long, basestring, unicode, bytes, sixu
+ )
from numpy import expand_dims as n_expand_dims
+
if sys.version_info[0] >= 3:
import pickle
else:
@@ -1245,7 +1248,7 @@ def _recursive_make_descr(datatype, newtype=bool_):
# Is this some kind of composite a la (np.float,2)
elif datatype.subdtype:
mdescr = list(datatype.subdtype)
- mdescr[0] = newtype
+ mdescr[0] = _recursive_make_descr(datatype.subdtype[0], newtype)
return tuple(mdescr)
else:
return newtype
@@ -1492,9 +1495,10 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
shrink : bool, optional
Whether to shrink `m` to ``nomask`` if all its values are False.
dtype : dtype, optional
- Data-type of the output mask. By default, the output mask has
- a dtype of MaskType (bool). If the dtype is flexible, each field
- has a boolean dtype.
+ Data-type of the output mask. By default, the output mask has a
+ dtype of MaskType (bool). If the dtype is flexible, each field has
+ a boolean dtype. This is ignored when `m` is ``nomask``, in which
+ case ``nomask`` is always returned.
Returns
-------
@@ -1544,7 +1548,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
dtype=[('man', '|b1'), ('mouse', '|b1')])
"""
- if m is nomask and shrink:
+ if m is nomask:
return nomask
elif isinstance(m, ndarray):
# We won't return after this point to make sure we can shrink the mask
@@ -2467,25 +2471,18 @@ def flatten_structured_array(a):
return out
-class _arraymethod(object):
+def _arraymethod(funcname, onmask=True):
"""
- Define a wrapper for basic array methods.
+ Return a class method wrapper around a basic array method.
- Upon call, returns a masked array, where the new ``_data`` array is
- the output of the corresponding method called on the original
- ``_data``.
+ Creates a class method which returns a masked array, where the new
+ ``_data`` array is the output of the corresponding basic method called
+ on the original ``_data``.
If `onmask` is True, the new mask is the output of the method called
on the initial mask. Otherwise, the new mask is just a reference
to the initial mask.
- Attributes
- ----------
- _onmask : bool
- Holds the `onmask` parameter.
- obj : object
- The object calling `_arraymethod`.
-
Parameters
----------
funcname : str
@@ -2495,47 +2492,31 @@ class _arraymethod(object):
alone (False). Default is True. Make available as `_onmask`
attribute.
- """
-
- def __init__(self, funcname, onmask=True):
- self.__name__ = funcname
- self._onmask = onmask
- self.obj = None
- self.__doc__ = self.getdoc()
-
- def getdoc(self):
- "Return the doc of the function (from the doc of the method)."
- methdoc = getattr(ndarray, self.__name__, None) or \
- getattr(np, self.__name__, None)
- if methdoc is not None:
- return methdoc.__doc__
-
- def __get__(self, obj, objtype=None):
- self.obj = obj
- return self
+ Returns
+ -------
+ method : instancemethod
+ Class method wrapper of the specified basic array method.
- def __call__(self, *args, **params):
- methodname = self.__name__
- instance = self.obj
- # Fallback : if the instance has not been initialized, use the first
- # arg
- if instance is None:
- args = list(args)
- instance = args.pop(0)
- data = instance._data
- mask = instance._mask
- cls = type(instance)
- result = getattr(data, methodname)(*args, **params).view(cls)
- result._update_from(instance)
+ """
+ def wrapped_method(self, *args, **params):
+ result = getattr(self._data, funcname)(*args, **params)
+ result = result.view(type(self))
+ result._update_from(self)
+ mask = self._mask
if result.ndim:
- if not self._onmask:
+ if not onmask:
result.__setmask__(mask)
elif mask is not nomask:
- result.__setmask__(getattr(mask, methodname)(*args, **params))
+ result.__setmask__(getattr(mask, funcname)(*args, **params))
else:
if mask.ndim and (not mask.dtype.names and mask.all()):
return masked
return result
+ methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
+ if methdoc is not None:
+ wrapped_method.__doc__ = methdoc.__doc__
+ wrapped_method.__name__ = funcname
+ return wrapped_method
class MaskedIterator(object):
@@ -2703,6 +2684,8 @@ class MaskedArray(ndarray):
_defaultmask = nomask
_defaulthardmask = False
_baseclass = ndarray
+ # Maximum number of elements per axis used when printing an array.
+ _print_width = 100
def __new__(cls, data=None, mask=nomask, dtype=None, copy=False,
subok=True, ndmin=0, fill_value=None,
@@ -2775,13 +2758,19 @@ class MaskedArray(ndarray):
_data._sharedmask = True
else:
# Case 2. : With a mask in input.
- # Read the mask with the current mdtype
- try:
- mask = np.array(mask, copy=copy, dtype=mdtype)
- # Or assume it's a sequence of bool/int
- except TypeError:
- mask = np.array([tuple([m] * len(mdtype)) for m in mask],
- dtype=mdtype)
+ # If mask is boolean, create an array of True or False
+ if mask is True and mdtype == MaskType:
+ mask = np.ones(_data.shape, dtype=mdtype)
+ elif mask is False and mdtype == MaskType:
+ mask = np.zeros(_data.shape, dtype=mdtype)
+ else:
+ # Read the mask with the current mdtype
+ try:
+ mask = np.array(mask, copy=copy, dtype=mdtype)
+ # Or assume it's a sequence of bool/int
+ except TypeError:
+ mask = np.array([tuple([m] * len(mdtype)) for m in mask],
+ dtype=mdtype)
# Make sure the mask and the data have the same shape
if mask.shape != _data.shape:
(nd, nm) = (_data.size, mask.size)
@@ -3714,7 +3703,7 @@ class MaskedArray(ndarray):
if m is nomask:
res = self._data
else:
- if m.shape == ():
+ if m.shape == () and m.itemsize==len(m.dtype):
if m.dtype.names:
m = m.view((bool, len(m.dtype)))
if m.any():
@@ -3729,8 +3718,19 @@ class MaskedArray(ndarray):
# convert to object array to make filled work
names = self.dtype.names
if names is None:
- res = self._data.astype("O")
- res.view(ndarray)[m] = f
+ data = self._data
+ mask = m
+ # For big arrays, to avoid a costly conversion to the
+ # object dtype, extract the corners before the conversion.
+ for axis in range(self.ndim):
+ if data.shape[axis] > self._print_width:
+ ind = self._print_width // 2
+ arr = np.split(data, (ind, -ind), axis=axis)
+ data = np.concatenate((arr[0], arr[2]), axis=axis)
+ arr = np.split(mask, (ind, -ind), axis=axis)
+ mask = np.concatenate((arr[0], arr[2]), axis=axis)
+ res = data.astype("O")
+ res.view(ndarray)[mask] = f
else:
rdtype = _recursive_make_descr(self.dtype, "O")
res = self._data.astype(rdtype)
@@ -4674,24 +4674,44 @@ class MaskedArray(ndarray):
return D.astype(dtype).filled(0).sum(axis=None, out=out)
trace.__doc__ = ndarray.trace.__doc__
- def dot(self, other, out=None):
- am = ~getmaskarray(self)
- bm = ~getmaskarray(other)
- if out is None:
- d = np.dot(filled(self, 0), filled(other, 0))
- m = ~np.dot(am, bm)
- if d.ndim == 0:
- d = np.asarray(d)
- r = d.view(get_masked_subclass(self, other))
- r.__setmask__(m)
- return r
- d = self.filled(0).dot(other.filled(0), out._data)
- if out.mask.shape != d.shape:
- out._mask = np.empty(d.shape, MaskType)
- np.dot(am, bm, out._mask)
- np.logical_not(out._mask, out._mask)
- return out
- dot.__doc__ = ndarray.dot.__doc__
+ def dot(self, b, out=None, strict=False):
+ """
+        a.dot(b, out=None, strict=False)
+
+ Masked dot product of two arrays. Note that `out` and `strict` are
+ located in different positions than in `ma.dot`. In order to
+ maintain compatibility with the functional version, it is
+ recommended that the optional arguments be treated as keyword only.
+ At some point that may be mandatory.
+
+ .. versionadded:: 1.10.0
+
+ Parameters
+ ----------
+ b : masked_array_like
+            Input array.
+ out : masked_array, optional
+ Output argument. This must have the exact kind that would be
+ returned if it was not used. In particular, it must have the
+ right type, must be C-contiguous, and its dtype must be the
+ dtype that would be returned for `ma.dot(a,b)`. This is a
+ performance feature. Therefore, if these conditions are not
+ met, an exception is raised, instead of attempting to be
+ flexible.
+ strict : bool, optional
+ Whether masked data are propagated (True) or set to 0 (False)
+ for the computation. Default is False. Propagating the mask
+ means that if a masked value appears in a row or column, the
+ whole row or column is considered masked.
+
+ .. versionadded:: 1.10.2
+
+ See Also
+ --------
+ numpy.ma.dot : equivalent function
+
+ """
+ return dot(self, b, out=out, strict=strict)
def sum(self, axis=None, dtype=None, out=None):
"""
@@ -5849,6 +5869,18 @@ class mvoid(MaskedArray):
"""
m = self._mask
+ if isinstance(m[indx], ndarray):
+ # Can happen when indx is a multi-dimensional field:
+ # A = ma.masked_array(data=[([0,1],)], mask=[([True,
+ # False],)], dtype=[("A", ">i2", (2,))])
+ # x = A[0]; y = x["A"]; then y.mask["A"].size==2
+ # and we can not say masked/unmasked.
+ # The result is no longer mvoid!
+ # See also issue #6724.
+ return masked_array(
+ data=self._data[indx], mask=m[indx],
+ fill_value=self._fill_value[indx],
+ hard_mask=self._hardmask)
if m is not nomask and m[indx]:
return masked
return self._data[indx]
@@ -5907,7 +5939,7 @@ class mvoid(MaskedArray):
--------
MaskedArray.filled
- """
+ """
return asarray(self).filled(fill_value)[()]
def tolist(self):
@@ -7044,6 +7076,186 @@ def round_(a, decimals=0, out=None):
round = round_
+# Needed by dot, so move here from extras.py. It will still be exported
+# from extras.py for compatibility.
+def mask_rowcols(a, axis=None):
+ """
+ Mask rows and/or columns of a 2D array that contain masked values.
+
+ Mask whole rows and/or columns of a 2D array that contain
+ masked values. The masking behavior is selected using the
+ `axis` parameter.
+
+ - If `axis` is None, rows *and* columns are masked.
+ - If `axis` is 0, only rows are masked.
+ - If `axis` is 1 or -1, only columns are masked.
+
+ Parameters
+ ----------
+ a : array_like, MaskedArray
+ The array to mask. If not a MaskedArray instance (or if no array
+ elements are masked). The result is a MaskedArray with `mask` set
+ to `nomask` (False). Must be a 2D array.
+ axis : int, optional
+ Axis along which to perform the operation. If None, applies to a
+ flattened version of the array.
+
+ Returns
+ -------
+ a : MaskedArray
+ A modified version of the input array, masked depending on the value
+ of the `axis` parameter.
+
+ Raises
+ ------
+ NotImplementedError
+ If input array `a` is not 2D.
+
+ See Also
+ --------
+ mask_rows : Mask rows of a 2D array that contain masked values.
+ mask_cols : Mask cols of a 2D array that contain masked values.
+ masked_where : Mask where a condition is met.
+
+ Notes
+ -----
+ The input array's mask is modified by this function.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a[1, 1] = 1
+ >>> a
+ array([[0, 0, 0],
+ [0, 1, 0],
+ [0, 0, 0]])
+ >>> a = ma.masked_equal(a, 1)
+ >>> a
+ masked_array(data =
+ [[0 0 0]
+ [0 -- 0]
+ [0 0 0]],
+ mask =
+ [[False False False]
+ [False True False]
+ [False False False]],
+ fill_value=999999)
+ >>> ma.mask_rowcols(a)
+ masked_array(data =
+ [[0 -- 0]
+ [-- -- --]
+ [0 -- 0]],
+ mask =
+ [[False True False]
+ [ True True True]
+ [False True False]],
+ fill_value=999999)
+
+ """
+ a = array(a, subok=False)
+ if a.ndim != 2:
+ raise NotImplementedError("mask_rowcols works for 2D arrays only.")
+ m = getmask(a)
+ # Nothing is masked: return a
+ if m is nomask or not m.any():
+ return a
+ maskedval = m.nonzero()
+ a._mask = a._mask.copy()
+ if not axis:
+ a[np.unique(maskedval[0])] = masked
+ if axis in [None, 1, -1]:
+ a[:, np.unique(maskedval[1])] = masked
+ return a
+
+
+# Include masked dot here to avoid import problems in getting it from
+# extras.py. Note that it is not included in __all__, but rather exported
+# from extras in order to avoid backward compatibility problems.
+def dot(a, b, strict=False, out=None):
+ """
+ Return the dot product of two arrays.
+
+ This function is the equivalent of `numpy.dot` that takes masked values
+ into account. Note that `strict` and `out` are in different position
+ than in the method version. In order to maintain compatibility with the
+ corresponding method, it is recommended that the optional arguments be
+ treated as keyword only. At some point that may be mandatory.
+
+ .. note::
+ Works only with 2-D arrays at the moment.
+
+
+ Parameters
+ ----------
+ a, b : masked_array_like
+        Input arrays.
+ strict : bool, optional
+ Whether masked data are propagated (True) or set to 0 (False) for
+ the computation. Default is False. Propagating the mask means that
+ if a masked value appears in a row or column, the whole row or
+ column is considered masked.
+ out : masked_array, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ .. versionadded:: 1.10.2
+
+ See Also
+ --------
+ numpy.dot : Equivalent function for ndarrays.
+
+ Examples
+ --------
+ >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
+ >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
+ >>> np.ma.dot(a, b)
+ masked_array(data =
+ [[21 26]
+ [45 64]],
+ mask =
+ [[False False]
+ [False False]],
+ fill_value = 999999)
+ >>> np.ma.dot(a, b, strict=True)
+ masked_array(data =
+ [[-- --]
+ [-- 64]],
+ mask =
+ [[ True True]
+ [ True False]],
+ fill_value = 999999)
+
+ """
+ # !!!: Works only with 2D arrays. There should be a way to get it to run
+ # with higher dimension
+ if strict and (a.ndim == 2) and (b.ndim == 2):
+ a = mask_rowcols(a, 0)
+ b = mask_rowcols(b, 1)
+ am = ~getmaskarray(a)
+ bm = ~getmaskarray(b)
+
+ if out is None:
+ d = np.dot(filled(a, 0), filled(b, 0))
+ m = ~np.dot(am, bm)
+ if d.ndim == 0:
+ d = np.asarray(d)
+ r = d.view(get_masked_subclass(a, b))
+ r.__setmask__(m)
+ return r
+ else:
+ d = np.dot(filled(a, 0), filled(b, 0), out._data)
+ if out.mask.shape != d.shape:
+ out._mask = np.empty(d.shape, MaskType)
+ np.dot(am, bm, out._mask)
+ np.logical_not(out._mask, out._mask)
+ return out
+
+
def inner(a, b):
"""
Returns the inner product of a and b for arrays of floating point types.
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index ae4e0cee5..e1d228e73 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -29,7 +29,8 @@ from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
- nomask, ones, sort, zeros, getdata
+ nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
+ mask_rowcols
)
import numpy as np
@@ -846,96 +847,6 @@ def compress_cols(a):
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
-def mask_rowcols(a, axis=None):
- """
- Mask rows and/or columns of a 2D array that contain masked values.
-
- Mask whole rows and/or columns of a 2D array that contain
- masked values. The masking behavior is selected using the
- `axis` parameter.
-
- - If `axis` is None, rows *and* columns are masked.
- - If `axis` is 0, only rows are masked.
- - If `axis` is 1 or -1, only columns are masked.
-
- Parameters
- ----------
- a : array_like, MaskedArray
- The array to mask. If not a MaskedArray instance (or if no array
- elements are masked). The result is a MaskedArray with `mask` set
- to `nomask` (False). Must be a 2D array.
- axis : int, optional
- Axis along which to perform the operation. If None, applies to a
- flattened version of the array.
-
- Returns
- -------
- a : MaskedArray
- A modified version of the input array, masked depending on the value
- of the `axis` parameter.
-
- Raises
- ------
- NotImplementedError
- If input array `a` is not 2D.
-
- See Also
- --------
- mask_rows : Mask rows of a 2D array that contain masked values.
- mask_cols : Mask cols of a 2D array that contain masked values.
- masked_where : Mask where a condition is met.
-
- Notes
- -----
- The input array's mask is modified by this function.
-
- Examples
- --------
- >>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
- >>> a[1, 1] = 1
- >>> a
- array([[0, 0, 0],
- [0, 1, 0],
- [0, 0, 0]])
- >>> a = ma.masked_equal(a, 1)
- >>> a
- masked_array(data =
- [[0 0 0]
- [0 -- 0]
- [0 0 0]],
- mask =
- [[False False False]
- [False True False]
- [False False False]],
- fill_value=999999)
- >>> ma.mask_rowcols(a)
- masked_array(data =
- [[0 -- 0]
- [-- -- --]
- [0 -- 0]],
- mask =
- [[False True False]
- [ True True True]
- [False True False]],
- fill_value=999999)
-
- """
- a = array(a, subok=False)
- if a.ndim != 2:
- raise NotImplementedError("mask_rowcols works for 2D arrays only.")
- m = getmask(a)
- # Nothing is masked: return a
- if m is nomask or not m.any():
- return a
- maskedval = m.nonzero()
- a._mask = a._mask.copy()
- if not axis:
- a[np.unique(maskedval[0])] = masked
- if axis in [None, 1, -1]:
- a[:, np.unique(maskedval[1])] = masked
- return a
-
def mask_rows(a, axis=None):
"""
Mask rows of a 2D array that contain masked values.
@@ -1027,58 +938,6 @@ def mask_cols(a, axis=None):
return mask_rowcols(a, 1)
-def dot(a, b, strict=False):
- """
- Return the dot product of two arrays.
-
- .. note::
- Works only with 2-D arrays at the moment.
-
- This function is the equivalent of `numpy.dot` that takes masked values
- into account, see `numpy.dot` for details.
-
- Parameters
- ----------
- a, b : ndarray
- Inputs arrays.
- strict : bool, optional
- Whether masked data are propagated (True) or set to 0 (False) for the
- computation. Default is False.
- Propagating the mask means that if a masked value appears in a row or
- column, the whole row or column is considered masked.
-
- See Also
- --------
- numpy.dot : Equivalent function for ndarrays.
-
- Examples
- --------
- >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
- >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
- >>> np.ma.dot(a, b)
- masked_array(data =
- [[21 26]
- [45 64]],
- mask =
- [[False False]
- [False False]],
- fill_value = 999999)
- >>> np.ma.dot(a, b, strict=True)
- masked_array(data =
- [[-- --]
- [-- 64]],
- mask =
- [[ True True]
- [ True False]],
- fill_value = 999999)
-
- """
- #!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension
- if strict and (a.ndim == 2) and (b.ndim == 2):
- a = mask_rows(a)
- b = mask_cols(b)
- return a.dot(b)
-
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 0a9821254..cecdedf26 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -11,8 +11,10 @@ __author__ = "Pierre GF Gerard-Marchant"
import warnings
import pickle
import operator
+import itertools
from functools import reduce
+
import numpy as np
import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
@@ -189,6 +191,15 @@ class TestMaskedArray(TestCase):
dma_3 = MaskedArray(dma_1, mask=[1, 0, 0, 0] * 6)
fail_if_equal(dma_3.mask, dma_1.mask)
+ x = array([1, 2, 3], mask=True)
+ assert_equal(x._mask, [True, True, True])
+ x = array([1, 2, 3], mask=False)
+ assert_equal(x._mask, [False, False, False])
+ y = array([1, 2, 3], mask=x._mask, copy=False)
+ assert_(np.may_share_memory(x.mask, y.mask))
+ y = array([1, 2, 3], mask=x._mask, copy=True)
+ assert_(not np.may_share_memory(x.mask, y.mask))
+
def test_creation_with_list_of_maskedarrays(self):
# Tests creaating a masked array from alist of masked arrays.
x = array(np.arange(5), mask=[1, 0, 0, 0, 0])
@@ -401,6 +412,14 @@ class TestMaskedArray(TestCase):
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
+ def test_copy_immutable(self):
+ # Tests that the copy method is immutable, GitHub issue #5247
+ a = np.ma.array([1, 2, 3])
+ b = np.ma.array([4, 5, 6])
+ a_copy_method = a.copy
+ b.copy
+ assert_equal(a_copy_method(), [1, 2, 3])
+
def test_deepcopy(self):
from copy import deepcopy
a = array([0, 1, 2], mask=[False, True, False])
@@ -589,6 +608,13 @@ class TestMaskedArray(TestCase):
control = np.array([(0, 1), (2, 0)], dtype=a['B'].dtype)
assert_equal(test, control)
+ # test if mask gets set correctly (see #6760)
+ Z = numpy.ma.zeros(2, numpy.dtype([("A", "(2,2)i1,(2,2)i1", (2,2))]))
+ assert_equal(Z.data.dtype, numpy.dtype([('A', [('f0', 'i1', (2, 2)),
+ ('f1', 'i1', (2, 2))], (2, 2))]))
+ assert_equal(Z.mask.dtype, numpy.dtype([('A', [('f0', '?', (2, 2)),
+ ('f1', '?', (2, 2))], (2, 2))]))
+
def test_filled_w_f_order(self):
# Test filled w/ F-contiguous array
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
@@ -615,6 +641,18 @@ class TestMaskedArray(TestCase):
control = "[(--, (2, --)) (4, (--, 6.0))]"
assert_equal(str(test), control)
+ # Test 0-d array with multi-dimensional dtype
+ t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.0]],
+ 0.0),
+ mask = (False, [[True, False, True],
+ [False, False, True]],
+ False),
+ dtype = "int, (2,3)float, float")
+ control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)"
+ assert_equal(str(t_2d0), control)
+
+
def test_flatten_structured_array(self):
# Test flatten_structured_array on arrays
# On ndarray
@@ -681,6 +719,14 @@ class TestMaskedArray(TestCase):
self.assertTrue(f['a'] is masked)
assert_equal(f[1], 4)
+ # exotic dtype
+ A = masked_array(data=[([0,1],)],
+ mask=[([True, False],)],
+ dtype=[("A", ">i2", (2,))])
+ assert_equal(A[0]["A"], A["A"][0])
+ assert_equal(A[0]["A"], masked_array(data=[0, 1],
+ mask=[True, False], dtype=">i2"))
+
def test_mvoid_iter(self):
# Test iteration on __getitem__
ndtype = [('a', int), ('b', int)]
@@ -3184,7 +3230,7 @@ class TestMaskedArrayMathMethods(TestCase):
assert_almost_equal(r.filled(0), fX.dot(fX))
assert_(r.mask[1,3])
r1 = empty_like(r)
- mX.dot(mX, r1)
+ mX.dot(mX, out=r1)
assert_almost_equal(r, r1)
mYY = mXX.swapaxes(-1, -2)
@@ -3192,7 +3238,7 @@ class TestMaskedArrayMathMethods(TestCase):
r = mXX.dot(mYY)
assert_almost_equal(r.filled(0), fXX.dot(fYY))
r1 = empty_like(r)
- mXX.dot(mYY, r1)
+ mXX.dot(mYY, out=r1)
assert_almost_equal(r, r1)
def test_dot_shape_mismatch(self):
@@ -3808,6 +3854,15 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test.dtype, bdtype)
assert_equal(test, np.array([(0, 0), (0, 1)], dtype=bdtype))
+ # test that nomask is returned when m is nomask.
+ bools = [True, False]
+ dtypes = [MaskType, np.float]
+ msgformat = 'copy=%s, shrink=%s, dtype=%s'
+ for cpy, shr, dt in itertools.product(bools, bools, dtypes):
+ res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
+ assert_(res is nomask, msgformat % (cpy, shr, dt))
+
+
def test_mask_or(self):
# Initialize
mtype = [('a', np.bool), ('b', np.bool)]
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index c41c629fc..6138d0573 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -538,26 +538,26 @@ class TestCompressFunctions(TestCase):
m = [1, 0, 0, 0, 0, 0]
a = masked_array(n, mask=m).reshape(2, 3)
b = masked_array(n, mask=m).reshape(3, 2)
- c = dot(a, b, True)
+ c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 1], [1, 0]])
- c = dot(b, a, True)
+ c = dot(b, a, strict=True)
assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
- c = dot(a, b, False)
+ c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
- c = dot(b, a, False)
+ c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
m = [0, 0, 0, 0, 0, 1]
a = masked_array(n, mask=m).reshape(2, 3)
b = masked_array(n, mask=m).reshape(3, 2)
- c = dot(a, b, True)
+ c = dot(a, b, strict=True)
assert_equal(c.mask, [[0, 1], [1, 1]])
- c = dot(b, a, True)
+ c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]])
- c = dot(a, b, False)
+ c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
assert_equal(c, dot(a, b))
- c = dot(b, a, False)
+ c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
m = [0, 0, 0, 0, 0, 0]
@@ -570,37 +570,53 @@ class TestCompressFunctions(TestCase):
#
a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
- c = dot(a, b, True)
+ c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 1], [0, 0]])
- c = dot(a, b, False)
+ c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
- c = dot(b, a, True)
+ c = dot(b, a, strict=True)
assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
- c = dot(b, a, False)
+ c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
- c = dot(a, b, True)
+ c = dot(a, b, strict=True)
assert_equal(c.mask, [[0, 0], [1, 1]])
c = dot(a, b)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
- c = dot(b, a, True)
+ c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
- c = dot(b, a, False)
+ c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
#
a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
- c = dot(a, b, True)
+ c = dot(a, b, strict=True)
assert_equal(c.mask, [[1, 0], [1, 1]])
- c = dot(a, b, False)
+ c = dot(a, b, strict=False)
assert_equal(c, np.dot(a.filled(0), b.filled(0)))
- c = dot(b, a, True)
+ c = dot(b, a, strict=True)
assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
- c = dot(b, a, False)
+ c = dot(b, a, strict=False)
assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+ def test_dot_returns_maskedarray(self):
+ # See gh-6611
+ a = np.eye(3)
+ b = array(a)
+ assert_(type(dot(a, a)) is MaskedArray)
+ assert_(type(dot(a, b)) is MaskedArray)
+ assert_(type(dot(b, a)) is MaskedArray)
+ assert_(type(dot(b, b)) is MaskedArray)
+
+ def test_dot_out(self):
+ a = array(np.eye(3))
+ out = array(np.zeros((3, 3)))
+ res = dot(a, a, out=out)
+ assert_(res is out)
+ assert_equal(a, res)
+
class TestApplyAlongAxis(TestCase):
# Tests 2D functions
diff --git a/numpy/random/info.py b/numpy/random/info.py
index 396e62381..be9c8d9bd 100644
--- a/numpy/random/info.py
+++ b/numpy/random/info.py
@@ -13,6 +13,8 @@ random_integers Uniformly distributed integers in a given range.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
seed Seed the random number generator.
+choice Random sample from 1-D array.
+
==================== =========================================================
==================== =========================================================
@@ -91,6 +93,8 @@ __all__ = [
'binomial',
'bytes',
'chisquare',
+ 'choice',
+ 'dirichlet',
'exponential',
'f',
'gamma',
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index f5ee6d8c1..39004178d 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -231,6 +231,9 @@ double rk_chisquare(rk_state *state, double df)
double rk_noncentral_chisquare(rk_state *state, double df, double nonc)
{
+ if (nonc == 0){
+ return rk_chisquare(state, df);
+ }
if(1 < df)
{
const double Chi2 = rk_chisquare(state, df - 1);
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 97ea9506e..080591e5e 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -127,6 +127,7 @@ cdef extern from "initarray.h":
# Initialize numpy
import_array()
+cimport cython
import numpy as np
import operator
import warnings
@@ -2219,7 +2220,7 @@ cdef class RandomState:
Degrees of freedom, should be > 0 as of Numpy 1.10,
should be > 1 for earlier versions.
nonc : float
- Non-centrality, should be > 0.
+ Non-centrality, should be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
@@ -2285,8 +2286,8 @@ cdef class RandomState:
if not PyErr_Occurred():
if fdf <= 0:
raise ValueError("df <= 0")
- if fnonc <= 0:
- raise ValueError("nonc <= 0")
+ if fnonc < 0:
+ raise ValueError("nonc < 0")
return cont2_array_sc(self.internal_state, rk_noncentral_chisquare,
size, fdf, fnonc, self.lock)
@@ -2296,7 +2297,7 @@ cdef class RandomState:
ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
if np.any(np.less_equal(odf, 0.0)):
raise ValueError("df <= 0")
- if np.any(np.less_equal(ononc, 0.0)):
+ if np.any(np.less(ononc, 0.0)):
raise ValueError("nonc < 0")
return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
odf, ononc, self.lock)
@@ -4484,7 +4485,7 @@ cdef class RandomState:
mnarr = <ndarray>multin
mnix = <long*>PyArray_DATA(mnarr)
sz = PyArray_SIZE(mnarr)
- with self.lock, nogil:
+ with self.lock, nogil, cython.cdivision(True):
i = 0
while i < sz:
Sum = 1.0
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 596c218a2..ab7f90d82 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -504,6 +504,13 @@ class TestRandomDist(TestCase):
[ 0.332334982684171 , 0.15451287602753125]])
np.testing.assert_array_almost_equal(actual, desired, decimal=14)
+ np.random.seed(self.seed)
+ actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ np.testing.assert_array_almost_equal(actual, desired, decimal=14)
+
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 56962b93c..df3d297ff 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -18,6 +18,7 @@ from __future__ import division, absolute_import, print_function
import warnings
import collections
+from .utils import SkipTest
def slow(t):
"""
@@ -141,14 +142,14 @@ def skipif(skip_condition, msg=None):
def skipper_func(*args, **kwargs):
"""Skipper for normal test functions."""
if skip_val():
- raise nose.SkipTest(get_msg(f, msg))
+ raise SkipTest(get_msg(f, msg))
else:
return f(*args, **kwargs)
def skipper_gen(*args, **kwargs):
"""Skipper for test generators."""
if skip_val():
- raise nose.SkipTest(get_msg(f, msg))
+ raise SkipTest(get_msg(f, msg))
else:
for x in f(*args, **kwargs):
yield x
@@ -166,7 +167,7 @@ def skipif(skip_condition, msg=None):
def knownfailureif(fail_condition, msg=None):
"""
- Make function raise KnownFailureTest exception if given condition is true.
+ Make function raise KnownFailureException exception if given condition is true.
If the condition is a callable, it is used at runtime to dynamically
make the decision. This is useful for tests that may require costly
@@ -178,15 +179,15 @@ def knownfailureif(fail_condition, msg=None):
Flag to determine whether to mark the decorated test as a known
failure (if True) or not (if False).
msg : str, optional
- Message to give on raising a KnownFailureTest exception.
+ Message to give on raising a KnownFailureException exception.
Default is None.
Returns
-------
decorator : function
- Decorator, which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
+ Decorator, which, when applied to a function, causes
+ KnownFailureException to be raised when `fail_condition` is True,
+ and the function to be called normally otherwise.
Notes
-----
@@ -207,11 +208,11 @@ def knownfailureif(fail_condition, msg=None):
# Local import to avoid a hard nose dependency and only incur the
# import time overhead at actual test-time.
import nose
- from .noseclasses import KnownFailureTest
+ from .noseclasses import KnownFailureException
def knownfailer(*args, **kwargs):
if fail_val():
- raise KnownFailureTest(msg)
+ raise KnownFailureException(msg)
else:
return f(*args, **kwargs)
return nose.tools.make_decorator(f)(knownfailer)
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index e6cc10179..197e20bac 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -8,6 +8,7 @@ from __future__ import division, absolute_import, print_function
import os
import doctest
+import inspect
import nose
from nose.plugins import doctests as npd
@@ -16,7 +17,8 @@ from nose.plugins.base import Plugin
from nose.util import src
import numpy
from .nosetester import get_package_name
-import inspect
+from .utils import KnownFailureException, KnownFailureTest
+
# Some of the classes in this module begin with 'Numpy' to clearly distinguish
# them from the plethora of very similar names from nose/unittest/doctest
@@ -298,19 +300,14 @@ class Unplugger(object):
if p.name != self.to_unplug]
-class KnownFailureTest(Exception):
- '''Raise this exception to mark a test as a known failing test.'''
- pass
-
-
-class KnownFailure(ErrorClassPlugin):
+class KnownFailurePlugin(ErrorClassPlugin):
'''Plugin that installs a KNOWNFAIL error class for the
- KnownFailureClass exception. When KnownFailureTest is raised,
+ KnownFailureException class. When KnownFailureException is raised,
the exception will be logged in the knownfail attribute of the
result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
exception will not be counted as an error or failure.'''
enabled = True
- knownfail = ErrorClass(KnownFailureTest,
+ knownfail = ErrorClass(KnownFailureException,
label='KNOWNFAIL',
isfailure=False)
@@ -318,7 +315,7 @@ class KnownFailure(ErrorClassPlugin):
env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
parser.add_option('--no-knownfail', action='store_true',
dest='noKnownFail', default=env.get(env_opt, False),
- help='Disable special handling of KnownFailureTest '
+ help='Disable special handling of KnownFailure '
'exceptions')
def configure(self, options, conf):
@@ -329,6 +326,8 @@ class KnownFailure(ErrorClassPlugin):
if disable:
self.enabled = False
+KnownFailure = KnownFailurePlugin # backwards compat
+
# Class allows us to save the results of the tests in runTests - see runTests
# method docstring for details
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 95ded8d93..551e630ec 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -121,8 +121,8 @@ def run_module_suite(file_to_run=None, argv=None):
argv = argv + [file_to_run]
nose = import_nose()
- from .noseclasses import KnownFailure
- nose.run(argv=argv, addplugins=[KnownFailure()])
+ from .noseclasses import KnownFailurePlugin
+ nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
class NoseTester(object):
@@ -177,7 +177,8 @@ class NoseTester(object):
'swig_ext']
def __init__(self, package=None, raise_warnings=None):
- if raise_warnings is None and '.dev0' in np.__version__:
+ if raise_warnings is None and (
+ not hasattr(np, '__version__') or '.dev0' in np.__version__):
raise_warnings = "develop"
elif raise_warnings is None:
raise_warnings = "release"
@@ -300,8 +301,8 @@ class NoseTester(object):
'--cover-tests', '--cover-erase']
# construct list of plugins
import nose.plugins.builtin
- from .noseclasses import KnownFailure, Unplugger
- plugins = [KnownFailure()]
+ from .noseclasses import KnownFailurePlugin, Unplugger
+ plugins = [KnownFailurePlugin()]
plugins += [p() for p in nose.plugins.builtin.plugins]
# add doctesting if required
doctest_argv = '--with-doctest' in argv
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index f8a5be672..7dbb5a828 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -1,7 +1,7 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import dec, assert_, assert_raises, run_module_suite
-from numpy.testing.noseclasses import KnownFailureTest
+from numpy.testing import (dec, assert_, assert_raises, run_module_suite,
+ SkipTest, KnownFailureException)
import nose
def test_slow():
@@ -40,7 +40,7 @@ def test_skip_functions_hardcoded():
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
- except nose.SkipTest:
+ except SkipTest:
pass
@dec.skipif(False)
@@ -51,7 +51,7 @@ def test_skip_functions_hardcoded():
f2('a')
except DidntSkipException:
pass
- except nose.SkipTest:
+ except SkipTest:
raise Exception('Skipped when not expected to')
@@ -68,7 +68,7 @@ def test_skip_functions_callable():
f1('a')
except DidntSkipException:
raise Exception('Failed to skip')
- except nose.SkipTest:
+ except SkipTest:
pass
@dec.skipif(skip_tester)
@@ -80,7 +80,7 @@ def test_skip_functions_callable():
f2('a')
except DidntSkipException:
pass
- except nose.SkipTest:
+ except SkipTest:
raise Exception('Skipped when not expected to')
@@ -93,7 +93,7 @@ def test_skip_generators_hardcoded():
try:
for j in g1(10):
pass
- except KnownFailureTest:
+ except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@@ -107,7 +107,7 @@ def test_skip_generators_hardcoded():
try:
for j in g2(10):
pass
- except KnownFailureTest:
+ except KnownFailureException:
raise Exception('Marked incorretly as known failure')
except DidntSkipException:
pass
@@ -126,7 +126,7 @@ def test_skip_generators_callable():
skip_flag = 'skip me!'
for j in g1(10):
pass
- except KnownFailureTest:
+ except KnownFailureException:
pass
else:
raise Exception('Failed to mark as known failure')
@@ -141,7 +141,7 @@ def test_skip_generators_callable():
skip_flag = 'do not skip'
for j in g2(10):
pass
- except KnownFailureTest:
+ except KnownFailureException:
raise Exception('Marked incorretly as known failure')
except DidntSkipException:
pass
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index a31fce4af..13aeffe02 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -9,7 +9,8 @@ from numpy.testing import (
assert_array_almost_equal, build_err_msg, raises, assert_raises,
assert_warns, assert_no_warnings, assert_allclose, assert_approx_equal,
assert_array_almost_equal_nulp, assert_array_max_ulp,
- clear_and_catch_warnings, run_module_suite
+ clear_and_catch_warnings, run_module_suite,
+ assert_string_equal
)
import unittest
@@ -715,6 +716,22 @@ class TestULP(unittest.TestCase):
lambda: assert_array_max_ulp(nan, nzero,
maxulp=maxulp))
+class TestStringEqual(unittest.TestCase):
+ def test_simple(self):
+ assert_string_equal("hello", "hello")
+ assert_string_equal("hello\nmultiline", "hello\nmultiline")
+
+ try:
+ assert_string_equal("foo\nbar", "hello\nbar")
+ except AssertionError as exc:
+ assert_equal(str(exc), "Differences in strings:\n- foo\n+ hello")
+ else:
+ raise AssertionError("exception not raised")
+
+ self.assertRaises(AssertionError,
+ lambda: assert_string_equal("foo", "hello"))
+
+
def assert_warn_len_equal(mod, n_in_context):
mod_warns = mod.__warningregistry__
# Python 3.4 appears to clear any pre-existing warnings of the same type,
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index c6d863f94..00f7ce4d1 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -13,6 +13,7 @@ from functools import partial
import shutil
import contextlib
from tempfile import mkdtemp
+
from .nosetester import import_nose
from numpy.core import float32, empty, arange, array_repr, ndarray
@@ -28,11 +29,27 @@ __all__ = ['assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
- 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings']
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+ 'SkipTest', 'KnownFailureException']
-verbose = 0
+class KnownFailureException(Exception):
+ '''Raise this exception to mark a test as a known failing test.'''
+ pass
+
+KnownFailureTest = KnownFailureException # backwards compat
+
+
+# nose.SkipTest is unittest.case.SkipTest
+# import it into the namespace, so that it's available as np.testing.SkipTest
+try:
+ from unittest.case import SkipTest
+except ImportError:
+ # on py2.6 unittest.case is not available. Ask nose for a replacement.
+ SkipTest = import_nose().SkipTest
+
+verbose = 0
def assert_(val, msg=''):
"""
@@ -1018,11 +1035,12 @@ def assert_string_equal(actual, desired):
if not d2.startswith('+ '):
raise AssertionError(repr(d2))
l.append(d2)
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
+ if diff:
+ d3 = diff.pop(0)
+ if d3.startswith('? '):
+ l.append(d3)
+ else:
+ diff.insert(0, d3)
if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
continue
diff_list.extend(l)
@@ -1102,6 +1120,18 @@ def assert_raises(*args,**kwargs):
deemed to have suffered an error, exactly as for an
unexpected exception.
+ Alternatively, `assert_raises` can be used as a context manager:
+
+ >>> from numpy.testing import assert_raises
+ >>> with assert_raises(ZeroDivisionError):
+ ... 1 / 0
+
+ is equivalent to
+
+ >>> def div(x, y):
+ ... return x / y
+ >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
"""
__tracebackhide__ = True # Hide traceback for py.test
nose = import_nose()
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index c7bb125b3..552383d77 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -12,6 +12,7 @@ import numpy as np
from numpy.compat.py3k import basestring, asbytes
from nose.tools import assert_equal
from numpy.testing.decorators import skipif
+from numpy.testing import assert_
skipif_inplace = skipif(isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')))
@@ -63,7 +64,18 @@ def test_f2py():
if sys.platform == 'win32':
f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable)
code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), asbytes('2'))
else:
- f2py_cmd = 'f2py' + basename(sys.executable)[6:]
- code, stdout, stderr = run_command([f2py_cmd, '-v'])
- assert_equal(stdout.strip(), asbytes('2'))
+ # unclear what f2py cmd was installed as, check plain (f2py) and
+ # current python version specific one (f2py3.4)
+ f2py_cmds = ['f2py', 'f2py' + basename(sys.executable)[6:]]
+ success = False
+ for f2py_cmd in f2py_cmds:
+ try:
+ code, stdout, stderr = run_command([f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), asbytes('2'))
+ success = True
+ break
+ except FileNotFoundError:
+ pass
+ assert_(success, "wasn't able to find f2py or %s on commandline" % f2py_cmds[1])
diff --git a/pavement.py b/pavement.py
index f4b1b2b16..ef6c6af52 100644
--- a/pavement.py
+++ b/pavement.py
@@ -54,7 +54,7 @@ TODO
- fix bdist_mpkg: we build the same source twice -> how to make sure we use
the same underlying python for egg install in venv and for bdist_mpkg
"""
-from __future__ import division, absolute_import, print_function
+from __future__ import division, print_function
# What need to be installed to build everything on mac os x:
# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
diff --git a/runtests.py b/runtests.py
index 9376ae55f..957cbef10 100755
--- a/runtests.py
+++ b/runtests.py
@@ -24,6 +24,7 @@ Generate C code coverage listing under build/lcov/:
$ python runtests.py --lcov-html
"""
+from __future__ import division, print_function
#
# This is a generic test runner script for projects using Numpy's test
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index 2ddc11de7..67a519e6d 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -96,7 +96,7 @@
%#endif
%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
%#define array_is_native(a) (PyArray_ISNOTSWAPPED((PyArrayObject*)a))
-%#define array_is_fortran(a) (PyArray_ISFORTRAN((PyArrayObject*)a))
+%#define array_is_fortran(a) (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a))
}
/**********************************************************************/
@@ -295,7 +295,7 @@
Py_INCREF(array_descr(ary));
result = (PyArrayObject*) PyArray_FromArray(ary,
array_descr(ary),
- NPY_FORTRANORDER);
+ NPY_ARRAY_F_CONTIGUOUS);
*is_new_object = 1;
}
return result;
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index b888a7eb0..795915d0b 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -10,6 +10,8 @@ if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then
export LD_PRELOAD=/usr/lib/libeatmydata/libeatmydata.so
fi
+# make some warnings fatal, mostly to match windows compilers
+werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull"
setup_base()
{
@@ -27,16 +29,14 @@ if [ -z "$USE_DEBUG" ]; then
$PIP install .
else
sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- # windows compilers have this requirement
- CFLAGS="$sysflags -Werror=declaration-after-statement -Werror=nonnull -Wlogical-op" $PIP install . 2>&1 | tee log
+ CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log
grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>";
# accept a mysterious memset warning that shows with -flto
test $(grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>" -c) -lt 2;
fi
else
sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- # windows compilers have this requirement
- CFLAGS="$sysflags -Werror=declaration-after-statement -Werror=nonnull" $PYTHON setup.py build_ext --inplace
+ CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
fi
}
@@ -100,7 +100,7 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- $PIP install --pre --upgrade --find-links . numpy
+ $PIP install --pre --no-index --upgrade --find-links=. numpy
$PIP install nose
popd
run_test
diff --git a/tools/win32build/build-cpucaps.py b/tools/win32build/build-cpucaps.py
index d6a9dabc2..0c0a32dc5 100644
--- a/tools/win32build/build-cpucaps.py
+++ b/tools/win32build/build-cpucaps.py
@@ -1,3 +1,5 @@
+from __future__ import division, print_function
+
import os
import subprocess
# build cpucaps.dll