summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/__init__.py6
-rw-r--r--numpy/_globals.py2
-rw-r--r--numpy/_import_tools.py3
-rw-r--r--numpy/add_newdocs.py82
-rw-r--r--numpy/compat/tests/__init__.py0
-rw-r--r--numpy/compat/tests/test_compat.py2
-rw-r--r--numpy/conftest.py54
-rw-r--r--numpy/core/__init__.py2
-rw-r--r--numpy/core/_internal.py21
-rw-r--r--numpy/core/arrayprint.py271
-rw-r--r--numpy/core/code_generators/cversions.txt1
-rw-r--r--numpy/core/code_generators/genapi.py7
-rw-r--r--numpy/core/code_generators/generate_numpy_api.py9
-rw-r--r--numpy/core/code_generators/generate_umath.py4
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py42
-rw-r--r--numpy/core/einsumfunc.py165
-rw-r--r--numpy/core/fromnumeric.py32
-rw-r--r--numpy/core/function_base.py10
-rw-r--r--numpy/core/getlimits.py42
-rw-r--r--numpy/core/include/numpy/npy_cpu.h6
-rw-r--r--numpy/core/include/numpy/npy_endian.h6
-rw-r--r--numpy/core/include/numpy/numpyconfig.h2
-rw-r--r--numpy/core/numeric.py327
-rw-r--r--numpy/core/numerictypes.py112
-rw-r--r--numpy/core/records.py8
-rw-r--r--numpy/core/setup.py8
-rw-r--r--numpy/core/setup_common.py1
-rw-r--r--numpy/core/shape_base.py14
-rw-r--r--numpy/core/src/multiarray/_datetime.h3
-rw-r--r--numpy/core/src/multiarray/alloc.c11
-rw-r--r--numpy/core/src/multiarray/alloc.h12
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c3
-rw-r--r--numpy/core/src/multiarray/arrayobject.c199
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src272
-rw-r--r--numpy/core/src/multiarray/cblasfuncs.c3
-rw-r--r--numpy/core/src/multiarray/compiled_base.c22
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c5
-rw-r--r--numpy/core/src/multiarray/convert.c9
-rw-r--r--numpy/core/src/multiarray/ctors.c12
-rw-r--r--numpy/core/src/multiarray/datetime.c94
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c3
-rw-r--r--numpy/core/src/multiarray/descriptor.c30
-rw-r--r--numpy/core/src/multiarray/descriptor.h4
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c411
-rw-r--r--numpy/core/src/multiarray/einsum.c.src12
-rw-r--r--numpy/core/src/multiarray/getset.c133
-rw-r--r--numpy/core/src/multiarray/item_selection.c39
-rw-r--r--numpy/core/src/multiarray/iterators.c68
-rw-r--r--numpy/core/src/multiarray/mapping.c366
-rw-r--r--numpy/core/src/multiarray/methods.c60
-rw-r--r--numpy/core/src/multiarray/multiarray_tests.c.src156
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c306
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c20
-rw-r--r--numpy/core/src/multiarray/number.c65
-rw-r--r--numpy/core/src/multiarray/number.h1
-rw-r--r--numpy/core/src/multiarray/scalarapi.c2
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src145
-rw-r--r--numpy/core/src/multiarray/shape.c46
-rw-r--r--numpy/core/src/multiarray/strfuncs.c200
-rw-r--r--numpy/core/src/multiarray/strfuncs.h13
-rw-r--r--numpy/core/src/multiarray/temp_elide.c8
-rw-r--r--numpy/core/src/private/mem_overlap.c7
-rw-r--r--numpy/core/src/private/npy_config.h13
-rw-r--r--numpy/core/src/private/ufunc_override.c7
-rw-r--r--numpy/core/src/umath/extobj.c318
-rw-r--r--numpy/core/src/umath/extobj.h32
-rw-r--r--numpy/core/src/umath/loops.c.src59
-rw-r--r--numpy/core/src/umath/loops.h.src3
-rw-r--r--numpy/core/src/umath/override.c130
-rw-r--r--numpy/core/src/umath/reduction.c15
-rw-r--r--numpy/core/src/umath/reduction.h4
-rw-r--r--numpy/core/src/umath/test_rational.c.src9
-rw-r--r--numpy/core/src/umath/ufunc_object.c419
-rw-r--r--numpy/core/src/umath/ufunc_object.h3
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c141
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h9
-rw-r--r--numpy/core/src/umath/umath_tests.c.src6
-rw-r--r--numpy/core/tests/__init__.py0
-rw-r--r--numpy/core/tests/test_abc.py29
-rw-r--r--numpy/core/tests/test_arrayprint.py26
-rw-r--r--numpy/core/tests/test_datetime.py24
-rw-r--r--numpy/core/tests/test_defchararray.py76
-rw-r--r--numpy/core/tests/test_deprecations.py36
-rw-r--r--numpy/core/tests/test_dtype.py174
-rw-r--r--numpy/core/tests/test_einsum.py57
-rw-r--r--numpy/core/tests/test_errstate.py4
-rw-r--r--numpy/core/tests/test_extint128.py2
-rw-r--r--numpy/core/tests/test_function_base.py8
-rw-r--r--numpy/core/tests/test_getlimits.py24
-rw-r--r--numpy/core/tests/test_half.py9
-rw-r--r--numpy/core/tests/test_indexerrors.py4
-rw-r--r--numpy/core/tests/test_indexing.py40
-rw-r--r--numpy/core/tests/test_item_selection.py6
-rw-r--r--numpy/core/tests/test_longdouble.py6
-rw-r--r--numpy/core/tests/test_machar.py13
-rw-r--r--numpy/core/tests/test_memmap.py24
-rw-r--r--numpy/core/tests/test_multiarray.py922
-rw-r--r--numpy/core/tests/test_nditer.py107
-rw-r--r--numpy/core/tests/test_numeric.py286
-rw-r--r--numpy/core/tests/test_numerictypes.py136
-rw-r--r--numpy/core/tests/test_print.py6
-rw-r--r--numpy/core/tests/test_records.py33
-rw-r--r--numpy/core/tests/test_regression.py371
-rw-r--r--numpy/core/tests/test_scalarinherit.py40
-rw-r--r--numpy/core/tests/test_scalarmath.py54
-rw-r--r--numpy/core/tests/test_scalarprint.py4
-rw-r--r--numpy/core/tests/test_shape_base.py54
-rw-r--r--numpy/core/tests/test_ufunc.py148
-rw-r--r--numpy/core/tests/test_umath.py330
-rw-r--r--numpy/core/tests/test_umath_complex.py100
-rw-r--r--numpy/core/tests/test_unicode.py110
-rw-r--r--numpy/ctypeslib.py2
-rw-r--r--numpy/distutils/ccompiler.py22
-rw-r--r--numpy/distutils/command/build_clib.py102
-rw-r--r--numpy/distutils/command/build_ext.py172
-rw-r--r--numpy/distutils/command/config.py2
-rw-r--r--numpy/distutils/cpuinfo.py8
-rw-r--r--numpy/distutils/fcompiler/__init__.py37
-rw-r--r--numpy/distutils/fcompiler/gnu.py196
-rw-r--r--numpy/distutils/fcompiler/intel.py6
-rw-r--r--numpy/distutils/intelccompiler.py4
-rw-r--r--numpy/distutils/mingw32ccompiler.py22
-rw-r--r--numpy/distutils/misc_util.py51
-rw-r--r--numpy/distutils/msvc9compiler.py10
-rw-r--r--numpy/distutils/system_info.py65
-rw-r--r--numpy/distutils/tests/__init__.py0
-rw-r--r--numpy/distutils/tests/test_exec_command.py84
-rw-r--r--numpy/distutils/tests/test_fcompiler_gnu.py9
-rw-r--r--numpy/distutils/tests/test_fcompiler_intel.py6
-rw-r--r--numpy/distutils/tests/test_misc_util.py10
-rw-r--r--numpy/distutils/tests/test_npy_pkg_config.py42
-rw-r--r--numpy/distutils/tests/test_system_info.py35
-rw-r--r--numpy/doc/basics.py70
-rw-r--r--numpy/doc/creation.py2
-rw-r--r--numpy/doc/glossary.py17
-rw-r--r--numpy/doc/indexing.py2
-rw-r--r--numpy/doc/misc.py3
-rw-r--r--numpy/doc/subclassing.py4
-rw-r--r--numpy/f2py/__init__.py2
-rw-r--r--numpy/f2py/auxfuncs.py2
-rw-r--r--numpy/f2py/capi_maps.py4
-rw-r--r--numpy/f2py/cfuncs.py1000
-rwxr-xr-xnumpy/f2py/crackfortran.py66
-rw-r--r--numpy/f2py/f2py_testing.py2
-rw-r--r--numpy/f2py/src/fortranobject.c119
-rw-r--r--numpy/f2py/tests/__init__.py0
-rw-r--r--numpy/f2py/tests/src/common/block.f11
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py16
-rw-r--r--numpy/f2py/tests/test_assumed_shape.py2
-rw-r--r--numpy/f2py/tests/test_block_docstring.py23
-rw-r--r--numpy/f2py/tests/test_callback.py2
-rw-r--r--numpy/f2py/tests/test_common.py26
-rw-r--r--numpy/f2py/tests/test_kind.py2
-rw-r--r--numpy/f2py/tests/test_mixed.py2
-rw-r--r--numpy/f2py/tests/test_parameter.py2
-rw-r--r--numpy/f2py/tests/test_regression.py2
-rw-r--r--numpy/f2py/tests/test_return_character.py2
-rw-r--r--numpy/f2py/tests/test_return_complex.py2
-rw-r--r--numpy/f2py/tests/test_return_integer.py2
-rw-r--r--numpy/f2py/tests/test_return_logical.py2
-rw-r--r--numpy/f2py/tests/test_return_real.py2
-rw-r--r--numpy/f2py/tests/test_size.py11
-rw-r--r--numpy/f2py/tests/test_string.py2
-rw-r--r--numpy/f2py/tests/util.py2
-rw-r--r--numpy/fft/__init__.py2
-rw-r--r--numpy/fft/tests/__init__.py0
-rw-r--r--numpy/fft/tests/test_fftpack.py14
-rw-r--r--numpy/fft/tests/test_helper.py34
-rw-r--r--numpy/lib/__init__.py2
-rw-r--r--numpy/lib/_iotools.py2
-rw-r--r--numpy/lib/arraypad.py30
-rw-r--r--numpy/lib/arraysetops.py13
-rw-r--r--numpy/lib/format.py51
-rw-r--r--numpy/lib/function_base.py170
-rw-r--r--numpy/lib/index_tricks.py4
-rw-r--r--numpy/lib/nanfunctions.py107
-rw-r--r--numpy/lib/npyio.py29
-rw-r--r--numpy/lib/recfunctions.py165
-rw-r--r--numpy/lib/shape_base.py25
-rw-r--r--numpy/lib/stride_tricks.py7
-rw-r--r--numpy/lib/tests/__init__.py0
-rw-r--r--numpy/lib/tests/test__datasource.py70
-rw-r--r--numpy/lib/tests/test__iotools.py13
-rw-r--r--numpy/lib/tests/test_arraypad.py48
-rw-r--r--numpy/lib/tests/test_arraysetops.py26
-rw-r--r--numpy/lib/tests/test_financial.py6
-rw-r--r--numpy/lib/tests/test_format.py9
-rw-r--r--numpy/lib/tests/test_function_base.py268
-rw-r--r--numpy/lib/tests/test_index_tricks.py20
-rw-r--r--numpy/lib/tests/test_io.py82
-rw-r--r--numpy/lib/tests/test_mixins.py5
-rw-r--r--numpy/lib/tests/test_nanfunctions.py18
-rw-r--r--numpy/lib/tests/test_polynomial.py4
-rw-r--r--numpy/lib/tests/test_recfunctions.py139
-rw-r--r--numpy/lib/tests/test_regression.py71
-rw-r--r--numpy/lib/tests/test_shape_base.py73
-rw-r--r--numpy/lib/tests/test_stride_tricks.py8
-rw-r--r--numpy/lib/tests/test_twodim_base.py121
-rw-r--r--numpy/lib/tests/test_type_check.py46
-rw-r--r--numpy/lib/tests/test_ufunclike.py5
-rw-r--r--numpy/lib/twodim_base.py7
-rw-r--r--numpy/lib/type_check.py40
-rw-r--r--numpy/lib/utils.py6
-rw-r--r--numpy/linalg/__init__.py2
-rw-r--r--numpy/linalg/linalg.py169
-rw-r--r--numpy/linalg/tests/__init__.py0
-rw-r--r--numpy/linalg/tests/test_build.py6
-rw-r--r--numpy/linalg/tests/test_linalg.py44
-rw-r--r--numpy/linalg/tests/test_regression.py59
-rw-r--r--numpy/ma/__init__.py2
-rw-r--r--numpy/ma/core.py318
-rw-r--r--numpy/ma/extras.py8
-rw-r--r--numpy/ma/mrecords.py6
-rw-r--r--numpy/ma/tests/__init__.py0
-rw-r--r--numpy/ma/tests/test_core.py613
-rw-r--r--numpy/ma/tests/test_deprecations.py6
-rw-r--r--numpy/ma/tests/test_extras.py73
-rw-r--r--numpy/ma/tests/test_mrecords.py87
-rw-r--r--numpy/ma/tests/test_old_ma.py578
-rw-r--r--numpy/ma/tests/test_regression.py21
-rw-r--r--numpy/ma/tests/test_subclassing.py112
-rw-r--r--numpy/ma/testutils.py12
-rw-r--r--numpy/ma/timer_comparison.py2
-rw-r--r--numpy/matrixlib/__init__.py2
-rw-r--r--numpy/matrixlib/defmatrix.py6
-rw-r--r--numpy/matrixlib/tests/__init__.py0
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py35
-rw-r--r--numpy/matrixlib/tests/test_multiarray.py4
-rw-r--r--numpy/matrixlib/tests/test_numeric.py4
-rw-r--r--numpy/matrixlib/tests/test_regression.py21
-rw-r--r--numpy/polynomial/__init__.py2
-rw-r--r--numpy/polynomial/_polybase.py34
-rw-r--r--numpy/polynomial/chebyshev.py149
-rw-r--r--numpy/polynomial/hermite.py34
-rw-r--r--numpy/polynomial/hermite_e.py34
-rw-r--r--numpy/polynomial/laguerre.py34
-rw-r--r--numpy/polynomial/legendre.py40
-rw-r--r--numpy/polynomial/polynomial.py16
-rw-r--r--numpy/polynomial/polyutils.py4
-rw-r--r--numpy/polynomial/tests/__init__.py0
-rw-r--r--numpy/polynomial/tests/test_chebyshev.py52
-rw-r--r--numpy/polynomial/tests/test_classes.py25
-rw-r--r--numpy/polynomial/tests/test_hermite.py25
-rw-r--r--numpy/polynomial/tests/test_hermite_e.py25
-rw-r--r--numpy/polynomial/tests/test_laguerre.py25
-rw-r--r--numpy/polynomial/tests/test_legendre.py25
-rw-r--r--numpy/polynomial/tests/test_polynomial.py21
-rw-r--r--numpy/polynomial/tests/test_polyutils.py9
-rw-r--r--numpy/polynomial/tests/test_printing.py54
-rw-r--r--numpy/random/__init__.py2
-rw-r--r--numpy/random/mtrand/distributions.c4
-rw-r--r--numpy/random/mtrand/mtrand.pyx59
-rw-r--r--numpy/random/mtrand/numpy.pxd1
-rw-r--r--numpy/random/mtrand/randomkit.c26
-rw-r--r--numpy/random/tests/__init__.py0
-rw-r--r--numpy/random/tests/test_random.py88
-rw-r--r--numpy/random/tests/test_regression.py7
-rw-r--r--numpy/testing/__init__.py4
-rw-r--r--numpy/testing/decorators.py265
-rw-r--r--numpy/testing/nose_tools/__init__.py0
-rw-r--r--numpy/testing/nose_tools/decorators.py282
-rw-r--r--numpy/testing/nose_tools/noseclasses.py366
-rw-r--r--numpy/testing/nose_tools/nosetester.py560
-rw-r--r--numpy/testing/nose_tools/parameterized.py489
-rw-r--r--numpy/testing/nose_tools/utils.py2229
-rw-r--r--numpy/testing/noseclasses.py344
-rw-r--r--numpy/testing/nosetester.py525
-rwxr-xr-xnumpy/testing/setup.py1
-rw-r--r--numpy/testing/tests/__init__.py0
-rw-r--r--numpy/testing/tests/test_decorators.py14
-rw-r--r--numpy/testing/tests/test_utils.py6
-rw-r--r--numpy/testing/utils.py2218
-rw-r--r--numpy/tests/__init__.py0
-rw-r--r--numpy/tests/test_ctypeslib.py46
-rw-r--r--numpy/tests/test_matlib.py2
-rw-r--r--numpy/tests/test_scripts.py7
-rw-r--r--numpy/tests/test_warnings.py6
277 files changed, 12915 insertions, 9875 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 0f1bcf766..db99294bc 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -148,9 +148,9 @@ else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
# no-one else in the world is using it (though I hope not)
- from .testing import Tester
- test = testing.nosetester._numpy_tester().test
- bench = testing.nosetester._numpy_tester().bench
+ from .testing import Tester, _numpy_tester
+ test = _numpy_tester().test
+ bench = _numpy_tester().bench
# Allow distributors to run custom init code
from . import _distributor_init
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 64a84da96..2d7b69bc4 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -53,7 +53,7 @@ class VisibleDeprecationWarning(UserWarning):
pass
-class _NoValue:
+class _NoValue(object):
"""Special keyword value.
This class may be used as the default value assigned to a deprecated
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
index 18ac78d29..cb8bc477c 100644
--- a/numpy/_import_tools.py
+++ b/numpy/_import_tools.py
@@ -303,8 +303,7 @@ class PackageLoader(object):
lines.append(line)
line = tab
line += ' ' + word
- else:
- lines.append(line)
+ lines.append(line)
return '\n'.join(lines)
def get_pkgdocs(self):
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 449196efb..307a8d837 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -931,7 +931,7 @@ add_newdoc('numpy.core.multiarray', 'zeros',
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
- >>> np.zeros((5,), dtype=np.int)
+ >>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
@@ -1038,7 +1038,7 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
Examples
--------
>>> iterable = (x*x for x in range(5))
- >>> np.fromiter(iterable, np.float)
+ >>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
@@ -1158,7 +1158,7 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
- concatenate((a1, a2, ...), axis=0)
+ concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
@@ -1169,6 +1169,10 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
Returns
-------
@@ -1338,7 +1342,8 @@ add_newdoc('numpy.core.multiarray', 'arange',
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
- step size is 1. If `step` is specified, `start` must also be given.
+ step size is 1. If `step` is specified as a position argument,
+ `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
@@ -1589,7 +1594,7 @@ add_newdoc('numpy.core.multiarray', 'lexsort',
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
- can_cast(from, totype, casting = 'safe')
+ can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
@@ -1598,9 +1603,9 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
Parameters
----------
- from : dtype, dtype specifier, scalar, or array
+ from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
- totype : dtype or dtype specifier
+ to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
@@ -1635,9 +1640,9 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
>>> np.can_cast(np.int32, np.int64)
True
- >>> np.can_cast(np.float64, np.complex)
+ >>> np.can_cast(np.float64, complex)
True
- >>> np.can_cast(np.complex, np.float)
+ >>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
@@ -3096,7 +3101,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
- """a.__deepcopy__() -> Deep copy of array.
+ """a.__deepcopy__(memo, /) -> Deep copy of array.
Used if copy.deepcopy is called on an array.
@@ -3292,7 +3297,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
- a.byteswap(inplace)
+ a.byteswap(inplace=False)
Swap the bytes of the array elements
@@ -3315,7 +3320,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
- >>> A.byteswap(True)
+ >>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
@@ -3418,7 +3423,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
- as possible. (Note that this function and :func:numpy.copy are very
+ as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
@@ -3764,7 +3769,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
- a.max(axis=None, out=None)
+ a.max(axis=None, out=None, keepdims=False)
Return the maximum along a given axis.
@@ -5141,7 +5146,7 @@ add_newdoc('numpy.core.multiarray', 'bincount',
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
- values, or if `minlength` is non-positive.
+ values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
@@ -5163,7 +5168,7 @@ add_newdoc('numpy.core.multiarray', 'bincount',
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
- >>> np.bincount(np.arange(5, dtype=np.float))
+ >>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
@@ -5444,9 +5449,11 @@ add_newdoc('numpy.core', 'ufunc',
----------
*x : array_like
Input arrays.
- out : ndarray or tuple of ndarray, optional
+ out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
- must have a shape that the inputs broadcast to.
+ must have a shape that the inputs broadcast to. A tuple of arrays
+ (possible only as a keyword argument) must have length equal to the
+ number of outputs; use `None` for outputs to be allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
@@ -5667,9 +5674,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided, a
- freshly-allocated array is returned.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -5712,7 +5724,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
- accumulate(array, axis=0, dtype=None, out=None, keepdims=None)
+ accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
@@ -5741,11 +5753,14 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
the data-type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided a
- freshly-allocated array is returned.
- keepdims : bool
- Has no effect. Deprecated, and will be removed in future.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
Returns
-------
@@ -5820,9 +5835,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided a
- freshly-allocated array is returned.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
Returns
-------
@@ -6084,7 +6104,7 @@ add_newdoc('numpy.core.multiarray', 'dtype',
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
- >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
+ >>> np.dtype([('hello',(int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
diff --git a/numpy/compat/tests/__init__.py b/numpy/compat/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/compat/tests/__init__.py
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 1ac24401a..b91971d38 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -4,7 +4,7 @@ from os.path import join
from numpy.compat import isfileobj
from numpy.testing import assert_, run_module_suite
-from numpy.testing.utils import tempdir
+from numpy.testing import tempdir
def test_isfileobj():
diff --git a/numpy/conftest.py b/numpy/conftest.py
new file mode 100644
index 000000000..ea4197049
--- /dev/null
+++ b/numpy/conftest.py
@@ -0,0 +1,54 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import pytest
+
+from numpy.core.multiarray_tests import get_fpu_mode
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+ """
+ Check FPU precision mode was not changed during test collection.
+
+ The clumsy way we do it here is mainly necessary because numpy
+ still uses yield tests, which can execute code at test collection
+ time.
+ """
+ global _old_fpu_mode
+
+ mode = get_fpu_mode()
+
+ if _old_fpu_mode is None:
+ _old_fpu_mode = mode
+ elif mode != _old_fpu_mode:
+ _collect_results[item] = (_old_fpu_mode, mode)
+ _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+ """
+ Check FPU precision mode was not changed during the test.
+ """
+ old_mode = get_fpu_mode()
+ yield
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " during the test".format(old_mode, new_mode))
+
+ collect_result = _collect_results.get(request.node)
+ if collect_result is not None:
+ old_mode, new_mode = collect_result
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " when collecting the test".format(old_mode,
+ new_mode))
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index b3a6967e1..5ad27fbe1 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -71,7 +71,7 @@ __all__ += shape_base.__all__
__all__ += einsumfunc.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 10fcbfdfe..004c2762b 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -206,6 +206,8 @@ class dummy_ctype(object):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
+ def __ne__(self, other):
+ return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
@@ -281,20 +283,26 @@ class _ctypes(object):
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
-# Given a datatype and an order object
-# return a new names tuple
-# with the order indicated
def _newnames(datatype, order):
+ """
+ Given a datatype and an order object, return a new names tuple, with the
+ order indicated
+ """
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
+ seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
- raise ValueError("unknown field name: %s" % (name,))
+ if name in seen:
+ raise ValueError("duplicate field name: %s" % (name,))
+ else:
+ raise ValueError("unknown field name: %s" % (name,))
+ seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
@@ -695,10 +703,11 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
- return ('operand type(s) do not implement __array_ufunc__'
- '({!r}, {!r}, {}): {}'
+ return ('operand type(s) all returned NotImplemented from '
+ '__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
+
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index e54f4602a..46fbc9e5d 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -5,7 +5,8 @@ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
-__all__ = ["array2string", "set_printoptions", "get_printoptions"]
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+ "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
@@ -15,6 +16,13 @@ __docformat__ = 'restructuredtext'
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
import sys
import functools
if sys.version_info[0] >= 3:
@@ -28,12 +36,17 @@ else:
except ImportError:
from dummy_thread import get_ident
+import numpy as np
from . import numerictypes as _nt
from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
+from . import multiarray
from .multiarray import (array, format_longfloat, datetime_as_string,
- datetime_data, dtype)
-from .fromnumeric import ravel
-from .numeric import asarray
+ datetime_data, dtype, ndarray)
+from .fromnumeric import ravel, any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+ flexible)
+import warnings
if sys.version_info[0] >= 3:
_MAXINT = sys.maxsize
@@ -218,10 +231,9 @@ def get_printoptions():
return d
def _leading_trailing(a):
- from . import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
- b = _nc.concatenate((a[:_summaryEdgeItems],
+ b = concatenate((a[:_summaryEdgeItems],
a[-_summaryEdgeItems:]))
else:
b = a
@@ -233,7 +245,7 @@ def _leading_trailing(a):
min(len(a), _summaryEdgeItems), 0, -1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
- b = _nc.concatenate(tuple(l))
+ b = concatenate(tuple(l))
return b
def _boolFormatter(x):
@@ -399,7 +411,7 @@ def _recursive_guard(fillvalue='...'):
@_recursive_guard()
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
- style=repr, formatter=None):
+ style=np._NoValue, formatter=None):
"""
Return a string representation of an array.
@@ -425,9 +437,10 @@ def array2string(a, max_line_width=None, precision=None,
The length of the prefix string is used to align the
output correctly.
- style : function, optional
- A function that accepts an ndarray and returns a string. Used only
- when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
+ style : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
@@ -494,6 +507,11 @@ def array2string(a, max_line_width=None, precision=None,
"""
+ # Deprecation 05-16-2017 v1.14
+ if style is not np._NoValue:
+ warnings.warn("'style' argument is deprecated and no longer functional",
+ DeprecationWarning, stacklevel=3)
+
if max_line_width is None:
max_line_width = _line_width
@@ -506,16 +524,7 @@ def array2string(a, max_line_width=None, precision=None,
if formatter is None:
formatter = _formatter
- if a.shape == ():
- x = a.item()
- if a.dtype.fields is not None:
- arr = array([x], dtype=a.dtype)
- format_function = _get_format_function(
- arr, precision, suppress_small, formatter)
- lst = format_function(arr[0])
- else:
- lst = style(x)
- elif functools.reduce(product, a.shape) == 0:
+ if a.size == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
@@ -542,7 +551,7 @@ def _formatArray(a, format_function, rank, max_line_len,
"""
if rank == 0:
- raise ValueError("rank shouldn't be zero.")
+ return format_function(a[()]) + '\n'
if summary_insert and 2*edge_items < len(a):
leading_items = edge_items
@@ -615,9 +624,7 @@ class FloatFormat(object):
pass
def fillFormat(self, data):
- from . import numeric as _nc
-
- with _nc.errstate(all='ignore'):
+ with errstate(all='ignore'):
special = isnan(data) | isinf(data)
valid = not_equal(data, 0) & ~special
non_zero = absolute(data.compress(valid))
@@ -652,7 +659,7 @@ class FloatFormat(object):
precision = 0
precision = min(self.precision, precision)
self.max_str_len = len(str(int(max_val))) + precision + 2
- if _nc.any(special):
+ if any(special):
self.max_str_len = max(self.max_str_len,
len(_nan_str),
len(_inf_str)+1)
@@ -666,9 +673,7 @@ class FloatFormat(object):
self.format = format
def __call__(self, x, strip_zeros=True):
- from . import numeric as _nc
-
- with _nc.errstate(invalid='ignore'):
+ with errstate(invalid='ignore'):
if isnan(x):
if self.sign:
return self.special_fmt % ('+' + _nan_str,)
@@ -809,22 +814,21 @@ class DatetimeFormat(object):
class TimedeltaFormat(object):
def __init__(self, data):
- if data.dtype.kind == 'm':
- nat_value = array(['NaT'], dtype=data.dtype)[0]
- int_dtype = dtype(data.dtype.byteorder + 'i8')
- int_view = data.view(int_dtype)
- v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
- if len(v) > 0:
- # Max str length of non-NaT elements
- max_str_len = max(len(str(maximum.reduce(v))),
- len(str(minimum.reduce(v))))
- else:
- max_str_len = 0
- if len(v) < len(data):
- # data contains a NaT
- max_str_len = max(max_str_len, 5)
- self.format = '%' + str(max_str_len) + 'd'
- self._nat = "'NaT'".rjust(max_str_len)
+ nat_value = array(['NaT'], dtype=data.dtype)[0]
+ int_dtype = dtype(data.dtype.byteorder + 'i8')
+ int_view = data.view(int_dtype)
+ v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
+ if len(v) > 0:
+ # Max str length of non-NaT elements
+ max_str_len = max(len(str(maximum.reduce(v))),
+ len(str(minimum.reduce(v))))
+ else:
+ max_str_len = 0
+ if len(v) < len(data):
+ # data contains a NaT
+ max_str_len = max(max_str_len, 5)
+ self.format = '%' + str(max_str_len) + 'd'
+ self._nat = "'NaT'".rjust(max_str_len)
def __call__(self, x):
# TODO: After NAT == NAT deprecation should be simplified:
@@ -854,3 +858,180 @@ class StructureFormat(object):
for field, format_function in zip(x, self.format_functions):
s += format_function(field) + ", "
return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")"
+
+
+_typelessdata = [int_, float_, complex_]
+if issubclass(intc, int):
+ _typelessdata.append(intc)
+if issubclass(longlong, int):
+ _typelessdata.append(longlong)
+
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small
+ is defined by `precision`, if the precision is 8 then
+ numbers smaller than 5e-9 are represented as zero.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([ 0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([ 0.000001, 0. , 2. , 3. ])'
+
+ """
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ if arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', class_name + "(")
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
+
+ if skipdtype:
+ return "%s(%s)" % (class_name, lst)
+ else:
+ typename = arr.dtype.name
+ # Quote typename in the output if it is "complex".
+ if typename and not (typename[0].isalpha() and typename.isalnum()):
+ typename = "'%s'" % typename
+
+ lf = ' '
+ if issubclass(arr.dtype.type, flexible):
+ if arr.dtype.names:
+ typename = "%s" % str(arr.dtype)
+ else:
+ typename = "'%s'" % str(arr.dtype)
+ lf = '\n'+' '*len(class_name + "(")
+ return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
+
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`. The
+ default is, indirectly, 75.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+def set_string_function(f, repr=True):
+ """
+ Set a Python function to be used when pretty printing arrays.
+
+ Parameters
+ ----------
+ f : function or None
+ Function to be used to pretty print arrays. The function should expect
+ a single array argument and return a string of the representation of
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ Examples
+ --------
+ >>> def pprint(arr):
+ ... return 'HA! - What are you going to do now?'
+ ...
+ >>> np.set_string_function(pprint)
+ >>> a = np.arange(10)
+ >>> a
+ HA! - What are you going to do now?
+ >>> print(a)
+ [0 1 2 3 4 5 6 7 8 9]
+
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ `repr` affects either pretty printing or normal string representation.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([ 0, 1, 2, 3])'
+
+ """
+ if f is None:
+ if repr:
+ return multiarray.set_string_function(array_repr, 1)
+ else:
+ return multiarray.set_string_function(array_str, 0)
+ else:
+ return multiarray.set_string_function(f, repr)
+
+set_string_function(array_str, 0)
+set_string_function(array_repr, 1)
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 54140f24a..6e6547129 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -36,4 +36,5 @@
0x0000000a = 9b8bce614655d3eb02acddcb508203cb
# Version 11 (NumPy 1.13) Added PyArray_MapIterArrayCopyIfOverlap
+# Version 11 (NumPy 1.14) No Change
0x0000000b = edb1ba83730c650fd9bc5772a919cda7
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index b618dedf5..42c564a97 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -52,6 +52,7 @@ API_FILES = [join('multiarray', 'alloc.c'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'shape.c'),
+ join('multiarray', 'strfuncs.c'),
join('multiarray', 'usertypes.c'),
join('umath', 'loops.c.src'),
join('umath', 'ufunc_object.c'),
@@ -71,7 +72,7 @@ def _repl(str):
return str.replace('Bool', 'npy_bool')
-class StealRef:
+class StealRef(object):
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -82,7 +83,7 @@ class StealRef:
return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg
-class NonNull:
+class NonNull(object):
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -271,7 +272,7 @@ def find_functions(filename, tag='API'):
state = SCANNING
else:
function_args.append(line)
- except:
+ except Exception:
print(filename, lineno + 1)
raise
fo.close()
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 79d774a89..b4aeaa277 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -220,8 +220,13 @@ def do_generate_api(targets, sources):
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
- raise AssertionError("Multiarray API size mismatch %d %d" %
- (len(multiarray_api_dict), len(multiarray_api_index)))
+ keys_dict = set(multiarray_api_dict.keys())
+ keys_index = set(multiarray_api_index.keys())
+ raise AssertionError(
+ "Multiarray API size mismatch - "
+ "index has extra keys {}, dict has extra keys {}"
+ .format(keys_index - keys_dict, keys_dict - keys_index)
+ )
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 2241618f7..af058b4be 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -314,9 +314,7 @@ defdict = {
'true_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.true_divide'),
- 'PyUFunc_DivisionTypeResolver',
- TD('bBhH', out='d'),
- TD('iIlLqQ', out='d'),
+ 'PyUFunc_TrueDivisionTypeResolver',
TD(flts+cmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 7beda59f2..6aae57234 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -19,9 +19,11 @@ def get(name):
# common parameter text to all ufuncs
_params_text = textwrap.dedent("""
- out : ndarray or tuple of ndarray, optional
- Alternate array object(s) in which to put the result; if provided, it
- must have a shape that the inputs broadcast to.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the inputs broadcast to. If not provided or `None`,
+ a freshly-allocated array is returned. A tuple (possible only as a
+ keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
@@ -926,25 +928,24 @@ add_newdoc('numpy.core.umath', 'heaviside',
The Heaviside step function is defined as::
- 0 if x < 0
- heaviside(x, h0) = h0 if x == 0
- 1 if x > 0
+ 0 if x1 < 0
+ heaviside(x1, x2) = x2 if x1 == 0
+ 1 if x1 > 0
- where `h0` is often taken to be 0.5, but 0 and 1 are also sometimes used.
+ where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
- x : array_like
+ x1 : array_like
Input values.
- $PARAMS
- h0 : array_like
- The value of the function at x = 0.
+ x2 : array_like
+ The value of the function when x1 is 0.
$PARAMS
Returns
-------
out : ndarray
- The output array, element-wise Heaviside step function of `x`.
+ The output array, element-wise Heaviside step function of `x1`.
Notes
-----
@@ -1720,6 +1721,7 @@ add_newdoc('numpy.core.umath', 'isnat',
----------
x : array_like
Input array with datetime or timedelta data type.
+ $PARAMS
Returns
-------
@@ -2885,8 +2887,18 @@ add_newdoc('numpy.core.umath', 'remainder',
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator ``x1 % x2`` and has the same sign
- as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
- function.
+ as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
+ is ``mod``.
+
+ .. warning::
+
+ This should not be confused with:
+
+ * Python 3.7's `math.remainder` and C's ``remainder``, which
+ compute the IEEE remainder, which is the complement to
+ ``round(x1 / x2)``.
+ * The MATLAB ``rem`` function and/or the C ``%`` operator which is the
+ complement to ``int(x1 / x2)``.
Parameters
----------
@@ -2906,7 +2918,7 @@ add_newdoc('numpy.core.umath', 'remainder',
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
- fmod : Equivalent of the Matlab(TM) ``rem`` function.
+ fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index e242363a4..37d691027 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -5,7 +5,7 @@ Implementation of optimized einsum.
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
-from numpy.core.numeric import asarray, asanyarray, result_type
+from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot
__all__ = ['einsum', 'einsum_path']
@@ -256,6 +256,114 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
return path
+def _can_dot(inputs, result, idx_removed):
+ """
+ Checks if we can use a BLAS (np.tensordot) call and whether it's beneficial to do so.
+
+ Parameters
+ ----------
+ inputs : list of str
+ Specifies the subscripts for summation.
+ result : str
+ Resulting summation.
+ idx_removed : set
+ Indices that are removed in the summation
+
+
+ Returns
+ -------
+ type : bool
+ Returns True if BLAS should and can be used, else False
+
+ Notes
+ -----
+ If the operation is BLAS level 1 or 2 and it is not already aligned
+ we default back to einsum as the memory movement to copy is more
+ costly than the operation itself.
+
+
+ Examples
+ --------
+
+ # Standard GEMM operation
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+ True
+
+ # Can use the standard BLAS, but requires odd data movement
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+ False
+
+ # DDOT where the memory is not aligned
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+ False
+
+ """
+
+ # All `dot` calls remove indices
+ if len(idx_removed) == 0:
+ return False
+
+ # BLAS can only handle two operands
+ if len(inputs) != 2:
+ return False
+
+ # Build a few temporaries
+ input_left, input_right = inputs
+ set_left = set(input_left)
+ set_right = set(input_right)
+ keep_left = set_left - idx_removed
+ keep_right = set_right - idx_removed
+ rs = len(idx_removed)
+
+ # Indices must overlap between the two operands
+ if not len(set_left & set_right):
+ return False
+
+ # We cannot have duplicate indices ("ijj, jk -> ik")
+ if (len(set_left) != len(input_left)) or (len(set_right) != len(input_right)):
+ return False
+
+ # Cannot handle partial inner ("ij, ji -> i")
+ if len(keep_left & keep_right):
+ return False
+
+ # At this point we are a DOT, GEMV, or GEMM operation
+
+ # Handle inner products
+
+ # DDOT with aligned data
+ if input_left == input_right:
+ return True
+
+ # DDOT without aligned data (better to use einsum)
+ if set_left == set_right:
+ return False
+
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+ # GEMM or GEMV no transpose
+ if input_left[-rs:] == input_right[:rs]:
+ return True
+
+ # GEMM or GEMV transpose both
+ if input_left[:rs] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose right
+ if input_left[-rs:] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose left
+ if input_left[:rs] == input_right[:rs]:
+ return True
+
+ # Einsum is faster than GEMV if we have to copy data
+ if not keep_left or not keep_right:
+ return False
+
+ # We are a matrix-matrix product, but we need to copy data
+ return True
+
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
@@ -542,7 +650,7 @@ def einsum_path(*operands, **kwargs):
" %s" % unknown_kwargs)
# Figure out what the path really is
- path_type = kwargs.pop('optimize', False)
+ path_type = kwargs.pop('optimize', True)
if path_type is True:
path_type = 'greedy'
if path_type is None:
@@ -653,6 +761,8 @@ def einsum_path(*operands, **kwargs):
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
@@ -663,7 +773,7 @@ def einsum_path(*operands, **kwargs):
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
- contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
+ contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
@@ -690,7 +800,7 @@ def einsum_path(*operands, **kwargs):
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
- inds, idx_rm, einsum_str, remaining = contraction
+ inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
@@ -748,7 +858,7 @@ def einsum(*operands, **kwargs):
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
- function. See ``np.einsum_path`` for more details. Default is False.
+ function. See ``np.einsum_path`` for more details. Default is True.
Returns
-------
@@ -969,19 +1079,54 @@ def einsum(*operands, **kwargs):
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
+
+ handle_out = False
+
# Start contraction loop
for num, contraction in enumerate(contraction_list):
- inds, idx_rm, einsum_str, remaining = contraction
+ inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
- # If out was specified
+ # Do we need to deal with the output?
if specified_out and ((num + 1) == len(contraction_list)):
- einsum_kwargs["out"] = out_array
+ handle_out = True
+
+ # Call tensordot
+ if blas:
+
+ # Checks have already been handled
+ input_str, results_index = einsum_str.split('->')
+ input_left, input_right = input_str.split(',')
+
+ tensor_result = input_left + input_right
+ for s in idx_rm:
+ tensor_result = tensor_result.replace(s, "")
+
+ # Find indices to contract over
+ left_pos, right_pos = [], []
+ for s in idx_rm:
+ left_pos.append(input_left.find(s))
+ right_pos.append(input_right.find(s))
+
+ # Contract!
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+ # Build a new view if needed
+ if (tensor_result != results_index) or handle_out:
+ if handle_out:
+ einsum_kwargs["out"] = out_array
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
+
+ # Call einsum
+ else:
+ # If out was specified
+ if handle_out:
+ einsum_kwargs["out"] = out_array
- # Do the contraction
- new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
+ # Do the contraction
+ new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
# Append new items and dereference what we can
operands.append(new_view)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index a8c2fd2fb..6f7c45859 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -28,12 +28,7 @@ __all__ = [
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
-
-try:
- _gentype = types.GeneratorType
-except AttributeError:
- _gentype = type(None)
-
+_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
@@ -1120,18 +1115,16 @@ def resize(a, new_shape):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
- if not Na:
- return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
+ if Na == 0 or total_size == 0:
+ return mu.zeros(new_shape, a.dtype)
+
n_copies = int(total_size / Na)
extra = total_size % Na
- if total_size == 0:
- return a[:0]
-
if extra != 0:
- n_copies = n_copies+1
- extra = Na-extra
+ n_copies = n_copies + 1
+ extra = Na - extra
a = concatenate((a,)*n_copies)
if extra > 0:
@@ -1531,14 +1524,15 @@ def nonzero(a):
[0, 2, 0],
[1, 1, 0]])
>>> np.nonzero(x)
- (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
+ (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
- array([ 1., 1., 1.])
+ array([1, 2, 1, 1])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
- [2, 2]])
+ [2, 0],
+ [2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
@@ -2248,7 +2242,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
- >>> b = np.arange(5, dtype=np.float)
+ >>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
@@ -2349,7 +2343,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
- >>> b = np.arange(5, dtype=np.float)
+ >>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
@@ -2499,7 +2493,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
- >>> np.prod(x).dtype == np.int
+ >>> np.prod(x).dtype == int
True
"""
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index d6757bb74..0415e16ac 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -292,13 +292,13 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
Negative, decreasing, and complex inputs are allowed:
- >>> geomspace(1000, 1, num=4)
+ >>> np.geomspace(1000, 1, num=4)
array([ 1000., 100., 10., 1.])
- >>> geomspace(-1000, -1, num=4)
+ >>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
- >>> geomspace(1j, 1000j, num=4) # Straight line
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
- >>> geomspace(-1+0j, 1+0j, num=5) # Circle
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
array([-1.00000000+0.j , -0.70710678+0.70710678j,
0.00000000+1.j , 0.70710678+0.70710678j,
1.00000000+0.j ])
@@ -339,7 +339,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
# complex and another is negative and log would produce NaN otherwise
start = start + (stop - stop)
stop = stop + (start - start)
- if _nx.issubdtype(dtype, complex):
+ if _nx.issubdtype(dtype, _nx.complexfloating):
start = start + 0j
stop = stop + 0j
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 5b5e69352..e450a660d 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -68,7 +68,8 @@ class MachArLike(object):
params = _MACHAR_PARAMS[ftype]
float_conv = lambda v: array([v], ftype)
float_to_float = lambda v : _fr1(float_conv(v))
- float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
+ self._float_to_str = lambda v: (params['fmt'] %
+ array(_fr0(v)[0], ftype))
self.title = params['title']
# Parameter types same as for discovered MachAr object.
self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
@@ -79,11 +80,30 @@ class MachArLike(object):
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
- self._str_eps = float_to_str(self.eps)
- self._str_epsneg = float_to_str(self.epsneg)
- self._str_xmin = float_to_str(self.xmin)
- self._str_xmax = float_to_str(self.xmax)
- self._str_resolution = float_to_str(self.resolution)
+
+ # Properties below to delay need for float_to_str, and thus avoid circular
+ # imports during early numpy module loading.
+ # See: https://github.com/numpy/numpy/pull/8983#discussion_r115838683
+
+ @property
+ def _str_eps(self):
+ return self._float_to_str(self.eps)
+
+ @property
+ def _str_epsneg(self):
+ return self._float_to_str(self.epsneg)
+
+ @property
+ def _str_xmin(self):
+ return self._float_to_str(self.xmin)
+
+ @property
+ def _str_xmax(self):
+ return self._float_to_str(self.xmax)
+
+ @property
+ def _str_resolution(self):
+ return self._float_to_str(self.resolution)
# Known parameters for float16
@@ -538,13 +558,3 @@ class iinfo(object):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
-if __name__ == '__main__':
- f = finfo(ntypes.single)
- print('single epsilon:', f.eps)
- print('single tiny:', f.tiny)
- f = finfo(ntypes.float)
- print('float epsilon:', f.eps)
- print('float tiny:', f.tiny)
- f = finfo(ntypes.longfloat)
- print('longfloat epsilon:', f.eps)
- print('longfloat tiny:', f.tiny)
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 60abae4e0..84653ea18 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -15,6 +15,8 @@
* NPY_CPU_ARMEB
* NPY_CPU_SH_LE
* NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
@@ -76,6 +78,10 @@
#define NPY_CPU_AARCH64
#elif defined(__mc68000__)
#define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index e34b1d97e..1a42121db 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -45,7 +45,8 @@
|| defined(NPY_CPU_AARCH64) \
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL) \
- || defined(NPY_CPU_PPC64LE)
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
|| defined(NPY_CPU_SPARC) \
@@ -56,7 +57,8 @@
|| defined(NPY_CPU_SH_BE) \
|| defined(NPY_CPU_MIPSEB) \
|| defined(NPY_CPU_OR1K) \
- || defined(NPY_CPU_M68K)
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
#error Unknown CPU: can not set endianness
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 701f02c6e..04a3738b9 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -34,5 +34,7 @@
#define NPY_1_10_API_VERSION 0x00000008
#define NPY_1_11_API_VERSION 0x00000008
#define NPY_1_12_API_VERSION 0x00000008
+#define NPY_1_13_API_VERSION 0x00000008
+#define NPY_1_14_API_VERSION 0x00000008
#endif
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 6b4a93ce0..aa3a4076c 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -22,9 +22,9 @@ if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
from . import umath
-from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE,
- ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG,
- ERR_DEFAULT, PINF, NAN)
+from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
+ ERR_LOG, ERR_DEFAULT, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._internal import TooHardError, AxisError
@@ -46,28 +46,23 @@ loads = pickle.loads
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
- 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
- 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
- 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
- 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
- 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
- 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
- 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
- 'outer', 'vdot', 'roll',
- 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
- 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
- 'set_string_function', 'little_endian', 'require', 'fromiter',
- 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
- 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
- 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
- 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
- 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
- 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
- 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
- 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
- 'TooHardError', 'AxisError'
- ]
-
+ 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+ 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
+ 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+ 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
+ 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
+ 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+ 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
+ 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
+ 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+ 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
+ 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError' ]
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
@@ -133,7 +128,7 @@ def zeros_like(a, dtype=None, order='K', subok=True):
array([[0, 0, 0],
[0, 0, 0]])
- >>> y = np.arange(3, dtype=np.float)
+ >>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
@@ -176,7 +171,7 @@ def ones(shape, dtype=None, order='C'):
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
- >>> np.ones((5,), dtype=np.int)
+ >>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
@@ -243,7 +238,7 @@ def ones_like(a, dtype=None, order='K', subok=True):
array([[1, 1, 1],
[1, 1, 1]])
- >>> y = np.arange(3, dtype=np.float)
+ >>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.ones_like(y)
@@ -344,7 +339,7 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
Examples
--------
- >>> x = np.arange(6, dtype=np.int)
+ >>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
@@ -363,20 +358,6 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
multiarray.copyto(res, fill_value, casting='unsafe')
return res
-
-def extend_all(module):
- adict = {}
- for a in __all__:
- adict[a] = 1
- try:
- mall = getattr(module, '__all__')
- except AttributeError:
- mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
- for a in mall:
- if a not in adict:
- __all__.append(a)
-
-
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
@@ -436,8 +417,7 @@ def count_nonzero(a, axis=None):
if issubdtype(a.dtype, np.number):
return (a != 0).sum(axis=axis, dtype=np.intp)
- if (issubdtype(a.dtype, np.string_) or
- issubdtype(a.dtype, np.unicode_)):
+ if issubdtype(a.dtype, np.character):
nullstr = a.dtype.type('')
return (a != nullstr).sum(axis=axis, dtype=np.intp)
@@ -445,7 +425,7 @@ def count_nonzero(a, axis=None):
counts = np.apply_along_axis(multiarray.count_nonzero, axis[0], a)
if axis.size == 1:
- return counts
+ return counts.astype(np.intp, copy=False)
else:
# for subsequent axis numbers, that number decreases
# by one in this new 'counts' array if it was larger
@@ -838,7 +818,7 @@ def argwhere(a):
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
- For this purpose use ``where(a)`` instead.
+ For this purpose use ``nonzero(a)`` instead.
Examples
--------
@@ -1106,7 +1086,10 @@ def outer(a, b, out=None):
See also
--------
- inner, einsum
+ inner
+ einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
+ ufunc.outer : A generalization to N dimensions and other operations.
+ ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
@@ -1278,7 +1261,7 @@ def tensordot(a, b, axes=2):
"""
try:
iter(axes)
- except:
+ except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
@@ -1323,7 +1306,7 @@ def tensordot(a, b, axes=2):
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
- newshape_a = (-1, N2)
+ newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
@@ -1331,7 +1314,7 @@ def tensordot(a, b, axes=2):
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
- newshape_b = (N2, -1)
+ newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
@@ -1433,6 +1416,10 @@ def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
+ This function continues to be supported for backward compatibility, but you
+ should prefer `moveaxis`. The `moveaxis` function was added in NumPy
+ 1.11.
+
Parameters
----------
a : ndarray
@@ -1548,7 +1535,7 @@ def moveaxis(a, source, destination):
Other axes remain in their original order.
- .. versionadded::1.11.0
+ .. versionadded:: 1.11.0
Parameters
----------
@@ -1615,7 +1602,7 @@ def moveaxis(a, source, destination):
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
- return rollaxis(a, axis, 0)
+ return moveaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
@@ -1740,8 +1727,8 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
- a = rollaxis(a, axisa, a.ndim)
- b = rollaxis(b, axisb, b.ndim)
+ a = moveaxis(a, axisa, -1)
+ b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
@@ -1812,195 +1799,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
- # This works because we are moving the last axis
- return rollaxis(cp, -1, axisc)
-
-
-# Use numarray's printing function
-from .arrayprint import array2string, get_printoptions, set_printoptions
-
-
-_typelessdata = [int_, float_, complex_]
-if issubclass(intc, int):
- _typelessdata.append(intc)
-
-
-if issubclass(longlong, int):
- _typelessdata.append(longlong)
-
-
-def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
- """
- Return the string representation of an array.
-
- Parameters
- ----------
- arr : ndarray
- Input array.
- max_line_width : int, optional
- The maximum number of columns the string should span. Newline
- characters split the string appropriately after array elements.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent very small numbers as zero, default is False. Very small
- is defined by `precision`, if the precision is 8 then
- numbers smaller than 5e-9 are represented as zero.
-
- Returns
- -------
- string : str
- The string representation of an array.
-
- See Also
- --------
- array_str, array2string, set_printoptions
-
- Examples
- --------
- >>> np.array_repr(np.array([1,2]))
- 'array([1, 2])'
- >>> np.array_repr(np.ma.array([0.]))
- 'MaskedArray([ 0.])'
- >>> np.array_repr(np.array([], np.int32))
- 'array([], dtype=int32)'
-
- >>> x = np.array([1e-6, 4e-7, 2, 3])
- >>> np.array_repr(x, precision=6, suppress_small=True)
- 'array([ 0.000001, 0. , 2. , 3. ])'
-
- """
- if type(arr) is not ndarray:
- class_name = type(arr).__name__
- else:
- class_name = "array"
-
- if arr.size > 0 or arr.shape == (0,):
- lst = array2string(arr, max_line_width, precision, suppress_small,
- ', ', class_name + "(")
- else: # show zero-length shape unless it is (0,)
- lst = "[], shape=%s" % (repr(arr.shape),)
-
- skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
-
- if skipdtype:
- return "%s(%s)" % (class_name, lst)
- else:
- typename = arr.dtype.name
- # Quote typename in the output if it is "complex".
- if typename and not (typename[0].isalpha() and typename.isalnum()):
- typename = "'%s'" % typename
-
- lf = ' '
- if issubclass(arr.dtype.type, flexible):
- if arr.dtype.names:
- typename = "%s" % str(arr.dtype)
- else:
- typename = "'%s'" % str(arr.dtype)
- lf = '\n'+' '*len(class_name + "(")
- return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
-
-
-def array_str(a, max_line_width=None, precision=None, suppress_small=None):
- """
- Return a string representation of the data in an array.
-
- The data in the array is returned as a single string. This function is
- similar to `array_repr`, the difference being that `array_repr` also
- returns information on the kind of array and its data type.
-
- Parameters
- ----------
- a : ndarray
- Input array.
- max_line_width : int, optional
- Inserts newlines if text is longer than `max_line_width`. The
- default is, indirectly, 75.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent numbers "very close" to zero as zero; default is False.
- Very close is defined by precision: if the precision is 8, e.g.,
- numbers smaller (in absolute value) than 5e-9 are represented as
- zero.
-
- See Also
- --------
- array2string, array_repr, set_printoptions
-
- Examples
- --------
- >>> np.array_str(np.arange(3))
- '[0 1 2]'
-
- """
- return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
-
-
-def set_string_function(f, repr=True):
- """
- Set a Python function to be used when pretty printing arrays.
-
- Parameters
- ----------
- f : function or None
- Function to be used to pretty print arrays. The function should expect
- a single array argument and return a string of the representation of
- the array. If None, the function is reset to the default NumPy function
- to print arrays.
- repr : bool, optional
- If True (default), the function for pretty printing (``__repr__``)
- is set, if False the function that returns the default string
- representation (``__str__``) is set.
-
- See Also
- --------
- set_printoptions, get_printoptions
-
- Examples
- --------
- >>> def pprint(arr):
- ... return 'HA! - What are you going to do now?'
- ...
- >>> np.set_string_function(pprint)
- >>> a = np.arange(10)
- >>> a
- HA! - What are you going to do now?
- >>> print(a)
- [0 1 2 3 4 5 6 7 8 9]
-
- We can reset the function to the default:
-
- >>> np.set_string_function(None)
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
- `repr` affects either pretty printing or normal string representation.
- Note that ``__repr__`` is still affected by setting ``__str__``
- because the width of each array element in the returned string becomes
- equal to the length of the result of ``__str__()``.
-
- >>> x = np.arange(4)
- >>> np.set_string_function(lambda x:'random', repr=False)
- >>> x.__str__()
- 'random'
- >>> x.__repr__()
- 'array([ 0, 1, 2, 3])'
-
- """
- if f is None:
- if repr:
- return multiarray.set_string_function(array_repr, 1)
- else:
- return multiarray.set_string_function(array_str, 0)
- else:
- return multiarray.set_string_function(f, repr)
-
-
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
+ return moveaxis(cp, -1, axisc)
little_endian = (sys.byteorder == 'little')
@@ -2154,6 +1953,8 @@ def isscalar(num):
False
>>> np.isscalar(False)
True
+ >>> np.isscalar('numpy')
+ True
"""
if isinstance(num, generic):
@@ -2521,13 +2322,10 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
- result = less_equal(abs(x-y), atol + rtol * abs(y))
- if isscalar(a) and isscalar(b):
- result = bool(result)
- return result
+ return less_equal(abs(x-y), atol + rtol * abs(y))
- x = array(a, copy=False, subok=True, ndmin=1)
- y = array(b, copy=False, subok=True, ndmin=1)
+ x = asanyarray(a)
+ y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
@@ -2554,12 +2352,11 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
+
+ # Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
- if isscalar(a) and isscalar(b):
- return bool(cond)
- else:
- return cond
+ return cond[()] # Flatten 0d arrays to scalars
def array_equal(a1, a2):
@@ -2597,7 +2394,7 @@ def array_equal(a1, a2):
"""
try:
a1, a2 = asarray(a1), asarray(a2)
- except:
+ except Exception:
return False
if a1.shape != a2.shape:
return False
@@ -2641,11 +2438,11 @@ def array_equiv(a1, a2):
"""
try:
a1, a2 = asarray(a1), asarray(a2)
- except:
+ except Exception:
return False
try:
multiarray.broadcast(a1, a2)
- except:
+ except Exception:
return False
return bool(asarray(a1 == a2).all())
@@ -3085,10 +2882,26 @@ nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
+
+def extend_all(module):
+ adict = {}
+ for a in __all__:
+ adict[a] = 1
+ try:
+ mall = getattr(module, '__all__')
+ except AttributeError:
+ mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
+ for a in mall:
+ if a not in adict:
+ __all__.append(a)
+
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
+extend_all(arrayprint)
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 600d5af33..b61f5e7bc 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -85,6 +85,7 @@ from __future__ import division, absolute_import, print_function
import types as _types
import sys
import numbers
+import warnings
from numpy.compat import bytes, long
from numpy.core.multiarray import (
@@ -501,11 +502,11 @@ def maximum_sctype(t):
Examples
--------
- >>> np.maximum_sctype(np.int)
+ >>> np.maximum_sctype(int)
<type 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<type 'numpy.uint64'>
- >>> np.maximum_sctype(np.complex)
+ >>> np.maximum_sctype(complex)
<type 'numpy.complex192'>
>>> np.maximum_sctype(str)
@@ -528,33 +529,6 @@ def maximum_sctype(t):
else:
return sctypes[base][-1]
-try:
- buffer_type = _types.BufferType
-except AttributeError:
- # Py3K
- buffer_type = memoryview
-
-_python_types = {int: 'int_',
- float: 'float_',
- complex: 'complex_',
- bool: 'bool_',
- bytes: 'bytes_',
- unicode: 'unicode_',
- buffer_type: 'void',
- }
-
-if sys.version_info[0] >= 3:
- def _python_type(t):
- """returns the type corresponding to a certain Python type"""
- if not isinstance(t, type):
- t = type(t)
- return allTypes[_python_types.get(t, 'object_')]
-else:
- def _python_type(t):
- """returns the type corresponding to a certain Python type"""
- if not isinstance(t, _types.TypeType):
- t = type(t)
- return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""
@@ -597,7 +571,7 @@ def issctype(rep):
if res and res != object_:
return True
return False
- except:
+ except Exception:
return False
def obj2sctype(rep, default=None):
@@ -639,22 +613,19 @@ def obj2sctype(rep, default=None):
<type 'list'>
"""
- try:
- if issubclass(rep, generic):
- return rep
- except TypeError:
- pass
- if isinstance(rep, dtype):
- return rep.type
- if isinstance(rep, type):
- return _python_type(rep)
+ # prevent abstract classes from being upcast
+ if isinstance(rep, type) and issubclass(rep, generic):
+ return rep
+ # extract dtype from arrays
if isinstance(rep, ndarray):
return rep.dtype.type
+ # fall back on dtype to convert
try:
res = dtype(rep)
- except:
+ except Exception:
return default
- return res.type
+ else:
+ return res.type
def issubclass_(arg1, arg2):
@@ -684,9 +655,9 @@ def issubclass_(arg1, arg2):
Examples
--------
- >>> np.issubclass_(np.int32, np.int)
+ >>> np.issubclass_(np.int32, int)
True
- >>> np.issubclass_(np.int32, np.float)
+ >>> np.issubclass_(np.int32, float)
False
"""
@@ -717,9 +688,9 @@ def issubsctype(arg1, arg2):
--------
>>> np.issubsctype('S8', str)
True
- >>> np.issubsctype(np.array([1]), np.int)
+ >>> np.issubsctype(np.array([1]), int)
True
- >>> np.issubsctype(np.array([1]), np.float)
+ >>> np.issubsctype(np.array([1]), float)
False
"""
@@ -745,20 +716,46 @@ def issubdtype(arg1, arg2):
Examples
--------
- >>> np.issubdtype('S1', str)
+ >>> np.issubdtype('S1', np.string_)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
- if issubclass_(arg2, generic):
- return issubclass(dtype(arg1).type, arg2)
- mro = dtype(arg2).type.mro()
- if len(mro) > 1:
- val = mro[1]
- else:
- val = mro[0]
- return issubclass(dtype(arg1).type, val)
+ if not issubclass_(arg1, generic):
+ arg1 = dtype(arg1).type
+ if not issubclass_(arg2, generic):
+ arg2_orig = arg2
+ arg2 = dtype(arg2).type
+ if not isinstance(arg2_orig, dtype):
+ # weird deprecated behaviour, that tried to infer np.floating from
+ # float, and similar less obvious things, such as np.generic from
+ # basestring
+ mro = arg2.mro()
+ arg2 = mro[1] if len(mro) > 1 else mro[0]
+
+ def type_repr(x):
+ """ Helper to produce clear error messages """
+ if not isinstance(x, type):
+ return repr(x)
+ elif issubclass(x, generic):
+ return "np.{}".format(x.__name__)
+ else:
+ return x.__name__
+
+ # 1.14, 2017-08-01
+ warnings.warn(
+ "Conversion of the second argument of issubdtype from `{raw}` "
+ "to `{abstract}` is deprecated. In future, it will be treated "
+ "as `{concrete} == np.dtype({raw}).type`.".format(
+ raw=type_repr(arg2_orig),
+ abstract=type_repr(arg2),
+ concrete=type_repr(dtype(arg2_orig).type)
+ ),
+ FutureWarning, stacklevel=2
+ )
+
+ return issubclass(arg1, arg2)
# This dictionary allows look up based on any alias for an array data-type
@@ -821,7 +818,7 @@ def sctype2char(sctype):
Examples
--------
- >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l
d
@@ -958,6 +955,7 @@ def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
+ numbers.Number.register(number)
_register_types()
@@ -986,7 +984,7 @@ def find_common_type(array_types, scalar_types):
Examples
--------
- >>> np.find_common_type([], [np.int64, np.float32, np.complex])
+ >>> np.find_common_type([], [np.int64, np.float32, complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
@@ -1002,7 +1000,7 @@ def find_common_type(array_types, scalar_types):
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
- >>> np.find_common_type([np.float32], [np.complex])
+ >>> np.find_common_type([np.float32], [complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
diff --git a/numpy/core/records.py b/numpy/core/records.py
index ecc293812..b6ff8bf65 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -80,7 +80,7 @@ def find_duplicate(list):
dup.append(list[i])
return dup
-class format_parser:
+class format_parser(object):
"""
Class to convert formats, names, titles description to a dtype.
@@ -473,7 +473,7 @@ class recarray(ndarray):
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
- except:
+ except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
@@ -487,7 +487,7 @@ class recarray(ndarray):
# internal attribute.
try:
object.__delattr__(self, attr)
- except:
+ except Exception:
return ret
try:
res = fielddict[attr][:2]
@@ -704,7 +704,7 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
- shape = (len(datastring) - offset) / itemsize
+ shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index e057c5614..f56e705ab 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -187,7 +187,7 @@ def check_complex(config, mathlibs):
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
return priv, pub
- except:
+ except Exception:
# os.uname not available on all platforms. blanket except ugly but safe
pass
@@ -741,6 +741,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
+ join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
@@ -814,6 +815,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
+ join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
@@ -872,6 +874,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
+ join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
@@ -931,7 +934,8 @@ def configuration(parent_package='',top_path=None):
sources=[join('src', 'multiarray', 'multiarray_tests.c.src'),
join('src', 'private', 'mem_overlap.c')],
depends=[join('src', 'private', 'mem_overlap.h'),
- join('src', 'private', 'npy_extint128.h')])
+ join('src', 'private', 'npy_extint128.h')],
+ libraries=['npymath'])
#######################################################################
# operand_flag_tests module #
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 1b3984063..094cd1841 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -39,6 +39,7 @@ C_ABI_VERSION = 0x01000009
# 0x0000000a - 1.11.x
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
+# 0x0000000b - 1.14.x
C_API_VERSION = 0x0000000b
class MismatchCAPIWarning(Warning):
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 6405ac634..026ad603a 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -293,7 +293,7 @@ def hstack(tup):
return _nx.concatenate(arrs, 1)
-def stack(arrays, axis=0):
+def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
@@ -309,6 +309,10 @@ def stack(arrays, axis=0):
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what stack would have returned if no
+ out argument were specified.
Returns
-------
@@ -358,7 +362,7 @@ def stack(arrays, axis=0):
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
- return _nx.concatenate(expanded_arrays, axis=axis)
+ return _nx.concatenate(expanded_arrays, axis=axis, out=out)
class _Recurser(object):
@@ -439,9 +443,9 @@ def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
- Blocks in the innermost lists are `concatenate`d along the last
- dimension (-1), then these are `concatenate`d along the second-last
- dimension (-2), and so on until the outermost list is reached
+ Blocks in the innermost lists are concatenated (see `concatenate`) along
+ the last dimension (-1), then these are concatenated along the
+ second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 345aed28a..3db1254d4 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -175,7 +175,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta);
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta);
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle);
/*
* Gets a tzoffset in minutes by calling the fromutc() function on
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index e145e3404..f8305d115 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -126,8 +126,11 @@ npy_free_cache(void * p, npy_uintp sz)
NPY_NO_EXPORT void *
npy_alloc_cache_dim(npy_uintp sz)
{
- /* dims + strides */
- if (NPY_UNLIKELY(sz < 2)) {
+ /*
+ * make sure any temporary allocation can be used for array metadata which
+ * uses one memory block for both dimensions and strides
+ */
+ if (sz < 2) {
sz = 2;
}
return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache,
@@ -137,8 +140,8 @@ npy_alloc_cache_dim(npy_uintp sz)
NPY_NO_EXPORT void
npy_free_cache_dim(void * p, npy_uintp sz)
{
- /* dims + strides */
- if (NPY_UNLIKELY(sz < 2)) {
+ /* see npy_alloc_cache_dim */
+ if (sz < 2) {
sz = 2;
}
_npy_free_cache(p, sz, NBUCKETS_DIM, dimcache,
diff --git a/numpy/core/src/multiarray/alloc.h b/numpy/core/src/multiarray/alloc.h
index 39eb99544..2b69efc35 100644
--- a/numpy/core/src/multiarray/alloc.h
+++ b/numpy/core/src/multiarray/alloc.h
@@ -21,4 +21,16 @@ npy_alloc_cache_dim(npy_uintp sz);
NPY_NO_EXPORT void
npy_free_cache_dim(void * p, npy_uintp sd);
+static NPY_INLINE void
+npy_free_cache_dim_obj(PyArray_Dims dims)
+{
+ npy_free_cache_dim(dims.ptr, dims.len);
+}
+
+static NPY_INLINE void
+npy_free_cache_dim_array(PyArrayObject * arr)
+{
+ npy_free_cache_dim(PyArray_DIMS(arr), PyArray_NDIM(arr));
+}
+
#endif
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 28cc7031a..d1bce8c3b 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -293,7 +293,8 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
if (((PyArray_NDIM(dst) == 1 && PyArray_NDIM(src) >= 1 &&
PyArray_STRIDES(dst)[0] *
PyArray_STRIDES(src)[PyArray_NDIM(src) - 1] < 0) ||
- PyArray_NDIM(dst) > 1) && arrays_overlap(src, dst)) {
+ PyArray_NDIM(dst) > 1 || PyArray_HASFIELDS(dst)) &&
+ arrays_overlap(src, dst)) {
PyArrayObject *tmp;
/*
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index df3890201..36d48af9f 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -53,6 +53,7 @@ maintainer email: oliphant.travis@ieee.org
#include "alloc.h"
#include "mem_overlap.h"
#include "numpyos.h"
+#include "strfuncs.h"
#include "binop_override.h"
@@ -433,93 +434,6 @@ array_dealloc(PyArrayObject *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
-/*
- * Extend string. On failure, returns NULL and leaves *strp alone.
- * XXX we do this in multiple places; time for a string library?
- */
-static char *
-extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
-{
- char *str = *strp;
- Py_ssize_t new_cap;
-
- if (n >= *maxp - 16) {
- new_cap = *maxp * 2;
-
- if (new_cap <= *maxp) { /* overflow */
- return NULL;
- }
- str = PyArray_realloc(*strp, new_cap);
- if (str != NULL) {
- *strp = str;
- *maxp = new_cap;
- }
- }
- return str;
-}
-
-static int
-dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
-{
- PyArray_Descr *descr=PyArray_DESCR(self);
- PyObject *op = NULL, *sp = NULL;
- char *ostring;
- npy_intp i, N, ret = 0;
-
-#define CHECK_MEMORY do { \
- if (extend(string, *n, max_n) == NULL) { \
- ret = -1; \
- goto end; \
- } \
- } while (0)
-
- if (nd == 0) {
- if ((op = descr->f->getitem(data, self)) == NULL) {
- return -1;
- }
- sp = PyObject_Repr(op);
- if (sp == NULL) {
- ret = -1;
- goto end;
- }
- ostring = PyString_AsString(sp);
- N = PyString_Size(sp)*sizeof(char);
- *n += N;
- CHECK_MEMORY;
- memmove(*string + (*n - N), ostring, N);
- }
- else {
- CHECK_MEMORY;
- (*string)[*n] = '[';
- *n += 1;
- for (i = 0; i < dimensions[0]; i++) {
- if (dump_data(string, n, max_n,
- data + (*strides)*i,
- nd - 1, dimensions + 1,
- strides + 1, self) < 0) {
- return -1;
- }
- CHECK_MEMORY;
- if (i < dimensions[0] - 1) {
- (*string)[*n] = ',';
- (*string)[*n+1] = ' ';
- *n += 2;
- }
- }
- CHECK_MEMORY;
- (*string)[*n] = ']';
- *n += 1;
- }
-
-#undef CHECK_MEMORY
-
-end:
- Py_XDECREF(op);
- Py_XDECREF(sp);
- return ret;
-}
-
/*NUMPY_API
* Prints the raw data of the ndarray in a form useful for debugging
* low-level C issues.
@@ -582,72 +496,6 @@ PyArray_DebugPrint(PyArrayObject *obj)
fflush(stdout);
}
-static PyObject *
-array_repr_builtin(PyArrayObject *self, int repr)
-{
- PyObject *ret;
- char *string;
- /* max_n initial value is arbitrary, dump_data will extend it */
- Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
-
- if ((string = PyArray_malloc(max_n)) == NULL) {
- return PyErr_NoMemory();
- }
-
- if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), self) < 0) {
- PyArray_free(string);
- return NULL;
- }
-
- if (repr) {
- if (PyArray_ISEXTENDED(self)) {
- ret = PyUString_FromFormat("array(%s, '%c%d')",
- string,
- PyArray_DESCR(self)->type,
- PyArray_DESCR(self)->elsize);
- }
- else {
- ret = PyUString_FromFormat("array(%s, '%c')",
- string,
- PyArray_DESCR(self)->type);
- }
- }
- else {
- ret = PyUString_FromStringAndSize(string, n);
- }
-
- PyArray_free(string);
- return ret;
-}
-
-static PyObject *PyArray_StrFunction = NULL;
-static PyObject *PyArray_ReprFunction = NULL;
-
-/*NUMPY_API
- * Set the array print function to be a Python function.
- */
-NPY_NO_EXPORT void
-PyArray_SetStringFunction(PyObject *op, int repr)
-{
- if (repr) {
- /* Dispose of previous callback */
- Py_XDECREF(PyArray_ReprFunction);
- /* Add a reference to new callback */
- Py_XINCREF(op);
- /* Remember new callback */
- PyArray_ReprFunction = op;
- }
- else {
- /* Dispose of previous callback */
- Py_XDECREF(PyArray_StrFunction);
- /* Add a reference to new callback */
- Py_XINCREF(op);
- /* Remember new callback */
- PyArray_StrFunction = op;
- }
-}
/*NUMPY_API
* This function is scheduled to be removed
@@ -660,39 +508,6 @@ PyArray_SetDatetimeParseFunction(PyObject *op)
}
-static PyObject *
-array_repr(PyArrayObject *self)
-{
- PyObject *s, *arglist;
-
- if (PyArray_ReprFunction == NULL) {
- s = array_repr_builtin(self, 1);
- }
- else {
- arglist = Py_BuildValue("(O)", self);
- s = PyEval_CallObject(PyArray_ReprFunction, arglist);
- Py_DECREF(arglist);
- }
- return s;
-}
-
-static PyObject *
-array_str(PyArrayObject *self)
-{
- PyObject *s, *arglist;
-
- if (PyArray_StrFunction == NULL) {
- s = array_repr_builtin(self, 0);
- }
- else {
- arglist = Py_BuildValue("(O)", self);
- s = PyEval_CallObject(PyArray_StrFunction, arglist);
- Py_DECREF(arglist);
- }
- return s;
-}
-
-
/*NUMPY_API
*/
@@ -1345,6 +1160,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
result = PyArray_GenericBinaryFunction(self, other, n_ops.less_equal);
break;
case Py_EQ:
+ RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
/*
* The ufunc does not support void/structured types, so these
* need to be handled specifically. Only a few cases are supported.
@@ -1392,7 +1208,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
return result;
}
- RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
result = PyArray_GenericBinaryFunction(self,
(PyObject *)other,
n_ops.equal);
@@ -1418,6 +1233,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
}
break;
case Py_NE:
+ RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
/*
* The ufunc does not support void/structured types, so these
* need to be handled specifically. Only a few cases are supported.
@@ -1465,7 +1281,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
return result;
}
- RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
result = PyArray_GenericBinaryFunction(self, (PyObject *)other,
n_ops.not_equal);
if (result == NULL) {
@@ -1703,14 +1518,14 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
}
}
- PyDimMem_FREE(dims.ptr);
- PyDimMem_FREE(strides.ptr);
+ npy_free_cache_dim_obj(dims);
+ npy_free_cache_dim_obj(strides);
return (PyObject *)ret;
fail:
Py_XDECREF(descr);
- PyDimMem_FREE(dims.ptr);
- PyDimMem_FREE(strides.ptr);
+ npy_free_cache_dim_obj(dims);
+ npy_free_cache_dim_obj(strides);
return NULL;
}
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index b11134305..43dd101c5 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -203,7 +203,7 @@ static PyObject *
return @func1@((@type1@)t1);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&t1, ip, PyArray_ISBYTESWAPPED(ap), ap);
return @func1@((@type1@)t1);
}
}
@@ -239,7 +239,8 @@ static int
if (ap == NULL || PyArray_ISBEHAVED(ap))
*((@type@ *)ov)=temp;
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
}
@@ -265,7 +266,7 @@ static PyObject *
else {
int size = sizeof(@type@);
- npy_bool swap = !PyArray_ISNOTSWAPPED(ap);
+ npy_bool swap = PyArray_ISBYTESWAPPED(ap);
copy_and_swap(&t1, ip, size, 1, 0, swap);
copy_and_swap(&t2, ip + size, size, 1, 0, swap);
return PyComplex_FromDoubles((double)t1, (double)t2);
@@ -325,11 +326,11 @@ static int
}
memcpy(ov, &temp, PyArray_DESCR(ap)->elsize);
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(ov, 2, sizeof(@ftype@));
}
rsize = sizeof(@ftype@);
- copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap));
+ copy_and_swap(ov, &temp, rsize, 2, rsize, PyArray_ISBYTESWAPPED(ap));
return 0;
}
@@ -422,7 +423,7 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap)
}
else {
copy_and_swap(ov, &temp, PyArray_DESCR(ap)->elsize, 1, 0,
- !PyArray_ISNOTSWAPPED(ap));
+ PyArray_ISBYTESWAPPED(ap));
}
return 0;
}
@@ -439,7 +440,7 @@ UNICODE_getitem(void *ip, void *vap)
{
PyArrayObject *ap = vap;
Py_ssize_t size = PyArray_ITEMSIZE(ap);
- int swap = !PyArray_ISNOTSWAPPED(ap);
+ int swap = PyArray_ISBYTESWAPPED(ap);
int align = !PyArray_ISALIGNED(ap);
return (PyObject *)PyUnicode_FromUCS4(ip, size, swap, align);
@@ -512,7 +513,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
if (PyArray_DESCR(ap)->elsize > datalen) {
memset((char*)ov + datalen, 0, (PyArray_DESCR(ap)->elsize - datalen));
}
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(ov, PyArray_DESCR(ap)->elsize >> 2, 4);
}
Py_DECREF(temp);
@@ -699,7 +700,7 @@ VOID_getitem(void *input, void *vap)
PyArrayObject *ret;
if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
return NULL;
@@ -708,7 +709,7 @@ VOID_getitem(void *input, void *vap)
ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
descr->subarray->base, shape.len, shape.ptr,
NULL, ip, PyArray_FLAGS(ap)&(~NPY_ARRAY_F_CONTIGUOUS), NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
if (!ret) {
return NULL;
}
@@ -773,71 +774,178 @@ VOID_getitem(void *input, void *vap)
NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *);
+/* Given a structured PyArrayObject arr, index i and structured datatype descr,
+ * modify the dtype of arr to contain a single field corresponding to the ith
+ * field of descr, recompute the alignment flag, and return the offset of the
+ * field (in offset_p). This is useful in preparation for calling copyswap on
+ * individual fields of a numpy structure, in VOID_setitem. Compare to inner
+ * loops in VOID_getitem and VOID_nonzero.
+ *
+ * WARNING: Clobbers arr's dtype and alignment flag.
+ */
+NPY_NO_EXPORT int
+_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
+ npy_intp *offset_p)
+{
+ PyObject *key;
+ PyObject *tup;
+ PyArray_Descr *new;
+ npy_intp offset;
+
+ key = PyTuple_GET_ITEM(descr->names, i);
+ tup = PyDict_GetItem(descr->fields, key);
+ if (_unpack_field(tup, &new, &offset) < 0) {
+ return -1;
+ }
+
+ ((PyArrayObject_fields *)(arr))->descr = new;
+ if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+ else {
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+
+ *offset_p = offset;
+ return 0;
+}
+
+/* Helper function for VOID_setitem, which uses the copyswap or casting code to
+ * copy structured datatypes between numpy arrays or scalars.
+ */
+static int
+_copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata,
+ PyArray_Descr *srcdescr, char *srcdata){
+ PyArrayObject_fields dummy_struct;
+ PyArrayObject *dummy = (PyArrayObject *)&dummy_struct;
+ npy_int names_size = PyTuple_GET_SIZE(dstdescr->names);
+ npy_intp offset;
+ npy_int i;
+ int ret;
+
+ /* Fast path if dtypes are equal */
+ if (PyArray_EquivTypes(srcdescr, dstdescr)) {
+ for (i = 0; i < names_size; i++) {
+ /* neither line can ever fail, in principle */
+ if (_setup_field(i, dstdescr, dummy, &offset)) {
+ return -1;
+ }
+ PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
+ srcdata + offset, 0, dummy);
+ }
+ return 0;
+ }
+
+ /* Slow path */
+ ret = PyArray_CastRawArrays(1, srcdata, dstdata, 0, 0,
+ srcdescr, dstdescr, 0);
+ if (ret != NPY_SUCCEED) {
+ return -1;
+ }
+ return 0;
+}
+
static int
VOID_setitem(PyObject *op, void *input, void *vap)
{
char *ip = input;
PyArrayObject *ap = vap;
PyArray_Descr *descr;
+ int flags;
int itemsize=PyArray_DESCR(ap)->elsize;
int res;
descr = PyArray_DESCR(ap);
- if (descr->names && PyTuple_Check(op)) {
- PyObject *key;
- PyObject *names;
- int i, n;
- PyObject *tup;
- int savedflags;
-
- res = 0;
- /* get the names from the fields dictionary*/
- names = descr->names;
- n = PyTuple_GET_SIZE(names);
- if (PyTuple_GET_SIZE(op) != n) {
- PyErr_SetString(PyExc_ValueError,
- "size of tuple must match number of fields.");
- return -1;
- }
- savedflags = PyArray_FLAGS(ap);
- for (i = 0; i < n; i++) {
- PyArray_Descr *new;
- npy_intp offset;
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(descr->fields, key);
- if (_unpack_field(tup, &new, &offset) < 0) {
- ((PyArrayObject_fields *)ap)->descr = descr;
+ flags = PyArray_FLAGS(ap);
+ if (PyDataType_HASFIELDS(descr)) {
+ PyObject *errmsg;
+ npy_int i;
+ npy_intp offset;
+ int failed = 0;
+
+ /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */
+ if (PyArray_Check(op)) {
+ PyArrayObject *oparr = (PyArrayObject *)op;
+ if (PyArray_SIZE(oparr) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "setting an array element with a sequence.");
return -1;
}
- /*
- * TODO: temporarily modifying the array like this
- * is bad coding style, should be changed.
- */
- ((PyArrayObject_fields *)ap)->descr = new;
- /* remember to update alignment flags */
- if ((new->alignment > 1)
- && ((((npy_intp)(ip+offset)) % new->alignment) != 0)) {
- PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED);
+ return _copy_and_return_void_setitem(descr, ip,
+ PyArray_DESCR(oparr), PyArray_DATA(oparr));
+ }
+ else if (PyArray_IsScalar(op, Void)) {
+ PyArray_Descr *srcdescr = ((PyVoidScalarObject *)op)->descr;
+ char *srcdata = ((PyVoidScalarObject *)op)->obval;
+ return _copy_and_return_void_setitem(descr, ip, srcdescr, srcdata);
+ }
+ else if (PyTuple_Check(op)) {
+ /* if it's a tuple, copy field-by-field to ap, */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ if (names_size != PyTuple_Size(op)) {
+ errmsg = PyUString_FromFormat(
+ "could not assign tuple of length %zd to structure "
+ "with %" NPY_INTP_FMT " fields.",
+ PyTuple_Size(op), names_size);
+ PyErr_SetObject(PyExc_ValueError, errmsg);
+ Py_DECREF(errmsg);
+ return -1;
}
- else {
- PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED);
+
+ for (i = 0; i < names_size; i++) {
+ PyObject *item;
+
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ item = PyTuple_GetItem(op, i);
+ if (item == NULL) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_DESCR(ap)->f->setitem(item, ip + offset, ap) < 0) {
+ failed = 1;
+ break;
+ }
}
- res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap);
- ((PyArrayObject_fields *)ap)->flags = savedflags;
- if (res < 0) {
- break;
+ }
+ else {
+ /* Otherwise must be non-void scalar. Try to assign to each field */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ for (i = 0; i < names_size; i++) {
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_DESCR(ap)->f->setitem(op, ip + offset, ap) < 0) {
+ failed = 1;
+ break;
+ }
}
}
- ((PyArrayObject_fields *)ap)->descr = descr;
- return res;
- }
- if (descr->subarray) {
+ /* reset clobbered attributes */
+ ((PyArrayObject_fields *)(ap))->descr = descr;
+ ((PyArrayObject_fields *)(ap))->flags = flags;
+
+ if (failed) {
+ return -1;
+ }
+ return 0;
+ }
+ else if (PyDataType_HASSUBARRAY(descr)) {
/* copy into an array of the same basic type */
PyArray_Dims shape = {NULL, -1};
PyArrayObject *ret;
if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
return -1;
@@ -846,7 +954,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
descr->subarray->base, shape.len, shape.ptr,
NULL, ip, PyArray_FLAGS(ap), NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
if (!ret) {
return -1;
}
@@ -861,19 +969,17 @@ VOID_setitem(PyObject *op, void *input, void *vap)
return res;
}
- /* Default is to use buffer interface to set item */
+ /*
+ * Fall through case - non-structured void datatype. This is a very
+ * undiscerning case: It interprets any object as a buffer
+ * and reads as many bytes as possible, padding with 0.
+ */
{
const void *buffer;
Py_ssize_t buflen;
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "Setting void-array with object members using buffer.");
- return -1;
- }
res = PyObject_AsReadBuffer(op, &buffer, &buflen);
if (res == -1) {
- goto fail;
+ return -1;
}
memcpy(ip, buffer, PyArray_MIN(buflen, itemsize));
if (itemsize > buflen) {
@@ -881,9 +987,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
}
}
return 0;
-
-fail:
- return -1;
}
static PyObject *
@@ -903,7 +1006,7 @@ DATETIME_getitem(void *ip, void *vap)
dt = *((npy_datetime *)ip);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&dt, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&dt, ip, PyArray_ISBYTESWAPPED(ap), ap);
}
return convert_datetime_to_pyobject(dt, meta);
@@ -927,7 +1030,7 @@ TIMEDELTA_getitem(void *ip, void *vap)
td = *((npy_timedelta *)ip);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&td, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&td, ip, PyArray_ISBYTESWAPPED(ap), ap);
}
return convert_timedelta_to_pyobject(td, meta);
@@ -958,8 +1061,8 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap)
*((npy_datetime *)ov)=temp;
}
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp,
- !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
@@ -990,7 +1093,8 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap)
*((npy_timedelta *)ov)=temp;
}
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
@@ -2374,7 +2478,8 @@ static npy_bool
*/
@type@ tmp;
#if @isfloat@
- PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap),
+ ap);
#else
memcpy(&tmp, ip, sizeof(@type@));
#endif
@@ -2397,7 +2502,8 @@ static npy_bool
}
else {
@type@ tmp;
- PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap),
+ ap);
return (npy_bool) ((tmp.real != 0) || (tmp.imag != 0));
}
}
@@ -2459,13 +2565,13 @@ UNICODE_nonzero (npy_ucs4 *ip, PyArrayObject *ap)
npy_bool seen_null = NPY_FALSE;
char *buffer = NULL;
- if ((!PyArray_ISNOTSWAPPED(ap)) || (!PyArray_ISALIGNED(ap))) {
+ if (PyArray_ISBYTESWAPPED(ap) || !PyArray_ISALIGNED(ap)) {
buffer = PyArray_malloc(PyArray_DESCR(ap)->elsize);
if (buffer == NULL) {
return nonz;
}
memcpy(buffer, ip, PyArray_DESCR(ap)->elsize);
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(buffer, len, 4);
}
ip = (npy_ucs4 *)buffer;
@@ -2744,6 +2850,15 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
* the alignment of pointers, so it doesn't need to be handled
* here.
*/
+
+ int ret;
+ /*
+ * work around gh-3879, we cannot abort an in-progress quicksort
+ * so at least do not raise again
+ */
+ if (PyErr_Occurred()) {
+ return 0;
+ }
if ((*ip1 == NULL) || (*ip2 == NULL)) {
if (ip1 == ip2) {
return 1;
@@ -2754,7 +2869,12 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
return 1;
}
- if (PyObject_RichCompareBool(*ip1, *ip2, Py_LT) == 1) {
+ ret = PyObject_RichCompareBool(*ip1, *ip2, Py_LT);
+ if (ret < 0) {
+ /* error occurred, avoid the next call to PyObject_RichCompareBool */
+ return 0;
+ }
+ if (ret == 1) {
return -1;
}
else if (PyObject_RichCompareBool(*ip1, *ip2, Py_GT) == 1) {
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index 3b0b2f4f6..8432ae5cf 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -456,7 +456,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
if (numbytes == 0 || l == 0) {
Py_DECREF(ap1);
Py_DECREF(ap2);
- return PyArray_Return(out_buf);
+ Py_DECREF(out_buf);
+ return PyArray_Return(result);
}
if (ap2shape == _scalar) {
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 87a32d150..36ef1d1c4 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -10,6 +10,8 @@
#include "npy_config.h"
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "lowlevel_strided_loops.h" /* for npy_bswap8 */
+#include "alloc.h"
+#include "common.h"
/*
@@ -579,7 +581,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
lval = PyFloat_AsDouble(left);
- if ((lval == -1) && PyErr_Occurred()) {
+ if (error_converting(lval)) {
goto fail;
}
}
@@ -588,7 +590,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
rval = PyFloat_AsDouble(right);
- if ((rval == -1) && PyErr_Occurred()) {
+ if (error_converting(rval)) {
goto fail;
}
}
@@ -735,11 +737,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
lval.real = PyComplex_RealAsDouble(left);
- if ((lval.real == -1) && PyErr_Occurred()) {
+ if (error_converting(lval.real)) {
goto fail;
}
lval.imag = PyComplex_ImagAsDouble(left);
- if ((lval.imag == -1) && PyErr_Occurred()) {
+ if (error_converting(lval.imag)) {
goto fail;
}
}
@@ -749,11 +751,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
rval.real = PyComplex_RealAsDouble(right);
- if ((rval.real == -1) && PyErr_Occurred()) {
+ if (error_converting(rval.real)) {
goto fail;
}
rval.imag = PyComplex_ImagAsDouble(right);
- if ((rval.imag == -1) && PyErr_Occurred()) {
+ if (error_converting(rval.imag)) {
goto fail;
}
}
@@ -1091,7 +1093,7 @@ arr_ravel_multi_index(PyObject *self, PyObject *args, PyObject *kwds)
for (i = 0; i < dimensions.len; ++i) {
Py_XDECREF(op[i]);
}
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return PyArray_Return(ret);
@@ -1100,7 +1102,7 @@ fail:
for (i = 0; i < dimensions.len; ++i) {
Py_XDECREF(op[i]);
}
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return NULL;
}
@@ -1352,7 +1354,7 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
Py_DECREF(ret_arr);
Py_XDECREF(indices);
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return ret_tuple;
@@ -1362,7 +1364,7 @@ fail:
Py_XDECREF(ret_arr);
Py_XDECREF(dtype);
Py_XDECREF(indices);
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return NULL;
}
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 3689bbada..2bb1cbfc1 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -15,6 +15,7 @@
#include "arraytypes.h"
#include "conversion_utils.h"
+#include "alloc.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
@@ -119,7 +120,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
return NPY_FAIL;
}
if (len > 0) {
- seq->ptr = PyDimMem_NEW(len);
+ seq->ptr = npy_alloc_cache_dim(len);
if (seq->ptr == NULL) {
PyErr_NoMemory();
return NPY_FAIL;
@@ -128,7 +129,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
seq->len = len;
nd = PyArray_IntpFromIndexSequence(obj, (npy_intp *)seq->ptr, len);
if (nd == -1 || nd != len) {
- PyDimMem_FREE(seq->ptr);
+ npy_free_cache_dim_obj(*seq);
seq->ptr = NULL;
return NPY_FAIL;
}
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 1a87234ce..212da892d 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -13,6 +13,7 @@
#include "npy_pycompat.h"
+#include "common.h"
#include "arrayobject.h"
#include "ctors.h"
#include "mapping.h"
@@ -411,7 +412,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
else if (PyLong_Check(obj) || PyInt_Check(obj)) {
/* Try long long before unsigned long long */
npy_longlong ll_v = PyLong_AsLongLong(obj);
- if (ll_v == -1 && PyErr_Occurred()) {
+ if (error_converting(ll_v)) {
/* Long long failed, try unsigned long long */
npy_ulonglong ull_v;
PyErr_Clear();
@@ -441,7 +442,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
/* Python float */
else if (PyFloat_Check(obj)) {
npy_double v = PyFloat_AsDouble(obj);
- if (v == -1 && PyErr_Occurred()) {
+ if (error_converting(v)) {
return -1;
}
value = (char *)value_buffer;
@@ -457,11 +458,11 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
npy_double re, im;
re = PyComplex_RealAsDouble(obj);
- if (re == -1 && PyErr_Occurred()) {
+ if (error_converting(re)) {
return -1;
}
im = PyComplex_ImagAsDouble(obj);
- if (im == -1 && PyErr_Occurred()) {
+ if (error_converting(im)) {
return -1;
}
value = (char *)value_buffer;
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 7eae0beaa..c9b3125ae 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1529,12 +1529,6 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
if (!writeable) {
tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
if (tmp != Py_NotImplemented) {
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
- "array interface object") < 0) {
- Py_DECREF(tmp);
- return -1;
- }
*out_arr = (PyArrayObject *)tmp;
return (*out_arr) == NULL ? -1 : 0;
}
@@ -1860,7 +1854,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
PyObject *obj;
if (requires & NPY_ARRAY_NOTSWAPPED) {
if (!descr && PyArray_Check(op) &&
- !PyArray_ISNBO(PyArray_DESCR((PyArrayObject *)op)->byteorder)) {
+ PyArray_ISBYTESWAPPED((PyArrayObject* )op)) {
descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op));
}
else if (descr && !PyArray_ISNBO(descr->byteorder)) {
@@ -2896,7 +2890,7 @@ PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
/*
* PyArray_NewFromDescr steals a ref,
- * but we need to look at type later.
+ * but we need to look at type later.
* */
Py_INCREF(type);
@@ -3010,7 +3004,7 @@ PyArray_Arange(double start, double stop, double step, int type_num)
}
/*
- * the formula is len = (intp) ceil((start - stop) / step);
+ * the formula is len = (intp) ceil((stop - start) / step);
*/
static npy_intp
_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx)
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 3cf9a2bd5..93babe8bd 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -20,6 +20,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "common.h"
#include "numpy/arrayscalars.h"
#include "methods.h"
#include "_datetime.h"
@@ -1718,8 +1719,6 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2)
* a date time unit enum value. The 'metastr' parameter
* is used for error messages, and may be NULL.
*
- * Generic units have no representation as a string in this form.
- *
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
@@ -1761,6 +1760,9 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
return NPY_FR_as;
}
}
+ else if (len == 7 && !strncmp(str, "generic", 7)) {
+ return NPY_FR_GENERIC;
+ }
/* If nothing matched, it's an error */
if (metastr == NULL) {
@@ -1802,7 +1804,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta)
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta)
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle)
{
char *basestr = NULL;
Py_ssize_t len = 0, tuple_size;
@@ -1853,13 +1856,62 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
/* Convert the values to longs */
out_meta->num = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 1));
- if (out_meta->num == -1 && PyErr_Occurred()) {
+ if (error_converting(out_meta->num)) {
return -1;
}
- if (tuple_size == 4) {
+ /*
+ * The event metadata was removed way back in numpy 1.7 (cb4545), but was
+ * not deprecated at the time.
+ */
+
+ /* (unit, num, event) */
+ if (tuple_size == 3) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 3-tuple as (unit, num, event), the event "
+ "is ignored (since 1.7) - use (unit, num) instead") < 0) {
+ return -1;
+ }
+ }
+ /* (unit, num, den, event) */
+ else if (tuple_size == 4) {
+ PyObject *event = PyTuple_GET_ITEM(tuple, 3);
+ if (from_pickle) {
+ /* if (event == 1) */
+ PyObject *one = PyLong_FromLong(1);
+ int equal_one;
+ if (one == NULL) {
+ return -1;
+ }
+ equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ if (equal_one == -1) {
+ return -1;
+ }
+
+ /* if the event data is not 1, it had semantics different to how
+ * datetime types now behave, which are no longer respected.
+ */
+ if (!equal_one) {
+ if (PyErr_WarnEx(PyExc_UserWarning,
+ "Loaded pickle file contains non-default event data "
+ "for a datetime type, which has been ignored since 1.7",
+ 1) < 0) {
+ return -1;
+ }
+ }
+ }
+ else if (event != Py_None) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 4-tuple as (unit, num, den, event), the "
+ "event argument is ignored (since 1.7), so should be None"
+ ) < 0) {
+ return -1;
+ }
+ }
den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2));
- if (den == -1 && PyErr_Occurred()) {
+ if (error_converting(den)) {
return -1;
}
}
@@ -1895,8 +1947,8 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
Py_ssize_t len = 0;
if (PyTuple_Check(obj)) {
- return convert_datetime_metadata_tuple_to_datetime_metadata(obj,
- out_meta);
+ return convert_datetime_metadata_tuple_to_datetime_metadata(
+ obj, out_meta, NPY_FALSE);
}
/* Get an ASCII string */
@@ -2126,7 +2178,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->year = PyInt_AsLong(tmp);
- if (out->year == -1 && PyErr_Occurred()) {
+ if (error_converting(out->year)) {
Py_DECREF(tmp);
return -1;
}
@@ -2138,7 +2190,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->month = PyInt_AsLong(tmp);
- if (out->month == -1 && PyErr_Occurred()) {
+ if (error_converting(out->month)) {
Py_DECREF(tmp);
return -1;
}
@@ -2150,7 +2202,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->day = PyInt_AsLong(tmp);
- if (out->day == -1 && PyErr_Occurred()) {
+ if (error_converting(out->day)) {
Py_DECREF(tmp);
return -1;
}
@@ -2184,7 +2236,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->hour = PyInt_AsLong(tmp);
- if (out->hour == -1 && PyErr_Occurred()) {
+ if (error_converting(out->hour)) {
Py_DECREF(tmp);
return -1;
}
@@ -2196,7 +2248,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->min = PyInt_AsLong(tmp);
- if (out->min == -1 && PyErr_Occurred()) {
+ if (error_converting(out->min)) {
Py_DECREF(tmp);
return -1;
}
@@ -2208,7 +2260,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->sec = PyInt_AsLong(tmp);
- if (out->sec == -1 && PyErr_Occurred()) {
+ if (error_converting(out->sec)) {
Py_DECREF(tmp);
return -1;
}
@@ -2220,7 +2272,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->us = PyInt_AsLong(tmp);
- if (out->us == -1 && PyErr_Occurred()) {
+ if (error_converting(out->us)) {
Py_DECREF(tmp);
return -1;
}
@@ -2271,7 +2323,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
seconds_offset = PyInt_AsLong(tmp);
- if (seconds_offset == -1 && PyErr_Occurred()) {
+ if (error_converting(seconds_offset)) {
Py_DECREF(tmp);
return -1;
}
@@ -2456,7 +2508,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
PyArray_DESCR(arr)->f->copyswap(&dt,
PyArray_DATA(arr),
- !PyArray_ISNOTSWAPPED(arr),
+ PyArray_ISBYTESWAPPED(arr),
obj);
/* Copy the value directly if units weren't specified */
@@ -2654,7 +2706,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
PyArray_DESCR(arr)->f->copyswap(&dt,
PyArray_DATA(arr),
- !PyArray_ISNOTSWAPPED(arr),
+ PyArray_ISBYTESWAPPED(arr),
obj);
/* Copy the value directly if units weren't specified */
@@ -2694,7 +2746,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
days = PyLong_AsLongLong(tmp);
- if (days == -1 && PyErr_Occurred()) {
+ if (error_converting(days)) {
Py_DECREF(tmp);
return -1;
}
@@ -2706,7 +2758,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
seconds = PyInt_AsLong(tmp);
- if (seconds == -1 && PyErr_Occurred()) {
+ if (error_converting(seconds)) {
Py_DECREF(tmp);
return -1;
}
@@ -2718,7 +2770,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
useconds = PyInt_AsLong(tmp);
- if (useconds == -1 && PyErr_Occurred()) {
+ if (error_converting(useconds)) {
Py_DECREF(tmp);
return -1;
}
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 7eaf0cd7a..7a26868e8 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -18,6 +18,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "common.h"
#include "numpy/arrayscalars.h"
#include "lowlevel_strided_loops.h"
#include "_datetime.h"
@@ -168,7 +169,7 @@ invalid_weekmask_string:
}
val = PyInt_AsLong(f);
- if (val == -1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
Py_DECREF(f);
Py_DECREF(obj);
return 0;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 12735513c..1ae6e34a6 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -16,6 +16,7 @@
#include "_datetime.h"
#include "common.h"
#include "descriptor.h"
+#include "alloc.h"
/*
* offset: A starting offset.
@@ -306,7 +307,7 @@ _convert_from_tuple(PyObject *obj)
int i;
if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > NPY_MAXDIMS)) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
goto fail;
@@ -320,12 +321,12 @@ _convert_from_tuple(PyObject *obj)
&& PyNumber_Check(val))
|| (shape.len == 0
&& PyTuple_Check(val))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return type;
}
newdescr = PyArray_DescrNewFromType(NPY_VOID);
if (newdescr == NULL) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
@@ -335,14 +336,14 @@ _convert_from_tuple(PyObject *obj)
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
"dimension smaller then zero.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
if (shape.ptr[i] > NPY_MAX_INT) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
"dimension does not fit into a C int.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
}
@@ -351,12 +352,12 @@ _convert_from_tuple(PyObject *obj)
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: dtype size in "
"bytes must fit into a C int.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
newdescr->elsize = type->elsize * items;
if (newdescr->elsize == -1) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
@@ -381,7 +382,7 @@ _convert_from_tuple(PyObject *obj)
*/
newdescr->subarray->shape = PyTuple_New(shape.len);
if (newdescr->subarray->shape == NULL) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
for (i=0; i < shape.len; i++) {
@@ -391,12 +392,12 @@ _convert_from_tuple(PyObject *obj)
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
Py_DECREF(newdescr->subarray->shape);
newdescr->subarray->shape = NULL;
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
}
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
type = newdescr;
}
return type;
@@ -1130,7 +1131,7 @@ _convert_from_dict(PyObject *obj, int align)
goto fail;
}
offset = PyArray_PyIntAsInt(off);
- if (offset == -1 && PyErr_Occurred()) {
+ if (error_converting(offset)) {
Py_DECREF(off);
Py_DECREF(tup);
Py_DECREF(ind);
@@ -1269,7 +1270,7 @@ _convert_from_dict(PyObject *obj, int align)
PyErr_Clear();
} else {
itemsize = (int)PyArray_PyIntAsInt(tmp);
- if (itemsize == -1 && PyErr_Occurred()) {
+ if (error_converting(itemsize)) {
Py_DECREF(new);
return NULL;
}
@@ -2886,7 +2887,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (convert_datetime_metadata_tuple_to_datetime_metadata(
PyTuple_GET_ITEM(metadata, 1),
- &temp_dt_data) < 0) {
+ &temp_dt_data,
+ NPY_TRUE) < 0) {
return NULL;
}
@@ -3118,7 +3120,7 @@ static PyMethodDef arraydescr_methods[] = {
*
* Returns 1 if it has a simple layout, 0 otherwise.
*/
-static int
+NPY_NO_EXPORT int
is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype)
{
PyObject *names, *fields, *key, *tup, *title;
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index ff1fc980a..f95041195 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -10,6 +10,10 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
NPY_NO_EXPORT PyArray_Descr *
_arraydescr_fromobj(PyObject *obj);
+
+NPY_NO_EXPORT int
+is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
+
/*
* Creates a string repr of the dtype, excluding the 'dtype()' part
* surrounding the object. This object may be a string, a list, or
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 58739b831..9c27255aa 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -25,9 +25,11 @@
#include "ctors.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "descriptor.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
+#include "alloc.h"
#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128
@@ -2342,7 +2344,7 @@ get_subarray_transfer_function(int aligned,
if (PyDataType_HASSUBARRAY(dst_dtype)) {
if (!(PyArray_IntpConverter(dst_dtype->subarray->shape,
&dst_shape))) {
- PyDimMem_FREE(src_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
PyErr_SetString(PyExc_ValueError,
"invalid subarray shape");
return NPY_FAIL;
@@ -2355,8 +2357,8 @@ get_subarray_transfer_function(int aligned,
* Just a straight one-element copy.
*/
if (dst_size == 1 && src_size == 1) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return PyArray_GetDTypeTransferFunction(aligned,
src_stride, dst_stride,
@@ -2367,8 +2369,8 @@ get_subarray_transfer_function(int aligned,
}
/* Copy the src value to all the dst values */
else if (src_size == 1) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return get_one_to_n_transfer_function(aligned,
src_stride, dst_stride,
@@ -2382,8 +2384,8 @@ get_subarray_transfer_function(int aligned,
else if (src_shape.len == dst_shape.len &&
PyArray_CompareLists(src_shape.ptr, dst_shape.ptr,
src_shape.len)) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return get_n_to_n_transfer_function(aligned,
src_stride, dst_stride,
@@ -2407,8 +2409,8 @@ get_subarray_transfer_function(int aligned,
out_stransfer, out_transferdata,
out_needs_api);
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return ret;
}
}
@@ -2520,7 +2522,7 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
/*
* Handles fields transfer. To call this, at least one of the dtypes
- * must have fields
+ * must have fields. Does not take care of object<->structure conversion
*/
static int
get_fields_transfer_function(int aligned,
@@ -2531,22 +2533,26 @@ get_fields_transfer_function(int aligned,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
- PyObject *names, *key, *tup, *title;
+ PyObject *key, *tup, *title;
PyArray_Descr *src_fld_dtype, *dst_fld_dtype;
- npy_int i, names_size, field_count, structsize;
+ npy_int i, field_count, structsize;
int src_offset, dst_offset;
_field_transfer_data *data;
_single_field_transfer *fields;
+ int failed = 0;
+
+ /*
+ * There are three cases to take care of: 1. src is non-structured,
+ * 2. dst is non-structured, or 3. both are structured.
+ */
- /* Copy the src value to all the fields of dst */
+ /* 1. src is non-structured. Copy the src value to all the fields of dst */
if (!PyDataType_HASFIELDS(src_dtype)) {
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- field_count = names_size;
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
(field_count + 1) * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2556,8 +2562,8 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
tup = PyDict_GetItem(dst_dtype->fields, key);
if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
&dst_offset, &title)) {
@@ -2583,7 +2589,7 @@ get_fields_transfer_function(int aligned,
}
/*
- * If the references should be removed from src, add
+ * If references should be decrefd in src, add
* another transfer function to do that.
*/
if (move_references && PyDataType_REFCHK(src_dtype)) {
@@ -2611,24 +2617,19 @@ get_fields_transfer_function(int aligned,
return NPY_SUCCEED;
}
- /* Copy the value of the first field to dst */
- else if (!PyDataType_HASFIELDS(dst_dtype)) {
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- /*
- * If DECREF is needed on source fields, may need
- * to process all the fields
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + 1;
- }
- else {
- field_count = 1;
+ /* 2. dst is non-structured. Allow transfer from single-field src to dst */
+ if (!PyDataType_HASFIELDS(dst_dtype)) {
+ if (PyTuple_GET_SIZE(src_dtype->names) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Can't cast from structure to non-structure, except if the "
+ "structure only has a single field.");
+ return NPY_FAIL;
}
+
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
+ 1 * sizeof(_single_field_transfer);
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2638,286 +2639,102 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- key = PyTuple_GET_ITEM(names, 0);
+ key = PyTuple_GET_ITEM(src_dtype->names, 0);
tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- PyArray_free(data);
+ if (!PyArg_ParseTuple(tup, "Oi|O",
+ &src_fld_dtype, &src_offset, &title)) {
return NPY_FAIL;
}
- field_count = 0;
- /*
- * Special case bool type, the existence of fields implies True
- *
- * TODO: Perhaps a better behavior would be to combine all the
- * input fields with an OR? The same would apply to subarrays.
- */
- if (dst_dtype->type_num == NPY_BOOL) {
- if (get_bool_setdstone_transfer_function(dst_stride,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = 0;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = 0;
- field_count++;
-
- /* If the src field has references, may need to clear them */
- if (move_references && PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- NPY_AUXDATA_FREE(fields[0].data);
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
- /* Transfer the first field to the output */
- else {
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_dtype,
- move_references,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- /*
- * If the references should be removed from src, add
- * more transfer functions to decrement the references
- * for all the other fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- for (i = 1; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_dtype,
+ move_references,
+ &fields[0].stransfer,
+ &fields[0].data,
+ out_needs_api) != NPY_SUCCEED) {
+ PyArray_free(data);
+ return NPY_FAIL;
}
+ fields[0].src_offset = src_offset;
+ fields[0].dst_offset = 0;
+ fields[0].src_itemsize = src_fld_dtype->elsize;
- data->field_count = field_count;
+ data->field_count = 1;
*out_stransfer = &_strided_to_strided_field_transfer;
*out_transferdata = (NpyAuxData *)data;
return NPY_SUCCEED;
}
- /* Match up the fields to copy */
- else {
- /* Keeps track of the names we already used */
- PyObject *used_names_dict = NULL;
- int cmpval;
-
- const char *msg =
- "Assignment between structured arrays with different field names "
- "will change in numpy 1.14.\n\n"
- "Previously fields in the dst would be set to the value of the "
- "identically-named field in the src. In numpy 1.14 fields will "
- "instead be assigned 'by position': The Nth field of the dst "
- "will be set to the Nth field of the src array.\n\n"
- "See the release notes for details";
- /*
- * 2016-09-19, 1.12
- * Warn if the field names of the dst and src are not
- * identical, since then behavior will change in 1.13.
- */
- cmpval = PyObject_RichCompareBool(src_dtype->names,
- dst_dtype->names, Py_EQ);
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- if (cmpval != 1) {
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
- return NPY_FAIL;
- }
- }
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ /* 3. Otherwise both src and dst are structured arrays */
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- /*
- * If DECREF is needed on source fields, will need
- * to also go through its fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + PyTuple_GET_SIZE(src_dtype->names);
- used_names_dict = PyDict_New();
- if (used_names_dict == NULL) {
- return NPY_FAIL;
- }
- }
- else {
- field_count = names_size;
- }
- structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
- data = (_field_transfer_data *)PyArray_malloc(structsize);
- if (data == NULL) {
- PyErr_NoMemory();
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- data->base.free = &_field_transfer_data_free;
- data->base.clone = &_field_transfer_data_clone;
- fields = &data->fields;
+ /* Match up the fields to copy (field-by-field transfer) */
+ if (PyTuple_GET_SIZE(src_dtype->names) != field_count) {
+ PyErr_SetString(PyExc_ValueError, "structures must have the same size");
+ return NPY_FAIL;
+ }
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(dst_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
- &dst_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (tup != NULL) {
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_fld_dtype,
- move_references,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = src_offset;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = src_fld_dtype->elsize;
+ /* Allocate the field-data structure and populate it */
+ structsize = sizeof(_field_transfer_data) +
+ field_count * sizeof(_single_field_transfer);
+ data = (_field_transfer_data *)PyArray_malloc(structsize);
+ if (data == NULL) {
+ PyErr_NoMemory();
+ return NPY_FAIL;
+ }
+ data->base.free = &_field_transfer_data_free;
+ data->base.clone = &_field_transfer_data_clone;
+ fields = &data->fields;
- if (used_names_dict != NULL) {
- PyDict_SetItem(used_names_dict, key, Py_True);
- }
- }
- else {
- if (get_setdstzero_transfer_function(0,
- dst_stride,
- dst_fld_dtype,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = 0;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = 0;
- }
+ /* set up the transfer function for each field */
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
+ tup = PyDict_GetItem(dst_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
+ &dst_offset, &title)) {
+ failed = 1;
+ break;
+ }
+ key = PyTuple_GET_ITEM(src_dtype->names, i);
+ tup = PyDict_GetItem(src_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
+ &src_offset, &title)) {
+ failed = 1;
+ break;
}
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- /* Use field_count to track additional functions added */
- field_count = names_size;
-
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- if (PyDict_GetItem(used_names_dict, key) == NULL) {
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize =
- src_fld_dtype->elsize;
- field_count++;
- }
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_fld_dtype,
+ move_references,
+ &fields[i].stransfer,
+ &fields[i].data,
+ out_needs_api) != NPY_SUCCEED) {
+ failed = 1;
+ break;
}
+ fields[i].src_offset = src_offset;
+ fields[i].dst_offset = dst_offset;
+ fields[i].src_itemsize = src_fld_dtype->elsize;
+ }
- Py_XDECREF(used_names_dict);
+ if (failed) {
+ for (i = i-1; i >= 0; --i) {
+ NPY_AUXDATA_FREE(fields[i].data);
+ }
+ PyArray_free(data);
+ return NPY_FAIL;
+ }
- data->field_count = field_count;
+ data->field_count = field_count;
- *out_stransfer = &_strided_to_strided_field_transfer;
- *out_transferdata = (NpyAuxData *)data;
+ *out_stransfer = &_strided_to_strided_field_transfer;
+ *out_transferdata = (NpyAuxData *)data;
- return NPY_SUCCEED;
- }
+ return NPY_SUCCEED;
}
static int
@@ -3371,7 +3188,7 @@ get_setdstzero_transfer_function(int aligned,
return NPY_FAIL;
}
dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(dst_shape);
/* Get a function for contiguous dst of the subarray type */
if (get_setdstzero_transfer_function(aligned,
@@ -3484,7 +3301,7 @@ get_decsrcref_transfer_function(int aligned,
return NPY_FAIL;
}
src_size = PyArray_MultiplyList(src_shape.ptr, src_shape.len);
- PyDimMem_FREE(src_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
/* Get a function for contiguous src of the subarray type */
if (get_decsrcref_transfer_function(aligned,
@@ -3648,8 +3465,10 @@ PyArray_GetDTypeTransferFunction(int aligned,
* If there are no references and the data types are equivalent,
* return a simple copy
*/
- if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
- PyArray_EquivTypes(src_dtype, dst_dtype)) {
+ if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
+ !PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
+ ( !PyDataType_HASFIELDS(dst_dtype) ||
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index ee9ee1abd..943b8aecf 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -2333,6 +2333,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
npy_intp coord, shape[2], strides[2][2];
char *ptrs[2][2], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2363,6 +2364,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]);
for (coord = shape[1]; coord > 0; --coord) {
sop(1, ptrs[0], strides[0], shape[0]);
@@ -2371,6 +2373,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
ptr = ptrs[1][1] + strides[1][1];
ptrs[0][1] = ptrs[1][1] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2381,6 +2384,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
npy_intp coords[2], shape[3], strides[3][2];
char *ptrs[3][2], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2414,6 +2418,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]);
for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) {
for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) {
sop(1, ptrs[0], strides[0], shape[0]);
@@ -2428,6 +2433,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
ptr = ptrs[2][1] + strides[2][1];
ptrs[0][1] = ptrs[1][1] = ptrs[2][1] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2438,6 +2444,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
npy_intp coord, shape[2], strides[2][3];
char *ptrs[2][3], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2468,6 +2475,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]);
for (coord = shape[1]; coord > 0; --coord) {
sop(2, ptrs[0], strides[0], shape[0]);
@@ -2478,6 +2486,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
ptr = ptrs[1][2] + strides[1][2];
ptrs[0][2] = ptrs[1][2] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2488,6 +2497,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
npy_intp coords[2], shape[3], strides[3][3];
char *ptrs[3][3], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2521,6 +2531,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]);
for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) {
for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) {
sop(2, ptrs[0], strides[0], shape[0]);
@@ -2539,6 +2550,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
ptr = ptrs[2][2] + strides[2][2];
ptrs[0][2] = ptrs[1][2] = ptrs[2][2] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 3ed1666ae..77d9b8c66 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -18,6 +18,7 @@
#include "getset.h"
#include "arrayobject.h"
#include "mem_overlap.h"
+#include "alloc.h"
/******************* array attribute get and set routines ******************/
@@ -65,12 +66,12 @@ array_shape_set(PyArrayObject *self, PyObject *val)
}
/* Free old dimensions and strides */
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
nd = PyArray_NDIM(ret);
((PyArrayObject_fields *)self)->nd = nd;
if (nd > 0) {
/* create new dimensions and strides */
- ((PyArrayObject_fields *)self)->dimensions = PyDimMem_NEW(3*nd);
+ ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(3*nd);
if (PyArray_DIMS(self) == NULL) {
Py_DECREF(ret);
PyErr_SetString(PyExc_MemoryError,"");
@@ -158,11 +159,11 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
NPY_ARRAY_ALIGNED);
- PyDimMem_FREE(newstrides.ptr);
+ npy_free_cache_dim_obj(newstrides);
return 0;
fail:
- PyDimMem_FREE(newstrides.ptr);
+ npy_free_cache_dim_obj(newstrides);
return -1;
}
@@ -436,12 +437,6 @@ static int
array_descr_set(PyArrayObject *self, PyObject *arg)
{
PyArray_Descr *newtype = NULL;
- npy_intp newdim;
- int i;
- char *msg = "new type not compatible with array.";
- PyObject *safe;
- static PyObject *checkfunc = NULL;
-
if (arg == NULL) {
PyErr_SetString(PyExc_AttributeError,
@@ -458,16 +453,18 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
/* check that we are not reinterpreting memory containing Objects. */
if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) {
+ static PyObject *checkfunc = NULL;
+ PyObject *safe;
+
npy_cache_import("numpy.core._internal", "_view_is_safe", &checkfunc);
if (checkfunc == NULL) {
- return -1;
+ goto fail;
}
safe = PyObject_CallFunction(checkfunc, "OO",
PyArray_DESCR(self), newtype);
if (safe == NULL) {
- Py_DECREF(newtype);
- return -1;
+ goto fail;
}
Py_DECREF(safe);
}
@@ -491,58 +488,76 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
}
- if ((newtype->elsize != PyArray_DESCR(self)->elsize) &&
- (PyArray_NDIM(self) == 0 ||
- !PyArray_ISONESEGMENT(self) ||
- PyDataType_HASSUBARRAY(newtype))) {
- goto fail;
- }
+ /* Changing the size of the dtype results in a shape change */
+ if (newtype->elsize != PyArray_DESCR(self)->elsize) {
+ int axis;
+ npy_intp newdim;
- /* Deprecate not C contiguous and a dimension changes */
- if (newtype->elsize != PyArray_DESCR(self)->elsize &&
- !PyArray_IS_C_CONTIGUOUS(self)) {
- /* 11/27/2015 1.11.0 */
- if (DEPRECATE("Changing the shape of non-C contiguous array by\n"
- "descriptor assignment is deprecated. To maintain\n"
- "the Fortran contiguity of a multidimensional Fortran\n"
- "array, use 'a.T.view(...).T' instead") < 0) {
- return -1;
+ /* forbidden cases */
+ if (PyArray_NDIM(self) == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Changing the dtype of a 0d array is only supported "
+ "if the itemsize is unchanged");
+ goto fail;
}
- }
-
- if (PyArray_IS_C_CONTIGUOUS(self)) {
- i = PyArray_NDIM(self) - 1;
- }
- else {
- i = 0;
- }
- if (newtype->elsize < PyArray_DESCR(self)->elsize) {
- /*
- * if it is compatible increase the size of the
- * dimension at end (or at the front for NPY_ARRAY_F_CONTIGUOUS)
- */
- if (PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
+ else if (PyDataType_HASSUBARRAY(newtype)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Changing the dtype to a subarray type is only supported "
+ "if the total itemsize is unchanged");
goto fail;
}
- newdim = PyArray_DESCR(self)->elsize / newtype->elsize;
- PyArray_DIMS(self)[i] *= newdim;
- PyArray_STRIDES(self)[i] = newtype->elsize;
- }
- else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
- /*
- * Determine if last (or first if NPY_ARRAY_F_CONTIGUOUS) dimension
- * is compatible
- */
- newdim = PyArray_DIMS(self)[i] * PyArray_DESCR(self)->elsize;
- if ((newdim % newtype->elsize) != 0) {
+
+ /* determine which axis to resize */
+ if (PyArray_IS_C_CONTIGUOUS(self)) {
+ axis = PyArray_NDIM(self) - 1;
+ }
+ else if (PyArray_IS_F_CONTIGUOUS(self)) {
+ /* 2015-11-27 1.11.0, gh-6747 */
+ if (DEPRECATE(
+ "Changing the shape of an F-contiguous array by "
+ "descriptor assignment is deprecated. To maintain the "
+ "Fortran contiguity of a multidimensional Fortran "
+ "array, use 'a.T.view(...).T' instead") < 0) {
+ goto fail;
+ }
+ axis = 0;
+ }
+ else {
+ /* Don't mention the deprecated F-contiguous support */
+ PyErr_SetString(PyExc_ValueError,
+ "To change to a dtype of a different size, the array must "
+ "be C-contiguous");
goto fail;
}
- PyArray_DIMS(self)[i] = newdim / newtype->elsize;
- PyArray_STRIDES(self)[i] = newtype->elsize;
+
+ if (newtype->elsize < PyArray_DESCR(self)->elsize) {
+ /* if it is compatible, increase the size of the relevant axis */
+ if (PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "When changing to a smaller dtype, its size must be a "
+ "divisor of the size of original dtype");
+ goto fail;
+ }
+ newdim = PyArray_DESCR(self)->elsize / newtype->elsize;
+ PyArray_DIMS(self)[axis] *= newdim;
+ PyArray_STRIDES(self)[axis] = newtype->elsize;
+ }
+ else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
+ /* if it is compatible, decrease the size of the relevant axis */
+ newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize;
+ if ((newdim % newtype->elsize) != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "When changing to a larger dtype, its size must be a "
+ "divisor of the total size in bytes of the last axis "
+ "of the array.");
+ goto fail;
+ }
+ PyArray_DIMS(self)[axis] = newdim / newtype->elsize;
+ PyArray_STRIDES(self)[axis] = newtype->elsize;
+ }
}
- /* fall through -- adjust type*/
- Py_DECREF(PyArray_DESCR(self));
+ /* Viewing as a subarray increases the number of dimensions */
if (PyDataType_HASSUBARRAY(newtype)) {
/*
* create new array object from data and update
@@ -560,7 +575,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
if (temp == NULL) {
return -1;
}
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp);
((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp);
((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp);
@@ -572,12 +587,12 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
Py_DECREF(temp);
}
+ Py_DECREF(PyArray_DESCR(self));
((PyArrayObject_fields *)self)->descr = newtype;
PyArray_UpdateFlags(self, NPY_ARRAY_UPDATE_ALL);
return 0;
fail:
- PyErr_SetString(PyExc_ValueError, msg);
Py_DECREF(newtype);
return -1;
}
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index c88cdfdcb..21bcd6cad 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -23,6 +23,7 @@
#include "npy_sort.h"
#include "npy_partition.h"
#include "npy_binsearch.h"
+#include "alloc.h"
/*NUMPY_API
* Take
@@ -765,7 +766,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
Py_XDECREF(mps[i]);
}
Py_DECREF(ap);
- PyDataMem_FREE(mps);
+ npy_free_cache(mps, n * sizeof(mps[0]));
if (out != NULL && out != obj) {
Py_INCREF(out);
Py_DECREF(obj);
@@ -779,7 +780,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
Py_XDECREF(mps[i]);
}
Py_XDECREF(ap);
- PyDataMem_FREE(mps);
+ npy_free_cache(mps, n * sizeof(mps[0]));
PyArray_XDECREF_ERR(obj);
return NULL;
}
@@ -827,7 +828,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
if (needcopy) {
- buffer = PyDataMem_NEW(N * elsize);
+ buffer = npy_alloc_cache(N * elsize);
if (buffer == NULL) {
ret = -1;
goto fail;
@@ -869,12 +870,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
if (part == NULL) {
ret = sort(bufptr, N, op);
-#if defined(NPY_PY3K)
- /* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -885,12 +883,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
npy_intp i;
for (i = 0; i < nkth; ++i) {
ret = part(bufptr, N, kth[i], pivots, &npiv, op);
-#if defined(NPY_PY3K)
- /* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -914,7 +909,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
}
fail:
- PyDataMem_FREE(buffer);
+ npy_free_cache(buffer, N * elsize);
NPY_END_THREADS_DESCR(PyArray_DESCR(op));
if (ret < 0 && !PyErr_Occurred()) {
/* Out of memory during sorting or buffer creation */
@@ -978,7 +973,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
if (needcopy) {
- valbuffer = PyDataMem_NEW(N * elsize);
+ valbuffer = npy_alloc_cache(N * elsize);
if (valbuffer == NULL) {
ret = -1;
goto fail;
@@ -986,7 +981,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
}
if (needidxbuffer) {
- idxbuffer = (npy_intp *)PyDataMem_NEW(N * sizeof(npy_intp));
+ idxbuffer = (npy_intp *)npy_alloc_cache(N * sizeof(npy_intp));
if (idxbuffer == NULL) {
ret = -1;
goto fail;
@@ -1076,8 +1071,8 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
}
fail:
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(idxbuffer);
+ npy_free_cache(valbuffer, N * elsize);
+ npy_free_cache(idxbuffer, N * sizeof(npy_intp));
NPY_END_THREADS_DESCR(PyArray_DESCR(op));
if (ret < 0) {
if (!PyErr_Occurred()) {
@@ -1493,13 +1488,13 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
char *valbuffer, *indbuffer;
int *swaps;
- valbuffer = PyDataMem_NEW(N*maxelsize);
+ valbuffer = npy_alloc_cache(N * maxelsize);
if (valbuffer == NULL) {
goto fail;
}
- indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
+ indbuffer = npy_alloc_cache(N * sizeof(npy_intp));
if (indbuffer == NULL) {
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
goto fail;
}
swaps = malloc(n*sizeof(int));
@@ -1531,8 +1526,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
#else
if (rcode < 0) {
#endif
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(valbuffer, N * maxelsize);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
free(swaps);
goto fail;
}
@@ -1542,8 +1537,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
sizeof(npy_intp), N, sizeof(npy_intp));
PyArray_ITER_NEXT(rit);
}
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(valbuffer, N * maxelsize);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
free(swaps);
}
else {
@@ -2330,7 +2325,7 @@ finish:
return NULL;
}
- for (i = 0; i < ndim; ++i) {
+ for (i = 0; i < PyArray_NDIM(ret); ++i) {
if (PyArray_DIMS(ret)[i] == 0) {
is_empty = 1;
break;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 01910a657..b8cf4edf6 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -926,7 +926,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
goto skip;
}
start = PyArray_PyIntAsIntp(ind);
- if (start==-1 && PyErr_Occurred()) {
+ if (error_converting(start)) {
PyErr_Clear();
}
else {
@@ -1055,7 +1055,28 @@ static PyMappingMethods iter_as_mapping = {
};
-
+/* Two options:
+ * 1) underlying array is contiguous
+ * -- return 1-d wrapper around it
+ * 2) underlying array is not contiguous
+ * -- make new 1-d contiguous array with updateifcopy flag set
+ * to copy back to the old array
+ *
+ * If underlying array is readonly, then we make the output array readonly
+ * and updateifcopy does not apply.
+ *
+ * Changed 2017-07-21, 1.14.0.
+ *
+ * In order to start the process of removing UPDATEIFCOPY, see gh-7054, the
+ * behavior is changed to always return a non-writeable copy when the base
+ * array is non-contiguous. Doing that will hopefully smoke out those few
+ * folks who assign to the result with the expectation that the base array
+ * will be changed. At a later date non-contiguous arrays will always return
+ * writeable copies.
+ *
+ * Note that the type and argument expected for the __array__ method is
+ * ignored.
+ */
static PyArrayObject *
iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
{
@@ -1063,27 +1084,14 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
PyArrayObject *ret;
npy_intp size;
- /* Any argument ignored */
-
- /* Two options:
- * 1) underlying array is contiguous
- * -- return 1-d wrapper around it
- * 2) underlying array is not contiguous
- * -- make new 1-d contiguous array with updateifcopy flag set
- * to copy back to the old array
- *
- * If underlying array is readonly, then we make the output array readonly
- * and updateifcopy does not apply.
- */
size = PyArray_SIZE(it->ao);
Py_INCREF(PyArray_DESCR(it->ao));
+
if (PyArray_ISCONTIGUOUS(it->ao)) {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, PyArray_DATA(it->ao),
- PyArray_FLAGS(it->ao),
- (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, PyArray_DATA(it->ao), PyArray_FLAGS(it->ao),
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1094,11 +1102,10 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
}
}
else {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, NULL,
- 0, (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, NULL, 0,
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1106,16 +1113,7 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
Py_DECREF(ret);
return NULL;
}
- if (PyArray_ISWRITEABLE(it->ao)) {
- Py_INCREF(it->ao);
- if (PyArray_SetUpdateIfCopyBase(ret, it->ao) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- }
- else {
- PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
- }
+ PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
}
return ret;
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 6c300a2bf..1a92365c8 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -139,6 +139,196 @@ PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getm
*ret = (PyArrayObject *)new;
}
+static NPY_INLINE void
+multi_DECREF(PyObject **objects, npy_intp n)
+{
+ npy_intp i;
+ for (i = 0; i < n; i++) {
+ Py_DECREF(objects[i]);
+ }
+}
+
+/**
+ * Unpack a tuple into an array of new references. Returns the number of objects
+ * unpacked.
+ *
+ * Useful if a tuple is being iterated over multiple times, or for a code path
+ * that doesn't always want the overhead of allocating a tuple.
+ */
+static NPY_INLINE npy_intp
+unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n)
+{
+ npy_intp n, i;
+ n = PyTuple_GET_SIZE(index);
+ if (n > result_n) {
+ PyErr_SetString(PyExc_IndexError,
+ "too many indices for array");
+ return -1;
+ }
+ for (i = 0; i < n; i++) {
+ result[i] = PyTuple_GET_ITEM(index, i);
+ Py_INCREF(result[i]);
+ }
+ return n;
+}
+
+/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
+static NPY_INLINE npy_intp
+unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n)
+{
+ Py_INCREF(index);
+ result[0] = index;
+ return 1;
+}
+
+/**
+ * Turn an index argument into a c-array of `PyObject *`s, one for each index.
+ *
+ * When a scalar is passed, this is written directly to the buffer. When a
+ * tuple is passed, the tuple elements are unpacked into the buffer.
+ *
+ * When some other sequence is passed, this implements the following section
+ * from the advanced indexing docs to decide whether to unpack or just write
+ * one element:
+ *
+ * > In order to remain backward compatible with a common usage in Numeric,
+ * > basic slicing is also initiated if the selection object is any non-ndarray
+ * > sequence (such as a list) containing slice objects, the Ellipsis object,
+ * > or the newaxis object, but not for integer arrays or other embedded
+ * > sequences.
+ *
+ * It might be worth deprecating this behaviour (gh-4434), in which case the
+ * entire function should become a simple check of PyTuple_Check.
+ *
+ * @param index The index object, which may or may not be a tuple. This is
+ * a borrowed reference.
+ * @param result An empty buffer of PyObject* to write each index component
+ * to. The references written are new.
+ * @param result_n The length of the result buffer
+ *
+ * @returns The number of items in `result`, or -1 if an error occurred.
+ * The entries in `result` at and beyond this index should be
+ * assumed to contain garbage, even if they were initialized
+ * to NULL, so are not safe to Py_XDECREF. Use multi_DECREF to
+ * dispose of them.
+ */
+NPY_NO_EXPORT npy_intp
+unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
+{
+ npy_intp n, i;
+ npy_bool commit_to_unpack;
+
+ /* Fast route for passing a tuple */
+ if (PyTuple_CheckExact(index)) {
+ return unpack_tuple((PyTupleObject *)index, result, result_n);
+ }
+
+ /* Obvious single-entry cases */
+ if (0 /* to aid macros below */
+#if !defined(NPY_PY3K)
+ || PyInt_CheckExact(index)
+#else
+ || PyLong_CheckExact(index)
+#endif
+ || index == Py_None
+ || PySlice_Check(index)
+ || PyArray_Check(index)
+ || !PySequence_Check(index)) {
+
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /*
+ * Passing a tuple subclass - coerce to the base type. This incurs an
+ * allocation, but doesn't need to be a fast path anyway
+ */
+ if (PyTuple_Check(index)) {
+ PyTupleObject *tup = (PyTupleObject *) PySequence_Tuple(index);
+ if (tup == NULL) {
+ return -1;
+ }
+ n = unpack_tuple(tup, result, result_n);
+ Py_DECREF(tup);
+ return n;
+ }
+
+ /*
+ * At this point, we're left with a non-tuple, non-array, sequence:
+ * typically, a list. We use some somewhat-arbitrary heuristics from here
+ * onwards to decide whether to treat that list as a single index, or a
+ * list of indices.
+ */
+
+ /* if len fails, treat like a scalar */
+ n = PySequence_Size(index);
+ if (n < 0) {
+ PyErr_Clear();
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /*
+ * Backwards compatibility only takes effect for short sequences - otherwise
+ * we treat it like any other scalar.
+ *
+ * Sequences < NPY_MAXDIMS with any slice objects
+ * or newaxis, Ellipsis or other arrays or sequences
+ * embedded, are considered equivalent to an indexing
+ * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
+ */
+ if (n >= NPY_MAXDIMS) {
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /* In case we change result_n elsewhere */
+ assert(n <= result_n);
+
+ /*
+ * Some other type of short sequence - assume we should unpack it like a
+ * tuple, and then decide whether that was actually necessary.
+ */
+ commit_to_unpack = 0;
+ for (i = 0; i < n; i++) {
+ PyObject *tmp_obj = result[i] = PySequence_GetItem(index, i);
+
+ if (commit_to_unpack) {
+ /* propagate errors */
+ if (tmp_obj == NULL) {
+ multi_DECREF(result, i);
+ return -1;
+ }
+ }
+ else {
+ /*
+ * if getitem fails (unusual) before we've committed, then stop
+ * unpacking
+ */
+ if (tmp_obj == NULL) {
+ PyErr_Clear();
+ break;
+ }
+
+ /* decide if we should treat this sequence like a tuple */
+ if (PyArray_Check(tmp_obj)
+ || PySequence_Check(tmp_obj)
+ || PySlice_Check(tmp_obj)
+ || tmp_obj == Py_Ellipsis
+ || tmp_obj == Py_None) {
+ commit_to_unpack = 1;
+ }
+ }
+ }
+
+ /* unpacking was the right thing to do, and we already did it */
+ if (commit_to_unpack) {
+ return n;
+ }
+ /* got to the end, never found an indication that we should have unpacked */
+ else {
+ /* we partially filled result, so empty it first */
+ multi_DECREF(result, i);
+ return unpack_scalar(index, result, result_n);
+ }
+}
/**
* Prepare an npy_index_object from the python slicing object.
@@ -174,7 +364,6 @@ prepare_index(PyArrayObject *self, PyObject *index,
int i;
npy_intp n;
- npy_bool make_tuple = 0;
PyObject *obj = NULL;
PyArrayObject *arr;
@@ -182,81 +371,16 @@ prepare_index(PyArrayObject *self, PyObject *index,
int ellipsis_pos = -1;
/*
- * The index might be a multi-dimensional index, but not yet a tuple
- * this makes it a tuple in that case.
- *
- * TODO: Refactor into its own function.
+ * The choice of only unpacking `2*NPY_MAXDIMS` items is historic.
+ * The longest "reasonable" index that produces a result of <= 32 dimensions
+ * is `(0,)*np.MAXDIMS + (None,)*np.MAXDIMS`. Longer indices can exist, but
+ * are uncommon.
*/
- if (!PyTuple_CheckExact(index)
- /* Next three are just to avoid slow checks */
-#if !defined(NPY_PY3K)
- && (!PyInt_CheckExact(index))
-#else
- && (!PyLong_CheckExact(index))
-#endif
- && (index != Py_None)
- && (!PySlice_Check(index))
- && (!PyArray_Check(index))
- && (PySequence_Check(index))) {
- /*
- * Sequences < NPY_MAXDIMS with any slice objects
- * or newaxis, Ellipsis or other arrays or sequences
- * embedded, are considered equivalent to an indexing
- * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
- */
+ PyObject *raw_indices[NPY_MAXDIMS*2];
- if (PyTuple_Check(index)) {
- /* If it is already a tuple, make it an exact tuple anyway */
- n = 0;
- make_tuple = 1;
- }
- else {
- n = PySequence_Size(index);
- }
- if (n < 0 || n >= NPY_MAXDIMS) {
- n = 0;
- }
- for (i = 0; i < n; i++) {
- PyObject *tmp_obj = PySequence_GetItem(index, i);
- /* if getitem fails (unusual) treat this as a single index */
- if (tmp_obj == NULL) {
- PyErr_Clear();
- make_tuple = 0;
- break;
- }
- if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj)
- || PySlice_Check(tmp_obj) || tmp_obj == Py_Ellipsis
- || tmp_obj == Py_None) {
- make_tuple = 1;
- Py_DECREF(tmp_obj);
- break;
- }
- Py_DECREF(tmp_obj);
- }
-
- if (make_tuple) {
- /* We want to interpret it as a tuple, so make it one */
- index = PySequence_Tuple(index);
- if (index == NULL) {
- return -1;
- }
- }
- }
-
- /* If the index is not a tuple, handle it the same as (index,) */
- if (!PyTuple_CheckExact(index)) {
- obj = index;
- index_ndim = 1;
- }
- else {
- n = PyTuple_GET_SIZE(index);
- if (n > NPY_MAXDIMS * 2) {
- PyErr_SetString(PyExc_IndexError,
- "too many indices for array");
- goto fail;
- }
- index_ndim = (int)n;
- obj = NULL;
+ index_ndim = unpack_indices(index, raw_indices, NPY_MAXDIMS*2);
+ if (index_ndim == -1) {
+ return -1;
}
/*
@@ -275,14 +399,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
goto failed_building_indices;
}
- /* Check for single index. obj is already set then. */
- if ((curr_idx != 0) || (obj == NULL)) {
- obj = PyTuple_GET_ITEM(index, get_idx++);
- }
- else {
- /* only one loop */
- get_idx += 1;
- }
+ obj = raw_indices[get_idx++];
/**** Try the cascade of possible indices ****/
@@ -355,7 +472,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
#endif
npy_intp ind = PyArray_PyIntAsIntp(obj);
- if ((ind == -1) && PyErr_Occurred()) {
+ if (error_converting(ind)) {
PyErr_Clear();
}
else {
@@ -526,7 +643,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr);
Py_DECREF(arr);
- if ((ind == -1) && PyErr_Occurred()) {
+ if (error_converting(ind)) {
goto failed_building_indices;
}
else {
@@ -686,9 +803,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
*ndim = new_ndim + fancy_ndim;
*out_fancy_ndim = fancy_ndim;
- if (make_tuple) {
- Py_DECREF(index);
- }
+ multi_DECREF(raw_indices, index_ndim);
return index_type;
@@ -696,10 +811,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
for (i=0; i < curr_idx; i++) {
Py_XDECREF(indices[i].object);
}
- fail:
- if (make_tuple) {
- Py_DECREF(index);
- }
+ multi_DECREF(raw_indices, index_ndim);
return -1;
}
@@ -1334,10 +1446,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyObject *fields, *names;
PyArray_Descr *view_dtype;
- /* variables needed to make a copy, to remove in the future */
- static PyObject *copyfunc = NULL;
- PyObject *viewcopy;
-
seqlen = PySequence_Size(ind);
/* quit if have a 0-d array (seqlen==-1) or a 0-len array */
@@ -1390,6 +1498,35 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
Py_DECREF(names);
return 0;
}
+ // disallow use of titles as index
+ if (PyTuple_Size(tup) == 3) {
+ PyObject *title = PyTuple_GET_ITEM(tup, 2);
+ int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
+ if (titlecmp == 1) {
+ // if title == name, we were given a title, not a field name
+ PyErr_SetString(PyExc_KeyError,
+ "cannot use field titles in multi-field index");
+ }
+ if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
+ Py_DECREF(title);
+ Py_DECREF(name);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
+ Py_DECREF(title);
+ }
+ // disallow duplicate field indices
+ if (PyDict_Contains(fields, name)) {
+ PyObject *errmsg = PyUString_FromString(
+ "duplicate field of name ");
+ PyUString_ConcatAndDel(&errmsg, name);
+ PyErr_SetObject(PyExc_KeyError, errmsg);
+ Py_DECREF(errmsg);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
if (PyDict_SetItem(fields, name, tup) < 0) {
Py_DECREF(name);
Py_DECREF(fields);
@@ -1433,29 +1570,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
- /*
- * Return copy for now (future plan to return the view above). All the
- * following code in this block can then be replaced by "return 0;"
- */
- npy_cache_import("numpy.core._internal", "_copy_fields", &copyfunc);
- if (copyfunc == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
-
- PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
- if (viewcopy == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
- Py_DECREF(*view);
- *view = (PyArrayObject*)viewcopy;
-
- /* warn when writing to the copy */
- PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
return 0;
}
return -1;
@@ -1489,11 +1603,6 @@ array_subscript(PyArrayObject *self, PyObject *op)
if (view == NULL) {
return NULL;
}
-
- /* warn if writing to a copy. copies will have no base */
- if (PyArray_BASE(view) == NULL) {
- PyArray_ENABLEFLAGS(view, NPY_ARRAY_WARN_ON_WRITE);
- }
return (PyObject*)view;
}
}
@@ -1780,17 +1889,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
PyArrayObject *view;
int ret = _get_field_view(self, ind, &view);
if (ret == 0){
-
-#if defined(NPY_PY3K)
- if (!PyUnicode_Check(ind)) {
-#else
- if (!PyString_Check(ind) && !PyUnicode_Check(ind)) {
-#endif
- PyErr_SetString(PyExc_ValueError,
- "multi-field assignment is not supported");
- return -1;
- }
-
if (view == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 898887042..efa97dd65 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -21,6 +21,7 @@
#include "shape.h"
#include "methods.h"
+#include "alloc.h"
/* NpyArg_ParseKeywords
@@ -201,11 +202,11 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
}
ret = PyArray_Newshape(self, &newshape, order);
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
return ret;
fail:
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
return NULL;
}
@@ -517,12 +518,13 @@ PyArray_Byteswap(PyArrayObject *self, npy_bool inplace)
static PyObject *
-array_byteswap(PyArrayObject *self, PyObject *args)
+array_byteswap(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
npy_bool inplace = NPY_FALSE;
+ static char *kwlist[] = {"inplace", NULL};
- if (!PyArg_ParseTuple(args, "|O&:byteswap",
- PyArray_BoolConverter, &inplace)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist,
+ PyArray_BoolConverter, &inplace)) {
return NULL;
}
return PyArray_Byteswap(self, inplace);
@@ -637,7 +639,7 @@ array_toscalar(PyArrayObject *self, PyObject *args)
npy_intp value, size = PyArray_SIZE(self);
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
@@ -657,7 +659,7 @@ array_toscalar(PyArrayObject *self, PyObject *args)
for (idim = 0; idim < ndim; ++idim) {
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
multi_index[idim] = value;
@@ -714,7 +716,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
npy_intp value, size = PyArray_SIZE(self);
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
@@ -734,7 +736,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
for (idim = 0; idim < ndim; ++idim) {
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
multi_index[idim] = value;
@@ -971,20 +973,18 @@ array_getarray(PyArrayObject *self, PyObject *args)
/* convert to PyArray_Type */
if (!PyArray_CheckExact(self)) {
PyArrayObject *new;
- PyTypeObject *subtype = &PyArray_Type;
-
- if (!PyType_IsSubtype(Py_TYPE(self), &PyArray_Type)) {
- subtype = &PyArray_Type;
- }
Py_INCREF(PyArray_DESCR(self));
- new = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- PyArray_DESCR(self),
- PyArray_NDIM(self),
- PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_DATA(self),
- PyArray_FLAGS(self), NULL);
+ new = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type,
+ PyArray_DESCR(self),
+ PyArray_NDIM(self),
+ PyArray_DIMS(self),
+ PyArray_STRIDES(self),
+ PyArray_DATA(self),
+ PyArray_FLAGS(self),
+ NULL
+ );
if (new == NULL) {
return NULL;
}
@@ -1070,7 +1070,7 @@ array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
/* Separate from array_copy to make __copy__ preserve Fortran contiguity. */
static PyObject *
-array_copy_keeporder(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_copy_keeporder(PyArrayObject *self, PyObject *args)
{
if (!PyArg_ParseTuple(args, ":__copy__")) {
return NULL;
@@ -1111,7 +1111,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER);
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
if (ret == NULL) {
return NULL;
}
@@ -1779,7 +1779,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
if (PyArray_DIMS(self) != NULL) {
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
fa->dimensions = NULL;
}
@@ -1788,7 +1788,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->nd = nd;
if (nd > 0) {
- fa->dimensions = PyDimMem_NEW(3*nd);
+ fa->dimensions = npy_alloc_cache_dim(3*nd);
if (fa->dimensions == NULL) {
return PyErr_NoMemory();
}
@@ -1802,7 +1802,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
- int swap=!PyArray_ISNOTSWAPPED(self);
+ int swap = PyArray_ISBYTESWAPPED(self);
fa->data = datastr;
#ifndef NPY_PY3K
/* Check that the string is not interned */
@@ -1816,7 +1816,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->data = PyDataMem_NEW(num);
if (PyArray_DATA(self) == NULL) {
fa->nd = 0;
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
Py_DECREF(rawdata);
return PyErr_NoMemory();
}
@@ -1860,7 +1860,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (PyArray_DATA(self) == NULL) {
fa->nd = 0;
fa->data = PyDataMem_NEW(PyArray_DESCR(self)->elsize);
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
return PyErr_NoMemory();
}
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) {
@@ -2002,7 +2002,7 @@ array_transpose(PyArrayObject *self, PyObject *args)
return NULL;
}
ret = PyArray_Transpose(self, &permute);
- PyDimMem_FREE(permute.ptr);
+ npy_free_cache_dim_obj(permute);
}
return ret;
@@ -2569,7 +2569,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"byteswap",
(PyCFunction)array_byteswap,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"choose",
(PyCFunction)array_choose,
METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index de05cc280..a20cf6257 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -3,8 +3,10 @@
#include <Python.h>
#define _NPY_NO_DEPRECATIONS /* for NPY_CHAR */
#include "numpy/arrayobject.h"
+#include "numpy/npy_math.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
+#include "common.h"
/* test PyArray_IsPythonScalar, before including private py3 compat header */
static PyObject *
@@ -1000,11 +1002,11 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
for (j = 0; j < nterms; ++j) {
terms[j].a = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(A, j));
- if (terms[j].a == -1 && PyErr_Occurred()) {
+ if (error_converting(terms[j].a)) {
goto fail;
}
terms[j].ub = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(U, j));
- if (terms[j].ub == -1 && PyErr_Occurred()) {
+ if (error_converting(terms[j].ub)) {
goto fail;
}
}
@@ -1559,6 +1561,125 @@ extint_ceildiv_128_64(PyObject *NPY_UNUSED(self), PyObject *args) {
}
+static char get_fpu_mode_doc[] = (
+ "get_fpu_mode()\n"
+ "\n"
+ "Get the current FPU control word, in a platform-dependent format.\n"
+ "Returns None if not implemented on current platform.");
+
+static PyObject *
+get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) {
+ return NULL;
+ }
+
+#if defined(_MSC_VER)
+ {
+ unsigned int result = 0;
+ result = _controlfp(0, 0);
+ return PyLong_FromLongLong(result);
+ }
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+ {
+ unsigned short cw = 0;
+ __asm__("fstcw %w0" : "=m" (cw));
+ return PyLong_FromLongLong(cw);
+ }
+#else
+ Py_RETURN_NONE;
+#endif
+}
+
+/*
+ * npymath wrappers
+ */
+
+/**begin repeat
+ * #name = cabs, carg#
+ */
+
+/**begin repeat1
+ * #itype = npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #ITYPE = NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE#
+ * #otype = npy_float, npy_double, npy_longdouble#
+ * #OTYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE#
+ * #suffix= f, , l#
+ */
+
+static PyObject *
+call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL;
+
+ if (!PyArg_ParseTuple(args, "O", &z_py)) {
+ return NULL;
+ }
+
+ z_arr = PyArray_FROMANY(z_py, @ITYPE@, 0, 0, NPY_ARRAY_CARRAY_RO);
+ if (z_arr == NULL) {
+ return NULL;
+ }
+
+ w_arr = PyArray_SimpleNew(0, NULL, @OTYPE@);
+ if (w_arr == NULL) {
+ Py_DECREF(z_arr);
+ return NULL;
+ }
+
+ *(@otype@*)PyArray_DATA((PyArrayObject *)w_arr) =
+ npy_@name@@suffix@(*(@itype@*)PyArray_DATA((PyArrayObject *)z_arr));
+
+ Py_DECREF(z_arr);
+ return w_arr;
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+/**begin repeat
+ * #name = log10, cosh, sinh, tan, tanh#
+ */
+
+/**begin repeat1
+ * #type = npy_float, npy_double, npy_longdouble#
+ * #TYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE#
+ * #suffix= f, , l#
+ */
+
+static PyObject *
+call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL;
+
+ if (!PyArg_ParseTuple(args, "O", &z_py)) {
+ return NULL;
+ }
+
+ z_arr = PyArray_FROMANY(z_py, @TYPE@, 0, 0, NPY_ARRAY_CARRAY_RO);
+ if (z_arr == NULL) {
+ return NULL;
+ }
+
+ w_arr = PyArray_SimpleNew(0, NULL, @TYPE@);
+ if (w_arr == NULL) {
+ Py_DECREF(z_arr);
+ return NULL;
+ }
+
+ *(@type@*)PyArray_DATA((PyArrayObject *)w_arr) =
+ npy_@name@@suffix@(*(@type@*)PyArray_DATA((PyArrayObject *)z_arr));
+
+ Py_DECREF(z_arr);
+ return w_arr;
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+
static PyMethodDef Multiarray_TestsMethods[] = {
{"IsPythonScalar",
IsPythonScalar,
@@ -1649,6 +1770,37 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"extint_ceildiv_128_64",
extint_ceildiv_128_64,
METH_VARARGS, NULL},
+ {"get_fpu_mode",
+ get_fpu_mode,
+ METH_VARARGS, get_fpu_mode_doc},
+/**begin repeat
+ * #name = cabs, carg#
+ */
+
+/**begin repeat1
+ * #suffix = f, , l#
+ */
+ {"npy_@name@@suffix@",
+ call_npy_@name@@suffix@,
+ METH_VARARGS, NULL},
+/**end repeat1**/
+
+/**end repeat**/
+
+/**begin repeat
+ * #name = log10, cosh, sinh, tan, tanh#
+ */
+
+/**begin repeat1
+ * #suffix= f, , l#
+ */
+ {"npy_@name@@suffix@",
+ call_npy_@name@@suffix@,
+ METH_VARARGS, NULL},
+/**end repeat1**/
+
+/**end repeat**/
+
{NULL, NULL, 0, NULL} /* Sentinel */
};
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 81a1bc543..66a076dc6 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -315,20 +315,39 @@ PyArray_Free(PyObject *op, void *ptr)
return 0;
}
+/*
+ * Get the ndarray subclass with the highest priority
+ */
+NPY_NO_EXPORT PyTypeObject *
+PyArray_GetSubType(int narrays, PyArrayObject **arrays) {
+ PyTypeObject *subtype = &PyArray_Type;
+ double priority = NPY_PRIORITY;
+ int i;
+
+ /* Get the priority subtype for the array */
+ for (i = 0; i < narrays; ++i) {
+ if (Py_TYPE(arrays[i]) != subtype) {
+ double pr = PyArray_GetPriority((PyObject *)(arrays[i]), 0.0);
+ if (pr > priority) {
+ priority = pr;
+ subtype = Py_TYPE(arrays[i]);
+ }
+ }
+ }
+
+ return subtype;
+}
+
/*
* Concatenates a list of ndarrays.
*/
NPY_NO_EXPORT PyArrayObject *
-PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
+PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
+ PyArrayObject* ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays, idim, ndim;
- npy_intp shape[NPY_MAXDIMS], s, strides[NPY_MAXDIMS];
- int strideperm[NPY_MAXDIMS];
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
+ npy_intp shape[NPY_MAXDIMS];
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -383,47 +402,57 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != ndim) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array has wrong dimensionality");
+ return NULL;
+ }
+ if (!PyArray_CompareLists(shape, PyArray_SHAPE(ret), ndim)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong shape");
+ return NULL;
}
+ Py_INCREF(ret);
}
+ else {
+ npy_intp s, strides[NPY_MAXDIMS];
+ int strideperm[NPY_MAXDIMS];
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /*
- * Figure out the permutation to apply to the strides to match
- * the memory layout of the input arrays, using ambiguity
- * resolution rules matching that of the NpyIter.
- */
- PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
- s = dtype->elsize;
- for (idim = ndim-1; idim >= 0; --idim) {
- int iperm = strideperm[idim];
- strides[iperm] = s;
- s *= shape[iperm];
- }
-
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- ndim,
- shape,
- strides,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Figure out the permutation to apply to the strides to match
+ * the memory layout of the input arrays, using ambiguity
+ * resolution rules matching that of the NpyIter.
+ */
+ PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
+ s = dtype->elsize;
+ for (idim = ndim-1; idim >= 0; --idim) {
+ int iperm = strideperm[idim];
+ strides[iperm] = s;
+ s *= shape[iperm];
+ }
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ ndim,
+ shape,
+ strides,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -462,15 +491,10 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
- NPY_ORDER order)
+ NPY_ORDER order, PyArrayObject *ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays;
- npy_intp stride;
npy_intp shape = 0;
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -494,36 +518,45 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array must be 1D");
+ return NULL;
+ }
+ if (shape != PyArray_SIZE(ret)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong size");
+ return NULL;
}
+ Py_INCREF(ret);
}
+ else {
+ npy_intp stride;
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- stride = dtype->elsize;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- 1,
- &shape,
- &stride,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ stride = dtype->elsize;
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ 1,
+ &shape,
+ &stride,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -558,22 +591,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return ret;
}
-
-/*NUMPY_API
- * Concatenate
- *
- * Concatenate an arbitrary Python sequence into an array.
- * op is a python object supporting the sequence interface.
- * Its elements will be concatenated together to form a single
- * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
- * each sequence object will be flattened before concatenation
-*/
NPY_NO_EXPORT PyObject *
-PyArray_Concatenate(PyObject *op, int axis)
+PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
{
int iarrays, narrays;
PyArrayObject **arrays;
- PyArrayObject *ret;
if (!PySequence_Check(op)) {
PyErr_SetString(PyExc_TypeError,
@@ -606,10 +628,10 @@ PyArray_Concatenate(PyObject *op, int axis)
}
if (axis >= NPY_MAXDIMS) {
- ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER);
+ ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret);
}
else {
- ret = PyArray_ConcatenateArrays(narrays, arrays, axis);
+ ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret);
}
for (iarrays = 0; iarrays < narrays; ++iarrays) {
@@ -629,6 +651,21 @@ fail:
return NULL;
}
+/*NUMPY_API
+ * Concatenate
+ *
+ * Concatenate an arbitrary Python sequence into an array.
+ * op is a python object supporting the sequence interface.
+ * Its elements will be concatenated together to form a single
+ * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
+ * each sequence object will be flattened before concatenation
+*/
+NPY_NO_EXPORT PyObject *
+PyArray_Concatenate(PyObject *op, int axis)
+{
+ return PyArray_ConcatenateInto(op, axis, NULL);
+}
+
static int
_signbit_set(PyArrayObject *arr)
{
@@ -1418,29 +1455,34 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwds)
/*
* Compare the field dictionaries for two types.
*
- * Return 1 if the contents are the same, 0 if not.
+ * Return 1 if the field types and field names of the two descrs are equal and
+ * in the same order, 0 if not.
*/
static int
-_equivalent_fields(PyObject *field1, PyObject *field2) {
+_equivalent_fields(PyArray_Descr *type1, PyArray_Descr *type2) {
- int same, val;
+ int val;
- if (field1 == field2) {
+ if (type1->fields == type2->fields && type1->names == type2->names) {
return 1;
}
- if (field1 == NULL || field2 == NULL) {
+ if (type1->fields == NULL || type2->fields == NULL) {
return 0;
}
- val = PyObject_RichCompareBool(field1, field2, Py_EQ);
+ val = PyObject_RichCompareBool(type1->fields, type2->fields, Py_EQ);
if (val != 1 || PyErr_Occurred()) {
- same = 0;
+ PyErr_Clear();
+ return 0;
}
- else {
- same = 1;
+
+ val = PyObject_RichCompareBool(type1->names, type2->names, Py_EQ);
+ if (val != 1 || PyErr_Occurred()) {
+ PyErr_Clear();
+ return 0;
}
- PyErr_Clear();
- return same;
+
+ return 1;
}
/*
@@ -1499,10 +1541,8 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
return ((type_num1 == type_num2)
&& _equivalent_subarrays(type1->subarray, type2->subarray));
}
- if (type_num1 == NPY_VOID
- || type_num2 == NPY_VOID) {
- return ((type_num1 == type_num2)
- && _equivalent_fields(type1->fields, type2->fields));
+ if (type_num1 == NPY_VOID || type_num2 == NPY_VOID) {
+ return ((type_num1 == type_num2) && _equivalent_fields(type1, type2));
}
if (type_num1 == NPY_DATETIME
|| type_num1 == NPY_TIMEDELTA
@@ -1662,7 +1702,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin);
if (ndmin_obj) {
ndmin = PyLong_AsLong(ndmin_obj);
- if (ndmin == -1 && PyErr_Occurred()) {
+ if (error_converting(ndmin)) {
goto clean_type;
}
else if (ndmin > NPY_MAXDIMS) {
@@ -1853,12 +1893,12 @@ array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
ret = (PyArrayObject *)PyArray_Empty(shape.len, shape.ptr,
typecode, is_f_order);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
fail:
Py_XDECREF(typecode);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -2007,12 +2047,12 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
ret = (PyArrayObject *)PyArray_Zeros(shape.len, shape.ptr,
typecode, (int) is_f_order);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
fail:
Py_XDECREF(typecode);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
}
@@ -2156,14 +2196,24 @@ static PyObject *
array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *a0;
+ PyObject *out = NULL;
int axis = 0;
- static char *kwlist[] = {"seq", "axis", NULL};
+ static char *kwlist[] = {"seq", "axis", "out", NULL};
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:concatenate", kwlist,
- &a0, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist,
+ &a0, PyArray_AxisConverter, &axis, &out)) {
return NULL;
}
- return PyArray_Concatenate(a0, axis);
+ if (out != NULL) {
+ if (out == Py_None) {
+ out = NULL;
+ }
+ else if (!PyArray_Check(out)) {
+ PyErr_SetString(PyExc_TypeError, "'out' must be an array");
+ return NULL;
+ }
+ }
+ return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out);
}
static PyObject *
@@ -2947,7 +2997,7 @@ array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
ret = PyArray_NewFromDescr(subtype, dtype,
(int)shape.len, shape.ptr, NULL, NULL, 0, NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
evil_global_disable_warn_O4O8_flag = 0;
@@ -2957,7 +3007,7 @@ fail:
evil_global_disable_warn_O4O8_flag = 0;
Py_XDECREF(dtype);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -3224,7 +3274,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), PyObject *args,
npy_bool ret;
PyObject *retobj = NULL;
NPY_CASTING casting = NPY_SAFE_CASTING;
- static char *kwlist[] = {"from", "to", "casting", NULL};
+ static char *kwlist[] = {"from_", "to", "casting", NULL};
if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:can_cast", kwlist,
&from_obj,
@@ -4607,15 +4657,13 @@ PyMODINIT_FUNC initmultiarray(void) {
if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) {
return RETVAL;
}
-/* FIXME
- * There is no error handling here
- */
+
c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
- PyDict_SetItemString(d, "_ARRAY_API", c_api);
- Py_DECREF(c_api);
- if (PyErr_Occurred()) {
+ if (c_api == NULL) {
goto err;
}
+ PyDict_SetItemString(d, "_ARRAY_API", c_api);
+ Py_DECREF(c_api);
/*
* PyExc_Exception should catch all the standard errors that are
@@ -4633,10 +4681,10 @@ PyMODINIT_FUNC initmultiarray(void) {
PyDict_SetItemString(d, "__version__", s);
Py_DECREF(s);
-/* FIXME
- * There is no error handling here
- */
s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL);
+ if (s == NULL) {
+ goto err;
+ }
PyDict_SetItemString(d, "DATETIMEUNITS", s);
Py_DECREF(s);
@@ -4666,23 +4714,15 @@ PyMODINIT_FUNC initmultiarray(void) {
ADDCONST(MAY_SHARE_EXACT);
#undef ADDCONST
- Py_INCREF(&PyArray_Type);
PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type);
- Py_INCREF(&PyArrayIter_Type);
PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type);
- Py_INCREF(&PyArrayMultiIter_Type);
PyDict_SetItemString(d, "nditer", (PyObject *)&NpyIter_Type);
- Py_INCREF(&NpyIter_Type);
PyDict_SetItemString(d, "broadcast",
(PyObject *)&PyArrayMultiIter_Type);
- Py_INCREF(&PyArrayDescr_Type);
PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type);
-
- Py_INCREF(&PyArrayFlags_Type);
PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type);
/* Business day calendar object */
- Py_INCREF(&NpyBusDayCalendar_Type);
PyDict_SetItemString(d, "busdaycalendar",
(PyObject *)&NpyBusDayCalendar_Type);
set_flaginfo(d);
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 9661ed12b..1af396821 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -15,6 +15,8 @@
#include <numpy/arrayobject.h>
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "alloc.h"
+#include "common.h"
typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject;
@@ -758,7 +760,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
&op_axes_in,
PyArray_IntpConverter, &itershape,
&buffersize)) {
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
return -1;
}
@@ -804,7 +806,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
}
}
else if (itershape.ptr != NULL) {
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
itershape.ptr = NULL;
}
@@ -832,7 +834,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
self->finished = 0;
}
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
/* Release the references we got to the ops and dtypes */
for (iop = 0; iop < nop; ++iop) {
@@ -843,7 +845,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
return 0;
fail:
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
for (iop = 0; iop < nop; ++iop) {
Py_XDECREF(op[iop]);
Py_XDECREF(op_request_dtypes[iop]);
@@ -1618,7 +1620,7 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value)
for (idim = 0; idim < ndim; ++idim) {
PyObject *v = PySequence_GetItem(value, idim);
multi_index[idim] = PyInt_AsLong(v);
- if (multi_index[idim]==-1 && PyErr_Occurred()) {
+ if (error_converting(multi_index[idim])) {
Py_XDECREF(v);
return -1;
}
@@ -1678,7 +1680,7 @@ static int npyiter_index_set(NewNpyArrayIterObject *self, PyObject *value)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind;
ind = PyInt_AsLong(value);
- if (ind==-1 && PyErr_Occurred()) {
+ if (error_converting(ind)) {
return -1;
}
if (NpyIter_GotoIndex(self->iter, ind) != NPY_SUCCEED) {
@@ -1728,7 +1730,7 @@ static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
}
iterindex = PyInt_AsLong(value);
- if (iterindex==-1 && PyErr_Occurred()) {
+ if (error_converting(iterindex)) {
return -1;
}
if (NpyIter_GotoIterIndex(self->iter, iterindex) != NPY_SUCCEED) {
@@ -2256,7 +2258,7 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
- if (i == -1 && PyErr_Occurred()) {
+ if (error_converting(i)) {
return NULL;
}
return npyiter_seq_item(self, i);
@@ -2305,7 +2307,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
- if (i == -1 && PyErr_Occurred()) {
+ if (error_converting(i)) {
return -1;
}
return npyiter_seq_ass_item(self, i, value);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 1f5523b90..8d1e1a24c 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -91,6 +91,7 @@ PyArray_SetNumericOps(PyObject *dict)
SET(sqrt);
SET(cbrt);
SET(negative);
+ SET(positive);
SET(absolute);
SET(invert);
SET(left_shift);
@@ -143,6 +144,7 @@ PyArray_GetNumericOps(void)
GET(_ones_like);
GET(sqrt);
GET(negative);
+ GET(positive);
GET(absolute);
GET(invert);
GET(left_shift);
@@ -443,7 +445,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
return NPY_NOSCALAR;
}
val = PyInt_AsSsize_t(value);
- if (val == -1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
PyErr_Clear();
return NPY_NOSCALAR;
}
@@ -453,9 +455,14 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
return NPY_NOSCALAR;
}
-/* optimize float array or complex array to a scalar power */
-static PyObject *
-fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
+/*
+ * optimize float array or complex array to a scalar power
+ * returns 0 on success, -1 if no optimization is possible
+ * the result is in value (can be NULL if an error occurred)
+ */
+static int
+fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace,
+ PyObject **value)
{
double exponent;
NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */
@@ -464,17 +471,7 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
PyObject *fastop = NULL;
if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
if (exponent == 1.0) {
- /* we have to do this one special, as the
- "copy" method of array objects isn't set
- up early enough to be added
- by PyArray_SetNumericOps.
- */
- if (inplace) {
- Py_INCREF(a1);
- return (PyObject *)a1;
- } else {
- return PyArray_Copy(a1);
- }
+ fastop = n_ops.positive;
}
else if (exponent == -1.0) {
fastop = n_ops.reciprocal;
@@ -489,15 +486,16 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
fastop = n_ops.square;
}
else {
- return NULL;
+ return -1;
}
if (inplace || can_elide_temp_unary(a1)) {
- return PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
}
else {
- return PyArray_GenericUnaryFunction(a1, fastop);
+ *value = PyArray_GenericUnaryFunction(a1, fastop);
}
+ return 0;
}
/* Because this is called with all arrays, we need to
* change the output if the kind of the scalar is different
@@ -507,36 +505,35 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
else if (exponent == 2.0) {
fastop = n_ops.square;
if (inplace) {
- return PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
}
else {
/* We only special-case the FLOAT_SCALAR and integer types */
if (kind == NPY_FLOAT_SCALAR && PyArray_ISINTEGER(a1)) {
- PyObject *res;
PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE);
a1 = (PyArrayObject *)PyArray_CastToType(a1, dtype,
PyArray_ISFORTRAN(a1));
- if (a1 == NULL) {
- return NULL;
+ if (a1 != NULL) {
+ /* cast always creates a new array */
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ Py_DECREF(a1);
}
- /* cast always creates a new array */
- res = PyArray_GenericInplaceUnaryFunction(a1, fastop);
- Py_DECREF(a1);
- return res;
}
else {
- return PyArray_GenericUnaryFunction(a1, fastop);
+ *value = PyArray_GenericUnaryFunction(a1, fastop);
}
}
+ return 0;
}
}
- return NULL;
+ /* no fast operation found */
+ return -1;
}
static PyObject *
array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
{
- PyObject *value;
+ PyObject *value = NULL;
if (modulo != Py_None) {
/* modular exponentiation is not implemented (gh-8804) */
@@ -545,8 +542,7 @@ array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
}
BINOP_GIVE_UP_IF_NEEDED(a1, o2, nb_power, array_power);
- value = fast_scalar_power(a1, o2, 0);
- if (!value) {
+ if (fast_scalar_power(a1, o2, 0, &value) != 0) {
value = PyArray_GenericBinaryFunction(a1, o2, n_ops.power);
}
return value;
@@ -565,7 +561,7 @@ array_negative(PyArrayObject *m1)
static PyObject *
array_absolute(PyArrayObject *m1)
{
- if (can_elide_temp_unary(m1)) {
+ if (can_elide_temp_unary(m1) && !PyArray_ISCOMPLEX(m1)) {
return PyArray_GenericInplaceUnaryFunction(m1, n_ops.absolute);
}
return PyArray_GenericUnaryFunction(m1, n_ops.absolute);
@@ -686,12 +682,11 @@ static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo))
{
/* modulo is ignored! */
- PyObject *value;
+ PyObject *value = NULL;
INPLACE_GIVE_UP_IF_NEEDED(
a1, o2, nb_inplace_power, array_inplace_power);
- value = fast_scalar_power(a1, o2, 1);
- if (!value) {
+ if (fast_scalar_power(a1, o2, 1, &value) != 0) {
value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power);
}
return value;
diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h
index 113fc2475..99a2a722b 100644
--- a/numpy/core/src/multiarray/number.h
+++ b/numpy/core/src/multiarray/number.h
@@ -15,6 +15,7 @@ typedef struct {
PyObject *sqrt;
PyObject *cbrt;
PyObject *negative;
+ PyObject *positive;
PyObject *absolute;
PyObject *invert;
PyObject *left_shift;
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 85824f2ce..0cb6b072d 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -415,7 +415,7 @@ PyArray_ScalarFromObject(PyObject *object)
else if (PyLong_Check(object)) {
npy_longlong val;
val = PyLong_AsLongLong(object);
- if (val==-1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
PyErr_Clear();
return NULL;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index f6bd5f5a7..7a6ed6a86 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -24,6 +24,7 @@
#include "scalartypes.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "alloc.h"
#include <stdlib.h>
@@ -194,9 +195,21 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds,
}
}
+static PyObject *
+gentype_add(PyObject *m1, PyObject* m2)
+{
+ /* special case str.__radd__, which should not call array_add */
+ if (PyString_Check(m1) || PyUnicode_Check(m1)) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_add, gentype_add);
+ return PyArray_Type.tp_as_number->nb_add(m1, m2);
+}
+
/**begin repeat
*
- * #name = add, subtract, remainder, divmod, lshift, rshift,
+ * #name = subtract, remainder, divmod, lshift, rshift,
* and, xor, or, floor_divide, true_divide#
*/
static PyObject *
@@ -243,7 +256,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
(Py_TYPE(m1)->tp_as_number->nb_multiply == NULL))) {
/* Try to convert m2 to an int and try sequence repeat */
repeat = PyArray_PyIntAsIntp(m2);
- if (repeat == -1 && PyErr_Occurred()) {
+ if (error_converting(repeat)) {
return NULL;
}
/* Note that npy_intp is compatible to Py_Ssize_t */
@@ -256,7 +269,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
(Py_TYPE(m2)->tp_as_number->nb_multiply == NULL))) {
/* Try to convert m1 to an int and try sequence repeat */
repeat = PyArray_PyIntAsIntp(m1);
- if (repeat == -1 && PyErr_Occurred()) {
+ if (error_converting(repeat)) {
return NULL;
}
return PySequence_Repeat(m2, repeat);
@@ -338,7 +351,6 @@ gentype_str(PyObject *self)
return ret;
}
-
static PyObject *
gentype_repr(PyObject *self)
{
@@ -353,6 +365,20 @@ gentype_repr(PyObject *self)
return ret;
}
+static PyObject *
+genint_type_str(PyObject *self)
+{
+ PyObject *item, *item_str;
+ item = gentype_generic_method(self, NULL, NULL, "item");
+ if (item == NULL) {
+ return NULL;
+ }
+
+ item_str = PyObject_Str(item);
+ Py_DECREF(item);
+ return item_str;
+}
+
/*
* The __format__ method for PEP 3101.
*/
@@ -1343,10 +1369,9 @@ gentype_imag_get(PyObject *self)
int elsize;
typecode = PyArray_DescrFromScalar(self);
elsize = typecode->elsize;
- temp = PyDataMem_NEW(elsize);
- memset(temp, '\0', elsize);
+ temp = npy_alloc_cache_zero(elsize);
ret = PyArray_Scalar(temp, typecode, NULL);
- PyDataMem_FREE(temp);
+ npy_free_cache(temp, elsize);
}
Py_XDECREF(typecode);
@@ -1516,9 +1541,9 @@ gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args)
*/
/**begin repeat
*
- * #name = tolist, item, tostring, tobytes, astype, copy, __deepcopy__,
- * searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten,
- * ravel, fill, transpose, newbyteorder#
+ * #name = tolist, item, __deepcopy__, __copy__,
+ * swapaxes, conj, conjugate, nonzero,
+ * fill, transpose, newbyteorder#
*/
static PyObject *
gentype_@name@(PyObject *self, PyObject *args)
@@ -1548,11 +1573,13 @@ static Py_ssize_t
gentype_getreadbuf(PyObject *, Py_ssize_t, void **);
static PyObject *
-gentype_byteswap(PyObject *self, PyObject *args)
+gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds)
{
npy_bool inplace = NPY_FALSE;
+ static char *kwlist[] = {"inplace", NULL};
- if (!PyArg_ParseTuple(args, "|O&:byteswap", PyArray_BoolConverter, &inplace)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist,
+ PyArray_BoolConverter, &inplace)) {
return NULL;
}
if (inplace) {
@@ -1593,8 +1620,9 @@ gentype_byteswap(PyObject *self, PyObject *args)
*
* #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip,
* std, var, sum, cumsum, prod, cumprod, compress, sort, argsort,
- * round, argmax, argmin, max, min, ptp, any, all, resize, reshape,
- * choose#
+ * round, argmax, argmin, max, min, ptp, any, all, astype, resize,
+ * reshape, choose, tostring, tobytes, copy, searchsorted, view,
+ * flatten, ravel#
*/
static PyObject *
gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds)
@@ -1628,7 +1656,7 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds)
* However, as a special case, void-scalar assignment broadcasts
* differently from ndarrays when assigning to an object field: Assignment
* to an ndarray object field broadcasts, but assignment to a void-scalar
- * object-field should not, in order to allow nested ndarrays.
+ * object-field should not, in order to allow nested ndarrays.
* These lines should then behave identically:
*
* b = np.zeros(1, dtype=[('x', 'O')])
@@ -1858,19 +1886,19 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS, NULL},
{"tobytes",
(PyCFunction)gentype_tobytes,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"tofile",
(PyCFunction)gentype_tofile,
METH_VARARGS | METH_KEYWORDS, NULL},
{"tostring",
(PyCFunction)gentype_tostring,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"byteswap",
(PyCFunction)gentype_byteswap,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"astype",
(PyCFunction)gentype_astype,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"getfield",
(PyCFunction)gentype_getfield,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1879,7 +1907,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"copy",
(PyCFunction)gentype_copy,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"resize",
(PyCFunction)gentype_resize,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1897,7 +1925,7 @@ static PyMethodDef gentype_methods[] = {
/* for the copy module */
{"__copy__",
- (PyCFunction)gentype_copy,
+ (PyCFunction)gentype___copy__,
METH_VARARGS, NULL},
{"__deepcopy__",
(PyCFunction)gentype___deepcopy__,
@@ -1945,7 +1973,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"searchsorted",
(PyCFunction)gentype_searchsorted,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"argmax",
(PyCFunction)gentype_argmax,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1960,7 +1988,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS, NULL},
{"view",
(PyCFunction)gentype_view,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"swapaxes",
(PyCFunction)gentype_swapaxes,
METH_VARARGS, NULL},
@@ -2023,10 +2051,10 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"flatten",
(PyCFunction)gentype_flatten,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"ravel",
(PyCFunction)gentype_ravel,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"round",
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -2151,35 +2179,31 @@ static PyObject *
voidtype_subscript(PyVoidScalarObject *self, PyObject *ind)
{
npy_intp n;
- PyObject *ret, *args;
+ PyObject *ret, *res;
- if (!(PyDataType_HASFIELDS(self->descr))) {
- PyErr_SetString(PyExc_IndexError,
- "can't index void scalar without fields");
- return NULL;
+ /* structured voids will accept an integer index */
+ if (PyDataType_HASFIELDS(self->descr)) {
+ n = PyArray_PyIntAsIntp(ind);
+ if (!error_converting(n)) {
+ return voidtype_item(self, (Py_ssize_t)n);
+ }
+ PyErr_Clear();
}
-#if defined(NPY_PY3K)
- if (PyUString_Check(ind)) {
-#else
- if (PyBytes_Check(ind) || PyUnicode_Check(ind)) {
-#endif
- args = Py_BuildValue("(O)", ind);
- ret = gentype_generic_method((PyObject *)self, args, NULL, "__getitem__");
- Py_DECREF(args);
- return ret;
- }
+ res = PyArray_FromScalar((PyObject*)self, NULL);
- /* try to convert it to a number */
- n = PyArray_PyIntAsIntp(ind);
- if (error_converting(n)) {
- goto fail;
+ /* ellipsis should return 0d array */
+ if(ind == Py_Ellipsis){
+ return res;
}
- return voidtype_item(self, (Py_ssize_t)n);
-fail:
- PyErr_SetString(PyExc_IndexError, "invalid index");
- return NULL;
+ /*
+ * other cases (field names, empty tuple) will return either
+ * scalar or non-0d array. Compute this using ndarray subscript.
+ */
+ ret = array_subscript((PyArrayObject *)res, ind);
+ Py_DECREF(res);
+ return PyArray_Return((PyArrayObject*)ret);
}
static int
@@ -2473,7 +2497,7 @@ static void
void_dealloc(PyVoidScalarObject *v)
{
if (v->flags & NPY_ARRAY_OWNDATA) {
- PyDataMem_FREE(v->obval);
+ npy_free_cache(v->obval, Py_SIZE(v));
}
Py_XDECREF(v->descr);
Py_XDECREF(v->base);
@@ -2889,9 +2913,7 @@ static PyObject *
void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
{
PyObject *obj, *arr;
- npy_ulonglong memu = 1;
PyObject *new = NULL;
- char *destptr;
if (!PyArg_ParseTuple(args, "O:void", &obj)) {
return NULL;
@@ -2913,7 +2935,8 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
}
if (new && PyLong_Check(new)) {
PyObject *ret;
- memu = PyLong_AsUnsignedLongLong(new);
+ char *destptr;
+ npy_ulonglong memu = PyLong_AsUnsignedLongLong(new);
Py_DECREF(new);
if (PyErr_Occurred() || (memu > NPY_MAX_INT)) {
PyErr_Clear();
@@ -2922,13 +2945,13 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
(int) NPY_MAX_INT);
return NULL;
}
- destptr = PyDataMem_NEW((int) memu);
+ destptr = npy_alloc_cache_zero(memu);
if (destptr == NULL) {
return PyErr_NoMemory();
}
ret = type->tp_alloc(type, 0);
if (ret == NULL) {
- PyDataMem_FREE(destptr);
+ npy_free_cache(destptr, memu);
return PyErr_NoMemory();
}
((PyVoidScalarObject *)ret)->obval = destptr;
@@ -2939,7 +2962,6 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
((PyVoidScalarObject *)ret)->flags = NPY_ARRAY_BEHAVED |
NPY_ARRAY_OWNDATA;
((PyVoidScalarObject *)ret)->base = NULL;
- memset(destptr, '\0', (size_t) memu);
return ret;
}
@@ -4183,6 +4205,19 @@ initialize_numeric_types(void)
/**end repeat**/
+
+ /**begin repeat
+ * #Type = Bool, Byte, UByte, Short, UShort, Int, UInt, Long,
+ * ULong, LongLong, ULongLong#
+ */
+
+ /* both str/repr use genint_type_str to avoid trailing "L" of longs */
+ Py@Type@ArrType_Type.tp_str = genint_type_str;
+ Py@Type@ArrType_Type.tp_repr = genint_type_str;
+
+ /**end repeat**/
+
+
PyHalfArrType_Type.tp_print = halftype_print;
PyFloatArrType_Type.tp_print = floattype_print;
PyDoubleArrType_Type.tp_print = doubletype_print;
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index b32b67146..07ab9b626 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -19,6 +19,7 @@
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "common.h" /* for convert_shape_to_string */
+#include "alloc.h"
static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr);
@@ -145,26 +146,33 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
}
}
- if (PyArray_NDIM(self) != new_nd) {
- /* Different number of dimensions. */
- ((PyArrayObject_fields *)self)->nd = new_nd;
- /* Need new dimensions and strides arrays */
- dimptr = PyDimMem_RENEW(PyArray_DIMS(self), 3*new_nd);
- if (dimptr == NULL) {
- PyErr_SetString(PyExc_MemoryError,
- "cannot allocate memory for array");
- return NULL;
+ if (new_nd > 0) {
+ if (PyArray_NDIM(self) != new_nd) {
+ /* Different number of dimensions. */
+ ((PyArrayObject_fields *)self)->nd = new_nd;
+ /* Need new dimensions and strides arrays */
+ dimptr = PyDimMem_RENEW(PyArray_DIMS(self), 3*new_nd);
+ if (dimptr == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "cannot allocate memory for array");
+ return NULL;
+ }
+ ((PyArrayObject_fields *)self)->dimensions = dimptr;
+ ((PyArrayObject_fields *)self)->strides = dimptr + new_nd;
}
- ((PyArrayObject_fields *)self)->dimensions = dimptr;
- ((PyArrayObject_fields *)self)->strides = dimptr + new_nd;
+ /* make new_strides variable */
+ _array_fill_strides(new_strides, new_dimensions, new_nd,
+ PyArray_DESCR(self)->elsize, PyArray_FLAGS(self),
+ &(((PyArrayObject_fields *)self)->flags));
+ memmove(PyArray_DIMS(self), new_dimensions, new_nd*sizeof(npy_intp));
+ memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp));
+ }
+ else {
+ PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions);
+ ((PyArrayObject_fields *)self)->nd = 0;
+ ((PyArrayObject_fields *)self)->dimensions = NULL;
+ ((PyArrayObject_fields *)self)->strides = NULL;
}
-
- /* make new_strides variable */
- _array_fill_strides(
- new_strides, new_dimensions, new_nd, PyArray_DESCR(self)->elsize,
- PyArray_FLAGS(self), &(((PyArrayObject_fields *)self)->flags));
- memmove(PyArray_DIMS(self), new_dimensions, new_nd*sizeof(npy_intp));
- memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp));
Py_RETURN_NONE;
}
@@ -309,7 +317,7 @@ PyArray_Reshape(PyArrayObject *self, PyObject *shape)
return NULL;
}
ret = PyArray_Newshape(self, &newdims, NPY_CORDER);
- PyDimMem_FREE(newdims.ptr);
+ npy_free_cache_dim_obj(newdims);
return ret;
}
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
new file mode 100644
index 000000000..5a0d20335
--- /dev/null
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -0,0 +1,200 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+
+#include "npy_pycompat.h"
+
+#include "strfuncs.h"
+
+static PyObject *PyArray_StrFunction = NULL;
+static PyObject *PyArray_ReprFunction = NULL;
+
+/*NUMPY_API
+ * Set the array print function to be a Python function.
+ */
+NPY_NO_EXPORT void
+PyArray_SetStringFunction(PyObject *op, int repr)
+{
+ if (repr) {
+ /* Dispose of previous callback */
+ Py_XDECREF(PyArray_ReprFunction);
+ /* Add a reference to new callback */
+ Py_XINCREF(op);
+ /* Remember new callback */
+ PyArray_ReprFunction = op;
+ }
+ else {
+ /* Dispose of previous callback */
+ Py_XDECREF(PyArray_StrFunction);
+ /* Add a reference to new callback */
+ Py_XINCREF(op);
+ /* Remember new callback */
+ PyArray_StrFunction = op;
+ }
+}
+
+
+/*
+ * Extend string. On failure, returns NULL and leaves *strp alone.
+ * XXX we do this in multiple places; time for a string library?
+ */
+static char *
+extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
+{
+ char *str = *strp;
+ Py_ssize_t new_cap;
+
+ if (n >= *maxp - 16) {
+ new_cap = *maxp * 2;
+
+ if (new_cap <= *maxp) { /* overflow */
+ return NULL;
+ }
+ str = PyArray_realloc(*strp, new_cap);
+ if (str != NULL) {
+ *strp = str;
+ *maxp = new_cap;
+ }
+ }
+ return str;
+}
+
+
+static int
+dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
+ npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
+{
+ PyArray_Descr *descr=PyArray_DESCR(self);
+ PyObject *op = NULL, *sp = NULL;
+ char *ostring;
+ npy_intp i, N, ret = 0;
+
+#define CHECK_MEMORY do { \
+ if (extend(string, *n, max_n) == NULL) { \
+ ret = -1; \
+ goto end; \
+ } \
+ } while (0)
+
+ if (nd == 0) {
+ if ((op = descr->f->getitem(data, self)) == NULL) {
+ return -1;
+ }
+ sp = PyObject_Repr(op);
+ if (sp == NULL) {
+ ret = -1;
+ goto end;
+ }
+ ostring = PyString_AsString(sp);
+ N = PyString_Size(sp)*sizeof(char);
+ *n += N;
+ CHECK_MEMORY;
+ memmove(*string + (*n - N), ostring, N);
+ }
+ else {
+ CHECK_MEMORY;
+ (*string)[*n] = '[';
+ *n += 1;
+ for (i = 0; i < dimensions[0]; i++) {
+ if (dump_data(string, n, max_n,
+ data + (*strides)*i,
+ nd - 1, dimensions + 1,
+ strides + 1, self) < 0) {
+ return -1;
+ }
+ CHECK_MEMORY;
+ if (i < dimensions[0] - 1) {
+ (*string)[*n] = ',';
+ (*string)[*n+1] = ' ';
+ *n += 2;
+ }
+ }
+ CHECK_MEMORY;
+ (*string)[*n] = ']';
+ *n += 1;
+ }
+
+#undef CHECK_MEMORY
+
+end:
+ Py_XDECREF(op);
+ Py_XDECREF(sp);
+ return ret;
+}
+
+
+static PyObject *
+array_repr_builtin(PyArrayObject *self, int repr)
+{
+ PyObject *ret;
+ char *string;
+ /* max_n initial value is arbitrary, dump_data will extend it */
+ Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
+
+ if ((string = PyArray_malloc(max_n)) == NULL) {
+ return PyErr_NoMemory();
+ }
+
+ if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
+ PyArray_NDIM(self), PyArray_DIMS(self),
+ PyArray_STRIDES(self), self) < 0) {
+ PyArray_free(string);
+ return NULL;
+ }
+
+ if (repr) {
+ if (PyArray_ISEXTENDED(self)) {
+ ret = PyUString_FromFormat("array(%s, '%c%d')",
+ string,
+ PyArray_DESCR(self)->type,
+ PyArray_DESCR(self)->elsize);
+ }
+ else {
+ ret = PyUString_FromFormat("array(%s, '%c')",
+ string,
+ PyArray_DESCR(self)->type);
+ }
+ }
+ else {
+ ret = PyUString_FromStringAndSize(string, n);
+ }
+
+ PyArray_free(string);
+ return ret;
+}
+
+
+NPY_NO_EXPORT PyObject *
+array_repr(PyArrayObject *self)
+{
+ PyObject *s, *arglist;
+
+ if (PyArray_ReprFunction == NULL) {
+ s = array_repr_builtin(self, 1);
+ }
+ else {
+ arglist = Py_BuildValue("(O)", self);
+ s = PyEval_CallObject(PyArray_ReprFunction, arglist);
+ Py_DECREF(arglist);
+ }
+ return s;
+}
+
+
+NPY_NO_EXPORT PyObject *
+array_str(PyArrayObject *self)
+{
+ PyObject *s, *arglist;
+
+ if (PyArray_StrFunction == NULL) {
+ s = array_repr_builtin(self, 0);
+ }
+ else {
+ arglist = Py_BuildValue("(O)", self);
+ s = PyEval_CallObject(PyArray_StrFunction, arglist);
+ Py_DECREF(arglist);
+ }
+ return s;
+}
diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h
new file mode 100644
index 000000000..8e80897c2
--- /dev/null
+++ b/numpy/core/src/multiarray/strfuncs.h
@@ -0,0 +1,13 @@
+#ifndef _NPY_ARRAY_STRFUNCS_H_
+#define _NPY_ARRAY_STRFUNCS_H_
+
+NPY_NO_EXPORT void
+PyArray_SetStringFunction(PyObject *op, int repr);
+
+NPY_NO_EXPORT PyObject *
+array_repr(PyArrayObject *self);
+
+NPY_NO_EXPORT PyObject *
+array_str(PyArrayObject *self);
+
+#endif
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index fae6763e4..3822f5d0d 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -283,8 +283,10 @@ can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot)
* array of a basic type, own its data and size larger than threshold
*/
if (Py_REFCNT(alhs) != 1 || !PyArray_CheckExact(alhs) ||
- PyArray_DESCR(alhs)->type_num >= NPY_OBJECT ||
+ !PyArray_ISNUMBER(alhs) ||
!(PyArray_FLAGS(alhs) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_ISWRITEABLE(alhs) ||
+ PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
@@ -359,8 +361,10 @@ can_elide_temp_unary(PyArrayObject * m1)
{
int cannot;
if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) ||
- PyArray_DESCR(m1)->type_num == NPY_VOID ||
+ !PyArray_ISNUMBER(m1) ||
!(PyArray_FLAGS(m1) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_ISWRITEABLE(m1) ||
+ PyArray_CHKFLAGS(m1, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/private/mem_overlap.c
index b2b80b4e6..2145791e1 100644
--- a/numpy/core/src/private/mem_overlap.c
+++ b/numpy/core/src/private/mem_overlap.c
@@ -181,9 +181,6 @@
All rights reserved.
Licensed under 3-clause BSD license, see LICENSE.txt.
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <assert.h>
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
@@ -191,6 +188,10 @@
#include "mem_overlap.h"
#include "npy_extint128.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index b8e18e961..107b3cb5b 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -62,6 +62,19 @@
#endif
+/* MSVC _hypot messes with fp precision mode on 32-bit, see gh-9567 */
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) && !defined(_WIN64)
+
+#undef HAVE_CABS
+#undef HAVE_CABSF
+#undef HAVE_CABSL
+
+#undef HAVE_HYPOT
+#undef HAVE_HYPOTF
+#undef HAVE_HYPOTL
+
+#endif
+
/* Intel C for Windows uses POW for 64 bits longdouble*/
#if defined(_MSC_VER) && defined(__INTEL_COMPILER)
diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/private/ufunc_override.c
index 401228236..e405155cf 100644
--- a/numpy/core/src/private/ufunc_override.c
+++ b/numpy/core/src/private/ufunc_override.c
@@ -56,9 +56,9 @@ get_non_default_array_ufunc(PyObject *obj)
/*
* Check whether a set of input and output args have a non-default
* `__array_ufunc__` method. Return the number of overrides, setting
- * corresponding objects in PyObject array with_override (if not NULL)
- * using borrowed references, and the corresponding __array_ufunc__ methods
- * in methods, using new references
+ * corresponding objects in PyObject array with_override and the corresponding
+ * __array_ufunc__ methods in methods (both only if not NULL, and both using
+ * new references).
*
* returns -1 on failure.
*/
@@ -134,6 +134,7 @@ PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
goto fail;
}
if (with_override != NULL) {
+ Py_INCREF(obj);
with_override[num_override_args] = obj;
}
if (methods != NULL) {
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
new file mode 100644
index 000000000..344981622
--- /dev/null
+++ b/numpy/core/src/umath/extobj.c
@@ -0,0 +1,318 @@
+#define _UMATHMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include <Python.h>
+
+#include "npy_config.h"
+
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
+#define NO_IMPORT_ARRAY
+
+#include "npy_pycompat.h"
+
+#include "extobj.h"
+#include "numpy/ufuncobject.h"
+
+#include "ufunc_object.h" /* for npy_um_str_pyvals_name */
+#include "common.h"
+
+#if USE_USE_DEFAULTS==1
+static int PyUFunc_NUM_NODEFAULTS = 0;
+
+/*
+ * This is a strategy to buy a little speed up and avoid the dictionary
+ * look-up in the default case. It should work in the presence of
+ * threads. If it is deemed too complicated or it doesn't actually work
+ * it could be taken out.
+ */
+NPY_NO_EXPORT int
+ufunc_update_use_defaults(void)
+{
+ PyObject *errobj = NULL;
+ int errmask, bufsize;
+ int res;
+
+ PyUFunc_NUM_NODEFAULTS += 1;
+ res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj);
+ PyUFunc_NUM_NODEFAULTS -= 1;
+ if (res < 0) {
+ Py_XDECREF(errobj);
+ return -1;
+ }
+ if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE)
+ || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) {
+ PyUFunc_NUM_NODEFAULTS += 1;
+ }
+ else if (PyUFunc_NUM_NODEFAULTS > 0) {
+ PyUFunc_NUM_NODEFAULTS -= 1;
+ }
+ Py_XDECREF(errobj);
+ return 0;
+}
+#endif
+
+/*
+ * fpstatus is the ufunc's floating-point hardware status
+ * errmask is the handling mask specified by the user.
+ * errobj is a Python object with (string, callable object or None)
+ * or NULL
+ */
+
+/*
+ * 2. for each of the flags
+ * determine whether to ignore, warn, raise error, or call Python function.
+ * If ignore, do nothing
+ * If warn, print a warning and continue
+ * If raise, return an error
+ * If call, call a user-defined function with string
+ */
+
+NPY_NO_EXPORT int
+_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first)
+{
+ PyObject *pyfunc, *ret, *args;
+ char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0));
+ char msg[100];
+
+ NPY_ALLOW_C_API_DEF
+
+ /* don't need C API for a simple print */
+ if (method == UFUNC_ERR_PRINT) {
+ if (*first) {
+ fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name);
+ *first = 0;
+ }
+ return 0;
+ }
+
+ NPY_ALLOW_C_API;
+ switch(method) {
+ case UFUNC_ERR_WARN:
+ PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name);
+ if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) {
+ goto fail;
+ }
+ break;
+ case UFUNC_ERR_RAISE:
+ PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s",
+ errtype, name);
+ goto fail;
+ case UFUNC_ERR_CALL:
+ pyfunc = PyTuple_GET_ITEM(errobj, 1);
+ if (pyfunc == Py_None) {
+ PyErr_Format(PyExc_NameError,
+ "python callback specified for %s (in " \
+ " %s) but no function found.",
+ errtype, name);
+ goto fail;
+ }
+ args = Py_BuildValue("NN", PyUString_FromString(errtype),
+ PyInt_FromLong((long) retstatus));
+ if (args == NULL) {
+ goto fail;
+ }
+ ret = PyObject_CallObject(pyfunc, args);
+ Py_DECREF(args);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ break;
+ case UFUNC_ERR_LOG:
+ if (first) {
+ *first = 0;
+ pyfunc = PyTuple_GET_ITEM(errobj, 1);
+ if (pyfunc == Py_None) {
+ PyErr_Format(PyExc_NameError,
+ "log specified for %s (in %s) but no " \
+ "object with write method found.",
+ errtype, name);
+ goto fail;
+ }
+ PyOS_snprintf(msg, sizeof(msg),
+ "Warning: %s encountered in %s\n", errtype, name);
+ ret = PyObject_CallMethod(pyfunc, "write", "s", msg);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ }
+ break;
+ }
+ NPY_DISABLE_C_API;
+ return 0;
+
+fail:
+ NPY_DISABLE_C_API;
+ return -1;
+}
+
+
+
+NPY_NO_EXPORT PyObject *
+get_global_ext_obj(void)
+{
+ PyObject *thedict;
+ PyObject *ref = NULL;
+
+#if USE_USE_DEFAULTS==1
+ if (PyUFunc_NUM_NODEFAULTS != 0) {
+#endif
+ thedict = PyThreadState_GetDict();
+ if (thedict == NULL) {
+ thedict = PyEval_GetBuiltins();
+ }
+ ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
+#if USE_USE_DEFAULTS==1
+ }
+#endif
+
+ return ref;
+}
+
+
+/*
+ * Extracts some values from the global pyvals tuple.
+ * all destinations may be NULL, in which case they are not retrieved
+ * ref - should hold the global pyvals list [bufsize, errmask, errobj]
+ * name - is the name of the ufunc (ufuncobj->name)
+ *
+ * bufsize - receives the buffer size to use
+ * errmask - receives the bitmask for error handling
+ * errobj - receives the python object to call with the error,
+ * if an error handling method is 'call'
+ */
+NPY_NO_EXPORT int
+_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
+ int *errmask, PyObject **errobj)
+{
+ PyObject *retval;
+
+ /* default errobj case, skips dictionary lookup */
+ if (ref == NULL) {
+ if (errmask) {
+ *errmask = UFUNC_ERR_DEFAULT;
+ }
+ if (errobj) {
+ *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None);
+ }
+ if (bufsize) {
+ *bufsize = NPY_BUFSIZE;
+ }
+ return 0;
+ }
+
+ if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s must be a length 3 list.", UFUNC_PYVALS_NAME);
+ return -1;
+ }
+
+ if (bufsize != NULL) {
+ *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
+ if (error_converting(*bufsize)) {
+ return -1;
+ }
+ if ((*bufsize < NPY_MIN_BUFSIZE) ||
+ (*bufsize > NPY_MAX_BUFSIZE) ||
+ (*bufsize % 16 != 0)) {
+ PyErr_Format(PyExc_ValueError,
+ "buffer size (%d) is not in range "
+ "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16",
+ *bufsize, (npy_intp) NPY_MIN_BUFSIZE,
+ (npy_intp) NPY_MAX_BUFSIZE);
+ return -1;
+ }
+ }
+
+ if (errmask != NULL) {
+ *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
+ if (*errmask < 0) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ PyErr_Format(PyExc_ValueError,
+ "invalid error mask (%d)",
+ *errmask);
+ return -1;
+ }
+ }
+
+ if (errobj != NULL) {
+ *errobj = NULL;
+ retval = PyList_GET_ITEM(ref, 2);
+ if (retval != Py_None && !PyCallable_Check(retval)) {
+ PyObject *temp;
+ temp = PyObject_GetAttrString(retval, "write");
+ if (temp == NULL || !PyCallable_Check(temp)) {
+ PyErr_SetString(PyExc_TypeError,
+ "python object must be callable or have " \
+ "a callable write method");
+ Py_XDECREF(temp);
+ return -1;
+ }
+ Py_DECREF(temp);
+ }
+
+ *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval);
+ if (*errobj == NULL) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * check the floating point status
+ * - errmask: mask of status to check
+ * - extobj: ufunc pyvals object
+ * may be null, in which case the thread global one is fetched
+ * - ufunc_name: name of ufunc
+ */
+NPY_NO_EXPORT int
+_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
+ int fperr;
+ PyObject *errobj = NULL;
+ int ret;
+ int first = 1;
+
+ if (!errmask) {
+ return 0;
+ }
+ fperr = PyUFunc_getfperr();
+ if (!fperr) {
+ return 0;
+ }
+
+ /* Get error object globals */
+ if (extobj == NULL) {
+ extobj = get_global_ext_obj();
+ }
+ if (_extract_pyvals(extobj, ufunc_name,
+ NULL, NULL, &errobj) < 0) {
+ Py_XDECREF(errobj);
+ return -1;
+ }
+
+ ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first);
+ Py_XDECREF(errobj);
+
+ return ret;
+}
+
+
+NPY_NO_EXPORT int
+_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
+ int *buffersize, int *errormask)
+{
+ /* Get the buffersize and errormask */
+ if (extobj == NULL) {
+ extobj = get_global_ext_obj();
+ }
+ if (_extract_pyvals(extobj, ufunc_name,
+ buffersize, errormask, NULL) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/numpy/core/src/umath/extobj.h b/numpy/core/src/umath/extobj.h
new file mode 100644
index 000000000..1a569dfbd
--- /dev/null
+++ b/numpy/core/src/umath/extobj.h
@@ -0,0 +1,32 @@
+#ifndef _NPY_PRIVATE__EXTOBJ_H_
+#define _NPY_PRIVATE__EXTOBJ_H_
+
+#include <numpy/ndarraytypes.h> /* for NPY_NO_EXPORT */
+
+NPY_NO_EXPORT int
+_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first);
+
+NPY_NO_EXPORT PyObject *
+get_global_ext_obj(void);
+
+NPY_NO_EXPORT int
+_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
+ int *errmask, PyObject **errobj);
+
+NPY_NO_EXPORT int
+_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name);
+
+NPY_NO_EXPORT int
+_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
+ int *buffersize, int *errormask);
+
+/********************/
+#define USE_USE_DEFAULTS 1
+/********************/
+
+#if USE_USE_DEFAULTS==1
+NPY_NO_EXPORT int
+ufunc_update_use_defaults(void);
+#endif
+
+#endif
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 40ebc119a..670c39ea2 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -980,16 +980,6 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_true_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
-{
- BINARY_LOOP {
- const double in1 = (double)(*(@type@ *)ip1);
- const double in2 = (double)(*(@type@ *)ip2);
- *((double *)op1) = in1/in2;
- }
-}
-
-NPY_NO_EXPORT void
@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
@@ -1297,6 +1287,7 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
+ npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
@@ -1304,42 +1295,47 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = res;
if ((in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) && res) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
- "will always be False.") < 0) {
- NPY_DISABLE_C_API;
- return;
- }
- NPY_DISABLE_C_API;
+ give_future_warning = 1;
}
}
+ if (give_future_warning) {
+ NPY_ALLOW_C_API_DEF
+ NPY_ALLOW_C_API;
+ /* 2016-01-18, 1.11 */
+ if (DEPRECATE_FUTUREWARNING(
+ "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
+ "will always be False.") < 0) {
+ /* nothing to do, we return anyway */
+ }
+ NPY_DISABLE_C_API;
+ }
}
/**end repeat1**/
NPY_NO_EXPORT void
@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
+ npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
*((npy_bool *)op1) = in1 != in2;
if (in1 == NPY_DATETIME_NAT && in2 == NPY_DATETIME_NAT) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, NAT != NAT will be True "
- "rather than False.") < 0) {
- NPY_DISABLE_C_API;
- return;
- }
- NPY_DISABLE_C_API;
+ give_future_warning = 1;
}
}
+ if (give_future_warning) {
+ NPY_ALLOW_C_API_DEF
+ NPY_ALLOW_C_API;
+ /* 2016-01-18, 1.11 */
+ if (DEPRECATE_FUTUREWARNING(
+ "In the future, NAT != NAT will be True "
+ "rather than False.") < 0) {
+ /* nothing to do, we return anyway */
+ }
+ NPY_DISABLE_C_API;
+ }
}
@@ -1862,6 +1858,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
+ npy_clear_floatstatus();
}
/**end repeat1**/
@@ -2200,6 +2197,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
+ npy_clear_floatstatus();
}
/**end repeat**/
@@ -2749,6 +2747,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
+ npy_clear_floatstatus();
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 4243c6522..a978b03ee 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -120,9 +120,6 @@ NPY_NO_EXPORT void
/**end repeat2**/
NPY_NO_EXPORT void
-@S@@TYPE@_true_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-
-NPY_NO_EXPORT void
@S@@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 6b441cbbb..7e787b8fe 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -144,14 +144,16 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 3) {
- obj = PyTuple_GetSlice(args, 3, 4);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 3) {
- Py_DECREF(obj);
+ if (i == 3) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 3, 4);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 3) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -188,14 +190,16 @@ normalize_accumulate_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 3) {
- obj = PyTuple_GetSlice(args, 3, 4);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 3) {
- Py_DECREF(obj);
+ if (i == 3) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 3, 4);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 3) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -234,14 +238,16 @@ normalize_reduceat_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 4) {
- obj = PyTuple_GetSlice(args, 4, 5);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 4) {
- Py_DECREF(obj);
+ if (i == 4) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 4, 5);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 4) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -360,11 +366,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
if (out != NULL) {
int nout = ufunc->nout;
- if (PyTuple_Check(out)) {
+ if (PyTuple_CheckExact(out)) {
int all_none = 1;
if (PyTuple_GET_SIZE(out) != nout) {
- PyErr_Format(PyExc_TypeError,
+ PyErr_Format(PyExc_ValueError,
"The 'out' tuple must have exactly "
"%d entries: one per ufunc output", nout);
goto fail;
@@ -466,34 +472,15 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
status = -1;
}
if (status != 0) {
- Py_XDECREF(normal_args);
- goto fail;
- }
-
- len = PyTuple_GET_SIZE(normal_args);
- override_args = PyTuple_New(len + 3);
- if (override_args == NULL) {
goto fail;
}
- /* PyTuple_SET_ITEM steals reference */
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(override_args, 0, Py_None);
- Py_INCREF(ufunc);
- PyTuple_SET_ITEM(override_args, 1, (PyObject *)ufunc);
method_name = PyUString_FromString(method);
if (method_name == NULL) {
goto fail;
}
- Py_INCREF(method_name);
- PyTuple_SET_ITEM(override_args, 2, method_name);
- for (i = 0; i < len; i++) {
- PyObject *item = PyTuple_GET_ITEM(normal_args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(override_args, i + 3, item);
- }
- Py_DECREF(normal_args);
+ len = PyTuple_GET_SIZE(normal_args);
/* Call __array_ufunc__ functions in correct order */
while (1) {
@@ -527,12 +514,33 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/* override_obj had no subtypes to the right. */
if (override_obj) {
- /* We won't call this one again */
- with_override[i] = NULL;
override_array_ufunc = array_ufunc_methods[i];
+ /* We won't call this one again (references decref'd below) */
+ with_override[i] = NULL;
+ array_ufunc_methods[i] = NULL;
break;
}
}
+ /*
+ * Set override arguments for each call since the tuple must
+ * not be mutated after use in PyPy
+ * We increase all references since SET_ITEM steals
+ * them and they will be DECREF'd when the tuple is deleted.
+ */
+ override_args = PyTuple_New(len + 3);
+ if (override_args == NULL) {
+ goto fail;
+ }
+ Py_INCREF(ufunc);
+ PyTuple_SET_ITEM(override_args, 1, (PyObject *)ufunc);
+ Py_INCREF(method_name);
+ PyTuple_SET_ITEM(override_args, 2, method_name);
+ for (i = 0; i < len; i++) {
+ PyObject *item = PyTuple_GET_ITEM(normal_args, i);
+
+ Py_INCREF(item);
+ PyTuple_SET_ITEM(override_args, i + 3, item);
+ }
/* Check if there is a method left to call */
if (!override_obj) {
@@ -543,7 +551,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
npy_cache_import("numpy.core._internal",
"array_ufunc_errmsg_formatter",
&errmsg_formatter);
+
if (errmsg_formatter != NULL) {
+ /* All tuple items must be set before use */
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(override_args, 0, Py_None);
errmsg = PyObject_Call(errmsg_formatter, override_args,
normal_kwds);
if (errmsg != NULL) {
@@ -551,17 +563,20 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
Py_DECREF(errmsg);
}
}
+ Py_DECREF(override_args);
goto fail;
}
- /* Set the self argument, since we have an unbound method */
- Py_INCREF(override_obj);
- PyTuple_SetItem(override_args, 0, override_obj);
-
+ /*
+ * Set the self argument of our unbound method.
+ * This also steals the reference, so no need to DECREF after.
+ */
+ PyTuple_SET_ITEM(override_args, 0, override_obj);
/* Call the method */
*result = PyObject_Call(
override_array_ufunc, override_args, normal_kwds);
-
+ Py_DECREF(override_array_ufunc);
+ Py_DECREF(override_args);
if (*result == NULL) {
/* Exception occurred */
goto fail;
@@ -576,19 +591,18 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
break;
}
}
-
+ status = 0;
/* Override found, return it. */
- Py_XDECREF(method_name);
- Py_XDECREF(normal_kwds);
- Py_DECREF(override_args);
- return 0;
-
+ goto cleanup;
fail:
+ status = -1;
+cleanup:
for (i = 0; i < num_override_args; i++) {
+ Py_XDECREF(with_override[i]);
Py_XDECREF(array_ufunc_methods[i]);
}
+ Py_XDECREF(normal_args);
Py_XDECREF(method_name);
Py_XDECREF(normal_kwds);
- Py_XDECREF(override_args);
- return 1;
+ return status;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 47598bed9..390b28c31 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -21,8 +21,10 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "numpy/ufuncobject.h"
#include "lowlevel_strided_loops.h"
#include "reduction.h"
+#include "extobj.h" /* for _check_ufunc_fperr */
/*
* Allocates a result array for a reduction operation, with
@@ -437,6 +439,7 @@ PyArray_InitializeReduceResult(
* data : Data which is passed to assign_identity and the inner loop.
* buffersize : Buffer size for the iterator. For the default, pass in 0.
* funcname : The name of the reduction function, for error messages.
+ * errormask : forwarded from _get_bufsize_errmask
*
* TODO FIXME: if you squint, this is essentially an second independent
* implementation of generalized ufuncs with signature (i)->(), plus a few
@@ -458,7 +461,8 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
int subok,
PyArray_AssignReduceIdentityFunc *assign_identity,
PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname)
+ void *data, npy_intp buffersize, const char *funcname,
+ int errormask)
{
PyArrayObject *result = NULL, *op_view = NULL;
npy_intp skip_first_count = 0;
@@ -555,6 +559,9 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
+ /* Start with the floating-point exception flags cleared */
+ PyUFunc_clearfperr();
+
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
char **dataptr;
@@ -586,6 +593,12 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
}
+
+ /* Check whether any errors occurred during the loop */
+ if (PyErr_Occurred() ||
+ _check_ufunc_fperr(errormask, NULL, "reduce") < 0) {
+ goto fail;
+ }
NpyIter_Deallocate(iter);
Py_DECREF(op_view);
diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h
index 43cd071e0..7a55c5df5 100644
--- a/numpy/core/src/umath/reduction.h
+++ b/numpy/core/src/umath/reduction.h
@@ -137,6 +137,7 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
* data : Data which is passed to assign_identity and the inner loop.
* buffersize : Buffer size for the iterator. For the default, pass in 0.
* funcname : The name of the reduction function, for error messages.
+ * errormask : forwarded from _get_bufsize_errmask
*/
NPY_NO_EXPORT PyArrayObject *
PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
@@ -149,6 +150,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
int subok,
PyArray_AssignReduceIdentityFunc *assign_identity,
PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname);
+ void *data, npy_intp buffersize, const char *funcname,
+ int errormask);
#endif
diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src
index 01ded5bbd..26c3d3799 100644
--- a/numpy/core/src/umath/test_rational.c.src
+++ b/numpy/core/src/umath/test_rational.c.src
@@ -9,6 +9,9 @@
#include <numpy/npy_3kcompat.h>
#include <math.h>
+#include "common.h" /* for error_converting */
+
+
/* Relevant arithmetic exceptions */
/* Uncomment the following line to work around a bug in numpy */
@@ -425,7 +428,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
PyObject* y;
int eq;
n[i] = PyInt_AsLong(x[i]);
- if (n[i]==-1 && PyErr_Occurred()) {
+ if (error_converting(n[i])) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Format(PyExc_TypeError,
"expected integer %s, got %s",
@@ -473,7 +476,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
PyObject* y_; \
int eq_; \
long n_ = PyInt_AsLong(object); \
- if (n_==-1 && PyErr_Occurred()) { \
+ if (error_converting(n_)) { \
if (PyErr_ExceptionMatches(PyExc_TypeError)) { \
PyErr_Clear(); \
Py_INCREF(Py_NotImplemented); \
@@ -750,7 +753,7 @@ npyrational_setitem(PyObject* item, void* data, void* arr) {
long n = PyInt_AsLong(item);
PyObject* y;
int eq;
- if (n==-1 && PyErr_Occurred()) {
+ if (error_converting(n)) {
return -1;
}
y = PyInt_FromLong(n);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index e1219039c..16693b366 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -46,6 +46,8 @@
#include "ufunc_object.h"
#include "override.h"
#include "npy_import.h"
+#include "extobj.h"
+#include "common.h"
/********** PRINTF DEBUG TRACING **************/
#define NPY_UF_DBG_TRACING 0
@@ -63,21 +65,12 @@
#endif
/**********************************************/
-
-/********************/
-#define USE_USE_DEFAULTS 1
-/********************/
-
/* ---------------------------------------------------------------- */
static int
_does_loop_use_arrays(void *data);
static int
-_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
- int *errmask, PyObject **errobj);
-
-static int
assign_reduce_identity_zero(PyArrayObject *result, void *data);
static int
@@ -87,103 +80,6 @@ static int
assign_reduce_identity_one(PyArrayObject *result, void *data);
-/*
- * fpstatus is the ufunc_formatted hardware status
- * errmask is the handling mask specified by the user.
- * errobj is a Python object with (string, callable object or None)
- * or NULL
- */
-
-/*
- * 2. for each of the flags
- * determine whether to ignore, warn, raise error, or call Python function.
- * If ignore, do nothing
- * If warn, print a warning and continue
- * If raise return an error
- * If call, call a user-defined function with string
- */
-
-static int
-_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first)
-{
- PyObject *pyfunc, *ret, *args;
- char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0));
- char msg[100];
-
- NPY_ALLOW_C_API_DEF
-
- /* don't need C API for a simple print */
- if (method == UFUNC_ERR_PRINT) {
- if (*first) {
- fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name);
- *first = 0;
- }
- return 0;
- }
-
- NPY_ALLOW_C_API;
- switch(method) {
- case UFUNC_ERR_WARN:
- PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name);
- if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) {
- goto fail;
- }
- break;
- case UFUNC_ERR_RAISE:
- PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s",
- errtype, name);
- goto fail;
- case UFUNC_ERR_CALL:
- pyfunc = PyTuple_GET_ITEM(errobj, 1);
- if (pyfunc == Py_None) {
- PyErr_Format(PyExc_NameError,
- "python callback specified for %s (in " \
- " %s) but no function found.",
- errtype, name);
- goto fail;
- }
- args = Py_BuildValue("NN", PyUString_FromString(errtype),
- PyInt_FromLong((long) retstatus));
- if (args == NULL) {
- goto fail;
- }
- ret = PyObject_CallObject(pyfunc, args);
- Py_DECREF(args);
- if (ret == NULL) {
- goto fail;
- }
- Py_DECREF(ret);
- break;
- case UFUNC_ERR_LOG:
- if (first) {
- *first = 0;
- pyfunc = PyTuple_GET_ITEM(errobj, 1);
- if (pyfunc == Py_None) {
- PyErr_Format(PyExc_NameError,
- "log specified for %s (in %s) but no " \
- "object with write method found.",
- errtype, name);
- goto fail;
- }
- PyOS_snprintf(msg, sizeof(msg),
- "Warning: %s encountered in %s\n", errtype, name);
- ret = PyObject_CallMethod(pyfunc, "write", "s", msg);
- if (ret == NULL) {
- goto fail;
- }
- Py_DECREF(ret);
- }
- break;
- }
- NPY_DISABLE_C_API;
- return 0;
-
-fail:
- NPY_DISABLE_C_API;
- return -1;
-}
-
-
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_getfperr(void)
@@ -239,49 +135,6 @@ PyUFunc_clearfperr()
npy_clear_floatstatus();
}
-
-#if USE_USE_DEFAULTS==1
-static int PyUFunc_NUM_NODEFAULTS = 0;
-#endif
-
-static PyObject *
-get_global_ext_obj(void)
-{
- PyObject *thedict;
- PyObject *ref = NULL;
-
-#if USE_USE_DEFAULTS==1
- if (PyUFunc_NUM_NODEFAULTS != 0) {
-#endif
- thedict = PyThreadState_GetDict();
- if (thedict == NULL) {
- thedict = PyEval_GetBuiltins();
- }
- ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
-#if USE_USE_DEFAULTS==1
- }
-#endif
-
- return ref;
-}
-
-
-static int
-_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
- int *buffersize, int *errormask)
-{
- /* Get the buffersize and errormask */
- if (extobj == NULL) {
- extobj = get_global_ext_obj();
- }
- if (_extract_pyvals(extobj, ufunc_name,
- buffersize, errormask, NULL) < 0) {
- return -1;
- }
-
- return 0;
-}
-
/*
* This function analyzes the input arguments
* and determines an appropriate __array_prepare__ function to call
@@ -426,97 +279,6 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
return;
}
-/*
- * Extracts some values from the global pyvals tuple.
- * all destinations may be NULL, in which case they are not retrieved
- * ref - should hold the global tuple
- * name - is the name of the ufunc (ufuncobj->name)
- *
- * bufsize - receives the buffer size to use
- * errmask - receives the bitmask for error handling
- * errobj - receives the python object to call with the error,
- * if an error handling method is 'call'
- */
-static int
-_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
- int *errmask, PyObject **errobj)
-{
- PyObject *retval;
-
- /* default errobj case, skips dictionary lookup */
- if (ref == NULL) {
- if (errmask) {
- *errmask = UFUNC_ERR_DEFAULT;
- }
- if (errobj) {
- *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None);
- }
- if (bufsize) {
- *bufsize = NPY_BUFSIZE;
- }
- return 0;
- }
-
- if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) {
- PyErr_Format(PyExc_TypeError,
- "%s must be a length 3 list.", UFUNC_PYVALS_NAME);
- return -1;
- }
-
- if (bufsize != NULL) {
- *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
- if ((*bufsize == -1) && PyErr_Occurred()) {
- return -1;
- }
- if ((*bufsize < NPY_MIN_BUFSIZE) ||
- (*bufsize > NPY_MAX_BUFSIZE) ||
- (*bufsize % 16 != 0)) {
- PyErr_Format(PyExc_ValueError,
- "buffer size (%d) is not in range "
- "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16",
- *bufsize, (npy_intp) NPY_MIN_BUFSIZE,
- (npy_intp) NPY_MAX_BUFSIZE);
- return -1;
- }
- }
-
- if (errmask != NULL) {
- *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
- if (*errmask < 0) {
- if (PyErr_Occurred()) {
- return -1;
- }
- PyErr_Format(PyExc_ValueError,
- "invalid error mask (%d)",
- *errmask);
- return -1;
- }
- }
-
- if (errobj != NULL) {
- *errobj = NULL;
- retval = PyList_GET_ITEM(ref, 2);
- if (retval != Py_None && !PyCallable_Check(retval)) {
- PyObject *temp;
- temp = PyObject_GetAttrString(retval, "write");
- if (temp == NULL || !PyCallable_Check(temp)) {
- PyErr_SetString(PyExc_TypeError,
- "python object must be callable or have " \
- "a callable write method");
- Py_XDECREF(temp);
- return -1;
- }
- Py_DECREF(temp);
- }
-
- *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval);
- if (*errobj == NULL) {
- return -1;
- }
- }
- return 0;
-}
-
/*UFUNC_API
*
@@ -761,8 +523,8 @@ _set_out_array(PyObject *obj, PyArrayObject **store)
* Produce a name for the ufunc, if one is not already set
* This is used in the PyUFunc_handlefperr machinery, and in error messages
*/
-static const char*
-_get_ufunc_name(PyUFuncObject *ufunc) {
+NPY_NO_EXPORT const char*
+ufunc_get_name_cstr(PyUFuncObject *ufunc) {
return ufunc->name ? ufunc->name : "<unnamed ufunc>";
}
@@ -789,7 +551,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
int nout = ufunc->nout;
PyObject *obj, *context;
PyObject *str_key_obj = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
int type_num;
int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
@@ -1084,7 +846,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
"positional and keyword argument");
goto fail;
}
- if (PyTuple_Check(value)) {
+ if (PyTuple_CheckExact(value)) {
if (PyTuple_GET_SIZE(value) != nout) {
PyErr_SetString(PyExc_ValueError,
"The 'out' tuple must have exactly "
@@ -1752,6 +1514,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
npy_intp *strides;
npy_intp *countptr;
+ PyArrayObject **op_it;
npy_uint32 iter_flags;
if (wheremask != NULL) {
@@ -1783,12 +1546,13 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
for (i = nin; i < nop; ++i) {
/*
* We don't write to all elements, and the iterator may make
- * UPDATEIFCOPY temporary copies. The output arrays must be considered
- * READWRITE by the iterator, so that the elements we don't write to are
- * copied to the possible temporary array.
+ * UPDATEIFCOPY temporary copies. The output arrays (unless they are
+ * allocated by the iterator itself) must be considered READWRITE by the
+ * iterator, so that the elements we don't write to are copied to the
+ * possible temporary array.
*/
op_flags[i] = default_op_out_flags |
- NPY_ITER_READWRITE |
+ (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY) |
NPY_ITER_ALIGNED |
NPY_ITER_ALLOCATE |
NPY_ITER_NO_BROADCAST |
@@ -1828,11 +1592,24 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
needs_api = NpyIter_IterationNeedsAPI(iter);
/* Call the __array_prepare__ functions where necessary */
+ op_it = NpyIter_GetOperandArray(iter);
for (i = nin; i < nop; ++i) {
- PyArrayObject *op_tmp;
+ PyArrayObject *op_tmp, *orig_op_tmp;
- /* prepare_ufunc_output may decref & replace pointer */
- op_tmp = op[i];
+ /*
+ * The array can be allocated by the iterator -- it is placed in op[i]
+ * and returned to the caller, and this needs an extra incref.
+ */
+ if (op[i] == NULL) {
+ op_tmp = op_it[i];
+ Py_INCREF(op_tmp);
+ }
+ else {
+ op_tmp = op[i];
+ }
+
+ /* prepare_ufunc_output may decref & replace the pointer */
+ orig_op_tmp = op_tmp;
Py_INCREF(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
@@ -1842,7 +1619,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
}
/* Validate that the prepare_ufunc_output didn't mess with pointers */
- if (PyArray_BYTES(op_tmp) != PyArray_BYTES(op[i])) {
+ if (PyArray_BYTES(op_tmp) != PyArray_BYTES(orig_op_tmp)) {
PyErr_SetString(PyExc_ValueError,
"The __array_prepare__ functions modified the data "
"pointer addresses in an invalid fashion");
@@ -1853,8 +1630,8 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
/*
* Put the updated operand back and undo the DECREF above. If
- * COPY_IF_OVERLAP made a temporary copy, the output will be copied in
- * by UPDATEIFCOPY even if op[i] was changed.
+ * COPY_IF_OVERLAP made a temporary copy, the output will be copied
+ * by UPDATEIFCOPY even if op[i] was changed by prepare_ufunc_output.
*/
op[i] = op_tmp;
Py_DECREF(op_tmp);
@@ -1954,44 +1731,6 @@ make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds)
}
/*
- * check the floating point status
- * - errmask: mask of status to check
- * - extobj: ufunc pyvals object
- * may be null, in which case the thread global one is fetched
- * - ufunc_name: name of ufunc
- */
-static int
-_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
- int fperr;
- PyObject *errobj = NULL;
- int ret;
- int first = 1;
-
- if (!errmask) {
- return 0;
- }
- fperr = PyUFunc_getfperr();
- if (!fperr) {
- return 0;
- }
-
- /* Get error object globals */
- if (extobj == NULL) {
- extobj = get_global_ext_obj();
- }
- if (_extract_pyvals(extobj, ufunc_name,
- NULL, NULL, &errobj) < 0) {
- Py_XDECREF(errobj);
- return -1;
- }
-
- ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first);
- Py_XDECREF(errobj);
-
- return ret;
-}
-
-/*
* Validate the core dimensions of all the operands, and collect all of
* the labelled core dimensions into 'core_dim_sizes'.
*
@@ -2035,7 +1774,7 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
"%s: %s operand %d does not have enough "
"dimensions (has %d, gufunc core with "
"signature %s requires %d)",
- _get_ufunc_name(ufunc), i < nin ? "Input" : "Output",
+ ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
i < nin ? i : i - nin, PyArray_NDIM(op[i]),
ufunc->core_signature, num_dims);
return -1;
@@ -2059,7 +1798,7 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
"core dimension %d, with gufunc "
"signature %s (size %zd is different "
"from %zd)",
- _get_ufunc_name(ufunc), i < nin ? "Input" : "Output",
+ ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
i < nin ? i : i - nin, idim,
ufunc->core_signature, op_dim_size,
core_dim_sizes[core_dim_index]);
@@ -2102,13 +1841,12 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
PyErr_Format(PyExc_ValueError,
"%s: Output operand %d has core dimension %d "
"unspecified, with gufunc signature %s",
- _get_ufunc_name(ufunc), out_op, i, ufunc->core_signature);
+ ufunc_get_name_cstr(ufunc), out_op, i, ufunc->core_signature);
return -1;
}
return 0;
}
-
static int
PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyObject *args, PyObject *kwds,
@@ -2171,7 +1909,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
nout = ufunc->nout;
nop = nin + nout;
- ufunc_name = _get_ufunc_name(ufunc);
+ ufunc_name = ufunc_get_name_cstr(ufunc);
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
@@ -2633,7 +2371,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
nout = ufunc->nout;
nop = nin + nout;
- ufunc_name = _get_ufunc_name(ufunc);
+ ufunc_name = ufunc_get_name_cstr(ufunc);
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
@@ -2873,7 +2611,7 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
int i, retcode;
PyArrayObject *op[3] = {arr, arr, NULL};
PyArray_Descr *dtypes[3] = {NULL, NULL, NULL};
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
PyObject *type_tup = NULL;
*out_dtype = NULL;
@@ -3062,7 +2800,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
PyArray_Descr *dtype;
PyArrayObject *result;
PyArray_AssignReduceIdentityFunc *assign_identity = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
@@ -3145,7 +2883,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
keepdims, 0,
assign_identity,
reduce_loop,
- ufunc, buffersize, ufunc_name);
+ ufunc, buffersize, ufunc_name, errormask);
Py_DECREF(dtype);
return result;
@@ -3170,7 +2908,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from extobj= or from a TLS global */
int buffersize = 0, errormask = 0;
@@ -3537,7 +3275,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
char *opname = "reduceat";
/* These parameters come from extobj= or from a TLS global */
@@ -3899,7 +3637,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
static char *reduce_kwlist[] = {
"array", "axis", "dtype", "out", "keepdims", NULL};
static char *accumulate_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", NULL};
+ "array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
"array", "indices", "axis", "dtype", "out", NULL};
@@ -3927,7 +3665,20 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
_reduce_type[operation]);
return NULL;
}
-
+ /* if there is a tuple of 1 for `out` in kwds, unpack it */
+ if (kwds != NULL) {
+ PyObject *out_obj = PyDict_GetItem(kwds, npy_um_str_out);
+ if (out_obj != NULL && PyTuple_CheckExact(out_obj)) {
+ if (PyTuple_GET_SIZE(out_obj) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "The 'out' tuple must have exactly one entry");
+ return NULL;
+ }
+ out_obj = PyTuple_GET_ITEM(out_obj, 0);
+ PyDict_SetItem(kwds, npy_um_str_out, out_obj);
+ }
+ }
+
if (operation == UFUNC_REDUCEAT) {
PyArray_Descr *indtype;
indtype = PyArray_DescrFromType(NPY_INTP);
@@ -3948,26 +3699,15 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
}
else if (operation == UFUNC_ACCUMULATE) {
- PyObject *bad_keepdimarg = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&O:accumulate",
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&:accumulate",
accumulate_kwlist,
&op,
&axes_in,
PyArray_DescrConverter2, &otype,
- PyArray_OutputConverter, &out,
- &bad_keepdimarg)) {
+ PyArray_OutputConverter, &out)) {
Py_XDECREF(otype);
return NULL;
}
- /* Until removed outright by https://github.com/numpy/numpy/pull/8187 */
- if (bad_keepdimarg != NULL) {
- if (DEPRECATE_FUTUREWARNING(
- "keepdims argument has no effect on accumulate, and will be "
- "removed in future") < 0) {
- Py_XDECREF(otype);
- return NULL;
- }
- }
}
else {
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce",
@@ -4031,7 +3771,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
for (i = 0; i < naxes; ++i) {
PyObject *tmp = PyTuple_GET_ITEM(axes_in, i);
int axis = PyArray_PyIntAsInt(tmp);
- if (axis == -1 && PyErr_Occurred()) {
+ if (error_converting(axis)) {
Py_XDECREF(otype);
Py_DECREF(mp);
return NULL;
@@ -4048,7 +3788,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
else {
int axis = PyArray_PyIntAsInt(axes_in);
/* TODO: PyNumber_Index would be good to use here */
- if (axis == -1 && PyErr_Occurred()) {
+ if (error_converting(axis)) {
Py_XDECREF(otype);
Py_DECREF(mp);
return NULL;
@@ -4507,39 +4247,6 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
return res;
}
-#if USE_USE_DEFAULTS==1
-/*
- * This is a strategy to buy a little speed up and avoid the dictionary
- * look-up in the default case. It should work in the presence of
- * threads. If it is deemed too complicated or it doesn't actually work
- * it could be taken out.
- */
-static int
-ufunc_update_use_defaults(void)
-{
- PyObject *errobj = NULL;
- int errmask, bufsize;
- int res;
-
- PyUFunc_NUM_NODEFAULTS += 1;
- res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj);
- PyUFunc_NUM_NODEFAULTS -= 1;
- if (res < 0) {
- Py_XDECREF(errobj);
- return -1;
- }
- if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE)
- || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) {
- PyUFunc_NUM_NODEFAULTS += 1;
- }
- else if (PyUFunc_NUM_NODEFAULTS > 0) {
- PyUFunc_NUM_NODEFAULTS -= 1;
- }
- Py_XDECREF(errobj);
- return 0;
-}
-#endif
-
NPY_NO_EXPORT PyObject *
ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
@@ -5596,8 +5303,10 @@ ufunc_get_doc(PyUFuncObject *ufunc)
if (doc == NULL) {
return NULL;
}
- PyUString_ConcatAndDel(&doc,
- PyUString_FromFormat("\n\n%s", ufunc->doc));
+ if (ufunc->doc != NULL) {
+ PyUString_ConcatAndDel(&doc,
+ PyUString_FromFormat("\n\n%s", ufunc->doc));
+ }
return doc;
}
diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h
index 5613f38b4..d6fd3837a 100644
--- a/numpy/core/src/umath/ufunc_object.h
+++ b/numpy/core/src/umath/ufunc_object.h
@@ -7,6 +7,9 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
NPY_NO_EXPORT PyObject *
ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
+NPY_NO_EXPORT const char*
+ufunc_get_name_cstr(PyUFuncObject *ufunc);
+
/* interned strings (on umath import) */
NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_out;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_subok;
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 0fd3c45c5..e77b48fc4 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -21,6 +21,7 @@
#include "numpy/ufuncobject.h"
#include "ufunc_type_resolution.h"
+#include "ufunc_object.h"
#include "common.h"
static const char *
@@ -56,9 +57,7 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
PyArray_Descr **dtypes)
{
int i, nin = ufunc->nin, nop = nin + ufunc->nout;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
for (i = 0; i < nop; ++i) {
if (i < nin) {
@@ -184,9 +183,7 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1, type_num2;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 2 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -290,9 +287,7 @@ PyUFunc_SimpleUnaryOperationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 1 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -430,9 +425,7 @@ PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1, type_num2;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 2 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -614,9 +607,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -804,9 +795,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -986,9 +975,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1130,9 +1117,7 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1234,19 +1219,63 @@ type_reso_error: {
}
}
+
+/*
+ * True division should return float64 results when both inputs are integer
+ * types. The PyUFunc_DefaultTypeResolver promotes 8 bit integers to float16
+ * and 16 bit integers to float32, so that is overridden here by specifying a
+ * 'dd->d' signature. Returns -1 on failure.
+*/
+NPY_NO_EXPORT int
+PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ static PyObject *default_type_tup = NULL;
+
+ /* Set default type for integer inputs to NPY_DOUBLE */
+ if (default_type_tup == NULL) {
+ PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE);
+
+ if (tmp == NULL) {
+ return -1;
+ }
+ default_type_tup = PyTuple_Pack(3, tmp, tmp, tmp);
+ if (default_type_tup == NULL) {
+ Py_DECREF(tmp);
+ return -1;
+ }
+ Py_DECREF(tmp);
+ }
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ if (type_tup == NULL &&
+ (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
+ (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ default_type_tup, out_dtypes);
+ }
+ return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+}
/*
- * Function to check and report floor division warning when python2.x is
- * invoked with -3 switch
+ * Function to check and report floor division warning when python2.x is
+ * invoked with -3 switch
* See PEP238 and #7949 for numpy
- * This function will not be hit for py3 or when __future__ imports division.
+ * This function will not be hit for py3 or when __future__ imports division.
* See generate_umath.py for reason
*/
NPY_NO_EXPORT int
PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
{
/* Depreciation checks needed only on python 2 */
#if !defined(NPY_PY3K)
@@ -1255,17 +1284,15 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
- /* If both types are integer, warn the user, same as python does */
+ /* If both types are integer, warn the user, same as python does */
if (Py_DivisionWarningFlag &&
- (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
- (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2)))
- {
+ (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
+ (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
PyErr_Warn(PyExc_DeprecationWarning, "numpy: classic int division");
- }
-#endif
-
- return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
- type_tup, out_dtypes);
+ }
+#endif
+ return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
}
@@ -1305,8 +1332,9 @@ find_userloop(PyUFuncObject *ufunc,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
for (j = 0; j < nargs; ++j) {
@@ -1320,8 +1348,6 @@ find_userloop(PyUFuncObject *ufunc,
*out_innerloopdata = funcdata->data;
return 1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1343,7 +1369,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
PyObject *errmsg;
int i, j;
- ufunc_name = ufunc->name ? ufunc->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(ufunc);
/*
* If there are user-loops search them first.
@@ -1727,8 +1753,9 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
@@ -1744,8 +1771,6 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
set_ufunc_loop_data_types(self, op, out_dtype, types, funcdata->arg_dtypes);
return 1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1792,8 +1817,10 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
int matched = 1;
@@ -1832,14 +1859,12 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
"matching the type-tuple, "
"but the inputs and/or outputs could not be "
"cast according to the casting rule",
- self->name ? self->name : "(unknown)");
+ ufunc_get_name_cstr(self));
return -1;
/* Error */
case -1:
return -1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1938,7 +1963,7 @@ linear_search_type_resolver(PyUFuncObject *self,
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
- ufunc_name = self->name ? self->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(self);
use_min_scalar = should_use_min_scalar(op, nin);
@@ -2047,7 +2072,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
- ufunc_name = self->name ? self->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(self);
use_min_scalar = should_use_min_scalar(op, nin);
@@ -2059,7 +2084,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
PyErr_Format(PyExc_ValueError,
"a type-tuple must be specified "
"of length 1 or %d for ufunc '%s'", (int)nop,
- self->name ? self->name : "(unknown)");
+ ufunc_get_name_cstr(self));
return -1;
}
@@ -2112,7 +2137,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
"requires 1 typecode, or "
"%d typecode(s) before " \
"and %d after the -> sign",
- self->name ? self->name : "(unknown)",
+ ufunc_get_name_cstr(self),
self->nin, self->nout);
Py_XDECREF(str_obj);
return -1;
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index eaf5e91ce..fa9f1dbfa 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -42,7 +42,7 @@ PyUFunc_AbsoluteTypeResolver(PyUFuncObject *ufunc,
PyArrayObject **operands,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
-
+
NPY_NO_EXPORT int
PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
@@ -79,6 +79,13 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
+PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+NPY_NO_EXPORT int
PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
diff --git a/numpy/core/src/umath/umath_tests.c.src b/numpy/core/src/umath/umath_tests.c.src
index 6cd181897..8d9009a1a 100644
--- a/numpy/core/src/umath/umath_tests.c.src
+++ b/numpy/core/src/umath/umath_tests.c.src
@@ -305,6 +305,12 @@ addUfuncs(PyObject *dictionary) {
0, euclidean_pdist_signature);
PyDict_SetItemString(dictionary, "euclidean_pdist", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data,
+ inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d_no_doc",
+ NULL,
+ 0, inner1d_signature);
+ PyDict_SetItemString(dictionary, "inner1d_no_doc", f);
+ Py_DECREF(f);
}
diff --git a/numpy/core/tests/__init__.py b/numpy/core/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/core/tests/__init__.py
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
index 2430866fd..77cf40620 100644
--- a/numpy/core/tests/test_abc.py
+++ b/numpy/core/tests/test_abc.py
@@ -1,43 +1,56 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
import numbers
+
+import numpy as np
from numpy.core.numerictypes import sctypes
-class ABC(TestCase):
+class TestABC(object):
+ def test_abstract(self):
+ assert_(issubclass(np.number, numbers.Number))
+
+ assert_(issubclass(np.inexact, numbers.Complex))
+ assert_(issubclass(np.complexfloating, numbers.Complex))
+ assert_(issubclass(np.floating, numbers.Real))
+
+ assert_(issubclass(np.integer, numbers.Integral))
+ assert_(issubclass(np.signedinteger, numbers.Integral))
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
def test_floats(self):
for t in sctypes['float']:
- assert_(isinstance(t(), numbers.Real),
+ assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
- assert_(not isinstance(t(), numbers.Rational),
+ assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
- assert_(isinstance(t(), numbers.Complex),
+ assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
- assert_(not isinstance(t(), numbers.Real),
+ assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index e7ac0cdfd..7d4acd35d 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal
)
class TestArrayRepr(object):
@@ -61,7 +61,7 @@ class TestArrayRepr(object):
'array([list([1, 2]), list([3])], dtype=object)')
-class TestComplexArray(TestCase):
+class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
@@ -108,19 +108,13 @@ class TestComplexArray(TestCase):
for res, val in zip(actual, wanted):
assert_(res == val)
-class TestArray2String(TestCase):
+class TestArray2String(object):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
- def test_style_keyword(self):
- """This should only apply to 0-D arrays. See #1218."""
- stylestr = np.array2string(np.array(1.5),
- style=lambda x: "Value in 0-D array: " + str(x))
- assert_(stylestr == 'Value in 0-D array: 1.5')
-
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
@@ -189,13 +183,13 @@ class TestArray2String(TestCase):
assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)")
-class TestPrintOptions:
+class TestPrintOptions(object):
"""Test getting and setting global print options."""
- def setUp(self):
+ def setup(self):
self.oldopts = np.get_printoptions()
- def tearDown(self):
+ def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
@@ -242,6 +236,14 @@ class TestPrintOptions:
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([ 0., 1., 2.])")
+ def test_0d_arrays(self):
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
+ "array('2005-02-25', dtype='datetime64[D]')")
+
+ x = np.array(1)
+ np.set_printoptions(formatter={'all':lambda x: "test"})
+ assert_equal(repr(x), "array(test)")
+
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 48afa728d..92a1325bc 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -6,7 +6,7 @@ import numpy
import numpy as np
import datetime
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_warns, dec, suppress_warnings
)
@@ -18,7 +18,7 @@ except ImportError:
_has_pytz = False
-class TestDateTime(TestCase):
+class TestDateTime(object):
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
@@ -1131,7 +1131,19 @@ class TestDateTime(TestCase):
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
- self.assertEqual(len(sup.log), 0)
+ assert_equal(len(sup.log), 0)
+
+ def test_datetime_futurewarning_once_nat(self):
+ # Test that the futurewarning is only given once per inner loop
+ arr1 = np.array(['NaT', 'NaT', '2000-01-01'] * 2, dtype='M8[s]')
+ arr2 = np.array(['NaT', '2000-01-01', 'NaT'] * 2, dtype='M8[s]')
+ # All except less, because for less it can't be wrong (NaT is min)
+ for op in [np.equal, np.less, np.less_equal,
+ np.greater, np.greater_equal]:
+ with suppress_warnings() as sup:
+ rec = sup.record(FutureWarning, ".*NAT")
+ op(arr1, arr2)
+ assert_(len(rec) == 1, "failed for {}".format(op))
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
@@ -1227,10 +1239,10 @@ class TestDateTime(TestCase):
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
- self.assertRaises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
- self.assertRaises(ValueError, lambda: np.dtype('M8[as/10]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
@@ -1947,7 +1959,7 @@ class TestDateTime(TestCase):
assert_raises(ValueError, np.isnat, np.zeros(10, t))
-class TestDateTimeData(TestCase):
+class TestDateTimeData(object):
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 11d7c3b90..436643899 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -5,13 +5,14 @@ import sys
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
+ suppress_warnings,
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
-class TestBasic(TestCase):
+class TestBasic(object):
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
@@ -23,7 +24,7 @@ class TestBasic(TestCase):
def test_from_object_array_unicode(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
- self.assertRaises(ValueError, np.char.array, (A,))
+ assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', u'Sigma \u03a3'],
@@ -62,7 +63,7 @@ class TestBasic(TestCase):
def fail():
np.char.array(A, **kw_unicode_false)
- self.assertRaises(UnicodeEncodeError, fail)
+ assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
@@ -82,59 +83,59 @@ class TestBasic(TestCase):
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
-class TestVecString(TestCase):
+class TestVecString(object):
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
- self.assertRaises(AttributeError, fail)
+ assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.integer, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
-class TestWhitespace(TestCase):
- def setUp(self):
+class TestWhitespace(object):
+ def setup(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
@@ -148,16 +149,16 @@ class TestWhitespace(TestCase):
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
-class TestChar(TestCase):
- def setUp(self):
+class TestChar(object):
+ def setup(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
-class TestComparisons(TestCase):
- def setUp(self):
+class TestComparisons(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
@@ -184,21 +185,21 @@ class TestComparisons(TestCase):
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
-class TestInformation(TestCase):
- def setUp(self):
+class TestInformation(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
@@ -230,7 +231,7 @@ class TestInformation(TestCase):
def fail():
self.A.endswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
@@ -244,7 +245,7 @@ class TestInformation(TestCase):
def fail():
self.A.index('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
@@ -288,7 +289,7 @@ class TestInformation(TestCase):
def fail():
self.A.rindex('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
@@ -300,11 +301,11 @@ class TestInformation(TestCase):
def fail():
self.A.startswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
-class TestMethods(TestCase):
- def setUp(self):
+class TestMethods(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
@@ -346,8 +347,11 @@ class TestMethods(TestCase):
A = np.char.array([b'\\u03a3'])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
else:
- A = np.char.array(['736563726574206d657373616765'])
- assert_(A.decode('hex_codec')[0] == 'secret message')
+ with suppress_warnings() as sup:
+ if sys.py3kwarning:
+ sup.filter(DeprecationWarning, "'hex_codec'")
+ A = np.char.array(['736563726574206d657373616765'])
+ assert_(A.decode('hex_codec')[0] == 'secret message')
def test_encode(self):
B = self.B.encode('unicode_escape')
@@ -579,7 +583,7 @@ class TestMethods(TestCase):
def fail():
self.A.isnumeric()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
@@ -589,14 +593,14 @@ class TestMethods(TestCase):
def fail():
self.A.isdecimal()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
-class TestOperations(TestCase):
- def setUp(self):
+class TestOperations(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 0ce7465fb..e3e8c32f9 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -28,7 +28,7 @@ class _DeprecationTestCase(object):
message = ''
warning_cls = DeprecationWarning
- def setUp(self):
+ def setup(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
@@ -42,7 +42,7 @@ class _DeprecationTestCase(object):
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
- def tearDown(self):
+ def teardown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
@@ -259,7 +259,7 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
"""
def test_fortran_contiguous(self):
- self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
@@ -376,20 +376,10 @@ class TestNumericStyleTypecodes(_DeprecationTestCase):
args=(dt,))
-class TestAccumulateKeepDims(_DeprecationTestCase):
- """
- Deprecate the keepdims argument to np.ufunc.accumulate, which was never used or documented
- """
- def test_keepdims(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('always', '', FutureWarning)
- assert_warns(FutureWarning, np.add.accumulate, [1], keepdims=True)
-
-
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
- test_case_instance.setUp()
+ test_case_instance.setup()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
@@ -398,7 +388,7 @@ class TestTestDeprecated(object):
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
- test_case_instance.tearDown()
+ test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
@@ -444,5 +434,21 @@ class TestNPY_CHAR(_DeprecationTestCase):
assert_(npy_char_deprecation() == 'S1')
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 452cbd4bd..9cefb2ad1 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,11 +1,12 @@
from __future__ import division, absolute_import, print_function
+import pickle
import sys
import numpy as np
from numpy.core.test_rational import rational
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
dec
)
@@ -19,10 +20,10 @@ def assert_dtype_not_equal(a, b):
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
-class TestBuiltin(TestCase):
+class TestBuiltin(object):
def test_run(self):
"""Only test hash runs at all."""
- for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
+ for t in [int, float, complex, np.int32, str, object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
@@ -30,12 +31,12 @@ class TestBuiltin(TestCase):
def test_dtype(self):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
- for t in [np.int, np.float]:
+ for t in [int, float]:
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
- self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
@@ -50,8 +51,8 @@ class TestBuiltin(TestCase):
else:
left = uintp
right = np.dtype(np.ulonglong)
- self.assertTrue(left == right)
- self.assertTrue(hash(left) == hash(right))
+ assert_(left == right)
+ assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
@@ -103,17 +104,26 @@ class TestBuiltin(TestCase):
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
-class TestRecord(TestCase):
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+
+class TestRecord(object):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
@@ -128,9 +138,9 @@ class TestRecord(TestCase):
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
- c = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
@@ -145,10 +155,10 @@ class TestRecord(TestCase):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
- self.assertRaises(TypeError, np.dtype,
- dict(names=set(['A', 'B']), formats=['f8', 'i4']))
- self.assertRaises(TypeError, np.dtype,
- dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
+ assert_raises(TypeError, np.dtype,
+ dict(names=set(['A', 'B']), formats=['f8', 'i4']))
+ assert_raises(TypeError, np.dtype,
+ dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
@@ -210,11 +220,12 @@ class TestRecord(TestCase):
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
+ # field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
- 'formats':['<u2', '<u4', '<u2'],
- 'offsets':[2, 4, 0]}, align=True)
+ 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
- vals2 = [(2, 0, 1), (4, 3, -1)]
+ vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
@@ -275,9 +286,9 @@ class TestRecord(TestCase):
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
- return np.dtype({'names': ['A'], 'formats': ['i4'],
+ return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
-
+
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
@@ -288,10 +299,10 @@ class TestRecord(TestCase):
np.zeros(1, dtype=dt)[0].item()
-class TestSubarray(TestCase):
+class TestSubarray(object):
def test_single_subarray(self):
- a = np.dtype((np.int, (2)))
- b = np.dtype((np.int, (2,)))
+ a = np.dtype((int, (2)))
+ b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
@@ -299,29 +310,29 @@ class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 3)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (3, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (1, 2, 3)))
- b = np.dtype((np.int, (1, 2)))
+ a = np.dtype((int, (1, 2, 3)))
+ b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
- assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,))))
+ assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
@@ -414,47 +425,47 @@ class TestSubarray(TestCase):
assert_equal(t1.alignment, t2.alignment)
-class TestMonsterType(TestCase):
+class TestMonsterType(object):
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
- a = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
- b = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
+ a = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
+ b = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
- c = np.dtype([('yo', np.int), ('ye', simple1),
+ c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
- d = np.dtype([('yo', np.int), ('ye', simple1),
+ d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
-class TestMetadata(TestCase):
+class TestMetadata(object):
def test_no_metadata(self):
d = np.dtype(int)
- self.assertEqual(d.metadata, None)
+ assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
- self.assertEqual(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
- self.assertRaises(TypeError, np.dtype, int, metadata='datum')
- self.assertRaises(TypeError, np.dtype, int, metadata=1)
- self.assertRaises(TypeError, np.dtype, int, metadata=None)
+ assert_raises(TypeError, np.dtype, int, metadata='datum')
+ assert_raises(TypeError, np.dtype, int, metadata=1)
+ assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
- self.assertEqual(d['a'].metadata, {'datum': 1})
+ assert_(d['a'].metadata == {'datum': 1})
- def base_metadata_copied(self):
+ def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
- assert_equal(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
-class TestString(TestCase):
+class TestString(object):
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
@@ -581,7 +592,7 @@ class TestString(TestCase):
# Pull request #4722
np.array(["", ""]).astype(object)
-class TestDtypeAttributeDeletion(TestCase):
+class TestDtypeAttributeDeletion(object):
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -599,7 +610,7 @@ class TestDtypeAttributeDeletion(TestCase):
assert_raises(AttributeError, delattr, dt, s)
-class TestDtypeAttributes(TestCase):
+class TestDtypeAttributes(object):
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
@@ -624,6 +635,59 @@ class TestDtypeAttributes(TestCase):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+class TestPickling(object):
+
+ def check_pickling(self, dtype):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ pickled = pickle.loads(pickle.dumps(dtype, proto))
+ assert_equal(pickled, dtype)
+ assert_equal(pickled.descr, dtype.descr)
+ if dtype.metadata is not None:
+ assert_equal(pickled.metadata, dtype.metadata)
+ # Check the reconstructed dtype is functional
+ x = np.zeros(3, dtype=dtype)
+ y = np.zeros(3, dtype=pickled)
+ assert_equal(x, y)
+ assert_equal(x[0], y[0])
+
+ def test_builtin(self):
+ for t in [int, float, complex, np.int32, str, object,
+ np.unicode, bool]:
+ self.check_pickling(np.dtype(t))
+
+ def test_structured(self):
+ dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
+ self.check_pickling(dt)
+ dt = np.dtype('i4, i1', align=True)
+ self.check_pickling(dt)
+ dt = np.dtype('i4, i1', align=False)
+ self.check_pickling(dt)
+ dt = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ self.check_pickling(dt)
+ dt = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ self.check_pickling(dt)
+
+ def test_datetime(self):
+ for base in ['m8', 'M8']:
+ for unit in ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as']:
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
+ self.check_pickling(dt)
+ if unit:
+ dt = np.dtype('%s[7%s]' % (base, unit))
+ self.check_pickling(dt)
+
+ def test_metadata(self):
+ dt = np.dtype(int, metadata={'datum': 1})
+ self.check_pickling(dt)
+
+
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 8466d924e..7cc9f67ef 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_raises, suppress_warnings
)
@@ -14,7 +14,7 @@ for size, char in zip(sizes, chars):
global_size_dict[char] = size
-class TestEinSum(TestCase):
+class TestEinSum(object):
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
@@ -568,48 +568,37 @@ class TestEinSum(TestCase):
A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
- ref = np.einsum('ijk,j->ijk', A, B)
- assert_equal(np.einsum('ij...,j...->ij...', A, B), ref)
- assert_equal(np.einsum('ij...,...j->ij...', A, B), ref)
- assert_equal(np.einsum('ij...,j->ij...', A, B), ref) # used to raise error
-
- assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=True), ref) # used to raise error
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
A = np.arange(12).reshape((4, 3))
B = np.arange(6).reshape((3, 2))
- ref = np.einsum('ik,kj->ij', A, B)
- assert_equal(np.einsum('ik...,k...->i...', A, B), ref)
- assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref)
- assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error
- assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error
-
- assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=True), ref)
- assert_equal(np.einsum('...k,kj', A, B, optimize=True), ref) # used to raise error
- assert_equal(np.einsum('ik,k...->i...', A, B, optimize=True), ref) # used to raise error
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
dims = [2, 3, 4, 5]
a = np.arange(np.prod(dims)).reshape(dims)
v = np.arange(dims[2])
- ref = np.einsum('ijkl,k->ijl', a, v)
- assert_equal(np.einsum('ijkl,k', a, v), ref)
- assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error
- assert_equal(np.einsum('...kl,k...', a, v), ref)
- # no real diff from 1st
-
- assert_equal(np.einsum('ijkl,k', a, v, optimize=True), ref)
- assert_equal(np.einsum('...kl,k', a, v, optimize=True), ref) # used to raise error
- assert_equal(np.einsum('...kl,k...', a, v, optimize=True), ref)
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
J, K, M = 160, 160, 120
A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
- ref = np.einsum('...lmn,...lmno->...o', A, B)
- assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error
- assert_equal(np.einsum('...lmn,lmno->...o', A, B,
- optimize=True), ref) # used to raise error
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+ optimize=opt), ref) # used to raise error
def test_einsum_fixedstridebug(self):
# Issue #4485 obscure einsum bug
@@ -777,7 +766,7 @@ class TestEinSum(TestCase):
self.optimize_compare('aef,fbc,dca->bde')
-class TestEinSumPath(TestCase):
+class TestEinSumPath(object):
def build_operands(self, string):
# Builds views based off initial operands
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 7fc749a7e..ae06af7fd 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -3,10 +3,10 @@ from __future__ import division, absolute_import, print_function
import platform
import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite, dec
+from numpy.testing import assert_, run_module_suite, dec
-class TestErrstate(TestCase):
+class TestErrstate(object):
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 755ee2c04..d87585dcf 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -59,7 +59,7 @@ def exc_iter(*args):
try:
yield iterate()
- except:
+ except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 94c55bdd1..bffe5237a 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import (logspace, linspace, geomspace, dtype, array, sctypes,
arange, isnan, ndarray, sqrt, nextafter)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_allclose, suppress_warnings
)
@@ -40,7 +40,7 @@ class PhysicalQuantity2(ndarray):
__array_priority__ = 10
-class TestLogspace(TestCase):
+class TestLogspace(object):
def test_basic(self):
y = logspace(0, 6)
@@ -76,7 +76,7 @@ class TestLogspace(TestCase):
assert_equal(ls, logspace(1.0, 7.0, 1))
-class TestGeomspace(TestCase):
+class TestGeomspace(object):
def test_basic(self):
y = geomspace(1, 1e6)
@@ -191,7 +191,7 @@ class TestGeomspace(TestCase):
assert_raises(ValueError, geomspace, 0, 0)
-class TestLinspace(TestCase):
+class TestLinspace(object):
def test_basic(self):
y = linspace(0, 10)
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index 4adb80f7f..455f5257c 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -7,44 +7,44 @@ import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_
+ run_module_suite, assert_equal, assert_, assert_raises
)
from numpy.core.getlimits import (_discovered_machar, _float16_ma, _float32_ma,
_float64_ma, _float128_ma, _float80_ma)
##################################################
-class TestPythonFloat(TestCase):
+class TestPythonFloat(object):
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
-class TestHalf(TestCase):
+class TestHalf(object):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
-class TestSingle(TestCase):
+class TestSingle(object):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
-class TestDouble(TestCase):
+class TestDouble(object):
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
-class TestLongdouble(TestCase):
- def test_singleton(self,level=2):
+class TestLongdouble(object):
+ def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
-class TestFinfo(TestCase):
+class TestFinfo(object):
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
@@ -55,9 +55,9 @@ class TestFinfo(TestCase):
'nmant', 'precision', 'resolution', 'tiny'):
assert_equal(getattr(finfo(dt1), attr),
getattr(finfo(dt2), attr), attr)
- self.assertRaises(ValueError, finfo, 'i4')
+ assert_raises(ValueError, finfo, 'i4')
-class TestIinfo(TestCase):
+class TestIinfo(object):
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
@@ -67,14 +67,14 @@ class TestIinfo(TestCase):
for attr in ('bits', 'min', 'max'):
assert_equal(getattr(iinfo(dt1), attr),
getattr(iinfo(dt2), attr), attr)
- self.assertRaises(ValueError, iinfo, 'f4')
+ assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
-class TestRepr(TestCase):
+class TestRepr(object):
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 7a4d36333..813cf9572 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -4,8 +4,7 @@ import platform
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
- dec
+from numpy.testing import run_module_suite, assert_, assert_equal, dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
@@ -18,8 +17,8 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_(False,
"Did not raise floating point %s error" % strmatch)
-class TestHalf(TestCase):
- def setUp(self):
+class TestHalf(object):
+ def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
@@ -66,7 +65,7 @@ class TestHalf(TestCase):
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
- j = np.array(i_f16, dtype=np.int)
+ j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
def test_nans_infs(self):
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index e6b6be361..50919ffec 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_raises
+from numpy.testing import run_module_suite, assert_raises
-class TestIndexErrors(TestCase):
+class TestIndexErrors(object):
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 55eeb694a..4c3bac529 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -9,7 +9,7 @@ import numpy as np
from numpy.core.multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_warns, HAS_REFCOUNT
)
@@ -28,7 +28,7 @@ except ImportError:
_HAS_CTYPE = False
-class TestIndexing(TestCase):
+class TestIndexing(object):
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -106,6 +106,12 @@ class TestIndexing(TestCase):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
+
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
@@ -511,7 +517,7 @@ class TestIndexing(TestCase):
arr[slices] = 10
assert_array_equal(arr, 10.)
-class TestFieldIndexing(TestCase):
+class TestFieldIndexing(object):
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -520,7 +526,7 @@ class TestFieldIndexing(TestCase):
assert_(isinstance(a[['a']], np.ndarray))
-class TestBroadcastedAssignments(TestCase):
+class TestBroadcastedAssignments(object):
def assign(self, a, ind, val):
a[ind] = val
return a
@@ -571,7 +577,7 @@ class TestBroadcastedAssignments(TestCase):
assert_((a[::-1] == v).all())
-class TestSubclasses(TestCase):
+class TestSubclasses(object):
def test_basic(self):
class SubClass(np.ndarray):
pass
@@ -616,7 +622,7 @@ class TestSubclasses(TestCase):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
-class TestFancyIndexingCast(TestCase):
+class TestFancyIndexingCast(object):
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -638,7 +644,7 @@ class TestFancyIndexingCast(TestCase):
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
-class TestFancyIndexingEquivalence(TestCase):
+class TestFancyIndexingEquivalence(object):
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -686,7 +692,7 @@ class TestFancyIndexingEquivalence(TestCase):
assert_array_equal(a, b[0])
-class TestMultiIndexingAutomated(TestCase):
+class TestMultiIndexingAutomated(object):
"""
These tests use code to mimic the C-Code indexing for selection.
@@ -708,7 +714,7 @@ class TestMultiIndexingAutomated(TestCase):
"""
- def setUp(self):
+ def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
@@ -847,7 +853,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
- except:
+ except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
@@ -946,7 +952,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
- except:
+ except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
@@ -1103,7 +1109,7 @@ class TestMultiIndexingAutomated(TestCase):
for index in self.complex_indices:
self._check_single_index(a, index)
-class TestFloatNonIntegerArgument(TestCase):
+class TestFloatNonIntegerArgument(object):
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
@@ -1158,7 +1164,7 @@ class TestFloatNonIntegerArgument(TestCase):
assert_raises(TypeError, np.min, d, (.2, 1.2))
-class TestBooleanIndexing(TestCase):
+class TestBooleanIndexing(object):
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
@@ -1178,7 +1184,7 @@ class TestBooleanIndexing(TestCase):
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
-class TestArrayToIndexDeprecation(TestCase):
+class TestArrayToIndexDeprecation(object):
"""Creating an an index from array not 0-D is an error.
"""
@@ -1191,7 +1197,7 @@ class TestArrayToIndexDeprecation(TestCase):
assert_raises(TypeError, np.take, a, [0], a)
-class TestNonIntegerArrayLike(TestCase):
+class TestNonIntegerArrayLike(object):
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
@@ -1208,7 +1214,7 @@ class TestNonIntegerArrayLike(TestCase):
a.__getitem__([])
-class TestMultipleEllipsisError(TestCase):
+class TestMultipleEllipsisError(object):
"""An index can only have a single ellipsis.
"""
@@ -1219,7 +1225,7 @@ class TestMultipleEllipsisError(TestCase):
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-class TestCApiAccess(TestCase):
+class TestCApiAccess(object):
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index 1eb09f1e0..a0a458ca5 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -4,12 +4,12 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises,
+ run_module_suite, assert_, assert_raises,
assert_array_equal, HAS_REFCOUNT
)
-class TestTake(TestCase):
+class TestTake(object):
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
@@ -24,7 +24,7 @@ class TestTake(TestCase):
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
# refcounted struct on top of object.
- types = np.int, np.object, np.dtype([('', 'i', 2)])
+ types = int, object, np.dtype([('', 'i', 2)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index eda52c90a..625d40c1b 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -5,9 +5,9 @@ import locale
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, dec, assert_raises,
- assert_array_equal, TestCase, temppath,
+ assert_array_equal, temppath,
)
-from test_print import in_foreign_locale
+from .test_print import in_foreign_locale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
@@ -110,7 +110,7 @@ def test_fromstring_missing():
np.array([1]))
-class FileBased(TestCase):
+class TestFileBased(object):
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index 765b38ae0..7acb02eef 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -1,11 +1,16 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
from __future__ import division, absolute_import, print_function
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import run_module_suite
-class TestMachAr(TestCase):
+class TestMachAr(object):
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
@@ -13,6 +18,7 @@ class TestMachAr(TestCase):
hiprec = ntypes.float96
MachAr(lambda v:array([v], hiprec))
except AttributeError:
+ # Fixme, this needs to raise a 'skip' exception.
"Skipping test: no ntypes.float96 available on this platform."
def test_underlow(self):
@@ -22,7 +28,8 @@ class TestMachAr(TestCase):
try:
self._run_machar_highprec()
except FloatingPointError as e:
- self.fail("Caught %s exception, should not have been raised." % e)
+ msg = "Caught %s exception, should not have been raised." % e
+ raise AssertionError(msg)
if __name__ == "__main__":
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index c0c352230..1cd09ab21 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -12,12 +12,12 @@ from numpy.compat import Path
from numpy import arange, allclose, asarray
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
dec, suppress_warnings
)
-class TestMemmap(TestCase):
- def setUp(self):
+class TestMemmap(object):
+ def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.tempdir = mkdtemp()
self.shape = (3, 4)
@@ -25,7 +25,7 @@ class TestMemmap(TestCase):
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
- def tearDown(self):
+ def teardown(self):
self.tmpfp.close()
shutil.rmtree(self.tempdir)
@@ -41,7 +41,7 @@ class TestMemmap(TestCase):
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
- self.assertEqual(newfp.flags.writeable, False)
+ assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self):
tmpname = mktemp('', 'mmap', dir=self.tempdir)
@@ -60,8 +60,8 @@ class TestMemmap(TestCase):
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
- self.assertEqual(offset, fp.offset)
- self.assertEqual(mode, fp.mode)
+ assert_equal(offset, fp.offset)
+ assert_equal(mode, fp.mode)
del fp
def test_filename(self):
@@ -70,9 +70,9 @@ class TestMemmap(TestCase):
shape=self.shape)
abspath = os.path.abspath(tmpname)
fp[:] = self.data[:]
- self.assertEqual(abspath, fp.filename)
+ assert_equal(abspath, fp.filename)
b = fp[:1]
- self.assertEqual(abspath, b.filename)
+ assert_equal(abspath, b.filename)
del b
del fp
@@ -83,16 +83,16 @@ class TestMemmap(TestCase):
shape=self.shape)
abspath = os.path.realpath(os.path.abspath(tmpname))
fp[:] = self.data[:]
- self.assertEqual(abspath, str(fp.filename.resolve()))
+ assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
- self.assertEqual(abspath, str(b.filename.resolve()))
+ assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
- self.assertEqual(fp.filename, self.tmpfp.name)
+ assert_equal(fp.filename, self.tmpfp.name)
@dec.knownfailureif(sys.platform == 'gnu0', "This test is known to fail on hurd")
def test_flush(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 835d03528..bbdf4dbfa 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -22,21 +22,21 @@ from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
-from test_print import in_foreign_locale
+from .test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises, assert_warns,
- assert_equal, assert_almost_equal, assert_array_equal,
+ run_module_suite, assert_, assert_raises, assert_warns,
+ assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
-from datetime import timedelta
+from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
@@ -74,15 +74,15 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
return data
-class TestFlags(TestCase):
- def setUp(self):
+class TestFlags(object):
+ def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
- self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
- self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
@@ -110,7 +110,7 @@ class TestFlags(TestCase):
assert_(a.flags.aligned)
-class TestHash(TestCase):
+class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
@@ -132,8 +132,8 @@ class TestHash(TestCase):
err_msg="%r: 2**%d - 1" % (ut, i))
-class TestAttributes(TestCase):
- def setUp(self):
+class TestAttributes(object):
+ def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
@@ -164,7 +164,7 @@ class TestAttributes(TestCase):
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
- self.assertTrue(self.three.dtype.str[0] in '<>')
+ assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
@@ -194,12 +194,12 @@ class TestAttributes(TestCase):
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(ValueError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
- self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+ assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
@@ -216,9 +216,9 @@ class TestAttributes(TestCase):
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(RuntimeError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
@@ -226,12 +226,12 @@ class TestAttributes(TestCase):
def set_strides(arr, strides):
arr.strides = strides
- self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+ assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
- self.assertRaises(ValueError, set_strides, x[::-1], -1)
+ assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
@@ -265,7 +265,7 @@ class TestAttributes(TestCase):
assert_array_equal(x['b'], [-2, -2])
-class TestArrayConstruction(TestCase):
+class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
@@ -297,7 +297,7 @@ class TestArrayConstruction(TestCase):
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
- tgt = np.ones((2, 3), dtype=np.bool)
+ tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
@@ -343,7 +343,7 @@ class TestArrayConstruction(TestCase):
assert_(np.asfortranarray(d).flags.f_contiguous)
-class TestAssignment(TestCase):
+class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
@@ -449,7 +449,7 @@ class TestAssignment(TestCase):
assert_equal(arr[0], tinya)
-class TestDtypedescr(TestCase):
+class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
@@ -457,48 +457,48 @@ class TestDtypedescr(TestCase):
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
- self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
- self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
+ assert_(np.dtype('<i4') != np.dtype('>i4'))
+ assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
-class TestZeroRank(TestCase):
- def setUp(self):
+class TestZeroRank(object):
+ def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(b[...], 'x')
- self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
- self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
+ assert_equal(a[...], 0)
+ assert_equal(b[...], 'x')
+ assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
+ assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(b[()], 'x')
- self.assertTrue(type(a[()]) is a.dtype.type)
- self.assertTrue(type(b[()]) is str)
+ assert_equal(a[()], 0)
+ assert_equal(b[()], 'x')
+ assert_(type(a[()]) is a.dtype.type)
+ assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[0], b)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[0], b)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[...] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[()] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
@@ -506,20 +506,20 @@ class TestZeroRank(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(IndexError, assign, a, 0, 42)
- self.assertRaises(IndexError, assign, b, 0, '')
- self.assertRaises(ValueError, assign, a, (), '')
+ assert_raises(IndexError, assign, a, 0, 42)
+ assert_raises(IndexError, assign, b, 0, '')
+ assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
@@ -527,40 +527,40 @@ class TestZeroRank(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
- self.assertEqual(x[()], 5)
+ assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
- self.assertEqual(x[()], 6)
+ assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
- self.assertRaises(ValueError, np.add, x, [1], x)
+ assert_raises(ValueError, np.add, x, [1], x)
-class TestScalarIndexing(TestCase):
- def setUp(self):
+class TestScalarIndexing(object):
+ def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(a[...].shape, ())
+ assert_equal(a[...], 0)
+ assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(a[()].shape, ())
+ assert_equal(a[()], 0)
+ assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
@@ -568,18 +568,18 @@ class TestScalarIndexing(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(TypeError, assign, a, 0, 42)
+ assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
@@ -587,8 +587,8 @@ class TestScalarIndexing(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
@@ -639,13 +639,13 @@ class TestScalarIndexing(TestCase):
assert_equal(a, [0, 1, 0, 1, 2])
-class TestCreation(TestCase):
+class TestCreation(object):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
- self.assertRaises(ValueError, np.array, x())
+ assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -759,20 +759,20 @@ class TestCreation(TestCase):
str(d)
def test_sequence_non_homogenous(self):
- assert_equal(np.array([4, 2**80]).dtype, np.object)
- assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80] * 3).dtype, np.object)
- assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
- assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
- assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
+ assert_equal(np.array([4, 2**80]).dtype, object)
+ assert_equal(np.array([4, 2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80] * 3).dtype, object)
+ assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
+ assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
+ assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
- assert_equal(np.array([long(4), 2**80]).dtype, np.object)
- assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
- assert_equal(np.array([2**80, long(4)]).dtype, np.object)
+ assert_equal(np.array([long(4), 2**80]).dtype, object)
+ assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
+ assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -856,7 +856,7 @@ class TestCreation(TestCase):
shape=(max_bytes//itemsize + 1,), dtype=dtype)
-class TestStructured(TestCase):
+class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
@@ -876,7 +876,7 @@ class TestStructured(TestCase):
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
- dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
+ dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
@@ -952,16 +952,13 @@ class TestStructured(TestCase):
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
- b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+ b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
- # Check that 'equiv' casting can reorder fields and change byte
- # order
- # New in 1.12: This behavior changes in 1.13, test for dep warning
+ # Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
- with assert_warns(FutureWarning):
- c = a.astype(b.dtype, casting='equiv')
+ c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
@@ -1096,20 +1093,68 @@ class TestStructured(TestCase):
b = a[0]
assert_(b.base is a)
-
-class TestBool(TestCase):
+ def test_assignment(self):
+ def testassign(arr, v):
+ c = arr.copy()
+ c[0] = v # assign using setitem
+ c[1:] = v # assign using "dtype_transfer" code paths
+ return c
+
+ dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
+ arr = np.ones(2, dt)
+ v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
+ v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
+ v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
+ v4 = np.array([(2,)], dtype=[('bar', 'i8')])
+ v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
+
+ ans = np.array([(2,3),(2,3)], dtype=dt)
+ assert_equal(testassign(arr, v1), ans)
+ assert_equal(testassign(arr, v2), ans)
+ assert_equal(testassign(arr, v3), ans)
+ assert_raises(ValueError, lambda: testassign(arr, v4))
+ assert_equal(testassign(arr, v5), ans)
+ w[:] = 4
+ assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
+
+ # test field-reordering, assignment by position, and self-assignment
+ a = np.array([(1,2,3)],
+ dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
+ a[['foo', 'bar']] = a[['bar', 'foo']]
+ assert_equal(a[0].item(), (2,1,3))
+
+ # test that this works even for 'simple_unaligned' structs
+ # (ie, that PyArray_EquivTypes cares about field order too)
+ a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
+ a[['a', 'b']] = a[['b', 'a']]
+ assert_equal(a[0].item(), (2,1))
+
+ def test_structuredscalar_indexing(self):
+ # test gh-7262
+ x = np.empty(shape=1, dtype="(2)3S,(2)3U")
+ assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
+ assert_equal(x[0], x[0][()])
+
+ def test_multiindex_titles(self):
+ a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
+ assert_raises(KeyError, lambda : a[['a','c']])
+ assert_raises(KeyError, lambda : a[['b','b']])
+ a[['b','c']] # no exception
+
+class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
- self.assertTrue(a0 is b0)
+ assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
- self.assertTrue(a1 is b1)
- self.assertTrue(np.array([True])[0] is a1)
- self.assertTrue(np.array(True)[()] is a1)
+ assert_(a1 is b1)
+ assert_(np.array([True])[0] is a1)
+ assert_(np.array(True)[()] is a1)
def test_sum(self):
- d = np.ones(101, dtype=np.bool)
+ d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
@@ -1123,16 +1168,16 @@ class TestBool(TestCase):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
- a = np.array(l, dtype=np.bool)
+ a = np.array(l, dtype=bool)
c = builtins.sum(l)
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av *= 4
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
@@ -1148,15 +1193,15 @@ class TestBool(TestCase):
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
- a = np.zeros((18,), dtype=np.bool)[o+1:]
+ a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
- a = np.ones((18,), dtype=np.bool)[o+1:]
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+ a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
-class TestMethods(TestCase):
+class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -1201,8 +1246,8 @@ class TestMethods(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, a.prod)
- self.assertRaises(ArithmeticError, a2.prod, axis=1)
+ assert_raises(ArithmeticError, a.prod)
+ assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -1283,9 +1328,9 @@ class TestMethods(TestCase):
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
- self.assertRaises(ValueError, lambda: a.transpose(0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
+ assert_raises(ValueError, lambda: a.transpose(0))
+ assert_raises(ValueError, lambda: a.transpose(0, 0))
+ assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
@@ -1381,7 +1426,7 @@ class TestMethods(TestCase):
assert_equal(c, a, msg)
# test object array sorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
@@ -1476,6 +1521,21 @@ class TestMethods(TestCase):
arr = np.empty(1000, dt)
arr[::-1].sort()
+ def test_sort_raises(self):
+ #gh-9404
+ arr = np.array([0, datetime.now(), 1], dtype=object)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+ #gh-3879
+ class Raiser(object):
+ def raises_anything(*args, **kwargs):
+ raise TypeError("SOMETHING ERRORED")
+ __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+ arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+ np.random.shuffle(arr)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
@@ -1538,6 +1598,9 @@ class TestMethods(TestCase):
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+ assert_raises_regex(ValueError, 'duplicate',
+ lambda: r.sort(order=['id', 'id']))
+
if sys.byteorder == 'little':
strtype = '>i2'
else:
@@ -1609,7 +1672,7 @@ class TestMethods(TestCase):
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
@@ -1676,7 +1739,7 @@ class TestMethods(TestCase):
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
- a = np.zeros(100, dtype=np.complex)
+ a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
@@ -2026,8 +2089,8 @@ class TestMethods(TestCase):
# sorted
d = np.arange(49)
- self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
- self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
+ assert_equal(np.partition(d, 5, kind=k)[5], 5)
+ assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
@@ -2035,8 +2098,8 @@ class TestMethods(TestCase):
# rsorted
d = np.arange(47)[::-1]
- self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
- self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
+ assert_equal(np.partition(d, 6, kind=k)[6], 6)
+ assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -2076,7 +2139,7 @@ class TestMethods(TestCase):
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
- self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
+ assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -2128,7 +2191,7 @@ class TestMethods(TestCase):
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
- at = self.assertTrue
+ at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
@@ -2137,7 +2200,7 @@ class TestMethods(TestCase):
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
- self.assertEqual(p[i], i)
+ assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
@@ -2479,6 +2542,17 @@ class TestMethods(TestCase):
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
+ def test_size_zero_memleak(self):
+ # Regression test for issue 9615
+ # Exercises a special-case code path for dot products of length
+ # zero in cblasfuncs (making it specific to floating dtypes).
+ a = np.array([], dtype=np.float64)
+ x = np.array(2.0)
+ for _ in range(100):
+ np.dot(a, a, out=x)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(x) < 50)
+
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
@@ -2504,7 +2578,7 @@ class TestMethods(TestCase):
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
- assert isinstance(t, MyArray)
+ assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
@@ -2795,76 +2869,6 @@ class TestBinop(object):
assert_equal(a, 5)
assert_equal(b, 3)
- def test_extension_incref_elide(self):
- # test extension (e.g. cython) calling PyNumber_* slots without
- # increasing the reference counts
- #
- # def incref_elide(a):
- # d = input.copy() # refcount 1
- # return d, d + d # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide
- d = np.ones(100000)
- orig, res = incref_elide(d)
- d + d
- # the return original should not be changed to an inplace operation
- assert_array_equal(orig, d)
- assert_array_equal(res, d + d)
-
- def test_extension_incref_elide_stack(self):
- # scanning if the refcount == 1 object is on the python stack to check
- # that we are called directly from python is flawed as object may still
- # be above the stack pointer and we have no access to the top of it
- #
- # def incref_elide_l(d):
- # return l[4] + l[4] # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide_l
- # padding with 1 makes sure the object on the stack is not overwriten
- l = [1, 1, 1, 1, np.ones(100000)]
- res = incref_elide_l(l)
- # the return original should not be changed to an inplace operation
- assert_array_equal(l[4], np.ones(100000))
- assert_array_equal(res, l[4] + l[4])
-
- def test_temporary_with_cast(self):
- # check that we don't elide into a temporary which would need casting
- d = np.ones(200000, dtype=np.int64)
- assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
-
- r = ((d + d) / 2)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = np.true_divide((d + d), 2)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = ((d + d) / 2.)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = ((d + d) // 2)
- assert_equal(r.dtype, np.dtype(np.int64))
-
- # commutative elision into the astype result
- f = np.ones(100000, dtype=np.float32)
- assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
-
- # no elision into f + f
- d = f.astype(np.float64)
- assert_equal(((f + f) + d).dtype, np.dtype('f8'))
-
- def test_elide_broadcast(self):
- # test no elision on broadcast to higher dimension
- # only triggers elision code path in debug mode as triggering it in
- # normal mode needs 256kb large matching dimension, so a lot of memory
- d = np.ones((2000, 1), dtype=int)
- b = np.ones((2000), dtype=np.bool)
- r = (1 - d) + b
- assert_equal(r, 1)
- assert_equal(r.shape, (2000, 2000))
-
- def test_elide_scalar(self):
- # check inplace op does not create ndarray from scalars
- a = np.bool_()
- assert_(type(~(a & a)) is np.bool_)
-
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
@@ -3107,7 +3111,7 @@ class TestBinop(object):
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
- assert_raises(TypeError, np.modf, dummy, out=(a,))
+ assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
@@ -3139,8 +3143,131 @@ class TestBinop(object):
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
+ def test_pow_override_with_errors(self):
+ # regression test for gh-9112
+ class PowerOnly(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if ufunc is not np.power:
+ raise NotImplementedError
+ return "POWER!"
+ # explicit cast to float, to ensure the fast power path is taken.
+ a = np.array(5., dtype=np.float64).view(PowerOnly)
+ assert_equal(a ** 2.5, "POWER!")
+ with assert_raises(NotImplementedError):
+ a ** 0.5
+ with assert_raises(NotImplementedError):
+ a ** 0
+ with assert_raises(NotImplementedError):
+ a ** 1
+ with assert_raises(NotImplementedError):
+ a ** -1
+ with assert_raises(NotImplementedError):
+ a ** 2
+
+
+class TestTemporaryElide(object):
+ # elision is only triggered on relatively large arrays
+
+ def test_extension_incref_elide(self):
+ # test extension (e.g. cython) calling PyNumber_* slots without
+ # increasing the reference counts
+ #
+ # def incref_elide(a):
+ # d = input.copy() # refcount 1
+ # return d, d + d # PyNumber_Add without increasing refcount
+ from numpy.core.multiarray_tests import incref_elide
+ d = np.ones(100000)
+ orig, res = incref_elide(d)
+ d + d
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(orig, d)
+ assert_array_equal(res, d + d)
+
+ def test_extension_incref_elide_stack(self):
+ # scanning if the refcount == 1 object is on the python stack to check
+ # that we are called directly from python is flawed as object may still
+ # be above the stack pointer and we have no access to the top of it
+ #
+ # def incref_elide_l(d):
+ # return l[4] + l[4] # PyNumber_Add without increasing refcount
+ from numpy.core.multiarray_tests import incref_elide_l
+ # padding with 1 makes sure the object on the stack is not overwritten
+ l = [1, 1, 1, 1, np.ones(100000)]
+ res = incref_elide_l(l)
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(l[4], np.ones(100000))
+ assert_array_equal(res, l[4] + l[4])
+
+ def test_temporary_with_cast(self):
+ # check that we don't elide into a temporary which would need casting
+ d = np.ones(200000, dtype=np.int64)
+ assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
+
+ r = ((d + d) / 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = np.true_divide((d + d), 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) / 2.)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) // 2)
+ assert_equal(r.dtype, np.dtype(np.int64))
+
+ # commutative elision into the astype result
+ f = np.ones(100000, dtype=np.float32)
+ assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+ # no elision into lower type
+ d = f.astype(np.float64)
+ assert_equal(((f + f) + d).dtype, d.dtype)
+ l = np.ones(100000, dtype=np.longdouble)
+ assert_equal(((d + d) + l).dtype, l.dtype)
-class TestCAPI(TestCase):
+ # test unary abs with different output dtype
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ c = np.ones(100000, dtype=dt)
+ r = abs(c * 2.0)
+ assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+ def test_elide_broadcast(self):
+ # test no elision on broadcast to higher dimension
+ # only triggers elision code path in debug mode as triggering it in
+ # normal mode needs 256kb large matching dimension, so a lot of memory
+ d = np.ones((2000, 1), dtype=int)
+ b = np.ones((2000), dtype=bool)
+ r = (1 - d) + b
+ assert_equal(r, 1)
+ assert_equal(r.shape, (2000, 2000))
+
+ def test_elide_scalar(self):
+ # check inplace op does not create ndarray from scalars
+ a = np.bool_()
+ assert_(type(~(a & a)) is np.bool_)
+
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+ # elision which can be gotten for the imaginary part of a real
+ # array. Should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
+
+ def test_elide_readonly(self):
+ # don't try to elide readonly temporaries
+ r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+ assert_equal(r, 0)
+
+ def test_elide_updateifcopy(self):
+ a = np.ones(2**20)[::2]
+ b = a.flat.__array__() + 1
+ del b
+ assert_equal(a, 1)
+
+
+class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
@@ -3150,16 +3277,16 @@ class TestCAPI(TestCase):
assert_(IsPythonScalar("a"))
-class TestSubscripting(TestCase):
+class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
- self.assertTrue(isinstance(x[0], np.int_))
+ assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
- self.assertTrue(isinstance(x[0], int))
- self.assertTrue(type(x[0, ...]) is np.ndarray)
+ assert_(isinstance(x[0], int))
+ assert_(type(x[0, ...]) is np.ndarray)
-class TestPickling(TestCase):
+class TestPickling(object):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
@@ -3225,7 +3352,7 @@ class TestPickling(TestCase):
assert_equal(a, p)
-class TestFancyIndexing(TestCase):
+class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
@@ -3279,7 +3406,7 @@ class TestFancyIndexing(TestCase):
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
-class TestStringCompare(TestCase):
+class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
@@ -3311,7 +3438,7 @@ class TestStringCompare(TestCase):
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
-class TestArgmax(TestCase):
+class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3387,8 +3514,13 @@ class TestArgmax(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ max_val = np.max(arr)
+
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
@@ -3440,7 +3572,7 @@ class TestArgmax(TestCase):
assert_equal(a.argmax(), 1)
-class TestArgmin(TestCase):
+class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3516,8 +3648,13 @@ class TestArgmin(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ min_val = np.min(arr)
+
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
@@ -3583,7 +3720,7 @@ class TestArgmin(TestCase):
assert_equal(a.argmin(), 1)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
@@ -3613,14 +3750,14 @@ class TestMinMax(TestCase):
assert_equal(np.amax(a), a[0])
-class TestNewaxis(TestCase):
+class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
-class TestClip(TestCase):
+class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
@@ -3694,7 +3831,7 @@ class TestClip(TestCase):
assert_array_equal(result, expected)
-class TestCompress(TestCase):
+class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -3812,7 +3949,7 @@ class TestTake(object):
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
-class TestLexsort(TestCase):
+class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
@@ -3859,19 +3996,19 @@ class TestLexsort(TestCase):
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
-class TestIO(TestCase):
+class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
- def setUp(self):
+ def setup(self):
shape = (2, 4, 3)
rand = np.random.random
- self.x = rand(shape) + rand(shape).astype(np.complex)*1j
+ self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
- def tearDown(self):
+ def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
@@ -3960,7 +4097,7 @@ class TestIO(TestCase):
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
- self.assertRaises(IOError, np.fromfile, f, dtype=self.dtype)
+ assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
@@ -4181,7 +4318,7 @@ class TestFromBuffer(object):
def test_ip_basic(self):
for byteorder in ['<', '>']:
- for dtype in [float, int, np.complex]:
+ for dtype in [float, int, complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
@@ -4191,8 +4328,8 @@ class TestFromBuffer(object):
yield self.tst_basic, b'', np.array([]), {}
-class TestFlat(TestCase):
- def setUp(self):
+class TestFlat(object):
+ def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
@@ -4228,17 +4365,19 @@ class TestFlat(TestCase):
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
+ # for 1.14 all are set to non-writeable on the way to replacing the
+ # UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
- assert_(f.flags.writeable is True)
+ assert_(f.flags.writeable is False)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
- assert_(f.flags.updateifcopy is True)
- assert_(f.base is self.b0)
+ # UPDATEIFCOPY is removed.
+ assert_(f.flags.updateifcopy is False)
-class TestResize(TestCase):
+class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
@@ -4252,7 +4391,7 @@ class TestResize(TestCase):
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
- self.assertRaises(ValueError, x.resize, (5, 1))
+ assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
@@ -4270,11 +4409,23 @@ class TestResize(TestCase):
x.resize()
assert_array_equal(x, np.eye(3))
+ def test_0d_shape(self):
+ # do it multiple times to test it does not break alloc cache gh-9216
+ for i in range(10):
+ x = np.empty((1,))
+ x.resize(())
+ assert_equal(x.shape, ())
+ assert_equal(x.size, 1)
+ x = np.empty(())
+ x.resize((1,))
+ assert_equal(x.shape, (1,))
+ assert_equal(x.size, 1)
+
def test_invalid_arguments(self):
- self.assertRaises(TypeError, np.eye(3).resize, 'hi')
- self.assertRaises(ValueError, np.eye(3).resize, -1)
- self.assertRaises(TypeError, np.eye(3).resize, order=1)
- self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
+ assert_raises(TypeError, np.eye(3).resize, 'hi')
+ assert_raises(ValueError, np.eye(3).resize, -1)
+ assert_raises(TypeError, np.eye(3).resize, order=1)
+ assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
@@ -4305,7 +4456,7 @@ class TestResize(TestCase):
assert_array_equal(a['k'][:-5], 1)
-class TestRecord(TestCase):
+class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
@@ -4401,23 +4552,11 @@ class TestRecord(TestCase):
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
- with suppress_warnings() as sup:
- sup.filter(FutureWarning,
- "Assignment between structured arrays.*")
- sup.filter(FutureWarning,
- "Numpy has detected that you .*")
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
- (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
- (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
- (2, (1,)))
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
@@ -4426,54 +4565,6 @@ class TestRecord(TestCase):
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- # For <=1.12 a is not modified, but it will be in 1.13
- assert_equal(a, b)
-
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
-
- # make sure views of a multi-field index warn too
- c = np.zeros(3, dtype='i8,i8,i8')
- assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
- [FutureWarning])
-
- # make sure assignment using a different dtype warns
- a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
- b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
- assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
-
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
@@ -4481,14 +4572,14 @@ class TestRecord(TestCase):
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
- self.assertTrue(hash(a[0]) == hash(a[1]))
- self.assertTrue(hash(a[0]) == hash(b[0]))
- self.assertTrue(hash(a[0]) != hash(b[1]))
- self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+ assert_(hash(a[0]) == hash(a[1]))
+ assert_(hash(a[0]) == hash(b[0]))
+ assert_(hash(a[0]) != hash(b[1]))
+ assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
- self.assertRaises(TypeError, hash, a[0])
+ assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
@@ -4497,7 +4588,7 @@ class TestRecord(TestCase):
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
-class TestView(TestCase):
+class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
@@ -4522,11 +4613,11 @@ def _std(a, **args):
return a.std(**args)
-class TestStats(TestCase):
+class TestStats(object):
funcs = [_mean, _var, _std]
- def setUp(self):
+ def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
@@ -4692,7 +4783,7 @@ class TestStats(TestCase):
def test_mean_float16(self):
# This fail if the sum inside mean is done in float16 instead
# of float32.
- assert _mean(np.ones(100000, dtype='float16')) == 1
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
@@ -4729,7 +4820,7 @@ class TestStats(TestCase):
res = dat.var(1)
assert_(res.info == dat.info)
-class TestVdot(TestCase):
+class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
@@ -4751,7 +4842,7 @@ class TestVdot(TestCase):
assert_equal(np.vdot(b, b), 3)
# test boolean
- b = np.eye(3, dtype=np.bool)
+ b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
@@ -4789,8 +4880,8 @@ class TestVdot(TestCase):
np.vdot(a.flatten(), b.flatten()))
-class TestDot(TestCase):
- def setUp(self):
+class TestDot(object):
+ def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
@@ -5067,7 +5158,7 @@ class TestDot(TestCase):
assert_dot_close(A_f_12, X_f_2, desired)
-class MatmulCommon():
+class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
@@ -5262,23 +5353,23 @@ class MatmulCommon():
assert_equal(res, tgt12_21)
-class TestMatmul(MatmulCommon, TestCase):
+class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
- a = np.ones((2, 2), dtype=np.float)
- b = np.ones((2, 2), dtype=np.float)
- tgt = np.full((2,2), 2, dtype=np.float)
+ a = np.ones((2, 2), dtype=float)
+ b = np.ones((2, 2), dtype=float)
+ tgt = np.full((2,2), 2, dtype=float)
# test as positional argument
msg = "out positional argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
@@ -5301,13 +5392,13 @@ class TestMatmul(MatmulCommon, TestCase):
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
- # c = np.zeros((2, 2, 2), dtype=np.float)
+ # c = np.zeros((2, 2, 2), dtype=float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
- class TestMatmulOperator(MatmulCommon, TestCase):
+ class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
@@ -5342,7 +5433,7 @@ if sys.version_info[:2] >= (3, 5):
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
-class TestInner(TestCase):
+class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
@@ -5435,7 +5526,7 @@ class TestInner(TestCase):
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestSummarization(TestCase):
+class TestSummarization(object):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
@@ -5455,26 +5546,26 @@ class TestSummarization(TestCase):
assert_(repr(A) == reprA)
-class TestAlen(TestCase):
+class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
m = [1, 2, 3]
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- self.assertEqual(np.alen(5), 1)
+ assert_equal(np.alen(5), 1)
-class TestChoose(TestCase):
- def setUp(self):
+class TestChoose(object):
+ def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
@@ -5494,8 +5585,8 @@ class TestChoose(TestCase):
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
-class TestRepeat(TestCase):
- def setUp(self):
+class TestRepeat(object):
+ def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
@@ -5535,7 +5626,7 @@ class TestRepeat(TestCase):
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
-class TestNeighborhoodIter(TestCase):
+class TestNeighborhoodIter(object):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
@@ -5565,7 +5656,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_simple2d(self):
- self._test_simple2d(np.float)
+ self._test_simple2d(float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
@@ -5581,7 +5672,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_mirror2d(self):
- self._test_mirror2d(np.float)
+ self._test_mirror2d(float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
@@ -5603,7 +5694,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_simple_float(self):
- self._test_simple(np.float)
+ self._test_simple(float)
def test_simple_object(self):
self._test_simple(Decimal)
@@ -5614,11 +5705,11 @@ class TestNeighborhoodIter(TestCase):
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
- self.assertTrue([i.dtype == dt for i in l])
+ assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
- self._test_mirror(np.float)
+ self._test_mirror(float)
def test_mirror_object(self):
self._test_mirror(Decimal)
@@ -5632,13 +5723,13 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_circular(self):
- self._test_circular(np.float)
+ self._test_circular(float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
-class TestStackedNeighborhoodIter(TestCase):
+class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
@@ -6319,7 +6410,7 @@ def test_flat_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
@@ -6328,7 +6419,7 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook(TestCase):
+class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
@@ -6340,7 +6431,7 @@ class TestMemEventHook(TestCase):
gc.collect()
test_pydatamem_seteventhook_end()
-class TestMapIter(TestCase):
+class TestMapIter(object):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
@@ -6362,7 +6453,7 @@ class TestMapIter(TestCase):
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
-class TestAsCArray(TestCase):
+class TestAsCArray(object):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
@@ -6379,7 +6470,7 @@ class TestAsCArray(TestCase):
assert_equal(array[1, 2, 3], from_c)
-class TestConversion(TestCase):
+class TestConversion(object):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
@@ -6444,12 +6535,12 @@ class TestConversion(TestCase):
assert_raises(Error, bool, self_containing) # previously stack overflow
-class TestWhere(TestCase):
+class TestWhere(object):
def test_basic(self):
- dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+ dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
- c = np.ones(53, dtype=np.bool)
+ c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
@@ -6541,7 +6632,7 @@ class TestWhere(TestCase):
assert_equal(np.where(c, a, b), r)
# non bool mask
- c = c.astype(np.int)
+ c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
@@ -6597,10 +6688,21 @@ class TestWhere(TestCase):
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
+ def test_largedim(self):
+ # invalid read regression gh-9304
+ shape = [10, 2, 3, 4, 5, 6]
+ np.random.seed(2)
+ array = np.random.rand(*shape)
+
+ for i in range(10):
+ benchmark = array.nonzero()
+ result = array.nonzero()
+ assert_array_equal(benchmark, result)
+
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(TestCase):
+ class TestSizeOf(object):
def test_empty_array(self):
x = np.array([])
@@ -6646,7 +6748,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestHashing(TestCase):
+class TestHashing(object):
def test_arrays_not_hashable(self):
x = np.ones(3)
@@ -6654,10 +6756,10 @@ class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
- self.assertFalse(isinstance(x, collections.Hashable))
+ assert_(not isinstance(x, collections.Hashable))
-class TestArrayPriority(TestCase):
+class TestArrayPriority(object):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
@@ -6743,54 +6845,54 @@ class TestArrayPriority(TestCase):
assert_(isinstance(f(b, a), self.Other), msg)
-class TestBytestringArrayNonzero(TestCase):
+class TestBytestringArrayNonzero(object):
def test_empty_bstring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.str))
+ assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
-class TestUnicodeArrayNonzero(TestCase):
+class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
-class TestCTypes(TestCase):
+class TestCTypes(object):
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
- self.assertEqual(ctypes, test_arr.ctypes._ctypes)
+ assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
@@ -6799,8 +6901,8 @@ class TestCTypes(TestCase):
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
- self.assertIsInstance(
- test_arr.ctypes._ctypes, _internal._missing_ctypes)
+ assert_(isinstance(test_arr.ctypes._ctypes,
+ _internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
@@ -6811,5 +6913,73 @@ def test_orderconverter_with_nonASCII_unicode_ordering():
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
+
+def test_equal_override():
+ # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
+ # did not respect overrides with __array_priority__ or __array_ufunc__.
+ # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
+ class MyAlwaysEqual(object):
+ def __eq__(self, other):
+ return "eq"
+
+ def __ne__(self, other):
+ return "ne"
+
+ class MyAlwaysEqualOld(MyAlwaysEqual):
+ __array_priority__ = 10000
+
+ class MyAlwaysEqualNew(MyAlwaysEqual):
+ __array_ufunc__ = None
+
+ array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
+ for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
+ my_always_equal = my_always_equal_cls()
+ assert_equal(my_always_equal == array, 'eq')
+ assert_equal(array == my_always_equal, 'eq')
+ assert_equal(my_always_equal != array, 'ne')
+ assert_equal(array != my_always_equal, 'ne')
+
+
+def test_npymath_complex():
+ # Smoketest npymath functions
+ from numpy.core.multiarray_tests import (
+ npy_cabs, npy_carg)
+
+ funcs = {npy_cabs: np.absolute,
+ npy_carg: np.angle}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.complex64, np.complex128, np.clongdouble)
+
+ for fun, npfun in funcs.items():
+ for x, y in itertools.product(vals, vals):
+ for t in types:
+ z = t(complex(x, y))
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
+def test_npymath_real():
+ # Smoketest npymath functions
+ from numpy.core.multiarray_tests import (
+ npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+ funcs = {npy_log10: np.log10,
+ npy_cosh: np.cosh,
+ npy_sinh: np.sinh,
+ npy_tan: np.tan,
+ npy_tanh: np.tanh}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.float32, np.float64, np.longdouble)
+
+ with np.errstate(all='ignore'):
+ for fun, npfun in funcs.items():
+ for x, t in itertools.product(vals, types):
+ z = t(x)
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 77521317e..59e11f22e 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1816,100 +1816,45 @@ def test_iter_buffered_cast_structured_type():
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
- # struct type -> simple (takes the first value)
- sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(ValueError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
- [np.array((3, 1, 2), dtype=sdt2),
- np.array((6, 4, 5), dtype=sdt2)])
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
- # struct type -> struct type (field gets discarded)
+ # make sure struct type -> struct type with different
+ # number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1))
-
- # struct type -> struct type (structured field gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1))
-
- # struct type -> struct type (structured field w/ ref gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1))
-
- # struct type -> struct type back (structured field w/ ref gets discarded)
- sdt1 = [('b', 'O'), ('a', 'f8')]
- sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- a = np.array([(1, 2), (4, 5)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')]))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2),
- np.array((5, 4, (0, None)), dtype=sdt2)])
- assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1))
+
+ assert_raises(ValueError, lambda : (
+ nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)))
+
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
@@ -2145,7 +2090,7 @@ def test_iter_buffered_reduce_reuse():
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
- op_dtypes = [np.float, a.dtype]
+ op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
@@ -2641,7 +2586,7 @@ def test_iter_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 0f87ffdf2..e8c637179 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -10,13 +10,13 @@ import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_raises_regex, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, dec, HAS_REFCOUNT, suppress_warnings
)
-class TestResize(TestCase):
+class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
@@ -34,6 +34,12 @@ class TestResize(TestCase):
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
+ Ar = np.resize(A, (0, 2))
+ assert_equal(Ar.shape, (0, 2))
+
+ Ar = np.resize(A, (2, 0))
+ assert_equal(Ar.shape, (2, 0))
+
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32, 1)])
@@ -42,7 +48,7 @@ class TestResize(TestCase):
assert_equal(A.dtype, Ar.dtype)
-class TestNonarrayArgs(TestCase):
+class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
@@ -202,45 +208,45 @@ class TestNonarrayArgs(TestCase):
assert_(w[0].category is RuntimeWarning)
-class TestBoolScalar(TestCase):
+class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
- self.assertTrue((t and s) is s)
- self.assertTrue((f and s) is f)
+ assert_((t and s) is s)
+ assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
- self.assertTrue((t | t) is t)
- self.assertTrue((f | t) is t)
- self.assertTrue((t | f) is t)
- self.assertTrue((f | f) is f)
+ assert_((t | t) is t)
+ assert_((f | t) is t)
+ assert_((t | f) is t)
+ assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
- self.assertTrue((t & t) is t)
- self.assertTrue((f & t) is f)
- self.assertTrue((t & f) is f)
- self.assertTrue((f & f) is f)
+ assert_((t & t) is t)
+ assert_((f & t) is f)
+ assert_((t & f) is f)
+ assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
- self.assertTrue((t ^ t) is f)
- self.assertTrue((f ^ t) is t)
- self.assertTrue((t ^ f) is t)
- self.assertTrue((f ^ f) is f)
+ assert_((t ^ t) is f)
+ assert_((f ^ t) is t)
+ assert_((t ^ f) is t)
+ assert_((f ^ f) is f)
-class TestBoolArray(TestCase):
- def setUp(self):
+class TestBoolArray(object):
+ def setup(self):
# offset for simd tests
- self.t = np.array([True] * 41, dtype=np.bool)[1::]
- self.f = np.array([False] * 41, dtype=np.bool)[1::]
- self.o = np.array([False] * 42, dtype=np.bool)[2::]
+ self.t = np.array([True] * 41, dtype=bool)[1::]
+ self.f = np.array([False] * 41, dtype=bool)[1::]
+ self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
@@ -249,31 +255,31 @@ class TestBoolArray(TestCase):
self.im[-2] = False
def test_all_any(self):
- self.assertTrue(self.t.all())
- self.assertTrue(self.t.any())
- self.assertFalse(self.f.all())
- self.assertFalse(self.f.any())
- self.assertTrue(self.nm.any())
- self.assertTrue(self.im.any())
- self.assertFalse(self.nm.all())
- self.assertFalse(self.im.all())
+ assert_(self.t.all())
+ assert_(self.t.any())
+ assert_(not self.f.all())
+ assert_(not self.f.any())
+ assert_(self.nm.any())
+ assert_(self.im.any())
+ assert_(not self.nm.all())
+ assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
- d = np.array([False] * 256, dtype=np.bool)[7::]
+ d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
- self.assertTrue(np.any(d))
- e = np.array([True] * 256, dtype=np.bool)[7::]
+ assert_(np.any(d))
+ e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
- self.assertFalse(np.all(e))
+ assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
- d = np.array([False] * 100043, dtype=np.bool)
+ d = np.array([False] * 100043, dtype=bool)
d[i] = True
- self.assertTrue(np.any(d), msg="%r" % i)
- e = np.array([True] * 100043, dtype=np.bool)
+ assert_(np.any(d), msg="%r" % i)
+ e = np.array([True] * 100043, dtype=bool)
e[i] = False
- self.assertFalse(np.all(e), msg="%r" % i)
+ assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
@@ -322,12 +328,12 @@ class TestBoolArray(TestCase):
assert_array_equal(self.im ^ False, self.im)
-class TestBoolCmp(TestCase):
- def setUp(self):
+class TestBoolCmp(object):
+ def setup(self):
self.f = np.ones(256, dtype=np.float32)
- self.ef = np.ones(self.f.size, dtype=np.bool)
+ self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
- self.ed = np.ones(self.d.size, dtype=np.bool)
+ self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
@@ -422,28 +428,28 @@ class TestBoolCmp(TestCase):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
-class TestSeterr(TestCase):
+class TestSeterr(object):
def test_default(self):
err = np.geterr()
- self.assertEqual(err, dict(
- divide='warn',
- invalid='warn',
- over='warn',
- under='ignore',
- ))
+ assert_equal(err,
+ dict(divide='warn',
+ invalid='warn',
+ over='warn',
+ under='ignore')
+ )
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
- self.assertTrue(err == old)
+ assert_(err == old)
new = np.seterr()
- self.assertTrue(new['divide'] == 'print')
+ assert_(new['divide'] == 'print')
np.seterr(over='raise')
- self.assertTrue(np.geterr()['over'] == 'raise')
- self.assertTrue(new['divide'] == 'print')
+ assert_(np.geterr()['over'] == 'raise')
+ assert_(new['divide'] == 'print')
np.seterr(**old)
- self.assertTrue(np.geterr() == old)
+ assert_(np.geterr() == old)
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_divide_err(self):
@@ -466,7 +472,7 @@ class TestSeterr(TestCase):
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
- self.assertEqual(len(w), 1)
+ assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
@@ -477,12 +483,12 @@ class TestSeterr(TestCase):
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
- self.assertEqual(self.called, 1)
+ assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
- self.assertEqual(self.called, 2)
+ assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
@@ -506,7 +512,7 @@ class TestSeterr(TestCase):
np.seterrobj(olderrobj)
-class TestFloatExceptions(TestCase):
+class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
@@ -590,20 +596,20 @@ class TestFloatExceptions(TestCase):
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
- self.assertEqual(len(w), 1)
- self.assertTrue("divide by zero" in str(w[0].message))
+ assert_equal(len(w), 1)
+ assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
- self.assertEqual(len(w), 2)
- self.assertTrue("overflow" in str(w[-1].message))
+ assert_equal(len(w), 2)
+ assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
- self.assertEqual(len(w), 3)
- self.assertTrue("invalid value" in str(w[-1].message))
+ assert_equal(len(w), 3)
+ assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
- self.assertEqual(len(w), 4)
- self.assertTrue("underflow" in str(w[-1].message))
+ assert_equal(len(w), 4)
+ assert_("underflow" in str(w[-1].message))
-class TestTypes(TestCase):
+class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
@@ -794,8 +800,8 @@ class TestTypes(TestCase):
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
- assert_(np.can_cast(np.float64, np.complex))
- assert_(not np.can_cast(np.complex, np.float))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
@@ -866,13 +872,16 @@ class TestTypes(TestCase):
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
-class TestFromiter(TestCase):
+class TestFromiter(object):
def makegen(self):
for x in range(24):
yield x**2
@@ -881,25 +890,25 @@ class TestFromiter(TestCase):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
- self.assertTrue(ai32.dtype == np.dtype(np.int32))
- self.assertTrue(ai64.dtype == np.dtype(np.int64))
- self.assertTrue(af.dtype == np.dtype(float))
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(len(a) == len(expected))
- self.assertTrue(len(a20) == 20)
- self.assertRaises(ValueError, np.fromiter,
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(np.alltrue(a == expected, axis=0))
- self.assertTrue(np.alltrue(a20 == expected[:20], axis=0))
+ assert_(np.alltrue(a == expected, axis=0))
+ assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
@@ -912,18 +921,18 @@ class TestFromiter(TestCase):
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
-class TestNonzero(TestCase):
+class TestNonzero(object):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
@@ -975,11 +984,11 @@ class TestNonzero(TestCase):
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
- c = np.zeros(200, dtype=np.bool)
+ c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
- c = np.zeros(400, dtype=np.bool)
+ c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
@@ -1020,6 +1029,10 @@ class TestNonzero(TestCase):
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
+ def assert_equal_w_dt(a, b, err_msg):
+ assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+ assert_equal(a, b, err_msg=err_msg)
+
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
@@ -1039,13 +1052,13 @@ class TestNonzero(TestCase):
m[1, 0] = '1970-01-12'
m = m.astype(dt)
- expected = np.array([2, 0, 0])
- assert_equal(np.count_nonzero(m, axis=0),
- expected, err_msg=err_msg)
+ expected = np.array([2, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
- expected = np.array([1, 1, 0])
- assert_equal(np.count_nonzero(m, axis=1),
- expected, err_msg=err_msg)
+ expected = np.array([1, 1, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
@@ -1060,13 +1073,13 @@ class TestNonzero(TestCase):
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
- expected = np.array([0, 0, 0])
- assert_equal(np.count_nonzero(m, axis=0),
- expected, err_msg=err_msg)
+ expected = np.array([0, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
- expected = np.array([0, 0])
- assert_equal(np.count_nonzero(m, axis=1),
- expected, err_msg=err_msg)
+ expected = np.array([0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
@@ -1089,7 +1102,7 @@ class TestNonzero(TestCase):
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
- n = m.astype(np.object)
+ n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
@@ -1108,7 +1121,7 @@ class TestNonzero(TestCase):
assert_equal(m.nonzero(), tgt)
-class TestIndex(TestCase):
+class TestIndex(object):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
@@ -1125,7 +1138,7 @@ class TestIndex(TestCase):
assert_equal(c.dtype, np.dtype('int32'))
-class TestBinaryRepr(TestCase):
+class TestBinaryRepr(object):
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
@@ -1162,7 +1175,7 @@ class TestBinaryRepr(TestCase):
assert_equal(np.binary_repr(num, width=width), exp)
-class TestBaseRepr(TestCase):
+class TestBaseRepr(object):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
@@ -1178,13 +1191,13 @@ class TestBaseRepr(TestCase):
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 1)
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 37)
-class TestArrayComparisons(TestCase):
+class TestArrayComparisons(object):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1264,8 +1277,8 @@ def assert_array_strict_equal(x, y):
assert_(x.dtype.isnative == y.dtype.isnative)
-class TestClip(TestCase):
- def setUp(self):
+class TestClip(object):
+ def setup(self):
self.nr = 5
self.nc = 3
@@ -1380,7 +1393,7 @@ class TestClip(TestCase):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
- a = np.ones(10, dtype=np.complex)
+ a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
@@ -1691,7 +1704,7 @@ class TestClip(TestCase):
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
- self.assertTrue(a2 is a)
+ assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
@@ -1706,10 +1719,10 @@ class TestAllclose(object):
rtol = 1e-5
atol = 1e-8
- def setUp(self):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
@@ -1920,13 +1933,13 @@ class TestIsclose(object):
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
- assert_(np.isclose(np.inf, -np.inf) is False)
- assert_(np.isclose(0, np.inf) is False)
- assert_(type(np.isclose(0, np.inf)) is bool)
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
-class TestStdVar(TestCase):
- def setUp(self):
+class TestStdVar(object):
+ def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
@@ -1964,7 +1977,7 @@ class TestStdVar(TestCase):
assert_array_equal(r, out)
-class TestStdVarComplex(TestCase):
+class TestStdVarComplex(object):
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
@@ -1976,10 +1989,10 @@ class TestStdVarComplex(TestCase):
assert_equal(np.std(1j), 0)
-class TestCreationFuncs(TestCase):
+class TestCreationFuncs(object):
# Test ones, zeros, empty and full.
- def setUp(self):
+ def setup(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
@@ -2047,10 +2060,10 @@ class TestCreationFuncs(TestCase):
assert_(sys.getrefcount(dim) == beg)
-class TestLikeFuncs(TestCase):
+class TestLikeFuncs(object):
'''Test ones_like, zeros_like, empty_like and full_like'''
- def setUp(self):
+ def setup(self):
self.data = [
# Array scalars
(np.array(3.), None),
@@ -2165,7 +2178,7 @@ class TestLikeFuncs(TestCase):
self.check_like_function(np.full_like, np.inf, True)
-class TestCorrelate(TestCase):
+class TestCorrelate(object):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
@@ -2179,7 +2192,7 @@ class TestCorrelate(TestCase):
-102., -54., -19.], dtype=dt)
def test_float(self):
- self._setup(np.float)
+ self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
@@ -2208,15 +2221,15 @@ class TestCorrelate(TestCase):
assert_array_equal(k, np.ones(3))
def test_complex(self):
- x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
- y = np.array([-1, -2j, 3+1j], dtype=np.complex)
- r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
+ x = np.array([1, 2, 3, 4+1j], dtype=complex)
+ y = np.array([-1, -2j, 3+1j], dtype=complex)
+ r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
-class TestConvolve(TestCase):
+class TestConvolve(object):
def test_object(self):
d = [1.] * 100
k = [1.] * 3
@@ -2258,7 +2271,7 @@ class TestStringFunction(object):
assert_equal(str(a), "[1]")
-class TestRoll(TestCase):
+class TestRoll(object):
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
@@ -2316,7 +2329,7 @@ class TestRoll(TestCase):
assert_equal(np.roll(x, 1), np.array([]))
-class TestRollaxis(TestCase):
+class TestRollaxis(object):
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
@@ -2378,7 +2391,7 @@ class TestRollaxis(TestCase):
assert_(not res.flags['OWNDATA'])
-class TestMoveaxis(TestCase):
+class TestMoveaxis(object):
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
@@ -2452,7 +2465,7 @@ class TestMoveaxis(TestCase):
assert_(isinstance(result, np.ndarray))
-class TestCross(TestCase):
+class TestCross(object):
def test_2x2(self):
u = [1, 2]
v = [3, 4]
@@ -2615,7 +2628,7 @@ class TestRequire(object):
yield self.set_and_check_flag, flag, None, a
-class TestBroadcast(TestCase):
+class TestBroadcast(object):
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
@@ -2652,7 +2665,7 @@ class TestBroadcast(TestCase):
assert_equal(mit.numiter, j)
-class TestKeepdims(TestCase):
+class TestKeepdims(object):
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
@@ -2664,5 +2677,16 @@ class TestKeepdims(TestCase):
assert_raises(TypeError, np.sum, x, keepdims=True)
+class TestTensordot(object):
+
+ def test_zero_dimension(self):
+ # Test resolution to issue #5663
+ a = np.ndarray((3,0))
+ b = np.ndarray((0,4))
+ td = np.tensordot(a, b, (1, 0))
+ assert_array_equal(td, np.dot(a, b))
+ assert_array_equal(td, np.einsum('ij,jk', a, b))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 293031c03..8831cd1bb 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -1,10 +1,11 @@
from __future__ import division, absolute_import, print_function
import sys
+import itertools
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal, assert_raises
)
# This is the structure of the table used for plain objects:
@@ -102,99 +103,99 @@ def normalize_descr(descr):
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
h = np.zeros((), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void')
- self.assertTrue(h.dtype.fields['x'][0].char == 'V')
- self.assertTrue(h.dtype.fields['x'][0].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+ assert_(h.dtype.fields['x'][0].char == 'V')
+ assert_(h.dtype.fields['x'][0].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((), dtype='u1'))
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
h = np.zeros((2,), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['y'].name[:4] == 'void')
- self.assertTrue(h.dtype['y'].char == 'V')
- self.assertTrue(h.dtype['y'].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['y'].name[:4] == 'void')
+ assert_(h.dtype['y'].char == 'V')
+ assert_(h.dtype['y'].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
h = np.zeros((2, 3), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['z'].name == 'uint8')
- self.assertTrue(h.dtype['z'].char == 'B')
- self.assertTrue(h.dtype['z'].type == np.uint8)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['z'].name == 'uint8')
+ assert_(h.dtype['z'].char == 'B')
+ assert_(h.dtype['z'].type == np.uint8)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
-class test_create_zeros_plain(create_zeros, TestCase):
+class TestCreateZerosPlain(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
_descr = Pdescr
-class test_create_zeros_nested(create_zeros, TestCase):
+class TestCreateZerosNested(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
-class create_values(object):
+class CreateValues(object):
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
"""Check creation from tuples"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (2,))
+ assert_(h.shape == (2,))
else:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
def test_list_of_tuple(self):
"""Check creation from list of tuples"""
h = np.array([self._buffer], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 2))
+ assert_(h.shape == (1, 2))
else:
- self.assertTrue(h.shape == (1,))
+ assert_(h.shape == (1,))
def test_list_of_list_of_tuple(self):
"""Check creation from list of list of tuples"""
h = np.array([[self._buffer]], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 1, 2))
+ assert_(h.shape == (1, 1, 2))
else:
- self.assertTrue(h.shape == (1, 1))
+ assert_(h.shape == (1, 1))
-class test_create_values_plain_single(create_values, TestCase):
+class TestCreateValuesPlainSingle(CreateValues):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_create_values_plain_multiple(create_values, TestCase):
+class TestCreateValuesPlainMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class test_create_values_nested_single(create_values, TestCase):
+class TestCreateValuesNestedSingle(CreateValues):
"""Check the creation of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = 0
_buffer = NbufferT[0]
-class test_create_values_nested_multiple(create_values, TestCase):
+class TestCreateValuesNestedMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
@@ -205,18 +206,18 @@ class test_create_values_nested_multiple(create_values, TestCase):
# Reading tests
############################################################
-class read_values_plain(object):
+class ReadValuesPlain(object):
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][1],
@@ -225,31 +226,31 @@ class read_values_plain(object):
self._buffer[1][2]], dtype='u1'))
-class test_read_values_plain_single(read_values_plain, TestCase):
+class TestReadValuesPlainSingle(ReadValuesPlain):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_read_values_plain_multiple(read_values_plain, TestCase):
+class TestReadValuesPlainMultiple(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class read_values_nested(object):
+class ReadValuesNested(object):
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
"""Check reading the top fields of a nested array"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][4],
@@ -308,41 +309,41 @@ class read_values_nested(object):
def test_nested1_descriptor(self):
"""Check access nested descriptors of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['value'].name == 'complex128')
- self.assertTrue(h.dtype['Info']['y2'].name == 'float64')
+ assert_(h.dtype['Info']['value'].name == 'complex128')
+ assert_(h.dtype['Info']['y2'].name == 'float64')
if sys.version_info[0] >= 3:
- self.assertTrue(h.dtype['info']['Name'].name == 'str256')
+ assert_(h.dtype['info']['Name'].name == 'str256')
else:
- self.assertTrue(h.dtype['info']['Name'].name == 'unicode256')
- self.assertTrue(h.dtype['info']['Value'].name == 'complex128')
+ assert_(h.dtype['info']['Name'].name == 'unicode256')
+ assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
"""Check access nested descriptors of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256')
- self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64')
+ assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+ assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
-class test_read_values_nested_single(read_values_nested, TestCase):
+class TestReadValuesNestedSingle(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
-class test_read_values_nested_multiple(read_values_nested, TestCase):
+class TestReadValuesNestedMultiple(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = True
_buffer = NbufferT
-class TestEmptyField(TestCase):
+class TestEmptyField(object):
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
@@ -363,19 +364,50 @@ class TestCommonType(TestCase):
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
-class TestMultipleFields(TestCase):
- def setUp(self):
+class TestMultipleFields(object):
+ def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary['f0', 'f1']
def test_no_tuple(self):
- self.assertRaises(IndexError, self._bad_call)
+ assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
+
+class TestIsSubDType(object):
+ # scalar types can be promoted into dtypes
+ wrappers = [np.dtype, lambda x: x]
+
+ def test_both_abstract(self):
+ assert_(np.issubdtype(np.floating, np.inexact))
+ assert_(not np.issubdtype(np.inexact, np.floating))
+
+ def test_same(self):
+ for cls in (np.float32, np.int32):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(np.issubdtype(w1(cls), w2(cls)))
+
+ def test_subclass(self):
+ # note we cannot promote floating to a dtype, as it would turn into a
+ # concrete type
+ for w in self.wrappers:
+ assert_(np.issubdtype(w(np.float32), np.floating))
+ assert_(np.issubdtype(w(np.float64), np.floating))
+
+ def test_subclass_backwards(self):
+ for w in self.wrappers:
+ assert_(not np.issubdtype(np.floating, w(np.float32)))
+ assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+ def test_sibling_class(self):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+ assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index b1ce12f56..305258d6f 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -35,7 +35,7 @@ def test_float_types():
""" Check formatting.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
@@ -51,7 +51,7 @@ def test_nan_inf_float():
""" Check formatting of nan & inf.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
@@ -79,7 +79,7 @@ def test_complex_types():
"""Check formatting of complex types.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 6f1ed37d1..27d35fa65 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -8,12 +8,12 @@ from os import path
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises, assert_warns
)
-class TestFromrecords(TestCase):
+class TestFromrecords(object):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
@@ -29,7 +29,7 @@ class TestFromrecords(TestCase):
def test_fromrecords_0len(self):
""" Verify fromrecords works with a 0-length input """
- dtype = [('a', np.float), ('b', np.float)]
+ dtype = [('a', float), ('b', float)]
r = np.rec.fromrecords([], dtype=dtype)
assert_equal(r.shape, (0,))
@@ -153,11 +153,6 @@ class TestFromrecords(TestCase):
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- assert_equal(r[['a', 'b']].dtype.type, np.record)
- assert_equal(type(r[['a', 'b']]), np.recarray)
-
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
@@ -235,13 +230,13 @@ class TestFromrecords(TestCase):
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
- dtype=[('a', int), ('b', np.object)])
+ dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
- ndtype = np.dtype([('a', int), ('b', np.object)])
+ ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
@@ -298,8 +293,8 @@ class TestFromrecords(TestCase):
assert_equal(rec['f1'], [b'', b'', b''])
-class TestRecord(TestCase):
- def setUp(self):
+class TestRecord(object):
+ def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
@@ -323,7 +318,7 @@ class TestRecord(TestCase):
def assign_invalid_column(x):
x[0].col5 = 1
- self.assertRaises(AttributeError, assign_invalid_column, a)
+ assert_raises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
@@ -334,15 +329,6 @@ class TestRecord(TestCase):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
- def test_out_of_order_fields(self):
- """Ticket #1431."""
- # this test will be invalid in 1.13
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- x = self.data[['col1', 'col2']]
- y = self.data[['col2', 'col1']]
- assert_equal(x[0][0], y[0][1])
-
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
@@ -371,8 +357,7 @@ class TestRecord(TestCase):
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
- with assert_warns(FutureWarning):
- ra[['x','y']] # TypeError?
+ ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index fb9ea5252..84469d03b 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -13,26 +13,25 @@ from itertools import chain
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, IS_PYPY,
+ run_module_suite, assert_, assert_equal, IS_PYPY,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns, dec, suppress_warnings
+ assert_raises, assert_warns, dec, suppress_warnings,
+ _assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.testing.utils import _assert_valid_refcount, HAS_REFCOUNT
from numpy.compat import asbytes, asunicode, long
-rlevel = 1
-class TestRegression(TestCase):
- def test_invalid_round(self, level=rlevel):
+class TestRegression(object):
+ def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
- def test_mem_empty(self, level=rlevel):
+ def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
- def test_pickle_transposed(self, level=rlevel):
+ def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
@@ -42,44 +41,44 @@ class TestRegression(TestCase):
f.close()
assert_array_equal(a, b)
- def test_typeNA(self, level=rlevel):
+ def test_typeNA(self):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
- def test_dtype_names(self, level=rlevel):
+ def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
- def test_reduce(self, level=rlevel):
+ def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
- def test_zeros_order(self, level=rlevel):
+ def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
- def test_asarray_with_order(self, level=rlevel):
+ def test_asarray_with_order(self):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
- def test_ravel_with_order(self, level=rlevel):
+ def test_ravel_with_order(self):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
- def test_sort_bigendian(self, level=rlevel):
+ def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
- def test_negative_nd_indexing(self, level=rlevel):
+ def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
@@ -87,7 +86,7 @@ class TestRegression(TestCase):
c[idx]
assert_array_equal(idx, origidx)
- def test_char_dump(self, level=rlevel):
+ def test_char_dump(self):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
@@ -96,7 +95,7 @@ class TestRegression(TestCase):
ca = np.load(f)
f.close()
- def test_noncontiguous_fill(self, level=rlevel):
+ def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
@@ -104,60 +103,60 @@ class TestRegression(TestCase):
def rs():
b.shape = (10,)
- self.assertRaises(AttributeError, rs)
+ assert_raises(AttributeError, rs)
- def test_bool(self, level=rlevel):
+ def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
- def test_indexing1(self, level=rlevel):
+ def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
- def test_indexing2(self, level=rlevel):
+ def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
- def test_round(self, level=rlevel):
+ def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
- def test_scalar_compare(self, level=rlevel):
+ def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
- self.assertTrue(a[1] == 'auto')
- self.assertTrue(a[0] != 'auto')
+ assert_(a[1] == 'auto')
+ assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
- self.assertTrue(b != 'auto')
- self.assertTrue(b[0] != 'auto')
+ assert_(b != 'auto')
+ assert_(b[0] != 'auto')
- def test_unicode_swapping(self, level=rlevel):
+ def test_unicode_swapping(self):
# Ticket #79
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
- def test_object_array_fill(self, level=rlevel):
+ def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
- def test_mem_dtype_align(self, level=rlevel):
+ def test_mem_dtype_align(self):
# Ticket #93
- self.assertRaises(TypeError, np.dtype,
+ assert_raises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
@@ -165,16 +164,16 @@ class TestRegression(TestCase):
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
- def test_intp(self, level=rlevel):
+ def test_intp(self):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
- self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
- self.assertRaises(ValueError, np.intp, '0x1', 32)
+ assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
+ assert_raises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
- def test_endian_bool_indexing(self, level=rlevel):
+ def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
@@ -187,7 +186,7 @@ class TestRegression(TestCase):
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
- def test_endian_where(self, level=rlevel):
+ def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
@@ -197,7 +196,7 @@ class TestRegression(TestCase):
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
- def test_endian_recarray(self, level=rlevel):
+ def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
@@ -213,7 +212,7 @@ class TestRegression(TestCase):
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
- def test_mem_dot(self, level=rlevel):
+ def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
@@ -227,7 +226,7 @@ class TestRegression(TestCase):
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
- def test_arange_endian(self, level=rlevel):
+ def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
@@ -235,31 +234,31 @@ class TestRegression(TestCase):
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
- def test_argmax(self, level=rlevel):
+ def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
- def test_mem_divmod(self, level=rlevel):
+ def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
- def test_hstack_invalid_dims(self, level=rlevel):
+ def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
- self.assertRaises(ValueError, np.hstack, (x, y))
+ assert_raises(ValueError, np.hstack, (x, y))
- def test_squeeze_type(self, level=rlevel):
+ def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
- def test_add_identity(self, level=rlevel):
+ def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
@@ -268,11 +267,11 @@ class TestRegression(TestCase):
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
- def test_binary_repr_0(self, level=rlevel):
+ def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
- def test_rec_iterate(self, level=rlevel):
+ def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
@@ -280,19 +279,19 @@ class TestRegression(TestCase):
x[0].tolist()
[i for i in x[0]]
- def test_unicode_string_comparison(self, level=rlevel):
+ def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
- def test_tobytes_FORTRANORDER_discontiguous(self, level=rlevel):
+ def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
- def test_flat_assignment(self, level=rlevel):
+ def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
@@ -300,7 +299,7 @@ class TestRegression(TestCase):
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
- def test_broadcast_flat_assignment(self, level=rlevel):
+ def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
@@ -310,8 +309,8 @@ class TestRegression(TestCase):
def bfb():
x[:] = np.arange(3, dtype=float)
- self.assertRaises(ValueError, bfa)
- self.assertRaises(ValueError, bfb)
+ assert_raises(ValueError, bfa)
+ assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
@@ -328,7 +327,7 @@ class TestRegression(TestCase):
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
- def test_unpickle_dtype_with_object(self, level=rlevel):
+ def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
@@ -338,15 +337,15 @@ class TestRegression(TestCase):
f.close()
assert_equal(dt, dt_)
- def test_mem_array_creation_invalid_specification(self, level=rlevel):
+ def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
- self.assertRaises(ValueError, np.array, [1, 'object'], dt)
+ assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
- def test_recarray_single_element(self, level=rlevel):
+ def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
@@ -354,23 +353,23 @@ class TestRegression(TestCase):
assert_array_equal(a, b)
assert_equal(a, r[0][0])
- def test_zero_sized_array_indexing(self, level=rlevel):
+ def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
- self.assertRaises(IndexError, index_tmp)
+ assert_raises(IndexError, index_tmp)
- def test_chararray_rstrip(self, level=rlevel):
+ def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = b'a '
x = x.rstrip()
assert_equal(x[0], b'a')
- def test_object_array_shape(self, level=rlevel):
+ def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
@@ -379,20 +378,20 @@ class TestRegression(TestCase):
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
- def test_mem_around(self, level=rlevel):
+ def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
- def test_character_array_strip(self, level=rlevel):
+ def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
- def test_lexsort(self, level=rlevel):
+ def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
@@ -444,36 +443,36 @@ class TestRegression(TestCase):
for name in result.dtype.names:
assert_(isinstance(name, str))
- def test_pickle_dtype(self, level=rlevel):
+ def test_pickle_dtype(self):
# Ticket #251
- pickle.dumps(np.float)
+ pickle.dumps(float)
- def test_swap_real(self, level=rlevel):
+ def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
- def test_object_array_from_list(self, level=rlevel):
+ def test_object_array_from_list(self):
# Ticket #270
- self.assertEqual(np.array([1, 'A', None]).shape, (3,))
+ assert_(np.array([1, 'A', None]).shape == (3,))
- def test_multiple_assign(self, level=rlevel):
+ def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
- def test_empty_array_type(self, level=rlevel):
+ def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
- def test_void_copyswap(self, level=rlevel):
+ def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
- def test_method_args(self, level=rlevel):
+ def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
@@ -515,17 +514,17 @@ class TestRegression(TestCase):
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
- def test_mem_lexsort_strings(self, level=rlevel):
+ def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
- def test_fancy_index(self, level=rlevel):
+ def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
- def test_recarray_copy(self, level=rlevel):
+ def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
@@ -533,64 +532,64 @@ class TestRegression(TestCase):
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
- def test_rec_fromarray(self, level=rlevel):
+ def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
- def test_object_array_assign(self, level=rlevel):
+ def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
- def test_ndmin_float64(self, level=rlevel):
+ def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
- def test_ndmin_order(self, level=rlevel):
+ def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
- def test_mem_axis_minimization(self, level=rlevel):
+ def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
- def test_mem_float_imag(self, level=rlevel):
+ def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
- def test_dtype_tuple(self, level=rlevel):
+ def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
- def test_dtype_posttuple(self, level=rlevel):
+ def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
- def test_numeric_carray_compare(self, level=rlevel):
+ def test_numeric_carray_compare(self):
# Ticket #341
assert_equal(np.array(['X'], 'c'), b'X')
- def test_string_array_size(self, level=rlevel):
+ def test_string_array_size(self):
# Ticket #342
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
- def test_dtype_repr(self, level=rlevel):
+ def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
- def test_reshape_order(self, level=rlevel):
+ def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
@@ -598,13 +597,13 @@ class TestRegression(TestCase):
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
- def test_reshape_zero_strides(self, level=rlevel):
+ def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
- def test_reshape_zero_size(self, level=rlevel):
+ def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
@@ -621,22 +620,22 @@ class TestRegression(TestCase):
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
- def test_repeat_discont(self, level=rlevel):
+ def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
- def test_array_index(self, level=rlevel):
+ def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
- def test_object_argmax(self, level=rlevel):
+ def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
- def test_recarray_fields(self, level=rlevel):
+ def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
@@ -647,22 +646,22 @@ class TestRegression(TestCase):
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
- def test_random_shuffle(self, level=rlevel):
+ def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
- def test_refcount_vdot(self, level=rlevel):
+ def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
- def test_startswith(self, level=rlevel):
+ def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
- def test_noncommutative_reduce_accumulate(self, level=rlevel):
+ def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
@@ -673,28 +672,28 @@ class TestRegression(TestCase):
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
- def test_convolve_empty(self, level=rlevel):
+ def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
- self.assertRaises(ValueError, np.convolve, [], [1])
- self.assertRaises(ValueError, np.convolve, [1], [])
+ assert_raises(ValueError, np.convolve, [], [1])
+ assert_raises(ValueError, np.convolve, [1], [])
- def test_multidim_byteswap(self, level=rlevel):
+ def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
- def test_string_NULL(self, level=rlevel):
+ def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
- def test_junk_in_string_fields_of_recarray(self, level=rlevel):
+ def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
r = np.array([[b'abc']], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == b'abc')
- def test_take_output(self, level=rlevel):
+ def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
@@ -715,13 +714,13 @@ class TestRegression(TestCase):
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
- def test_array_str_64bit(self, level=rlevel):
+ def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
- def test_frompyfunc_endian(self, level=rlevel):
+ def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
@@ -730,33 +729,33 @@ class TestRegression(TestCase):
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
- def test_mem_string_arr(self, level=rlevel):
+ def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
- def test_arr_transpose(self, level=rlevel):
+ def test_arr_transpose(self):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
- def test_string_mergesort(self, level=rlevel):
+ def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
- def test_argmax_byteorder(self, level=rlevel):
+ def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
- def test_rand_seed(self, level=rlevel):
+ def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
- def test_mem_deallocation_leak(self, level=rlevel):
+ def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
@@ -764,9 +763,9 @@ class TestRegression(TestCase):
def test_mem_on_invalid_dtype(self):
"Ticket #583"
- self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
+ assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
- def test_dot_negative_stride(self, level=rlevel):
+ def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
@@ -774,7 +773,7 @@ class TestRegression(TestCase):
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
- def test_object_casting(self, level=rlevel):
+ def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
@@ -783,16 +782,16 @@ class TestRegression(TestCase):
y = np.zeros([484, 286])
x |= y
- self.assertRaises(TypeError, rs)
+ assert_raises(TypeError, rs)
- def test_unicode_scalar(self, level=rlevel):
+ def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
- def test_arange_non_native_dtype(self, level=rlevel):
+ def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
@@ -800,73 +799,73 @@ class TestRegression(TestCase):
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
- def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
+ def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
- self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
- self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
- def test_mem_scalar_indexing(self, level=rlevel):
+ def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
- def test_binary_repr_0_width(self, level=rlevel):
+ def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
- def test_fromstring(self, level=rlevel):
+ def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
- def test_searchsorted_variable_length(self, level=rlevel):
+ def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
- def test_string_argsort_with_zeros(self, level=rlevel):
+ def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
- def test_string_sort_with_zeros(self, level=rlevel):
+ def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
- def test_copy_detection_zero_dim(self, level=rlevel):
+ def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
- def test_flat_byteorder(self, level=rlevel):
+ def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
- def test_uint64_from_negative(self, level=rlevel):
+ def test_uint64_from_negative(self):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
- def test_sign_bit(self, level=rlevel):
+ def test_sign_bit(self):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
- def test_flat_index_byteswap(self, level=rlevel):
+ def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
- def test_copy_detection_corner_case(self, level=rlevel):
+ def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
@@ -874,13 +873,13 @@ class TestRegression(TestCase):
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
- def test_copy_detection_corner_case2(self, level=rlevel):
+ def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
- def test_object_array_refcounting(self, level=rlevel):
+ def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
@@ -983,7 +982,7 @@ class TestRegression(TestCase):
del tmp # Avoid pyflakes unused variable warning
- def test_mem_custom_float_to_array(self, level=rlevel):
+ def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat(object):
def __float__(self):
@@ -992,7 +991,7 @@ class TestRegression(TestCase):
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
- def test_object_array_refcount_self_assign(self, level=rlevel):
+ def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject(object):
deleted = False
@@ -1009,23 +1008,23 @@ class TestRegression(TestCase):
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
- def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
+ def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
- def test_reduce_big_object_array(self, level=rlevel):
+ def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
- def test_mem_0d_array_index(self, level=rlevel):
+ def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
- def test_floats_from_string(self, level=rlevel):
+ def test_floats_from_string(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
@@ -1034,7 +1033,7 @@ class TestRegression(TestCase):
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
- def test_nonnative_endian_fill(self, level=rlevel):
+ def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
@@ -1045,7 +1044,7 @@ class TestRegression(TestCase):
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
- def test_dot_alignment_sse2(self, level=rlevel):
+ def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
@@ -1054,7 +1053,7 @@ class TestRegression(TestCase):
# This shouldn't cause a segmentation fault:
np.dot(z, y)
- def test_astype_copy(self, level=rlevel):
+ def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
@@ -1072,7 +1071,7 @@ class TestRegression(TestCase):
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
- def test_compress_small_type(self, level=rlevel):
+ def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
@@ -1086,7 +1085,7 @@ class TestRegression(TestCase):
except TypeError:
pass
- def test_attributes(self, level=rlevel):
+ def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
@@ -1158,7 +1157,7 @@ class TestRegression(TestCase):
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
- def test_recarray_tolist(self, level=rlevel):
+ def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
@@ -1173,12 +1172,12 @@ class TestRegression(TestCase):
a = np.arange(5)
assert_raises(ValueError, a.item)
- def test_char_array_creation(self, level=rlevel):
+ def test_char_array_creation(self):
a = np.array('123', dtype='c')
b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
- def test_unaligned_unicode_access(self, level=rlevel):
+ def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
@@ -1189,7 +1188,7 @@ class TestRegression(TestCase):
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
- def test_sign_for_complex_nan(self, level=rlevel):
+ def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
@@ -1197,7 +1196,7 @@ class TestRegression(TestCase):
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
- def test_for_equal_names(self, level=rlevel):
+ def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
@@ -1207,7 +1206,7 @@ class TestRegression(TestCase):
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
- def test_for_object_scalar_creation(self, level=rlevel):
+ def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
@@ -1224,18 +1223,18 @@ class TestRegression(TestCase):
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
- self.assertRaises(TypeError, x.resize, (2, 2), order='C')
+ assert_raises(TypeError, x.resize, (2, 2), order='C')
- def test_for_zero_length_in_choose(self, level=rlevel):
+ def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
- self.assertRaises(ValueError, lambda x: x.choose([]), a)
+ assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
- self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
+ assert_raises(ValueError, lambda: np.array([1], ndmin=33))
- def test_void_scalar_with_titles(self, level=rlevel):
+ def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
@@ -1308,7 +1307,7 @@ class TestRegression(TestCase):
good = 'Maximum allowed size exceeded'
try:
np.arange(sz)
- self.assertTrue(np.size == sz)
+ assert_(np.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
@@ -1360,7 +1359,7 @@ class TestRegression(TestCase):
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
- assert_equal(i, np.arange(100, dtype=np.int))
+ assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
@@ -1380,7 +1379,7 @@ class TestRegression(TestCase):
a = np.array([[u'abc', u'\u03a3'],
[u'asdf', u'erw']],
dtype='U')
- self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
@@ -1462,7 +1461,7 @@ class TestRegression(TestCase):
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
- self.assertRaises(ValueError, np.dtype, dtspec)
+ assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
@@ -1471,7 +1470,7 @@ class TestRegression(TestCase):
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
test_type(t)
def test_buffer_hashlib(self):
@@ -1491,7 +1490,7 @@ class TestRegression(TestCase):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
- def test_fromiter_comparison(self, level=rlevel):
+ def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
@@ -1563,9 +1562,9 @@ class TestRegression(TestCase):
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_take_refcount(self):
# ticket #939
- a = np.arange(16, dtype=np.float)
+ a = np.arange(16, dtype=float)
a.shape = (4, 4)
- lut = np.ones((5 + 3, 4), np.float)
+ lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
@@ -1807,7 +1806,7 @@ class TestRegression(TestCase):
a['f2'] = 1
except ValueError:
pass
- except:
+ except Exception:
raise AssertionError
def test_ticket_1608(self):
@@ -2068,8 +2067,8 @@ class TestRegression(TestCase):
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
- self.assertTrue(arr is not arr_cp)
- self.assertTrue(isinstance(arr_cp, type(arr)))
+ assert_(arr is not arr_cp)
+ assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
@@ -2079,13 +2078,13 @@ class TestRegression(TestCase):
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
- self.assertTrue(arr is not arr_cp)
+ assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
- self.assertTrue(arr[0, 1] is not arr_cp[1, 1])
+ assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
- self.assertTrue(arr[0, 1] is arr[1, 1])
+ assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
- self.assertTrue(arr_cp[0, 1] is arr_cp[1, 1])
+ assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
@@ -2173,7 +2172,7 @@ class TestRegression(TestCase):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
- ('d', (np.str, 5))])
+ ('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
@@ -2248,5 +2247,19 @@ class TestRegression(TestCase):
else:
assert_(t.__hash__ != None)
+ def test_scalar_copy(self):
+ scalar_types = set(np.sctypeDict.values())
+ values = {
+ np.void: b"a",
+ np.bytes_: b"a",
+ np.unicode_: "a",
+ np.datetime64: "2017-08-25",
+ }
+ for sctype in scalar_types:
+ item = sctype(values.get(sctype, 1))
+ item2 = copy.copy(item)
+ assert_equal(item, item2)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index e8cf7fde0..c5cd266eb 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -5,7 +5,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_
class A(object):
@@ -23,7 +23,7 @@ class B0(np.float64, A):
class C0(B0):
pass
-class TestInherit(TestCase):
+class TestInherit(object):
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
@@ -38,5 +38,41 @@ class TestInherit(TestCase):
y = C0(2.0)
assert_(str(y) == '2.0')
+
+class TestCharacter(object):
+ def test_char_radd(self):
+ # GH issue 9620, reached gentype_add and raise TypeError
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ s = b'def'
+ u = u'def'
+ assert_(np_s.__radd__(np_s) is NotImplemented)
+ assert_(np_s.__radd__(np_u) is NotImplemented)
+ assert_(np_s.__radd__(s) is NotImplemented)
+ assert_(np_s.__radd__(u) is NotImplemented)
+ assert_(np_u.__radd__(np_s) is NotImplemented)
+ assert_(np_u.__radd__(np_u) is NotImplemented)
+ assert_(np_u.__radd__(s) is NotImplemented)
+ assert_(np_u.__radd__(u) is NotImplemented)
+ assert_(s + np_s == b'defabc')
+ assert_(u + np_u == u'defabc')
+
+
+ class Mystr(str, np.generic):
+ # would segfault
+ pass
+
+ ret = s + Mystr('abc')
+ assert_(type(ret) is type(s))
+
+ def test_char_repeat(self):
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ np_i = np.int(5)
+ res_np = np_s * np_i
+ res_s = b'abc' * 5
+ assert_(res_np == res_s)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index c76db98f8..cff9f7985 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -6,11 +6,10 @@ import itertools
import operator
import numpy as np
-from numpy.testing.utils import _gen_alignment_data
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_almost_equal, assert_allclose, assert_array_equal, IS_PYPY,
- suppress_warnings
+ suppress_warnings, dec, _gen_alignment_data,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -23,13 +22,13 @@ floating_types = np.floating.__subclasses__()
# This compares scalarmath against ufuncs.
-class TestTypes(TestCase):
- def test_types(self, level=1):
+class TestTypes(object):
+ def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
- def test_type_add(self, level=1):
+ def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
@@ -49,7 +48,7 @@ class TestTypes(TestCase):
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
- def test_type_create(self, level=1):
+ def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
@@ -62,7 +61,7 @@ class TestTypes(TestCase):
np.add(1, 1)
-class TestBaseMath(TestCase):
+class TestBaseMath(object):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
@@ -108,7 +107,7 @@ class TestBaseMath(TestCase):
np.add(d, np.ones_like(d))
-class TestPower(TestCase):
+class TestPower(object):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
@@ -127,7 +126,7 @@ class TestPower(TestCase):
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
- # has common type np.float. The other combinations should all
+ # has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
@@ -200,7 +199,7 @@ def _signs(dt):
return (+1, -1)
-class TestModulus(TestCase):
+class TestModulus(object):
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -292,7 +291,7 @@ class TestModulus(TestCase):
assert_(np.isnan(rem), 'dt: %s' % dt)
-class TestComplexDivision(TestCase):
+class TestComplexDivision(object):
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
@@ -364,7 +363,7 @@ class TestComplexDivision(TestCase):
assert_equal(result.imag, ex[1])
-class TestConversion(TestCase):
+class TestConversion(object):
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
@@ -401,9 +400,22 @@ class TestConversion(TestCase):
def test_longdouble_int(self):
# gh-627
x = np.longdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ assert_equal(len(sup.log), 1)
+
+ @dec.knownfailureif(not IS_PYPY)
+ def test_clongdouble___int__(self):
+ x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
- x = np.clongdouble(np.inf)
- assert_raises(OverflowError, x.__int__)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ self.assertEqual(len(sup.log), 1)
def test_numpy_scalar_relational_operators(self):
# All integer
@@ -468,7 +480,7 @@ class TestConversion(TestCase):
assert_(np.equal(np.datetime64('NaT'), None))
-#class TestRepr(TestCase):
+#class TestRepr(object):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
@@ -512,7 +524,7 @@ class TestRepr(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(TestCase):
+ class TestSizeOf(object):
def test_equal_nbytes(self):
for type in types:
@@ -524,7 +536,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestMultiply(TestCase):
+class TestMultiply(object):
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
@@ -562,7 +574,7 @@ class TestMultiply(TestCase):
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
-class TestNegative(TestCase):
+class TestNegative(object):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
@@ -576,7 +588,7 @@ class TestNegative(TestCase):
assert_equal(operator.neg(a) + a, 0)
-class TestSubtract(TestCase):
+class TestSubtract(object):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
@@ -590,7 +602,7 @@ class TestSubtract(TestCase):
assert_equal(operator.sub(a, a), 0)
-class TestAbs(TestCase):
+class TestAbs(object):
def _test_abs_func(self, absfunc):
for tp in floating_types:
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 8d0f27182..7e17e0425 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -5,10 +5,10 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
-class TestRealScalars(TestCase):
+class TestRealScalars(object):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index c1680d181..5c1e569b7 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -4,13 +4,13 @@ import warnings
import numpy as np
from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
block, vstack, hstack, newaxis, concatenate, stack)
-from numpy.testing import (TestCase, assert_, assert_raises,
+from numpy.testing import (assert_, assert_raises,
assert_array_equal, assert_equal, run_module_suite,
assert_raises_regex, assert_almost_equal)
from numpy.compat import long
-class TestAtleast1d(TestCase):
+class TestAtleast1d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -51,7 +51,7 @@ class TestAtleast1d(TestCase):
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
-class TestAtleast2d(TestCase):
+class TestAtleast2d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -90,7 +90,7 @@ class TestAtleast2d(TestCase):
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
-class TestAtleast3d(TestCase):
+class TestAtleast3d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -122,7 +122,7 @@ class TestAtleast3d(TestCase):
assert_array_equal(res, desired)
-class TestHstack(TestCase):
+class TestHstack(object):
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
@@ -151,7 +151,7 @@ class TestHstack(TestCase):
assert_array_equal(res, desired)
-class TestVstack(TestCase):
+class TestVstack(object):
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
@@ -187,7 +187,7 @@ class TestVstack(TestCase):
assert_array_equal(res, desired)
-class TestConcatenate(TestCase):
+class TestConcatenate(object):
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
@@ -208,8 +208,8 @@ class TestConcatenate(TestCase):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
- a = np.rollaxis(a, -1)
- b = np.rollaxis(b, -1)
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
@@ -230,6 +230,12 @@ class TestConcatenate(TestCase):
'0', '1', '2', 'x'])
assert_array_equal(r, d)
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
@@ -278,6 +284,34 @@ class TestConcatenate(TestCase):
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
+ res = concatenate((array([1, 2]), array([3, 4])), out=out)
+ assert_(out is res)
+
+ out = np.empty(4, np.complex64)
+ res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ assert_(out is res)
+
+ # invalid cast
+ out = np.empty(4, np.int32)
+ assert_raises(TypeError, concatenate,
+ (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+
def test_stack():
# non-iterable input
@@ -333,7 +367,7 @@ def test_stack():
stack, [m, m])
-class TestBlock(TestCase):
+class TestBlock(object):
def test_block_simple_row_wise(self):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 3d6251253..57e0ec272 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,20 +1,23 @@
from __future__ import division, absolute_import, print_function
+import warnings
+import itertools
+
import numpy as np
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.core.test_rational import rational, test_add, test_add_rationals
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_no_warnings
+ assert_no_warnings, assert_allclose,
)
-class TestUfuncKwargs(TestCase):
+class TestUfuncKwargs(object):
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
- assert_raises(TypeError, np.add, 1, 2, dtypex=np.int)
+ assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
@@ -28,12 +31,12 @@ class TestUfuncKwargs(TestCase):
def test_sig_dtype(self):
assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
- dtype=np.int)
+ dtype=int)
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
- dtype=np.int)
+ dtype=int)
-class TestUfunc(TestCase):
+class TestUfunc(object):
def test_pickle(self):
import pickle
assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
@@ -171,22 +174,22 @@ class TestUfunc(TestCase):
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
@@ -353,14 +356,78 @@ class TestUfunc(TestCase):
assert_equal(b, [0, 0, 1])
def test_true_divide(self):
- # True_divide has a non uniform signature, see #3484.
- # This also tests type_tuple_type_resolver.
- a = np.full(5, 12.5)
- b = np.full(5, 10.0)
- tgt = np.full(5, 1.25)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
- assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)
+ a = np.array(10)
+ b = np.array(20)
+ tgt = np.array(0.5)
+
+ for tc in 'bhilqBHILQefdgFDG':
+ dt = np.dtype(tc)
+ aa = a.astype(dt)
+ bb = b.astype(dt)
+
+ # Check result value and dtype.
+ for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+ # Check with no output type specified
+ if tc in 'FDG':
+ tgt = complex(x)/complex(y)
+ else:
+ tgt = float(x)/float(y)
+
+ res = np.true_divide(x, y)
+ rtol = max(np.finfo(res).resolution, 1e-15)
+ assert_allclose(res, tgt, rtol=rtol)
+
+ if tc in 'bhilqBHILQ':
+ assert_(res.dtype.name == 'float64')
+ else:
+ assert_(res.dtype.name == dt.name )
+
+ # Check with output type specified. This also checks for the
+ # incorrect casts in issue gh-3484 because the unary '-' does
+ # not change types, even for unsigned types, Hence casts in the
+ # ufunc from signed to unsigned and vice versa will lead to
+ # errors in the values.
+ for tcout in 'bhilqBHILQ':
+ dtout = np.dtype(tcout)
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+ for tcout in 'efdg':
+ dtout = np.dtype(tcout)
+ if tc in 'FDG':
+ # Casting complex to float is not allowed
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+ else:
+ tgt = float(x)/float(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ # Some test values result in invalid for float16.
+ with np.errstate(invalid='ignore'):
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res) and tcout == 'e':
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ for tcout in 'FDG':
+ dtout = np.dtype(tcout)
+ tgt = complex(x)/complex(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res):
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ # Check booleans
+ a = np.ones((), dtype=np.bool_)
+ res = np.true_divide(a, a)
+ assert_(res == 1.0)
+ assert_(res.dtype.name == 'float64')
+ res = np.true_divide(~a, a)
+ assert_(res == 0.0)
+ assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
@@ -370,13 +437,22 @@ class TestUfunc(TestCase):
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
- for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
+ for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
- assert_almost_equal(np.sum(d), tgt)
- assert_almost_equal(np.sum(d[::-1]), tgt)
+
+ # warning if sum overflows, which it does in float16
+ overflow = not np.isfinite(tgt)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ assert_almost_equal(np.sum(d), tgt)
+ assert_equal(len(w), 1 * overflow)
+
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+ assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
@@ -603,7 +679,7 @@ class TestUfunc(TestCase):
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
- a = np.arange(12, dtype=np.float).reshape(4, 3)
+ a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
@@ -784,6 +860,17 @@ class TestUfunc(TestCase):
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+ def test_where_param_alloc(self):
+ # With casting and allocated output
+ a = np.array([1], dtype=np.int64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ # No casting and allocated output
+ a = np.array([1], dtype=np.float64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
def check_identityless_reduction(self, a):
# np.minimum.reduce is a identityless reduction
@@ -1000,6 +1087,11 @@ class TestUfunc(TestCase):
dtype=rational)
assert_equal(result, expected)
+ def test_custom_ufunc_forced_sig(self):
+ # gh-9351 - looking for a non-first userloop would previously hang
+ assert_raises(TypeError,
+ np.multiply, rational(1), 1, signature=(rational, int, None))
+
def test_custom_array_like(self):
class MyThing(object):
@@ -1162,9 +1254,9 @@ class TestUfunc(TestCase):
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
- values = np.array(['a', 1], dtype=np.object)
- self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
- assert_array_equal(values, np.array(['a', 1], dtype=np.object))
+ values = np.array(['a', 1], dtype=object)
+ assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+ assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
@@ -1283,6 +1375,10 @@ class TestUfunc(TestCase):
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
+ def test_no_doc_string(self):
+ # gh-9337
+ assert_('\n' not in umt.inner1d_no_doc.__doc__)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 13f29504a..5787a5183 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -6,15 +6,14 @@ import warnings
import fnmatch
import itertools
-from numpy.testing.utils import _gen_alignment_data
import numpy.core.umath as ncu
from numpy.core import umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_raises_regex, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, dec, assert_allclose, assert_no_warnings,
- suppress_warnings
+ suppress_warnings, _gen_alignment_data,
)
@@ -32,7 +31,7 @@ class _FilterInvalids(object):
np.seterr(**self.olderr)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
@@ -43,7 +42,7 @@ class TestConstants(TestCase):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
-class TestOut(TestCase):
+class TestOut(object):
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
@@ -176,7 +175,7 @@ class TestOut(TestCase):
assert_(w[0].category is DeprecationWarning)
-class TestComparisons(TestCase):
+class TestComparisons(object):
def test_ignore_object_identity_in_equal(self):
# Check error raised when comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
@@ -214,7 +213,7 @@ class TestComparisons(TestCase):
assert_equal(np.not_equal(a, a), [True])
-class TestDivision(TestCase):
+class TestDivision(object):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
@@ -275,7 +274,7 @@ def _signs(dt):
return (+1, -1)
-class TestRemainder(TestCase):
+class TestRemainder(object):
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -366,7 +365,7 @@ class TestRemainder(TestCase):
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
-class TestCbrt(TestCase):
+class TestCbrt(object):
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
@@ -379,7 +378,7 @@ class TestCbrt(TestCase):
assert_equal(np.cbrt(-np.inf), -np.inf)
-class TestPower(TestCase):
+class TestPower(object):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
@@ -518,7 +517,7 @@ class TestPower(TestCase):
assert_raises(ValueError, np.power, one, minusone)
-class TestFloat_power(TestCase):
+class TestFloat_power(object):
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
@@ -529,7 +528,7 @@ class TestFloat_power(TestCase):
assert_(res.dtype.name == np.dtype(dtout).name, msg)
-class TestLog2(TestCase):
+class TestLog2(object):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -560,7 +559,7 @@ class TestLog2(TestCase):
assert_(w[2].category is RuntimeWarning)
-class TestExp2(TestCase):
+class TestExp2(object):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -612,7 +611,7 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
-class TestLog(TestCase):
+class TestLog(object):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -623,7 +622,7 @@ class TestLog(TestCase):
assert_almost_equal(np.log(xf), yf)
-class TestExp(TestCase):
+class TestExp(object):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -675,7 +674,7 @@ class TestLogAddExp(_FilterInvalids):
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
-class TestLog1p(TestCase):
+class TestLog1p(object):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
@@ -689,7 +688,7 @@ class TestLog1p(TestCase):
assert_equal(ncu.log1p(-np.inf), np.nan)
-class TestExpm1(TestCase):
+class TestExpm1(object):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
@@ -702,7 +701,7 @@ class TestExpm1(TestCase):
assert_equal(ncu.expm1(-np.inf), -1.)
-class TestHypot(TestCase, object):
+class TestHypot(object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
@@ -726,7 +725,7 @@ def assert_hypot_isinf(x, y):
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
-class TestHypotSpecialValues(TestCase):
+class TestHypotSpecialValues(object):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
@@ -763,7 +762,7 @@ def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
-class TestArctan2SpecialValues(TestCase):
+class TestArctan2SpecialValues(object):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
@@ -832,7 +831,7 @@ class TestArctan2SpecialValues(TestCase):
assert_arctan2_isnan(np.nan, np.nan)
-class TestLdexp(TestCase):
+class TestLdexp(object):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
@@ -898,22 +897,22 @@ class TestMaximum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
@@ -956,22 +955,22 @@ class TestMinimum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
@@ -1012,9 +1011,9 @@ class TestFmax(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmax(arg1, arg2), out)
@@ -1054,13 +1053,13 @@ class TestFmin(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
-class TestBool(TestCase):
+class TestBool(object):
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
@@ -1123,7 +1122,7 @@ class TestBool(TestCase):
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
-class TestBitwiseUFuncs(TestCase):
+class TestBitwiseUFuncs(object):
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
@@ -1208,10 +1207,10 @@ class TestBitwiseUFuncs(TestCase):
assert_(type(f.reduce(btype)) is bool, msg)
-class TestInt(TestCase):
+class TestInt(object):
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
- o = np.ones(10 * 2, dtype=np.bool)
+ o = np.ones(10 * 2, dtype=bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
@@ -1219,24 +1218,24 @@ class TestInt(TestCase):
assert_array_equal(o, tgt)
-class TestFloatingPoint(TestCase):
+class TestFloatingPoint(object):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-class TestDegrees(TestCase):
+class TestDegrees(object):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-class TestRadians(TestCase):
+class TestRadians(object):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-class TestHeavside(TestCase):
+class TestHeavside(object):
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
@@ -1258,7 +1257,7 @@ class TestHeavside(TestCase):
assert_equal(h, expected1.astype(np.float32))
-class TestSign(TestCase):
+class TestSign(object):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
@@ -1275,7 +1274,7 @@ class TestSign(TestCase):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
- a = np.sign(foo.astype(np.object))
+ a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
@@ -1284,11 +1283,11 @@ class TestSign(TestCase):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
- a = np.sign(foo.astype(np.object))
+ a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
@@ -1299,8 +1298,11 @@ class TestMinMax(TestCase):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
- assert_(np.isnan(inp.max()), msg=emsg)
- assert_(np.isnan(inp.min()), msg=emsg)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ assert_(np.isnan(inp.max()), msg=emsg)
+ assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
@@ -1315,7 +1317,7 @@ class TestMinMax(TestCase):
assert_equal(d.min(), d[0])
-class TestAbsoluteNegative(TestCase):
+class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
@@ -1324,7 +1326,7 @@ class TestAbsoluteNegative(TestCase):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
- self.assertTrue((out >= 0).all())
+ assert_((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
@@ -1357,7 +1359,7 @@ class TestAbsoluteNegative(TestCase):
np.abs(np.ones_like(d), out=d)
-class TestPositive(TestCase):
+class TestPositive(object):
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
@@ -1376,7 +1378,7 @@ class TestPositive(TestCase):
np.positive(np.array(['bar'], dtype=object))
-class TestSpecialMethods(TestCase):
+class TestSpecialMethods(object):
def test_wrap(self):
class with_wrap(object):
@@ -1393,11 +1395,11 @@ class TestSpecialMethods(TestCase):
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
- self.assertTrue(func is ncu.minimum)
- self.assertEqual(len(args), 2)
+ assert_(func is ncu.minimum)
+ assert_equal(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
- self.assertEqual(i, 0)
+ assert_equal(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -1413,7 +1415,7 @@ class TestSpecialMethods(TestCase):
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
- self.assertTrue(isinstance(x, with_wrap))
+ assert_(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
@@ -1427,7 +1429,7 @@ class TestSpecialMethods(TestCase):
a = A()
x = np.float64(1)*a
- self.assertTrue(isinstance(x, A))
+ assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
@@ -1468,25 +1470,25 @@ class TestSpecialMethods(TestCase):
b = B()
c = C()
f = ncu.minimum
- self.assertTrue(type(f(x, x)) is np.ndarray)
- self.assertTrue(type(f(x, a)) is A)
- self.assertTrue(type(f(x, b)) is B)
- self.assertTrue(type(f(x, c)) is C)
- self.assertTrue(type(f(a, x)) is A)
- self.assertTrue(type(f(b, x)) is B)
- self.assertTrue(type(f(c, x)) is C)
-
- self.assertTrue(type(f(a, a)) is A)
- self.assertTrue(type(f(a, b)) is B)
- self.assertTrue(type(f(b, a)) is B)
- self.assertTrue(type(f(b, b)) is B)
- self.assertTrue(type(f(b, c)) is C)
- self.assertTrue(type(f(c, b)) is C)
- self.assertTrue(type(f(c, c)) is C)
-
- self.assertTrue(type(ncu.exp(a) is A))
- self.assertTrue(type(ncu.exp(b) is B))
- self.assertTrue(type(ncu.exp(c) is C))
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+ assert_(type(ncu.exp(a) is A))
+ assert_(type(ncu.exp(b) is B))
+ assert_(type(ncu.exp(c) is C))
def test_failing_wrap(self):
@@ -1498,7 +1500,7 @@ class TestSpecialMethods(TestCase):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
@@ -1569,7 +1571,7 @@ class TestSpecialMethods(TestCase):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
@@ -1591,10 +1593,10 @@ class TestSpecialMethods(TestCase):
a = A()
ncu.maximum(np.zeros(1), a)
- self.assertTrue(a.func is ncu.maximum)
+ assert_(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
- self.assertTrue(a.args[1] is a)
- self.assertTrue(a.i == 1)
+ assert_(a.args[1] is a)
+ assert_(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
@@ -1755,15 +1757,18 @@ class TestSpecialMethods(TestCase):
'keepdims': 'keep0',
'axis': 'axis0'})
- # reduce, output equal to None removed.
- res = np.multiply.reduce(a, out=None)
- assert_equal(res[4], {})
- res = np.multiply.reduce(a, out=(None,))
- assert_equal(res[4], {})
+ # reduce, output equal to None removed, but not other explicit ones,
+ # even if they are at their default value.
+ res = np.multiply.reduce(a, 0, None, None, False)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+ assert_equal(res[4], {'axis': 0, 'keepdims': True})
+ res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# reduce, wrong args
- assert_raises(TypeError, np.multiply.reduce, a, out=())
- assert_raises(TypeError, np.multiply.reduce, a, out=('out0', 'out1'))
+ assert_raises(ValueError, np.multiply.reduce, a, out=())
+ assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
@@ -1788,14 +1793,16 @@ class TestSpecialMethods(TestCase):
'axis': 'axis0'})
# accumulate, output equal to None removed.
- res = np.multiply.accumulate(a, out=None)
- assert_equal(res[4], {})
- res = np.multiply.accumulate(a, out=(None,))
- assert_equal(res[4], {})
+ res = np.multiply.accumulate(a, 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+ assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+ res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# accumulate, wrong args
- assert_raises(TypeError, np.multiply.accumulate, a, out=())
- assert_raises(TypeError, np.multiply.accumulate, a,
+ assert_raises(ValueError, np.multiply.accumulate, a, out=())
+ assert_raises(ValueError, np.multiply.accumulate, a,
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.accumulate, a,
'axis0', axis='axis0')
@@ -1822,14 +1829,16 @@ class TestSpecialMethods(TestCase):
'axis': 'axis0'})
# reduceat, output equal to None removed.
- res = np.multiply.reduceat(a, [4, 2], out=None)
- assert_equal(res[4], {})
- res = np.multiply.reduceat(a, [4, 2], out=(None,))
- assert_equal(res[4], {})
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# reduceat, wrong args
- assert_raises(TypeError, np.multiply.reduce, a, [4, 2], out=())
- assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2],
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
'axis0', axis='axis0')
@@ -1907,12 +1916,12 @@ class TestSpecialMethods(TestCase):
# wrong number of arguments in the tuple is an error too.
assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
- assert_raises(TypeError, np.multiply, a, b, out=('one', 'two'))
- assert_raises(TypeError, np.multiply, a, out=())
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(ValueError, np.multiply, a, out=())
assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
- assert_raises(TypeError, np.modf, a, out=('one', 'two', 'three'))
- assert_raises(TypeError, np.modf, a, out=('one',))
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
def test_ufunc_override_exception(self):
@@ -1931,14 +1940,14 @@ class TestSpecialMethods(TestCase):
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
- msg = ("operand type(s) do not implement __array_ufunc__("
- "<ufunc 'negative'>, '__call__', <*>): 'A'")
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.negative(A())
- msg = ("operand type(s) do not implement __array_ufunc__("
- "<ufunc 'add'>, '__call__', <*>, <object *>, out=(1,)): "
- "'A', 'object', 'int'")
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+ "out=(1,)): 'A', 'object', 'int'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.add(A(), object(), out=1)
@@ -1999,11 +2008,12 @@ class TestSpecialMethods(TestCase):
assert_raises(TypeError, inner1d, a, out='two')
assert_raises(TypeError, inner1d, a, a, 'one', out='two')
assert_raises(TypeError, inner1d, a, a, 'one', 'two')
- assert_raises(TypeError, inner1d, a, a, out=('one', 'two'))
- assert_raises(TypeError, inner1d, a, a, out=())
+ assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+ assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
-
+ # NOTE: this class is given as an example in doc/subclassing.py;
+ # if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
args = []
@@ -2041,6 +2051,8 @@ class TestSpecialMethods(TestCase):
return NotImplemented
if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
return
if ufunc.nout == 1:
@@ -2107,9 +2119,73 @@ class TestSpecialMethods(TestCase):
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
assert_(np.add(a, b) == "A!")
-
-
-class TestChoose(TestCase):
+ # regression check for gh-9102 -- tests ufunc.reduce implicitly.
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ a = d.view(A)
+ c = a.any()
+ check = d.any()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ c = a.max()
+ check = d.max()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.array(0).view(A)
+ c = a.max(out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = a.max(axis=0)
+ b = np.zeros_like(check).view(A)
+ c = a.max(axis=0, out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # simple explicit tests of reduce, accumulate, reduceat
+ check = np.add.reduce(d, axis=1)
+ c = np.add.reduce(a, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduce(a, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = np.add.accumulate(d, axis=0)
+ c = np.add.accumulate(a, axis=0)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.accumulate(a, 0, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ indices = [0, 2, 1]
+ check = np.add.reduceat(d, indices, axis=1)
+ c = np.add.reduceat(a, indices, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduceat(a, indices, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # and a few tests for at
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ check = d.copy()
+ a = d.copy().view(A)
+ np.add.at(check, ([0, 1], [0, 2]), 1.)
+ np.add.at(a, ([0, 1], [0, 2]), 1.)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0]})
+ b = np.array(1.).view(A)
+ a = d.copy().view(A)
+ np.add.at(a, ([0, 1], [0, 2]), b)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose(object):
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
@@ -2134,7 +2210,7 @@ class TestComplexFunctions(object):
else:
x = .5
fr = f(x)
- fz = f(np.complex(x))
+ fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
@@ -2203,7 +2279,7 @@ class TestComplexFunctions(object):
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
- atol = 4*np.finfo(np.complex).eps
+ atol = 4*np.finfo(complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
@@ -2310,12 +2386,12 @@ class TestComplexFunctions(object):
self.check_loss_of_precision(np.longcomplex)
-class TestAttributes(TestCase):
+class TestAttributes(object):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
- self.assertTrue(add.ntypes >= 18) # don't fail if types added
- self.assertTrue('ii->i' in add.types)
+ assert_(add.ntypes >= 18) # don't fail if types added
+ assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
@@ -2329,7 +2405,7 @@ class TestAttributes(TestCase):
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-class TestSubclass(TestCase):
+class TestSubclass(object):
def test_subclass_op(self):
@@ -2343,7 +2419,7 @@ class TestSubclass(TestCase):
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
- dtype=np.complex):
+ dtype=complex):
"""
Check for a branch cut in a function.
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 536ad398a..fb3b6577c 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -6,7 +6,7 @@ import platform
import numpy as np
import numpy.core.umath as ncu
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
+ run_module_suite, assert_raises, assert_equal, assert_array_equal,
assert_almost_equal, dec
)
@@ -38,7 +38,7 @@ class TestCexp(object):
yield check, f, 1, 0, np.exp(1), 0, False
yield check, f, 0, 1, np.cos(1), np.sin(1), False
- ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
+ ref = np.exp(1) * complex(np.cos(1), np.sin(1))
yield check, f, 1, 1, ref.real, ref.imag, False
@platform_skip
@@ -73,7 +73,7 @@ class TestCexp(object):
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.inf)))
+ z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
@@ -83,7 +83,7 @@ class TestCexp(object):
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.inf)))
+ z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
@@ -93,7 +93,7 @@ class TestCexp(object):
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.nan)))
+ z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
@@ -103,7 +103,7 @@ class TestCexp(object):
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.nan)))
+ z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
@@ -129,7 +129,7 @@ class TestCexp(object):
yield check, f, np.nan, 0, np.nan, 0
-class TestClog(TestCase):
+class TestClog(object):
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
@@ -150,9 +150,9 @@ class TestClog(TestCase):
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([np.NZERO], dtype=np.complex)
- y = np.complex(-np.inf, np.pi)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.NZERO], dtype=complex)
+ y = complex(-np.inf, np.pi)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -162,9 +162,9 @@ class TestClog(TestCase):
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([0], dtype=np.complex)
- y = np.complex(-np.inf, 0)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([0], dtype=complex)
+ y = complex(-np.inf, 0)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -172,13 +172,13 @@ class TestClog(TestCase):
yl.append(y)
# clog(x + i inf returns +inf + i pi /2, for finite x.
- x = np.array([complex(1, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.5 * np.pi)
+ x = np.array([complex(1, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-1, np.inf)], dtype=np.complex)
+ x = np.array([complex(-1, np.inf)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
@@ -186,9 +186,9 @@ class TestClog(TestCase):
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating- point exception, for finite x.
with np.errstate(invalid='raise'):
- x = np.array([complex(1., np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([complex(1., np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -196,8 +196,8 @@ class TestClog(TestCase):
yl.append(y)
with np.errstate(invalid='raise'):
- x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.inf + 1j * np.nan], dtype=complex)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -205,70 +205,70 @@ class TestClog(TestCase):
yl.append(y)
# clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
- x = np.array([-np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, np.pi)
+ x = np.array([-np.inf + 1j], dtype=complex)
+ y = complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
- x = np.array([np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, 0)
+ x = np.array([np.inf + 1j], dtype=complex)
+ y = complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(- inf + i inf) returns +inf + i3pi /4.
- x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.75 * np.pi)
+ x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + i inf) returns +inf + ipi /4.
- x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.25 * np.pi)
+ x = np.array([complex(np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
- x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.inf, np.nan)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
+ x = np.array([complex(-np.inf, np.nan)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
- x = np.array([complex(np.nan, 1)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, 1)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
- x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.nan, np.inf)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
- x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
- xa = np.array(xl, dtype=np.complex)
- ya = np.array(yl, dtype=np.complex)
+ xa = np.array(xl, dtype=complex)
+ ya = np.array(yl, dtype=complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
@@ -286,7 +286,7 @@ class TestCsqrt(object):
yield check_complex_value, np.sqrt, -1, 0, 0, 1
def test_simple_conjugate(self):
- ref = np.conj(np.sqrt(np.complex(1, 1)))
+ ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
@@ -330,7 +330,7 @@ class TestCsqrt(object):
# csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
- z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
+ z = np.sqrt(np.array(complex(-np.inf, np.nan)))
#Fixme: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
@@ -350,7 +350,7 @@ class TestCsqrt(object):
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
-class TestCpow(TestCase):
+class TestCpow(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
@@ -406,16 +406,16 @@ class TestCabs(object):
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
- x = np.array([1+0j], dtype=np.complex)
+ x = np.array([1+0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(1, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(1, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
@@ -445,9 +445,9 @@ class TestCabs(object):
return np.abs(np.conj(a))
def g(a, b):
- return np.abs(np.complex(a, b))
+ return np.abs(complex(a, b))
- xa = np.array(x, dtype=np.complex)
+ xa = np.array(x, dtype=complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
yield check_real_value, f, x[i], y[i], ref
@@ -527,7 +527,7 @@ def check_real_value(f, x1, y1, x, exact=True):
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
- z2 = np.complex(x2, y2)
+ z2 = complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index ae2beb2a6..8c502ca44 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.compat import unicode
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal)
+ run_module_suite, assert_, assert_equal, assert_array_equal)
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
@@ -68,24 +68,24 @@ def test_string_cast():
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == u'')
+ assert_(ua_scalar == u'')
# Encode to ascii and double check
- self.assertTrue(ua_scalar.encode('ascii') == b'')
+ assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
else:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
@@ -105,47 +105,47 @@ class create_zeros(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_zeros_1(create_zeros, TestCase):
+class TestCreateZeros_1(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
-class test_create_zeros_2(create_zeros, TestCase):
+class TestCreateZeros_2(CreateZeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
-class test_create_zeros_1009(create_zeros, TestCase):
+class TestCreateZeros_1009(CreateZeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
-class create_values(object):
+class CreateValues(object):
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
@@ -165,37 +165,37 @@ class create_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_values_1_ucs2(create_values, TestCase):
+class TestCreateValues_1_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_create_values_1_ucs4(create_values, TestCase):
+class TestCreateValues_1_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_create_values_2_ucs2(create_values, TestCase):
+class TestCreateValues_2_UCS2(CreateValues):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_create_values_2_ucs4(create_values, TestCase):
+class TestCreateValues_2_UCS4(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_create_values_1009_ucs2(create_values, TestCase):
+class TestCreateValues_1009_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_create_values_1009_ucs4(create_values, TestCase):
+class TestCreateValues_1009_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -205,32 +205,32 @@ class test_create_values_1009_ucs4(create_values, TestCase):
# Assignment tests
############################################################
-class assign_values(object):
+class AssignValues(object):
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
@@ -255,37 +255,37 @@ class assign_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_assign_values_1_ucs2(assign_values, TestCase):
+class TestAssignValues_1_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_assign_values_1_ucs4(assign_values, TestCase):
+class TestAssignValues_1_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_assign_values_2_ucs2(assign_values, TestCase):
+class TestAssignValues_2_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_assign_values_2_ucs4(assign_values, TestCase):
+class TestAssignValues_2_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_assign_values_1009_ucs2(assign_values, TestCase):
+class TestAssignValues_1009_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_assign_values_1009_ucs4(assign_values, TestCase):
+class TestAssignValues_1009_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -295,7 +295,7 @@ class test_assign_values_1009_ucs4(assign_values, TestCase):
# Byteorder tests
############################################################
-class byteorder_values:
+class ByteorderValues(object):
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
@@ -305,7 +305,7 @@ class byteorder_values:
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
- self.assertTrue(ua[()] != ua2[()])
+ assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -314,8 +314,8 @@ class byteorder_values:
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue((ua != ua2).all())
- self.assertTrue(ua[-1] != ua2[-1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -325,8 +325,8 @@ class byteorder_values:
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue((ua != ua2).all())
- self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -338,8 +338,8 @@ class byteorder_values:
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
- self.assertTrue((ua == ua2).all())
- self.assertTrue(ua[-1] == ua2[-1])
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -353,45 +353,45 @@ class byteorder_values:
# Cast to a longer type with zero padding
longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
ua2 = ua.astype(dtype=longer_type)
- self.assertTrue((ua == ua2).all())
- self.assertTrue(ua[-1] == ua2[-1])
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
# Cast back again with truncating:
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
-class test_byteorder_1_ucs2(byteorder_values, TestCase):
+class TestByteorder_1_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_byteorder_1_ucs4(byteorder_values, TestCase):
+class TestByteorder_1_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_byteorder_2_ucs2(byteorder_values, TestCase):
+class TestByteorder_2_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_byteorder_2_ucs4(byteorder_values, TestCase):
+class TestByteorder_2_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_byteorder_1009_ucs2(byteorder_values, TestCase):
+class TestByteorder_1009_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_byteorder_1009_ucs4(byteorder_values, TestCase):
+class TestByteorder_1009_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 73328224e..77aace249 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -283,7 +283,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
if num is None:
try:
flags = [x.strip().upper() for x in flags]
- except:
+ except Exception:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
try:
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index e7557b3e6..bbc3923bd 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -80,6 +80,7 @@ def _needs_build(obj, cc_args, extra_postargs, pp_opts):
return False
+
def replace_method(klass, method_name, func):
if sys.version_info[0] < 3:
m = types.MethodType(func, None, klass)
@@ -88,6 +89,25 @@ def replace_method(klass, method_name, func):
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
+
+######################################################################
+## Method that subclasses may redefine. But don't call this method,
+## it is private to the CCompiler class and may return unexpected
+## results if used elsewhere. So, you have been warned.
+
+def CCompiler_find_executables(self):
+ """
+ Does nothing here, but is called by the get_version method and can be
+ overridden by subclasses. In particular it is redefined in the `FCompiler`
+ class where more documentation can be found.
+
+ """
+ pass
+
+
+replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
+
+
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
"""
@@ -417,7 +437,7 @@ def CCompiler_show_customization(self):
log.info("compiler '%s' is set to %s" % (attrname, attr))
try:
self.get_version()
- except:
+ except Exception:
pass
if log._global_log.threshold<2:
print('*'*80)
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 1c868cf6c..910493a77 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -7,7 +7,7 @@ from glob import glob
import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
- DistutilsFileError
+ DistutilsFileError
from numpy.distutils import log
from distutils.dep_util import newer_group
@@ -19,9 +19,10 @@ from numpy.distutils.misc_util import filter_sources, has_f_sources,\
_l = old_build_clib.user_options
for _i in range(len(_l)):
if _l[_i][0] in ['build-clib', 'build-temp']:
- _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:]
+ _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
#
+
class build_clib(old_build_clib):
description = "build C/C++/F libraries used by Python extensions"
@@ -32,7 +33,7 @@ class build_clib(old_build_clib):
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
- ]
+ ]
boolean_options = old_build_clib.boolean_options + ['inplace']
@@ -75,7 +76,8 @@ class build_clib(old_build_clib):
for (lib_name, build_info) in self.libraries:
l = build_info.get('language', None)
- if l and l not in languages: languages.append(l)
+ if l and l not in languages:
+ languages.append(l)
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
@@ -94,11 +96,11 @@ class build_clib(old_build_clib):
if self.have_f_sources():
from numpy.distutils.fcompiler import new_fcompiler
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force,
- requiref90='f90' in languages,
- c_compiler=self.compiler)
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90='f90' in languages,
+ c_compiler=self.compiler)
if self._f_compiler is not None:
self._f_compiler.customize(self.distribution)
@@ -114,10 +116,10 @@ class build_clib(old_build_clib):
self.build_libraries(self.libraries)
if self.inplace:
- for l in self.distribution.installed_libraries:
+ for l in self.distribution.installed_libraries:
libname = self.compiler.library_filename(l.name)
source = os.path.join(self.build_clib, libname)
- target = os.path.join(l.target_dir, libname)
+ target = os.path.join(l.target_dir, libname)
self.mkpath(l.target_dir)
shutil.copy(source, target)
@@ -140,21 +142,25 @@ class build_clib(old_build_clib):
sources = build_info.get('sources')
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
- "'sources' must be present and must be " +
- "a list of source filenames") % lib_name)
+ "'sources' must be present and must be " +
+ "a list of source filenames") % lib_name)
sources = list(sources)
c_sources, cxx_sources, f_sources, fmodule_sources \
- = filter_sources(sources)
+ = filter_sources(sources)
requiref90 = not not fmodule_sources or \
- build_info.get('language', 'c')=='f90'
+ build_info.get('language', 'c') == 'f90'
# save source type information so that build_ext can use it.
source_languages = []
- if c_sources: source_languages.append('c')
- if cxx_sources: source_languages.append('c++')
- if requiref90: source_languages.append('f90')
- elif f_sources: source_languages.append('f77')
+ if c_sources:
+ source_languages.append('c')
+ if cxx_sources:
+ source_languages.append('c++')
+ if requiref90:
+ source_languages.append('f90')
+ elif f_sources:
+ source_languages.append('f77')
build_info['source_languages'] = source_languages
lib_file = compiler.library_filename(lib_name,
@@ -168,8 +174,8 @@ class build_clib(old_build_clib):
config_fc = build_info.get('config_fc', {})
if fcompiler is not None and config_fc:
- log.info('using additional config_fc from setup script '\
- 'for fortran compiler: %s' \
+ log.info('using additional config_fc from setup script '
+ 'for fortran compiler: %s'
% (config_fc,))
from numpy.distutils.fcompiler import new_fcompiler
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
@@ -186,12 +192,14 @@ class build_clib(old_build_clib):
# check availability of Fortran compilers
if (f_sources or fmodule_sources) and fcompiler is None:
- raise DistutilsError("library %s has Fortran sources"\
- " but no Fortran compiler found" % (lib_name))
+ raise DistutilsError("library %s has Fortran sources"
+ " but no Fortran compiler found" % (lib_name))
if fcompiler is not None:
- fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []
- fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []
+ fcompiler.extra_f77_compile_args = build_info.get(
+ 'extra_f77_compile_args') or []
+ fcompiler.extra_f90_compile_args = build_info.get(
+ 'extra_f90_compile_args') or []
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
@@ -203,9 +211,10 @@ class build_clib(old_build_clib):
# where compiled F90 module files are:
module_dirs = build_info.get('module_dirs') or []
module_build_dir = os.path.dirname(lib_file)
- if requiref90: self.mkpath(module_build_dir)
+ if requiref90:
+ self.mkpath(module_build_dir)
- if compiler.compiler_type=='msvc':
+ if compiler.compiler_type == 'msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
@@ -239,7 +248,7 @@ class build_clib(old_build_clib):
if requiref90:
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
- extra_postargs += fcompiler.module_options(\
+ extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
if fmodule_sources:
@@ -257,14 +266,14 @@ class build_clib(old_build_clib):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
- if os.path.abspath(f)==os.path.abspath(t):
+ if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
- log.warn('failed to move %r to %r' \
+ log.warn('failed to move %r to %r'
% (f, module_build_dir))
if f_sources:
@@ -278,13 +287,32 @@ class build_clib(old_build_clib):
else:
f_objects = []
- objects.extend(f_objects)
-
- # assume that default linker is suitable for
- # linking Fortran object files
- compiler.create_static_lib(objects, lib_name,
- output_dir=self.build_clib,
- debug=self.debug)
+ if f_objects and not fcompiler.can_ccompiler_link(compiler):
+ # Default linker cannot link Fortran object files, and results
+ # need to be wrapped later. Instead of creating a real static
+ # library, just keep track of the object files.
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.fobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
+
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.cobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in objects))
+
+ # create empty "library" file for dependency tracking
+ lib_fname = os.path.join(self.build_clib,
+ lib_name + compiler.static_lib_extension)
+ with open(lib_fname, 'wb') as f:
+ pass
+ else:
+ # assume that default linker is suitable for
+ # linking Fortran object files
+ objects.extend(f_objects)
+ compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
# fix library dependencies
clib_libraries = build_info.get('libraries', [])
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 0fa52a281..d935a3303 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -5,27 +5,25 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import shutil
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
- DistutilsError
+ DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
-from numpy.distutils.system_info import combine_paths
+from numpy.distutils.system_info import combine_paths, system_info
from numpy.distutils.misc_util import filter_sources, has_f_sources, \
- has_cxx_sources, get_ext_source_files, \
- get_numpy_include_dirs, is_sequence, get_build_architecture, \
- msvc_version
+ has_cxx_sources, get_ext_source_files, \
+ get_numpy_include_dirs, is_sequence, get_build_architecture, \
+ msvc_version
from numpy.distutils.command.config_compiler import show_fortran_compilers
-try:
- set
-except NameError:
- from sets import Set as set
+
class build_ext (old_build_ext):
@@ -36,12 +34,12 @@ class build_ext (old_build_ext):
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
- ]
+ ]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
- ]
+ ]
def initialize_options(self):
old_build_ext.initialize_options(self)
@@ -84,11 +82,13 @@ class build_ext (old_build_ext):
if self.distribution.has_c_libraries():
if self.inplace:
if self.distribution.have_run.get('build_clib'):
- log.warn('build_clib already run, it is too late to ' \
- 'ensure in-place build of build_clib')
- build_clib = self.distribution.get_command_obj('build_clib')
+ log.warn('build_clib already run, it is too late to '
+ 'ensure in-place build of build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
else:
- build_clib = self.distribution.get_command_obj('build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
build_clib.inplace = 1
build_clib.ensure_finalized()
build_clib.run()
@@ -119,13 +119,18 @@ class build_ext (old_build_ext):
self.compiler.customize_cmd(self)
self.compiler.show_customization()
+ # Setup directory for storing generated extra DLL files on Windows
+ self.extra_dll_dir = os.path.join(self.build_temp, 'extra-dll')
+ if not os.path.isdir(self.extra_dll_dir):
+ os.makedirs(self.extra_dll_dir)
+
# Create mapping of libraries built by build_clib:
clibs = {}
if build_clib is not None:
for libname, build_info in build_clib.libraries or []:
if libname in clibs and clibs[libname] != build_info:
- log.warn('library %r defined more than once,'\
- ' overwriting build_info\n%s... \nwith\n%s...' \
+ log.warn('library %r defined more than once,'
+ ' overwriting build_info\n%s... \nwith\n%s...'
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
clibs[libname] = build_info
# .. and distribution libraries:
@@ -181,7 +186,7 @@ class build_ext (old_build_ext):
elif 'f77' in ext_languages:
ext_language = 'f77'
else:
- ext_language = 'c' # default
+ ext_language = 'c' # default
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
@@ -196,9 +201,9 @@ class build_ext (old_build_ext):
# Initialize C++ compiler:
if need_cxx_compiler:
self._cxx_compiler = new_compiler(compiler=compiler_type,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force)
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
compiler = self._cxx_compiler
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
compiler.customize_cmd(self)
@@ -238,7 +243,7 @@ class build_ext (old_build_ext):
dry_run=self.dry_run,
force=self.force,
requiref90=True,
- c_compiler = self.compiler)
+ c_compiler=self.compiler)
fcompiler = self._f90_compiler
if fcompiler:
ctype = fcompiler.compiler_type
@@ -256,6 +261,16 @@ class build_ext (old_build_ext):
# Build extensions
self.build_extensions()
+ # Copy over any extra DLL files
+ runtime_lib_dir = os.path.join(
+ self.build_lib, self.distribution.get_name(), 'extra-dll')
+ for fn in os.listdir(self.extra_dll_dir):
+ if not fn.lower().endswith('.dll'):
+ continue
+ if not os.path.isdir(runtime_lib_dir):
+ os.makedirs(runtime_lib_dir)
+ runtime_lib = os.path.join(self.extra_dll_dir, fn)
+ copy_file(runtime_lib, runtime_lib_dir)
def swig_sources(self, sources):
# Do nothing. Swig sources have beed handled in build_src command.
@@ -299,11 +314,9 @@ class build_ext (old_build_ext):
macros.append((undef,))
c_sources, cxx_sources, f_sources, fmodule_sources = \
- filter_sources(ext.sources)
+ filter_sources(ext.sources)
-
-
- if self.compiler.compiler_type=='msvc':
+ if self.compiler.compiler_type == 'msvc':
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
@@ -313,32 +326,34 @@ class build_ext (old_build_ext):
cxx_sources = []
# Set Fortran/C++ compilers for compilation and linking.
- if ext.language=='f90':
+ if ext.language == 'f90':
fcompiler = self._f90_compiler
- elif ext.language=='f77':
+ elif ext.language == 'f77':
fcompiler = self._f77_compiler
- else: # in case ext.language is c++, for instance
+ else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
- fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else []
- fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else []
+ fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
+ ext, 'extra_f77_compile_args') else []
+ fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
+ ext, 'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
if cxx_sources and cxx_compiler is None:
- raise DistutilsError("extension %r has C++ sources" \
- "but no C++ compiler found" % (ext.name))
+            raise DistutilsError("extension %r has C++ sources "
+                                 "but no C++ compiler found" % (ext.name))
if (f_sources or fmodule_sources) and fcompiler is None:
- raise DistutilsError("extension %r has Fortran sources " \
- "but no Fortran compiler found" % (ext.name))
+ raise DistutilsError("extension %r has Fortran sources "
+ "but no Fortran compiler found" % (ext.name))
if ext.language in ['f77', 'f90'] and fcompiler is None:
- self.warn("extension %r has Fortran libraries " \
- "but no Fortran linker found, using default linker" % (ext.name))
- if ext.language=='c++' and cxx_compiler is None:
- self.warn("extension %r has C++ libraries " \
- "but no C++ linker found, using default linker" % (ext.name))
+ self.warn("extension %r has Fortran libraries "
+ "but no Fortran linker found, using default linker" % (ext.name))
+ if ext.language == 'c++' and cxx_compiler is None:
+ self.warn("extension %r has C++ libraries "
+ "but no C++ linker found, using default linker" % (ext.name))
- kws = {'depends':ext.depends}
+ kws = {'depends': ext.depends}
output_dir = self.build_temp
include_dirs = ext.include_dirs + get_numpy_include_dirs()
@@ -391,7 +406,7 @@ class build_ext (old_build_ext):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
- if os.path.abspath(f)==os.path.abspath(t):
+ if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
@@ -410,7 +425,12 @@ class build_ext (old_build_ext):
extra_postargs=extra_postargs,
depends=ext.depends)
- objects = c_objects + f_objects
+ if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
+ unlinkable_fobjects = f_objects
+ objects = c_objects
+ else:
+ unlinkable_fobjects = []
+ objects = c_objects + f_objects
if ext.extra_objects:
objects.extend(ext.extra_objects)
@@ -423,13 +443,20 @@ class build_ext (old_build_ext):
if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
# expand libraries with fcompiler libraries as we are
# not using fcompiler linker
- self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs)
+ self._libs_with_msvc_and_fortran(
+ fcompiler, libraries, library_dirs)
elif ext.language in ['f77', 'f90'] and fcompiler is not None:
linker = fcompiler.link_shared_object
- if ext.language=='c++' and cxx_compiler is not None:
+ if ext.language == 'c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
+ if fcompiler is not None:
+ objects, libraries = self._process_unlinkable_fobjects(
+ objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects)
+
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
@@ -444,23 +471,59 @@ class build_ext (old_build_ext):
build_src = self.get_finalized_command("build_src").build_src
build_clib = self.get_finalized_command("build_clib").build_clib
objects = self.compiler.compile([os.path.join(build_src,
- "gfortran_vs2003_hack.c")],
- output_dir=self.build_temp)
- self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+ "gfortran_vs2003_hack.c")],
+ output_dir=self.build_temp)
+ self.compiler.create_static_lib(
+ objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+
+ def _process_unlinkable_fobjects(self, objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects):
+ libraries = list(libraries)
+ objects = list(objects)
+ unlinkable_fobjects = list(unlinkable_fobjects)
+
+ # Expand possible fake static libraries to objects
+ for lib in list(libraries):
+ for libdir in library_dirs:
+ fake_lib = os.path.join(libdir, lib + '.fobjects')
+ if os.path.isfile(fake_lib):
+ # Replace fake static library
+ libraries.remove(lib)
+ with open(fake_lib, 'r') as f:
+ unlinkable_fobjects.extend(f.read().splitlines())
+
+ # Expand C objects
+ c_lib = os.path.join(libdir, lib + '.cobjects')
+ with open(c_lib, 'r') as f:
+ objects.extend(f.read().splitlines())
+
+ # Wrap unlinkable objects to a linkable one
+ if unlinkable_fobjects:
+ fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects]
+ wrapped = fcompiler.wrap_unlinkable_objects(
+ fobjects, output_dir=self.build_temp,
+ extra_dll_dir=self.extra_dll_dir)
+ objects.extend(wrapped)
+
+ return objects, libraries
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
c_library_dirs):
- if fcompiler is None: return
+ if fcompiler is None:
+ return
for libname in c_libraries:
- if libname.startswith('msvc'): continue
+ if libname.startswith('msvc'):
+ continue
fileexists = False
for libdir in c_library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
- if fileexists: continue
+ if fileexists:
+ continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in c_library_dirs:
@@ -474,7 +537,8 @@ class build_ext (old_build_ext):
c_library_dirs.append(self.build_temp)
fileexists = True
break
- if fileexists: continue
+ if fileexists:
+ continue
log.warn('could not find library %r in directories %s'
% (libname, c_library_dirs))
@@ -502,14 +566,14 @@ class build_ext (old_build_ext):
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
- def get_source_files (self):
+ def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
- def get_outputs (self):
+ def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index e43fb631b..a7368a7ae 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -441,7 +441,7 @@ int main (void)
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
- except:
+ except Exception:
output = grabber.data
grabber.restore()
raise
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 652826376..580299347 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -35,7 +35,7 @@ def getoutput(cmd, successful_status=(0,), stacklevel=1):
except EnvironmentError:
e = get_exception()
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
- return False, output
+ return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
return True, output
return False, output
@@ -75,7 +75,7 @@ class CPUInfoBase(object):
def _try_call(self, func):
try:
return func()
- except:
+ except Exception:
pass
def __getattr__(self, name):
@@ -336,7 +336,7 @@ class IRIXCPUInfo(CPUInfoBase):
def get_ip(self):
try: return self.info.get('MACHINE')
- except: pass
+ except Exception: pass
def __machine(self, n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
@@ -523,7 +523,7 @@ class Win32CPUInfo(CPUInfoBase):
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
- except:
+ except Exception:
print(sys.exc_info()[1], '(ignoring)')
self.__class__.info = info
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 8e11019cf..1d558319d 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -22,10 +22,6 @@ import os
import sys
import re
import types
-try:
- set
-except NameError:
- from sets import Set as set
from numpy.compat import open_latin1
@@ -434,6 +430,7 @@ class FCompiler(CCompiler):
raise CompilerNotFound()
return version
+
############################################################
## Public methods:
@@ -701,6 +698,38 @@ class FCompiler(CCompiler):
else:
return hook_name()
+ def can_ccompiler_link(self, ccompiler):
+ """
+ Check if the given C compiler can link objects produced by
+ this compiler.
+ """
+ return True
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+
+ Parameters
+ ----------
+ objects : list
+ List of object files to include.
+ output_dir : str
+ Output directory to place generated object files.
+ extra_dll_dir : str
+ Output directory to place extra DLL files that need to be
+ included on Windows.
+
+ Returns
+ -------
+ converted_objects : list of str
+ List of converted object files.
+ Note that the number of output files is not necessarily
+ the same as inputs.
+
+ """
+ raise NotImplementedError()
+
## class FCompiler
_default_compilers = (
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 4649fd743..10c60dc6f 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -6,37 +6,43 @@ import sys
import warnings
import platform
import tempfile
+import hashlib
+import base64
from subprocess import Popen, PIPE, STDOUT
-
+from copy import copy
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
-from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
+from numpy.distutils.system_info import system_info
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
+
+
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
+
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
- compiler_aliases = ('g77',)
+ compiler_aliases = ('g77', )
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
- version_string = version_string[version_string.find('\n')+1:]
+ version_string = version_string[version_string.find('\n') + 1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
@@ -60,7 +66,8 @@ class GnuFCompiler(FCompiler):
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
- m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
+ m = re.search(
+ r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
@@ -91,7 +98,7 @@ class GnuFCompiler(FCompiler):
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
- }
+ }
module_dir_switch = None
module_include_switch = None
@@ -129,8 +136,8 @@ class GnuFCompiler(FCompiler):
try:
get_makefile_filename = sc.get_makefile_filename
except AttributeError:
- pass # i.e. PyPy
- else:
+ pass # i.e. PyPy
+ else:
filename = get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
@@ -153,9 +160,8 @@ class GnuFCompiler(FCompiler):
return opt
def get_libgcc_dir(self):
- status, output = exec_command(self.compiler_f77 +
- ['-print-libgcc-file-name'],
- use_tee=0)
+ status, output = exec_command(
+ self.compiler_f77 + ['-print-libgcc-file-name'], use_tee=0)
if not status:
return os.path.dirname(output)
return None
@@ -170,7 +176,7 @@ class GnuFCompiler(FCompiler):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
- root = os.path.join(d, *((os.pardir,)*4))
+ root = os.path.join(d, *((os.pardir, ) * 4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
@@ -193,13 +199,8 @@ class GnuFCompiler(FCompiler):
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
- c_compiler.compiler_type == 'msvc':
- # the following code is not needed (read: breaks) when using MinGW
- # in case want to link F77 compiled code with MSVC
+ c_compiler.compiler_type == 'msvc':
opt.append('gcc')
- runtime_lib = msvc_runtime_library()
- if runtime_lib:
- opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
@@ -241,7 +242,7 @@ class GnuFCompiler(FCompiler):
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
- compiler_aliases = ('gfortran',)
+ compiler_aliases = ('gfortran', )
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
@@ -256,8 +257,10 @@ class Gnu95FCompiler(GnuFCompiler):
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
- for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
- 'compiler_fix', 'linker_so', 'linker_exe']:
+ for key in [
+ 'version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe'
+ ]:
self.executables[key].append('-mno-cygwin')
return v
@@ -274,7 +277,7 @@ class Gnu95FCompiler(GnuFCompiler):
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
- }
+ }
module_dir_switch = '-J'
module_include_switch = '-I'
@@ -319,7 +322,7 @@ class Gnu95FCompiler(GnuFCompiler):
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
- root = os.path.join(d, *((os.pardir,)*4))
+ root = os.path.join(d, *((os.pardir, ) * 4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
@@ -335,32 +338,148 @@ class Gnu95FCompiler(GnuFCompiler):
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
- opt.insert(i+1, "mingwex")
- opt.insert(i+1, "mingw32")
- # XXX: fix this mess, does not work for mingw
- if is_win64():
- c_compiler = self.c_compiler
- if c_compiler and c_compiler.compiler_type == "msvc":
- return []
- else:
- pass
+ opt.insert(i + 1, "mingwex")
+ opt.insert(i + 1, "mingw32")
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ return []
+ else:
+ pass
return opt
def get_target(self):
- status, output = exec_command(self.compiler_f77 +
- ['-v'],
- use_tee=0)
+ status, output = exec_command(self.compiler_f77 + ['-v'], use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
- def get_flags_opt(self):
+ def _hash_files(self, filenames):
+ h = hashlib.sha1()
+ for fn in filenames:
+ with open(fn, 'rb') as f:
+ while True:
+ block = f.read(131072)
+ if not block:
+ break
+ h.update(block)
+ text = base64.b32encode(h.digest())
+ if sys.version_info[0] >= 3:
+ text = text.decode('ascii')
+ return text.rstrip('=')
+
+ def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
+ chained_dlls, is_archive):
+ """Create a wrapper shared library for the given objects
+
+ Return an MSVC-compatible lib
+ """
+
+ c_compiler = self.c_compiler
+ if c_compiler.compiler_type != "msvc":
+ raise ValueError("This method only supports MSVC")
+
+ object_hash = self._hash_files(list(objects) + list(chained_dlls))
+
+ if is_win64():
+ tag = 'win_amd64'
+ else:
+ tag = 'win32'
+
+ basename = 'lib' + os.path.splitext(
+ os.path.basename(objects[0]))[0][:8]
+ root_name = basename + '.' + object_hash + '.gfortran-' + tag
+ dll_name = root_name + '.dll'
+ def_name = root_name + '.def'
+ lib_name = root_name + '.lib'
+ dll_path = os.path.join(extra_dll_dir, dll_name)
+ def_path = os.path.join(output_dir, def_name)
+ lib_path = os.path.join(output_dir, lib_name)
+
+ if os.path.isfile(lib_path):
+ # Nothing to do
+ return lib_path, dll_path
+
+ if is_archive:
+ objects = (["-Wl,--whole-archive"] + list(objects) +
+ ["-Wl,--no-whole-archive"])
+ self.link_shared_object(
+ objects,
+ dll_name,
+ output_dir=extra_dll_dir,
+ extra_postargs=list(chained_dlls) + [
+ '-Wl,--allow-multiple-definition',
+ '-Wl,--output-def,' + def_path,
+ '-Wl,--export-all-symbols',
+ '-Wl,--enable-auto-import',
+ '-static',
+ '-mlong-double-64',
+ ])
+
+ # No PowerPC!
if is_win64():
- return ['-O0']
+ specifier = '/MACHINE:X64'
+ else:
+ specifier = '/MACHINE:X86'
+
+ # MSVC specific code
+ lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
+ if not c_compiler.initialized:
+ c_compiler.initialize()
+ c_compiler.spawn([c_compiler.lib] + lib_args)
+
+ return lib_path, dll_path
+
+ def can_ccompiler_link(self, compiler):
+ # MSVC cannot link objects compiled by GNU fortran
+ return compiler.compiler_type not in ("msvc", )
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+ """
+ if self.c_compiler.compiler_type == "msvc":
+ # Compile a DLL and return the lib for the DLL as
+ # the object. Also keep track of previous DLLs that
+ # we have compiled so that we can link against them.
+
+ # If there are .a archives, assume they are self-contained
+ # static libraries, and build separate DLLs for each
+ archives = []
+ plain_objects = []
+ for obj in objects:
+ if obj.lower().endswith('.a'):
+ archives.append(obj)
+ else:
+ plain_objects.append(obj)
+
+ chained_libs = []
+ chained_dlls = []
+ for archive in archives[::-1]:
+ lib, dll = self._link_wrapper_lib(
+ [archive],
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=True)
+ chained_libs.insert(0, lib)
+ chained_dlls.insert(0, dll)
+
+ if not plain_objects:
+ return chained_libs
+
+ lib, dll = self._link_wrapper_lib(
+ plain_objects,
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=False)
+ return [lib] + chained_libs
else:
- return GnuFCompiler.get_flags_opt(self)
+ raise ValueError("Unsupported C compiler")
+
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
@@ -382,6 +501,7 @@ def _can_target(cmd, arch):
os.remove(filename)
return False
+
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index eb6150201..e3b922edc 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -57,8 +57,8 @@ class IntelFCompiler(BaseIntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
- return ['-xhost -fp-model strict -O1 -{}'.format(mpopt)]
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ return ['-fp-model strict -O1 -{}'.format(mpopt)]
def get_flags_arch(self):
return []
@@ -123,7 +123,7 @@ class IntelEM64TFCompiler(IntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
return ['-fp-model strict -O1 -{}'.format(mpopt)]
def get_flags_arch(self):
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 3b7756b59..3386775ee 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -19,7 +19,7 @@ class IntelCCompiler(UnixCCompiler):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
@@ -59,7 +59,7 @@ class IntelEM64TCCompiler(UnixCCompiler):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 870df0693..90b4def04 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -251,18 +251,21 @@ def find_python_dll():
# We can't do much here:
# - find it in the virtualenv (sys.prefix)
# - find it in python main dir (sys.base_prefix, if in a virtualenv)
+ # - sys.real_prefix is main dir for virtualenvs in Python 2.7
# - in system32,
# - ortherwise (Sxs), I don't know how to get it.
stems = [sys.prefix]
- if sys.base_prefix != sys.prefix:
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
sub_dirs = ['', 'lib', 'bin']
# generate possible combinations of directory trees and sub-directories
lib_dirs = []
for stem in stems:
for folder in sub_dirs:
- lib_dirs = os.path.join(stem, folder)
+ lib_dirs.append(os.path.join(stem, folder))
# add system directory as well
if 'SYSTEMROOT' in os.environ:
@@ -426,8 +429,10 @@ def _check_for_import_lib():
# directory trees that may contain the library
stems = [sys.prefix]
- if sys.base_prefix != sys.prefix:
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
# possible subdirectories within those trees where it is placed
sub_dirs = ['libs', 'lib']
@@ -481,8 +486,15 @@ def _build_import_library_x86():
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
if not os.path.isfile(lib_file):
# didn't find library file in virtualenv, try base distribution, too,
- # and use that instead if found there
- base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ # and use that instead if found there. for Python 2.7 venvs, the base
+ # directory is in attribute real_prefix instead of base_prefix.
+ if hasattr(sys, 'base_prefix'):
+ base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ elif hasattr(sys, 'real_prefix'):
+ base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
+ else:
+ base_lib = '' # os.path.isfile('') == False
+
if os.path.isfile(base_lib):
lib_file = base_lib
else:
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 21aaece70..102af874f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -33,11 +33,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-try:
- set
-except NameError:
- from sets import Set as set
-
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
@@ -461,7 +456,7 @@ def is_sequence(seq):
return False
try:
len(seq)
- except:
+ except Exception:
return False
return True
@@ -1064,24 +1059,25 @@ class Configuration(object):
Notes
-----
- Rules for installation paths:
- foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
- (gun, foo/bar) -> parent/gun
- foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
- (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
- (gun/*, foo/*) -> parent/gun/a, parent/gun/b
- /foo/bar -> (bar, /foo/bar) -> parent/bar
- (gun, /foo/bar) -> parent/gun
- (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+ Rules for installation paths::
+
+ foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+ (gun, foo/bar) -> parent/gun
+ foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+ (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
+ (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+ /foo/bar -> (bar, /foo/bar) -> parent/bar
+ (gun, /foo/bar) -> parent/gun
+ (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
- fun/bar/car.dat::
+ fun/bar/car.dat:
- >>> self.add_data_dir('fun') #doctest: +SKIP
- >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
- >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+ >>> self.add_data_dir('fun') #doctest: +SKIP
+ >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
+ >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
@@ -1097,6 +1093,7 @@ class Configuration(object):
gun/
foo.dat
car.dat
+
"""
if is_sequence(data_path):
d, data_path = data_path
@@ -1836,7 +1833,7 @@ class Configuration(object):
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
- except:
+ except Exception:
pass
os.chdir(cwd)
if m:
@@ -1873,7 +1870,7 @@ class Configuration(object):
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
- except:
+ except Exception:
pass
os.chdir(cwd)
if m:
@@ -2068,7 +2065,6 @@ class Configuration(object):
"""
self.py_modules.append((self.name, name, generate_config_py))
-
def get_info(self,*names):
"""Get resources information.
@@ -2282,9 +2278,18 @@ def generate_config_py(target):
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
- f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
+
+ # For gfortran+msvc combination, extra shared libraries may exist
+ f.write("""
+import os
+extra_dll_dir = os.path.join(os.path.dirname(__file__), 'extra-dll')
+if os.path.isdir(extra_dll_dir):
+ os.environ["PATH"] += os.pathsep + extra_dll_dir
+""")
+
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index 8d0c92ed3..e9cc334a5 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -11,15 +11,15 @@ def _merge(old, new):
Here `old` is the environment string before the base class initialize
function is called and `new` is the string after the call. The new string
- will be a fixed string if it is not obtained from the current enviroment,
- or the same as the old string if obtained from the same enviroment. The aim
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
here is not to append the new string if it is already contained in the old
string so as to limit the growth of the environment string.
Parameters
----------
old : string
- Previous enviroment string.
+ Previous environment string.
new : string
New environment string.
@@ -29,10 +29,10 @@ def _merge(old, new):
Updated environment string.
"""
- if new in old:
- return old
if not old:
return new
+ if new in old:
+ return old
# Neither new nor old is empty. Give old priority.
return ';'.join([old, new])
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 0fba86589..683b15daa 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -126,6 +126,7 @@ import os
import re
import copy
import warnings
+import atexit
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
@@ -684,9 +685,14 @@ class system_info(object):
return self.get_libs(key, '')
def library_extensions(self):
- static_exts = ['.a']
+ c = distutils.ccompiler.new_compiler()
+ c.customize('')
+ static_exts = []
+ if c.compiler_type != 'msvc':
+ # MSVC doesn't understand binutils
+ static_exts.append('.a')
if sys.platform == 'win32':
- static_exts.append('.lib') # .lib is used by MSVC
+ static_exts.append('.lib') # .lib is used by MSVC and others
if self.search_static_first:
exts = static_exts + [so_ext]
else:
@@ -1739,12 +1745,29 @@ class openblas_info(blas_info):
return True
def calc_info(self):
+ c = distutils.ccompiler.new_compiler()
+ c.customize('')
+
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
+
info = self.check_libs(lib_dirs, openblas_libs, [])
+
+ if c.compiler_type == "msvc" and info is None:
+ from numpy.distutils.fcompiler import new_fcompiler
+ f = new_fcompiler(c_compiler=c)
+ if f and f.compiler_type == 'gnu95':
+ # Try gfortran-compatible library files
+ info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
+ # Skip lapack check, we'd need build_ext to do it
+ assume_lapack = True
+ elif info:
+ assume_lapack = False
+ info['language'] = 'c'
+
if info is None:
return
@@ -1752,13 +1775,42 @@ class openblas_info(blas_info):
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
- if not self.check_embedded_lapack(info):
+ if not (assume_lapack or self.check_embedded_lapack(info)):
return
- info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
+ def check_msvc_gfortran_libs(self, library_dirs, libraries):
+ # First, find the full path to each library directory
+ library_paths = []
+ for library in libraries:
+ for library_dir in library_dirs:
+ # MinGW static ext will be .a
+ fullpath = os.path.join(library_dir, library + '.a')
+ if os.path.isfile(fullpath):
+ library_paths.append(fullpath)
+ break
+ else:
+ return None
+
+ # Generate numpy.distutils virtual static library file
+ tmpdir = os.path.join(os.getcwd(), 'build', 'openblas')
+ if not os.path.isdir(tmpdir):
+ os.makedirs(tmpdir)
+
+ info = {'library_dirs': [tmpdir],
+ 'libraries': ['openblas'],
+ 'language': 'f77'}
+
+ fake_lib_file = os.path.join(tmpdir, 'openblas.fobjects')
+ fake_clib_file = os.path.join(tmpdir, 'openblas.cobjects')
+ with open(fake_lib_file, 'w') as f:
+ f.write("\n".join(library_paths))
+ with open(fake_clib_file, 'w') as f:
+ pass
+
+ return info
class openblas_lapack_info(openblas_info):
section = 'openblas'
@@ -1770,6 +1822,7 @@ class openblas_lapack_info(openblas_info):
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
+
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
@@ -1782,8 +1835,10 @@ class openblas_lapack_info(openblas_info):
# Add the additional "extra" arguments
try:
extra_args = info['extra_link_args']
- except:
+ except Exception:
extra_args = []
+ if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc":
+ extra_args.append("/MANIFEST")
try:
with open(src, 'wt') as f:
f.write(s)
diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/__init__.py
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index eccc47124..5e7b3f3e8 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -6,7 +6,7 @@ from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
-from numpy.testing import TestCase, run_module_suite, tempdir
+from numpy.testing import run_module_suite, tempdir, assert_
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
@@ -94,94 +94,94 @@ def test_exec_command_stderr():
exec_command.exec_command("cd '.'")
-class TestExecCommand(TestCase):
- def setUp(self):
+class TestExecCommand(object):
+ def setup(self):
self.pyexe = get_pythonexe()
def check_nt(self, **kws):
- s, o = exec_command.exec_command('echo path=%path%')
- self.assertEqual(s, 0)
- self.assertNotEqual(o, '')
+ s, o = exec_command.exec_command('cmd /C echo path=%path%')
+ assert_(s == 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'win32')
+ assert_(s == 0)
+ assert_(o == 'win32')
def check_posix(self, **kws):
s, o = exec_command.exec_command("echo Hello", **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hello')
+ assert_(s == 0)
+ assert_(o == 'Hello')
s, o = exec_command.exec_command('echo $AAA', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Tere')
+ assert_(s == 0)
+ assert_(o == 'Tere')
s, o = exec_command.exec_command('echo "$AAA"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
if 'BBB' not in os.environ:
os.environ['BBB'] = 'Hi'
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hi')
+ assert_(s == 0)
+ assert_(o == 'Hi')
s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hey')
+ assert_(s == 0)
+ assert_(o == 'Hey')
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hi')
+ assert_(s == 0)
+ assert_(o == 'Hi')
del os.environ['BBB']
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
s, o = exec_command.exec_command('this_is_not_a_command', **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command('echo path=$PATH', **kws)
- self.assertEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s == 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'posix')
+ assert_(s == 0)
+ assert_(o == 'posix')
def check_basic(self, *kws):
s, o = exec_command.exec_command(
'"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(\'0\');'
'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '012')
+ assert_(s == 0)
+ assert_(o == '012')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
- self.assertEqual(s, 15)
- self.assertEqual(o, '')
+ assert_(s == 15)
+ assert_(o == '')
s, o = exec_command.exec_command(
'"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Heipa')
+ assert_(s == 0)
+ assert_(o == 'Heipa')
def check_execute_in(self, **kws):
with tempdir() as tmpdir:
@@ -194,13 +194,13 @@ class TestExecCommand(TestCase):
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
(self.pyexe, fn), **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hello')
+ assert_(s == 0)
+ assert_(o == 'Hello')
def test_basic(self):
with redirect_stdout(StringIO()):
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index 7ca99db22..659520513 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -1,6 +1,6 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
import numpy.distutils.fcompiler
@@ -26,10 +26,11 @@ gfortran_version_strings = [
'4.9.1'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
"gfortran: warning: yet another warning\n4.9.1",
- '4.9.1')
+ '4.9.1'),
+ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
-class TestG77Versions(TestCase):
+class TestG77Versions(object):
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
@@ -42,7 +43,7 @@ class TestG77Versions(TestCase):
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-class TestGFortranVersions(TestCase):
+class TestGFortranVersions(object):
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py
index 8e371b92b..b13a01788 100644
--- a/numpy/distutils/tests/test_fcompiler_intel.py
+++ b/numpy/distutils/tests/test_fcompiler_intel.py
@@ -1,7 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy.distutils.fcompiler
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_
intel_32bit_version_strings = [
@@ -16,7 +16,7 @@ intel_64bit_version_strings = [
"running on Intel(R) 64, Version 11.1", '11.1')
]
-class TestIntelFCompilerVersions(TestCase):
+class TestIntelFCompilerVersions(object):
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
@@ -24,7 +24,7 @@ class TestIntelFCompilerVersions(TestCase):
assert_(v == version)
-class TestIntelEM64TFCompilerVersions(TestCase):
+class TestIntelEM64TFCompilerVersions(object):
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index f7fcbe224..dd4dbc842 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -6,12 +6,12 @@ from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal
)
ajoin = lambda *paths: join(*((sep,)+paths))
-class TestAppendpath(TestCase):
+class TestAppendpath(object):
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
@@ -35,7 +35,7 @@ class TestAppendpath(TestCase):
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
-class TestMinrelpath(TestCase):
+class TestMinrelpath(object):
def test_1(self):
n = lambda path: path.replace('/', sep)
@@ -49,7 +49,7 @@ class TestMinrelpath(TestCase):
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
-class TestGpaths(TestCase):
+class TestGpaths(object):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
@@ -58,7 +58,7 @@ class TestGpaths(TestCase):
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
-class TestSharedExtension(TestCase):
+class TestSharedExtension(object):
def test_get_shared_lib_extension(self):
import sys
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index bdef47167..29891b63b 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
-from numpy.testing import TestCase, run_module_suite, temppath
+from numpy.testing import run_module_suite, temppath, assert_
simple = """\
[meta]
@@ -36,7 +36,7 @@ libs = -L${libdir}
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
-class TestLibraryInfo(TestCase):
+class TestLibraryInfo(object):
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
@@ -44,10 +44,10 @@ class TestLibraryInfo(TestCase):
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_d['cflags'])
- self.assertTrue(out.libs() == simple_d['libflags'])
- self.assertTrue(out.name == simple_d['name'])
- self.assertTrue(out.version == simple_d['version'])
+ assert_(out.cflags() == simple_d['cflags'])
+ assert_(out.libs() == simple_d['libflags'])
+ assert_(out.name == simple_d['name'])
+ assert_(out.version == simple_d['version'])
def test_simple_variable(self):
with temppath('foo.ini') as path:
@@ -56,34 +56,34 @@ class TestLibraryInfo(TestCase):
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_variable_d['cflags'])
- self.assertTrue(out.libs() == simple_variable_d['libflags'])
- self.assertTrue(out.name == simple_variable_d['name'])
- self.assertTrue(out.version == simple_variable_d['version'])
+ assert_(out.cflags() == simple_variable_d['cflags'])
+ assert_(out.libs() == simple_variable_d['libflags'])
+ assert_(out.name == simple_variable_d['name'])
+ assert_(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
- self.assertTrue(out.cflags() == '-I/Users/david/include')
+ assert_(out.cflags() == '-I/Users/david/include')
-class TestParseFlags(TestCase):
+class TestParseFlags(object):
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
+ assert_(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
- self.assertTrue(d['macros'] == ['FOO'])
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
- self.assertTrue(d['macros'] == ['FOO'])
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
- self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
- self.assertTrue(d['libraries'] == ['foo', 'bar'])
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
- self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
- self.assertTrue(d['libraries'] == ['foo', 'bar'])
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
if __name__ == '__main__':
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 73b841692..026179d37 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -7,8 +7,9 @@ from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.distutils import ccompiler
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
-from numpy.testing.decorators import skipif
+from numpy.testing import (
+ run_module_suite, assert_, assert_equal, dec
+ )
from numpy.distutils.system_info import system_info, ConfigParser
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
@@ -20,9 +21,9 @@ def get_class(name, notfound_action=1):
1 - display warning message
2 - raise error
"""
- cl = {'temp1': TestTemp1,
- 'temp2': TestTemp2
- }.get(name.lower(), test_system_info)
+ cl = {'temp1': Temp1Info,
+ 'temp2': Temp2Info
+ }.get(name.lower(), _system_info)
return cl()
simple_site = """
@@ -83,7 +84,7 @@ def have_compiler():
HAVE_COMPILER = have_compiler()
-class test_system_info(system_info):
+class _system_info(system_info):
def __init__(self,
default_lib_dirs=default_lib_dirs,
@@ -110,17 +111,19 @@ class test_system_info(system_info):
return info
-class TestTemp1(test_system_info):
+class Temp1Info(_system_info):
+ """For testing purposes"""
section = 'temp1'
-class TestTemp2(test_system_info):
+class Temp2Info(_system_info):
+ """For testing purposes"""
section = 'temp2'
-class TestSystemInfoReading(TestCase):
+class TestSystemInfoReading(object):
- def setUp(self):
+ def setup(self):
""" Create the libraries """
# Create 2 sources and 2 libraries
self._dir1 = mkdtemp()
@@ -162,15 +165,15 @@ class TestSystemInfoReading(TestCase):
# Do each removal separately
try:
shutil.rmtree(self._dir1)
- except:
+ except Exception:
pass
try:
shutil.rmtree(self._dir2)
- except:
+ except Exception:
pass
try:
os.remove(self._sitecfg)
- except:
+ except Exception:
pass
def test_all(self):
@@ -199,7 +202,7 @@ class TestSystemInfoReading(TestCase):
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
- @skipif(not HAVE_COMPILER)
+ @dec.skipif(not HAVE_COMPILER)
def test_compile1(self):
# Compile source and link the first source
c = ccompiler.new_compiler()
@@ -215,8 +218,8 @@ class TestSystemInfoReading(TestCase):
finally:
os.chdir(previousDir)
- @skipif(not HAVE_COMPILER)
- @skipif('msvc' in repr(ccompiler.new_compiler()))
+ @dec.skipif(not HAVE_COMPILER)
+ @dec.skipif('msvc' in repr(ccompiler.new_compiler()))
def test_compile2(self):
# Compile source and link the second source
tsi = self.c_temp2
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index dac236644..4d3ab046e 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -9,36 +9,36 @@ Array types and conversions between types
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
-========== ==========================================================
-Data type Description
-========== ==========================================================
-bool_ Boolean (True or False) stored as a byte
-int_ Default integer type (same as C ``long``; normally either
- ``int64`` or ``int32``)
-intc Identical to C ``int`` (normally ``int32`` or ``int64``)
-intp Integer used for indexing (same as C ``ssize_t``; normally
- either ``int32`` or ``int64``)
-int8 Byte (-128 to 127)
-int16 Integer (-32768 to 32767)
-int32 Integer (-2147483648 to 2147483647)
-int64 Integer (-9223372036854775808 to 9223372036854775807)
-uint8 Unsigned integer (0 to 255)
-uint16 Unsigned integer (0 to 65535)
-uint32 Unsigned integer (0 to 4294967295)
-uint64 Unsigned integer (0 to 18446744073709551615)
-float_ Shorthand for ``float64``.
-float16 Half precision float: sign bit, 5 bits exponent,
- 10 bits mantissa
-float32 Single precision float: sign bit, 8 bits exponent,
- 23 bits mantissa
-float64 Double precision float: sign bit, 11 bits exponent,
- 52 bits mantissa
-complex_ Shorthand for ``complex128``.
-complex64 Complex number, represented by two 32-bit floats (real
- and imaginary components)
-complex128 Complex number, represented by two 64-bit floats (real
- and imaginary components)
-========== ==========================================================
+============ ==========================================================
+Data type Description
+============ ==========================================================
+``bool_`` Boolean (True or False) stored as a byte
+``int_`` Default integer type (same as C ``long``; normally either
+ ``int64`` or ``int32``)
+intc Identical to C ``int`` (normally ``int32`` or ``int64``)
+intp Integer used for indexing (same as C ``ssize_t``; normally
+ either ``int32`` or ``int64``)
+int8 Byte (-128 to 127)
+int16 Integer (-32768 to 32767)
+int32 Integer (-2147483648 to 2147483647)
+int64 Integer (-9223372036854775808 to 9223372036854775807)
+uint8 Unsigned integer (0 to 255)
+uint16 Unsigned integer (0 to 65535)
+uint32 Unsigned integer (0 to 4294967295)
+uint64 Unsigned integer (0 to 18446744073709551615)
+``float_`` Shorthand for ``float64``.
+float16 Half precision float: sign bit, 5 bits exponent,
+ 10 bits mantissa
+float32 Single precision float: sign bit, 8 bits exponent,
+ 23 bits mantissa
+float64 Double precision float: sign bit, 11 bits exponent,
+ 52 bits mantissa
+``complex_`` Shorthand for ``complex128``.
+complex64 Complex number, represented by two 32-bit floats (real
+ and imaginary components)
+complex128 Complex number, represented by two 64-bit floats (real
+ and imaginary components)
+============ ==========================================================
Additionally to ``intc`` the platform dependent C integer types ``short``,
``long``, ``longlong`` and their unsigned versions are defined.
@@ -114,10 +114,10 @@ properties of the type, such as whether it is an integer::
>>> d
dtype('int32')
- >>> np.issubdtype(d, int)
+ >>> np.issubdtype(d, np.integer)
True
- >>> np.issubdtype(d, float)
+ >>> np.issubdtype(d, np.floating)
False
@@ -155,11 +155,11 @@ with 80-bit precision, and while most C compilers provide this as their
``long double`` identical to ``double`` (64 bits). NumPy makes the
compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
-numpy provides with``np.finfo(np.longdouble)``.
+numpy provides with ``np.finfo(np.longdouble)``.
NumPy does not provide a dtype with more precision than C
-``long double``s; in particular, the 128-bit IEEE quad precision
-data type (FORTRAN's ``REAL*16``) is not available.
+``long double``\\s; in particular, the 128-bit IEEE quad precision
+data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
padded with zero bits, either to 96 or 128 bits. Which is more efficient
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
index 8480858d4..babe6a4d7 100644
--- a/numpy/doc/creation.py
+++ b/numpy/doc/creation.py
@@ -58,7 +58,7 @@ examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.arange(2, 10, dtype=np.float)
+ >>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 97b7b3362..794c393f6 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -48,7 +48,7 @@ Glossary
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
- Fast element-wise operations, called `ufuncs`_, operate on arrays.
+ Fast element-wise operations, called :term:`ufuncs`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
@@ -82,7 +82,7 @@ Glossary
array([[4, 5],
[5, 6]])
- See `doc.broadcasting`_ for more information.
+ See `numpy.doc.broadcasting` for more information.
C order
See `row-major`
@@ -155,7 +155,8 @@ Glossary
See `column-major`
flattened
- Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
+ Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
+ for details.
immutable
An object that cannot be modified after execution is called
@@ -284,9 +285,9 @@ Glossary
See *array*.
record array
- An `ndarray`_ with `structured data type`_ which has been subclassed as
- np.recarray and whose dtype is of type np.record, making the
- fields of its data type to be accessible by attribute.
+ An :term:`ndarray` with :term:`structured data type` which has been
+ subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
+ making the fields of its data type to be accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
@@ -348,10 +349,10 @@ Glossary
>>> x[:, 1]
array([2, 4])
-
+
structured data type
A data type composed of other datatypes
-
+
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 39b2c73ed..b286a904d 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -422,7 +422,7 @@ object: ::
[37, 40, 43],
[46, 49, 52]])
-For this reason it is possible to use the output from the np.where()
+For this reason it is possible to use the output from the np.nonzero()
function directly as an index since it always returns a tuple of index
arrays.
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 37ebca572..5d6708a0d 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -14,7 +14,8 @@ original value was)
Note: cannot use equality to test NaNs. E.g.: ::
>>> myarr = np.array([1., 0., np.nan, 3.])
- >>> np.where(myarr == np.nan)
+ >>> np.nonzero(myarr == np.nan)
+ (array([], dtype=int64),)
>>> np.nan == np.nan # is always False! Use special numpy functions instead.
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 36d8ff97d..c34278868 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -489,6 +489,8 @@ following.
return NotImplemented
if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
return
if ufunc.nout == 1:
@@ -541,7 +543,7 @@ will be called, but now it sees an ``ndarray`` as the other argument. Likely,
it will know how to handle this, and return a new instance of the ``B`` class
to us. Our example class is not set up to handle this, but it might well be
the best approach if, e.g., one were to re-implement ``MaskedArray`` using
- ``__array_ufunc__``.
+``__array_ufunc__``.
As a final note: if the ``super`` route is suited to a given class, an
advantage of using it is that it helps in constructing class hierarchies.
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index b9b86ba0e..250c4322b 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -69,6 +69,6 @@ def compile(source,
f.close()
return status
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index d27b95947..404bdbd2d 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -552,7 +552,7 @@ class F2PYError(Exception):
pass
-class throw_error:
+class throw_error(object):
def __init__(self, mess):
self.mess = mess
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 5b2e6a9b9..64829d30c 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -328,7 +328,7 @@ def getarrdims(a, var, verbose=0):
ret['size'] = '*'.join(dim)
try:
ret['size'] = repr(eval(ret['size']))
- except:
+ except Exception:
pass
ret['dims'] = ','.join(dim)
ret['rank'] = repr(len(dim))
@@ -485,7 +485,7 @@ def getinit(a, var):
else:
v = eval(v, {}, {})
ret['init.r'], ret['init.i'] = str(v.real), str(v.imag)
- except:
+ except Exception:
raise ValueError(
'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
if isarray(var):
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 1632a0d47..6aeeec823 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -99,8 +99,8 @@ cppmacros['CFUNCSMESS'] = """\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
-\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
-\tfprintf(stderr,\"\\n\");
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
@@ -219,18 +219,18 @@ cppmacros['SWAPUNSAFE'] = """\
"""
cppmacros['SWAP'] = """\
#define SWAP(a,b,t) {\\
-\tt *c;\\
-\tc = a;\\
-\ta = b;\\
-\tb = c;}
+ t *c;\\
+ c = a;\\
+ a = b;\\
+ b = c;}
"""
# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
# NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR'] = """\
#define PRINTPYOBJERR(obj)\\
-\tfprintf(stderr,\"#modulename#.error is related to \");\\
-\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
-\tfprintf(stderr,\"\\n\");
+ fprintf(stderr,\"#modulename#.error is related to \");\\
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX'] = """\
#ifndef max
@@ -401,59 +401,59 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
"""
# cppmacros['NUMFROMARROBJ']="""\
# define NUMFROMARROBJ(typenum,ctype) \\
-# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
-# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
-# \tif (arr) {\\
-# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
-# \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
-# \t\t\tgoto capi_fail;\\
-# \t\t} else {\\
-# \t\t\t(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
-# \t\t}\\
-# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
-# \t\treturn 1;\\
-# \t}
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
# """
# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
# cppmacros['CNUMFROMARROBJ']="""\
# define CNUMFROMARROBJ(typenum,ctype) \\
-# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
-# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
-# \tif (arr) {\\
-# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
-# \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
-# \t\t\tgoto capi_fail;\\
-# \t\t} else {\\
-# \t\t\t(PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
-# \t\t}\\
-# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
-# \t\treturn 1;\\
-# \t}
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
# """
needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE'] = """\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
-\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
-\t\tif (rv_cb_str == NULL)\\
-\t\t\tgoto capi_fail;\\
-\t\tif (PyString_Check(rv_cb_str)) {\\
-\t\t\tstr[len-1]='\\0';\\
-\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
-\t\t} else {\\
-\t\t\tPRINTPYOBJERR(rv_cb_str);\\
-\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
-\t\t\tgoto capi_fail;\\
-\t\t}\\
-\t}
+ PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
+ if (rv_cb_str == NULL)\\
+ goto capi_fail;\\
+ if (PyString_Check(rv_cb_str)) {\\
+ str[len-1]='\\0';\\
+ STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
+ } else {\\
+ PRINTPYOBJERR(rv_cb_str);\\
+ PyErr_SetString(#modulename#_error,\"string object expected\");\\
+ goto capi_fail;\\
+ }\\
+ }
"""
cppmacros['GETSCALARFROMPYTUPLE'] = """\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
-\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
-\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
-\t\t\tgoto capi_fail;\\
-\t}
+ if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+ if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+ goto capi_fail;\\
+ }
"""
cppmacros['FAILNULL'] = """\\
@@ -471,12 +471,12 @@ cppmacros['MEMCOPY'] = """\
"""
cppmacros['STRINGMALLOC'] = """\
#define STRINGMALLOC(str,len)\\
-\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
-\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
-\t\tgoto capi_fail;\\
-\t} else {\\
-\t\t(str)[len] = '\\0';\\
-\t}
+ if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
+ PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+ goto capi_fail;\\
+ } else {\\
+ (str)[len] = '\\0';\\
+ }
"""
cppmacros['STRINGFREE'] = """\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
@@ -504,39 +504,39 @@ cppmacros['STRINGCOPY'] = """\
"""
cppmacros['CHECKGENERIC'] = """\
#define CHECKGENERIC(check,tcheck,name) \\
-\tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKARRAY'] = """\
#define CHECKARRAY(check,tcheck,name) \\
-\tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKSTRING'] = """\
#define CHECKSTRING(check,tcheck,name,show,var)\\
-\tif (!(check)) {\\
-\t\tchar errstring[256];\\
-\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
-\t\tPyErr_SetString(#modulename#_error, errstring);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
+ PyErr_SetString(#modulename#_error, errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKSCALAR'] = """\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
-\tif (!(check)) {\\
-\t\tchar errstring[256];\\
-\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
-\t\tPyErr_SetString(#modulename#_error,errstring);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
+ PyErr_SetString(#modulename#_error,errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
# cppmacros['CHECKDIMS']="""\
# define CHECKDIMS(dims,rank) \\
-# \tfor (int i=0;i<(rank);i++)\\
-# \t\tif (dims[i]<0) {\\
-# \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
-# \t\t\tgoto capi_fail;\\
-# \t\t}
+# for (int i=0;i<(rank);i++)\\
+# if (dims[i]<0) {\\
+# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
+# goto capi_fail;\\
+# }
# """
cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
@@ -549,17 +549,17 @@ cppmacros['OLDPYNUM'] = """\
cfuncs['calcarrindex'] = """\
static int calcarrindex(int *i,PyArrayObject *arr) {
-\tint k,ii = i[0];
-\tfor (k=1; k < PyArray_NDIM(arr); k++)
-\t\tii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
-\treturn ii;
+ int k,ii = i[0];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
+ return ii;
}"""
cfuncs['calcarrindextr'] = """\
static int calcarrindextr(int *i,PyArrayObject *arr) {
-\tint k,ii = i[PyArray_NDIM(arr)-1];
-\tfor (k=1; k < PyArray_NDIM(arr); k++)
-\t\tii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
-\treturn ii;
+ int k,ii = i[PyArray_NDIM(arr)-1];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
+ return ii;
}"""
cfuncs['forcomb'] = """\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
@@ -604,543 +604,543 @@ static int *nextforcomb(void) {
needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
cfuncs['try_pyarr_from_string'] = """\
static int try_pyarr_from_string(PyObject *obj,const string str) {
-\tPyArrayObject *arr = NULL;
-\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
-\t\t{ STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
-\treturn 1;
+ PyArrayObject *arr = NULL;
+ if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
+ { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
+ return 1;
capi_fail:
-\tPRINTPYOBJERR(obj);
-\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
-\treturn 0;
+ PRINTPYOBJERR(obj);
+ PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
+ return 0;
}
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
-\tPyArrayObject *arr = NULL;
-\tPyObject *tmp = NULL;
+ PyArrayObject *arr = NULL;
+ PyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
#endif
-\tif (obj == Py_None) {
-\t\tif (*len == -1)
-\t\t\t*len = strlen(inistr); /* Will this cause problems? */
-\t\tSTRINGMALLOC(*str,*len);
-\t\tSTRINGCOPYN(*str,inistr,*len+1);
-\t\treturn 1;
-\t}
-\tif (PyArray_Check(obj)) {
-\t\tif ((arr = (PyArrayObject *)obj) == NULL)
-\t\t\tgoto capi_fail;
-\t\tif (!ISCONTIGUOUS(arr)) {
-\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
-\t\t\tgoto capi_fail;
-\t\t}
-\t\tif (*len == -1)
-\t\t\t*len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
-\t\tSTRINGMALLOC(*str,*len);
-\t\tSTRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
-\t\treturn 1;
-\t}
-\tif (PyString_Check(obj)) {
-\t\ttmp = obj;
-\t\tPy_INCREF(tmp);
-\t}
+ if (obj == Py_None) {
+ if (*len == -1)
+ *len = strlen(inistr); /* Will this cause problems? */
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,inistr,*len+1);
+ return 1;
+ }
+ if (PyArray_Check(obj)) {
+ if ((arr = (PyArrayObject *)obj) == NULL)
+ goto capi_fail;
+ if (!ISCONTIGUOUS(arr)) {
+ PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
+ goto capi_fail;
+ }
+ if (*len == -1)
+ *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
+ return 1;
+ }
+ if (PyString_Check(obj)) {
+ tmp = obj;
+ Py_INCREF(tmp);
+ }
#if PY_VERSION_HEX >= 0x03000000
-\telse if (PyUnicode_Check(obj)) {
-\t\ttmp = PyUnicode_AsASCIIString(obj);
-\t}
-\telse {
-\t\tPyObject *tmp2;
-\t\ttmp2 = PyObject_Str(obj);
-\t\tif (tmp2) {
-\t\t\ttmp = PyUnicode_AsASCIIString(tmp2);
-\t\t\tPy_DECREF(tmp2);
-\t\t}
-\t\telse {
-\t\t\ttmp = NULL;
-\t\t}
-\t}
+ else if (PyUnicode_Check(obj)) {
+ tmp = PyUnicode_AsASCIIString(obj);
+ }
+ else {
+ PyObject *tmp2;
+ tmp2 = PyObject_Str(obj);
+ if (tmp2) {
+ tmp = PyUnicode_AsASCIIString(tmp2);
+ Py_DECREF(tmp2);
+ }
+ else {
+ tmp = NULL;
+ }
+ }
#else
-\telse {
-\t\ttmp = PyObject_Str(obj);
-\t}
+ else {
+ tmp = PyObject_Str(obj);
+ }
#endif
-\tif (tmp == NULL) goto capi_fail;
-\tif (*len == -1)
-\t\t*len = PyString_GET_SIZE(tmp);
-\tSTRINGMALLOC(*str,*len);
-\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
-\tPy_DECREF(tmp);
-\treturn 1;
+ if (tmp == NULL) goto capi_fail;
+ if (*len == -1)
+ *len = PyString_GET_SIZE(tmp);
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
+ Py_DECREF(tmp);
+ return 1;
capi_fail:
-\tPy_XDECREF(tmp);
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ Py_XDECREF(tmp);
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (char)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (char)i;
+ return 1;
+ }
+ return 0;
}
"""
needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
cfuncs['signed_char_from_pyobj'] = """\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (signed_char)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (signed_char)i;
+ return 1;
+ }
+ return 0;
}
"""
needs['short_from_pyobj'] = ['int_from_pyobj']
cfuncs['short_from_pyobj'] = """\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (short)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (short)i;
+ return 1;
+ }
+ return 0;
}
"""
cfuncs['int_from_pyobj'] = """\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyInt_Check(obj)) {
-\t\t*v = (int)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Int(obj);
-\tif (tmp) {
-\t\t*v = PyInt_AS_LONG(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyInt_Check(obj)) {
+ *v = (int)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Int(obj);
+ if (tmp) {
+ *v = PyInt_AS_LONG(tmp);
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
cfuncs['long_from_pyobj'] = """\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyInt_Check(obj)) {
-\t\t*v = PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Int(obj);
-\tif (tmp) {
-\t\t*v = PyInt_AS_LONG(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyInt_Check(obj)) {
+ *v = PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Int(obj);
+ if (tmp) {
+ *v = PyInt_AS_LONG(tmp);
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['long_long_from_pyobj'] = ['long_long']
cfuncs['long_long_from_pyobj'] = """\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyLong_Check(obj)) {
-\t\t*v = PyLong_AsLongLong(obj);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PyInt_Check(obj)) {
-\t\t*v = (long_long)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Long(obj);
-\tif (tmp) {
-\t\t*v = PyLong_AsLongLong(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyLong_Check(obj)) {
+ *v = PyLong_AsLongLong(obj);
+ return (!PyErr_Occurred());
+ }
+ if (PyInt_Check(obj)) {
+ *v = (long_long)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Long(obj);
+ if (tmp) {
+ *v = PyLong_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return (!PyErr_Occurred());
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
cfuncs['long_double_from_pyobj'] = """\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
-\tdouble d=0;
-\tif (PyArray_CheckScalar(obj)){
-\t\tif PyArray_IsScalar(obj, LongDouble) {
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t\treturn 1;
-\t\t}
-\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) {
-\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj));
-\t\t\treturn 1;
-\t\t}
-\t}
-\tif (double_from_pyobj(&d,obj,errmess)) {
-\t\t*v = (long_double)d;
-\t\treturn 1;
-\t}
-\treturn 0;
+ double d=0;
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, LongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) {
+ (*v) = *((npy_longdouble *)PyArray_DATA(obj));
+ return 1;
+ }
+ }
+ if (double_from_pyobj(&d,obj,errmess)) {
+ *v = (long_double)d;
+ return 1;
+ }
+ return 0;
}
"""
cfuncs['double_from_pyobj'] = """\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyFloat_Check(obj)) {
+ PyObject* tmp = NULL;
+ if (PyFloat_Check(obj)) {
#ifdef __sgi
-\t\t*v = PyFloat_AsDouble(obj);
+ *v = PyFloat_AsDouble(obj);
#else
-\t\t*v = PyFloat_AS_DOUBLE(obj);
+ *v = PyFloat_AS_DOUBLE(obj);
#endif
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Float(obj);
-\tif (tmp) {
+ return 1;
+ }
+ tmp = PyNumber_Float(obj);
+ if (tmp) {
#ifdef __sgi
-\t\t*v = PyFloat_AsDouble(tmp);
+ *v = PyFloat_AsDouble(tmp);
#else
-\t\t*v = PyFloat_AS_DOUBLE(tmp);
+ *v = PyFloat_AS_DOUBLE(tmp);
#endif
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['float_from_pyobj'] = ['double_from_pyobj']
cfuncs['float_from_pyobj'] = """\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
-\tdouble d=0.0;
-\tif (double_from_pyobj(&d,obj,errmess)) {
-\t\t*v = (float)d;
-\t\treturn 1;
-\t}
-\treturn 0;
+ double d=0.0;
+ if (double_from_pyobj(&d,obj,errmess)) {
+ *v = (float)d;
+ return 1;
+ }
+ return 0;
}
"""
needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj'] = """\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
-\tcomplex_double cd={0.0,0.0};
-\tif (PyArray_CheckScalar(obj)){
-\t\tif PyArray_IsScalar(obj, CLongDouble) {
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t\treturn 1;
-\t\t}
-\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
-\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
-\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
-\t\t\treturn 1;
-\t\t}
-\t}
-\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
-\t\t(*v).r = (long_double)cd.r;
-\t\t(*v).i = (long_double)cd.i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ complex_double cd={0.0,0.0};
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, CLongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
+ (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
+ (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
+ return 1;
+ }
+ }
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (long_double)cd.r;
+ (*v).i = (long_double)cd.i;
+ return 1;
+ }
+ return 0;
}
"""
needs['complex_double_from_pyobj'] = ['complex_double']
cfuncs['complex_double_from_pyobj'] = """\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
-\tPy_complex c;
-\tif (PyComplex_Check(obj)) {
-\t\tc=PyComplex_AsCComplex(obj);
-\t\t(*v).r=c.real, (*v).i=c.imag;
-\t\treturn 1;
-\t}
-\tif (PyArray_IsScalar(obj, ComplexFloating)) {
-\t\tif (PyArray_IsScalar(obj, CFloat)) {
-\t\t\tnpy_cfloat new;
-\t\t\tPyArray_ScalarAsCtype(obj, &new);
-\t\t\t(*v).r = (double)new.real;
-\t\t\t(*v).i = (double)new.imag;
-\t\t}
-\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
-\t\t\tnpy_clongdouble new;
-\t\t\tPyArray_ScalarAsCtype(obj, &new);
-\t\t\t(*v).r = (double)new.real;
-\t\t\t(*v).i = (double)new.imag;
-\t\t}
-\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t}
-\t\treturn 1;
-\t}
-\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
-\t\tPyObject *arr;
-\t\tif (PyArray_Check(obj)) {
-\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
-\t\t}
-\t\telse {
-\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
-\t\t}
-\t\tif (arr==NULL) return 0;
-\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
-\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
-\t\treturn 1;
-\t}
-\t/* Python does not provide PyNumber_Complex function :-( */
-\t(*v).i=0.0;
-\tif (PyFloat_Check(obj)) {
+ Py_complex c;
+ if (PyComplex_Check(obj)) {
+ c=PyComplex_AsCComplex(obj);
+ (*v).r=c.real, (*v).i=c.imag;
+ return 1;
+ }
+ if (PyArray_IsScalar(obj, ComplexFloating)) {
+ if (PyArray_IsScalar(obj, CFloat)) {
+ npy_cfloat new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else if (PyArray_IsScalar(obj, CLongDouble)) {
+ npy_clongdouble new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else { /* if (PyArray_IsScalar(obj, CDouble)) */
+ PyArray_ScalarAsCtype(obj, v);
+ }
+ return 1;
+ }
+ if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
+ PyObject *arr;
+ if (PyArray_Check(obj)) {
+ arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
+ }
+ else {
+ arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
+ }
+ if (arr==NULL) return 0;
+ (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
+ (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
+ return 1;
+ }
+ /* Python does not provide PyNumber_Complex function :-( */
+ (*v).i=0.0;
+ if (PyFloat_Check(obj)) {
#ifdef __sgi
-\t\t(*v).r = PyFloat_AsDouble(obj);
+ (*v).r = PyFloat_AsDouble(obj);
#else
-\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
+ (*v).r = PyFloat_AS_DOUBLE(obj);
#endif
-\t\treturn 1;
-\t}
-\tif (PyInt_Check(obj)) {
-\t\t(*v).r = (double)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\tif (PyLong_Check(obj)) {
-\t\t(*v).r = PyLong_AsDouble(obj);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
-\t\tPyObject *tmp = PySequence_GetItem(obj,0);
-\t\tif (tmp) {
-\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
-\t\t\t\tPy_DECREF(tmp);
-\t\t\t\treturn 1;
-\t\t\t}
-\t\t\tPy_DECREF(tmp);
-\t\t}
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL)
-\t\t\terr = PyExc_TypeError;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ return 1;
+ }
+ if (PyInt_Check(obj)) {
+ (*v).r = (double)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ if (PyLong_Check(obj)) {
+ (*v).r = PyLong_AsDouble(obj);
+ return (!PyErr_Occurred());
+ }
+ if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
+ PyObject *tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ if (complex_double_from_pyobj(v,tmp,errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL)
+ err = PyExc_TypeError;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['complex_float_from_pyobj'] = [
'complex_float', 'complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj'] = """\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
-\tcomplex_double cd={0.0,0.0};
-\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
-\t\t(*v).r = (float)cd.r;
-\t\t(*v).i = (float)cd.i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ complex_double cd={0.0,0.0};
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (float)cd.r;
+ (*v).i = (float)cd.i;
+ return 1;
+ }
+ return 0;
}
"""
needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
+ 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char']
cfuncs[
- 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
+ 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char']
cfuncs[
- 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
+ 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
+ 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
+ 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
+ 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long'] = [
'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long']
cfuncs[
- 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
+ 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
+ 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
+ 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float'] = [
'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float']
cfuncs[
- 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
+ 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double'] = [
'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double']
cfuncs[
- 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
+ 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
cfuncs['create_cb_arglist'] = """\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
-\tPyObject *tmp = NULL;
-\tPyObject *tmp_fun = NULL;
-\tint tot,opt,ext,siz,i,di=0;
-\tCFUNCSMESS(\"create_cb_arglist\\n\");
-\ttot=opt=ext=siz=0;
-\t/* Get the total number of arguments */
-\tif (PyFunction_Check(fun))
-\t\ttmp_fun = fun;
-\telse {
-\t\tdi = 1;
-\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
-\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
-\t\t}
-\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
-\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
-\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
-\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
-\t\t\telse {
-\t\t\t\ttmp_fun = fun; /* built-in function */
-\t\t\t\ttot = maxnofargs;
-\t\t\t\tif (xa != NULL)
-\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
-\t\t\t}
-\t\t\tPy_XDECREF(tmp);
-\t\t}
-\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
-\t\t\ttot = maxnofargs;
-\t\t\tif (xa != NULL)
-\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
-\t\t\ttmp_fun = fun;
-\t\t}
-\t\telse if (F2PyCapsule_Check(fun)) {
-\t\t\ttot = maxnofargs;
-\t\t\tif (xa != NULL)
-\t\t\t\text = PyTuple_Size((PyObject *)xa);
-\t\t\tif(ext>0) {
-\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
-\t\t\t\tgoto capi_fail;
-\t\t\t}
-\t\t\ttmp_fun = fun;
-\t\t}
-\t}
+ PyObject *tmp = NULL;
+ PyObject *tmp_fun = NULL;
+ int tot,opt,ext,siz,i,di=0;
+ CFUNCSMESS(\"create_cb_arglist\\n\");
+ tot=opt=ext=siz=0;
+ /* Get the total number of arguments */
+ if (PyFunction_Check(fun))
+ tmp_fun = fun;
+ else {
+ di = 1;
+ if (PyObject_HasAttrString(fun,\"im_func\")) {
+ tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
+ }
+ else if (PyObject_HasAttrString(fun,\"__call__\")) {
+ tmp = PyObject_GetAttrString(fun,\"__call__\");
+ if (PyObject_HasAttrString(tmp,\"im_func\"))
+ tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
+ else {
+ tmp_fun = fun; /* built-in function */
+ tot = maxnofargs;
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ }
+ Py_XDECREF(tmp);
+ }
+ else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ tmp_fun = fun;
+ }
+ else if (F2PyCapsule_Check(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ if(ext>0) {
+ fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
+ goto capi_fail;
+ }
+ tmp_fun = fun;
+ }
+ }
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name));
goto capi_fail;
}
#if PY_VERSION_HEX >= 0x03000000
-\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
-\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
#else
-\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
-\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
#endif
-\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
-\t\tPy_XDECREF(tmp);
-\t}
-\t/* Get the number of optional arguments */
+ tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
+ Py_XDECREF(tmp);
+ }
+ /* Get the number of optional arguments */
#if PY_VERSION_HEX >= 0x03000000
-\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
-\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
+ if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
+ if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
#else
-\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
-\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
+ if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
+ if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
#endif
-\t\t\topt = PyTuple_Size(tmp);
-\t\tPy_XDECREF(tmp);
-\t}
-\t/* Get the number of extra arguments */
-\tif (xa != NULL)
-\t\text = PyTuple_Size((PyObject *)xa);
-\t/* Calculate the size of call-backs argument list */
-\tsiz = MIN(maxnofargs+ext,tot);
-\t*nofargs = MAX(0,siz-ext);
+ opt = PyTuple_Size(tmp);
+ Py_XDECREF(tmp);
+ }
+ /* Get the number of extra arguments */
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ /* Calculate the size of call-backs argument list */
+ siz = MIN(maxnofargs+ext,tot);
+ *nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
-\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
+ fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
-\tif (siz<tot-opt) {
-\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
-\t\tgoto capi_fail;
-\t}
-\t/* Initialize argument list */
-\t*args = (PyTupleObject *)PyTuple_New(siz);
-\tfor (i=0;i<*nofargs;i++) {
-\t\tPy_INCREF(Py_None);
-\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
-\t}
-\tif (xa != NULL)
-\t\tfor (i=(*nofargs);i<siz;i++) {
-\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
-\t\t\tPy_INCREF(tmp);
-\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
-\t\t}
-\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
-\treturn 1;
+ if (siz<tot-opt) {
+ fprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
+ goto capi_fail;
+ }
+ /* Initialize argument list */
+ *args = (PyTupleObject *)PyTuple_New(siz);
+ for (i=0;i<*nofargs;i++) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
+ }
+ if (xa != NULL)
+ for (i=(*nofargs);i<siz;i++) {
+ tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
+ Py_INCREF(tmp);
+ PyTuple_SET_ITEM(*args,i,tmp);
+ }
+ CFUNCSMESS(\"create_cb_arglist-end\\n\");
+ return 1;
capi_fail:
-\tif ((PyErr_Occurred())==NULL)
-\t\tPyErr_SetString(#modulename#_error,errmess);
-\treturn 0;
+ if ((PyErr_Occurred())==NULL)
+ PyErr_SetString(#modulename#_error,errmess);
+ return 0;
}
"""
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 24f9434c4..677f4bae3 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -308,22 +308,21 @@ def is_free_format(file):
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
- f = open(file, 'r')
- line = f.readline()
- n = 15 # the number of non-comment lines to scan for hints
- if _has_f_header(line):
- n = 0
- elif _has_f90_header(line):
- n = 0
- result = 1
- while n > 0 and line:
- if line[0] != '!' and line.strip():
- n -= 1
- if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
- result = 1
- break
+ with open(file, 'r') as f:
line = f.readline()
- f.close()
+ n = 15 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n > 0 and line:
+ if line[0] != '!' and line.strip():
+ n -= 1
+ if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
+ result = 1
+ break
+ line = f.readline()
return result
@@ -1036,13 +1035,13 @@ def analyzeline(m, case, line):
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
- except:
+ except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
- except:
+ except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
@@ -1052,7 +1051,7 @@ def analyzeline(m, case, line):
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
- except:
+ except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
@@ -1174,7 +1173,7 @@ def analyzeline(m, case, line):
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
- except:
+ except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
@@ -1251,7 +1250,7 @@ def analyzeline(m, case, line):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
- except:
+ except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
@@ -1790,7 +1789,7 @@ def setmesstext(block):
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
- except:
+ except Exception:
pass
@@ -2013,7 +2012,7 @@ def analyzecommon(block):
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
- n = m.group('name').strip()
+ n = rmbadname1(m.group('name').strip())
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
@@ -2108,7 +2107,7 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
- except:
+ except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
@@ -2150,7 +2149,7 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
- except:
+ except Exception:
pass
break
return None, None, None
@@ -2162,11 +2161,11 @@ def getarrlen(dl, args, star='*'):
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
- except:
+ except Exception:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
- except:
+ except Exception:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
@@ -2186,7 +2185,7 @@ def getarrlen(dl, args, star='*'):
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
- except:
+ except Exception:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
@@ -2579,7 +2578,7 @@ def analyzevars(block):
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
- except:
+ except Exception:
pass
vars[n]['charselector']['len'] = l
@@ -2588,7 +2587,7 @@ def analyzevars(block):
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
- except:
+ except Exception:
pass
vars[n]['kindselector']['kind'] = l
@@ -2819,7 +2818,7 @@ def analyzevars(block):
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
- except:
+ except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
@@ -3230,7 +3229,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False):
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
- except:
+ except Exception:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
@@ -3335,8 +3334,7 @@ if __name__ == "__main__":
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
- f = open(pyffilename, 'w')
- f.write(pyf)
- f.close()
+ with open(pyffilename, 'w') as f:
+ f.write(pyf)
if showblocklist:
show(postlist)
diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py
index c7041fe25..f5d5fa63d 100644
--- a/numpy/f2py/f2py_testing.py
+++ b/numpy/f2py/f2py_testing.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import re
-from numpy.testing.utils import jiffies, memusage
+from numpy.testing import jiffies, memusage
def cmdline():
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 8c8b4ae5d..96b08ea18 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -130,8 +130,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def)
return -1;
}
- p[size] = ')';
- p++;
+ *p++ = ')';
size--;
if (def.data == NULL) {
@@ -591,21 +590,21 @@ static void f2py_report_on_array_copy_fromany(void) {
* $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
*/
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+ const int rank,
+ npy_intp *dims);
+
static int
-count_nonpos(const int rank,
- const npy_intp *dims) {
+count_negative_dimensions(const int rank,
+ const npy_intp *dims) {
int i=0,r=0;
while (i<rank) {
- if (dims[i] <= 0) ++r;
+ if (dims[i] < 0) ++r;
++i;
}
return r;
}
-static int check_and_fix_dimensions(const PyArrayObject* arr,
- const int rank,
- npy_intp *dims);
-
#ifdef DEBUG_COPY_ND_ARRAY
void dump_dims(int rank, npy_intp* dims) {
int i;
@@ -679,7 +678,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
|| ((intent & F2PY_OPTIONAL) && (obj==Py_None))
) {
/* intent(cache), optional, intent(hide) */
- if (count_nonpos(rank,dims)) {
+ if (count_negative_dimensions(rank,dims) > 0) {
int i;
strcpy(mess, "failed to create intent(cache|hide)|optional array"
"-- must have defined dimensions but got (");
@@ -720,8 +719,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
/* intent(cache) */
if (PyArray_ISONESEGMENT(arr)
&& PyArray_ITEMSIZE(arr)>=elsize) {
- if (check_and_fix_dimensions(arr,rank,dims)) {
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
}
if (intent & F2PY_INTENT_OUT)
Py_INCREF(arr);
@@ -742,8 +741,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
/* here we have always intent(in) or intent(inout) or intent(inplace) */
- if (check_and_fix_dimensions(arr,rank,dims)) {
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
}
/*
printf("intent alignement=%d\n", F2PY_GET_ALIGNMENT(intent));
@@ -843,8 +842,9 @@ PyArrayObject* array_from_pyobj(const int type_num,
| NPY_ARRAY_FORCECAST, NULL);
if (arr==NULL)
return NULL;
- if (check_and_fix_dimensions(arr,rank,dims))
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
+ }
return arr;
}
@@ -855,11 +855,16 @@ PyArrayObject* array_from_pyobj(const int type_num,
/*****************************************/
static
-int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *dims) {
+int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims)
+{
/*
- This function fills in blanks (that are -1\'s) in dims list using
+ This function fills in blanks (that are -1's) in dims list using
the dimensions from arr. It also checks that non-blank dims will
match with the corresponding values in arr dimensions.
+
+ Returns 0 if the function is successful.
+
+ If an error condition is detected, an exception is set and 1 is returned.
*/
const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1;
#ifdef DEBUG_COPY_ND_ARRAY
@@ -877,9 +882,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
d = PyArray_DIM(arr,i);
if (dims[i] >= 0) {
if (d>1 && dims[i]!=d) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT "\n",
- i,dims[i], d);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -890,9 +896,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
}
for(i=PyArray_NDIM(arr);i<rank;++i)
if (dims[i]>1) {
- fprintf(stderr,"%d-th dimension must be %" NPY_INTP_FMT
- " but got 0 (not defined).\n",
- i,dims[i]);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be %" NPY_INTP_FMT
+ " but got 0 (not defined).\n",
+ i, dims[i]);
return 1;
} else if (free_axe<0)
free_axe = i;
@@ -903,9 +910,11 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
new_size *= dims[free_axe];
}
if (new_size != arr_size) {
- fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT
- ", got array with arr_size=%" NPY_INTP_FMT " (maybe too many free"
- " indices)\n", new_size,arr_size);
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT
+ " (maybe too many free indices)\n",
+ new_size, arr_size);
return 1;
}
} else if (rank==PyArray_NDIM(arr)) {
@@ -916,9 +925,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
d = PyArray_DIM(arr,i);
if (dims[i]>=0) {
if (d > 1 && d!=dims[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT "\n",
- i,dims[i],d);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -926,8 +936,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
new_size *= dims[i];
}
if (new_size != arr_size) {
- fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT
- ", got array with arr_size=%" NPY_INTP_FMT "\n", new_size,arr_size);
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT "\n",
+ new_size, arr_size);
return 1;
}
} else { /* [[1,2]] -> [[1],[2]] */
@@ -939,8 +951,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
if (PyArray_DIM(arr,i)>1) ++effrank;
if (dims[rank-1]>=0)
if (effrank>rank) {
- fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n",
- PyArray_NDIM(arr),effrank,rank);
+ PyErr_Format(PyExc_ValueError,
+ "too many axes: %d (effrank=%d), "
+ "expected rank=%d\n",
+ PyArray_NDIM(arr), effrank, rank);
return 1;
}
@@ -950,9 +964,11 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
else d = PyArray_DIM(arr,j++);
if (dims[i]>=0) {
if (d>1 && d!=dims[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT " (real index=%d)\n",
- i,dims[i],d,j-1);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT
+ " (real index=%d)\n",
+ i, dims[i], d, j-1);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -968,13 +984,28 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
}
for (i=0,size=1;i<rank;++i) size *= dims[i];
if (size != arr_size) {
- fprintf(stderr,"unexpected array size: size=%" NPY_INTP_FMT ", arr_size=%" NPY_INTP_FMT
- ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
- size,arr_size,rank,effrank,PyArray_NDIM(arr));
- for (i=0;i<rank;++i) fprintf(stderr," %" NPY_INTP_FMT,dims[i]);
- fprintf(stderr," ], arr.dims=[");
- for (i=0;i<PyArray_NDIM(arr);++i) fprintf(stderr," %" NPY_INTP_FMT,PyArray_DIM(arr,i));
- fprintf(stderr," ]\n");
+ char msg[200];
+ int len;
+ snprintf(msg, sizeof(msg),
+ "unexpected array size: size=%" NPY_INTP_FMT
+ ", arr_size=%" NPY_INTP_FMT
+ ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
+ size, arr_size, rank, effrank, PyArray_NDIM(arr));
+ for (i = 0; i < rank; ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len,
+ " %" NPY_INTP_FMT, dims[i]);
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=[");
+ for (i = 0; i < PyArray_NDIM(arr); ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len,
+ " %" NPY_INTP_FMT, PyArray_DIM(arr, i));
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ]\n");
+ PyErr_SetString(PyExc_ValueError, msg);
return 1;
}
}
diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/f2py/tests/__init__.py
diff --git a/numpy/f2py/tests/src/common/block.f b/numpy/f2py/tests/src/common/block.f
new file mode 100644
index 000000000..7ea7968fe
--- /dev/null
+++ b/numpy/f2py/tests/src/common/block.f
@@ -0,0 +1,11 @@
+ SUBROUTINE INITCB
+ DOUBLE PRECISION LONG
+ CHARACTER STRING
+ INTEGER OK
+
+ COMMON /BLOCK/ LONG, STRING, OK
+ LONG = 1.0
+ STRING = '2'
+ OK = 3
+ RETURN
+ END
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 48bb7c0f4..663fead6a 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -12,12 +12,12 @@ from numpy.testing import (
run_module_suite, assert_, assert_equal, SkipTest
)
from numpy.core.multiarray import typeinfo
-import util
+from . import util
wrap = None
-def setup():
+def setup_module():
"""
Build the required testing extension module
@@ -294,7 +294,7 @@ class Array(object):
return obj_attr[0] == self.arr_attr[0]
-class test_intent(unittest.TestCase):
+class TestIntent(object):
def test_in_out(self):
assert_equal(str(intent.in_.out), 'intent(in,out)')
@@ -305,7 +305,7 @@ class test_intent(unittest.TestCase):
assert_(not intent.in_.is_intent('c'))
-class _test_shared_memory:
+class _test_shared_memory(object):
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
@@ -578,14 +578,12 @@ class _test_shared_memory:
for t in _type_names:
exec('''\
-class test_%s_gen(unittest.TestCase,
- _test_shared_memory
- ):
- def setUp(self):
+class TestGen_%s(_test_shared_memory):
+ def setup(self):
self.type = Type(%r)
array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
''' % (t, t, t))
if __name__ == "__main__":
- setup()
+ setup_module()
run_module_suite()
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index 725e7f0c1..371aab755 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.testing import run_module_suite, assert_, dec
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
new file mode 100644
index 000000000..c3f9dc856
--- /dev/null
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -0,0 +1,23 @@
+from __future__ import division, absolute_import, print_function
+
+import textwrap
+from . import util
+
+from numpy.testing import run_module_suite, assert_equal
+
+class TestBlockDocString(util.F2PyTest):
+ code = """
+ SUBROUTINE FOO()
+ INTEGER BAR(2, 3)
+
+ COMMON /BLOCK/ BAR
+ RETURN
+ END
+ """
+
+ def test_block_docstring(self):
+ expected = "'i'-array(2,3)\n"
+ assert_equal(self.module.block.__doc__, expected)
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 6824a2042..ea29043ed 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -5,7 +5,7 @@ import textwrap
from numpy import array
from numpy.testing import run_module_suite, assert_, assert_equal, dec
-import util
+from . import util
class TestF77Callback(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py
new file mode 100644
index 000000000..aaa35b678
--- /dev/null
+++ b/numpy/f2py/tests/test_common.py
@@ -0,0 +1,26 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+
+from numpy.testing import run_module_suite, assert_array_equal, dec
+import numpy as np
+from . import util
+
+
+def _path(*a):
+ return os.path.join(*((os.path.dirname(__file__),) + a))
+
+class TestCommonBlock(util.F2PyTest):
+ sources = [_path('src', 'common', 'block.f')]
+
+ def test_common_block(self):
+ self.module.initcb()
+ assert_array_equal(self.module.block.long_bn,
+ np.array(1.0, dtype=np.float64))
+ assert_array_equal(self.module.block.string_bn,
+ np.array('2', dtype='|S1'))
+ assert_array_equal(self.module.block.ok,
+ np.array(3, dtype=np.int32))
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index 2552234a1..7cfe2e977 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -7,7 +7,7 @@ from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
_selected_real_kind_func as selected_real_kind
)
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 9055083bf..c145a4b23 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -4,7 +4,7 @@ import os
import textwrap
from numpy.testing import run_module_suite, assert_, assert_equal, dec
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index b6891756d..285b693a1 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -6,7 +6,7 @@ import math
import numpy as np
from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index 43a8de350..c34a5781c 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -6,7 +6,7 @@ import math
import numpy as np
from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 7704e7d28..217b2c9dd 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.testing import run_module_suite, assert_, dec
-import util
+from . import util
class TestReturnCharacter(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 88ef83e94..73ced8ed8 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnComplex(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 00033d698..df8fc7c97 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnInteger(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index f88a25d7a..221dc3cbd 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnLogical(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 57aa9badf..a81549083 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnReal(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index aeb70486a..1fcad05a5 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.testing import run_module_suite, assert_equal, dec
-import util
+from . import util
def _path(*a):
@@ -15,6 +15,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_all(self):
+ r = self.module.foo([[]])
+ assert_equal(r, [0], repr(r))
+
r = self.module.foo([[1, 2]])
assert_equal(r, [3], repr(r))
@@ -26,6 +29,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_transpose(self):
+ r = self.module.trans([[]])
+ assert_equal(r.T, [[]], repr(r))
+
r = self.module.trans([[1, 2]])
assert_equal(r, [[1], [2]], repr(r))
@@ -34,6 +40,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_flatten(self):
+ r = self.module.flatten([[]])
+ assert_equal(r, [], repr(r))
+
r = self.module.flatten([[1, 2]])
assert_equal(r, [1, 2], repr(r))
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index 10022ebb1..065861c0b 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -4,7 +4,7 @@ import os
from numpy.testing import run_module_suite, assert_array_equal, dec
import numpy as np
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index fe608d898..55716a2eb 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -319,7 +319,7 @@ class F2PyTest(object):
module = None
module_name = None
- def setUp(self):
+ def setup(self):
if self.module is not None:
return
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index a1f9e90e0..72d61a728 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -6,6 +6,6 @@ from .info import __doc__
from .fftpack import *
from .helper import *
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/fft/tests/__init__.py b/numpy/fft/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/fft/tests/__init__.py
diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_fftpack.py
index a2cbc0f63..7ac0488e4 100644
--- a/numpy/fft/tests/test_fftpack.py
+++ b/numpy/fft/tests/test_fftpack.py
@@ -2,8 +2,10 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.random import random
-from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
-from numpy.testing import assert_array_equal
+from numpy.testing import (
+ run_module_suite, assert_array_almost_equal, assert_array_equal,
+ assert_raises,
+ )
import threading
import sys
if sys.version_info[0] >= 3:
@@ -19,13 +21,13 @@ def fft1(x):
return np.sum(x*np.exp(phase), axis=1)
-class TestFFTShift(TestCase):
+class TestFFTShift(object):
def test_fft_n(self):
- self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
+ assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
-class TestFFT1D(TestCase):
+class TestFFT1D(object):
def test_fft(self):
x = random(30) + 1j*random(30)
@@ -145,7 +147,7 @@ class TestFFT1D(TestCase):
assert_array_almost_equal(x_norm,
np.linalg.norm(tmp))
-class TestFFTThreadSafe(TestCase):
+class TestFFTThreadSafe(object):
threads = 16
input_shape = (800, 200)
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index ff56ff63c..f02edf7cc 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -6,13 +6,15 @@ Copied from fftpack.helper by Pearu Peterson, October 2005
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
+from numpy.testing import (
+ run_module_suite, assert_array_almost_equal, assert_equal,
+ )
from numpy import fft
from numpy import pi
from numpy.fft.helper import _FFTCache
-class TestFFTShift(TestCase):
+class TestFFTShift(object):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -40,7 +42,7 @@ class TestFFTShift(TestCase):
fft.ifftshift(shifted, axes=(0,)))
-class TestFFTFreq(TestCase):
+class TestFFTFreq(object):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -51,7 +53,7 @@ class TestFFTFreq(TestCase):
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
-class TestRFFTFreq(TestCase):
+class TestRFFTFreq(object):
def test_definition(self):
x = [0, 1, 2, 3, 4]
@@ -62,7 +64,7 @@ class TestRFFTFreq(TestCase):
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
-class TestIRFFTN(TestCase):
+class TestIRFFTN(object):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
@@ -74,7 +76,7 @@ class TestIRFFTN(TestCase):
fft.irfftn(a, axes=axes)
-class TestFFTCache(TestCase):
+class TestFFTCache(object):
def test_basic_behaviour(self):
c = _FFTCache(max_size_in_mb=1, max_item_count=4)
@@ -90,7 +92,7 @@ class TestFFTCache(TestCase):
np.zeros(2, dtype=np.float32))
# Nothing should be left.
- self.assertEqual(len(c._dict), 0)
+ assert_equal(len(c._dict), 0)
# Now put everything in twice so it can be retrieved once and each will
# still have one item left.
@@ -101,7 +103,7 @@ class TestFFTCache(TestCase):
np.ones(2, dtype=np.float32))
assert_array_almost_equal(c.pop_twiddle_factors(2),
np.zeros(2, dtype=np.float32))
- self.assertEqual(len(c._dict), 2)
+ assert_equal(len(c._dict), 2)
def test_automatic_pruning(self):
# That's around 2600 single precision samples.
@@ -109,27 +111,27 @@ class TestFFTCache(TestCase):
c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))
c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2])
+ assert_equal(list(c._dict.keys()), [1, 2])
# This is larger than the limit but should still be kept.
c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2, 3])
+ assert_equal(list(c._dict.keys()), [1, 2, 3])
# Add one more.
c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))
# The other three should no longer exist.
- self.assertEqual(list(c._dict.keys()), [4])
+ assert_equal(list(c._dict.keys()), [4])
# Now test the max item count pruning.
c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
c.put_twiddle_factors(2, np.empty(2))
c.put_twiddle_factors(1, np.empty(2))
# Can still be accessed.
- self.assertEqual(list(c._dict.keys()), [2, 1])
+ assert_equal(list(c._dict.keys()), [2, 1])
c.put_twiddle_factors(3, np.empty(2))
# 1 and 3 can still be accessed - c[2] has been touched least recently
# and is thus evicted.
- self.assertEqual(list(c._dict.keys()), [1, 3])
+ assert_equal(list(c._dict.keys()), [1, 3])
# One last test. We will add a single large item that is slightly
# bigger then the cache size. Some small items can still be added.
@@ -138,18 +140,18 @@ class TestFFTCache(TestCase):
c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))
c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))
c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2, 3, 4])
+ assert_equal(list(c._dict.keys()), [1, 2, 3, 4])
# One more big item. This time it is 6 smaller ones but they are
# counted as one big item.
for _ in range(6):
c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))
# '1' no longer in the cache. Rest still in the cache.
- self.assertEqual(list(c._dict.keys()), [2, 3, 4, 5])
+ assert_equal(list(c._dict.keys()), [2, 3, 4, 5])
# Another big item - should now be the only item in the cache.
c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [6])
+ assert_equal(list(c._dict.keys()), [6])
if __name__ == "__main__":
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 847a3e896..d85a179dd 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -44,6 +44,6 @@ __all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 304bba3d3..1874c2e97 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -527,7 +527,7 @@ class StringConverter(object):
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.floating, float, nx.nan),
- (complex, _bytes_to_complex, nx.nan + 0j),
+ (nx.complexfloating, _bytes_to_complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
(nx.string_, bytes, b'???')])
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 2dad99c34..b8966e543 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1208,7 +1208,7 @@ def pad(array, pad_width, mode, **kwargs):
length to the vector argument with padded values replaced. It has the
following signature::
- padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
where
@@ -1222,7 +1222,7 @@ def pad(array, pad_width, mode, **kwargs):
the end of vector.
iaxis : int
The axis currently being calculated.
- kwargs : misc
+ kwargs : dict
Any keyword arguments the function requires.
Examples
@@ -1272,21 +1272,27 @@ def pad(array, pad_width, mode, **kwargs):
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
- >>> def padwithtens(vector, pad_width, iaxis, kwargs):
- ... vector[:pad_width[0]] = 10
- ... vector[-pad_width[1]:] = 10
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
+ ... pad_value = kwargs.get('padder', 10)
+ ... vector[:pad_width[0]] = pad_value
+ ... vector[-pad_width[1]:] = pad_value
... return vector
-
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
-
- >>> np.lib.pad(a, 2, padwithtens)
+ >>> np.lib.pad(a, 2, pad_with)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
+ >>> np.lib.pad(a, 2, pad_with, padder=100)
+ array([[100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 0, 1, 2, 100, 100],
+ [100, 100, 3, 4, 5, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
@@ -1407,6 +1413,14 @@ def pad(array, pad_width, mode, **kwargs):
elif mode == 'reflect':
for axis, (pad_before, pad_after) in enumerate(pad_width):
+ if narray.shape[axis] == 0:
+ # Axes with non-zero padding cannot be empty.
+ if pad_before > 0 or pad_after > 0:
+ raise ValueError("There aren't any elements to reflect"
+ " in axis {} of `array`".format(axis))
+ # Skip zero padding on empty axes.
+ continue
+
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index d29e555b8..ededb9dd0 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -263,9 +263,9 @@ def _unique1d(ar, return_index=False, return_inverse=False,
else:
ret = (ar,)
if return_index:
- ret += (np.empty(0, np.bool),)
+ ret += (np.empty(0, np.intp),)
if return_inverse:
- ret += (np.empty(0, np.bool),)
+ ret += (np.empty(0, np.intp),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
@@ -375,11 +375,8 @@ def setxor1d(ar1, ar2, assume_unique=False):
return aux
aux.sort()
-# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
-# flag2 = ediff1d( flag ) == 0
- flag2 = flag[1:] == flag[:-1]
- return aux[flag2]
+ return aux[flag[1:] & flag[:-1]]
def in1d(ar1, ar2, assume_unique=False, invert=False):
@@ -454,11 +451,11 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
- mask = np.ones(len(ar1), dtype=np.bool)
+ mask = np.ones(len(ar1), dtype=bool)
for a in ar2:
mask &= (ar1 != a)
else:
- mask = np.zeros(len(ar1), dtype=np.bool)
+ mask = np.zeros(len(ar1), dtype=bool)
for a in ar2:
mask |= (ar1 == a)
return mask
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 14dec01d5..84af2afc8 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -100,9 +100,9 @@ the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
-spaces (``\\x20``) to make the total length of
-``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
-purposes.
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
The dictionary contains three keys:
@@ -163,6 +163,7 @@ else:
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
@@ -304,27 +305,33 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
- # Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a
- # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
- # can take advantage of our premature optimization.
- current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
- topad = 16 - (current_header_len % 16)
- header = header + ' '*topad + '\n'
header = asbytes(_filter_header(header))
- hlen = len(header)
- if hlen < 256*256 and version in (None, (1, 0)):
+ hlen = len(header) + 1 # 1 for newline
+ padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
+ padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
+
+ # Which version(s) we write depends on the total header size; v1 has a max of 65535
+ if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
version = (1, 0)
- header_prefix = magic(1, 0) + struct.pack('<H', hlen)
- elif hlen < 2**32 and version in (None, (2, 0)):
+ header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
+ topad = padlen_v1
+ elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
version = (2, 0)
- header_prefix = magic(2, 0) + struct.pack('<I', hlen)
+ header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
+ topad = padlen_v2
else:
msg = "Header length %s too big for version=%s"
msg %= (hlen, version)
raise ValueError(msg)
+ # Pad the header with spaces and a final newline such that the magic
+    # string, the header-length short and the header are aligned on an
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ header = header + b' '*topad + b'\n'
+
fp.write(header_prefix)
fp.write(header)
return version
@@ -468,18 +475,18 @@ def _read_array_header(fp, version):
# header.
import struct
if version == (1, 0):
- hlength_str = _read_bytes(fp, 2, "array header length")
- header_length = struct.unpack('<H', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<H'
elif version == (2, 0):
- hlength_str = _read_bytes(fp, 4, "array header length")
- header_length = struct.unpack('<I', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<I'
else:
raise ValueError("Invalid version %r" % version)
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
+ header = _read_bytes(fp, header_length, "array header")
+
# The header is a pretty-printed string representation of a literal
- # Python dictionary with trailing newlines padded to a 16-byte
+    # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 32c999dfc..905e60512 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import collections
-import operator
import re
import sys
import warnings
@@ -16,7 +15,7 @@ from numpy.core.numeric import (
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
- mod, exp, log10
+ mod, exp, log10, not_equal, subtract
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
@@ -57,8 +56,6 @@ def rot90(m, k=1, axes=(0,1)):
Rotation direction is from the first towards the second axis.
- .. versionadded:: 1.12.0
-
Parameters
----------
m : array_like
@@ -69,6 +66,8 @@ def rot90(m, k=1, axes=(0,1)):
The array is rotated in the plane defined by the axes.
Axes must be different.
+ .. versionadded:: 1.12.0
+
Returns
-------
y : ndarray
@@ -627,7 +626,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
- >>> np.sum(hist*np.diff(bin_edges))
+ >>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
@@ -718,7 +717,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
- np.can_cast(weights.dtype, np.complex)):
+ np.can_cast(weights.dtype, complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
@@ -974,7 +973,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
- Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
+ Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
@@ -1321,16 +1320,8 @@ def piecewise(x, condlist, funclist, *args, **kw):
x = x[None]
zerod = True
if n == n2 - 1: # compute the "otherwise" condition.
- totlist = np.logical_or.reduce(condlist, axis=0)
- # Only able to stack vertically if the array is 1d or less
- if x.ndim <= 1:
- condlist = np.vstack([condlist, ~totlist])
- else:
- condlist = [asarray(c, dtype=bool) for c in condlist]
- totlist = condlist[0]
- for k in range(1, n):
- totlist |= condlist[k]
- condlist.append(~totlist)
+ condelse = ~np.any(condlist, axis=0, keepdims=True)
+ condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
y = zeros(x.shape, x.dtype)
@@ -1550,7 +1541,7 @@ def gradient(f, *varargs, **kwargs):
Examples
--------
- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
+ >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
@@ -1566,7 +1557,7 @@ def gradient(f, *varargs, **kwargs):
Or a non uniform one:
- >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
+ >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
@@ -1574,7 +1565,7 @@ def gradient(f, *varargs, **kwargs):
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
@@ -1584,7 +1575,7 @@ def gradient(f, *varargs, **kwargs):
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
@@ -1601,7 +1592,7 @@ def gradient(f, *varargs, **kwargs):
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
@@ -1728,33 +1719,27 @@ def gradient(f, *varargs, **kwargs):
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
- otype = f.dtype.char
- if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
- otype = 'd'
-
- # Difference of datetime64 elements results in timedelta64
- if otype == 'M':
- # Need to use the full dtype name because it contains unit information
- otype = f.dtype.name.replace('datetime', 'timedelta')
- elif otype == 'm':
- # Needs to keep the specific units, can't be a general unit
- otype = f.dtype
-
- # Convert datetime64 data into ints. Make dummy variable `y`
- # that is a view of ints if the data is datetime64, otherwise
- # just set y equal to the array `f`.
- if f.dtype.char in ["M", "m"]:
- y = f.view('int64')
+ otype = f.dtype
+ if otype.type is np.datetime64:
+ # the timedelta dtype with the same unit information
+ otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
+ # view as timedelta to allow addition
+ f = f.view(otype)
+ elif otype.type is np.timedelta64:
+ pass
+ elif np.issubdtype(otype, np.inexact):
+ pass
else:
- y = f
+ # all other types convert to floating point
+ otype = np.double
for i, axis in enumerate(axes):
- if y.shape[axis] < edge_order + 1:
+ if f.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required.")
# result allocation
- out = np.empty_like(y, dtype=otype)
+ out = np.empty_like(f, dtype=otype)
uniform_spacing = np.ndim(dx[i]) == 0
@@ -1785,15 +1770,15 @@ def gradient(f, *varargs, **kwargs):
slice2[axis] = 1
slice3[axis] = 0
dx_0 = dx[i] if uniform_spacing else dx[i][0]
- # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
- out[slice1] = (y[slice2] - y[slice3]) / dx_0
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+ out[slice1] = (f[slice2] - f[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = dx[i] if uniform_spacing else dx[i][-1]
- # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
- out[slice1] = (y[slice2] - y[slice3]) / dx_n
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+ out[slice1] = (f[slice2] - f[slice3]) / dx_n
# Numerical differentiation: 2nd order edges
else:
@@ -1811,8 +1796,8 @@ def gradient(f, *varargs, **kwargs):
a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = - dx1 / (dx2 * (dx1 + dx2))
- # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
- out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+ out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
slice1[axis] = -1
slice2[axis] = -3
@@ -1829,7 +1814,7 @@ def gradient(f, *varargs, **kwargs):
b = - (dx2 + dx1) / (dx1 * dx2)
c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
- out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+ out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
outvals.append(out)
@@ -1847,7 +1832,7 @@ def gradient(f, *varargs, **kwargs):
def diff(a, n=1, axis=-1):
"""
- Calculate the n-th discrete difference along given axis.
+ Calculate the n-th discrete difference along the given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
@@ -1858,16 +1843,21 @@ def diff(a, n=1, axis=-1):
a : array_like
Input array
n : int, optional
- The number of times values are differenced.
+ The number of times values are differenced. If zero, the input
+ is returned as-is.
axis : int, optional
- The axis along which the difference is taken, default is the last axis.
+ The axis along which the difference is taken, default is the
+ last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`. The
- type of the output is the same as that of the input.
+ type of the output is the same as the type of the difference
+ between any two elements of `a`. This is the same as the type of
+ `a` in most cases. A notable exception is `datetime64`, which
+ results in a `timedelta64` output array.
See Also
--------
@@ -1875,13 +1865,13 @@ def diff(a, n=1, axis=-1):
Notes
-----
- For boolean arrays, the preservation of type means that the result
- will contain `False` when consecutive elements are the same and
- `True` when they differ.
+ Type is preserved for boolean arrays, so the result will contain
+ `False` when consecutive elements are the same and `True` when they
+ differ.
- For unsigned integer arrays, the results will also be unsigned. This should
- not be surprising, as the result is consistent with calculating the
- difference directly:
+ For unsigned integer arrays, the results will also be unsigned. This
+ should not be surprising, as the result is consistent with
+ calculating the difference directly:
>>> u8_arr = np.array([1, 0], dtype=np.uint8)
>>> np.diff(u8_arr)
@@ -1889,8 +1879,8 @@ def diff(a, n=1, axis=-1):
>>> u8_arr[1,...] - u8_arr[0,...]
array(255, np.uint8)
- If this is not desirable, then the array should be cast to a larger integer
- type first:
+ If this is not desirable, then the array should be cast to a larger
+ integer type first:
>>> i16_arr = u8_arr.astype(np.int16)
>>> np.diff(i16_arr)
@@ -1911,24 +1901,33 @@ def diff(a, n=1, axis=-1):
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
+ >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ >>> np.diff(x)
+ array([1, 1], dtype='timedelta64[D]')
+
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
+
a = asanyarray(a)
nd = a.ndim
- slice1 = [slice(None)]*nd
- slice2 = [slice(None)]*nd
+ axis = normalize_axis_index(axis, nd)
+
+ slice1 = [slice(None)] * nd
+ slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
- if n > 1:
- return diff(a[slice1]-a[slice2], n-1, axis=axis)
- else:
- return a[slice1]-a[slice2]
+
+ op = not_equal if a.dtype == np.bool_ else subtract
+ for _ in range(n):
+ a = op(a[slice1], a[slice2])
+
+ return a
def interp(x, xp, fp, left=None, right=None, period=None):
@@ -2074,6 +2073,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
else:
return interp_func(x, xp, fp, left, right).item()
+
def angle(z, deg=0):
"""
Return the angle of the complex argument.
@@ -2096,8 +2096,6 @@ def angle(z, deg=0):
arctan2
absolute
-
-
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
@@ -2607,7 +2605,7 @@ class vectorize(object):
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
- >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
+ >>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
@@ -2987,7 +2985,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
- >>> X = np.vstack((x,y))
+ >>> X = np.stack((x, y), axis=0)
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
@@ -3025,7 +3023,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
y = array(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
- X = np.vstack((X, y))
+ X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
@@ -3036,7 +3034,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
# Get the product of frequencies and weights
w = None
if fweights is not None:
- fweights = np.asarray(fweights, dtype=np.float)
+ fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
@@ -3051,7 +3049,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
"fweights cannot be negative")
w = fweights
if aweights is not None:
- aweights = np.asarray(aweights, dtype=np.float)
+ aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
@@ -4010,8 +4008,9 @@ def _ureduce(a, func, **kwargs):
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
+ keepdim = tuple(keepdim)
else:
- keepdim = [1] * a.ndim
+ keepdim = (1,) * a.ndim
r = func(a, **kwargs)
return r, keepdim
@@ -4273,10 +4272,7 @@ def percentile(a, q, axis=None, out=None,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
- if q.ndim == 0:
- return r.reshape(k)
- else:
- return r.reshape([len(q)] + k)
+ return r.reshape(q.shape + k)
else:
return r
@@ -4345,7 +4341,7 @@ def _percentile(a, q, axis=None, out=None,
ap.partition(indices, axis=axis)
# ensure axis with qth is first
- ap = np.rollaxis(ap, axis, 0)
+ ap = np.moveaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
@@ -4378,9 +4374,9 @@ def _percentile(a, q, axis=None, out=None,
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
- ap = np.rollaxis(ap, axis, 0)
- weights_below = np.rollaxis(weights_below, axis, 0)
- weights_above = np.rollaxis(weights_above, axis, 0)
+ ap = np.moveaxis(ap, axis, 0)
+ weights_below = np.moveaxis(weights_below, axis, 0)
+ weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
@@ -4392,8 +4388,8 @@ def _percentile(a, q, axis=None, out=None,
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
- x1 = np.rollaxis(x1, axis, 0)
- x2 = np.rollaxis(x2, axis, 0)
+ x1 = np.moveaxis(x1, axis, 0)
+ x2 = np.moveaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
@@ -4546,7 +4542,7 @@ def add_newdoc(place, obj, doc):
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
- except:
+ except Exception:
pass
@@ -5049,7 +5045,7 @@ def insert(arr, obj, values, axis=None):
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
# it works likes the second case. (here a[:,0:1,:])
- values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
+ values = np.moveaxis(values, 0, axis)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 003774ce2..650b37f25 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -299,7 +299,7 @@ class AxisConcatenator(object):
if len(vec) == 3:
trans1d = int(vec[2])
continue
- except:
+ except Exception:
raise ValueError("unknown special directive")
try:
axis = int(item)
@@ -842,7 +842,7 @@ def diag_indices(n, ndim=2):
And use it to set the diagonal of an array of zeros to 1:
- >>> a = np.zeros((2, 2, 2), dtype=np.int)
+ >>> a = np.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 1e342b932..ffedcd68a 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -106,6 +106,46 @@ def _copyto(a, val, mask):
return a
+def _remove_nan_1d(arr1d, overwrite_input=False):
+ """
+    Equivalent to arr1d[~np.isnan(arr1d)], but in a different order
+
+ Presumably faster as it incurs fewer copies
+
+ Parameters
+ ----------
+ arr1d : ndarray
+ Array to remove nans from
+ overwrite_input : bool
+ True if `arr1d` can be modified in place
+
+ Returns
+ -------
+ res : ndarray
+ Array with nan elements removed
+ overwrite_input : bool
+ True if `res` can be modified in place, given the constraint on the
+ input
+ """
+
+ c = np.isnan(arr1d)
+ s = np.nonzero(c)[0]
+ if s.size == arr1d.size:
+ warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+ return arr1d[:0], True
+ elif s.size == 0:
+ return arr1d, overwrite_input
+ else:
+ if not overwrite_input:
+ arr1d = arr1d.copy()
+ # select non-nans at end of array
+ enonan = arr1d[-s.size:][~c[-s.size:]]
+ # fill nans in beginning of array with non-nans of end
+ arr1d[s[:enonan.size]] = enonan
+
+ return arr1d[:-s.size], True
+
+
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
@@ -554,7 +594,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Parameters
----------
a : array_like
- Array containing numbers whose sum is desired. If `a` is not an
+ Array containing numbers whose product is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
@@ -836,24 +876,12 @@ def _nanmedian1d(arr1d, overwrite_input=False):
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
- c = np.isnan(arr1d)
- s = np.where(c)[0]
- if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
+ overwrite_input=overwrite_input)
+ if arr1d.size == 0:
return np.nan
- elif s.size == 0:
- return np.median(arr1d, overwrite_input=overwrite_input)
- else:
- if overwrite_input:
- x = arr1d
- else:
- x = arr1d.copy()
- # select non-nans at end of array
- enonan = arr1d[-s.size:][~c[-s.size:]]
- # fill nans in beginning of array with non-nans of end
- x[s[:enonan.size]] = enonan
- # slice nans away
- return np.median(x[:-s.size], overwrite_input=True)
+
+ return np.median(arr1d, overwrite_input=overwrite_input)
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
@@ -1088,7 +1116,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
>>> a[0][1] = np.nan
>>> a
array([[ 10., nan, 4.],
- [ 3., 2., 1.]])
+ [ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
@@ -1123,10 +1151,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims and keepdims is not np._NoValue:
- if q.ndim == 0:
- return r.reshape(k)
- else:
- return r.reshape([len(q)] + k)
+ return r.reshape(q.shape + k)
else:
return r
@@ -1149,7 +1174,7 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
- result = np.rollaxis(result, axis)
+ result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
@@ -1158,34 +1183,16 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
- Private function for rank 1 arrays. Compute percentile ignoring
- NaNs.
-
+ Private function for rank 1 arrays. Compute percentile ignoring NaNs.
See nanpercentile for parameter usage
"""
- c = np.isnan(arr1d)
- s = np.where(c)[0]
- if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
- if q.ndim == 0:
- return np.nan
- else:
- return np.nan * np.ones((len(q),))
- elif s.size == 0:
- return np.percentile(arr1d, q, overwrite_input=overwrite_input,
- interpolation=interpolation)
- else:
- if overwrite_input:
- x = arr1d
- else:
- x = arr1d.copy()
- # select non-nans at end of array
- enonan = arr1d[-s.size:][~c[-s.size:]]
- # fill nans in beginning of array with non-nans of end
- x[s[:enonan.size]] = enonan
- # slice nans away
- return np.percentile(x[:-s.size], q, overwrite_input=True,
- interpolation=interpolation)
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
+ overwrite_input=overwrite_input)
+ if arr1d.size == 0:
+ return np.full(q.shape, np.nan)[()] # convert to scalar
+
+ return np.percentile(arr1d, q, overwrite_input=overwrite_input,
+ interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index dc1c951e7..7598b2c6b 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -424,7 +424,7 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
- except:
+ except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
@@ -443,6 +443,8 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
+ arr : array_like
+ Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
@@ -456,8 +458,6 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
- arr : array_like
- Array data to be saved.
See Also
--------
@@ -737,7 +737,7 @@ def _getconv(dtype):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
- elif issubclass(typ, np.complex):
+ elif issubclass(typ, complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return asbytes
@@ -1014,7 +1014,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if len(vals) == 0:
continue
if usecols:
- vals = [vals[i] for i in usecols]
+ vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
@@ -1071,7 +1071,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
- X : array_like
+ X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
@@ -1201,7 +1201,10 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
X = np.asarray(X)
# Handle 1-dimensional arrays
- if X.ndim == 1:
+ if X.ndim == 0 or X.ndim > 2:
+ raise ValueError(
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+ elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
@@ -1902,16 +1905,16 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
- (ddtype, mdtype) = (list(base)[0], np.bool)
+ (ddtype, mdtype) = (list(base)[0], bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
- mdtype = [(defaultfmt % i, np.bool)
+ mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
- mdtype = list(zip(names, [np.bool] * len(column_types)))
+ mdtype = list(zip(names, [bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
@@ -1937,7 +1940,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
- masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
@@ -1968,9 +1971,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
output = np.array(data, dtype)
if usemask:
if dtype.names:
- mdtype = [(_, np.bool) for _ in dtype.names]
+ mdtype = [(_, bool) for _ in dtype.names]
else:
- mdtype = np.bool
+ mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index b9542e848..e9ba38f46 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -70,6 +70,37 @@ def recursive_fill_fields(input, output):
return output
+def get_fieldspec(dtype):
+ """
+ Produce a list of name/dtype pairs corresponding to the dtype fields
+
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
+ string. As a result, this handles subarray dtypes
+
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
+ this (deliberately) discards field offsets.
+
+ Examples
+ --------
+ >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
+ >>> dt.descr
+ [(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
+ >>> get_fieldspec(dt)
+ [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
+
+ """
+ if dtype.names is None:
+ # .descr returns a nameless field, so we should too
+ return [('', dtype)]
+ else:
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
+ # keep any titles, if present
+ return [
+ (name if len(f) == 2 else (f[2], name), f[0])
+ for name, f in fields
+ ]
+
+
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
@@ -146,7 +177,7 @@ def flatten_descr(ndtype):
"""
names = ndtype.names
if names is None:
- return ndtype.descr
+ return (('', ndtype),)
else:
descr = []
for field in names:
@@ -158,6 +189,22 @@ def flatten_descr(ndtype):
return tuple(descr)
+def zip_dtype(seqarrays, flatten=False):
+ newdtype = []
+ if flatten:
+ for a in seqarrays:
+ newdtype.extend(flatten_descr(a.dtype))
+ else:
+ for a in seqarrays:
+ current = a.dtype
+ if current.names and len(current.names) <= 1:
+ # special case - dtypes of 0 or 1 field are flattened
+ newdtype.extend(get_fieldspec(current))
+ else:
+ newdtype.append(('', current))
+ return np.dtype(newdtype)
+
+
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -169,19 +216,7 @@ def zip_descr(seqarrays, flatten=False):
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
- newdtype = []
- if flatten:
- for a in seqarrays:
- newdtype.extend(flatten_descr(a.dtype))
- else:
- for a in seqarrays:
- current = a.dtype
- names = current.names or ()
- if len(names) > 1:
- newdtype.append(('', current.descr))
- else:
- newdtype.extend(current.descr)
- return np.dtype(newdtype).descr
+ return zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
@@ -376,13 +411,12 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
- if (not flatten) or \
- (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
+ # Make sure we have named fields
+ if not seqdtype.names:
+ seqdtype = np.dtype([('', seqdtype)])
+ if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
- # Make sure we have named fields
- if not seqdtype.names:
- seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
@@ -403,7 +437,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
- newdtype = zip_descr(seqarrays, flatten=flatten)
+ newdtype = zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
@@ -655,8 +689,9 @@ def append_fields(base, names, data, dtypes=None,
else:
data = data.pop()
#
- output = ma.masked_all(max(len(base), len(data)),
- dtype=base.dtype.descr + data.dtype.descr)
+ output = ma.masked_all(
+ max(len(base), len(data)),
+ dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
@@ -746,25 +781,21 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
- newdescr = dtype_l.descr
- names = [_[0] for _ in newdescr]
+ newdescr = get_fieldspec(dtype_l)
+ names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
- for descr in dtype_n.descr:
- name = descr[0] or ''
- if name not in names:
- newdescr.append(descr)
- names.append(name)
+ for fname, fdtype in get_fieldspec(dtype_n):
+ if fname not in names:
+ newdescr.append((fname, fdtype))
+ names.append(fname)
else:
- nameidx = names.index(name)
- current_descr = newdescr[nameidx]
+ nameidx = names.index(fname)
+ _, cdtype = newdescr[nameidx]
if autoconvert:
- if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
- current_descr = list(current_descr)
- current_descr[-1] = descr[1]
- newdescr[nameidx] = tuple(current_descr)
- elif descr[1] != current_descr[-1]:
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
+ elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
- (dict(newdescr)[name], descr[1]))
+ (cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
@@ -920,10 +951,10 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
- if (set.intersection(set(r1names), set(r2names)).difference(key) and
- not (r1postfix or r2postfix)):
+ collisions = (set(r1names) & set(r2names)) - set(key)
+ if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
- msg += "can't be empty"
+ msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
@@ -960,32 +991,38 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
#
# Build the new description of the output array .......
# Start with the key fields
- ndtype = [list(_) for _ in r1k.dtype.descr]
- # Add the other fields
- ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
- # Find the new list of names (it may be different from r1names)
- names = list(_[0] for _ in ndtype)
- for desc in r2.dtype.descr:
- desc = list(desc)
- name = desc[0]
+ ndtype = get_fieldspec(r1k.dtype)
+
+ # Add the fields from r1
+ for fname, fdtype in get_fieldspec(r1.dtype):
+ if fname not in key:
+ ndtype.append((fname, fdtype))
+
+ # Add the fields from r2
+ for fname, fdtype in get_fieldspec(r2.dtype):
# Have we seen the current name already ?
- if name in names:
- nameidx = ndtype.index(desc)
- current = ndtype[nameidx]
- # The current field is part of the key: take the largest dtype
- if name in key:
- current[-1] = max(desc[1], current[-1])
- # The current field is not part of the key: add the suffixes
- else:
- current[0] += r1postfix
- desc[0] += r2postfix
- ndtype.insert(nameidx + 1, desc)
- #... we haven't: just add the description to the current list
+ # we need to rebuild this list every time
+ names = list(name for name, dtype in ndtype)
+ try:
+ nameidx = names.index(fname)
+ except ValueError:
+ #... we haven't: just add the description to the current list
+ ndtype.append((fname, fdtype))
else:
- names.extend(desc[0])
- ndtype.append(desc)
- # Revert the elements to tuples
- ndtype = [tuple(_) for _ in ndtype]
+ # collision
+ _, cdtype = ndtype[nameidx]
+ if fname in key:
+ # The current field is part of the key: take the largest dtype
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
+ else:
+ # The current field is not part of the key: add the suffixes,
+ # and place the new field adjacent to the old one
+ ndtype[nameidx:nameidx + 1] = [
+ (fname + r1postfix, cdtype),
+ (fname + r2postfix, fdtype)
+ ]
+ # Rebuild a dtype from the new fields
+ ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 830943e72..53578e0e4 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -85,11 +85,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
-
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
-
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
@@ -240,14 +238,20 @@ def expand_dims(a, axis):
"""
Expand the shape of an array.
- Insert a new axis, corresponding to a given position in the array shape.
+ Insert a new axis that will appear at the `axis` position in the expanded
+ array shape.
+
+ .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
+ ``axis > a.ndim`` raised errors or put the new axis where documented.
+ Those axis values are now deprecated and will raise an AxisError in the
+ future.
Parameters
----------
a : array_like
Input array.
axis : int
- Position (amongst axes) where new axis is to be inserted.
+ Position in the expanded axes where the new axis is placed.
Returns
-------
@@ -291,7 +295,16 @@ def expand_dims(a, axis):
"""
a = asarray(a)
shape = a.shape
- axis = normalize_axis_index(axis, a.ndim + 1)
+ if axis > a.ndim or axis < -a.ndim - 1:
+ # 2017-05-17, 1.13.0
+ warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
+ "deprecated and will raise an AxisError in the future.",
+ DeprecationWarning, stacklevel=2)
+ # When the deprecation period expires, delete this if block,
+ if axis < 0:
+ axis = axis + a.ndim + 1
+ # and uncomment the following line.
+ # axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
@@ -317,7 +330,7 @@ def column_stack(tup):
See Also
--------
- hstack, vstack, concatenate
+ stack, hstack, vstack, concatenate
Examples
--------
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 545623c38..6c240db7f 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -100,10 +100,9 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
-
- if array.dtype.fields is None and x.dtype.fields is not None:
- # This should only happen if x.dtype is [('', 'Vx')]
- array.dtype = x.dtype
+ # The route via `__interface__` does not preserve structured
+ # dtypes. Since dtype should remain unchanged, we set it explicitly.
+ array.dtype = x.dtype
view = _maybe_view_as_subclass(x, array)
diff --git a/numpy/lib/tests/__init__.py b/numpy/lib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/lib/tests/__init__.py
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index f2ad0344a..a9cb157f3 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -6,7 +6,7 @@ from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
from numpy.testing import (
- run_module_suite, TestCase, assert_, SkipTest
+ run_module_suite, assert_, assert_equal, assert_raises, SkipTest,
)
import numpy.lib._datasource as datasource
@@ -55,7 +55,7 @@ malicious_files = ['/etc/shadow', '../../shadow',
magic_line = b'three is the magic number'
-# Utility functions used by many TestCases
+# Utility functions used by many tests
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
@@ -95,12 +95,12 @@ def invalid_httpfile():
return http_fakefile
-class TestDataSourceOpen(TestCase):
- def setUp(self):
+class TestDataSourceOpen(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -111,7 +111,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidHTTP(self):
url = invalid_httpurl()
- self.assertRaises(IOError, self.ds.open, url)
+ assert_raises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError as e:
@@ -119,7 +119,7 @@ class TestDataSourceOpen(TestCase):
assert_(e.errno is None)
def test_InvalidHTTPCacheURLError(self):
- self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
+ assert_raises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
@@ -129,7 +129,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
- self.assertRaises(IOError, self.ds.open, invalid_file)
+ assert_raises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
@@ -145,7 +145,7 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
def test_ValidBz2File(self):
try:
@@ -161,15 +161,15 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
-class TestDataSourceExists(TestCase):
- def setUp(self):
+class TestDataSourceExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -177,7 +177,7 @@ class TestDataSourceExists(TestCase):
assert_(self.ds.exists(valid_httpurl()))
def test_InvalidHTTP(self):
- self.assertEqual(self.ds.exists(invalid_httpurl()), False)
+ assert_equal(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
@@ -191,15 +191,15 @@ class TestDataSourceExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.ds.exists(tmpfile), False)
+ assert_equal(self.ds.exists(tmpfile), False)
-class TestDataSourceAbspath(TestCase):
- def setUp(self):
+class TestDataSourceAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -207,30 +207,30 @@ class TestDataSourceAbspath(TestCase):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
+ assert_equal(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertEqual(tmpfile, self.ds.abspath(tmpfilename))
+ assert_equal(tmpfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
+ assert_equal(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
+ assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
+ assert_(invalidfile != self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
+ assert_(invalidfile != self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
@@ -259,12 +259,12 @@ class TestDataSourceAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryAbspath(TestCase):
- def setUp(self):
+class TestRepositoryAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -273,7 +273,7 @@ class TestRepositoryAbspath(TestCase):
local_path = os.path.join(self.repos._destpath, netloc,
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
- self.assertEqual(local_path, filepath)
+ assert_equal(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
@@ -292,12 +292,12 @@ class TestRepositoryAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryExists(TestCase):
- def setUp(self):
+class TestRepositoryExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -308,7 +308,7 @@ class TestRepositoryExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.repos.exists(tmpfile), False)
+ assert_equal(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert_(self.repos.exists(valid_httpurl()))
@@ -325,11 +325,11 @@ class TestRepositoryExists(TestCase):
assert_(self.repos.exists(tmpfile))
-class TestOpenFunc(TestCase):
- def setUp(self):
+class TestOpenFunc(object):
+ def setup(self):
self.tmpdir = mkdtemp()
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 6c0b2c6db..03192896c 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -6,8 +6,7 @@ from datetime import date
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_allclose,
- assert_raises
+ run_module_suite, assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
@@ -15,7 +14,7 @@ from numpy.lib._iotools import (
)
-class TestLineSplitter(TestCase):
+class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
@@ -79,7 +78,7 @@ class TestLineSplitter(TestCase):
# -----------------------------------------------------------------------------
-class TestNameValidator(TestCase):
+class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
@@ -140,7 +139,7 @@ def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
-class TestStringConverter(TestCase):
+class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
@@ -254,11 +253,11 @@ class TestStringConverter(TestCase):
assert_(converter(val) == 9223372043271415339)
-class TestMiscFunctions(TestCase):
+class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
- ndtype = np.dtype(np.float)
+ ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 056aa4582..fce4c451d 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -4,12 +4,11 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
- TestCase)
+from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,)
from numpy.lib import pad
-class TestConditionalShortcuts(TestCase):
+class TestConditionalShortcuts(object):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
@@ -52,7 +51,7 @@ class TestConditionalShortcuts(TestCase):
pad(test, pad_amt, mode=mode, stat_length=30))
-class TestStatistic(TestCase):
+class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
@@ -346,7 +345,7 @@ class TestStatistic(TestCase):
assert_array_equal(a, b)
-class TestConstant(TestCase):
+class TestConstant(object):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
@@ -491,7 +490,7 @@ class TestConstant(TestCase):
assert_allclose(test, expected)
-class TestLinearRamp(TestCase):
+class TestLinearRamp(object):
def test_check_simple(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
@@ -531,7 +530,7 @@ class TestLinearRamp(TestCase):
assert_allclose(test, expected)
-class TestReflect(TestCase):
+class TestReflect(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect')
@@ -640,8 +639,13 @@ class TestReflect(TestCase):
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
+ def test_check_padding_an_empty_array(self):
+ a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect')
+ b = np.zeros((0, 5))
+ assert_array_equal(a, b)
+
-class TestSymmetric(TestCase):
+class TestSymmetric(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric')
@@ -775,7 +779,7 @@ class TestSymmetric(TestCase):
assert_array_equal(a, b)
-class TestWrap(TestCase):
+class TestWrap(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'wrap')
@@ -871,7 +875,7 @@ class TestWrap(TestCase):
assert_array_equal(a, b)
-class TestStatLen(TestCase):
+class TestStatLen(object):
def test_check_simple(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
@@ -894,7 +898,7 @@ class TestStatLen(TestCase):
assert_array_equal(a, b)
-class TestEdge(TestCase):
+class TestEdge(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -933,7 +937,7 @@ class TestEdge(TestCase):
assert_array_equal(padded, expected)
-class TestZeroPadWidth(TestCase):
+class TestZeroPadWidth(object):
def test_zero_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -941,7 +945,7 @@ class TestZeroPadWidth(TestCase):
assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
-class TestLegacyVectorFunction(TestCase):
+class TestLegacyVectorFunction(object):
def test_legacy_vector_functionality(self):
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
@@ -963,7 +967,7 @@ class TestLegacyVectorFunction(TestCase):
assert_array_equal(a, b)
-class TestNdarrayPadWidth(TestCase):
+class TestNdarrayPadWidth(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -984,7 +988,7 @@ class TestNdarrayPadWidth(TestCase):
assert_array_equal(a, b)
-class TestUnicodeInput(TestCase):
+class TestUnicodeInput(object):
def test_unicode_mode(self):
constant_mode = u'constant'
a = np.pad([1], 2, mode=constant_mode)
@@ -992,7 +996,7 @@ class TestUnicodeInput(TestCase):
assert_array_equal(a, b)
-class ValueError1(TestCase):
+class TestValueError1(object):
def test_check_simple(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1014,8 +1018,14 @@ class ValueError1(TestCase):
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
+ def test_check_empty_array(self):
+ assert_raises(ValueError, pad, [], 4, mode='reflect')
+ assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect')
+ assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)),
+ mode='reflect')
+
-class ValueError2(TestCase):
+class TestValueError2(object):
def test_check_negative_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1024,7 +1034,7 @@ class ValueError2(TestCase):
**kwargs)
-class ValueError3(TestCase):
+class TestValueError3(object):
def test_check_kwarg_not_allowed(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, 4, mode='mean',
@@ -1052,7 +1062,7 @@ class ValueError3(TestCase):
mode='constant')
-class TypeError1(TestCase):
+class TestTypeError1(object):
def test_float(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index fa664ff24..b8ced41e8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -5,14 +5,14 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_array_equal, assert_equal, assert_raises
+ run_module_suite, assert_array_equal, assert_equal, assert_raises,
)
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
-class TestSetOps(TestCase):
+class TestSetOps(object):
def test_intersect1d(self):
# unique inputs
@@ -89,28 +89,28 @@ class TestSetOps(TestCase):
x = isin(a, b)
y = isin_slow(a, b)
assert_array_equal(x, y)
-
+
#multidimensional arrays in both arguments
a = np.arange(24).reshape([2, 3, 4])
b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
assert_isin_equal(a, b)
-
+
#array-likes as both arguments
c = [(9, 8), (7, 6)]
d = (9, 7)
assert_isin_equal(c, d)
-
+
#zero-d array:
f = np.array(3)
assert_isin_equal(f, b)
assert_isin_equal(a, f)
assert_isin_equal(f, f)
-
+
#scalar:
assert_isin_equal(5, b)
assert_isin_equal(a, 6)
assert_isin_equal(5, 6)
-
+
#empty array-like:
x = []
assert_isin_equal(x, b)
@@ -252,7 +252,7 @@ class TestSetOps(TestCase):
assert_array_equal(c1, c2)
-class TestUnique(TestCase):
+class TestUnique(object):
def test_unique_1d(self):
@@ -355,6 +355,16 @@ class TestUnique(TestCase):
a2, a2_inv = np.unique(a, return_inverse=True)
assert_array_equal(a2_inv, np.zeros(5))
+ # test for ticket #9137
+ a = []
+ a1_idx = np.unique(a, return_index=True)[1]
+ a2_inv = np.unique(a, return_inverse=True)[1]
+ a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:]
+ assert_equal(a1_idx.dtype, np.intp)
+ assert_equal(a2_inv.dtype, np.intp)
+ assert_equal(a3_idx.dtype, np.intp)
+ assert_equal(a3_inv.dtype, np.intp)
+
def test_unique_axis_errors(self):
assert_raises(TypeError, self._run_axis_tests, object)
assert_raises(TypeError, self._run_axis_tests,
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index cc8ba55e5..4db364ad5 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -2,12 +2,12 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_almost_equal,
- assert_allclose, assert_equal
+ run_module_suite, assert_, assert_almost_equal, assert_allclose,
+ assert_equal
)
-class TestFinancial(TestCase):
+class TestFinancial(object):
def test_rate(self):
assert_almost_equal(np.rate(10, 0, -3500, 10000),
0.1107, 4)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 93727ef0c..2d2b4cea2 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -615,6 +615,11 @@ def test_version_2_0():
format.write_array(f, d)
assert_(w[0].category is UserWarning)
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
@@ -758,6 +763,7 @@ def test_read_array_header_1_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -770,6 +776,7 @@ def test_read_array_header_2_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -811,7 +818,7 @@ def test_large_file_support():
# avoid actually writing 5GB
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
- except:
+ except Exception:
raise SkipTest("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index d7d00758e..c64081088 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -6,13 +6,13 @@ import sys
import decimal
import numpy as np
+from numpy import ma
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns,
- assert_raises_regex, dec, suppress_warnings
+ assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
+ dec, suppress_warnings, HAS_REFCOUNT,
)
-from numpy.testing.utils import HAS_REFCOUNT
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
@@ -32,9 +32,9 @@ def get_mat(n):
return data
-class TestRot90(TestCase):
+class TestRot90(object):
def test_basic(self):
- self.assertRaises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
@@ -100,12 +100,12 @@ class TestRot90(TestCase):
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
-class TestFlip(TestCase):
+class TestFlip(object):
def test_axes(self):
- self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(ValueError, np.flip, np.ones(4), axis=1)
+ assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
def test_basic_lr(self):
a = get_mat(4)
@@ -173,7 +173,7 @@ class TestFlip(TestCase):
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
-class TestAny(TestCase):
+class TestAny(object):
def test_basic(self):
y1 = [0, 0, 1, 0]
@@ -190,7 +190,7 @@ class TestAny(TestCase):
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
-class TestAll(TestCase):
+class TestAll(object):
def test_basic(self):
y1 = [0, 1, 1, 0]
@@ -208,7 +208,7 @@ class TestAll(TestCase):
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
-class TestCopy(TestCase):
+class TestCopy(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
@@ -236,7 +236,7 @@ class TestCopy(TestCase):
assert_(a_fort_copy.flags.f_contiguous)
-class TestAverage(TestCase):
+class TestAverage(object):
def test_basic(self):
y1 = np.array([1, 2, 3])
@@ -346,9 +346,9 @@ class TestAverage(TestCase):
a = np.array([decimal.Decimal(x) for x in range(10)])
w = np.array([decimal.Decimal(1) for _ in range(10)])
w /= w.sum()
- assert_almost_equal(a.mean(0), average(a, weights=w))
+ assert_almost_equal(a.mean(0), average(a, weights=w))
-class TestSelect(TestCase):
+class TestSelect(object):
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
@@ -420,7 +420,7 @@ class TestSelect(TestCase):
select(conditions, choices)
-class TestInsert(TestCase):
+class TestInsert(object):
def test_basic(self):
a = [1, 2, 3]
@@ -521,7 +521,7 @@ class TestInsert(TestCase):
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
-class TestAmax(TestCase):
+class TestAmax(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -533,7 +533,7 @@ class TestAmax(TestCase):
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
-class TestAmin(TestCase):
+class TestAmin(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -545,7 +545,7 @@ class TestAmin(TestCase):
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
-class TestPtp(TestCase):
+class TestPtp(object):
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
@@ -557,7 +557,7 @@ class TestPtp(TestCase):
assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
-class TestCumsum(TestCase):
+class TestCumsum(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -580,7 +580,7 @@ class TestCumsum(TestCase):
assert_array_equal(np.cumsum(a2, axis=1), tgt)
-class TestProd(TestCase):
+class TestProd(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -590,8 +590,8 @@ class TestProd(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.prod, a)
- self.assertRaises(ArithmeticError, np.prod, a2, 1)
+ assert_raises(ArithmeticError, np.prod, a)
+ assert_raises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -600,7 +600,7 @@ class TestProd(TestCase):
np.array([24, 1890, 600], ctype))
-class TestCumprod(TestCase):
+class TestCumprod(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -610,9 +610,9 @@ class TestCumprod(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.cumprod, a)
- self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
- self.assertRaises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a2, 1)
+ assert_raises(ArithmeticError, np.cumprod, a)
else:
assert_array_equal(np.cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
@@ -627,7 +627,7 @@ class TestCumprod(TestCase):
[10, 30, 120, 600]], ctype))
-class TestDiff(TestCase):
+class TestDiff(object):
def test_basic(self):
x = [1, 4, 6, 7, 12]
@@ -638,6 +638,29 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
+ x = [1.1, 2.2, 3.0, -0.2, -0.1]
+ out = np.array([1.1, 0.8, -3.2, 0.1])
+ assert_almost_equal(diff(x), out)
+
+ x = [True, True, False, False]
+ out = np.array([False, True, False])
+ out2 = np.array([True, True])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+
+ def test_axis(self):
+ x = np.zeros((10, 20, 30))
+ x[:, 1::2, :] = 1
+ exp = np.ones((10, 19, 30))
+ exp[:, 1::2, :] = -1
+ assert_array_equal(diff(x), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
+ assert_array_equal(diff(x, axis=1), exp)
+ assert_array_equal(diff(x, axis=-2), exp)
+ assert_raises(np.AxisError, diff, x, axis=3)
+ assert_raises(np.AxisError, diff, x, axis=-4)
+
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
@@ -649,10 +672,49 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
+ def test_n(self):
+ x = list(range(3))
+ assert_raises(ValueError, diff, x, n=-1)
+ output = [diff(x, n=n) for n in range(1, 5)]
+ expected = [[1, 1], [0], [], []]
+ assert_(diff(x, n=0) is x)
+ for n, (expected, out) in enumerate(zip(expected, output), start=1):
+ assert_(type(out) is np.ndarray)
+ assert_array_equal(out, expected)
+ assert_equal(out.dtype, np.int_)
+ assert_equal(len(out), max(0, len(x) - n))
+
+ def test_times(self):
+ x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ expected = [
+ np.array([1, 1], dtype='timedelta64[D]'),
+ np.array([0], dtype='timedelta64[D]'),
+ ]
+ expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
+ for n, exp in enumerate(expected, start=1):
+ out = diff(x, n=n)
+ assert_array_equal(out, exp)
+ assert_equal(out.dtype, exp.dtype)
-class TestDelete(TestCase):
+ def test_subclass(self):
+ x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
+ mask=[[False, False], [True, False],
+ [False, True], [True, True], [False, False]])
+ out = diff(x)
+ assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
+ assert_array_equal(out.mask, [[False], [True],
+ [True], [True], [False]])
+ assert_(type(out) is type(x))
- def setUp(self):
+ out3 = diff(x, n=3)
+ assert_array_equal(out3.data, [[], [], [], [], []])
+ assert_array_equal(out3.mask, [[], [], [], [], []])
+ assert_(type(out3) is type(x))
+
+
+class TestDelete(object):
+
+ def setup(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
@@ -725,7 +787,7 @@ class TestDelete(TestCase):
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
-class TestGradient(TestCase):
+class TestGradient(object):
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -735,7 +797,7 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
- def test_args(self):
+ def test_args(self):
dx = np.cumsum(np.ones(5))
dx_uneven = [1., 2., 5., 9., 11.]
f_2d = np.arange(25).reshape(5, 5)
@@ -825,15 +887,15 @@ class TestGradient(TestCase):
def test_spacing(self):
f = np.array([0, 2., 3., 4., 5., 5.])
- f = np.tile(f, (6,1)) + f.reshape(-1, 1)
+ f = np.tile(f, (6,1)) + f.reshape(-1, 1)
x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
x_even = np.arange(6.)
-
+
fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
-
+
# evenly spaced
for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
@@ -843,19 +905,19 @@ class TestGradient(TestCase):
axis=None, edge_order=edge_order)
assert_array_equal(res1, res2)
assert_array_equal(res2, res3)
- assert_almost_equal(res1[0], exp_res.T)
- assert_almost_equal(res1[1], exp_res)
-
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
res1 = gradient(f, 1., axis=0, edge_order=edge_order)
res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_almost_equal(res2, exp_res.T)
-
+
res1 = gradient(f, 1., axis=1, edge_order=edge_order)
res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_array_equal(res2, exp_res)
-
+
# unevenly spaced
for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
res1 = gradient(f, x_uneven, x_uneven,
@@ -865,13 +927,13 @@ class TestGradient(TestCase):
assert_array_equal(res1, res2)
assert_almost_equal(res1[0], exp_res.T)
assert_almost_equal(res1[1], exp_res)
-
+
res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
assert_almost_equal(res1, exp_res.T)
-
+
res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
assert_almost_equal(res1, exp_res)
-
+
# mixed
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
@@ -879,14 +941,14 @@ class TestGradient(TestCase):
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord1.T)
assert_almost_equal(res1[1], fdx_uneven_ord1)
-
+
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
assert_array_equal(res1[0], res2[1])
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord2.T)
assert_almost_equal(res1[1], fdx_uneven_ord2)
-
+
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
@@ -912,7 +974,7 @@ class TestGradient(TestCase):
assert_raises(np.AxisError, gradient, x, axis=3)
assert_raises(np.AxisError, gradient, x, axis=-3)
# assert_raises(TypeError, gradient, x, axis=[1,])
-
+
def test_timedelta64(self):
# Make sure gradient() can handle special types like timedelta64
x = np.array(
@@ -924,20 +986,26 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
+ def test_inexact_dtypes(self):
+ for dt in [np.float16, np.float32, np.float64]:
+ # dtypes should not be promoted in a different way to what diff does
+ x = np.array([1, 2, 3], dtype=dt)
+ assert_equal(gradient(x).dtype, np.diff(x).dtype)
+
def test_values(self):
# needs at least 2 points for edge_order ==1
gradient(np.arange(2), edge_order=1)
# needs at least 3 points for edge_order ==1
gradient(np.arange(3), edge_order=2)
-
+
assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
- assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
-class TestAngle(TestCase):
+class TestAngle(object):
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
@@ -953,7 +1021,7 @@ class TestAngle(TestCase):
assert_array_almost_equal(z, zo, 11)
-class TestTrimZeros(TestCase):
+class TestTrimZeros(object):
"""
Only testing for integer splits.
@@ -976,7 +1044,7 @@ class TestTrimZeros(TestCase):
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
-class TestExtins(TestCase):
+class TestExtins(object):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -1015,7 +1083,7 @@ class TestExtins(TestCase):
assert_array_equal(a, ac)
-class TestVectorize(TestCase):
+class TestVectorize(object):
def test_simple(self):
def addsubtract(a, b):
@@ -1074,7 +1142,7 @@ class TestVectorize(TestCase):
import random
try:
vectorize(random.randrange) # Should succeed
- except:
+ except Exception:
raise AssertionError()
def test_keywords2_ticket_2100(self):
@@ -1347,7 +1415,7 @@ class TestVectorize(TestCase):
f(x)
-class TestDigitize(TestCase):
+class TestDigitize(object):
def test_forward(self):
x = np.arange(-6, 5)
@@ -1420,7 +1488,7 @@ class TestDigitize(TestCase):
assert_(not isinstance(digitize(b, a, True), A))
-class TestUnwrap(TestCase):
+class TestUnwrap(object):
def test_simple(self):
# check that unwrap removes jumps greather that 2*pi
@@ -1429,7 +1497,7 @@ class TestUnwrap(TestCase):
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
-class TestFilterwindows(TestCase):
+class TestFilterwindows(object):
def test_hanning(self):
# check symmetry
@@ -1460,7 +1528,7 @@ class TestFilterwindows(TestCase):
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
-class TestTrapz(TestCase):
+class TestTrapz(object):
def test_simple(self):
x = np.arange(-10, 10, .1)
@@ -1532,7 +1600,7 @@ class TestTrapz(TestCase):
assert_almost_equal(mr, r)
-class TestSinc(TestCase):
+class TestSinc(object):
def test_simple(self):
assert_(sinc(0) == 1)
@@ -1549,12 +1617,12 @@ class TestSinc(TestCase):
assert_array_equal(y1, y3)
-class TestHistogram(TestCase):
+class TestHistogram(object):
- def setUp(self):
+ def setup(self):
pass
- def tearDown(self):
+ def teardown(self):
pass
def test_simple(self):
@@ -1650,16 +1718,16 @@ class TestHistogram(TestCase):
# Check the type of the returned histogram
a = np.arange(10) + .5
h, b = histogram(a)
- assert_(np.issubdtype(h.dtype, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, normed=True)
- assert_(np.issubdtype(h.dtype, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
h, b = histogram(a, weights=np.ones(10, int))
- assert_(np.issubdtype(h.dtype, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, weights=np.ones(10, float))
- assert_(np.issubdtype(h.dtype, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
def test_f32_rounding(self):
# gh-4799, check that the rounding of the edges works with float32
@@ -1760,16 +1828,16 @@ class TestHistogram(TestCase):
left_edges = edges[:-1][mask]
right_edges = edges[1:][mask]
for x, left, right in zip(arr, left_edges, right_edges):
- self.assertGreaterEqual(x, left)
- self.assertLess(x, right)
+ assert_(x >= left)
+ assert_(x < right)
def test_last_bin_inclusive_range(self):
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
- self.assertEqual(hist[-1], 1)
+ assert_equal(hist[-1], 1)
-class TestHistogramOptimBinNums(TestCase):
+class TestHistogramOptimBinNums(object):
"""
Provide test coverage when using provided estimators for optimal number of
bins
@@ -1879,7 +1947,7 @@ class TestHistogramOptimBinNums(TestCase):
completely ignored. All test values have been precomputed and
the shouldn't change.
"""
- # some basic sanity checking, with some fixed data.
+ # some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {
50: {'fd': 8, 'scott': 8, 'rice': 15,
@@ -1891,7 +1959,7 @@ class TestHistogramOptimBinNums(TestCase):
}
for testlen, expectedResults in basic_test.items():
- # create some sort of non uniform data to test with
+ # create some sort of non uniform data to test with
# (3 peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
@@ -1909,11 +1977,11 @@ class TestHistogramOptimBinNums(TestCase):
"""
estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
for estimator in estimator_list:
- assert_raises(TypeError, histogram, [1, 2, 3],
+ assert_raises(TypeError, histogram, [1, 2, 3],
estimator, weights=[1, 2, 3])
-class TestHistogramdd(TestCase):
+class TestHistogramdd(object):
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
@@ -2053,7 +2121,7 @@ class TestHistogramdd(TestCase):
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
-class TestUnique(TestCase):
+class TestUnique(object):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
@@ -2065,7 +2133,7 @@ class TestUnique(TestCase):
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
-class TestCheckFinite(TestCase):
+class TestCheckFinite(object):
def test_simple(self):
a = [1, 2, 3]
@@ -2082,7 +2150,7 @@ class TestCheckFinite(TestCase):
assert_(a.dtype == np.float64)
-class TestCorrCoef(TestCase):
+class TestCorrCoef(object):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
@@ -2167,7 +2235,7 @@ class TestCorrCoef(TestCase):
assert_(np.all(np.abs(c) <= 1.0))
-class TestCov(TestCase):
+class TestCov(object):
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
@@ -2265,7 +2333,7 @@ class TestCov(TestCase):
self.res1)
-class Test_I0(TestCase):
+class Test_I0(object):
def test_simple(self):
assert_almost_equal(
@@ -2291,7 +2359,7 @@ class Test_I0(TestCase):
[1.05884290, 1.06432317]]))
-class TestKaiser(TestCase):
+class TestKaiser(object):
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
@@ -2310,7 +2378,7 @@ class TestKaiser(TestCase):
kaiser(3, 4)
-class TestMsort(TestCase):
+class TestMsort(object):
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
@@ -2323,7 +2391,7 @@ class TestMsort(TestCase):
[0.64864341, 0.79115165, 0.96098397]]))
-class TestMeshgrid(TestCase):
+class TestMeshgrid(object):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
@@ -2412,7 +2480,7 @@ class TestMeshgrid(TestCase):
assert_equal(x[1, :], X)
-class TestPiecewise(TestCase):
+class TestPiecewise(object):
def test_simple(self):
# Condition is single bool list
@@ -2488,7 +2556,7 @@ class TestPiecewise(TestCase):
[3., 3., 1.]]))
-class TestBincount(TestCase):
+class TestBincount(object):
def test_simple(self):
y = np.bincount(np.arange(4))
@@ -2575,7 +2643,7 @@ class TestBincount(TestCase):
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
-class TestInterp(TestCase):
+class TestInterp(object):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2602,28 +2670,28 @@ class TestInterp(TestCase):
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
- inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
- inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
- inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
- inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
@@ -2693,7 +2761,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestPercentile(TestCase):
+class TestPercentile(object):
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2797,7 +2865,7 @@ class TestPercentile(TestCase):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50), 5.5)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
assert_equal(np.percentile(x, 50, axis=0), r0)
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
@@ -2818,7 +2886,7 @@ class TestPercentile(TestCase):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
c0 = np.percentile(x, 50, interpolation='lower', axis=0)
assert_equal(c0, r0)
@@ -2950,7 +3018,7 @@ class TestPercentile(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
@@ -3124,7 +3192,7 @@ class TestPercentile(TestCase):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
-class TestMedian(TestCase):
+class TestMedian(object):
def test_basic(self):
a0 = np.array(1)
@@ -3331,7 +3399,7 @@ class TestMedian(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
@@ -3381,7 +3449,7 @@ class TestMedian(TestCase):
(1, 1, 7, 1))
-class TestAdd_newdoc_ufunc(TestCase):
+class TestAdd_newdoc_ufunc(object):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
@@ -3391,15 +3459,15 @@ class TestAdd_newdoc_ufunc(TestCase):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-class TestAdd_newdoc(TestCase):
+class TestAdd_newdoc(object):
@dec.skipif(sys.flags.optimize == 2)
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
- self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
- self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300)
- self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+ assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
+ assert_(len(np.core.ufunc.identity.__doc__) > 300)
+ assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
if __name__ == "__main__":
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 5b791026b..452b3d6a2 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises
)
from numpy.lib.index_tricks import (
@@ -11,7 +11,7 @@ from numpy.lib.index_tricks import (
)
-class TestRavelUnravelIndex(TestCase):
+class TestRavelUnravelIndex(object):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
@@ -110,11 +110,11 @@ class TestRavelUnravelIndex(TestCase):
def test_writeability(self):
# See gh-7269
x, y = np.unravel_index([1, 2, 3], (4, 5))
- self.assertTrue(x.flags.writeable)
- self.assertTrue(y.flags.writeable)
+ assert_(x.flags.writeable)
+ assert_(y.flags.writeable)
-class TestGrid(TestCase):
+class TestGrid(object):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
@@ -147,7 +147,7 @@ class TestGrid(TestCase):
0.2*np.ones(20, 'd'), 11)
-class TestConcatenator(TestCase):
+class TestConcatenator(object):
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
@@ -206,14 +206,14 @@ class TestConcatenator(TestCase):
assert_equal(type(actual), type(expected))
-class TestNdenumerate(TestCase):
+class TestNdenumerate(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
-class TestIndexExpression(TestCase):
+class TestIndexExpression(object):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
@@ -227,7 +227,7 @@ class TestIndexExpression(TestCase):
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
-class TestIx_(TestCase):
+class TestIx_(object):
def test_regression_1(self):
# Test empty inputs create ouputs of indexing type, gh-5804
# Test both lists and arrays
@@ -243,7 +243,7 @@ class TestIx_(TestCase):
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
- assert_(np.issubdtype(a.dtype, int))
+ assert_(np.issubdtype(a.dtype, np.integer))
def test_bool(self):
bool_a = [True, False, True, True]
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 868089551..6f7fcc54c 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -17,9 +17,9 @@ from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- TestCase, run_module_suite, assert_warns, assert_,
- assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal, temppath, dec, IS_PYPY, suppress_warnings
+ run_module_suite, assert_warns, assert_, assert_raises_regex,
+ assert_raises, assert_allclose, assert_array_equal, temppath, dec, IS_PYPY,
+ suppress_warnings
)
@@ -165,7 +165,7 @@ class RoundtripTest(object):
self.check_roundtrips(a)
-class TestSaveLoad(RoundtripTest, TestCase):
+class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
@@ -173,7 +173,7 @@ class TestSaveLoad(RoundtripTest, TestCase):
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
-class TestSavezLoad(RoundtripTest, TestCase):
+class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
@@ -304,7 +304,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
assert_(fp.closed)
-class TestSaveTxt(TestCase):
+class TestSaveTxt(object):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
@@ -329,6 +329,12 @@ class TestSaveTxt(TestCase):
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -373,7 +379,7 @@ class TestSaveTxt(TestCase):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
- a = np.array([(1, 2), (3, 4)], dtype=np.int)
+ a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
@@ -461,7 +467,7 @@ class TestSaveTxt(TestCase):
assert_array_equal(a, b)
-class TestLoadTxt(TestCase):
+class TestLoadTxt(object):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
@@ -485,7 +491,7 @@ class TestLoadTxt(TestCase):
c.write('1 2\n3 4')
c.seek(0)
- x = np.loadtxt(c, dtype=np.int)
+ x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
@@ -721,7 +727,7 @@ class TestLoadTxt(TestCase):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -751,11 +757,11 @@ class TestLoadTxt(TestCase):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
- tgt = np.hstack((tgt, -tgt)).astype(np.float)
+ tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
- for dt in [np.float, np.float32]:
+ for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
@@ -765,7 +771,7 @@ class TestLoadTxt(TestCase):
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
- res = np.loadtxt(c, dtype=np.complex)
+ res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_universal_newline(self):
@@ -864,7 +870,7 @@ class TestLoadTxt(TestCase):
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
-class Testfromregex(TestCase):
+class Testfromregex(object):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
@@ -902,7 +908,7 @@ class Testfromregex(TestCase):
#####--------------------------------------------------------------------------
-class TestFromTxt(TestCase):
+class TestFromTxt(object):
#
def test_record(self):
# Test w/ explicit dtype
@@ -1178,19 +1184,19 @@ M 33 21.99
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
- control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
- control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -1200,7 +1206,7 @@ M 33 21.99
dtype=ndtype)
assert_equal(test, control)
- ndtype = [('nest', [('idx', int), ('code', np.object)])]
+ ndtype = [('nest', [('idx', int), ('code', object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
@@ -1337,7 +1343,7 @@ M 33 21.99
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
@@ -1345,7 +1351,7 @@ M 33 21.99
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.float), ('B', np.float)])
+ dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1414,7 +1420,7 @@ M 33 21.99
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.float)])
+ dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1682,15 +1688,15 @@ M 33 21.99
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1701,15 +1707,15 @@ M 33 21.99
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1717,16 +1723,16 @@ M 33 21.99
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
- dtype=[('a', np.int), ('b', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('a', int), ('b', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
- dtype = [('a', np.int), ('b', np.float)]
+ dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
- self.assertTrue(isinstance(test, np.recarray))
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_max_rows(self):
@@ -1827,7 +1833,7 @@ M 33 21.99
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
- assert_(test.dtype['f0'] == np.float)
+ assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.integer)
@@ -1836,7 +1842,7 @@ M 33 21.99
assert_equal(test['f2'], 1024)
-class TestPathUsage(TestCase):
+class TestPathUsage(object):
# Test that pathlib.Path can be used
@np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_loadtxt(self):
@@ -1919,8 +1925,8 @@ class TestPathUsage(TestCase):
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
@np.testing.dec.skipif(Path is None, "No pathlib.Path")
@@ -1933,8 +1939,8 @@ class TestPathUsage(TestCase):
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index db38bdfd6..94f06c336 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -6,7 +6,8 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises)
+ run_module_suite, assert_, assert_equal, assert_raises
+ )
PY2 = sys.version_info.major < 3
@@ -99,7 +100,7 @@ _ALL_BINARY_OPERATORS = [
]
-class TestNDArrayOperatorsMixin(TestCase):
+class TestNDArrayOperatorsMixin(object):
def test_array_like_add(self):
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 466ceefb5..3d362fc6e 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -4,7 +4,7 @@ import warnings
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
+ run_module_suite, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
@@ -35,7 +35,7 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
-class TestNanFunctions_MinMax(TestCase):
+class TestNanFunctions_MinMax(object):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
@@ -165,7 +165,7 @@ class TestNanFunctions_MinMax(TestCase):
assert_(issubclass(w[0].category, RuntimeWarning))
-class TestNanFunctions_ArgminArgmax(TestCase):
+class TestNanFunctions_ArgminArgmax(object):
nanfuncs = [np.nanargmin, np.nanargmax]
@@ -224,7 +224,7 @@ class TestNanFunctions_ArgminArgmax(TestCase):
assert_(np.isscalar(res))
-class TestNanFunctions_IntTypes(TestCase):
+class TestNanFunctions_IntTypes(object):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@@ -396,7 +396,7 @@ class SharedNanFunctionsTestsMixin(object):
assert_(np.isscalar(res))
-class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
@@ -430,7 +430,7 @@ class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_equal(res, tgt)
-class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
@@ -513,7 +513,7 @@ class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_almost_equal(res, tgt)
-class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
@@ -585,7 +585,7 @@ class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
assert_(len(w) == 0)
-class TestNanFunctions_Median(TestCase):
+class TestNanFunctions_Median(object):
def test_mutation(self):
# Check that passed array is not modified.
@@ -749,7 +749,7 @@ class TestNanFunctions_Median(TestCase):
([np.nan] * i) + [-inf] * j)
-class TestNanFunctions_Percentile(TestCase):
+class TestNanFunctions_Percentile(object):
def test_mutation(self):
# Check that passed array is not modified.
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 0725c186d..9a4650825 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -80,12 +80,12 @@ poly1d([ 2.])
'''
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
)
-class TestDocs(TestCase):
+class TestDocs(object):
def test_doctests(self):
return rundocs()
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 0940d37b0..bc9f8d7b6 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -4,7 +4,9 @@ import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
-from numpy.testing import TestCase, run_module_suite, assert_, assert_raises
+from numpy.testing import (
+ run_module_suite, assert_, assert_raises, dec
+ )
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
@@ -14,10 +16,10 @@ get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
-class TestRecFunctions(TestCase):
+class TestRecFunctions(object):
# Misc tests
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
@@ -191,7 +193,7 @@ class TestRecFunctions(TestCase):
assert_equal(test[0], a[test[-1]])
-class TestRecursiveFillFields(TestCase):
+class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
@@ -214,10 +216,10 @@ class TestRecursiveFillFields(TestCase):
assert_equal(test, control)
-class TestMergeArrays(TestCase):
+class TestMergeArrays(object):
# Test merge_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -347,10 +349,10 @@ class TestMergeArrays(TestCase):
assert_equal(test, control)
-class TestAppendFields(TestCase):
+class TestAppendFields(object):
# Test append_fields
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -401,9 +403,9 @@ class TestAppendFields(TestCase):
assert_equal(test, control)
-class TestStackArrays(TestCase):
+class TestStackArrays(object):
# Test stack_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -417,11 +419,11 @@ class TestStackArrays(TestCase):
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
@@ -546,9 +548,38 @@ class TestStackArrays(TestCase):
assert_equal(test, control)
assert_equal(test.mask, control.mask)
-
-class TestJoinBy(TestCase):
- def setUp(self):
+ def test_subdtype(self):
+ z = np.array([
+ ('A', 1), ('B', 2)
+ ], dtype=[('A', '|S3'), ('B', float, (1,))])
+ zz = np.array([
+ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
+
+ res = stack_arrays((z, zz))
+ expected = ma.array(
+ data=[
+ (b'A', [1.0], 0),
+ (b'B', [2.0], 0),
+ (b'a', [10.0], 100.0),
+ (b'b', [20.0], 200.0),
+ (b'c', [30.0], 300.0)],
+ mask=[
+ (False, [False], True),
+ (False, [False], True),
+ (False, [False], False),
+ (False, [False], False),
+ (False, [False], False)
+ ],
+ dtype=zz.dtype
+ )
+ assert_equal(res.dtype, expected.dtype)
+ assert_equal(res, expected)
+ assert_equal(res.mask, expected.mask)
+
+
+class TestJoinBy(object):
+ def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -588,6 +619,16 @@ class TestJoinBy(TestCase):
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
+ def test_join_subdtype(self):
+ # tests the bug in https://stackoverflow.com/q/44769632/102441
+ from numpy.lib import recfunctions as rfn
+ foo = np.array([(1,)],
+ dtype=[('key', int)])
+ bar = np.array([(1, np.array([1,2,3]))],
+ dtype=[('key', int), ('value', 'uint16', 3)])
+ res = join_by('key', foo, bar)
+ assert_equal(res, bar.view(ma.MaskedArray))
+
def test_outer_join(self):
a, b = self.a, self.b
@@ -646,10 +687,66 @@ class TestJoinBy(TestCase):
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+ @dec.knownfailureif(True)
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_same_name_different_dtypes(self):
+ # gh-9338
+ a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
+ expected_dtype = np.dtype([
+ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_subarray_key(self):
+ a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
+ a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
+
+ b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
+ b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
+
+ expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
+ expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
+
+ res = join_by('pos', a, b)
+ assert_equal(res.dtype, expected_dtype)
+ assert_equal(res, expected)
+
+ def test_padded_dtype(self):
+ dt = np.dtype('i1,f4', align=True)
+ dt.names = ('k', 'v')
+ assert_(len(dt.descr), 3) # padding field is inserted
+
+ a = np.array([(1, 3), (3, 2)], dt)
+ b = np.array([(1, 1), (2, 2)], dt)
+ res = join_by('k', a, b)
+
+ # no padding fields remain
+ expected_dtype = np.dtype([
+ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
+ ])
+
+ assert_equal(res.dtype, expected_dtype)
+
-class TestJoinBy2(TestCase):
+class TestJoinBy2(object):
@classmethod
- def setUp(cls):
+ def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -673,8 +770,8 @@ class TestJoinBy2(TestCase):
assert_equal(test, control)
def test_no_postfix(self):
- self.assertRaises(ValueError, join_by, 'a', self.a, self.b,
- r1postfix='', r2postfix='')
+ assert_raises(ValueError, join_by, 'a', self.a, self.b,
+ r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
@@ -712,13 +809,13 @@ class TestJoinBy2(TestCase):
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
-class TestAppendFieldsObj(TestCase):
+class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
- def setUp(self):
+ def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index ee50dcfa4..d96d3422d 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -5,22 +5,19 @@ import sys
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises
+ run_module_suite, assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_raises, _assert_valid_refcount,
)
-from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import unicode
-rlevel = 1
-
-class TestRegression(TestCase):
- def test_poly1d(self, level=rlevel):
+class TestRegression(object):
+ def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
np.poly1d([-1, 1]))
- def test_cov_parameters(self, level=rlevel):
+ def test_cov_parameters(self):
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
@@ -28,57 +25,57 @@ class TestRegression(TestCase):
np.cov(y, rowvar=0)
assert_array_equal(x, y)
- def test_mem_digitize(self, level=rlevel):
+ def test_mem_digitize(self):
# Ticket #95
for i in range(100):
np.digitize([1, 2, 3, 4], [1, 3])
np.digitize([0, 1, 2, 3, 4], [1, 3])
- def test_unique_zero_sized(self, level=rlevel):
+ def test_unique_zero_sized(self):
# Ticket #205
assert_array_equal([], np.unique(np.array([])))
- def test_mem_vectorise(self, level=rlevel):
+ def test_mem_vectorise(self):
# Ticket #325
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
1, 2)), np.zeros((2, 2)))
- def test_mgrid_single_element(self, level=rlevel):
+ def test_mgrid_single_element(self):
# Ticket #339
assert_array_equal(np.mgrid[0:0:1j], [0])
assert_array_equal(np.mgrid[0:0], [])
- def test_refcount_vectorize(self, level=rlevel):
+ def test_refcount_vectorize(self):
# Ticket #378
def p(x, y):
return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
- def test_poly1d_nan_roots(self, level=rlevel):
+ def test_poly1d_nan_roots(self):
# Ticket #396
p = np.poly1d([np.nan, np.nan, 1], r=0)
- self.assertRaises(np.linalg.LinAlgError, getattr, p, "r")
+ assert_raises(np.linalg.LinAlgError, getattr, p, "r")
- def test_mem_polymul(self, level=rlevel):
+ def test_mem_polymul(self):
# Ticket #448
np.polymul([], [1.])
- def test_mem_string_concat(self, level=rlevel):
+ def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
np.append(x, 'asdasd\tasdasd')
- def test_poly_div(self, level=rlevel):
+ def test_poly_div(self):
# Ticket #553
u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
assert_equal(q*v + r, u)
- def test_poly_eq(self, level=rlevel):
+ def test_poly_eq(self):
# Ticket #554
x = np.poly1d([1, 2, 3])
y = np.poly1d([3, 4])
@@ -109,13 +106,13 @@ class TestRegression(TestCase):
def test_polydiv_type(self):
# Make polydiv work for complex types
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
+ x = np.ones(3, dtype=complex)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.complex, msg)
+ assert_(q.dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
+ x = np.ones(3, dtype=int)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.float, msg)
+ assert_(q.dtype == float, msg)
def test_histogramdd_too_many_bins(self):
# Ticket 928.
@@ -124,22 +121,22 @@ class TestRegression(TestCase):
def test_polyint_type(self):
# Ticket #944
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
- assert_(np.polyint(x).dtype == np.complex, msg)
+ x = np.ones(3, dtype=complex)
+ assert_(np.polyint(x).dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
- assert_(np.polyint(x).dtype == np.float, msg)
+ x = np.ones(3, dtype=int)
+ assert_(np.polyint(x).dtype == float, msg)
def test_ndenumerate_crash(self):
# Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
- def test_asfarray_none(self, level=rlevel):
+ def test_asfarray_none(self):
# Test for changeset r5065
assert_array_equal(np.array([np.nan]), np.asfarray([None]))
- def test_large_fancy_indexing(self, level=rlevel):
+ def test_large_fancy_indexing(self):
# Large enough to fail on 64-bit.
nbits = np.dtype(np.intp).itemsize * 8
thesize = int((2**nbits)**(1.0/5.0)+1)
@@ -156,15 +153,15 @@ class TestRegression(TestCase):
i = np.random.randint(0, n, size=thesize)
a[np.ix_(i, i, i, i, i)]
- self.assertRaises(ValueError, dp)
- self.assertRaises(ValueError, dp2)
+ assert_raises(ValueError, dp)
+ assert_raises(ValueError, dp2)
- def test_void_coercion(self, level=rlevel):
+ def test_void_coercion(self):
dt = np.dtype([('a', 'f4'), ('b', 'i4')])
x = np.zeros((1,), dt)
assert_(np.r_[x, x].dtype == dt)
- def test_who_with_0dim_array(self, level=rlevel):
+ def test_who_with_0dim_array(self):
# ticket #1243
import os
import sys
@@ -174,7 +171,7 @@ class TestRegression(TestCase):
try:
try:
np.who({'foo': np.array(1)})
- except:
+ except Exception:
raise AssertionError("ticket #1243")
finally:
sys.stdout.close()
@@ -206,7 +203,7 @@ class TestRegression(TestCase):
dlist = [np.float64, np.int32, np.int32]
try:
append_fields(base, names, data, dlist)
- except:
+ except Exception:
raise AssertionError()
def test_loadtxt_fields_subarrays(self):
@@ -235,10 +232,10 @@ class TestRegression(TestCase):
def test_nansum_with_boolean(self):
# gh-2978
- a = np.zeros(2, dtype=np.bool)
+ a = np.zeros(2, dtype=bool)
try:
np.nansum(a)
- except:
+ except Exception:
raise AssertionError()
def test_py3_compat(self):
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 4d06001f4..d0afeefd9 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,23 +1,25 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+import warnings
+
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile
+ vsplit, dstack, column_stack, kron, tile, expand_dims,
)
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_raises, assert_warns
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
+ assert_warns
)
-class TestApplyAlongAxis(TestCase):
+class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
- def test_simple101(self, level=11):
+ def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
@@ -175,14 +177,33 @@ class TestApplyAlongAxis(TestCase):
assert_equal(type(actual[i]), type(expected[i]))
-class TestApplyOverAxes(TestCase):
+class TestApplyOverAxes(object):
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
-class TestArraySplit(TestCase):
+class TestExpandDims(object):
+ def test_functionality(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ for axis in range(-5, 4):
+ b = expand_dims(a, axis)
+ assert_(b.shape[axis] == 1)
+ assert_(np.squeeze(b).shape == s)
+
+ def test_deprecations(self):
+ # 2017-05-17, 1.13.0
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, expand_dims, a, -6)
+ assert_warns(DeprecationWarning, expand_dims, a, 5)
+
+
+class TestArraySplit(object):
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
@@ -307,7 +328,7 @@ class TestArraySplit(TestCase):
compare_results(res, desired)
-class TestSplit(TestCase):
+class TestSplit(object):
# The split function is essentially the same as array_split,
# except that it test if splitting will result in an
# equal split. Only test for this case.
@@ -322,12 +343,12 @@ class TestSplit(TestCase):
a = np.arange(10)
assert_raises(ValueError, split, a, 3)
-class TestColumnStack(TestCase):
+class TestColumnStack(object):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
-class TestDstack(TestCase):
+class TestDstack(object):
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
@@ -362,7 +383,7 @@ class TestDstack(TestCase):
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
-class TestHsplit(TestCase):
+class TestHsplit(object):
"""Only testing for integer splits.
"""
@@ -391,7 +412,7 @@ class TestHsplit(TestCase):
compare_results(res, desired)
-class TestVsplit(TestCase):
+class TestVsplit(object):
"""Only testing for integer splits.
"""
@@ -418,7 +439,7 @@ class TestVsplit(TestCase):
compare_results(res, desired)
-class TestDsplit(TestCase):
+class TestDsplit(object):
# Only testing for integer splits.
def test_non_iterable(self):
assert_raises(ValueError, dsplit, 1, 1)
@@ -451,7 +472,7 @@ class TestDsplit(TestCase):
compare_results(res, desired)
-class TestSqueeze(TestCase):
+class TestSqueeze(object):
def test_basic(self):
from numpy.random import rand
@@ -470,7 +491,7 @@ class TestSqueeze(TestCase):
assert_equal(type(res), np.ndarray)
-class TestKron(TestCase):
+class TestKron(object):
def test_return_type(self):
a = np.ones([2, 2])
m = np.asmatrix(a)
@@ -489,7 +510,7 @@ class TestKron(TestCase):
assert_equal(type(kron(ma, a)), myarray)
-class TestTile(TestCase):
+class TestTile(object):
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
@@ -529,19 +550,19 @@ class TestTile(TestCase):
assert_equal(large, klarge)
-class TestMayShareMemory(TestCase):
+class TestMayShareMemory(object):
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
- self.assertTrue(np.may_share_memory(d, d))
- self.assertTrue(np.may_share_memory(d, d[::-1]))
- self.assertTrue(np.may_share_memory(d, d[::2]))
- self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))
-
- self.assertFalse(np.may_share_memory(d[::-1], d2))
- self.assertFalse(np.may_share_memory(d[::2], d2))
- self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
- self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))
+ assert_(np.may_share_memory(d, d))
+ assert_(np.may_share_memory(d, d[::-1]))
+ assert_(np.may_share_memory(d, d[::2]))
+ assert_(np.may_share_memory(d, d[1:, ::-1]))
+
+ assert_(not np.may_share_memory(d[::-1], d2))
+ assert_(not np.may_share_memory(d[::2], d2))
+ assert_(not np.may_share_memory(d[1:, ::-1], d2))
+ assert_(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 7dc3c4d24..0599324d7 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.test_rational import rational
from numpy.testing import (
run_module_suite, assert_equal, assert_array_equal,
assert_raises, assert_
@@ -317,6 +318,13 @@ def test_as_strided():
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
+ # Custom dtypes should not be lost (gh-9161)
+ r = [rational(i) for i in range(4)]
+ a = np.array(r, dtype=rational)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+ assert_array_equal([r] * 3, a_view)
+
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index d57791e34..6bf668dee 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,8 +4,8 @@
from __future__ import division, absolute_import, print_function
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
- assert_array_max_ulp, assert_array_almost_equal, assert_raises,
+ run_module_suite, assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_array_almost_equal, assert_raises,
)
from numpy import (
@@ -23,7 +23,7 @@ def get_mat(n):
return data
-class TestEye(TestCase):
+class TestEye(object):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
@@ -96,7 +96,7 @@ class TestEye(TestCase):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
-class TestDiag(TestCase):
+class TestDiag(object):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
@@ -140,12 +140,12 @@ class TestDiag(TestCase):
assert_equal(diag(A, k=-3), [])
def test_failure(self):
- self.assertRaises(ValueError, diag, [[[1]]])
+ assert_raises(ValueError, diag, [[[1]]])
-class TestFliplr(TestCase):
+class TestFliplr(object):
def test_basic(self):
- self.assertRaises(ValueError, fliplr, ones(4))
+ assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
@@ -156,7 +156,7 @@ class TestFliplr(TestCase):
assert_equal(fliplr(a), b)
-class TestFlipud(TestCase):
+class TestFlipud(object):
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
@@ -168,7 +168,7 @@ class TestFlipud(TestCase):
assert_equal(flipud(a), b)
-class TestHistogram2d(TestCase):
+class TestHistogram2d(object):
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
@@ -265,7 +265,7 @@ class TestHistogram2d(TestCase):
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
-class TestTri(TestCase):
+class TestTri(object):
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
@@ -349,10 +349,10 @@ def test_mask_indices():
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
- yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
+ assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
- yield (assert_array_equal, a[iu1], array([1, 2, 5]))
+ assert_array_equal(a[iu1], array([1, 2, 5]))
def test_tril_indices():
@@ -369,37 +369,37 @@ def test_tril_indices():
b = np.arange(1, 21).reshape(4, 5)
# indexing:
- yield (assert_array_equal, a[il1],
- array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
- yield (assert_array_equal, b[il3],
- array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
+ assert_array_equal(a[il1],
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ assert_array_equal(b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
- yield (assert_array_equal, a,
- array([[-1, 2, 3, 4],
- [-1, -1, 7, 8],
- [-1, -1, -1, 12],
- [-1, -1, -1, -1]]))
+ assert_array_equal(a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]))
b[il3] = -1
- yield (assert_array_equal, b,
- array([[-1, 2, 3, 4, 5],
- [-1, -1, 8, 9, 10],
- [-1, -1, -1, 14, 15],
- [-1, -1, -1, -1, 20]]))
+ assert_array_equal(b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
- yield (assert_array_equal, a,
- array([[-10, -10, -10, 4],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10]]))
+ assert_array_equal(a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]))
b[il4] = -10
- yield (assert_array_equal, b,
- array([[-10, -10, -10, 4, 5],
- [-10, -10, -10, -10, 10],
- [-10, -10, -10, -10, -10],
- [-10, -10, -10, -10, -10]]))
+ assert_array_equal(b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
@@ -416,39 +416,40 @@ class TestTriuIndices(object):
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
- yield (assert_array_equal, a[iu1],
- array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
- yield (assert_array_equal, b[iu3],
- array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
+ assert_array_equal(a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ assert_array_equal(b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9,
+ 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
- yield (assert_array_equal, a,
- array([[-1, -1, -1, -1],
- [5, -1, -1, -1],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu3] = -1
- yield (assert_array_equal, b,
- array([[-1, -1, -1, -1, -1],
- [6, -1, -1, -1, -1],
- [11, 12, -1, -1, -1],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
- yield (assert_array_equal, a,
- array([[-1, -1, -10, -10],
- [5, -1, -1, -10],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu4] = -10
- yield (assert_array_equal, b,
- array([[-1, -1, -10, -10, -10],
- [6, -1, -1, -10, -10],
- [11, 12, -1, -1, -10],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 383ffa55c..8945b61ea 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
- TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
+ assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
@@ -15,7 +15,7 @@ def assert_all(x):
assert_(np.all(x), x)
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
@@ -31,7 +31,7 @@ class TestCommonType(TestCase):
assert_(common_type(acd) == np.cdouble)
-class TestMintypecode(TestCase):
+class TestMintypecode(object):
def test_default_1(self):
for itype in '1bcsuwil':
@@ -81,7 +81,7 @@ class TestMintypecode(TestCase):
assert_equal(mintypecode('idD'), 'D')
-class TestIsscalar(TestCase):
+class TestIsscalar(object):
def test_basic(self):
assert_(np.isscalar(3))
@@ -92,7 +92,7 @@ class TestIsscalar(TestCase):
assert_(np.isscalar(4.0))
-class TestReal(TestCase):
+class TestReal(object):
def test_real(self):
y = np.random.rand(10,)
@@ -123,7 +123,7 @@ class TestReal(TestCase):
assert_(not isinstance(out, np.ndarray))
-class TestImag(TestCase):
+class TestImag(object):
def test_real(self):
y = np.random.rand(10,)
@@ -154,7 +154,7 @@ class TestImag(TestCase):
assert_(not isinstance(out, np.ndarray))
-class TestIscomplex(TestCase):
+class TestIscomplex(object):
def test_fail(self):
z = np.array([-1, 0, 1])
@@ -167,7 +167,7 @@ class TestIscomplex(TestCase):
assert_array_equal(res, [1, 0, 0])
-class TestIsreal(TestCase):
+class TestIsreal(object):
def test_pass(self):
z = np.array([-1, 0, 1j])
@@ -180,7 +180,7 @@ class TestIsreal(TestCase):
assert_array_equal(res, [0, 1, 1])
-class TestIscomplexobj(TestCase):
+class TestIscomplexobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
@@ -233,7 +233,7 @@ class TestIscomplexobj(TestCase):
assert_(iscomplexobj(a))
-class TestIsrealobj(TestCase):
+class TestIsrealobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
@@ -241,7 +241,7 @@ class TestIsrealobj(TestCase):
assert_(not isrealobj(z))
-class TestIsnan(TestCase):
+class TestIsnan(object):
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
@@ -271,7 +271,7 @@ class TestIsnan(TestCase):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
-class TestIsfinite(TestCase):
+class TestIsfinite(object):
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
@@ -302,7 +302,7 @@ class TestIsfinite(TestCase):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
-class TestIsinf(TestCase):
+class TestIsinf(object):
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
@@ -331,7 +331,7 @@ class TestIsinf(TestCase):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
-class TestIsposinf(TestCase):
+class TestIsposinf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -341,7 +341,7 @@ class TestIsposinf(TestCase):
assert_(vals[2] == 1)
-class TestIsneginf(TestCase):
+class TestIsneginf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -351,7 +351,7 @@ class TestIsneginf(TestCase):
assert_(vals[2] == 0)
-class TestNanToNum(TestCase):
+class TestNanToNum(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -374,7 +374,7 @@ class TestNanToNum(TestCase):
vals = nan_to_num(1)
assert_all(vals == 1)
vals = nan_to_num([1])
- assert_array_equal(vals, np.array([1], np.int))
+ assert_array_equal(vals, np.array([1], int))
def test_complex_good(self):
vals = nan_to_num(1+1j)
@@ -402,7 +402,7 @@ class TestNanToNum(TestCase):
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
-class TestRealIfClose(TestCase):
+class TestRealIfClose(object):
def test_basic(self):
a = np.random.rand(10)
@@ -415,12 +415,18 @@ class TestRealIfClose(TestCase):
assert_all(isrealobj(b))
-class TestArrayConversion(TestCase):
+class TestArrayConversion(object):
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
- assert_(np.issubdtype(a.dtype, np.float))
+ assert_(np.issubdtype(a.dtype, np.floating))
+
+ # previously this would infer dtypes from arrays, unlike every single
+ # other numpy function
+ assert_raises(TypeError,
+ asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 0b152540f..128ce37ab 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -4,12 +4,11 @@ import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_warns
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_warns
)
-class TestUfunclike(TestCase):
+class TestUfunclike(object):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 28ebb8cbd..a6259219a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -6,6 +6,7 @@ from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
+ nonzero
)
from numpy.core import iinfo, transpose
@@ -717,7 +718,7 @@ def mask_indices(n, mask_func, k=0):
"""
m = ones((n, n), int)
a = mask_func(m, k)
- return where(a != 0)
+ return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
@@ -797,7 +798,7 @@ def tril_indices(n, k=0, m=None):
[-10, -10, -10, -10]])
"""
- return where(tri(n, m, k=k, dtype=bool))
+ return nonzero(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
@@ -907,7 +908,7 @@ def triu_indices(n, k=0, m=None):
[ 12, 13, 14, -1]])
"""
- return where(~tri(n, m, k=k-1, dtype=bool))
+ return nonzero(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 5202cebde..e6aae8ddd 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -98,8 +98,7 @@ def asfarray(a, dtype=_nx.float_):
array([ 2., 3.])
"""
- dtype = _nx.obj2sctype(dtype)
- if not issubclass(dtype, _nx.inexact):
+ if not _nx.issubdtype(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a, dtype=dtype)
@@ -331,11 +330,16 @@ def _getmaxmin(t):
def nan_to_num(x, copy=True):
"""
- Replace nan with zero and inf with finite numbers.
+ Replace nan with zero and inf with large finite numbers.
- Returns an array or scalar replacing Not a Number (NaN) with zero,
- (positive) infinity with a very large number and negative infinity
- with a very small (or negative) number.
+ If `x` is inexact, NaN is replaced by zero, and infinity and -infinity
+ replaced by the respectively largest and most negative finite floating
+ point values representable by ``x.dtype``.
+
+ For complex dtypes, the above is applied to each of the real and
+ imaginary components of `x` separately.
+
+ If `x` is not inexact, then no replacements are made.
Parameters
----------
@@ -352,12 +356,8 @@ def nan_to_num(x, copy=True):
Returns
-------
out : ndarray
- New Array with the same shape as `x` and dtype of the element in
- `x` with the greatest precision. If `x` is inexact, then NaN is
- replaced by zero, and infinity (-infinity) is replaced by the
- largest (smallest or most negative) floating point value that fits
- in the output dtype. If `x` is not inexact, then a copy of `x` is
- returned.
+ `x`, with the non-finite values replaced. If `copy` is False, this may
+ be `x` itself.
See Also
--------
@@ -372,15 +372,17 @@ def nan_to_num(x, copy=True):
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
-
Examples
--------
- >>> np.set_printoptions(precision=8)
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
-
+ >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+ >>> np.nan_to_num(y)
+ array([ 1.79769313e+308 +0.00000000e+000j,
+ 0.00000000e+000 +0.00000000e+000j,
+ 0.00000000e+000 +1.79769313e+308j])
"""
x = _nx.array(x, subok=True, copy=copy)
xtype = x.dtype.type
@@ -430,12 +432,12 @@ def real_if_close(a,tol=100):
-----
Machine epsilon varies from machine to machine and between data types
but Python floats on most platforms have a machine epsilon equal to
- 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print
+ 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
out the machine epsilon for floats.
Examples
--------
- >>> np.finfo(np.float).eps
+ >>> np.finfo(float).eps
2.2204460492503131e-16
>>> np.real_if_close([2.1 + 4e-14j], tol=1000)
@@ -577,8 +579,8 @@ def common_type(*arrays):
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
- All input arrays can be safely cast to the returned dtype without loss
- of information.
+ All input arrays except int64 and uint64 can be safely cast to the
+ returned dtype without loss of information.
Parameters
----------
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index fad159c7e..e18eda0fb 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -557,7 +557,7 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
- except:
+ except Exception:
pass
if len(name+arguments) > maxwidth:
@@ -689,7 +689,7 @@ def source(object, output=sys.stdout):
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
- except:
+ except Exception:
print("Not available for this object.", file=output)
@@ -1138,7 +1138,7 @@ def _median_nancheck(data, result, axis, out):
"""
if data.size == 0:
return result
- data = np.rollaxis(data, axis, data.ndim)
+ data = np.moveaxis(data, axis, -1)
n = np.isnan(data[..., -1])
# masked NaN values are ok
if np.ma.isMaskedArray(n):
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 69445f541..2537926c5 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -50,6 +50,6 @@ from .info import __doc__
from .linalg import *
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 31147b9cc..d2ae7befc 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -19,12 +19,13 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
import warnings
from numpy.core import (
- array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
+ array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
- )
+ finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
+ broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones, matmul,
+ swapaxes, divide, count_nonzero
+)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -69,12 +70,8 @@ class LinAlgError(Exception):
"""
pass
-# Dealing with errors in _umath_linalg
-
-_linalg_error_extobj = None
def _determine_error_states():
- global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
@@ -82,9 +79,11 @@ def _determine_error_states():
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
- _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
+ return [bufsize, invalid_call_errmask, None]
-_determine_error_states()
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
@@ -99,7 +98,7 @@ def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
- extobj = list(_linalg_error_extobj)
+ extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
@@ -225,6 +224,22 @@ def _assertNoEmpty2d(*arrays):
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
+def transpose(a):
+ """
+ Transpose each matrix in a stack of matrices.
+
+ Unlike np.transpose, this only swaps the last two axes, rather than all of
+ them
+
+ Parameters
+ ----------
+ a : (...,M,N) array_like
+
+ Returns
+ -------
+ aT : (...,N,M) ndarray
+ """
+ return swapaxes(a, -1, -2)
# Linear equations
@@ -1281,7 +1296,7 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
-def svd(a, full_matrices=1, compute_uv=1):
+def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
@@ -1489,22 +1504,34 @@ def cond(x, p=None):
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
-def matrix_rank(M, tol=None):
+def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
- Rank of the array is the number of SVD singular values of the array that are
+ Rank of the array is the number of singular values of the array that are
greater than `tol`.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
- tol : {None, float}, optional
- threshold below which SVD values are considered zero. If `tol` is
- None, and ``S`` is an array with singular values for `M`, and
- ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
- set to ``S.max() * max(M.shape) * eps``.
+ tol : (...) array_like, float, optional
+ threshold below which SVD values are considered zero. If `tol` is
+ None, and ``S`` is an array with singular values for `M`, and
+ ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+ set to ``S.max() * max(M.shape) * eps``.
+
+ .. versionchanged:: 1.14
+ Broadcasted against the stack of matrices
+ hermitian : bool, optional
+ If True, `M` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.14
Notes
-----
@@ -1568,10 +1595,15 @@ def matrix_rank(M, tol=None):
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
- S = svd(M, compute_uv=False)
+ if hermitian:
+ S = abs(eigvalsh(M))
+ else:
+ S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
- return (S > tol).sum(axis=-1)
+ else:
+ tol = asarray(tol)[..., newaxis]
+ return count_nonzero(S > tol, axis=-1)
# Generalized inverse
@@ -1584,26 +1616,29 @@ def pinv(a, rcond=1e-15 ):
singular-value decomposition (SVD) and including all
*large* singular values.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
- a : (M, N) array_like
- Matrix to be pseudo-inverted.
- rcond : float
- Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero.
+ a : (..., M, N) array_like
+ Matrix or stack of matrices to be pseudo-inverted.
+ rcond : (...) array_like of float
+ Cutoff for small singular values.
+ Singular values smaller (in modulus) than
+ `rcond` * largest_singular_value (again, in modulus)
+ are set to zero. Broadcasts against the stack of matrices
Returns
-------
- B : (N, M) ndarray
- The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
- is `B`.
+ B : (..., N, M) ndarray
+ The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+ is `B`.
Raises
------
LinAlgError
- If the SVD computation does not converge.
+ If the SVD computation does not converge.
Notes
-----
@@ -1640,20 +1675,20 @@ def pinv(a, rcond=1e-15 ):
"""
a, wrap = _makearray(a)
+ rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
- u, s, vt = svd(a, 0)
- m = u.shape[0]
- n = vt.shape[1]
- cutoff = rcond*maximum.reduce(s)
- for i in range(min(n, m)):
- if s[i] > cutoff:
- s[i] = 1./s[i]
- else:
- s[i] = 0.
- res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
+ u, s, vt = svd(a, full_matrices=False)
+
+ # discard small singular values
+ cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
+ large = s > cutoff
+ s = divide(1, s, where=large, out=s)
+ s[~large] = 0
+
+ res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@@ -1810,7 +1845,7 @@ def det(a):
# Linear Least Squares
-def lstsq(a, b, rcond=-1):
+def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
@@ -1836,6 +1871,13 @@ def lstsq(a, b, rcond=-1):
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
+ .. versionchanged:: 1.14.0
+ If not set, a FutureWarning is given. The previous default
+ of ``-1`` will use the machine precision as `rcond` parameter,
+ the new default will use the machine precision times `max(M, N)`.
+ To silence the warning and use the new default, use ``rcond=None``,
+ to keep using the old behavior, use ``rcond=-1``.
+
Returns
-------
x : {(N,), (N, K)} ndarray
@@ -1909,6 +1951,20 @@ def lstsq(a, b, rcond=-1):
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
+ # Determine default rcond value
+ if rcond == "warn":
+ # 2017-08-19, 1.14.0
+ warnings.warn("`rcond` parameter will change to the default of "
+ "machine precision times ``max(M, N)`` where M and N "
+ "are the input matrix dimensions.\n"
+ "To use the future default and silence this warning "
+ "we advise to pass `rcond=None`, to keep using the old, "
+ "explicitly pass `rcond=-1`.",
+ FutureWarning, stacklevel=2)
+ rcond = -1
+ if rcond is None:
+ rcond = finfo(t).eps * ldb
+
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
@@ -1968,13 +2024,13 @@ def lstsq(a, b, rcond=-1):
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
- x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
+ x = array(bstar.T[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
- resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum(abs(bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
- resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum((bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
@@ -2004,9 +2060,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
- if row_axis > col_axis:
- row_axis -= 1
- y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
+ y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
@@ -2177,7 +2231,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
elif not isinstance(axis, tuple):
try:
axis = int(axis)
- except:
+ except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
@@ -2201,18 +2255,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- if x.dtype.type is longdouble:
- # Convert to a float type, so integer arrays give
- # float results. Don't apply asfarray to longdouble arrays,
- # because it will downcast to float64.
- absx = abs(x)
- else:
- absx = x if isComplexType(x.dtype.type) else asfarray(x)
- if absx.dtype is x.dtype:
- absx = abs(absx)
- else:
- # if the type changed, we can safely overwrite absx
- abs(absx, out=absx)
+ absx = abs(x)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
@@ -2327,7 +2370,7 @@ def multi_dot(arrays):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
- :math:`A_{10x100}, B_{100x5}, C_{5x50}$`.
+ :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
diff --git a/numpy/linalg/tests/__init__.py b/numpy/linalg/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/linalg/tests/__init__.py
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index a91f97670..b46a72c02 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -5,7 +5,7 @@ import sys
import re
from numpy.linalg import lapack_lite
-from numpy.testing import TestCase, dec, run_module_suite
+from numpy.testing import run_module_suite, assert_, dec
class FindDependenciesLdd(object):
@@ -40,7 +40,7 @@ class FindDependenciesLdd(object):
return founds
-class TestF77Mismatch(TestCase):
+class TestF77Mismatch(object):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
@@ -48,7 +48,7 @@ class TestF77Mismatch(TestCase):
f = FindDependenciesLdd()
deps = f.grep_dependencies(lapack_lite.__file__,
[b'libg2c', b'libgfortran'])
- self.assertFalse(len(deps) > 1,
+ assert_(len(deps) <= 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index c612eb6bb..8b3984883 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -712,12 +712,16 @@ class TestCondInf(object):
assert_almost_equal(linalg.cond(A, inf), 3.)
-class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class TestPinv(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
- assert_almost_equal(dot(a, a_ginv).dot(a), a, single_decimal=5, double_decimal=11)
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
@@ -793,7 +797,7 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
- x, residuals, rank, sv = linalg.lstsq(a, b)
+ x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
@@ -814,6 +818,23 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ def test_future_rcond(self):
+ a = np.array([[0., 1., 0., 1., 2., 0.],
+ [0., 2., 0., 0., 1., 0.],
+ [1., 0., 1., 0., 0., 4.],
+ [0., 0., 0., 2., 3., 0.]]).T
+
+ b = np.array([1, 0, 0, 0, 0, 0])
+ with suppress_warnings() as sup:
+ w = sup.record(FutureWarning, "`rcond` parameter will change")
+ x, residuals, rank, s = linalg.lstsq(a, b)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
+ assert_(rank == 3)
+ # Warning should be raised exactly once (first command)
+ assert_(len(w) == 1)
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
@@ -1362,6 +1383,19 @@ class TestMatrixRank(object):
# works on scalar
yield assert_equal, matrix_rank(1), 1
+ def test_symmetric_rank(self):
+ yield assert_equal, 4, matrix_rank(np.eye(4), hermitian=True)
+ yield assert_equal, 1, matrix_rank(np.ones((4, 4)), hermitian=True)
+ yield assert_equal, 0, matrix_rank(np.zeros((4, 4)), hermitian=True)
+ # rank deficient matrix
+ I = np.eye(4)
+ I[-1, -1] = 0.
+ yield assert_equal, 3, matrix_rank(I, hermitian=True)
+ # manually supplied tolerance
+ I[-1, -1] = 1e-8
+ yield assert_equal, 4, matrix_rank(I, hermitian=True, tol=0.99e-8)
+ yield assert_equal, 3, matrix_rank(I, hermitian=True, tol=1.01e-8)
+
def test_reduced_rank():
# Test matrices with reduced rank
@@ -1550,7 +1584,7 @@ def test_xerbla_override():
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
- except:
+ except Exception:
os._exit(os.EX_CONFIG)
try:
@@ -1645,7 +1679,7 @@ class TestMultiDot(object):
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
- [0, 0, 0, 0, 0, 0]], dtype=np.int)
+ [0, 0, 0, 0, 0, 0]], dtype=int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index d2080b709..07d72620b 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -7,17 +7,14 @@ import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
-rlevel = 1
+class TestRegression(object):
-
-class TestRegression(TestCase):
-
- def test_eig_build(self, level=rlevel):
+ def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
@@ -40,7 +37,7 @@ class TestRegression(TestCase):
rva.sort()
assert_array_almost_equal(va, rva)
- def test_eigh_build(self, level=rlevel):
+ def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
@@ -51,7 +48,7 @@ class TestRegression(TestCase):
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
- def test_svd_build(self, level=rlevel):
+ def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
@@ -64,7 +61,7 @@ class TestRegression(TestCase):
def test_norm_vector_badarg(self):
# Regression for #786: Froebenius norm for vectors raises
# TypeError.
- self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+ assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
@@ -98,47 +95,47 @@ class TestRegression(TestCase):
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
- self.assertNotEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
- self.assertRaises(ValueError, linalg.norm, testvector, ord='fro')
- self.assertRaises(ValueError, linalg.norm, testvector, ord='nuc')
- self.assertRaises(ValueError, linalg.norm, testvector, ord=np.inf)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-np.inf)
+ assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+ assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
- self.assertRaises((AttributeError, DeprecationWarning),
+ assert_raises((AttributeError, DeprecationWarning),
linalg.norm, testvector, ord=0)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-1)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-2)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
-
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord='nuc')
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=np.inf)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=0)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=1)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-1)
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord=2)
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord=-2)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=3)
+ assert_(norm.dtype == np.dtype('float64'))
+
+ assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
if __name__ == '__main__':
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index af3468b01..fbefc47a4 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -51,6 +51,6 @@ __all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index d6b30ae2e..d8d3ae621 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -186,7 +186,7 @@ default_filler = {'b': True,
'O': '?',
'S': b'N/A',
'u': 999999,
- 'V': '???',
+ 'V': b'???',
'U': u'N/A'
}
@@ -205,6 +205,31 @@ if 'float128' in ntypes.typeDict:
min_filler.update([(np.float128, +np.inf)])
+def _recursive_fill_value(dtype, f):
+ """
+ Recursively produce a fill value for `dtype`, calling f on scalar dtypes
+ """
+ if dtype.names:
+ vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
+ return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d
+ elif dtype.subdtype:
+ subtype, shape = dtype.subdtype
+ subval = _recursive_fill_value(subtype, f)
+ return np.full(shape, subval)
+ else:
+ return f(dtype)
+
+
+def _get_dtype_of(obj):
+ """ Convert the argument for *_fill_value into a dtype """
+ if isinstance(obj, np.dtype):
+ return obj
+ elif hasattr(obj, 'dtype'):
+ return obj.dtype
+ else:
+ return np.asanyarray(obj).dtype
+
+
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
@@ -223,6 +248,11 @@ def default_fill_value(obj):
string 'N/A'
======== ========
+ For structured types, a structured scalar is returned, with each field the
+ default fill value for its type.
+
+ For subarray types, the fill value is an array of the same size containing
+ the default scalar fill value.
Parameters
----------
@@ -245,39 +275,29 @@ def default_fill_value(obj):
(1e+20+0j)
"""
- if hasattr(obj, 'dtype'):
- defval = _check_fill_value(None, obj.dtype)
- elif isinstance(obj, np.dtype):
- if obj.subdtype:
- defval = default_filler.get(obj.subdtype[0].kind, '?')
- elif obj.kind in 'Mm':
- defval = default_filler.get(obj.str[1:], '?')
+ def _scalar_fill_value(dtype):
+ if dtype.kind in 'Mm':
+ return default_filler.get(dtype.str[1:], '?')
else:
- defval = default_filler.get(obj.kind, '?')
- elif isinstance(obj, float):
- defval = default_filler['f']
- elif isinstance(obj, int) or isinstance(obj, long):
- defval = default_filler['i']
- elif isinstance(obj, bytes):
- defval = default_filler['S']
- elif isinstance(obj, unicode):
- defval = default_filler['U']
- elif isinstance(obj, complex):
- defval = default_filler['c']
- else:
- defval = default_filler['O']
- return defval
+ return default_filler.get(dtype.kind, '?')
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
-def _recursive_extremum_fill_value(ndtype, extremum):
- names = ndtype.names
- if names:
- deflist = []
- for name in names:
- fval = _recursive_extremum_fill_value(ndtype[name], extremum)
- deflist.append(fval)
- return tuple(deflist)
- return extremum[ndtype]
+
+def _extremum_fill_value(obj, extremum, extremum_name):
+
+ def _scalar_fill_value(dtype):
+ try:
+ return extremum[dtype]
+ except KeyError:
+ raise TypeError(
+ "Unsuitable type {} for calculating {}."
+ .format(dtype, extremum_name)
+ )
+
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
def minimum_fill_value(obj):
@@ -289,7 +309,7 @@ def minimum_fill_value(obj):
Parameters
----------
- obj : ndarray or dtype
+ obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
@@ -328,19 +348,7 @@ def minimum_fill_value(obj):
inf
"""
- errmsg = "Unsuitable type for calculating minimum."
- if hasattr(obj, 'dtype'):
- return _recursive_extremum_fill_value(obj.dtype, min_filler)
- elif isinstance(obj, float):
- return min_filler[ntypes.typeDict['float_']]
- elif isinstance(obj, int):
- return min_filler[ntypes.typeDict['int_']]
- elif isinstance(obj, long):
- return min_filler[ntypes.typeDict['uint']]
- elif isinstance(obj, np.dtype):
- return min_filler[obj]
- else:
- raise TypeError(errmsg)
+ return _extremum_fill_value(obj, min_filler, "minimum")
def maximum_fill_value(obj):
@@ -352,7 +360,7 @@ def maximum_fill_value(obj):
Parameters
----------
- obj : {ndarray, dtype}
+ obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
@@ -391,48 +399,7 @@ def maximum_fill_value(obj):
-inf
"""
- errmsg = "Unsuitable type for calculating maximum."
- if hasattr(obj, 'dtype'):
- return _recursive_extremum_fill_value(obj.dtype, max_filler)
- elif isinstance(obj, float):
- return max_filler[ntypes.typeDict['float_']]
- elif isinstance(obj, int):
- return max_filler[ntypes.typeDict['int_']]
- elif isinstance(obj, long):
- return max_filler[ntypes.typeDict['uint']]
- elif isinstance(obj, np.dtype):
- return max_filler[obj]
- else:
- raise TypeError(errmsg)
-
-
-def _recursive_set_default_fill_value(dt):
- """
- Create the default fill value for a structured dtype.
-
- Parameters
- ----------
- dt: dtype
- The structured dtype for which to create the fill value.
-
- Returns
- -------
- val: tuple
- A tuple of values corresponding to the default structured fill value.
-
- """
- deflist = []
- for name in dt.names:
- currenttype = dt[name]
- if currenttype.subdtype:
- currenttype = currenttype.subdtype[0]
-
- if currenttype.names:
- deflist.append(
- tuple(_recursive_set_default_fill_value(currenttype)))
- else:
- deflist.append(default_fill_value(currenttype))
- return tuple(deflist)
+ return _extremum_fill_value(obj, max_filler, "maximum")
def _recursive_set_fill_value(fillvalue, dt):
@@ -471,22 +438,16 @@ def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
- If fill_value is None, it is set to the default corresponding to the dtype
- if this latter is standard (no fields). If the datatype is flexible (named
- fields), fill_value is set to a tuple whose elements are the default fill
- values corresponding to each field.
+ If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
+ The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
- if fields:
- fill_value = np.array(_recursive_set_default_fill_value(ndtype),
- dtype=ndtype)
- else:
- fill_value = default_fill_value(ndtype)
+ fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
@@ -823,7 +784,7 @@ ufunc_domain = {}
ufunc_fills = {}
-class _DomainCheckInterval:
+class _DomainCheckInterval(object):
"""
Define a valid interval, so that :
@@ -848,7 +809,7 @@ class _DomainCheckInterval:
umath.less(x, self.a))
-class _DomainTan:
+class _DomainTan(object):
"""
Define a valid interval for the `tan` function, so that:
@@ -866,7 +827,7 @@ class _DomainTan:
return umath.less(umath.absolute(umath.cos(x)), self.eps)
-class _DomainSafeDivide:
+class _DomainSafeDivide(object):
"""
Define a domain for safe division.
@@ -887,7 +848,7 @@ class _DomainSafeDivide:
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
-class _DomainGreater:
+class _DomainGreater(object):
"""
DomainGreater(v)(x) is True where x <= v.
@@ -903,7 +864,7 @@ class _DomainGreater:
return umath.less_equal(x, self.critical_value)
-class _DomainGreaterEqual:
+class _DomainGreaterEqual(object):
"""
DomainGreaterEqual(v)(x) is True where x < v.
@@ -919,7 +880,17 @@ class _DomainGreaterEqual:
return umath.less(x, self.critical_value)
-class _MaskedUnaryOperation:
+class _MaskedUFunc(object):
+ def __init__(self, ufunc):
+ self.f = ufunc
+ self.__doc__ = ufunc.__doc__
+ self.__name__ = ufunc.__name__
+
+ def __str__(self):
+ return "Masked version of {}".format(self.f)
+
+
+class _MaskedUnaryOperation(_MaskedUFunc):
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
@@ -938,11 +909,9 @@ class _MaskedUnaryOperation:
"""
def __init__(self, mufunc, fill=0, domain=None):
- self.f = mufunc
+ super(_MaskedUnaryOperation, self).__init__(mufunc)
self.fill = fill
self.domain = domain
- self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
- self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
@@ -994,11 +963,8 @@ class _MaskedUnaryOperation:
masked_result._update_from(a)
return masked_result
- def __str__(self):
- return "Masked version of %s. [Invalid values are masked]" % str(self.f)
-
-class _MaskedBinaryOperation:
+class _MaskedBinaryOperation(_MaskedUFunc):
"""
Define masked version of binary operations, where invalid
values are pre-masked.
@@ -1025,11 +991,9 @@ class _MaskedBinaryOperation:
abfunc(x, filly) = x for all x to enable reduce.
"""
- self.f = mbfunc
+ super(_MaskedBinaryOperation, self).__init__(mbfunc)
self.fillx = fillx
self.filly = filly
- self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
- self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
@@ -1068,7 +1032,7 @@ class _MaskedBinaryOperation:
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, da, casting='unsafe', where=m)
- except:
+ except Exception:
pass
# Transforms to a (subclass of) MaskedArray
@@ -1146,11 +1110,9 @@ class _MaskedBinaryOperation:
masked_result = result.view(tclass)
return masked_result
- def __str__(self):
- return "Masked version of " + str(self.f)
-class _DomainedBinaryOperation:
+class _DomainedBinaryOperation(_MaskedUFunc):
"""
Define binary operations that have a domain, like divide.
@@ -1175,12 +1137,10 @@ class _DomainedBinaryOperation:
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
- self.f = dbfunc
+ super(_DomainedBinaryOperation, self).__init__(dbfunc)
self.domain = domain
self.fillx = fillx
self.filly = filly
- self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
- self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
@@ -1214,7 +1174,7 @@ class _DomainedBinaryOperation:
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
- except:
+ except Exception:
pass
# Transforms to a (subclass of) MaskedArray
@@ -1226,9 +1186,6 @@ class _DomainedBinaryOperation:
masked_result._update_from(b)
return masked_result
- def __str__(self):
- return "Masked version of " + str(self.f)
-
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
@@ -1329,7 +1286,7 @@ def _replace_dtype_fields_recursive(dtype, primitive_dtype):
descr.append((name, _recurse(field[0], primitive_dtype)))
new_dtype = np.dtype(descr)
- # Is this some kind of composite a la (np.float,2)
+ # Is this some kind of composite a la (float,2)
elif dtype.subdtype:
descr = list(dtype.subdtype)
descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
@@ -1381,7 +1338,7 @@ def make_mask_descr(ndtype):
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, np.int]})
+ 'formats':[np.float32, int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
@@ -1562,7 +1519,7 @@ def is_mask(m):
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
- 'formats':[np.bool, np.bool]})
+ 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
@@ -1641,7 +1598,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
- 'formats':[np.int, np.int]})
+ 'formats':[int, int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
@@ -1656,6 +1613,11 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
+
+ # legacy boolean special case: "existence of fields implies true"
+ if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+ return np.ones(m.shape, dtype=dtype)
+
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
@@ -1700,7 +1662,7 @@ def make_mask_none(newshape, dtype=None):
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, np.int]})
+ 'formats':[np.float32, int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
@@ -1798,7 +1760,7 @@ def flatten_mask(mask):
Examples
--------
- >>> mask = np.array([0, 0, 1], dtype=np.bool)
+ >>> mask = np.array([0, 0, 1], dtype=bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
@@ -2366,7 +2328,7 @@ def masked_invalid(a, copy=True):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.arange(5, dtype=np.float)
+ >>> a = np.arange(5, dtype=float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
@@ -2397,7 +2359,7 @@ def masked_invalid(a, copy=True):
###############################################################################
-class _MaskedPrintOption:
+class _MaskedPrintOption(object):
"""
Handle the string used to represent missing data in a masked array.
@@ -2598,14 +2560,11 @@ def _arraymethod(funcname, onmask=True):
result = result.view(type(self))
result._update_from(self)
mask = self._mask
- if result.ndim:
- if not onmask:
- result.__setmask__(mask)
- elif mask is not nomask:
- result.__setmask__(getattr(mask, funcname)(*args, **params))
- else:
- if mask.ndim and (not mask.dtype.names and mask.all()):
- return masked
+ if not onmask:
+ result.__setmask__(mask)
+ elif mask is not nomask:
+ # __setmask__ makes a copy, which we don't want
+ result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
@@ -2935,7 +2894,7 @@ class MaskedArray(ndarray):
Copies some attributes of obj to self.
"""
- if obj is not None and isinstance(obj, ndarray):
+ if isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
@@ -3191,16 +3150,16 @@ class MaskedArray(ndarray):
"""
newtype = np.dtype(newtype)
+ newmasktype = make_mask_descr(newtype)
+
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
- names = output.dtype.names
- if names is None:
- output._mask = self._mask.astype(bool)
+
+ if self._mask is nomask:
+ output._mask = nomask
else:
- if self._mask is nomask:
- output._mask = nomask
- else:
- output._mask = self._mask.astype([(n, bool) for n in names])
+ output._mask = self._mask.astype(newmasktype)
+
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
@@ -3357,8 +3316,6 @@ class MaskedArray(ndarray):
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
- if not self._isfield:
- self._sharedmask = False
return
# Get the _data part of the new value
@@ -3374,27 +3331,6 @@ class MaskedArray(ndarray):
_mask = self._mask = make_mask_none(self.shape, _dtype)
_mask[indx] = mval
elif not self._hardmask:
- # Unshare the mask if necessary to avoid propagation
- # We want to remove the unshare logic from this place in the
- # future. Note that _sharedmask has lots of false positives.
- if not self._isfield:
- notthree = getattr(sys, 'getrefcount', False) and (sys.getrefcount(_mask) != 3)
- if self._sharedmask and not (
- # If no one else holds a reference (we have two
- # references (_mask and self._mask) -- add one for
- # getrefcount) and the array owns its own data
- # copying the mask should do nothing.
- (not notthree) and _mask.flags.owndata):
- # 2016.01.15 -- v1.11.0
- warnings.warn(
- "setting an item on a masked array which has a shared "
- "mask will not copy the mask and also change the "
- "original mask array in the future.\n"
- "Check the NumPy 1.11 release notes for more "
- "information.",
- MaskedArrayFutureWarning, stacklevel=2)
- self.unshare_mask()
- _mask = self._mask
# Set the data, then the mask
_data[indx] = dval
_mask[indx] = mval
@@ -4022,6 +3958,7 @@ class MaskedArray(ndarray):
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
+ check._update_from(self)
check._mask = mask
return check
@@ -4475,8 +4412,6 @@ class MaskedArray(ndarray):
return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
- flatten = _arraymethod('flatten')
-
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
@@ -4522,8 +4457,6 @@ class MaskedArray(ndarray):
r._mask = nomask
return r
- repeat = _arraymethod('repeat')
-
def reshape(self, *s, **kwargs):
"""
@@ -4659,7 +4592,7 @@ class MaskedArray(ndarray):
if self._mask is nomask and getmask(values) is nomask:
return
- m = getmaskarray(self).copy()
+ m = getmaskarray(self)
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
@@ -5810,14 +5743,15 @@ class MaskedArray(ndarray):
return out[()]
# Array methods
- copy = _arraymethod('copy')
- diagonal = _arraymethod('diagonal')
- transpose = _arraymethod('transpose')
- T = property(fget=lambda self: self.transpose())
- swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
+ diagonal = _arraymethod('diagonal')
+ flatten = _arraymethod('flatten')
+ repeat = _arraymethod('repeat')
squeeze = _arraymethod('squeeze')
+ swapaxes = _arraymethod('swapaxes')
+ T = property(fget=lambda self: self.transpose())
+ transpose = _arraymethod('transpose')
def tolist(self, fill_value=None):
"""
@@ -6358,7 +6292,7 @@ def is_masked(x):
##############################################################################
-class _extrema_operation(object):
+class _extrema_operation(_MaskedUFunc):
"""
Generic class for maximum/minimum functions.
@@ -6368,11 +6302,9 @@ class _extrema_operation(object):
"""
def __init__(self, ufunc, compare, fill_value):
- self.ufunc = ufunc
+ super(_extrema_operation, self).__init__(ufunc)
self.compare = compare
self.fill_value_func = fill_value
- self.__doc__ = ufunc.__doc__
- self.__name__ = ufunc.__name__
def __call__(self, a, b=None):
"Executes the call behavior."
@@ -6407,11 +6339,11 @@ class _extrema_operation(object):
kwargs = dict()
if m is nomask:
- t = self.ufunc.reduce(target, **kwargs)
+ t = self.f.reduce(target, **kwargs)
else:
target = target.filled(
self.fill_value_func(target)).view(type(target))
- t = self.ufunc.reduce(target, **kwargs)
+ t = self.f.reduce(target, **kwargs)
m = umath.logical_and.reduce(m, **kwargs)
if hasattr(t, '_mask'):
t._mask = m
@@ -6429,7 +6361,7 @@ class _extrema_operation(object):
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
- result = self.ufunc.outer(filled(a), filled(b))
+ result = self.f.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
@@ -6479,7 +6411,7 @@ ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
-class _frommethod:
+class _frommethod(object):
"""
Define functions from existing MaskedArray methods.
@@ -7295,7 +7227,7 @@ def mask_rowcols(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
@@ -7476,8 +7408,8 @@ def _convolve_or_correlate(f, a, v, mode, propagate_mask):
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
- f(getmaskarray(a), np.ones(np.shape(v), dtype=np.bool), mode=mode)
- | f(np.ones(np.shape(a), dtype=np.bool), getmaskarray(v), mode=mode)
+ f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+ | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
@@ -7957,7 +7889,7 @@ def fromflex(fxarray):
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
-class _convert2ma:
+class _convert2ma(object):
"""
Convert functions from numpy to numpy.ma.
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index d8ea3de8c..323fbce38 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -215,7 +215,7 @@ def masked_all_like(arr):
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
-class _fromnxfunction:
+class _fromnxfunction(object):
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
@@ -778,7 +778,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):
# not necessary for scalar True/False masks
try:
np.copyto(low.mask, high.mask, where=odd)
- except:
+ except Exception:
pass
if np.issubdtype(asorted.dtype, np.inexact):
@@ -939,7 +939,7 @@ def mask_rows(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
@@ -984,7 +984,7 @@ def mask_cols(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index ef5f5fd53..90a5141b3 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -243,7 +243,7 @@ class MaskedRecords(MaskedArray, object):
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
- hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
+ hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
@@ -276,7 +276,7 @@ class MaskedRecords(MaskedArray, object):
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
- except:
+ except Exception:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
@@ -294,7 +294,7 @@ class MaskedRecords(MaskedArray, object):
# internal attribute.
try:
object.__delattr__(self, attr)
- except:
+ except Exception:
return ret
# Let's try to set the field
try:
diff --git a/numpy/ma/tests/__init__.py b/numpy/ma/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/ma/tests/__init__.py
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index c2b8d1403..6aa8f3e08 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -20,7 +20,8 @@ import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
- TestCase, run_module_suite, assert_raises, assert_warns, suppress_warnings)
+ run_module_suite, assert_raises, assert_warns, suppress_warnings
+ )
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
@@ -55,10 +56,10 @@ suppress_copy_mask_on_assignment.filter(
"setting an item on a masked array which has a shared mask will not copy")
-class TestMaskedArray(TestCase):
+class TestMaskedArray(object):
# Base test class for MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
@@ -93,14 +94,14 @@ class TestMaskedArray(TestCase):
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
- self.assertTrue(x.filled().dtype is x._data.dtype)
+ assert_(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
- self.assertTrue(not isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertTrue((xm - ym).filled(0).any())
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
@@ -123,8 +124,8 @@ class TestMaskedArray(TestCase):
ym.shape = s
xf.shape = s
- self.assertTrue(not isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
@@ -217,7 +218,7 @@ class TestMaskedArray(TestCase):
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
- self.assertTrue(data.mask is nomask)
+ assert_(data.mask is nomask)
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
@@ -238,18 +239,18 @@ class TestMaskedArray(TestCase):
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
- self.assertFalse(m.flags.c_contiguous)
+ assert_(not m.flags.c_contiguous)
new_m = asarray(m)
- self.assertTrue(new_m.flags.c_contiguous)
+ assert_(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
- self.assertFalse(m.flags.c_contiguous)
+ assert_(not m.flags.c_contiguous)
new_m = asarray(m, order='C')
- self.assertTrue(new_m.flags.c_contiguous)
+ assert_(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
@@ -263,8 +264,8 @@ class TestMaskedArray(TestCase):
# Test of masked element
x = arange(6)
x[1] = masked
- self.assertTrue(str(masked) == '--')
- self.assertTrue(x[1] is masked)
+ assert_(str(masked) == '--')
+ assert_(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
@@ -273,12 +274,12 @@ class TestMaskedArray(TestCase):
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
- self.assertTrue(a[0] is x)
+ assert_(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
- self.assertTrue(a[0] is dt)
+ assert_(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
@@ -379,32 +380,43 @@ class TestMaskedArray(TestCase):
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
- self.assertTrue(m is m2)
+ assert_(m is m2)
m3 = make_mask(m, copy=1)
- self.assertTrue(m is not m3)
+ assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
- self.assertTrue(allequal(x1, y1.data))
+ assert_(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
- self.assertTrue(y1a._data.__array_interface__ ==
+ assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
- self.assertTrue(y1a.mask is y1.mask)
+ assert_(y1a.mask is y1.mask)
- y2 = array(x1, mask=m)
- self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
- self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
- self.assertTrue(y2[2] is masked)
+ y2 = array(x1, mask=m3)
+ assert_(y2._data.__array_interface__ == x1.__array_interface__)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(y2[2] is masked)
y2[2] = 9
- self.assertTrue(y2[2] is not masked)
- self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
- self.assertTrue(allequal(y2.mask, 0))
+ assert_(y2[2] is not masked)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a._data.__array_interface__ != x1.__array_interface__)
+ #assert_( y2a.mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ #assert_( y2a.mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
- self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
@@ -433,10 +445,16 @@ class TestMaskedArray(TestCase):
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
+ def test_copy_0d(self):
+ # gh-9430
+ x = np.ma.array(43, mask=True)
+ xc = x.copy()
+ assert_equal(xc.mask, True)
+
def test_copy_on_python_builtins(self):
# Tests copy works on python builtins (issue#8019)
- self.assertTrue(isMaskedArray(np.ma.copy([1,2,3])))
- self.assertTrue(isMaskedArray(np.ma.copy((1,2,3))))
+ assert_(isMaskedArray(np.ma.copy([1,2,3])))
+ assert_(isMaskedArray(np.ma.copy((1,2,3))))
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
@@ -506,7 +524,7 @@ class TestMaskedArray(TestCase):
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
- self.assertTrue(isinstance(a_pickled._data, np.matrix))
+ assert_(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
@@ -546,19 +564,19 @@ class TestMaskedArray(TestCase):
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
- self.assertRaises(TypeError, float, array([1, 1]))
+ assert_raises(TypeError, float, array([1, 1]))
with suppress_warnings() as sup:
sup.filter(UserWarning, 'Warning: converting a masked element')
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
- self.assertRaises(TypeError, lambda: float(a))
+ assert_raises(TypeError, lambda: float(a))
assert_equal(float(a[-1]), 3.)
- self.assertTrue(np.isnan(float(a[0])))
- self.assertRaises(TypeError, int, a)
+ assert_(np.isnan(float(a[0])))
+ assert_raises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
- self.assertRaises(MAError, lambda:int(a[0]))
+ assert_raises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
@@ -667,8 +685,8 @@ class TestMaskedArray(TestCase):
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
- self.assertTrue(a.flags['F_CONTIGUOUS'])
- self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
+ assert_(a.flags['F_CONTIGUOUS'])
+ assert_(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
@@ -679,6 +697,25 @@ class TestMaskedArray(TestCase):
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
+ def test_optinfo_forward_propagation(self):
+ a = array([1,2,2,4])
+ a._optinfo["key"] = "value"
+ assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[:2]._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"])
+
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
@@ -706,14 +743,14 @@ class TestMaskedArray(TestCase):
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
- control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
+ control = np.array([[1., 1.], [2., 2.]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
- mask=[[0, 1], [1, 0]], dtype=np.float)
+ mask=[[0, 1], [1, 0]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
@@ -723,7 +760,7 @@ class TestMaskedArray(TestCase):
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
- mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
+ mask=[[0, 1, 0], [1, 0, 1]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
@@ -731,7 +768,7 @@ class TestMaskedArray(TestCase):
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
- control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
+ control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
@@ -756,14 +793,14 @@ class TestMaskedArray(TestCase):
dtype=ndtype)
# w/o mask
f = a[0]
- self.assertTrue(isinstance(f, mvoid))
+ assert_(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
- self.assertTrue(isinstance(f, mvoid))
- self.assertTrue(f[0] is masked)
- self.assertTrue(f['a'] is masked)
+ assert_(isinstance(f, mvoid))
+ assert_(f[0] is masked)
+ assert_(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
@@ -850,10 +887,10 @@ class TestMaskedArray(TestCase):
assert_(mx2[0] == 0.)
-class TestMaskedArrayArithmetic(TestCase):
+class TestMaskedArrayArithmetic(object):
# Base test class for MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
@@ -870,7 +907,7 @@ class TestMaskedArrayArithmetic(TestCase):
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
@@ -930,8 +967,8 @@ class TestMaskedArrayArithmetic(TestCase):
# Tests mixed arithmetics.
na = np.array([1])
ma = array([1])
- self.assertTrue(isinstance(na + ma, MaskedArray))
- self.assertTrue(isinstance(ma + na, MaskedArray))
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
@@ -943,11 +980,11 @@ class TestMaskedArrayArithmetic(TestCase):
# Tests some scalar arithmetics on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
- self.assertTrue((1 / array(0)).mask)
- self.assertTrue((1 + xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue(maximum(xm, xm).mask)
- self.assertTrue(minimum(xm, xm).mask)
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
# Tests (in)equality on masked singleton
@@ -1019,7 +1056,7 @@ class TestMaskedArrayArithmetic(TestCase):
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
- self.assertTrue(res.dtype.type is np.intp)
+ assert_(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
@@ -1070,19 +1107,19 @@ class TestMaskedArrayArithmetic(TestCase):
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
- self.assertTrue(isinstance(aminimum, MaskedArray))
+ assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
- self.assertTrue(isinstance(aminimum, MaskedArray))
+ assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
- self.assertTrue(isinstance(amaximum, MaskedArray))
+ assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
- self.assertTrue(isinstance(amaximum, MaskedArray))
+ assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
@@ -1108,33 +1145,33 @@ class TestMaskedArrayArithmetic(TestCase):
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
- self.assertTrue(result is nout)
+ assert_(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
- self.assertTrue(result is nout)
+ assert_(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
- self.assertTrue(xm[0].max() is masked)
- self.assertTrue(xm[0].max(0) is masked)
- self.assertTrue(xm[0].max(-1) is masked)
+ assert_(xm[0].max() is masked)
+ assert_(xm[0].max(0) is masked)
+ assert_(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
- self.assertTrue(xm[0].min() is masked)
- self.assertTrue(xm[0].min(0) is masked)
- self.assertTrue(xm[0].min(-1) is masked)
+ assert_(xm[0].min() is masked)
+ assert_(xm[0].min(0) is masked)
+ assert_(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
- self.assertTrue(xm[0].ptp() is masked)
- self.assertTrue(xm[0].ptp(0) is masked)
- self.assertTrue(xm[0].ptp(-1) is masked)
+ assert_(xm[0].ptp() is masked)
+ assert_(xm[0].ptp(0) is masked)
+ assert_(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
- self.assertTrue(x.min() is masked)
- self.assertTrue(x.max() is masked)
- self.assertTrue(x.ptp() is masked)
+ assert_(x.min() is masked)
+ assert_(x.max() is masked)
+ assert_(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
@@ -1491,7 +1528,7 @@ class TestMaskedArrayArithmetic(TestCase):
assert_equal(a.mask, [0, 0, 0, 0, 1])
-class TestMaskedArrayAttributes(TestCase):
+class TestMaskedArrayAttributes(object):
def test_keepmask(self):
# Tests the keep mask flag
@@ -1519,8 +1556,8 @@ class TestMaskedArrayAttributes(TestCase):
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
- self.assertTrue(xh._hardmask)
- self.assertTrue(not xs._hardmask)
+ assert_(xh._hardmask)
+ assert_(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
@@ -1610,7 +1647,7 @@ class TestMaskedArrayAttributes(TestCase):
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
- self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
@@ -1684,7 +1721,7 @@ class TestMaskedArrayAttributes(TestCase):
assert_equal(m._mask, np.ma.nomask)
-class TestFillingValues(TestCase):
+class TestFillingValues(object):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
@@ -1699,8 +1736,8 @@ class TestFillingValues(TestCase):
assert_equal(fval, b"0")
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
- self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
- self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
+ assert_raises(TypeError, _check_fill_value, 1e+20, int)
+ assert_raises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
@@ -1708,49 +1745,45 @@ class TestFillingValues(TestCase):
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using a flexible type w/ a different type shouldn't matter
- # BEHAVIOR in 1.5 and earlier: match structured types by position
- #fill_val = np.array((-999, -12345678.9, "???"),
- # dtype=[("A", int), ("B", float), ("C", "|S3")])
- # BEHAVIOR in 1.6 and later: match structured types by name
- fill_val = np.array(("???", -999, -12345678.9),
- dtype=[("c", "|S3"), ("a", int), ("b", float), ])
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- fval = _check_fill_value(fill_val, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured
+ # types by position
+ fill_val = np.array((-999, -12345678.9, "???"),
+ dtype=[("A", int), ("B", float), ("C", "|S3")])
+ fval = _check_fill_value(fill_val, ndtype)
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, b"???")
fval = _check_fill_value(fill_val, object)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
- #self.assertTrue(isinstance(fval, ndarray))
+ #assert_(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
@@ -1777,6 +1810,31 @@ class TestFillingValues(TestCase):
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
+ def test_default_fill_value(self):
+ # check all calling conventions
+ f1 = default_fill_value(1.)
+ f2 = default_fill_value(np.array(1.))
+ f3 = default_fill_value(np.array(1.).dtype)
+ assert_equal(f1, f2)
+ assert_equal(f1, f3)
+
+ def test_default_fill_value_structured(self):
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+
+ f1 = default_fill_value(fields)
+ f2 = default_fill_value(fields.dtype)
+ expected = np.array((default_fill_value(0),
+ default_fill_value('0'),
+ default_fill_value(0.)), dtype=fields.dtype)
+ assert_equal(f1, expected)
+ assert_equal(f2, expected)
+
+ def test_default_fill_value_void(self):
+ dt = np.dtype([('v', 'V7')])
+ f = default_fill_value(dt)
+ assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))
+
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
@@ -1841,33 +1899,47 @@ class TestFillingValues(TestCase):
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
- np.testing.utils.assert_equal(test, control)
+ np.testing.assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
- np.testing.utils.assert_equal(test, control)
+ np.testing.assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
+ assert_equal(test.dtype, a.dtype)
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
+ def test_extremum_fill_value_subdtype(self):
+ a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])
+
+ test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))
+
+ test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))
+
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
@@ -1976,17 +2048,17 @@ class TestFillingValues(TestCase):
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
-class TestUfuncs(TestCase):
+class TestUfuncs(object):
# Test class for the application of ufuncs on MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
@@ -2022,8 +2094,8 @@ class TestUfuncs(TestCase):
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
- self.assertTrue(not alltrue(a, axis=0))
- self.assertTrue(sometrue(a, axis=0))
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
@@ -2036,8 +2108,8 @@ class TestUfuncs(TestCase):
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
- self.assertTrue(amask.max(1)[0].mask)
- self.assertTrue(amask.min(1)[0].mask)
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is a ndarray (not a MaskedArray...)
@@ -2047,14 +2119,14 @@ class TestUfuncs(TestCase):
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
- self.assertTrue(not isinstance(test.mask, MaskedArray))
+ assert_(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
- self.assertRaises(TypeError, operator.mul, a, "abc")
- self.assertRaises(TypeError, operator.truediv, a, "abc")
+ assert_raises(TypeError, operator.mul, a, "abc")
+ assert_raises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
@@ -2120,10 +2192,10 @@ class TestUfuncs(TestCase):
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics(TestCase):
+class TestMaskedArrayInPlaceArithmetics(object):
# Test MaskedArray Arithmetics
- def setUp(self):
+ def setup(self):
x = arange(10)
y = arange(10)
xm = arange(10)
@@ -2622,9 +2694,9 @@ class TestMaskedArrayInPlaceArithmetics(TestCase):
assert_equal(len(w), 0, "Failed on type=%s." % t)
-class TestMaskedArrayMethods(TestCase):
+class TestMaskedArrayMethods(object):
# Test class for miscellaneous MaskedArrays methods.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -2678,25 +2750,25 @@ class TestMaskedArrayMethods(TestCase):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
- self.assertTrue(allclose(a, b))
+ assert_(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
- self.assertTrue(not allclose(a, b))
+ assert_(not allclose(a, b))
b[0] = np.inf
- self.assertTrue(allclose(a, b))
+ assert_(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
- self.assertTrue(allclose(a, b, masked_equal=True))
- self.assertTrue(not allclose(a, b, masked_equal=False))
+ assert_(allclose(a, b, masked_equal=True))
+ assert_(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
- self.assertTrue(allclose(a, 0, masked_equal=True))
+ assert_(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
- self.assertTrue(allclose(a, a))
+ assert_(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
@@ -2710,15 +2782,15 @@ class TestMaskedArrayMethods(TestCase):
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
- self.assertFalse(mxbig.all())
- self.assertTrue(mxbig.any())
+ assert_(not mxbig.all())
+ assert_(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
- self.assertFalse(mxsmall.all())
- self.assertTrue(mxsmall.any())
+ assert_(not mxsmall.all())
+ assert_(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
@@ -2736,15 +2808,15 @@ class TestMaskedArrayMethods(TestCase):
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
- self.assertFalse(mXbig.all())
- self.assertTrue(mXbig.any())
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
- self.assertFalse(mXsmall.all())
- self.assertTrue(mXsmall.any())
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
@@ -2755,18 +2827,18 @@ class TestMaskedArrayMethods(TestCase):
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
- self.assertTrue(full.all() is masked)
+ assert_(full.all() is masked)
full.all(out=store)
- self.assertTrue(store)
- self.assertTrue(store._mask, True)
- self.assertTrue(store is not masked)
+ assert_(store)
+ assert_(store._mask, True)
+ assert_(store is not masked)
store = empty((), dtype=bool)
- self.assertTrue(full.any() is masked)
+ assert_(full.any() is masked)
full.any(out=store)
- self.assertTrue(not store)
- self.assertTrue(store._mask, True)
- self.assertTrue(store is not masked)
+ assert_(not store)
+ assert_(store._mask, True)
+ assert_(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
@@ -2851,7 +2923,7 @@ class TestMaskedArrayMethods(TestCase):
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
- self.assertTrue(isinstance(b, np.matrix))
+ assert_(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
@@ -2885,11 +2957,11 @@ class TestMaskedArrayMethods(TestCase):
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is not masked)
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
@@ -2915,12 +2987,12 @@ class TestMaskedArrayMethods(TestCase):
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
- self.assertTrue(x[0] is not masked)
+ assert_(x[0] is not masked)
assert_equal(x[0], 0)
- self.assertTrue(x[1] is not masked)
+ assert_(x[1] is not masked)
assert_equal(x[1], 3)
- self.assertTrue(x[2] is masked)
- self.assertTrue(x[3] is not masked)
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
@@ -3021,7 +3093,7 @@ class TestMaskedArrayMethods(TestCase):
x = [1, 4, 2, 3]
sortedx = sort(x)
- self.assertTrue(not isinstance(sorted, MaskedArray))
+ assert_(not isinstance(sorted, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
@@ -3086,27 +3158,41 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(am, an)
def test_sort_flexible(self):
- # Test sort on flexible dtype.
+ # Test sort on structured dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
-
- test = sort(a)
- b = array(
+ mask_last = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
- assert_equal(test, b)
- assert_equal(test.mask, b.mask)
+ mask_first = array(
+ data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
+ mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
+ dtype=[('A', int), ('B', int)])
+
+ test = sort(a)
+ assert_equal(test, mask_last)
+ assert_equal(test.mask, mask_last.mask)
test = sort(a, endwith=False)
- b = array(
- data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
- mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
- dtype=[('A', int), ('B', int)])
- assert_equal(test, b)
- assert_equal(test.mask, b.mask)
+ assert_equal(test, mask_first)
+ assert_equal(test.mask, mask_first.mask)
+
+ # Test sort on dtype with subarray (gh-8069)
+ dt = np.dtype([('v', int, 2)])
+ a = a.view(dt)
+ mask_last = mask_last.view(dt)
+ mask_first = mask_first.view(dt)
+
+ test = sort(a)
+ assert_equal(test, mask_last)
+ assert_equal(test.mask, mask_last.mask)
+
+ test = sort(a, endwith=False)
+ assert_equal(test, mask_first)
+ assert_equal(test.mask, mask_first.mask)
def test_argsort(self):
# Test argsort
@@ -3120,8 +3206,21 @@ class TestMaskedArrayMethods(TestCase):
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
- data = masked_array([[1]], mask=True)
- self.assertTrue(data.squeeze() is masked)
+
+ # normal ndarrays return a view
+ arr = np.array([[1]])
+ arr_sq = arr.squeeze()
+ assert_equal(arr_sq, 1)
+ arr_sq[...] = 2
+ assert_equal(arr[0,0], 2)
+
+ # so maskedarrays should too
+ m_arr = masked_array([[1]], mask=True)
+ m_arr_sq = m_arr.squeeze()
+ assert_(m_arr_sq is not np.ma.masked)
+ assert_equal(m_arr_sq.mask, True)
+ m_arr_sq[...] = 2
+ assert_equal(m_arr[0,0], 2)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
@@ -3155,8 +3254,8 @@ class TestMaskedArrayMethods(TestCase):
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
# assert_equal crashes when passed np.ma.mask
- self.assertIs(x[1], np.ma.masked)
- self.assertIs(x.take(1), np.ma.masked)
+ assert_(x[1] is np.ma.masked)
+ assert_(x.take(1) is np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
@@ -3200,8 +3299,8 @@ class TestMaskedArrayMethods(TestCase):
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
- self.assertTrue(xlist[1] is None)
- self.assertTrue(xlist[-2] is None)
+ assert_(xlist[1] is None)
+ assert_(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
@@ -3304,10 +3403,37 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
+ def test_arraymethod_0d(self):
+ # gh-9430
+ x = np.ma.array(42, mask=True)
+ assert_equal(x.T.mask, x.mask)
+ assert_equal(x.T.data, x.data)
+
+ def test_transpose_view(self):
+ x = np.ma.array([[1, 2, 3], [4, 5, 6]])
+ x[0,1] = np.ma.masked
+ xt = x.T
+
+ xt[1,0] = 10
+ xt[0,1] = np.ma.masked
+
+ assert_equal(x.data, xt.T.data)
+ assert_equal(x.mask, xt.T.mask)
+
+ def test_diagonal_view(self):
+ x = np.ma.zeros((3,3))
+ x[0,0] = 10
+ x[1,1] = np.ma.masked
+ x[2,2] = 20
+ xd = x.diagonal()
+ x[1,1] = 15
+ assert_equal(xd.mask, x.diagonal().mask)
+ assert_equal(xd.data, x.diagonal().data)
-class TestMaskedArrayMathMethods(TestCase):
- def setUp(self):
+class TestMaskedArrayMathMethods(object):
+
+ def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -3366,20 +3492,20 @@ class TestMaskedArrayMathMethods(TestCase):
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
- self.assertTrue(result is output)
+ assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
- self.assertTrue(result is output)
+ assert_(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
- rows = np.zeros(n, np.float)
- cols = np.zeros(m, np.float)
+ rows = np.zeros(n, float)
+ cols = np.zeros(m, float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
@@ -3395,21 +3521,21 @@ class TestMaskedArrayMathMethods(TestCase):
def test_sum_object(self):
# Test sum on object dtype
- a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
- a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
- a = masked_array([1, 2, 3], dtype=np.object)
+ a = masked_array([1, 2, 3], dtype=object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
@@ -3503,31 +3629,31 @@ class TestMaskedArrayMathMethods(TestCase):
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
- self.assertTrue(method() is masked)
- self.assertTrue(method(0) is masked)
- self.assertTrue(method(-1) is masked)
+ assert_(method() is masked)
+ assert_(method(0) is masked)
+ assert_(method(-1) is masked)
# Using a masked array as explicit output
method(out=mout)
- self.assertTrue(mout is not masked)
+ assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout)
- self.assertTrue(np.isnan(nout))
+ assert_(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
- self.assertTrue(method(ddof=1) is masked)
- self.assertTrue(method(0, ddof=1) is masked)
- self.assertTrue(method(-1, ddof=1) is masked)
+ assert_(method(ddof=1) is masked)
+ assert_(method(0, ddof=1) is masked)
+ assert_(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
- self.assertTrue(mout is not masked)
+ assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
- self.assertTrue(np.isnan(nout))
+ assert_(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
@@ -3576,9 +3702,9 @@ class TestMaskedArrayMathMethods(TestCase):
assert_equal(a.max(1), [3, 6])
-class TestMaskedArrayMathMethodsComplex(TestCase):
+class TestMaskedArrayMathMethodsComplex(object):
# Test class for miscellaneous MaskedArrays methods.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -3629,10 +3755,10 @@ class TestMaskedArrayMathMethodsComplex(TestCase):
mX[:, k].compressed().std())
-class TestMaskedArrayFunctions(TestCase):
+class TestMaskedArrayFunctions(object):
# Test class for miscellaneous functions.
- def setUp(self):
+ def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
@@ -3756,12 +3882,12 @@ class TestMaskedArrayFunctions(TestCase):
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
- self.assertTrue(result is output)
+ assert_(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
- self.assertTrue(result is output)
+ assert_(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
@@ -3790,13 +3916,13 @@ class TestMaskedArrayFunctions(TestCase):
def test_identity(self):
a = identity(5)
- self.assertTrue(isinstance(a, MaskedArray))
+ assert_(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
- self.assertTrue(power(x, masked) is masked)
+ assert_(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
@@ -4004,7 +4130,7 @@ class TestMaskedArrayFunctions(TestCase):
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
- self.assertTrue(store is chosen)
+ assert_(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
@@ -4025,56 +4151,56 @@ class TestMaskedArrayFunctions(TestCase):
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['C'])
+ assert_(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['C'])
+ assert_(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['F'])
+ assert_(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['F'])
+ assert_(b.flags['F'])
c = np.reshape(a, (2, 5))
- self.assertTrue(isinstance(c, MaskedArray))
+ assert_(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
- self.assertTrue(c[0, 0] is masked)
- self.assertTrue(c.flags['C'])
+ assert_(c[0, 0] is masked)
+ assert_(c.flags['C'])
def test_make_mask_descr(self):
# Flexible
- ntype = [('a', np.float), ('b', np.float)]
+ ntype = [('a', float), ('b', float)]
test = make_mask_descr(ntype)
- assert_equal(test, [('a', np.bool), ('b', np.bool)])
+ assert_equal(test, [('a', bool), ('b', bool)])
assert_(test is make_mask_descr(test))
# Standard w/ shape
- ntype = (np.float, 2)
+ ntype = (float, 2)
test = make_mask_descr(ntype)
- assert_equal(test, (np.bool, 2))
+ assert_equal(test, (bool, 2))
assert_(test is make_mask_descr(test))
# Standard standard
- ntype = np.float
+ ntype = float
test = make_mask_descr(ntype)
- assert_equal(test, np.dtype(np.bool))
+ assert_equal(test, np.dtype(bool))
assert_(test is make_mask_descr(test))
# Nested
- ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
+ ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
assert_(test is make_mask_descr(test))
# Named+ shape
- ntype = [('a', (np.float, 2))]
+ ntype = [('a', (float, 2))]
test = make_mask_descr(ntype)
- assert_equal(test, np.dtype([('a', (np.bool, 2))]))
+ assert_equal(test, np.dtype([('a', (bool, 2))]))
assert_(test is make_mask_descr(test))
# 2 names
@@ -4099,25 +4225,25 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
- mask = np.array([0, 1], dtype=np.bool)
+ mask = np.array([0, 1], dtype=bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
- mdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
- mdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
- mdtype = [('a', np.float), ('b', np.float)]
- bdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', float), ('b', float)]
+ bdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
@@ -4133,7 +4259,7 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test2, test)
# test that nomask is returned when m is nomask.
bools = [True, False]
- dtypes = [MaskType, np.float]
+ dtypes = [MaskType, float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
@@ -4141,7 +4267,7 @@ class TestMaskedArrayFunctions(TestCase):
def test_mask_or(self):
# Initialize
- mtype = [('a', np.bool), ('b', np.bool)]
+ mtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
@@ -4157,14 +4283,14 @@ class TestMaskedArrayFunctions(TestCase):
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
- othertype = [('A', np.bool), ('B', np.bool)]
+ othertype = [('A', bool), ('B', bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
- dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
+ dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
@@ -4173,7 +4299,7 @@ class TestMaskedArrayFunctions(TestCase):
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
- mask = np.array([0, 0, 1], dtype=np.bool)
+ mask = np.array([0, 0, 1], dtype=bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
@@ -4266,9 +4392,9 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
-class TestMaskedFields(TestCase):
+class TestMaskedFields(object):
- def setUp(self):
+ def setup(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
@@ -4367,7 +4493,7 @@ class TestMaskedFields(TestCase):
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
- self.assertTrue(isinstance(test, np.matrix))
+ assert_(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
@@ -4432,7 +4558,7 @@ class TestMaskedFields(TestCase):
assert_equal(len(rec), len(self.data['ddtype']))
-class TestMaskedObjectArray(TestCase):
+class TestMaskedObjectArray(object):
def test_getitem(self):
arr = np.ma.array([None, None])
@@ -4480,9 +4606,9 @@ class TestMaskedObjectArray(TestCase):
assert_(arr[0] is np.ma.masked)
-class TestMaskedView(TestCase):
+class TestMaskedView(object):
- def setUp(self):
+ def setup(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
@@ -4493,14 +4619,14 @@ class TestMaskedView(TestCase):
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
- self.assertTrue(not isinstance(test, MaskedArray))
+ assert_(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
@@ -4508,7 +4634,7 @@ class TestMaskedView(TestCase):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
@@ -4521,13 +4647,13 @@ class TestMaskedView(TestCase):
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
@@ -4536,17 +4662,17 @@ class TestMaskedView(TestCase):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
@@ -4554,10 +4680,10 @@ class TestMaskedView(TestCase):
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
- self.assertTrue(isinstance(test, np.matrix))
- self.assertTrue(not isinstance(test, MaskedArray))
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
-class TestOptionalArgs(TestCase):
+class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4644,10 +4770,10 @@ class TestOptionalArgs(TestCase):
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
-class TestMaskedConstant(TestCase):
+class TestMaskedConstant(object):
def _do_add_test(self, add):
# sanity check
- self.assertIs(add(np.ma.masked, 1), np.ma.masked)
+ assert_(add(np.ma.masked, 1) is np.ma.masked)
# now try with a vector
vector = np.array([1, 2, 3])
@@ -4729,6 +4855,11 @@ def test_ufunc_with_output():
y = np.add(x, 1., out=x)
assert_(y is x)
+def test_astype():
+ descr = [('v', int, 3), ('x', [('y', float)])]
+ x = array(([1, 2, 3], (1.0,)), dtype=descr)
+ assert_equal(x, x.astype(descr))
+
###############################################################################
if __name__ == "__main__":
diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py
index 24dd7cb8d..23c095470 100644
--- a/numpy/ma/tests/test_deprecations.py
+++ b/numpy/ma/tests/test_deprecations.py
@@ -4,11 +4,11 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_warns
+from numpy.testing import run_module_suite, assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
-class TestArgsort(TestCase):
+class TestArgsort(object):
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
@@ -37,7 +37,7 @@ class TestArgsort(TestCase):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
-class TestMinimumMaximum(TestCase):
+class TestMinimumMaximum(object):
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 4b7fe07b6..1bec584c1 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -14,8 +14,7 @@ import itertools
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_warns, suppress_warnings,
- assert_raises
+ run_module_suite, assert_warns, suppress_warnings, assert_raises,
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
@@ -35,7 +34,7 @@ from numpy.ma.extras import (
import numpy.ma.extras as mae
-class TestGeneric(TestCase):
+class TestGeneric(object):
#
def test_masked_all(self):
# Tests masked_all
@@ -140,7 +139,7 @@ class TestGeneric(TestCase):
assert_equal(test, None)
-class TestAverage(TestCase):
+class TestAverage(object):
# Several tests of average. Why so many ? Good point...
def test_testAverage1(self):
# Test of average.
@@ -149,7 +148,7 @@ class TestAverage(TestCase):
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
assert_equal(2.0, result)
- self.assertTrue(wts == 4.0)
+ assert_(wts == 4.0)
ott[:] = masked
assert_equal(average(ott, axis=0).mask, [True])
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
@@ -271,7 +270,7 @@ class TestAverage(TestCase):
assert_almost_equal(wav1.imag, expected1.imag)
-class TestConcatenator(TestCase):
+class TestConcatenator(object):
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_1d(self):
@@ -281,7 +280,7 @@ class TestConcatenator(TestCase):
m = [1, 0, 0, 0, 0]
d = masked_array(b, mask=m)
c = mr_[d, 0, 0, d]
- self.assertTrue(isinstance(c, MaskedArray))
+ assert_(isinstance(c, MaskedArray))
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
assert_array_equal(c.mask, mr_[m, 0, 0, m])
@@ -295,12 +294,12 @@ class TestConcatenator(TestCase):
b_2 = masked_array(a_2, mask=m_2)
# append columns
d = mr_['1', b_1, b_2]
- self.assertTrue(d.shape == (5, 10))
+ assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b_1)
assert_array_equal(d[:, 5:], b_2)
assert_array_equal(d.mask, np.r_['1', m_1, m_2])
d = mr_[b_1, b_2]
- self.assertTrue(d.shape == (10, 5))
+ assert_(d.shape == (10, 5))
assert_array_equal(d[:5,:], b_1)
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
@@ -318,7 +317,7 @@ class TestConcatenator(TestCase):
assert_equal(type(actual.data), type(expected.data))
-class TestNotMasked(TestCase):
+class TestNotMasked(object):
# Tests notmasked_edges and notmasked_contiguous.
def test_edges(self):
@@ -367,19 +366,19 @@ class TestNotMasked(TestCase):
assert_equal(tmp[-3], slice(0, 4, None))
#
tmp = notmasked_contiguous(a, 0)
- self.assertTrue(len(tmp[-1]) == 1)
- self.assertTrue(tmp[-2] is None)
+ assert_(len(tmp[-1]) == 1)
+ assert_(tmp[-2] is None)
assert_equal(tmp[-3], tmp[-1])
- self.assertTrue(len(tmp[0]) == 2)
+ assert_(len(tmp[0]) == 2)
#
tmp = notmasked_contiguous(a, 1)
assert_equal(tmp[0][-1], slice(0, 4, None))
- self.assertTrue(tmp[1] is None)
+ assert_(tmp[1] is None)
assert_equal(tmp[2][-1], slice(7, 8, None))
assert_equal(tmp[2][-2], slice(0, 6, None))
-class TestCompressFunctions(TestCase):
+class TestCompressFunctions(object):
def test_compress_nd(self):
# Tests compress_nd
@@ -538,12 +537,12 @@ class TestCompressFunctions(TestCase):
assert_equal(mask_rowcols(x, 1,).mask,
[[1, 1, 0], [1, 1, 0], [1, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
- self.assertTrue(mask_rowcols(x).all() is masked)
- self.assertTrue(mask_rowcols(x, 0).all() is masked)
- self.assertTrue(mask_rowcols(x, 1).all() is masked)
- self.assertTrue(mask_rowcols(x).mask.all())
- self.assertTrue(mask_rowcols(x, 0).mask.all())
- self.assertTrue(mask_rowcols(x, 1).mask.all())
+ assert_(mask_rowcols(x).all() is masked)
+ assert_(mask_rowcols(x, 0).all() is masked)
+ assert_(mask_rowcols(x, 1).all() is masked)
+ assert_(mask_rowcols(x).mask.all())
+ assert_(mask_rowcols(x, 0).mask.all())
+ assert_(mask_rowcols(x, 1).mask.all())
def test_dot(self):
# Tests dot product
@@ -632,7 +631,7 @@ class TestCompressFunctions(TestCase):
assert_equal(a, res)
-class TestApplyAlongAxis(TestCase):
+class TestApplyAlongAxis(object):
# Tests 2D functions
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
@@ -654,20 +653,20 @@ class TestApplyAlongAxis(TestCase):
assert_equal(xa, [[2, 5], [8, 11]])
-class TestApplyOverAxes(TestCase):
+class TestApplyOverAxes(object):
# Tests apply_over_axes
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[60], [92], [124]]])
assert_equal(test, ctrl)
- a[(a % 2).astype(np.bool)] = masked
+ a[(a % 2).astype(bool)] = masked
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[28], [44], [60]]])
assert_equal(test, ctrl)
-class TestMedian(TestCase):
+class TestMedian(object):
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
@@ -737,7 +736,7 @@ class TestMedian(TestCase):
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
- except:
+ except Exception:
raise AssertionError(msg % (mask, ndmin, axis, over))
# Invalid axis values should raise exception
@@ -886,7 +885,7 @@ class TestMedian(TestCase):
def test_nan(self):
with suppress_warnings() as w:
w.record(RuntimeWarning)
- for mask in (False, np.zeros(6, dtype=np.bool)):
+ for mask in (False, np.zeros(6, dtype=bool)):
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm.mask = mask
@@ -1069,9 +1068,9 @@ class TestMedian(TestCase):
assert_(type(np.ma.median(o.astype(object))), float)
-class TestCov(TestCase):
+class TestCov(object):
- def setUp(self):
+ def setup(self):
self.data = array(np.random.rand(12))
def test_1d_without_missing(self):
@@ -1136,9 +1135,9 @@ class TestCov(TestCase):
x.shape[0] / frac))
-class TestCorrcoef(TestCase):
+class TestCorrcoef(object):
- def setUp(self):
+ def setup(self):
self.data = array(np.random.rand(12))
self.data2 = array(np.random.rand(12))
@@ -1243,7 +1242,7 @@ class TestCorrcoef(TestCase):
control[:-1, :-1])
-class TestPolynomial(TestCase):
+class TestPolynomial(object):
#
def test_polyfit(self):
# Tests polyfit
@@ -1301,13 +1300,13 @@ class TestPolynomial(TestCase):
assert_almost_equal(a, a_)
-class TestArraySetOps(TestCase):
+class TestArraySetOps(object):
def test_unique_onlist(self):
# Test unique on list
data = [1, 1, 1, 2, 2, 3]
test = unique(data, return_index=True, return_inverse=True)
- self.assertTrue(isinstance(test[0], MaskedArray))
+ assert_(isinstance(test[0], MaskedArray))
assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
assert_equal(test[1], [0, 3, 5])
assert_equal(test[2], [0, 0, 0, 1, 1, 2])
@@ -1404,13 +1403,13 @@ class TestArraySetOps(TestCase):
test = ediff1d(x)
control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
assert_equal(test, control)
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
@@ -1525,7 +1524,7 @@ class TestArraySetOps(TestCase):
assert_array_equal(setdiff1d(a, b), np.array(['c']))
-class TestShapeBase(TestCase):
+class TestShapeBase(object):
def test_atleast_2d(self):
# Test atleast_2d
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 785733400..1ca8e175f 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -14,7 +14,7 @@ import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.ma import masked, nomask
-from numpy.testing import TestCase, run_module_suite, temppath
+from numpy.testing import run_module_suite, temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
@@ -28,21 +28,14 @@ from numpy.ma.testutils import (
)
-class TestMRecords(TestCase):
- # Base test class for MaskedArrays.
- def __init__(self, *args, **kwds):
- TestCase.__init__(self, *args, **kwds)
- self.setup()
+class TestMRecords(object):
- def setup(self):
- # Generic setup
- ilist = [1, 2, 3, 4, 5]
- flist = [1.1, 2.2, 3.3, 4.4, 5.5]
- slist = [b'one', b'two', b'three', b'four', b'five']
- ddtype = [('a', int), ('b', float), ('c', '|S8')]
- mask = [0, 1, 0, 0, 1]
- self.base = ma.array(list(zip(ilist, flist, slist)),
- mask=mask, dtype=ddtype)
+ ilist = [1, 2, 3, 4, 5]
+ flist = [1.1, 2.2, 3.3, 4.4, 5.5]
+ slist = [b'one', b'two', b'three', b'four', b'five']
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mask = [0, 1, 0, 0, 1]
+ base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
def test_byview(self):
# Test creation by view
@@ -279,16 +272,16 @@ class TestMRecords(TestCase):
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
- self.assertTrue(mbase._hardmask)
+ assert_(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
- self.assertTrue(not mbase._hardmask)
+ assert_(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
- self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask)
+ assert_(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
def test_pickling(self):
@@ -356,11 +349,11 @@ class TestMRecords(TestCase):
dtype=mult.dtype))
-class TestView(TestCase):
+class TestView(object):
- def setUp(self):
+ def setup(self):
(a, b) = (np.arange(10), np.random.rand(10))
- ndtype = [('a', np.float), ('b', np.float)]
+ ndtype = [('a', float), ('b', float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
@@ -370,48 +363,42 @@ class TestView(TestCase):
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
- self.assertTrue(isinstance(test, MaskedRecords))
+ assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
- ntype = (np.float, 2)
+ ntype = (float, 2)
test = mrec.view(ntype)
- self.assertTrue(isinstance(test, ma.MaskedArray))
- assert_equal(test, np.array(list(zip(a, b)), dtype=np.float))
- self.assertTrue(test[3, 1] is ma.masked)
+ assert_(isinstance(test, ma.MaskedArray))
+ assert_equal(test, np.array(list(zip(a, b)), dtype=float))
+ assert_(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
- alttype = [('A', np.float), ('B', np.float)]
+ alttype = [('A', float), ('B', float)]
test = mrec.view(alttype)
- self.assertTrue(isinstance(test, MaskedRecords))
+ assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
- self.assertTrue(test['B'][3] is masked)
+ assert_(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
- self.assertTrue(test._fill_value is None)
+ assert_(test._fill_value is None)
##############################################################################
-class TestMRecordsImport(TestCase):
- # Base test class for MaskedArrays.
- def __init__(self, *args, **kwds):
- TestCase.__init__(self, *args, **kwds)
- self.setup()
-
- def setup(self):
- # Generic setup
- _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
- _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
- _c = ma.array([b'one', b'two', b'three'],
- mask=[0, 0, 1], dtype='|S8')
- ddtype = [('a', int), ('b', float), ('c', '|S8')]
- mrec = fromarrays([_a, _b, _c], dtype=ddtype,
- fill_value=(b'99999', b'99999.',
- b'N/A'))
- nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
- self.data = (mrec, nrec, ddtype)
+class TestMRecordsImport(object):
+
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array([b'one', b'two', b'three'],
+ mask=[0, 0, 1], dtype='|S8')
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+ fill_value=(b'99999', b'99999.',
+ b'N/A'))
+ nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
+ data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
@@ -485,7 +472,7 @@ class TestMRecordsImport(TestCase):
with open(path, 'w') as f:
f.write(fcontent)
mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
- self.assertTrue(isinstance(mrectxt, MaskedRecords))
+ assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
@@ -504,7 +491,7 @@ def test_record_array_with_object_field():
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
- dtype=[('a', int), ('b', np.object)])
+ dtype=[('a', int), ('b', object)])
# getting an item used to fail
y[1]
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 51fa6ac36..9152e8d73 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -6,7 +6,8 @@ import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
- TestCase, run_module_suite, assert_, suppress_warnings)
+ run_module_suite, assert_, assert_raises, assert_equal,
+ )
from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
@@ -32,9 +33,9 @@ def eq(v, w, msg=''):
return result
-class TestMa(TestCase):
+class TestMa(object):
- def setUp(self):
+ def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
@@ -52,16 +53,16 @@ class TestMa(TestCase):
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertFalse(isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertEqual(shape(xm), s)
- self.assertEqual(xm.shape, s)
- self.assertEqual(xm.dtype, x.dtype)
- self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
- self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
- self.assertTrue(eq(xm, xf))
- self.assertTrue(eq(filled(xm, 1.e20), xf))
- self.assertTrue(eq(x, xm))
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.dtype, x.dtype)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
def test_testBasic2d(self):
# Test of basic array creation and properties in 2 dimensions.
@@ -73,107 +74,107 @@ class TestMa(TestCase):
ym.shape = s
xf.shape = s
- self.assertFalse(isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertEqual(shape(xm), s)
- self.assertEqual(xm.shape, s)
- self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
- self.assertEqual(count(xm),
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
- self.assertTrue(eq(xm, xf))
- self.assertTrue(eq(filled(xm, 1.e20), xf))
- self.assertTrue(eq(x, xm))
- self.setUp()
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
+ self.setup()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
- self.assertTrue(eq(a2d * a2d, a2d * a2dm))
- self.assertTrue(eq(a2d + a2d, a2d + a2dm))
- self.assertTrue(eq(a2d - a2d, a2d - a2dm))
+ assert_(eq(a2d * a2d, a2d * a2dm))
+ assert_(eq(a2d + a2d, a2d + a2dm))
+ assert_(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
- self.assertTrue(eq(-x, -xm))
- self.assertTrue(eq(x + y, xm + ym))
- self.assertTrue(eq(x - y, xm - ym))
- self.assertTrue(eq(x * y, xm * ym))
+ assert_(eq(-x, -xm))
+ assert_(eq(x + y, xm + ym))
+ assert_(eq(x - y, xm - ym))
+ assert_(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(x / y, xm / ym))
- self.assertTrue(eq(a10 + y, a10 + ym))
- self.assertTrue(eq(a10 - y, a10 - ym))
- self.assertTrue(eq(a10 * y, a10 * ym))
+ assert_(eq(x / y, xm / ym))
+ assert_(eq(a10 + y, a10 + ym))
+ assert_(eq(a10 - y, a10 - ym))
+ assert_(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(a10 / y, a10 / ym))
- self.assertTrue(eq(x + a10, xm + a10))
- self.assertTrue(eq(x - a10, xm - a10))
- self.assertTrue(eq(x * a10, xm * a10))
- self.assertTrue(eq(x / a10, xm / a10))
- self.assertTrue(eq(x ** 2, xm ** 2))
- self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
- self.assertTrue(eq(x ** y, xm ** ym))
- self.assertTrue(eq(np.add(x, y), add(xm, ym)))
- self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
- self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
+ assert_(eq(a10 / y, a10 / ym))
+ assert_(eq(x + a10, xm + a10))
+ assert_(eq(x - a10, xm - a10))
+ assert_(eq(x * a10, xm * a10))
+ assert_(eq(x / a10, xm / a10))
+ assert_(eq(x ** 2, xm ** 2))
+ assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
+ assert_(eq(x ** y, xm ** ym))
+ assert_(eq(np.add(x, y), add(xm, ym)))
+ assert_(eq(np.subtract(x, y), subtract(xm, ym)))
+ assert_(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
+ assert_(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
- self.assertTrue(isinstance(na + ma, MaskedArray))
- self.assertTrue(isinstance(ma + na, MaskedArray))
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue(eq(np.cos(x), cos(xm)))
- self.assertTrue(eq(np.cosh(x), cosh(xm)))
- self.assertTrue(eq(np.sin(x), sin(xm)))
- self.assertTrue(eq(np.sinh(x), sinh(xm)))
- self.assertTrue(eq(np.tan(x), tan(xm)))
- self.assertTrue(eq(np.tanh(x), tanh(xm)))
+ assert_(eq(np.cos(x), cos(xm)))
+ assert_(eq(np.cosh(x), cosh(xm)))
+ assert_(eq(np.sin(x), sin(xm)))
+ assert_(eq(np.sinh(x), sinh(xm)))
+ assert_(eq(np.tan(x), tan(xm)))
+ assert_(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
- self.assertTrue(eq(np.log(abs(x)), log(xm)))
- self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
- self.assertTrue(eq(np.exp(x), exp(xm)))
- self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
- self.assertTrue(eq(np.arccos(z), arccos(zm)))
- self.assertTrue(eq(np.arctan(z), arctan(zm)))
- self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
- self.assertTrue(eq(np.absolute(x), absolute(xm)))
- self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
- self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
- self.assertTrue(eq(np.less(x, y), less(xm, ym)))
- self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
- self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
- self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
- self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
- self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
+ assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
+ assert_(eq(np.log(abs(x)), log(xm)))
+ assert_(eq(np.log10(abs(x)), log10(xm)))
+ assert_(eq(np.exp(x), exp(xm)))
+ assert_(eq(np.arcsin(z), arcsin(zm)))
+ assert_(eq(np.arccos(z), arccos(zm)))
+ assert_(eq(np.arctan(z), arctan(zm)))
+ assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
+ assert_(eq(np.absolute(x), absolute(xm)))
+ assert_(eq(np.equal(x, y), equal(xm, ym)))
+ assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
+ assert_(eq(np.less(x, y), less(xm, ym)))
+ assert_(eq(np.greater(x, y), greater(xm, ym)))
+ assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
+ assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
+ assert_(eq(np.conjugate(x), conjugate(xm)))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
+ assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
+ assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
- self.assertTrue(count(ott).dtype.type is np.intp)
- self.assertEqual(3, count(ott))
- self.assertEqual(1, count(1))
- self.assertTrue(eq(0, array(1, mask=[1])))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_equal(3, count(ott))
+ assert_equal(1, count(1))
+ assert_(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
- self.assertTrue(count(ott).dtype.type is np.intp)
+ assert_(count(ott).dtype.type is np.intp)
assert_(isinstance(count(ott, 0), np.ndarray))
- self.assertTrue(count(ott).dtype.type is np.intp)
- self.assertTrue(eq(3, count(ott)))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
- self.assertTrue(eq([1, 2], count(ott, 0)))
+ assert_(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
@@ -182,29 +183,29 @@ class TestMa(TestCase):
xmr = ravel(xm)
# true because of careful selection of data
- self.assertTrue(eq(max(xr), maximum.reduce(xmr)))
- self.assertTrue(eq(min(xr), minimum.reduce(xmr)))
+ assert_(eq(max(xr), maximum.reduce(xmr)))
+ assert_(eq(min(xr), minimum.reduce(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
- self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
- self.assertTrue(eq(4, sum(array(4), axis=0)))
- self.assertTrue(eq(4, sum(array(4), axis=0)))
- self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
- self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
- self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
- self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
- self.assertTrue(eq(np.product(x, 0), product(x, 0)))
- self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
+ assert_(eq(np.add.reduce(x), add.reduce(x)))
+ assert_(eq(np.add.accumulate(x), add.accumulate(x)))
+ assert_(eq(4, sum(array(4), axis=0)))
+ assert_(eq(4, sum(array(4), axis=0)))
+ assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
+ assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
+ assert_(eq(np.sum(x, 0), sum(x, 0)))
+ assert_(eq(np.product(x, axis=0), product(x, axis=0)))
+ assert_(eq(np.product(x, 0), product(x, 0)))
+ assert_(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
- self.assertTrue(eq(np.concatenate((x, y), 1),
+ assert_(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
- self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
- self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
- self.assertTrue(eq(np.product(x, 1), product(x, 1)))
+ assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+ assert_(eq(np.sum(x, 1), sum(x, 1)))
+ assert_(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
@@ -251,80 +252,105 @@ class TestMa(TestCase):
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
- self.assertEqual(type(s2), str)
- self.assertEqual(type(s1), str)
- self.assertEqual(s1, s2)
+ assert_equal(type(s2), str)
+ assert_equal(type(s1), str)
+ assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
- with suppress_warnings() as sup:
- sup.filter(
- np.ma.core.MaskedArrayFutureWarning,
- "setting an item on a masked array which has a "
- "shared mask will not copy")
-
- n = [0, 0, 1, 0, 0]
- m = make_mask(n)
- m2 = make_mask(m)
- self.assertTrue(m is m2)
- m3 = make_mask(m, copy=1)
- self.assertTrue(m is not m3)
-
- x1 = np.arange(5)
- y1 = array(x1, mask=m)
- self.assertTrue(y1._data is not x1)
- self.assertTrue(allequal(x1, y1._data))
- self.assertTrue(y1.mask is m)
-
- y1a = array(y1, copy=0)
- self.assertTrue(y1a.mask is y1.mask)
-
- y2 = array(x1, mask=m, copy=0)
- self.assertTrue(y2.mask is m)
- self.assertTrue(y2[2] is masked)
- y2[2] = 9
- self.assertTrue(y2[2] is not masked)
- self.assertTrue(y2.mask is not m)
- self.assertTrue(allequal(y2.mask, 0))
-
- y3 = array(x1 * 1.0, mask=m)
- self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
-
- x4 = arange(4)
- x4[2] = masked
- y4 = resize(x4, (8,))
- self.assertTrue(eq(concatenate([x4, x4]), y4))
- self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
- y5 = repeat(x4, (2, 2, 2, 2), axis=0)
- self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
- y6 = repeat(x4, 2, axis=0)
- self.assertTrue(eq(y5, y6))
+ n = [0, 0, 1, 0, 0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ assert_(m is m2)
+ m3 = make_mask(m, copy=1)
+ assert_(m is not m3)
+
+ x1 = np.arange(5)
+ y1 = array(x1, mask=m)
+ assert_(y1._data is not x1)
+ assert_(allequal(x1, y1._data))
+ assert_(y1.mask is m)
+
+ y1a = array(y1, copy=0)
+ assert_(y1a.mask is y1.mask)
+
+ y2 = array(x1, mask=m3, copy=0)
+ assert_(y2.mask is m3)
+ assert_(y2[2] is masked)
+ y2[2] = 9
+ assert_(y2[2] is not masked)
+ assert_(y2.mask is m3)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a.mask is not m)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ assert_(y2a.mask is not m)
+ assert_(allequal(y2a.mask, 0))
+
+ y3 = array(x1 * 1.0, mask=m)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
+
+ x4 = arange(4)
+ x4[2] = masked
+ y4 = resize(x4, (8,))
+ assert_(eq(concatenate([x4, x4]), y4))
+ assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
+ y5 = repeat(x4, (2, 2, 2, 2), axis=0)
+ assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
+ y6 = repeat(x4, 2, axis=0)
+ assert_(eq(y5, y6))
def test_testPut(self):
# Test of put
- with suppress_warnings() as sup:
- sup.filter(
- np.ma.core.MaskedArrayFutureWarning,
- "setting an item on a masked array which has a "
- "shared mask will not copy")
- d = arange(5)
- n = [0, 0, 0, 1, 1]
- m = make_mask(n)
- x = array(d, mask=m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
- x[[1, 4]] = [10, 40]
- self.assertTrue(x.mask is not m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is not masked)
- self.assertTrue(eq(x, [0, 10, 2, -1, 40]))
-
- x = array(d, mask=m)
- x.put([0, 1, 2], [-1, 100, 200])
- self.assertTrue(eq(x, [-1, 100, 200, 0, 0]))
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ m2 = m.copy()
+ x = array(d, mask=m)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ x[[1, 4]] = [10, 40]
+ assert_(x.mask is m)
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
+ assert_(eq(x, [0, 10, 2, -1, 40]))
+
+ x = array(d, mask=m2, copy=True)
+ x.put([0, 1, 2], [-1, 100, 200])
+ assert_(x.mask is not m2)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ assert_(eq(x, [-1, 100, 200, 0, 0]))
+
+ def test_testPut2(self):
+ # Test of put
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ x[2:4] = z
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
+
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ y = x[2:4]
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ y[:] = z
+ assert_(y[0] is masked)
+ assert_(y[1] is not masked)
+ assert_(eq(y, [10, 40]))
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
@@ -531,147 +557,147 @@ class TestMa(TestCase):
# Test of masked element
xx = arange(6)
xx[1] = masked
- self.assertTrue(str(masked) == '--')
- self.assertTrue(xx[1] is masked)
- self.assertEqual(filled(xx[1], 0), 0)
+ assert_(str(masked) == '--')
+ assert_(xx[1] is masked)
+ assert_equal(filled(xx[1], 0), 0)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
- self.assertTrue(eq(2.0, average(ott, axis=0)))
- self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+ assert_(eq(2.0, average(ott, axis=0)))
+ assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
- self.assertTrue(eq(2.0, result))
- self.assertTrue(wts == 4.0)
+ assert_(eq(2.0, result))
+ assert_(wts == 4.0)
ott[:] = masked
- self.assertTrue(average(ott, axis=0) is masked)
+ assert_(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
- self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
- self.assertTrue(average(ott, axis=1)[0] is masked)
- self.assertTrue(eq([2., 0.], average(ott, axis=0)))
+ assert_(eq(average(ott, axis=0), [2.0, 0.0]))
+ assert_(average(ott, axis=1)[0] is masked)
+ assert_(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
- self.assertTrue(eq(wts, [1., 0.]))
+ assert_(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
- self.assertTrue(allclose(average(x, axis=0), 2.5))
- self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
+ assert_(allclose(average(x, axis=0), 2.5))
+ assert_(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
- self.assertTrue(allclose(average(y, None),
+ assert_(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
- self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
- self.assertTrue(allclose(average(y, axis=1),
+ assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
+ assert_(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
- self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
- self.assertTrue(allclose(average(y, axis=0, weights=w2),
+ assert_(allclose(average(y, None, weights=w2), 20. / 6.))
+ assert_(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
- self.assertTrue(allclose(average(y, axis=1),
+ assert_(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
- self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5))
- self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5))
- self.assertTrue(average(masked_array(x, m4), axis=0) is masked)
- self.assertEqual(average(masked_array(x, m5), axis=0), 0.0)
- self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0)
+ assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
+ assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
+ assert_(average(masked_array(x, m4), axis=0) is masked)
+ assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+ assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
- self.assertTrue(allclose(average(z, None), 20. / 6.))
- self.assertTrue(allclose(average(z, axis=0),
+ assert_(allclose(average(z, None), 20. / 6.))
+ assert_(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
- self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0]))
- self.assertTrue(allclose(average(z, axis=0, weights=w2),
+ assert_(allclose(average(z, axis=1), [2.5, 5.0]))
+ assert_(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
- self.assertEqual(shape(r1), shape(w1))
- self.assertEqual(r1.shape, w1.shape)
+ assert_equal(shape(r1), shape(w1))
+ assert_equal(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
- self.assertEqual(shape(w2), shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=1)
- self.assertEqual(shape(w2), shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
- self.assertTrue(shape(w2) == shape(r2))
+ assert_(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
a2da = average(a2d, axis=0)
- self.assertTrue(eq(a2da, [0.5, 3.0]))
+ assert_(eq(a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
- self.assertTrue(eq(a2dma, [1.0, 3.0]))
+ assert_(eq(a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
- self.assertTrue(eq(a2dma, 7. / 3.))
+ assert_(eq(a2dma, 7. / 3.))
a2dma = average(a2dm, axis=1)
- self.assertTrue(eq(a2dma, [1.5, 4.0]))
+ assert_(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
- self.assertEqual(1, int(array(1)))
- self.assertEqual(1.0, float(array(1)))
- self.assertEqual(1, int(array([[[1]]])))
- self.assertEqual(1.0, float(array([[1]])))
- self.assertRaises(TypeError, float, array([1, 1]))
- self.assertRaises(ValueError, bool, array([0, 1]))
- self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1]))
+ assert_equal(1, int(array(1)))
+ assert_equal(1.0, float(array(1)))
+ assert_equal(1, int(array([[[1]]])))
+ assert_equal(1.0, float(array([[1]])))
+ assert_raises(TypeError, float, array([1, 1]))
+ assert_raises(ValueError, bool, array([0, 1]))
+ assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
#TODO FIXME: Find out what the following raises a warning in r8247
with np.errstate(divide='ignore'):
- self.assertTrue((1 / array(0)).mask)
- self.assertTrue((1 + xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue(maximum(xm, xm).mask)
- self.assertTrue(minimum(xm, xm).mask)
- self.assertTrue(xm.filled().dtype is xm._data.dtype)
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
+ assert_(xm.filled().dtype is xm._data.dtype)
x = array(0, mask=0)
- self.assertTrue(x.filled() == x._data)
- self.assertEqual(str(xm), str(masked_print_option))
+ assert_(x.filled() == x._data)
+ assert_equal(str(xm), str(masked_print_option))
def test_testArrayMethods(self):
a = array([1, 3, 2])
- self.assertTrue(eq(a.any(), a._data.any()))
- self.assertTrue(eq(a.all(), a._data.all()))
- self.assertTrue(eq(a.argmax(), a._data.argmax()))
- self.assertTrue(eq(a.argmin(), a._data.argmin()))
- self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
+ assert_(eq(a.any(), a._data.any()))
+ assert_(eq(a.all(), a._data.all()))
+ assert_(eq(a.argmax(), a._data.argmax()))
+ assert_(eq(a.argmin(), a._data.argmin()))
+ assert_(eq(a.choose(0, 1, 2, 3, 4),
a._data.choose(0, 1, 2, 3, 4)))
- self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
- self.assertTrue(eq(a.conj(), a._data.conj()))
- self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
+ assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
+ assert_(eq(a.conj(), a._data.conj()))
+ assert_(eq(a.conjugate(), a._data.conjugate()))
m = array([[1, 2], [3, 4]])
- self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
- self.assertTrue(eq(a.sum(), a._data.sum()))
- self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
- self.assertTrue(eq(m.transpose(), m._data.transpose()))
+ assert_(eq(m.diagonal(), m._data.diagonal()))
+ assert_(eq(a.sum(), a._data.sum()))
+ assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
+ assert_(eq(m.transpose(), m._data.transpose()))
def test_testArrayAttributes(self):
a = array([1, 3, 2])
- self.assertEqual(a.ndim, 1)
+ assert_equal(a.ndim, 1)
def test_testAPI(self):
- self.assertFalse([m for m in dir(np.ndarray)
- if m not in dir(MaskedArray) and
- not m.startswith('_')])
+ assert_(not [m for m in dir(np.ndarray)
+ if m not in dir(MaskedArray) and
+ not m.startswith('_')])
def test_testSingleElementSubscript(self):
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
- self.assertEqual(a[0].shape, ())
- self.assertEqual(b[0].shape, ())
- self.assertEqual(b[1].shape, ())
+ assert_equal(a[0].shape, ())
+ assert_equal(b[0].shape, ())
+ assert_equal(b[1].shape, ())
-class TestUfuncs(TestCase):
- def setUp(self):
+class TestUfuncs(object):
+ def setup(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
@@ -709,35 +735,35 @@ class TestUfuncs(TestCase):
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
- self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
- self.assertTrue(eqmask(ur.mask, mr.mask))
+ assert_(eq(ur.filled(0), mr.filled(0), f))
+ assert_(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self.d[0]
- self.assertFalse(alltrue(a, axis=0))
- self.assertTrue(sometrue(a, axis=0))
- self.assertEqual(sum(a[:3], axis=0), 0)
- self.assertEqual(product(a, axis=0), 0)
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
+ assert_equal(sum(a[:3], axis=0), 0)
+ assert_equal(product(a, axis=0), 0)
def test_minmax(self):
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
- self.assertEqual(amask.max(), a.max())
- self.assertEqual(amask.min(), 5)
- self.assertTrue((amask.max(0) == a.max(0)).all())
- self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all())
- self.assertTrue(amask.max(1)[0].mask)
- self.assertTrue(amask.min(1)[0].mask)
+ assert_equal(amask.max(), a.max())
+ assert_equal(amask.min(), 5)
+ assert_((amask.max(0) == a.max(0)).all())
+ assert_((amask.min(0) == [5, 6, 7, 8]).all())
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
- self.assertTrue(eq(nonzero(x), [0]))
+ assert_(eq(nonzero(x), [0]))
-class TestArrayMethods(TestCase):
+class TestArrayMethods(object):
- def setUp(self):
+ def setup(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
@@ -762,63 +788,63 @@ class TestArrayMethods(TestCase):
def test_trace(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXdiag = mX.diagonal()
- self.assertEqual(mX.trace(), mX.diagonal().compressed().sum())
- self.assertTrue(eq(mX.trace(),
+ assert_equal(mX.trace(), mX.diagonal().compressed().sum())
+ assert_(eq(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0)))
def test_clip(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
clipped = mx.clip(2, 8)
- self.assertTrue(eq(clipped.mask, mx.mask))
- self.assertTrue(eq(clipped._data, x.clip(2, 8)))
- self.assertTrue(eq(clipped._data, mx._data.clip(2, 8)))
+ assert_(eq(clipped.mask, mx.mask))
+ assert_(eq(clipped._data, x.clip(2, 8)))
+ assert_(eq(clipped._data, mx._data.clip(2, 8)))
def test_ptp(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
(n, m) = X.shape
- self.assertEqual(mx.ptp(), mx.compressed().ptp())
+ assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float_)
cols = np.zeros(m, np.float_)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
- self.assertTrue(eq(mX.ptp(0), cols))
- self.assertTrue(eq(mX.ptp(1), rows))
+ assert_(eq(mX.ptp(0), cols))
+ assert_(eq(mX.ptp(1), rows))
def test_swapaxes(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXswapped = mX.swapaxes(0, 1)
- self.assertTrue(eq(mXswapped[-1], mX[:, -1]))
+ assert_(eq(mXswapped[-1], mX[:, -1]))
mXXswapped = mXX.swapaxes(0, 2)
- self.assertEqual(mXXswapped.shape, (2, 2, 3, 3))
+ assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_cumprod(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumprod(0)
- self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0)))
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
- self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1)))
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))
def test_cumsum(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumsum(0)
- self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0)))
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
- self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1)))
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))
def test_varstd(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
- self.assertTrue(eq(mX.var(axis=None), mX.compressed().var()))
- self.assertTrue(eq(mX.std(axis=None), mX.compressed().std()))
- self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
- self.assertTrue(eq(mX.var().shape, X.var().shape))
+ assert_(eq(mX.var(axis=None), mX.compressed().var()))
+ assert_(eq(mX.std(axis=None), mX.compressed().std()))
+ assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
+ assert_(eq(mX.var().shape, X.var().shape))
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
- self.assertTrue(eq(mXvar1[k], mX[k].compressed().var()))
- self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var()))
- self.assertTrue(eq(np.sqrt(mXvar0[k]),
+ assert_(eq(mXvar1[k], mX[k].compressed().var()))
+ assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
+ assert_(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index d1fb2bb2b..925b21a14 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -3,25 +3,24 @@ from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
-from numpy.testing import (assert_, TestCase, assert_array_equal,
- assert_allclose, run_module_suite,
- suppress_warnings)
+from numpy.testing import (
+ assert_, assert_array_equal, assert_allclose, run_module_suite,
+ suppress_warnings
+ )
-rlevel = 1
-
-class TestRegression(TestCase):
- def test_masked_array_create(self,level=rlevel):
+class TestRegression(object):
+ def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
- def test_masked_array(self,level=rlevel):
+ def test_masked_array(self):
# Ticket #61
np.ma.array(1, mask=[1])
- def test_mem_masked_where(self,level=rlevel):
+ def test_mem_masked_where(self):
# Ticket #62
from numpy.ma import masked_where, MaskType
a = np.zeros((1, 1))
@@ -29,7 +28,7 @@ class TestRegression(TestCase):
c = masked_where(b, a)
a-c
- def test_masked_array_multiply(self,level=rlevel):
+ def test_masked_array_multiply(self):
# Ticket #254
a = np.ma.zeros((4, 1))
a[2, 0] = np.ma.masked
@@ -37,7 +36,7 @@ class TestRegression(TestCase):
a*b
b*a
- def test_masked_array_repeat(self, level=rlevel):
+ def test_masked_array_repeat(self):
# Ticket #271
np.ma.array([1], mask=False).repeat(10)
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index b2995fd57..e59dd4656 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -9,7 +9,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_raises, dec
+from numpy.testing import run_module_suite, assert_, assert_raises, dec
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
@@ -172,10 +172,10 @@ class ComplicatedSubArray(SubArray):
return obj
-class TestSubclassing(TestCase):
+class TestSubclassing(object):
# Test suite for masked subclasses of ndarray.
- def setUp(self):
+ def setup(self):
x = np.arange(5, dtype='float')
mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
@@ -186,41 +186,41 @@ class TestSubclassing(TestCase):
m = [0, 0, 1, 0, 0]
xsub = SubArray(x)
xmsub = masked_array(xsub, mask=m)
- self.assertTrue(isinstance(xmsub, MaskedArray))
+ assert_(isinstance(xmsub, MaskedArray))
assert_equal(xmsub._data, xsub)
- self.assertTrue(isinstance(xmsub._data, SubArray))
+ assert_(isinstance(xmsub._data, SubArray))
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
- self.assertTrue(isinstance(mx._data, np.matrix))
+ assert_(isinstance(mx._data, np.matrix))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
- self.assertTrue(isinstance(log(mx), mmatrix))
+ assert_(isinstance(log(mx), mmatrix))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
# Result should be a mmatrix
- self.assertTrue(isinstance(add(mx, mx), mmatrix))
- self.assertTrue(isinstance(add(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx), mmatrix))
+ assert_(isinstance(add(mx, x), mmatrix))
# Result should work
assert_equal(add(mx, x), mx+x)
- self.assertTrue(isinstance(add(mx, mx)._data, np.matrix))
- self.assertTrue(isinstance(add.outer(mx, mx), mmatrix))
- self.assertTrue(isinstance(hypot(mx, mx), mmatrix))
- self.assertTrue(isinstance(hypot(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ assert_(isinstance(add.outer(mx, mx), mmatrix))
+ assert_(isinstance(hypot(mx, mx), mmatrix))
+ assert_(isinstance(hypot(mx, x), mmatrix))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
- self.assertTrue(isinstance(divide(mx, mx), mmatrix))
- self.assertTrue(isinstance(divide(mx, x), mmatrix))
+ assert_(isinstance(divide(mx, mx), mmatrix))
+ assert_(isinstance(divide(mx, x), mmatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
@@ -229,22 +229,22 @@ class TestSubclassing(TestCase):
ym = msubarray(x)
#
z = (my+1)
- self.assertTrue(isinstance(z, MaskedArray))
- self.assertTrue(not isinstance(z, MSubArray))
- self.assertTrue(isinstance(z._data, SubArray))
+ assert_(isinstance(z, MaskedArray))
+ assert_(not isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
assert_equal(z._data.info, {})
#
z = (ym+1)
- self.assertTrue(isinstance(z, MaskedArray))
- self.assertTrue(isinstance(z, MSubArray))
- self.assertTrue(isinstance(z._data, SubArray))
- self.assertTrue(z._data.info['added'] > 0)
+ assert_(isinstance(z, MaskedArray))
+ assert_(isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
+ assert_(z._data.info['added'] > 0)
# Test that inplace methods from data get used (gh-4617)
ym += 1
- self.assertTrue(isinstance(ym, MaskedArray))
- self.assertTrue(isinstance(ym, MSubArray))
- self.assertTrue(isinstance(ym._data, SubArray))
- self.assertTrue(ym._data.info['iadded'] > 0)
+ assert_(isinstance(ym, MaskedArray))
+ assert_(isinstance(ym, MSubArray))
+ assert_(isinstance(ym._data, SubArray))
+ assert_(ym._data.info['iadded'] > 0)
#
ym._set_mask([1, 0, 0, 0, 1])
assert_equal(ym._mask, [1, 0, 0, 0, 1])
@@ -253,7 +253,7 @@ class TestSubclassing(TestCase):
#
xsub = subarray(x, info={'name':'x'})
mxsub = masked_array(xsub)
- self.assertTrue(hasattr(mxsub, 'info'))
+ assert_(hasattr(mxsub, 'info'))
assert_equal(mxsub.info, xsub.info)
def test_subclasspreservation(self):
@@ -264,22 +264,22 @@ class TestSubclassing(TestCase):
xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
#
mxsub = masked_array(xsub, subok=False)
- self.assertTrue(not isinstance(mxsub, MSubArray))
- self.assertTrue(isinstance(mxsub, MaskedArray))
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = asarray(xsub)
- self.assertTrue(not isinstance(mxsub, MSubArray))
- self.assertTrue(isinstance(mxsub, MaskedArray))
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = masked_array(xsub, subok=True)
- self.assertTrue(isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, xsub._mask)
#
mxsub = asanyarray(xsub)
- self.assertTrue(isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, m)
@@ -290,21 +290,21 @@ class TestSubclassing(TestCase):
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
# getter should return a ComplicatedSubArray, even for single item
# first check we wrote ComplicatedSubArray correctly
- self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray))
- self.assertTrue(isinstance(xcsub[1,...], ComplicatedSubArray))
- self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1,...], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1:4], ComplicatedSubArray))
# now that it propagates inside the MaskedArray
- self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
- self.assertTrue(mxcsub[0] is masked)
- self.assertTrue(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
+ assert_(mxcsub[0] is masked)
+ assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
# also for flattened version (which goes via MaskedIterator)
- self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
- self.assertTrue(mxcsub.flat[0] is masked)
- self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
+ assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
+ assert_(mxcsub.flat[0] is masked)
+ assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
# setter should only work with ComplicatedSubArray input
# first check we wrote ComplicatedSubArray correctly
@@ -325,21 +325,21 @@ class TestSubclassing(TestCase):
xcsub = ComplicatedSubArray(x)
mxcsub_nomask = masked_array(xcsub)
- self.assertTrue(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
def test_subclass_repr(self):
"""test that repr uses the name of the subclass
and 'array' for np.ndarray"""
x = np.arange(5)
mx = masked_array(x, mask=[True, False, True, False, False])
- self.assertTrue(repr(mx).startswith('masked_array'))
+ assert_(repr(mx).startswith('masked_array'))
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
- self.assertTrue(repr(mxsub).startswith(
+ assert_(repr(mxsub).startswith(
'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__)))
def test_subclass_str(self):
@@ -348,13 +348,13 @@ class TestSubclassing(TestCase):
x = np.arange(5)
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
- self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]')
+ assert_(str(mxsub) == '[-- 1 -- 3 4]')
xcsub = ComplicatedSubArray(x)
assert_raises(ValueError, xcsub.__setitem__, 0,
np.ma.core.masked_print_option)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
- self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
+ assert_(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
def test_pure_subclass_info_preservation(self):
# Test that ufuncs and methods conserve extra information consistently;
@@ -362,11 +362,11 @@ class TestSubclassing(TestCase):
arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
diff1 = np.subtract(arr1, arr2)
- self.assertTrue('info' in diff1._optinfo)
- self.assertTrue(diff1._optinfo['info'] == 'test')
+ assert_('info' in diff1._optinfo)
+ assert_(diff1._optinfo['info'] == 'test')
diff2 = arr1 - arr2
- self.assertTrue('info' in diff2._optinfo)
- self.assertTrue(diff2._optinfo['info'] == 'test')
+ assert_('info' in diff2._optinfo)
+ assert_(diff2._optinfo['info'] == 'test')
###############################################################################
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index c19066d71..a95c170c8 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -12,11 +12,11 @@ import operator
import numpy as np
from numpy import ndarray, float_
import numpy.core.umath as umath
+import numpy.testing
from numpy.testing import (
TestCase, assert_, assert_allclose, assert_array_almost_equal_nulp,
assert_raises, build_err_msg, run_module_suite
)
-import numpy.testing.utils as utils
from .core import mask_or, getmask, masked_array, nomask, masked, filled
__all__masked = [
@@ -211,11 +211,11 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
header=header, names=('x', 'y'))
raise ValueError(msg)
# OK, now run the basic tests on filled versions
- return utils.assert_array_compare(comparison,
- x.filled(fill_value),
- y.filled(fill_value),
- err_msg=err_msg,
- verbose=verbose, header=header)
+ return np.testing.assert_array_compare(comparison,
+ x.filled(fill_value),
+ y.filled(fill_value),
+ err_msg=err_msg,
+ verbose=verbose, header=header)
def assert_array_equal(x, y, err_msg='', verbose=True):
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index dae4b141b..68104ed0a 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -7,7 +7,7 @@ import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
-from numpy.testing.utils import build_err_msg
+from numpy.testing import build_err_msg
# Fixme: this does not look right.
np.seterr(all='ignore')
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index b2b76837a..11dce2928 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -7,6 +7,6 @@ from .defmatrix import *
__all__ = defmatrix.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index f212a8c5e..e016b5f4c 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -137,7 +137,7 @@ def matrix_power(M, n):
M = asanyarray(M)
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
- if not issubdtype(type(n), int):
+ if not issubdtype(type(n), N.integer):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
@@ -295,7 +295,7 @@ class matrix(N.ndarray):
# Determine when we should have a column array
try:
n = len(index)
- except:
+ except Exception:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
@@ -1155,7 +1155,7 @@ def bmat(obj, ldict=None, gdict=None):
--------
block :
A generalization of this function for N-d arrays, that returns normal
- `ndarray`s.
+ ndarrays.
Examples
--------
diff --git a/numpy/matrixlib/tests/__init__.py b/numpy/matrixlib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/matrixlib/tests/__init__.py
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index fd36d7770..77f262031 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -5,13 +5,13 @@ import collections
import numpy as np
from numpy import matrix, asmatrix, bmat
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_almost_equal,
+ run_module_suite, assert_, assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises
)
from numpy.matrixlib.defmatrix import matrix_power
from numpy.matrixlib import mat
-class TestCtor(TestCase):
+class TestCtor(object):
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
@@ -58,7 +58,7 @@ class TestCtor(TestCase):
assert_(np.all(b2 == mixresult))
-class TestProperties(TestCase):
+class TestProperties(object):
def test_sum(self):
"""Test whether matrix.sum(axis=1) preserves orientation.
Fails in NumPy <= 0.9.6.2127.
@@ -191,7 +191,7 @@ class TestProperties(TestCase):
B = matrix([[True], [True], [False]])
assert_array_equal(A, B)
-class TestCasting(TestCase):
+class TestCasting(object):
def test_basic(self):
A = np.arange(100).reshape(10, 10)
mA = matrix(A)
@@ -210,7 +210,7 @@ class TestCasting(TestCase):
assert_(np.all(mA != mB))
-class TestAlgebra(TestCase):
+class TestAlgebra(object):
def test_basic(self):
import numpy.linalg as linalg
@@ -249,6 +249,12 @@ class TestAlgebra(TestCase):
assert_array_almost_equal(m4, np.dot(m2, m2))
assert_array_almost_equal(np.dot(mi, m), np.eye(2))
+ def test_scalar_type_pow(self):
+ m = matrix([[1, 2], [3, 4]])
+ for scalar_t in [np.int8, np.uint8]:
+ two = scalar_t(2)
+ assert_array_almost_equal(m ** 2, m ** two)
+
def test_notimplemented(self):
'''Check that 'not implemented' operations produce a failure.'''
A = matrix([[1., 2.],
@@ -271,7 +277,7 @@ class TestAlgebra(TestCase):
self.fail("matrix.__mul__ with non-numeric object doesn't raise"
"a TypeError")
-class TestMatrixReturn(TestCase):
+class TestMatrixReturn(object):
def test_instance_methods(self):
a = matrix([1.0], dtype='f8')
methodargs = {
@@ -313,7 +319,7 @@ class TestMatrixReturn(TestCase):
assert_(type(d) is np.ndarray)
-class TestIndexing(TestCase):
+class TestIndexing(object):
def test_basic(self):
x = asmatrix(np.zeros((3, 2), float))
y = np.zeros((3, 1), float)
@@ -322,9 +328,8 @@ class TestIndexing(TestCase):
assert_equal(x, [[0, 1], [0, 0], [0, 0]])
-class TestNewScalarIndexing(TestCase):
- def setUp(self):
- self.a = matrix([[1, 2], [3, 4]])
+class TestNewScalarIndexing(object):
+ a = matrix([[1, 2], [3, 4]])
def test_dimesions(self):
a = self.a
@@ -390,7 +395,7 @@ class TestNewScalarIndexing(TestCase):
assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
-class TestPower(TestCase):
+class TestPower(object):
def test_returntype(self):
a = np.array([[0, 1], [0, 0]])
assert_(type(matrix_power(a, 2)) is np.ndarray)
@@ -401,10 +406,10 @@ class TestPower(TestCase):
assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
-class TestShape(TestCase):
- def setUp(self):
- self.a = np.array([[1], [2]])
- self.m = matrix([[1], [2]])
+class TestShape(object):
+
+ a = np.array([[1], [2]])
+ m = matrix([[1], [2]])
def test_shape(self):
assert_equal(self.a.shape, (2, 1))
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index d27e24ec9..bf891a196 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -2,10 +2,10 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
+ run_module_suite, assert_, assert_equal, assert_array_equal
)
-class TestView(TestCase):
+class TestView(object):
def test_type(self):
x = np.array([1, 2, 3])
assert_(isinstance(x.view(np.matrix), np.matrix))
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index 28329da39..b826b8e81 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import assert_equal, TestCase, run_module_suite
+from numpy.testing import assert_equal, run_module_suite
-class TestDot(TestCase):
+class TestDot(object):
def test_matscalar(self):
b1 = np.matrix(np.ones((3, 3), dtype=complex))
assert_equal(b1*1.0, b1)
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 0839fbf28..32cb38ac7 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,17 +1,18 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
+from numpy.testing import (
+ run_module_suite, assert_, assert_equal, assert_raises
+ )
-rlevel = 1
-class TestRegression(TestCase):
- def test_kron_matrix(self, level=rlevel):
+class TestRegression(object):
+ def test_kron_matrix(self):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
assert_equal(type(np.kron(x, x)), type(x))
- def test_matrix_properties(self,level=rlevel):
+ def test_matrix_properties(self):
# Ticket #125
a = np.matrix([1.0], dtype=float)
assert_(type(a.real) is np.matrix)
@@ -20,18 +21,18 @@ class TestRegression(TestCase):
assert_(type(c) is np.ndarray)
assert_(type(d) is np.ndarray)
- def test_matrix_multiply_by_1d_vector(self, level=rlevel):
+ def test_matrix_multiply_by_1d_vector(self):
# Ticket #473
def mul():
np.mat(np.eye(2))*np.ones(2)
- self.assertRaises(ValueError, mul)
+ assert_raises(ValueError, mul)
- def test_matrix_std_argmax(self,level=rlevel):
+ def test_matrix_std_argmax(self):
# Ticket #83
x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
- self.assertEqual(x.std().shape, ())
- self.assertEqual(x.argmax().shape, ())
+ assert_equal(x.std().shape, ())
+ assert_equal(x.argmax().shape, ())
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 82c350e9b..ae5b1f078 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -22,6 +22,6 @@ from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 39f5fac31..78392d2a2 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -260,7 +260,7 @@ class ABCPolyBase(object):
self.window = window
def __repr__(self):
- format = "%s(%s, %s, %s)"
+ format = "%s(%s, domain=%s, window=%s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
@@ -307,32 +307,26 @@ class ABCPolyBase(object):
return self
def __add__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
@@ -362,12 +356,12 @@ class ABCPolyBase(object):
return res[1]
def __divmod__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
- except (TypeError, ZeroDivisionError) as e:
+ except ZeroDivisionError as e:
raise e
- except:
+ except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
@@ -381,21 +375,21 @@ class ABCPolyBase(object):
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
@@ -425,7 +419,7 @@ class ABCPolyBase(object):
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
- except:
+ except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 49d0302e0..fe2805a03 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -52,6 +52,7 @@ Misc Functions
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
+- `chebinterpolate` -- interpolate a function at the Chebyshev points.
Classes
-------
@@ -87,6 +88,7 @@ References
"""
from __future__ import division, absolute_import, print_function
+import numbers
import warnings
import numpy as np
import numpy.linalg as la
@@ -102,7 +104,7 @@ __all__ = [
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
- 'chebgauss', 'chebweight']
+ 'chebgauss', 'chebweight', 'chebinterpolate']
chebtrim = pu.trimcoef
@@ -359,10 +361,10 @@ def poly2cheb(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], [-1., 1.])
+ Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
- Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
+ Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
@@ -942,7 +944,7 @@ def chebder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -958,7 +960,7 @@ def chebder(c, m=1, scl=1, axis=0):
der[1] = 4*c[2]
der[0] = c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1022,7 +1024,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a`- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -1067,7 +1069,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -1086,7 +1088,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1220,12 +1222,12 @@ def chebval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
@@ -1280,7 +1282,7 @@ def chebgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1333,12 +1335,12 @@ def chebval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
@@ -1397,7 +1399,7 @@ def chebgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1458,7 +1460,7 @@ def chebvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def chebvander2d(x, y, deg):
@@ -1508,7 +1510,7 @@ def chebvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1572,7 +1574,7 @@ def chebvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1613,7 +1615,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
- Degree(s) of the fitting polynomials. If `deg` is a single integer
+ Degree(s) of the fitting polynomials. If `deg` is a single integer,
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
@@ -1808,7 +1810,7 @@ def chebcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1886,6 +1888,73 @@ def chebroots(c):
return r
+def chebinterpolate(func, deg, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the Chebyshev series that interpolates `func` at the Chebyshev
+ points of the first kind in the interval [-1, 1]. The interpolating
+ series tends to a minmax approximation to `func` with increasing `deg`
+ if the function is continuous in the interval.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be approximated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no extra
+ arguments.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,)
+ Chebyshev coefficients of the interpolating series ordered from low to
+ high.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as C
+    >>> C.chebinterpolate(lambda x: np.tanh(x) + 0.5, 8)
+ array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17,
+ -5.42457905e-02, -2.71387850e-16, 4.51658839e-03,
+ 2.46716228e-17, -3.79694221e-04, -3.26899002e-16])
+
+ Notes
+ -----
+
+ The Chebyshev polynomials used in the interpolation are orthogonal when
+ sampled at the Chebyshev points of the first kind. If it is desired to
+ constrain some of the coefficients they can simply be set to the desired
+ value after the interpolation, no new interpolation or fit is needed. This
+    is especially useful if it is known a priori that some of the coefficients
+    are zero. For instance, if the function is even then the coefficients of the
+ terms of odd degree in the result can be set to zero.
+
+ """
+ deg = np.asarray(deg)
+
+ # check arguments.
+ if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
+ raise TypeError("deg must be an int")
+ if deg < 0:
+ raise ValueError("expected deg >= 0")
+
+ order = deg + 1
+ xcheb = chebpts1(order)
+ yfunc = func(xcheb, *args)
+ m = chebvander(xcheb, deg)
+ c = np.dot(m.T, yfunc)
+ c[0] /= order
+ c[1:] /= 0.5*order
+
+ return c
+
+
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
@@ -2069,6 +2138,48 @@ class Chebyshev(ABCPolyBase):
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
+ @classmethod
+ def interpolate(cls, func, deg, domain=None, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the series that interpolates `func` at the Chebyshev points of
+ the first kind scaled and shifted to the `domain`. The resulting series
+        tends to a minimax approximation of `func` when the function is
+ continuous in the domain.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be interpolated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial.
+ domain : {None, [beg, end]}, optional
+ Domain over which `func` is interpolated. The default is None, in
+ which case the domain is [-1, 1].
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no
+ extra arguments.
+
+ Returns
+ -------
+ polynomial : Chebyshev instance
+ Interpolating Chebyshev instance.
+
+ Notes
+ -----
+        See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
+
+ """
+ if domain is None:
+ domain = cls.domain
+ xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
+ coef = chebinterpolate(xfunc, deg)
+ return cls(coef, domain=domain)
+
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index a03fe722c..ae1143d28 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -706,7 +706,7 @@ def hermder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -718,7 +718,7 @@ def hermder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -782,7 +782,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -825,7 +825,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -840,7 +840,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -983,12 +983,12 @@ def hermval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = hermval(x, c)
@@ -1043,7 +1043,7 @@ def hermgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1096,12 +1096,12 @@ def hermval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = hermval(x, c)
@@ -1160,7 +1160,7 @@ def hermgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1229,7 +1229,7 @@ def hermvander(x, deg):
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def hermvander2d(x, y, deg):
@@ -1279,7 +1279,7 @@ def hermvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1343,7 +1343,7 @@ def hermvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1584,7 +1584,7 @@ def hermcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1732,7 +1732,7 @@ def hermgauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1796,7 +1796,7 @@ def hermweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x**2)
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 2a29d61cf..ee29ec5d3 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -705,7 +705,7 @@ def hermeder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
return c[:1]*0
@@ -717,7 +717,7 @@ def hermeder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -781,7 +781,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -824,7 +824,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -839,7 +839,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -981,12 +981,12 @@ def hermeval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
@@ -1041,7 +1041,7 @@ def hermegrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1094,12 +1094,12 @@ def hermeval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
@@ -1158,7 +1158,7 @@ def hermegrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1226,7 +1226,7 @@ def hermevander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def hermevander2d(x, y, deg):
@@ -1276,7 +1276,7 @@ def hermevander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1340,7 +1340,7 @@ def hermevander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def hermecompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1730,7 +1730,7 @@ def hermegauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1793,7 +1793,7 @@ def hermeweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-.5*x**2)
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index c9e1302e1..079cf97b3 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -703,7 +703,7 @@ def lagder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -717,7 +717,7 @@ def lagder(c, m=1, scl=1, axis=0):
c[j - 1] += c[j]
der[0] = -c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -782,7 +782,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -825,7 +825,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -841,7 +841,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -983,12 +983,12 @@ def lagval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
@@ -1043,7 +1043,7 @@ def laggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1096,12 +1096,12 @@ def lagval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
@@ -1160,7 +1160,7 @@ def laggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1228,7 +1228,7 @@ def lagvander(x, deg):
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def lagvander2d(x, y, deg):
@@ -1278,7 +1278,7 @@ def lagvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1342,7 +1342,7 @@ def lagvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def lagcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1687,7 +1687,7 @@ def laggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100 higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1747,7 +1747,7 @@ def lagweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x)
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index be8410b82..1c42f4881 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -136,10 +136,10 @@ def poly2leg(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], [-1., 1.])
- >>> c = P.Legendre(P.poly2leg(p.coef))
+ Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
- Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
+ Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
"""
[pol] = pu.as_series([pol])
@@ -742,7 +742,7 @@ def legder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -758,7 +758,7 @@ def legder(c, m=1, scl=1, axis=0):
der[1] = 3*c[2]
der[0] = c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -822,7 +822,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -867,7 +867,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -886,7 +886,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1021,12 +1021,12 @@ def legval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
@@ -1081,7 +1081,7 @@ def leggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1134,12 +1134,12 @@ def legval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
@@ -1198,7 +1198,7 @@ def leggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1259,7 +1259,7 @@ def legvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
@@ -1309,7 +1309,7 @@ def legvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1373,7 +1373,7 @@ def legvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1611,7 +1611,7 @@ def legcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1712,7 +1712,7 @@ def leggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1777,7 +1777,7 @@ def legweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = x*0.0 + 1.0
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index c357b48c9..1be775f6a 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -546,7 +546,7 @@ def polyder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -558,7 +558,7 @@ def polyder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -619,7 +619,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
@@ -662,7 +662,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
return c
k = list(k) + [0]*(cnt - len(k))
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
for i in range(cnt):
n = len(c)
c *= scl
@@ -676,7 +676,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -913,7 +913,7 @@ def polyval2d(x, y, c):
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
@@ -1026,7 +1026,7 @@ def polyval3d(x, y, z, c):
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
@@ -1147,7 +1147,7 @@ def polyvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def polyvander2d(x, y, deg):
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 5b6663bfd..e2dba1a55 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -182,7 +182,7 @@ def as_series(alist, trim=True):
else:
try:
dtype = np.common_type(*arrays)
- except:
+ except Exception:
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
return ret
@@ -236,7 +236,7 @@ def trimcoef(c, tol=0):
raise ValueError("tol must be non-negative")
[c] = as_series([c])
- [ind] = np.where(np.abs(c) > tol)
+ [ind] = np.nonzero(np.abs(c) > tol)
if len(ind) == 0:
return c[:1]*0
else:
diff --git a/numpy/polynomial/tests/__init__.py b/numpy/polynomial/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/polynomial/tests/__init__.py
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index dc0cd14b3..1a34f42b0 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
def trim(x):
@@ -28,7 +29,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestPrivate(TestCase):
+class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
@@ -45,7 +46,7 @@ class TestPrivate(TestCase):
assert_equal(res, tgt)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
@@ -60,7 +61,7 @@ class TestConstants(TestCase):
assert_equal(cheb.chebx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
@@ -112,7 +113,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -206,7 +207,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_chebint(self):
# check exceptions
@@ -305,7 +306,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_chebder(self):
# check exceptions
@@ -345,7 +346,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -393,7 +394,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_chebfit(self):
def f(x):
@@ -470,7 +471,32 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestInterpolate(object):
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
+ assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(-1, 1, 10)
+ for deg in range(0, 10):
+ for p in range(0, deg + 1):
+ c = cheb.chebinterpolate(powx, deg, (p,))
+ assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
+
+
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
@@ -485,7 +511,7 @@ class TestCompanion(TestCase):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = cheb.chebgauss(100)
@@ -504,7 +530,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_chebfromroots(self):
res = cheb.chebfromroots([])
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 46d721df4..2ec8277ff 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -583,5 +583,30 @@ def check_ufunc_override(Poly):
assert_raises(TypeError, np.add, x, p)
+class TestInterpolate(object):
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
+ assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(0, 2, 10)
+ for deg in range(0, 10):
+ for t in range(0, deg + 1):
+ p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
+ assert_almost_equal(p(x), powx(x, t), decimal=12)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 06ce46ae4..2e39d854d 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
H0 = np.array([1])
H1 = np.array([0, 2])
@@ -28,7 +29,7 @@ def trim(x):
return herm.hermtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(herm.hermx, [0, .5])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
@@ -100,7 +101,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -194,7 +195,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_hermint(self):
# check exceptions
@@ -293,7 +294,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_hermder(self):
# check exceptions
@@ -333,7 +334,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -381,7 +382,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_hermfit(self):
def f(x):
@@ -458,7 +459,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
@@ -473,7 +474,7 @@ class TestCompanion(TestCase):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = herm.hermgauss(100)
@@ -492,7 +493,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_hermfromroots(self):
res = herm.hermfromroots([])
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index 38da325f6..a81910787 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
He0 = np.array([1])
He1 = np.array([0, 1])
@@ -28,7 +29,7 @@ def trim(x):
return herme.hermetrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(herme.hermex, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
@@ -100,7 +101,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -194,7 +195,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_hermeint(self):
# check exceptions
@@ -293,7 +294,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_hermeder(self):
# check exceptions
@@ -334,7 +335,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -382,7 +383,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_hermefit(self):
def f(x):
@@ -459,7 +460,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
@@ -474,7 +475,7 @@ class TestCompanion(TestCase):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = herme.hermegauss(100)
@@ -493,7 +494,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_hermefromroots(self):
res = herme.hermefromroots([])
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 0fa76b48a..17a3f7558 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.laguerre as lag
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
L0 = np.array([1])/1
L1 = np.array([1, -1])/1
@@ -25,7 +26,7 @@ def trim(x):
return lag.lagtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
@@ -40,7 +41,7 @@ class TestConstants(TestCase):
assert_equal(lag.lagx, [1, -1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
@@ -97,7 +98,7 @@ class TestArithmetic(TestCase):
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -191,7 +192,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_lagint(self):
# check exceptions
@@ -290,7 +291,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_lagder(self):
# check exceptions
@@ -330,7 +331,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -378,7 +379,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_lagfit(self):
def f(x):
@@ -440,7 +441,7 @@ class TestFitting(TestCase):
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
@@ -455,7 +456,7 @@ class TestCompanion(TestCase):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = lag.laggauss(100)
@@ -474,7 +475,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_lagfromroots(self):
res = lag.lagfromroots([])
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index 485bc9688..375f41d49 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
L0 = np.array([1])
L1 = np.array([0, 1])
@@ -28,7 +29,7 @@ def trim(x):
return leg.legtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(leg.legx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
@@ -101,7 +102,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -195,7 +196,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_legint(self):
# check exceptions
@@ -294,7 +295,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_legder(self):
# check exceptions
@@ -334,7 +335,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -382,7 +383,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_legfit(self):
def f(x):
@@ -459,7 +460,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
@@ -474,7 +475,7 @@ class TestCompanion(TestCase):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = leg.leggauss(100)
@@ -493,7 +494,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_legfromroots(self):
res = leg.legfromroots([])
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 037be5927..bf6c5e814 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -6,8 +6,9 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
def trim(x):
@@ -27,7 +28,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestConstants(TestCase):
+class TestConstants(object):
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
@@ -42,7 +43,7 @@ class TestConstants(TestCase):
assert_equal(poly.polyx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
def test_polyadd(self):
for i in range(5):
@@ -103,7 +104,7 @@ class TestArithmetic(TestCase):
assert_equal(res, tgt, err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -263,7 +264,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_polyint(self):
# check exceptions
@@ -357,7 +358,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_polyder(self):
# check exceptions
@@ -397,7 +398,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -445,7 +446,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
@@ -460,7 +461,7 @@ class TestCompanion(TestCase):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_polyfromroots(self):
res = poly.polyfromroots([])
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 974e2e09a..bd1cb2008 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -6,11 +6,12 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polyutils as pu
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
-class TestMisc(TestCase):
+class TestMisc(object):
def test_trimseq(self):
for i in range(5):
@@ -43,7 +44,7 @@ class TestMisc(TestCase):
assert_equal(pu.trimcoef(coef, 2), [0])
-class TestDomain(TestCase):
+class TestDomain(object):
def test_getdomain(self):
# test for real values
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index 86cd25732..f403812c9 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -1,71 +1,71 @@
from __future__ import division, absolute_import, print_function
import numpy.polynomial as poly
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_equal
-class test_str(TestCase):
+class TestStr(object):
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
- tgt = 'poly([0., 1.])'
- assert_(res, tgt)
+ tgt = 'poly([ 0. 1.])'
+ assert_equal(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0, 1]))
- tgt = 'leg([0., 1.])'
- assert_(res, tgt)
+ tgt = 'cheb([ 0. 1.])'
+ assert_equal(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0, 1]))
- tgt = 'leg([0., 1.])'
- assert_(res, tgt)
+ tgt = 'leg([ 0. 1.])'
+ assert_equal(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0, 1]))
- tgt = 'herm([0., 1.])'
- assert_(res, tgt)
+ tgt = 'herm([ 0. 1.])'
+ assert_equal(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0, 1]))
- tgt = 'herme([0., 1.])'
- assert_(res, tgt)
+ tgt = 'herme([ 0. 1.])'
+ assert_equal(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0, 1]))
- tgt = 'lag([0., 1.])'
- assert_(res, tgt)
+ tgt = 'lag([ 0. 1.])'
+ assert_equal(res, tgt)
-class test_repr(TestCase):
+class TestRepr(object):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
- tgt = 'Polynomial([0., 1.])'
- assert_(res, tgt)
+ tgt = 'Polynomial([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0, 1]))
- tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Chebyshev([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
- tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Legendre([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
- tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Hermite([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
- tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'HermiteE([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
- tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
- assert_(res, tgt)
+ tgt = 'Laguerre([ 0., 1.], domain=[0, 1], window=[0, 1])'
+ assert_equal(res, tgt)
#
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 6c7d3140f..869818a22 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -117,6 +117,6 @@ def __RandomState_ctor():
"""
return RandomState(seed=0)
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index e195700d4..7673f92b4 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -41,10 +41,10 @@
* SOFTWARE OR ITS DOCUMENTATION.
*/
-#include <math.h>
-#include <stdlib.h>
#include "distributions.h"
#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
#ifndef min
#define min(x,y) ((x<y)?x:y)
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index c0082a782..9e8a79804 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -211,7 +211,7 @@ cdef object cont1_array(rk_state *state, rk_cont1 func, object size,
itera = <flatiter>PyArray_IterNew(<object>oa)
with lock, nogil:
for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>(itera.dataptr))[0])
+ array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
PyArray_ITER_NEXT(itera)
else:
array = <ndarray>np.empty(size, np.float64)
@@ -536,7 +536,7 @@ cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa,
itera = <flatiter>PyArray_IterNew(<object>oa)
with lock, nogil:
for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>(itera.dataptr))[0])
+ array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
PyArray_ITER_NEXT(itera)
else:
array = <ndarray>np.empty(size, int)
@@ -1469,7 +1469,7 @@ cdef class RandomState:
4
>>> type(np.random.random_integers(5))
<type 'int'>
- >>> np.random.random_integers(5, size=(3.,2.))
+ >>> np.random.random_integers(5, size=(3,2))
array([[5, 4],
[3, 3],
[4, 5]])
@@ -1951,7 +1951,7 @@ cdef class RandomState:
--------
Draw samples from the distribution:
- >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
>>> s = np.random.gamma(shape, scale, 1000)
Display the histogram of the samples, along with
@@ -2007,10 +2007,10 @@ cdef class RandomState:
Parameters
----------
- dfnum : int or array_like of ints
- Degrees of freedom in numerator. Should be greater than zero.
- dfden : int or array_like of ints
- Degrees of freedom in denominator. Should be greater than zero.
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, should be > 0.
+ dfden : float or array_like of float
+ Degrees of freedom in denominator, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2109,12 +2109,16 @@ cdef class RandomState:
Parameters
----------
- dfnum : int or array_like of ints
- Parameter, should be > 1.
- dfden : int or array_like of ints
- Parameter, should be > 1.
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, should be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, should be > 0.
nonc : float or array_like of floats
- Parameter, should be >= 0.
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, should be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2175,8 +2179,8 @@ cdef class RandomState:
fdfden = PyFloat_AsDouble(dfden)
fnonc = PyFloat_AsDouble(nonc)
- if fdfnum <= 1:
- raise ValueError("dfnum <= 1")
+ if fdfnum <= 0:
+ raise ValueError("dfnum <= 0")
if fdfden <= 0:
raise ValueError("dfden <= 0")
if fnonc < 0:
@@ -2184,8 +2188,8 @@ cdef class RandomState:
return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
fdfnum, fdfden, fnonc, self.lock)
- if np.any(np.less_equal(odfnum, 1.0)):
- raise ValueError("dfnum <= 1")
+ if np.any(np.less_equal(odfnum, 0.0)):
+ raise ValueError("dfnum <= 0")
if np.any(np.less_equal(odfden, 0.0)):
raise ValueError("dfden <= 0")
if np.any(np.less(ononc, 0.0)):
@@ -2206,8 +2210,8 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
- Number of degrees of freedom.
+ df : float or array_like of floats
+ Number of degrees of freedom, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2285,9 +2289,11 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0 as of NumPy 1.10.0,
- should be > 1 for earlier versions.
+ df : float or array_like of floats
+ Degrees of freedom, should be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required dfnum > 1.
nonc : float or array_like of floats
Non-centrality, should be non-negative.
size : int or tuple of ints, optional
@@ -2455,7 +2461,7 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
+ df : float or array_like of floats
Degrees of freedom, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -4666,6 +4672,11 @@ cdef class RandomState:
samples : ndarray,
The drawn samples, of shape (size, alpha.ndim).
+ Raises
+ -------
+ ValueError
+ If any value in alpha is less than or equal to zero
+
Notes
-----
.. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}
@@ -4731,6 +4742,8 @@ cdef class RandomState:
k = len(alpha)
alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1)
+ if np.any(np.less_equal(alpha_arr, 0)):
+ raise ValueError('alpha <= 0')
alpha_data = <double*>PyArray_DATA(alpha_arr)
shape = _shape_from_size(size, k)
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index d5b0d74ca..32b19c1ab 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -130,6 +130,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_IterNew(object arr)
void PyArray_ITER_NEXT(flatiter it) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
dtype PyArray_DescrFromType(int)
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
index 3a95efeeb..380917180 100644
--- a/numpy/random/mtrand/randomkit.c
+++ b/numpy/random/mtrand/randomkit.c
@@ -64,13 +64,6 @@
/* static char const rcsid[] =
"@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <math.h>
-#include <assert.h>
#ifdef _WIN32
/*
@@ -109,18 +102,27 @@
#include <wincrypt.h>
#endif
+/*
+ * Do not move this include. randomkit.h must be included
+ * after windows timeb.h is included.
+ */
+#include "randomkit.h"
+
#else
/* Unix */
+#include "randomkit.h"
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#endif
-/*
- * Do not move this include. randomkit.h must be included
- * after windows timeb.h is included.
- */
-#include "randomkit.h"
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <assert.h>
#ifndef RK_DEV_URANDOM
#define RK_DEV_URANDOM "/dev/urandom"
diff --git a/numpy/random/tests/__init__.py b/numpy/random/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/random/tests/__init__.py
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 0e7396494..a530b9e13 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -3,15 +3,16 @@ import warnings
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises, assert_equal,
- assert_warns, assert_no_warnings, assert_array_equal,
- assert_array_almost_equal, suppress_warnings)
+ run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings
+ )
from numpy import random
import sys
import warnings
-class TestSeed(TestCase):
+class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -42,7 +43,7 @@ class TestSeed(TestCase):
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
-class TestBinomial(TestCase):
+class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -57,7 +58,7 @@ class TestBinomial(TestCase):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(TestCase):
+class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -82,11 +83,11 @@ class TestMultinomial(TestCase):
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
- np.float(1))
+ float(1))
-class TestSetState(TestCase):
- def setUp(self):
+class TestSetState(object):
+ def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
@@ -133,7 +134,7 @@ class TestSetState(TestCase):
self.prng.negative_binomial(0.5, 0.5)
-class TestRandint(TestCase):
+class TestRandint(object):
rfunc = np.random.randint
@@ -142,7 +143,7 @@ class TestRandint(TestCase):
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
- assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
@@ -199,7 +200,7 @@ class TestRandint(TestCase):
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
- # in the range [0, 6) for all but np.bool, where the range
+ # in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
@@ -225,9 +226,9 @@ class TestRandint(TestCase):
# bools do not depend on endianess
np.random.seed(1234)
- val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
- assert_(tgt[np.dtype(np.bool).name] == res)
+ assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
@@ -259,23 +260,23 @@ class TestRandint(TestCase):
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
- self.assertEqual(sample.dtype, np.dtype(dt))
+ assert_equal(sample.dtype, np.dtype(dt))
- for dt in (np.bool, np.int, np.long):
- lbnd = 0 if dt is np.bool else np.iinfo(dt).min
- ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+ for dt in (bool, int, np.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
- self.assertFalse(hasattr(sample, 'dtype'))
- self.assertEqual(type(sample), dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
-class TestRandomDist(TestCase):
+class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
- def setUp(self):
+ def setup(self):
self.seed = 1234567890
def test_rand(self):
@@ -522,7 +523,12 @@ class TestRandomDist(TestCase):
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
- assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
+ assert_raises(TypeError, np.random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
@@ -929,10 +935,10 @@ class TestRandomDist(TestCase):
assert_array_equal(actual, desired)
-class TestBroadcast(TestCase):
+class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
- def setUp(self):
+ def setup(self):
self.seed = 123456789
def setSeed(self):
@@ -1100,7 +1106,13 @@ class TestBroadcast(TestCase):
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
-
+
+ def test_noncentral_f_small_df(self):
+ self.setSeed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
def test_chisquare(self):
df = [1]
bad_df = [-1]
@@ -1484,9 +1496,9 @@ class TestBroadcast(TestCase):
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(TestCase):
+class TestThread(object):
# make sure each state produces the same sequence even in threads
- def setUp(self):
+ def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
@@ -1527,8 +1539,8 @@ class TestThread(TestCase):
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
-class TestSingleEltArrayInput(TestCase):
- def setUp(self):
+class TestSingleEltArrayInput(object):
+ def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
@@ -1551,7 +1563,7 @@ class TestSingleEltArrayInput(TestCase):
else:
out = func(self.argOne)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
@@ -1572,17 +1584,17 @@ class TestSingleEltArrayInput(TestCase):
argTwo = self.argTwo
out = func(self.argOne, argTwo)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
-# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
@@ -1604,13 +1616,13 @@ class TestSingleEltArrayInput(TestCase):
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index ce435b374..572f4c087 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,14 +1,15 @@
from __future__ import division, absolute_import, print_function
import sys
-from numpy.testing import (TestCase, run_module_suite, assert_,
- assert_array_equal, assert_raises)
+from numpy.testing import (
+ run_module_suite, assert_, assert_array_equal, assert_raises,
+ )
from numpy import random
from numpy.compat import long
import numpy as np
-class TestRegression(TestCase):
+class TestRegression(object):
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 625fdecdc..9485b455e 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -10,6 +10,6 @@ from __future__ import division, absolute_import, print_function
from unittest import TestCase
from . import decorators as dec
-from .nosetester import run_module_suite, NoseTester as Tester
+from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester
from .utils import *
-test = nosetester._numpy_tester().test
+test = _numpy_tester().test
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 17400c0d5..b63850090 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -1,265 +1,6 @@
"""
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward. Decorators that return a new
-function object need to use
-::
-
- nose.tools.make_decorator(original_function)(decorator)
-
-in returning the decorator, in order to preserve meta-data such as
-function name, setup and teardown functions and so on - see
-``nose.tools`` for more information.
+Back compatibility decorators module. It will import the appropriate
+set of tools
"""
-from __future__ import division, absolute_import, print_function
-
-import collections
-
-from .utils import SkipTest, assert_warns
-
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to label as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
- The `numpy.testing` module includes ``import decorators as dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print('Big, slow test')
-
- """
-
- t.slow = True
- return t
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Notes
- -----
- This decorator can't use the nose namespace, because it can be
- called from a non-test module. See also ``istest`` and ``nottest`` in
- ``nose.tools``.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing.decorators import setastest
-
- @setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
-
- def skip_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- # Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections.Callable):
- skip_val = lambda: skip_condition()
- else:
- skip_val = lambda: skip_condition
-
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
-
- return "Skipping test: %s: %s" % (func.__name__, out)
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if nose.util.isgenerator(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
-
- return nose.tools.make_decorator(f)(skipper)
-
- return skip_decorator
-
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureException exception if given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureException exception.
- Default is None.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes
- KnownFailureException to be raised when `fail_condition` is True,
- and the function to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections.Callable):
- fail_val = lambda: fail_condition()
- else:
- fail_val = lambda: fail_condition
-
- def knownfail_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
- from .noseclasses import KnownFailureException
-
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureException(msg)
- else:
- return f(*args, **kwargs)
- return nose.tools.make_decorator(f)(knownfailer)
-
- return knownfail_decorator
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarning's, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- def _deprecated_imp(*args, **kwargs):
- # Poor man's replacement for the with statement
- with assert_warns(DeprecationWarning):
- f(*args, **kwargs)
-
- if isinstance(conditional, collections.Callable):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return nose.tools.make_decorator(f)(_deprecated_imp)
- else:
- return f
- return deprecate_decorator
+from .nose_tools.decorators import *
diff --git a/numpy/testing/nose_tools/__init__.py b/numpy/testing/nose_tools/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/testing/nose_tools/__init__.py
diff --git a/numpy/testing/nose_tools/decorators.py b/numpy/testing/nose_tools/decorators.py
new file mode 100644
index 000000000..12531e734
--- /dev/null
+++ b/numpy/testing/nose_tools/decorators.py
@@ -0,0 +1,282 @@
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+ nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import collections
+
+from .utils import SkipTest, assert_warns
+
+
+def slow(t):
+ """
+ Label a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consists of
+ thousands of tests, so even a second is significant).
+
+ Parameters
+ ----------
+ t : callable
+ The test to label as slow.
+
+ Returns
+ -------
+ t : callable
+ The decorated test `t`.
+
+ Examples
+ --------
+ The `numpy.testing` module includes ``import decorators as dec``.
+ A test can be decorated as slow like this::
+
+ from numpy.testing import *
+
+ @dec.slow
+ def test_big(self):
+ print('Big, slow test')
+
+ """
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ """
+ Signals to nose that this function is or is not a test.
+
+ Parameters
+ ----------
+ tf : bool
+ If True, specifies that the decorated callable is a test.
+ If False, specifies that the decorated callable is not a test.
+ Default is True.
+
+ Notes
+ -----
+ This decorator can't use the nose namespace, because it can be
+ called from a non-test module. See also ``istest`` and ``nottest`` in
+ ``nose.tools``.
+
+ Examples
+ --------
+ `setastest` can be used in the following way::
+
+ from numpy.testing import dec
+
+ @dec.setastest(False)
+ def func_with_test_in_name(arg1, arg2):
+ pass
+
+ """
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ """
+ Make function raise SkipTest exception if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ skip_condition : bool or callable
+ Flag to determine whether to skip the decorated test.
+ msg : str, optional
+ Message to give on raising a SkipTest exception. Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator which, when applied to a function, causes SkipTest
+ to be raised when `skip_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Allow for both boolean or callable skip conditions.
+ if isinstance(skip_condition, collections.Callable):
+ skip_val = lambda: skip_condition()
+ else:
+ skip_val = lambda: skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None:
+ out = 'Test skipped due to test condition'
+ else:
+ out = msg
+
+ return "Skipping test: %s: %s" % (func.__name__, out)
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ for x in f(*args, **kwargs):
+ yield x
+
+ # Choose the right skipper to use when building the actual decorator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
+
+
+def knownfailureif(fail_condition, msg=None):
+ """
+ Make function raise KnownFailureException exception if given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ fail_condition : bool or callable
+ Flag to determine whether to mark the decorated test as a known
+ failure (if True) or not (if False).
+ msg : str, optional
+ Message to give on raising a KnownFailureException exception.
+ Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes
+ KnownFailureException to be raised when `fail_condition` is True,
+ and the function to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+ if msg is None:
+ msg = 'Test skipped due to known failure'
+
+ # Allow for both boolean or callable known failure conditions.
+ if isinstance(fail_condition, collections.Callable):
+ fail_val = lambda: fail_condition()
+ else:
+ fail_val = lambda: fail_condition
+
+ def knownfail_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+ from .noseclasses import KnownFailureException
+
+ def knownfailer(*args, **kwargs):
+ if fail_val():
+ raise KnownFailureException(msg)
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(knownfailer)
+
+ return knownfail_decorator
+
+def deprecated(conditional=True):
+ """
+ Filter deprecation warnings while running the test suite.
+
+ This decorator can be used to filter DeprecationWarning's, to avoid
+ printing them during the test suite run, while checking that the test
+ actually raises a DeprecationWarning.
+
+ Parameters
+ ----------
+ conditional : bool or callable, optional
+ Flag to determine whether to mark test as deprecated or not. If the
+ condition is a callable, it is used at runtime to dynamically make the
+ decision. Default is True.
+
+ Returns
+ -------
+ decorator : function
+ The `deprecated` decorator itself.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ def deprecate_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ def _deprecated_imp(*args, **kwargs):
+ # Poor man's replacement for the with statement
+ with assert_warns(DeprecationWarning):
+ f(*args, **kwargs)
+
+ if isinstance(conditional, collections.Callable):
+ cond = conditional()
+ else:
+ cond = conditional
+ if cond:
+ return nose.tools.make_decorator(f)(_deprecated_imp)
+ else:
+ return f
+ return deprecate_decorator
+
+
+def parametrize(vars, input):
+ """
+ Pytest compatibility function. This implements the simplest level of
+ pytest.mark.parametrize for use in nose as an aid in making the transition
+ to pytest. It achieves that by adding a dummy var parameter and ignoring
+ the doc_func parameter of the base class. It does not support variable
+ substitution by name, nor does it support nesting or classes. See the
+ pytest documentation for usage.
+
+ .. versionadded:: 1.14.0
+
+ """
+ from .parameterized import parameterized
+
+ return parameterized(input)
diff --git a/numpy/testing/nose_tools/noseclasses.py b/numpy/testing/nose_tools/noseclasses.py
new file mode 100644
index 000000000..9756b9b45
--- /dev/null
+++ b/numpy/testing/nose_tools/noseclasses.py
@@ -0,0 +1,366 @@
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
+# Because this module imports nose directly, it should not
+# be used except by nosetester.py to avoid a general NumPy
+# dependency on nose.
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import doctest
+import inspect
+
+import numpy
+import nose
+from nose.plugins import doctests as npd
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from nose.plugins.base import Plugin
+from nose.util import src
+from .nosetester import get_package_name
+from .utils import KnownFailureException, KnownFailureTest
+
+
+# Some of the classes in this module begin with 'Numpy' to clearly distinguish
+# them from the plethora of very similar names from nose/unittest/doctest
+
+#-----------------------------------------------------------------------------
+# Modified version of the one in the stdlib, that fixes a python bug (doctests
+# not found in extension modules, http://bugs.python.org/issue3158)
+class NumpyDocTestFinder(doctest.DocTestFinder):
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.isbuiltin(object):
+ return module.__name__ == object.__module__
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.ismethod(object):
+ # This one may be a bug in cython that fails to correctly set the
+ # __module__ attribute of methods, but since the same error is easy
+ # to make by extension code writers, having this safety in place
+ # isn't such a bad idea
+ return module.__name__ == object.__self__.__class__.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+ return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+
+ doctest.DocTestFinder._find(self, tests, obj, name, module,
+ source_lines, globs, seen)
+
+ # Below we re-run pieces of the above method with manual modifications,
+ # because the original code is buggy and fails to correctly identify
+ # doctests in extension modules.
+
+ # Local shorthands
+ from inspect import (
+ isroutine, isclass, ismodule, isfunction, ismethod
+ )
+
+ # Look for tests in a module's contained objects.
+ if ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname1 = '%s.%s' % (name, valname)
+ if ( (isroutine(val) or isclass(val))
+ and self._from_module(module, val)):
+
+ self._find(tests, val, valname1, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((isfunction(val) or isclass(val) or
+ ismethod(val) or isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+
+# second-chance checker; if the default comparison doesn't
+# pass, then see if the expected output string contains flags that
+# tell us to ignore the output
+class NumpyOutputChecker(doctest.OutputChecker):
+ def check_output(self, want, got, optionflags):
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret:
+ if "#random" in want:
+ return True
+
+ # it would be useful to normalize endianness so that
+ # bigendian machines don't fail all the tests (and there are
+ # actually some bigendian examples in the doctests). Let's try
+ # making them all little endian
+ got = got.replace("'>", "'<")
+ want = want.replace("'>", "'<")
+
+ # try to normalize out 32 and 64 bit default int sizes
+ for sz in [4, 8]:
+ got = got.replace("'<i%d'" % sz, "int")
+ want = want.replace("'<i%d'" % sz, "int")
+
+ ret = doctest.OutputChecker.check_output(self, want,
+ got, optionflags)
+
+ return ret
+
+
+# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
+# its constructor that blocks non-default arguments from being passed
+# down into doctest.DocTestCase
+class NumpyDocTestCase(npd.DocTestCase):
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ doctest.DocTestCase.__init__(self, test,
+ optionflags=optionflags,
+ setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+
+print_state = numpy.get_printoptions()
+
+class NumpyDoctest(npd.Doctest):
+ name = 'numpydoctest' # call nosetests with --with-numpydoctest
+ score = 1000 # load late, after doctest builtin
+
+ # always use whitespace and ellipsis options for doctests
+ doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ # files that should be ignored for doctests
+ doctest_ignore = ['generate_numpy_api.py',
+ 'setup.py']
+
+ # Custom classes; class variables to allow subclassing
+ doctest_case_class = NumpyDocTestCase
+ out_check_class = NumpyOutputChecker
+ test_finder_class = NumpyDocTestFinder
+
+ # Don't use the standard doctest option handler; hard-code the option values
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+ # Test doctests in 'test' files / directories. Standard plugin default
+ # is False
+ self.doctest_tests = True
+ # Variable name; if defined, doctest results stored in this variable in
+ # the top-level namespace. None is the standard default
+ self.doctest_result_var = None
+
+ def configure(self, options, config):
+ # parent method sets enabled flag from command line --with-numpydoctest
+ Plugin.configure(self, options, config)
+ self.finder = self.test_finder_class()
+ self.parser = doctest.DocTestParser()
+ if self.enabled:
+ # Pull standard doctest out of plugin list; there's no reason to run
+ # both. In practice the Unplugger plugin above would cover us when
+ # run from a standard numpy.test() call; this is just in case
+ # someone wants to run our plugin outside the numpy.test() machinery
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+
+ def set_test_context(self, test):
+ """ Configure `test` object to set test context
+
+ We set the numpy / scipy standard doctest namespace
+
+ Parameters
+ ----------
+ test : test object
+ with ``globs`` dictionary defining namespace
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ `test` object modified in place
+ """
+ # set the namespace for tests
+ pkg_name = get_package_name(os.path.dirname(test.filename))
+
+ # Each doctest should execute in an environment equivalent to
+ # starting Python and executing "import numpy as np", and,
+ # for SciPy packages, an additional import of the local
+ # package (so that scipy.linalg.basic.py's doctests have an
+ # implicit "from scipy import linalg" as well.
+ #
+ # Note: __file__ allows the doctest in NoseTester to run
+ # without producing an error
+ test.globs = {'__builtins__':__builtins__,
+ '__file__':'__main__',
+ '__name__':'__main__',
+ 'np':numpy}
+ # add appropriate scipy import for SciPy tests
+ if 'scipy' in pkg_name:
+ p = pkg_name.split('.')
+ p2 = p[-1]
+ test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
+
+ # Override test loading to customize test context (with set_test_context
+ # method), set standard docstring options, and install our own test output
+ # checker
+ def loadTestsFromModule(self, module):
+ if not self.matches(module.__name__):
+ npd.log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ # nose allows module.__test__ = False; doctest does not and
+ # throws AttributeError
+ return
+ if not tests:
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ # Set test namespace; test altered in place
+ self.set_test_context(test)
+ yield self.doctest_case_class(test,
+ optionflags=self.doctest_optflags,
+ checker=self.out_check_class(),
+ result_var=self.doctest_result_var)
+
+ # Add an afterContext method to nose.plugins.doctests.Doctest in order
+ # to restore print options to the original state after each doctest
+ def afterContext(self):
+ numpy.set_printoptions(**print_state)
+
+ # Ignore NumPy-specific build files that shouldn't be searched for tests
+ def wantFile(self, file):
+ bn = os.path.basename(file)
+ if bn in self.doctest_ignore:
+ return False
+ return npd.Doctest.wantFile(self, file)
+
+
+class Unplugger(object):
+ """ Nose plugin to remove named plugin late in loading
+
+ By default it removes the "doctest" plugin.
+ """
+ name = 'unplugger'
+ enabled = True # always enabled
+ score = 4000 # load late in order to be after builtins
+
+ def __init__(self, to_unplug='doctest'):
+ self.to_unplug = to_unplug
+
+ def options(self, parser, env):
+ pass
+
+ def configure(self, options, config):
+ # Pull named plugin out of plugins list
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != self.to_unplug]
+
+
+class KnownFailurePlugin(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+ KnownFailureException exception. When KnownFailureException is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.'''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureException,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailure '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+KnownFailure = KnownFailurePlugin # backwards compat
+
+
+class FPUModeCheckPlugin(Plugin):
+ """
+ Plugin that checks the FPU mode before and after each test,
+ raising failures if the test changed the mode.
+ """
+
+ def prepareTestCase(self, test):
+ from numpy.core.multiarray_tests import get_fpu_mode
+
+ def run(result):
+ old_mode = get_fpu_mode()
+ test.test(result)
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ try:
+ raise AssertionError(
+ "FPU mode changed from {0:#x} to {1:#x} during the "
+ "test".format(old_mode, new_mode))
+ except AssertionError:
+ result.addFailure(test, sys.exc_info())
+
+ return run
+
+
+# Class allows us to save the results of the tests in runTests - see runTests
+# method docstring for details
+class NumpyTestProgram(nose.core.TestProgram):
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and
+ sets self.success to the same value.
+
+ Because nose currently discards the test result object, but we need
+ to return it to the user, override TestProgram.runTests to retain
+ the result
+ """
+ if self.testRunner is None:
+ self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ self.result = self.testRunner.run(self.test)
+ self.success = self.result.wasSuccessful()
+ return self.success
diff --git a/numpy/testing/nose_tools/nosetester.py b/numpy/testing/nose_tools/nosetester.py
new file mode 100644
index 000000000..c2cf58377
--- /dev/null
+++ b/numpy/testing/nose_tools/nosetester.py
@@ -0,0 +1,560 @@
+"""
+Nose test running.
+
+This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import warnings
+from numpy.compat import basestring
+import numpy as np
+
+from .utils import import_nose, suppress_warnings
+
+
+# Public API of this module; 'import_nose' and 'suppress_warnings' are
+# re-exported from .utils.
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+           '_numpy_tester', 'import_nose', 'suppress_warnings']
+
+
+def get_package_name(filepath):
+    """
+    Given a path where a package is installed, determine its name.
+
+    Parameters
+    ----------
+    filepath : str
+        Path to a file. If the determination fails, "numpy" is returned.
+
+    Returns
+    -------
+    str
+        Dotted package name (e.g. "numpy.core"); falls back to "scipy" or
+        "numpy" when the path contains no site-/dist-packages directory.
+
+    Examples
+    --------
+    >>> np.testing.nosetester.get_package_name('nonsense')
+    'numpy'
+
+    """
+
+    fullpath = filepath[:]
+    pkg_name = []
+    # Walk up the path collecting components until the enclosing
+    # site-packages/dist-packages directory is reached.
+    while 'site-packages' in filepath or 'dist-packages' in filepath:
+        filepath, p2 = os.path.split(filepath)
+        if p2 in ('site-packages', 'dist-packages'):
+            break
+        pkg_name.append(p2)
+
+    # if package name determination failed, just default to numpy/scipy
+    if not pkg_name:
+        if 'scipy' in fullpath:
+            return 'scipy'
+        else:
+            return 'numpy'
+
+    # otherwise, reverse to get correct order and return
+    pkg_name.reverse()
+
+    # don't include the outer egg directory
+    if pkg_name[0].endswith('.egg'):
+        pkg_name.pop(0)
+
+    return '.'.join(pkg_name)
+
+
+def run_module_suite(file_to_run=None, argv=None):
+    """
+    Run a test module.
+
+    Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
+    the command line
+
+    Parameters
+    ----------
+    file_to_run : str, optional
+        Path to test module, or None.
+        By default, run the module from which this function is called.
+    argv : list of strings
+        Arguments to be passed to the nose test runner. ``argv[0]`` is
+        ignored. All command line arguments accepted by ``nosetests``
+        will work. If it is the default value None, sys.argv is used.
+
+        .. versionadded:: 1.9.0
+
+    Examples
+    --------
+    Adding the following::
+
+        if __name__ == "__main__" :
+            run_module_suite(argv=sys.argv)
+
+    at the end of a test module will run the tests when that module is
+    called in the python interpreter.
+
+    Alternatively, calling::
+
+    >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+
+    from an interpreter will run all the test routines in 'test_matlib.py'.
+    """
+    if file_to_run is None:
+        # Default to the caller's module: look up __file__ in the frame
+        # one level up the stack.
+        f = sys._getframe(1)
+        file_to_run = f.f_locals.get('__file__', None)
+        if file_to_run is None:
+            raise AssertionError
+
+    if argv is None:
+        argv = sys.argv + [file_to_run]
+    else:
+        argv = argv + [file_to_run]
+
+    nose = import_nose()
+    # Run with the KnownFailure plugin so known failures are reported as
+    # such instead of as errors.
+    from .noseclasses import KnownFailurePlugin
+    nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
+
+
+class NoseTester(object):
+ """
+ Nose test runner.
+
+ This class is made available as numpy.testing.Tester, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import Tester
+ test = Tester().test
+
+ Calling this test function finds and runs all tests associated with the
+ package and all its sub-packages.
+
+ Attributes
+ ----------
+ package_path : str
+ Full path to the package to test.
+ package_name : str
+ Name of the package to test.
+
+ Parameters
+ ----------
+ package : module, str or None, optional
+ The package to test. If a string, this should be the full path to
+ the package. If None (default), `package` is set to the module from
+ which `NoseTester` is initialized.
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ Default is "release".
+ depth : int, optional
+ If `package` is None, then this can be used to initialize from the
+ module of the caller of (the caller of (...)) the code that
+ initializes `NoseTester`. Default of 0 means the module of the
+ immediate caller; higher values are useful for utility routines that
+ want to initialize `NoseTester` objects on behalf of other code.
+
+ """
+    def __init__(self, package=None, raise_warnings="release", depth=0,
+                 check_fpu_mode=False):
+        # check_fpu_mode : bool, optional
+        #     If True, check the FPU mode before and after each test and
+        #     fail the test if it changed (see FPUModeCheckPlugin).
+        #
+        # Back-compat: 'None' used to mean either "release" or "develop"
+        # depending on whether this was a release or develop version of
+        # numpy. Those semantics were fine for testing numpy, but not so
+        # helpful for downstream projects like scipy that use
+        # numpy.testing. (They want to set this based on whether *they* are a
+        # release or develop version, not whether numpy is.) So we continue to
+        # accept 'None' for back-compat, but it's now just an alias for the
+        # default "release".
+        if raise_warnings is None:
+            raise_warnings = "release"
+
+        package_name = None
+        if package is None:
+            # Initialize from the caller's module, `depth` frames up.
+            f = sys._getframe(1 + depth)
+            package_path = f.f_locals.get('__file__', None)
+            if package_path is None:
+                raise AssertionError
+            package_path = os.path.dirname(package_path)
+            package_name = f.f_locals.get('__name__', None)
+        elif isinstance(package, type(os)):
+            # `package` is a module object.
+            package_path = os.path.dirname(package.__file__)
+            package_name = getattr(package, '__name__', None)
+        else:
+            # `package` is assumed to be a path string.
+            package_path = str(package)
+
+        self.package_path = package_path
+
+        # Find the package name under test; this name is used to limit coverage
+        # reporting (if enabled).
+        if package_name is None:
+            package_name = get_package_name(package_path)
+        self.package_name = package_name
+
+        # Set to "release" in constructor in maintenance branches.
+        self.raise_warnings = raise_warnings
+
+        # Whether to check for FPU mode changes
+        self.check_fpu_mode = check_fpu_mode
+
+    def _test_argv(self, label, verbose, extra_argv):
+        ''' Generate argv for nosetest command
+
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            see ``test`` docstring
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+
+        Returns
+        -------
+        argv : list
+            command line arguments that will be passed to nose
+        '''
+        # '-s' tells nose not to capture stdout, so test output is visible.
+        argv = [__file__, self.package_path, '-s']
+        if label and label != 'full':
+            if not isinstance(label, basestring):
+                raise TypeError('Selection label should be a string')
+            # 'fast' is shorthand for the attribute expression 'not slow'.
+            if label == 'fast':
+                label = 'not slow'
+            argv += ['-A', label]
+        argv += ['--verbosity', str(verbose)]
+
+        # When installing with setuptools, and also in some other cases, the
+        # test_*.py files end up marked +x executable. Nose, by default, does
+        # not run files marked with +x as they might be scripts. However, in
+        # our case nose only looks for test_*.py files under the package
+        # directory, which should be safe.
+        argv += ['--exe']
+
+        if extra_argv:
+            argv += extra_argv
+        return argv
+
+    def _show_system_info(self):
+        """Print versions and install locations of numpy (and scipy, when
+        testing a scipy package), plus the Python and nose versions."""
+        nose = import_nose()
+
+        import numpy
+        print("NumPy version %s" % numpy.__version__)
+        # f_contiguous on a (10, 1) C-ordered array is only True when numpy
+        # was built with relaxed strides checking.
+        relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
+        print("NumPy relaxed strides checking option:", relaxed_strides)
+        npdir = os.path.dirname(numpy.__file__)
+        print("NumPy is installed in %s" % npdir)
+
+        if 'scipy' in self.package_name:
+            import scipy
+            print("SciPy version %s" % scipy.__version__)
+            spdir = os.path.dirname(scipy.__file__)
+            print("SciPy is installed in %s" % spdir)
+
+        pyversion = sys.version.replace('\n', '')
+        print("Python version %s" % pyversion)
+        print("nose version %d.%d.%d" % nose.__versioninfo__)
+
+    def _get_custom_doctester(self):
+        """ Return instantiated plugin for doctests
+
+        Allows subclassing of this class to override doctester
+
+        A return value of None means use the nose builtin doctest plugin
+        """
+        # Imported lazily so nose is only required when tests actually run.
+        from .noseclasses import NumpyDoctest
+        return NumpyDoctest()
+
+    def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
+                          doctests=False, coverage=False, timer=False):
+        """
+        Run tests for module using nose.
+
+        This method does the heavy lifting for the `test` method. It takes all
+        the same arguments, for details see `test`.
+
+        Returns
+        -------
+        argv : list
+            Command line arguments for nose.
+        plugins : list
+            Instantiated nose plugin objects to run with.
+
+        See Also
+        --------
+        test
+
+        """
+        # fail with nice error message if nose is not present
+        import_nose()
+        # compile argv
+        argv = self._test_argv(label, verbose, extra_argv)
+        # our way of doing coverage
+        if coverage:
+            argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
+                     '--cover-tests', '--cover-erase']
+
+        if timer:
+            if timer is True:
+                argv += ['--with-timer']
+            elif isinstance(timer, int):
+                # report timings for only the `timer` slowest tests
+                argv += ['--with-timer', '--timer-top-n', str(timer)]
+
+        # construct list of plugins
+        import nose.plugins.builtin
+        from nose.plugins import EntryPointPluginManager
+        from .noseclasses import (KnownFailurePlugin, Unplugger,
+                                  FPUModeCheckPlugin)
+        plugins = [KnownFailurePlugin()]
+        plugins += [p() for p in nose.plugins.builtin.plugins]
+        if self.check_fpu_mode:
+            plugins += [FPUModeCheckPlugin()]
+            argv += ["--with-fpumodecheckplugin"]
+        try:
+            # External plugins (like nose-timer)
+            entrypoint_manager = EntryPointPluginManager()
+            entrypoint_manager.loadPlugins()
+            plugins += [p for p in entrypoint_manager.plugins]
+        except ImportError:
+            # Relies on pkg_resources, not a hard dependency
+            pass
+
+        # add doctesting if required
+        doctest_argv = '--with-doctest' in argv
+        # --with-doctest passed via extra_argv implies doctests=True
+        if doctests == False and doctest_argv:
+            doctests = True
+        plug = self._get_custom_doctester()
+        if plug is None:
+            # use standard doctesting
+            if doctests and not doctest_argv:
+                argv += ['--with-doctest']
+        else:  # custom doctesting
+            if doctest_argv:  # in fact the unplugger would take care of this
+                argv.remove('--with-doctest')
+            plugins += [Unplugger('doctest'), plug]
+            if doctests:
+                argv += ['--with-' + plug.name]
+        return argv, plugins
+
+    def test(self, label='fast', verbose=1, extra_argv=None,
+             doctests=False, coverage=False, raise_warnings=None,
+             timer=False):
+        """
+        Run tests for module using nose.
+
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the tests to run. This can be a string to pass to
+            the nosetests executable with the '-A' option, or one of several
+            special values.  Special values are:
+            * 'fast' - the default - which corresponds to the ``nosetests -A``
+              option of 'not slow'.
+            * 'full' - fast (as above) and slow tests as in the
+              'no -A' option to nosetests - this is the same as ''.
+            * None or '' - run all tests.
+            attribute_identifier - string passed directly to nosetests as '-A'.
+        verbose : int, optional
+            Verbosity value for test outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+        doctests : bool, optional
+            If True, run doctests in module. Default is False.
+        coverage : bool, optional
+            If True, report coverage of NumPy code. Default is False.
+            (This requires the `coverage module:
+            <http://nedbatchelder.com/code/modules/coverage.html>`_).
+        raise_warnings : None, str or sequence of warnings, optional
+            This specifies which warnings to configure as 'raise' instead
+            of being shown once during the test execution.  Valid strings are:
+
+              - "develop" : equals ``(Warning,)``
+              - "release" : equals ``()``, don't raise on any warnings.
+
+            The default is to use the class initialization value.
+        timer : bool or int, optional
+            Timing of individual tests with ``nose-timer`` (which needs to be
+            installed).  If True, time tests and report on all of them.
+            If an integer (say ``N``), report timing results for ``N`` slowest
+            tests.
+
+        Returns
+        -------
+        result : object
+            Returns the result of running the tests as a
+            ``nose.result.TextTestResult`` object.
+
+        Notes
+        -----
+        Each NumPy module exposes `test` in its namespace to run all tests for it.
+        For example, to run all tests for numpy.lib:
+
+        >>> np.lib.test() #doctest: +SKIP
+
+        Examples
+        --------
+        >>> result = np.lib.test() #doctest: +SKIP
+        Running unit tests for numpy.lib
+        ...
+        Ran 976 tests in 3.933s
+
+        OK
+
+        >>> result.errors #doctest: +SKIP
+        []
+        >>> result.knownfail #doctest: +SKIP
+        []
+        """
+
+        # cap verbosity at 3 because nose becomes *very* verbose beyond that
+        verbose = min(verbose, 3)
+
+        # Let the testing utilities know the chosen verbosity level.
+        from . import utils
+        utils.verbose = verbose
+
+        argv, plugins = self.prepare_test_args(
+                label, verbose, extra_argv, doctests, coverage, timer)
+
+        if doctests:
+            print("Running unit tests and doctests for %s" % self.package_name)
+        else:
+            print("Running unit tests for %s" % self.package_name)
+
+        self._show_system_info()
+
+        # reset doctest state on every run
+        import doctest
+        doctest.master = None
+
+        if raise_warnings is None:
+            raise_warnings = self.raise_warnings
+
+        # Map the string shorthands to tuples of warning categories.
+        _warn_opts = dict(develop=(Warning,),
+                          release=())
+        if isinstance(raise_warnings, basestring):
+            raise_warnings = _warn_opts[raise_warnings]
+
+        with suppress_warnings("location") as sup:
+            # Reset the warning filters to the default state,
+            # so that running the tests is more repeatable.
+            warnings.resetwarnings()
+            # Set all warnings to 'warn', this is because the default 'once'
+            # has the bad property of possibly shadowing later warnings.
+            warnings.filterwarnings('always')
+            # Force the requested warnings to raise
+            for warningtype in raise_warnings:
+                warnings.filterwarnings('error', category=warningtype)
+            # Filter out annoying import messages.
+            sup.filter(message='Not importing directory')
+            sup.filter(message="numpy.dtype size changed")
+            sup.filter(message="numpy.ufunc size changed")
+            sup.filter(category=np.ModuleDeprecationWarning)
+            # Filter out boolean '-' deprecation messages. This allows
+            # older versions of scipy to test without a flood of messages.
+            sup.filter(message=".*boolean negative.*")
+            sup.filter(message=".*boolean subtract.*")
+            # Filter out distutils cpu warnings (could be localized to
+            # distutils tests). ASV has problems with top level import,
+            # so fetch module for suppression here.
+            with warnings.catch_warnings():
+                warnings.simplefilter("always")
+                from ...distutils import cpuinfo
+            sup.filter(category=UserWarning, module=cpuinfo)
+            # See #7949: Filter out deprecation warnings due to the -3 flag to
+            # python 2
+            if sys.version_info.major == 2 and sys.py3kwarning:
+                # This is very specific, so using the fragile module filter
+                # is fine
+                import threading
+                sup.filter(DeprecationWarning,
+                           r"sys\.exc_clear\(\) not supported in 3\.x",
+                           module=threading)
+                sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
+                sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
+                sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
+                sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
+                sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
+            # Filter out some deprecation warnings inside nose 1.3.7 when run
+            # on python 3.5b2. See
+            # https://github.com/nose-devs/nose/issues/929
+            # Note: it is hard to filter based on module for sup (lineno could
+            # be implemented).
+            warnings.filterwarnings("ignore", message=".*getargspec.*",
+                                    category=DeprecationWarning,
+                                    module=r"nose\.")
+
+            from .noseclasses import NumpyTestProgram
+
+            # exit=False so the interpreter is not terminated after the run.
+            t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+        return t.result
+
+    def bench(self, label='fast', verbose=1, extra_argv=None):
+        """
+        Run benchmarks for module using nose.
+
+        Parameters
+        ----------
+        label : {'fast', 'full', '', attribute identifier}, optional
+            Identifies the benchmarks to run. This can be a string to pass to
+            the nosetests executable with the '-A' option, or one of several
+            special values.  Special values are:
+            * 'fast' - the default - which corresponds to the ``nosetests -A``
+              option of 'not slow'.
+            * 'full' - fast (as above) and slow benchmarks as in the
+              'no -A' option to nosetests - this is the same as ''.
+            * None or '' - run all tests.
+            attribute_identifier - string passed directly to nosetests as '-A'.
+        verbose : int, optional
+            Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
+        extra_argv : list, optional
+            List with any extra arguments to pass to nosetests.
+
+        Returns
+        -------
+        success : bool
+            Returns True if running the benchmarks works, False if an error
+            occurred.
+
+        Notes
+        -----
+        Benchmarks are like tests, but have names starting with "bench" instead
+        of "test", and can be found under the "benchmarks" sub-directory of the
+        module.
+
+        Each NumPy module exposes `bench` in its namespace to run all benchmarks
+        for it.
+
+        Examples
+        --------
+        >>> success = np.lib.bench() #doctest: +SKIP
+        Running benchmarks for numpy.lib
+        ...
+        using 562341 items:
+        unique:
+        0.11
+        unique1d:
+        0.11
+        ratio: 1.0
+        nUnique: 56230 == 56230
+        ...
+        OK
+
+        >>> success #doctest: +SKIP
+        True
+
+        """
+
+        print("Running benchmarks for %s" % self.package_name)
+        self._show_system_info()
+
+        argv = self._test_argv(label, verbose, extra_argv)
+        # Match names containing 'bench'/'Bench'. NOTE(review): inside this
+        # raw string '\\b' is a literal backslash followed by 'b', not a word
+        # boundary -- kept as-is for backward compatibility.
+        argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+        # import nose or make informative error
+        nose = import_nose()
+
+        # get plugin to disable doctests
+        from .noseclasses import Unplugger
+        add_plugins = [Unplugger('doctest')]
+
+        return nose.run(argv=argv, addplugins=add_plugins)
+
+
+def _numpy_tester():
+    """Return a NoseTester for the caller's module, raising warnings for
+    development (.dev0) versions of numpy but not for releases."""
+    if hasattr(np, "__version__") and ".dev0" in np.__version__:
+        mode = "develop"
+    else:
+        mode = "release"
+    # depth=1 so the tester initializes from the module that calls this
+    # helper, not from this file.
+    return NoseTester(raise_warnings=mode, depth=1,
+                      check_fpu_mode=True)
diff --git a/numpy/testing/nose_tools/parameterized.py b/numpy/testing/nose_tools/parameterized.py
new file mode 100644
index 000000000..962fddcbf
--- /dev/null
+++ b/numpy/testing/nose_tools/parameterized.py
@@ -0,0 +1,489 @@
+"""
+tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
+
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
+
+"""
+import re
+import sys
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType as MethodType
+from collections import namedtuple
+
+try:
+ from collections import OrderedDict as MaybeOrderedDict
+except ImportError:
+ MaybeOrderedDict = dict
+
+from unittest import TestCase
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+# Python 2/3 compatibility shims used throughout this module.
+if PY3:
+    # Python 3 doesn't have an InstanceType, so just use a dummy type.
+    class InstanceType():
+        pass
+    lzip = lambda *a: list(zip(*a))
+    text_type = str
+    string_types = str,
+    bytes_type = bytes
+    def make_method(func, instance, type):
+        # On Python 3 an "unbound method" is just the plain function.
+        if instance is None:
+            return func
+        return MethodType(func, instance)
+else:
+    from types import InstanceType
+    lzip = zip
+    text_type = unicode
+    bytes_type = str
+    string_types = basestring,
+    def make_method(func, instance, type):
+        return MethodType(func, instance, type)
+
+# (args, kwargs) pair underlying the public `param` class below.
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+    """ Represents a single parameter to a test case.
+
+        For example::
+
+            >>> p = param("foo", bar=16)
+            >>> p
+            param("foo", bar=16)
+            >>> p.args
+            ('foo', )
+            >>> p.kwargs
+            {'bar': 16}
+
+        Intended to be used as an argument to ``@parameterized``::
+
+            @parameterized([
+                param("foo", bar=16),
+            ])
+            def test_stuff(foo, bar=16):
+                pass
+        """
+
+    def __new__(cls, *args , **kwargs):
+        # Pack positional and keyword arguments into the namedtuple fields.
+        return _param.__new__(cls, args, kwargs)
+
+    @classmethod
+    def explicit(cls, args=None, kwargs=None):
+        """ Creates a ``param`` by explicitly specifying ``args`` and
+            ``kwargs``::
+
+                >>> param.explicit([1,2,3])
+                param(*(1, 2, 3))
+                >>> param.explicit(kwargs={"foo": 42})
+                param(*(), **{"foo": "42"})
+            """
+        args = args or ()
+        kwargs = kwargs or {}
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_decorator(cls, args):
+        """ Returns an instance of ``param()`` for ``@parameterized`` argument
+            ``args``::
+
+                >>> param.from_decorator((42, ))
+                param(args=(42, ), kwargs={})
+                >>> param.from_decorator("foo")
+                param(args=("foo", ), kwargs={})
+            """
+        if isinstance(args, param):
+            return args
+        elif isinstance(args, string_types):
+            args = (args, )
+        try:
+            return cls(*args)
+        except TypeError as e:
+            # Only translate TypeErrors caused by bad argument unpacking;
+            # re-raise anything else unchanged.
+            if "after * must be" not in str(e):
+                raise
+            raise TypeError(
+                "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+                %(args, args),
+            )
+
+    def __repr__(self):
+        return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+    """ When OrderedDict is available, use it to make sure that the kwargs in
+    doc strings are consistently ordered. """
+    # Use the plain-dict string forms so reprs/doctest output look the same
+    # whether or not OrderedDict is available.
+    __str__ = dict.__str__
+    __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+    """Return tuples of parameterized arguments and their values.
+
+    This is useful if you are writing your own doc_func
+    function and need to know the values for each parameter name::
+
+        >>> def func(a, foo=None, bar=42, **kwargs): pass
+        >>> p = param(1, foo=7, extra=99)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+    If the function's first argument is named ``self`` then it will be
+    ignored::
+
+        >>> def func(self, a): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("a", 1)]
+
+    Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+        >>> def func(foo, *args): pass
+        >>> p = param(1)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1)]
+        >>> p = param(1, 16)
+        >>> parameterized_argument_value_pairs(func, p)
+        [("foo", 1), ("*args", (16, ))]
+    """
+    argspec = inspect.getargspec(func)
+    # Skip a leading 'self' parameter when inspecting methods.
+    arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+    named_args = argspec.args[arg_offset:]
+
+    # Pair positional values with named parameters first...
+    result = lzip(named_args, p.args)
+    # ...remaining named parameters come from kwargs/defaults, and any
+    # leftover positional values belong to *args.
+    named_args = argspec.args[len(result) + arg_offset:]
+    varargs = p.args[len(result):]
+
+    result.extend([
+        (name, p.kwargs.get(name, default))
+        for (name, default)
+        in zip(named_args, argspec.defaults or [])
+    ])
+
+    # Keyword arguments not consumed above go into **kwargs.
+    seen_arg_names = set([ n for (n, _) in result ])
+    keywords = QuietOrderedDict(sorted([
+        (name, p.kwargs[name])
+        for name in p.kwargs
+        if name not in seen_arg_names
+    ]))
+
+    if varargs:
+        result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+    if keywords:
+        result.append(("**%s" %(argspec.keywords, ), keywords))
+
+    return result
+
+def short_repr(x, n=64):
+    """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+            >>> short_repr("foo")
+            u"foo"
+            >>> short_repr("123456789", n=4)
+            u"12...89"
+    """
+
+    x_repr = repr(x)
+    if isinstance(x_repr, bytes_type):
+        # On Python 2, repr() returns bytes; decode to unicode text.
+        try:
+            x_repr = text_type(x_repr, "utf-8")
+        except UnicodeDecodeError:
+            x_repr = text_type(x_repr, "latin1")
+    if len(x_repr) > n:
+        # Keep the head and tail of the repr, eliding the middle.
+        x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+    return x_repr
+
+def default_doc_func(func, num, p):
+    """Default doc_func: append ``[with arg=value, ...]`` to the first line
+    of ``func``'s docstring, or return None if it has no docstring."""
+    if func.__doc__ is None:
+        return None
+
+    all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+    # Assumes that the function passed is a bound method.
+    descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+    # The documentation might be a multiline string, so split it
+    # and just work with the first string, ignoring the period
+    # at the end if there is one.
+    first, nl, rest = func.__doc__.lstrip().partition("\n")
+    suffix = ""
+    if first.endswith("."):
+        suffix = "."
+        first = first[:-1]
+    args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+    return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+    """Default name_func: ``<funcname>_<num>[_<sanitized first string arg>]``."""
+    base_name = func.__name__
+    name_suffix = "_%s" %(num, )
+    if len(p.args) > 0 and isinstance(p.args[0], string_types):
+        name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+    return base_name + name_suffix
+
+
+# Runner-detection state: an explicit override (set_test_runner) takes
+# precedence over the guess cached by detect_runner().
+_test_runner_override = None
+_test_runner_guess = False
+_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
+# Module-name aliases mapped to their canonical runner name.
+_test_runner_aliases = {
+    "_pytest": "pytest",
+}
+
+def set_test_runner(name):
+    """Explicitly set which test runner is in use, bypassing detection.
+
+    Raises TypeError if ``name`` is not one of the known runners.
+    """
+    global _test_runner_override
+    if name not in _test_runners:
+        raise TypeError(
+            "Invalid test runner: %r (must be one of: %s)"
+            %(name, ", ".join(_test_runners)),
+        )
+    _test_runner_override = name
+
+def detect_runner():
+    """ Guess which test runner we're using by traversing the stack and looking
+        for the first matching module. This *should* be reasonably safe, as
+        it's done during test discovery where the test runner should be the
+        stack frame immediately outside. """
+    if _test_runner_override is not None:
+        return _test_runner_override
+    global _test_runner_guess
+    if _test_runner_guess is False:
+        # First call: walk from the outermost frame inwards and cache the
+        # first known runner module found.
+        stack = inspect.stack()
+        for record in reversed(stack):
+            frame = record[0]
+            module = frame.f_globals.get("__name__").partition(".")[0]
+            if module in _test_runner_aliases:
+                module = _test_runner_aliases[module]
+            if module in _test_runners:
+                _test_runner_guess = module
+                break
+            # Python 2.6's unittest doesn't show up by module name.
+            if record[1].endswith("python2.6/unittest.py"):
+                _test_runner_guess = "unittest"
+                break
+        else:
+            _test_runner_guess = None
+    return _test_runner_guess
+
+class parameterized(object):
+ """ Parameterize a test case::
+
+ class TestInt(object):
+ @parameterized([
+ ("A", 10),
+ ("F", 15),
+ param("10", 42, base=42)
+ ])
+ def test_int(self, input, expected, base=16):
+ actual = int(input, base=base)
+ assert_equal(actual, expected)
+
+ @parameterized([
+ (2, 3, 5)
+ (3, 5, 8),
+ ])
+ def test_add(a, b, expected):
+ assert_equal(a + b, expected)
+ """
+
+    def __init__(self, input, doc_func=None):
+        # `input` may be a list of params or a callable returning one;
+        # normalize it to a zero-argument callable.
+        self.get_input = self.input_as_callable(input)
+        self.doc_func = doc_func or default_doc_func
+
+    def __call__(self, test_func):
+        """Decorate ``test_func``, turning it into a nose test generator that
+        yields one test tuple per parameter."""
+        self.assert_not_in_testcase_subclass()
+
+        @wraps(test_func)
+        def wrapper(test_self=None):
+            test_cls = test_self and type(test_self)
+            if test_self is not None:
+                if issubclass(test_cls, InstanceType):
+                    raise TypeError((
+                        "@parameterized can't be used with old-style classes, but "
+                        "%r has an old-style class. Consider using a new-style "
+                        "class, or '@parameterized.expand' "
+                        "(see http://stackoverflow.com/q/54867/71522 for more "
+                        "information on old-style classes)."
+                    ) %(test_self, ))
+
+            original_doc = wrapper.__doc__
+            for num, args in enumerate(wrapper.parameterized_input):
+                p = param.from_decorator(args)
+                unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
+                try:
+                    wrapper.__doc__ = nose_tuple[0].__doc__
+                    # Nose uses `getattr(instance, test_func.__name__)` to get
+                    # a method bound to the test instance (as opposed to a
+                    # method bound to the instance of the class created when
+                    # tests were being enumerated). Set a value here to make
+                    # sure nose can get the correct test method.
+                    if test_self is not None:
+                        setattr(test_cls, test_func.__name__, unbound_func)
+                    yield nose_tuple
+                finally:
+                    if test_self is not None:
+                        delattr(test_cls, test_func.__name__)
+                    wrapper.__doc__ = original_doc
+        wrapper.parameterized_input = self.get_input()
+        wrapper.parameterized_func = test_func
+        # Rename the original function so nose won't also collect it directly.
+        test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
+        return wrapper
+
+    def param_as_nose_tuple(self, test_self, func, num, p):
+        """Return ``(unbound_func, nose_test_tuple)`` for parameter ``p``."""
+        nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
+        nose_func.__doc__ = self.doc_func(func, num, p)
+        # Track the unbound function because we need to setattr the unbound
+        # function onto the class for nose to work (see comments above), and
+        # Python 3 doesn't let us pull the function out of a bound method.
+        unbound_func = nose_func
+        if test_self is not None:
+            # Under nose on Py2 we need to return an unbound method to make
+            # sure that the `self` in the method is properly shared with the
+            # `self` used in `setUp` and `tearDown`. But only there. Everyone
+            # else needs a bound method.
+            func_self = (
+                None if PY2 and detect_runner() == "nose" else
+                test_self
+            )
+            nose_func = make_method(nose_func, func_self, type(test_self))
+        return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
+
+    def assert_not_in_testcase_subclass(self):
+        """Raise if the decorated function is being defined inside a
+        ``TestCase`` subclass (generators don't work there)."""
+        parent_classes = self._terrible_magic_get_defining_classes()
+        if any(issubclass(cls, TestCase) for cls in parent_classes):
+            raise Exception("Warning: '@parameterized' tests won't work "
+                            "inside subclasses of 'TestCase' - use "
+                            "'@parameterized.expand' instead.")
+
+    def _terrible_magic_get_defining_classes(self):
+        """ Returns the set of parent classes of the class currently being defined.
+            Will likely only work if called from the ``parameterized`` decorator.
+            This function is entirely @brandon_rhodes's fault, as he suggested
+            the implementation: http://stackoverflow.com/a/8793684/71522
+        """
+        stack = inspect.stack()
+        if len(stack) <= 4:
+            return []
+        # stack[4] is expected to be the `class ...:` statement being executed
+        # when the decorator runs inside a class body.
+        frame = stack[4]
+        code_context = frame[4] and frame[4][0].strip()
+        if not (code_context and code_context.startswith("class ")):
+            return []
+        # Extract the base-class list from "class Name(Base1, Base2):".
+        _, _, parents = code_context.partition("(")
+        parents, _, _ = parents.partition(")")
+        return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+    @classmethod
+    def input_as_callable(cls, input):
+        """Normalize ``input`` to a zero-argument callable returning a
+        validated list of ``param`` instances."""
+        if callable(input):
+            return lambda: cls.check_input_values(input())
+        input_values = cls.check_input_values(input)
+        return lambda: input_values
+
+    @classmethod
+    def check_input_values(cls, input_values):
+        # Explicitly convert non-list inputs to a list so that:
+        # 1. A helpful exception will be raised if they aren't iterable, and
+        # 2. Generators are unwrapped exactly once (otherwise `nosetests
+        #    --processes=n` has issues; see:
+        #    https://github.com/wolever/nose-parameterized/pull/31)
+        if not isinstance(input_values, list):
+            input_values = list(input_values)
+        return [ param.from_decorator(p) for p in input_values ]
+
+    @classmethod
+    def expand(cls, input, name_func=None, doc_func=None, **legacy):
+        """ A "brute force" method of parameterizing test cases. Creates new
+            test cases and injects them into the namespace that the wrapped
+            function is being defined in. Useful for parameterizing tests in
+            subclasses of 'UnitTest', where Nose test generators don't work.
+
+            >>> @parameterized.expand([("foo", 1, 2)])
+            ... def test_add1(name, input, expected):
+            ...     actual = add1(input)
+            ...     assert_equal(actual, expected)
+            ...
+            >>> locals()
+            ... 'test_add1_foo_0': <function ...> ...
+            >>>
+            """
+
+        if "testcase_func_name" in legacy:
+            warnings.warn("testcase_func_name= is deprecated; use name_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not name_func:
+                name_func = legacy["testcase_func_name"]
+
+        if "testcase_func_doc" in legacy:
+            warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+                          DeprecationWarning, stacklevel=2)
+            if not doc_func:
+                doc_func = legacy["testcase_func_doc"]
+
+        doc_func = doc_func or default_doc_func
+        name_func = name_func or default_name_func
+
+        def parameterized_expand_wrapper(f, instance=None):
+            # Inject one standalone test per parameter into the namespace
+            # where the decorated function is being defined (the class body).
+            stack = inspect.stack()
+            frame = stack[1]
+            frame_locals = frame[0].f_locals
+
+            # NOTE(review): local name 'paramters' is a typo for 'parameters'
+            # (used consistently below, so behavior is unaffected).
+            paramters = cls.input_as_callable(input)()
+            for num, p in enumerate(paramters):
+                name = name_func(f, num, p)
+                frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+                frame_locals[name].__doc__ = doc_func(f, num, p)
+
+            # Prevent the original function from being collected as a test.
+            f.__test__ = False
+        return parameterized_expand_wrapper
+
+    @classmethod
+    def param_as_standalone_func(cls, p, func, name):
+        """Bind parameter ``p`` into a standalone test function named ``name``."""
+        @wraps(func)
+        def standalone_func(*a):
+            return func(*(a + p.args), **p.kwargs)
+        standalone_func.__name__ = name
+
+        # place_as is used by py.test to determine what source file should be
+        # used for this test.
+        standalone_func.place_as = func
+
+        # Remove __wrapped__ because py.test will try to look at __wrapped__
+        # to determine which parameters should be used with this test case,
+        # and obviously we don't need it to do any parameterization.
+        try:
+            del standalone_func.__wrapped__
+        except AttributeError:
+            pass
+        return standalone_func
+
+    @classmethod
+    def to_safe_name(cls, s):
+        # Collapse any run of non-identifier characters to a single '_'.
+        return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
diff --git a/numpy/testing/nose_tools/utils.py b/numpy/testing/nose_tools/utils.py
new file mode 100644
index 000000000..302cf32ff
--- /dev/null
+++ b/numpy/testing/nose_tools/utils.py
@@ -0,0 +1,2229 @@
+"""
+Utility function to facilitate testing.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import re
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+
+from numpy.core import(
+ float32, empty, arange, array_repr, ndarray, isnat, array)
+from numpy.lib.utils import deprecate
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+# Public API of this module (note: also re-exports unittest's SkipTest and
+# the private helpers _assert_valid_refcount/_gen_alignment_data).
+__all__ = [
+        'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+        'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+        'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+        'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+        'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+        'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+        'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+        'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+        'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+        'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+        '_assert_valid_refcount', '_gen_alignment_data',
+        ]
+
+
+class KnownFailureException(Exception):
+    '''Raise this exception to mark a test as a known failing test.'''
+    # Deliberately carries no extra state; the type itself is the signal.
+    pass
+
+
+KnownFailureTest = KnownFailureException  # backwards compat
+# Module-wide verbosity level; presumably consulted by measure()/test
+# runners defined elsewhere in the package — confirm against callers.
+verbose = 0
+
+# True when running under the PyPy interpreter.
+IS_PYPY = '__pypy__' in sys.modules
+# sys.getrefcount is CPython-only; used to guard refcount-based tests.
+HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+
+
+def import_nose():
+    """ Import nose only when needed.
+
+    Returns the imported ``nose`` module, or raises ImportError if nose is
+    missing or older than the minimum supported version.
+    """
+    nose_is_good = True
+    # Version tuples compare lexicographically: (major, minor, micro).
+    minimum_nose_version = (1, 0, 0)
+    try:
+        import nose
+    except ImportError:
+        nose_is_good = False
+    else:
+        if nose.__versioninfo__ < minimum_nose_version:
+            nose_is_good = False
+
+    if not nose_is_good:
+        msg = ('Need nose >= %d.%d.%d for tests - see '
+               'http://nose.readthedocs.io' %
+               minimum_nose_version)
+        raise ImportError(msg)
+
+    return nose
+
+
+def assert_(val, msg=''):
+    """
+    Assert that works in release mode.
+    Accepts callable msg to allow deferring evaluation until failure.
+
+    The Python built-in ``assert`` does not work when executing code in
+    optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+    For documentation on usage, refer to the Python documentation.
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    if not val:
+        # ``msg`` may be a zero-argument callable (for expensive messages);
+        # a TypeError from calling it means it was a plain object instead.
+        try:
+            smsg = msg()
+        except TypeError:
+            smsg = msg
+        raise AssertionError(smsg)
+
+
+def gisnan(x):
+    """like isnan, but always raise an error if type not supported instead of
+    returning a TypeError object.
+
+    Notes
+    -----
+    isnan and other ufunc sometimes return a NotImplementedType object instead
+    of raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the Ufunc level."""
+    from numpy.core import isnan
+    st = isnan(x)
+    # A NotImplemented result means the ufunc could not handle ``x``.
+    if isinstance(st, type(NotImplemented)):
+        raise TypeError("isnan not supported for this type")
+    return st
+
+
+def gisfinite(x):
+    """like isfinite, but always raise an error if type not supported instead of
+    returning a TypeError object.
+
+    Notes
+    -----
+    isfinite and other ufunc sometimes return a NotImplementedType object instead
+    of raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the Ufunc level."""
+    from numpy.core import isfinite, errstate
+    # Suppress "invalid value" FP warnings triggered by nan inputs.
+    with errstate(invalid='ignore'):
+        st = isfinite(x)
+        if isinstance(st, type(NotImplemented)):
+            raise TypeError("isfinite not supported for this type")
+    return st
+
+
+def gisinf(x):
+    """like isinf, but always raise an error if type not supported instead of
+    returning a TypeError object.
+
+    Notes
+    -----
+    isinf and other ufunc sometimes return a NotImplementedType object instead
+    of raising any exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+    This should be removed once this problem is solved at the Ufunc level."""
+    from numpy.core import isinf, errstate
+    # Suppress "invalid value" FP warnings triggered by nan inputs.
+    with errstate(invalid='ignore'):
+        st = isinf(x)
+        if isinstance(st, type(NotImplemented)):
+            raise TypeError("isinf not supported for this type")
+    return st
+
+
+@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
+                   "Use numpy.random.rand instead.")
+def rand(*args):
+    """Returns an array of random numbers with the given shape.
+
+    This only uses the standard library, so it is useful for testing purposes.
+    """
+    # Note: fills the array with Python's ``random`` module, one element at a
+    # time via the flat iterator — not numpy.random.
+    import random
+    from numpy.core import zeros, float64
+    results = zeros(args, float64)
+    f = results.flat
+    for i in range(len(f)):
+        f[i] = random.random()
+    return results
+
+
+# Platform-specific memusage(): Windows (win32pdh), Linux (/proc), else stub.
+if os.name == 'nt':
+    # Code "stolen" from enthought/debug/memusage.py
+    def GetPerformanceAttributes(object, counter, instance=None,
+                                 inum=-1, format=None, machine=None):
+        # NOTE: Many counters require 2 samples to give accurate results,
+        # including "% Processor Time" (as by definition, at any instant, a
+        # thread's CPU usage is either 0 or 100). To read counters like this,
+        # you should copy this function, but keep the counter open, and call
+        # CollectQueryData() each time you need to know.
+        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
+        # My older explanation for this was that the "AddCounter" process forced
+        # the CPU to 100%, but the above makes more sense :)
+        import win32pdh
+        if format is None:
+            format = win32pdh.PDH_FMT_LONG
+        path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
+        hq = win32pdh.OpenQuery()
+        try:
+            hc = win32pdh.AddCounter(hq, path)
+            try:
+                win32pdh.CollectQueryData(hq)
+                type, val = win32pdh.GetFormattedCounterValue(hc, format)
+                return val
+            finally:
+                # Nested finally blocks guarantee both PDH handles are freed.
+                win32pdh.RemoveCounter(hc)
+        finally:
+            win32pdh.CloseQuery(hq)
+
+    def memusage(processName="python", instance=0):
+        # from win32pdhutil, part of the win32all package
+        import win32pdh
+        return GetPerformanceAttributes("Process", "Virtual Bytes",
+                                        processName, instance,
+                                        win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
+
+    def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
+        """
+        Return virtual memory size in bytes of the running python.
+
+        """
+        # Field 23 (index 22) of /proc/<pid>/stat is vsize; any parse/IO
+        # failure yields None (best-effort by design).
+        try:
+            f = open(_proc_pid_stat, 'r')
+            l = f.readline().split(' ')
+            f.close()
+            return int(l[22])
+        except Exception:
+            return
+else:
+    def memusage():
+        """
+        Return memory usage of running python. [Not implemented]
+
+        """
+        raise NotImplementedError
+
+
+if sys.platform[:5] == 'linux':
+    def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
+                _load_time=[]):
+        """
+        Return number of jiffies elapsed.
+
+        Return number of jiffies (1/100ths of a second) that this
+        process has been scheduled in user mode. See man 5 proc.
+
+        """
+        import time
+        # ``_load_time`` is a deliberate mutable default: it memoizes the
+        # first-call timestamp across invocations (used by the fallback).
+        if not _load_time:
+            _load_time.append(time.time())
+        try:
+            # Field 14 (index 13) of /proc/<pid>/stat is utime in jiffies.
+            f = open(_proc_pid_stat, 'r')
+            l = f.readline().split(' ')
+            f.close()
+            return int(l[13])
+        except Exception:
+            # Fall back to wall-clock hundredths of a second since first call.
+            return int(100*(time.time()-_load_time[0]))
+else:
+    # os.getpid is not in all platforms available.
+    # Using time is safe but inaccurate, especially when process
+    # was suspended or sleeping.
+    def jiffies(_load_time=[]):
+        """
+        Return number of jiffies elapsed.
+
+        Return number of jiffies (1/100ths of a second) that this
+        process has been scheduled in user mode. See man 5 proc.
+
+        """
+        import time
+        # Mutable default memoizes the first-call timestamp (intentional).
+        if not _load_time:
+            _load_time.append(time.time())
+        return int(100*(time.time()-_load_time[0]))
+
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+                  verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+    """
+    Build the multi-line failure message used by the assert_* helpers.
+
+    ``arrays`` are the objects being compared (paired positionally with
+    ``names``); ``err_msg`` is the caller-supplied message, appended inline
+    when short, on its own line otherwise. Returns the joined string.
+    """
+    msg = ['\n' + header]
+    if err_msg:
+        # Short single-line messages are folded onto the header line.
+        if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+            msg = [msg[0] + ' ' + err_msg]
+        else:
+            msg.append(err_msg)
+    if verbose:
+        for i, a in enumerate(arrays):
+
+            if isinstance(a, ndarray):
+                # precision argument is only needed if the objects are ndarrays
+                r_func = partial(array_repr, precision=precision)
+            else:
+                r_func = repr
+
+            try:
+                r = r_func(a)
+            except Exception as exc:
+                # repr itself may raise for exotic objects; report that
+                # instead of letting the assertion machinery blow up.
+                r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
+            # Truncate very long reprs to the first three lines.
+            if r.count('\n') > 3:
+                r = '\n'.join(r.splitlines()[:3])
+                r += '...'
+            msg.append(' %s: %s' % (names[i], r))
+    return '\n'.join(msg)
+
+
+def assert_equal(actual, desired, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal.
+
+    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+    check that all elements of these objects are equal. An exception is raised
+    at the first conflicting values.
+
+    Parameters
+    ----------
+    actual : array_like
+        The object to check.
+    desired : array_like
+        The expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal.
+
+    Examples
+    --------
+    >>> np.testing.assert_equal([4,5], [4,6])
+    ...
+    <type 'exceptions.AssertionError'>:
+    Items are not equal:
+    item=1
+     ACTUAL: 5
+     DESIRED: 6
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    # Dicts: same length, then recurse per key.
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        assert_equal(len(actual), len(desired), err_msg, verbose)
+        # NOTE: loop variable ``i`` (the value) is unused; the value is
+        # re-fetched as desired[k] below.
+        for k, i in desired.items():
+            if k not in actual:
+                raise AssertionError(repr(k))
+            assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
+        return
+    # Sequences: same length, then recurse per index.
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        assert_equal(len(actual), len(desired), err_msg, verbose)
+        for k in range(len(desired)):
+            assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
+        return
+    from numpy.core import ndarray, isscalar, signbit
+    from numpy.lib import iscomplexobj, real, imag
+    # Any ndarray involvement delegates to the array comparison path.
+    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
+        return assert_array_equal(actual, desired, err_msg, verbose)
+    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+    # Handle complex numbers: separate into real/imag to handle
+    # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+    try:
+        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+    except ValueError:
+        usecomplex = False
+
+    if usecomplex:
+        if iscomplexobj(actual):
+            actualr = real(actual)
+            actuali = imag(actual)
+        else:
+            actualr = actual
+            actuali = 0
+        if iscomplexobj(desired):
+            desiredr = real(desired)
+            desiredi = imag(desired)
+        else:
+            desiredr = desired
+            desiredi = 0
+        try:
+            assert_equal(actualr, desiredr)
+            assert_equal(actuali, desiredi)
+        except AssertionError:
+            raise AssertionError(msg)
+        # NOTE(review): no ``return`` here — on success execution falls
+        # through to the scalar/nan checks below; confirm this is intended.
+
+    # isscalar test to check cases such as [np.nan] != np.nan
+    if isscalar(desired) != isscalar(actual):
+        raise AssertionError(msg)
+
+    # Inf/nan/negative zero handling
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (gisfinite(desired) and gisfinite(actual)):
+            isdesnan = gisnan(desired)
+            isactnan = gisnan(actual)
+            if isdesnan or isactnan:
+                # nan == nan counts as equal here, unlike IEEE semantics.
+                if not (isdesnan and isactnan):
+                    raise AssertionError(msg)
+            else:
+                if not desired == actual:
+                    raise AssertionError(msg)
+            return
+        elif desired == 0 and actual == 0:
+            # Distinguish +0.0 from -0.0 via the sign bit.
+            if not signbit(desired) == signbit(actual):
+                raise AssertionError(msg)
+    # If TypeError or ValueError raised while using isnan and co, just handle
+    # as before
+    except (TypeError, ValueError, NotImplementedError):
+        pass
+
+    try:
+        # If both are NaT (and have the same dtype -- datetime or timedelta)
+        # they are considered equal.
+        # NOTE(review): when NEITHER value is NaT, ``isnat(desired) ==
+        # isnat(actual)`` is still True (False == False), so two *different*
+        # datetimes of the same dtype would be reported equal here. Looks
+        # like a bug — confirm against the upstream fix before relying on
+        # datetime comparisons through this path.
+        if (isnat(desired) == isnat(actual) and
+                array(desired).dtype.type == array(actual).dtype.type):
+            return
+        else:
+            raise AssertionError(msg)
+
+    # If TypeError or ValueError raised while using isnan and co, just handle
+    # as before
+    except (TypeError, ValueError, NotImplementedError):
+        pass
+
+    # Explicitly use __eq__ for comparison, ticket #2552
+    if not (desired == actual):
+        raise AssertionError(msg)
+
+
+def print_assert_equal(test_string, actual, desired):
+    """
+    Test if two objects are equal, and print an error message if test fails.
+
+    The test is performed with ``actual == desired``.
+
+    Parameters
+    ----------
+    test_string : str
+        The message supplied to AssertionError.
+    actual : object
+        The object to test for equality against `desired`.
+    desired : object
+        The expected result.
+
+    Examples
+    --------
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+    Traceback (most recent call last):
+    ...
+    AssertionError: Test XYZ of func xyz failed
+    ACTUAL:
+    [0, 1]
+    DESIRED:
+    [0, 2]
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import pprint
+
+    if not (actual == desired):
+        # Build the failure report in-memory with pprint for readable output.
+        msg = StringIO()
+        msg.write(test_string)
+        msg.write(' failed\nACTUAL: \n')
+        pprint.pprint(actual, msg)
+        msg.write('DESIRED: \n')
+        pprint.pprint(desired, msg)
+        raise AssertionError(msg.getvalue())
+
+
+def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
+    """
+    Raises an AssertionError if two items are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies that the elements of ``actual`` and ``desired`` satisfy.
+
+        ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+    That is a looser test than originally documented, but agrees with what the
+    actual implementation in `assert_array_almost_equal` did up to rounding
+    vagaries. An exception is raised at conflicting values. For ndarrays this
+    delegates to assert_array_almost_equal
+
+    Parameters
+    ----------
+    actual : array_like
+        The object to check.
+    desired : array_like
+        The expected object.
+    decimal : int, optional
+        Desired precision, default is 7.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    >>> import numpy.testing as npt
+    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
+    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
+    ...
+    <type 'exceptions.AssertionError'>:
+    Items are not equal:
+     ACTUAL: 2.3333333333333002
+     DESIRED: 2.3333333399999998
+
+    >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
+    ...                         np.array([1.0,2.33333334]), decimal=9)
+    ...
+    <type 'exceptions.AssertionError'>:
+    Arrays are not almost equal
+    <BLANKLINE>
+    (mismatch 50.0%)
+     x: array([ 1.        ,  2.33333333])
+     y: array([ 1.        ,  2.33333334])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import ndarray
+    from numpy.lib import iscomplexobj, real, imag
+
+    # Handle complex numbers: separate into real/imag to handle
+    # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+    try:
+        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+    except ValueError:
+        usecomplex = False
+
+    def _build_err_msg():
+        # Lazily formatted so the (possibly expensive) repr work only
+        # happens on failure.
+        header = ('Arrays are not almost equal to %d decimals' % decimal)
+        return build_err_msg([actual, desired], err_msg, verbose=verbose,
+                             header=header)
+
+    if usecomplex:
+        if iscomplexobj(actual):
+            actualr = real(actual)
+            actuali = imag(actual)
+        else:
+            actualr = actual
+            actuali = 0
+        if iscomplexobj(desired):
+            desiredr = real(desired)
+            desiredi = imag(desired)
+        else:
+            desiredr = desired
+            desiredi = 0
+        try:
+            assert_almost_equal(actualr, desiredr, decimal=decimal)
+            assert_almost_equal(actuali, desiredi, decimal=decimal)
+        except AssertionError:
+            raise AssertionError(_build_err_msg())
+        # NOTE(review): no ``return`` here — on success execution falls
+        # through to the scalar comparison below; confirm this is intended.
+
+    # Array-ish inputs delegate to the element-wise variant.
+    if isinstance(actual, (ndarray, tuple, list)) \
+            or isinstance(desired, (ndarray, tuple, list)):
+        return assert_array_almost_equal(actual, desired, decimal, err_msg)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (gisfinite(desired) and gisfinite(actual)):
+            if gisnan(desired) or gisnan(actual):
+                if not (gisnan(desired) and gisnan(actual)):
+                    raise AssertionError(_build_err_msg())
+            else:
+                if not desired == actual:
+                    raise AssertionError(_build_err_msg())
+            return
+    except (NotImplementedError, TypeError):
+        pass
+    # The documented tolerance: |desired - actual| < 1.5 * 10**(-decimal).
+    if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
+        raise AssertionError(_build_err_msg())
+
+
+def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
+    """
+    Raises an AssertionError if two items are not equal up to significant
+    digits.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    Given two numbers, check that they are approximately equal.
+    Approximately equal is defined as the number of significant digits
+    that agree.
+
+    Parameters
+    ----------
+    actual : scalar
+        The object to check.
+    desired : scalar
+        The expected object.
+    significant : int, optional
+        Desired precision, default is 7.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+                                       significant=8)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+                                       significant=8)
+    ...
+    <type 'exceptions.AssertionError'>:
+    Items are not equal to 8 significant digits:
+     ACTUAL: 1.234567e-021
+     DESIRED: 1.2345672000000001e-021
+
+    the evaluated condition that raises the exception is
+
+    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+    True
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    # Coerce both operands to plain floats before comparing.
+    (actual, desired) = map(float, (actual, desired))
+    if desired == actual:
+        return
+    # Normalized the numbers to be in range (-10.0,10.0)
+    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+    with np.errstate(invalid='ignore'):
+        scale = 0.5*(np.abs(desired) + np.abs(actual))
+        scale = np.power(10, np.floor(np.log10(scale)))
+    try:
+        sc_desired = desired/scale
+    except ZeroDivisionError:
+        sc_desired = 0.0
+    try:
+        sc_actual = actual/scale
+    except ZeroDivisionError:
+        sc_actual = 0.0
+    msg = build_err_msg([actual, desired], err_msg,
+                        header='Items are not equal to %d significant digits:' %
+                        significant,
+                        verbose=verbose)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (gisfinite(desired) and gisfinite(actual)):
+            if gisnan(desired) or gisnan(actual):
+                if not (gisnan(desired) and gisnan(actual)):
+                    raise AssertionError(msg)
+            else:
+                if not desired == actual:
+                    raise AssertionError(msg)
+            return
+    except (TypeError, NotImplementedError):
+        pass
+    # Compare the scale-normalized values to ``significant`` digits.
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+        raise AssertionError(msg)
+
+
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
+                         header='', precision=6, equal_nan=True,
+                         equal_inf=True):
+    """
+    Shared workhorse behind the array assert_* functions.
+
+    ``comparison`` is an element-wise predicate (e.g. ``operator.__eq__``)
+    applied after shape checking and after optionally screening out
+    matching nan/inf/NaT positions. Raises AssertionError with a formatted
+    mismatch report (names 'x' and 'y') when the predicate fails anywhere.
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import array, isnan, isinf, any, inf
+    x = array(x, copy=False, subok=True)
+    y = array(y, copy=False, subok=True)
+
+    def isnumber(x):
+        # Boolean, integer, float and complex dtype kind characters.
+        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+    def istime(x):
+        # datetime64 ('M') or timedelta64 ('m').
+        return x.dtype.char in "Mm"
+
+    def chk_same_position(x_id, y_id, hasval='nan'):
+        """Handling nan/inf: check that x and y have the nan/inf at the same
+        locations."""
+        try:
+            assert_array_equal(x_id, y_id)
+        except AssertionError:
+            msg = build_err_msg([x, y],
+                                err_msg + '\nx and y %s location mismatch:'
+                                % (hasval), verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+
+    try:
+        # Scalars broadcast against anything; otherwise shapes must match.
+        cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+        if not cond:
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + '\n(shapes %s, %s mismatch)' % (x.shape,
+                                                                  y.shape),
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+
+        if isnumber(x) and isnumber(y):
+            has_nan = has_inf = False
+            if equal_nan:
+                x_isnan, y_isnan = isnan(x), isnan(y)
+                # Validate that NaNs are in the same place
+                has_nan = any(x_isnan) or any(y_isnan)
+                if has_nan:
+                    chk_same_position(x_isnan, y_isnan, hasval='nan')
+
+            if equal_inf:
+                x_isinf, y_isinf = isinf(x), isinf(y)
+                # Validate that infinite values are in the same place
+                has_inf = any(x_isinf) or any(y_isinf)
+                if has_inf:
+                    # Check +inf and -inf separately, since they are different
+                    chk_same_position(x == +inf, y == +inf, hasval='+inf')
+                    chk_same_position(x == -inf, y == -inf, hasval='-inf')
+
+            # Drop the matched nan/inf positions before the value compare.
+            if has_nan and has_inf:
+                x = x[~(x_isnan | x_isinf)]
+                y = y[~(y_isnan | y_isinf)]
+            elif has_nan:
+                x = x[~x_isnan]
+                y = y[~y_isnan]
+            elif has_inf:
+                x = x[~x_isinf]
+                y = y[~y_isinf]
+
+            # Only do the comparison if actual values are left
+            if x.size == 0:
+                return
+
+        elif istime(x) and istime(y):
+            # If one is datetime64 and the other timedelta64 there is no point
+            if equal_nan and x.dtype.type == y.dtype.type:
+                x_isnat, y_isnat = isnat(x), isnat(y)
+
+                if any(x_isnat) or any(y_isnat):
+                    chk_same_position(x_isnat, y_isnat, hasval="NaT")
+
+                if any(x_isnat) or any(y_isnat):
+                    x = x[~x_isnat]
+                    y = y[~y_isnat]
+
+        val = comparison(x, y)
+
+        if isinstance(val, bool):
+            cond = val
+            reduced = [0]
+        else:
+            reduced = val.ravel()
+            cond = reduced.all()
+            reduced = reduced.tolist()
+        if not cond:
+            # Percentage of elements failing the predicate.
+            match = 100-100.0*reduced.count(1)/len(reduced)
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + '\n(mismatch %s%%)' % (match,),
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            # Redundant re-check (cond is already known False here).
+            if not cond:
+                raise AssertionError(msg)
+    except ValueError:
+        # Comparison itself blew up (e.g. broadcasting error): report the
+        # traceback inside the error message rather than asserting.
+        import traceback
+        efmt = traceback.format_exc()
+        header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
+
+        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+                            names=('x', 'y'), precision=precision)
+        raise ValueError(msg)
+
+
+def assert_array_equal(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not equal.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of these objects are equal. An exception is raised at
+    shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if
+    both objects have NaNs in the same positions.
+
+    The usual caution for verifying equality with floating point numbers is
+    advised.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired objects are not equal.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    The first assert does not raise an exception:
+
+    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+    ...                               [np.exp(0),2.33333, np.nan])
+
+    Assert fails with numerical inprecision with floats:
+
+    >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+    ...                               [1, np.sqrt(np.pi)**2, np.nan])
+    ...
+    <type 'exceptions.ValueError'>:
+    AssertionError:
+    Arrays are not equal
+    <BLANKLINE>
+    (mismatch 50.0%)
+     x: array([ 1.        ,  3.14159265,         NaN])
+     y: array([ 1.        ,  3.14159265,         NaN])
+
+    Use `assert_allclose` or one of the nulp (number of floating point values)
+    functions for these cases instead:
+
+    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+    ...                            [1, np.sqrt(np.pi)**2, np.nan],
+    ...                            rtol=1e-10, atol=0)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    # Element-wise == via the shared comparison driver.
+    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+                         verbose=verbose, header='Arrays are not equal')
+
+
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies identical shapes and that the elements of ``actual`` and
+    ``desired`` satisfy.
+
+        ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+    That is a looser test than originally documented, but agrees with what the
+    actual implementation did up to rounding vagaries. An exception is raised
+    at shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if both
+    objects have NaNs in the same positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    decimal : int, optional
+        Desired precision, default is 6.
+    err_msg : str, optional
+      The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    the first assert does not raise an exception
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+                                             [1.0,2.333,np.nan])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33339,np.nan], decimal=5)
+    ...
+    <type 'exceptions.AssertionError'>:
+    AssertionError:
+    Arrays are not almost equal
+    <BLANKLINE>
+    (mismatch 50.0%)
+     x: array([ 1.     ,  2.33333,      NaN])
+     y: array([ 1.     ,  2.33339,      NaN])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33333, 5], decimal=5)
+    <type 'exceptions.ValueError'>:
+    ValueError:
+    Arrays are not almost equal
+     x: array([ 1.     ,  2.33333,      NaN])
+     y: array([ 1.     ,  2.33333,  5.     ])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import around, number, float_, result_type, array
+    from numpy.core.numerictypes import issubdtype
+    from numpy.core.fromnumeric import any as npany
+
+    def compare(x, y):
+        # Element-wise predicate handed to assert_array_compare.
+        try:
+            # Infs must match in position and sign; then strip them so the
+            # finite remainder can be compared by magnitude.
+            if npany(gisinf(x)) or npany( gisinf(y)):
+                xinfid = gisinf(x)
+                yinfid = gisinf(y)
+                if not (xinfid == yinfid).all():
+                    return False
+                # if one item, x and y is +- inf
+                if x.size == y.size == 1:
+                    return x == y
+                x = x[~xinfid]
+                y = y[~yinfid]
+        except (TypeError, NotImplementedError):
+            pass
+
+        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+        # casting of x later.
+        dtype = result_type(y, 1.)
+        y = array(y, dtype=dtype, copy=False, subok=True)
+        z = abs(x - y)
+
+        if not issubdtype(z.dtype, number):
+            z = z.astype(float_)  # handle object arrays
+
+        return z < 1.5 * 10.0**(-decimal)
+
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+             header=('Arrays are not almost equal to %d decimals' % decimal),
+             precision=decimal)
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not ordered by less
+    than.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of the first object are strictly smaller than those of the
+    second object. An exception is raised at shape mismatch or incorrectly
+    ordered values. Shape mismatch does not raise if an object has zero
+    dimension. In contrast to the standard usage in numpy, NaNs are
+    compared, no assertion is raised if both objects have NaNs in the same
+    positions.
+
+
+
+    Parameters
+    ----------
+    x : array_like
+      The smaller object to check.
+    y : array_like
+      The larger object to compare.
+    err_msg : string
+      The error message to be printed in case of failure.
+    verbose : bool
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If actual and desired objects are not equal.
+
+    See Also
+    --------
+    assert_array_equal: tests objects for equality
+    assert_array_almost_equal: test objects for equality up to precision
+
+
+
+    Examples
+    --------
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (mismatch 50.0%)
+     x: array([  1.,   1.,  NaN])
+     y: array([  1.,   2.,  NaN])
+
+    >>> np.testing.assert_array_less([1.0, 4.0], 3)
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (mismatch 50.0%)
+     x: array([ 1.,  4.])
+     y: array(3)
+
+    >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+    ...
+    <type 'exceptions.ValueError'>:
+    Arrays are not less-ordered
+    (shapes (3,), (1,) mismatch)
+     x: array([ 1.,  2.,  3.])
+     y: array([4])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    # equal_inf=False: infs must satisfy the strict < relation themselves
+    # rather than being matched by position.
+    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+                         verbose=verbose,
+                         header='Arrays are not less-ordered',
+                         equal_inf=False)
+
+
+def runstring(astr, dict):
+    # Execute the code string ``astr`` in the namespace ``dict``.
+    # NOTE(review): the parameter name shadows the ``dict`` builtin; kept
+    # as-is for backward compatibility.
+    exec(astr, dict)
+
+
+def assert_string_equal(actual, desired):
+    """
+    Test if two strings are equal.
+
+    If the given strings are equal, `assert_string_equal` does nothing.
+    If they are not equal, an AssertionError is raised, and the diff
+    between the strings is shown.
+
+    Parameters
+    ----------
+    actual : str
+        The string to test for equality against the expected string.
+    desired : str
+        The expected string.
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "<stdin>", line 1, in <module>
+    ...
+    AssertionError: Differences in strings:
+    - abc+ abcd?    +
+
+    """
+    # delay import of difflib to reduce startup time
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import difflib
+
+    if not isinstance(actual, str):
+        raise AssertionError(repr(type(actual)))
+    if not isinstance(desired, str):
+        raise AssertionError(repr(type(desired)))
+    # Fast path: ``desired`` is treated as a regex anchored at both ends.
+    if re.match(r'\A'+desired+r'\Z', actual, re.M):
+        return
+
+    # splitlines(1) keeps line endings so the diff reproduces them.
+    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
+    diff_list = []
+    # Walk the Differ output, collecting '-'/'+'/'?' runs that represent a
+    # genuine change; '? ' hint lines are attached to the change they follow.
+    while diff:
+        d1 = diff.pop(0)
+        if d1.startswith('  '):
+            continue
+        if d1.startswith('- '):
+            l = [d1]
+            d2 = diff.pop(0)
+            if d2.startswith('? '):
+                l.append(d2)
+                d2 = diff.pop(0)
+            if not d2.startswith('+ '):
+                raise AssertionError(repr(d2))
+            l.append(d2)
+            if diff:
+                d3 = diff.pop(0)
+                if d3.startswith('? '):
+                    l.append(d3)
+                else:
+                    # Not a hint line; push it back for the next iteration.
+                    diff.insert(0, d3)
+            # Lines that differ only per the regex match are not reported.
+            if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
+                continue
+            diff_list.extend(l)
+            continue
+        raise AssertionError(repr(d1))
+    if not diff_list:
+        return
+    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
+    if actual != desired:
+        raise AssertionError(msg)
+
+
def rundocs(filename=None, raise_on_error=True):
    """
    Run doctests found in the given file.

    By default `rundocs` raises an AssertionError on failure.

    Parameters
    ----------
    filename : str
        The path to the file for which the doctests are run.
    raise_on_error : bool
        Whether to raise an AssertionError when a doctest fails. Default is
        True.

    Notes
    -----
    The doctests can be run by the user/developer by adding the ``doctests``
    argument to the ``test()`` call. For example, to run all tests (including
    doctests) for `numpy.lib`:

    >>> np.lib.test(doctests=True) #doctest: +SKIP
    """
    from numpy.compat import npy_load_module
    import doctest
    if filename is None:
        # Default to the file of the caller.
        caller = sys._getframe(1)
        filename = caller.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    module = npy_load_module(name, filename)

    runner = doctest.DocTestRunner(verbose=False)

    # Collect failure output only when we intend to raise on it.
    msg = []
    out = msg.append if raise_on_error else None

    for test in doctest.DocTestFinder().find(module):
        runner.run(test, out=out)

    if raise_on_error and runner.failures > 0:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
def raises(*args, **kwargs):
    """Decorator forwarding directly to ``nose.tools.raises``."""
    return import_nose().tools.raises(*args, **kwargs)
+
+
def assert_raises(*args, **kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    assert_raises(exception_class)

    Fail unless an exception of class exception_class is thrown
    by callable when invoked with arguments args and keyword
    arguments kwargs. If a different type of exception is
    thrown, it will not be caught, and the test case will be
    deemed to have suffered an error, exactly as for an
    unexpected exception.

    Alternatively, `assert_raises` can be used as a context manager:

    >>> from numpy.testing import assert_raises
    >>> with assert_raises(ZeroDivisionError):
    ...     1 / 0

    is equivalent to

    >>> def div(x, y):
    ...     return x / y
    >>> assert_raises(ZeroDivisionError, div, 1, 0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    return import_nose().tools.assert_raises(*args, **kwargs)
+
+
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """
    assert_raises_regex(exception_class, expected_regexp, callable, *args,
                        **kwargs)
    assert_raises_regex(exception_class, expected_regexp)

    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.

    Alternatively, can be used as a context manager like `assert_raises`.

    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.

    Notes
    -----
    .. versionadded:: 1.9.0

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    nose = import_nose()

    if sys.version_info.major >= 3:
        check = nose.tools.assert_raises_regex
    else:
        # Python 2.7 spelling; absent from unittest in 2.6.
        check = nose.tools.assert_raises_regexp
    return check(exception_class, expected_regexp, *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+ """
+ Apply a decorator to all methods in a class matching a regular expression.
+
+ The given decorator is applied to all public methods of `cls` that are
+ matched by the regular expression `testmatch`
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
+ with an underscore, are ignored.
+
+ Parameters
+ ----------
+ cls : class
+ Class whose methods to decorate.
+ decorator : function
+ Decorator to apply to methods
+ testmatch : compiled regexp or str, optional
+ The regular expression. Default value is None, in which case the
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+ is used.
+ If `testmatch` is a string, it is compiled to a regular expression
+ first.
+
+ """
+ if testmatch is None:
+ testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ else:
+ testmatch = re.compile(testmatch)
+ cls_attr = cls.__dict__
+
+ # delayed import to reduce startup time
+ from inspect import isfunction
+
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+ for function in methods:
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ continue
+ if testmatch.search(funcname) and not funcname.startswith('_'):
+ setattr(cls, funcname, decorator(function))
+ return
+
+
def measure(code_str, times=1, label=None):
    """
    Return elapsed time for executing code in the namespace of the caller.

    The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milli-seconds. If the code will execute
    fast on this timescale, it can be executed many times to get reasonable
    timing accuracy.

    Parameters
    ----------
    code_str : str
        The code to be timed.
    times : int, optional
        The number of times the code is executed. Default is 1. The code is
        only compiled once.
    label : str, optional
        A label to identify `code_str` with. This is passed into ``compile``
        as the second argument (for run-time error messages).

    Returns
    -------
    elapsed : float
        Total elapsed time in seconds for executing `code_str` `times` times.

    Examples
    --------
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
    ...                            times=times)
    >>> print("Time for a single execution : ", etime / times, "s")
    Time for a single execution :  0.005 s

    """
    # Execute in the caller's namespace so the timed code can see its
    # local and global variables.
    frame = sys._getframe(1)
    locs, globs = frame.f_locals, frame.f_globals

    code = compile(code_str, 'Test name: %s ' % label, 'exec')
    count = 0
    start = jiffies()
    while count < times:
        count += 1
        exec(code, globs, locs)
    # jiffies() ticks at 100 Hz, hence the 0.01 s conversion factor.
    return 0.01 * (jiffies() - start)
+
+
def _assert_valid_refcount(op):
    """
    Check that ufuncs don't mishandle refcount of object `1`.
    Used in a few regression tests.
    """
    if not HAS_REFCOUNT:
        return True
    import numpy as np

    arr = np.arange(100*100).reshape(100, 100)
    other = arr
    one = 1

    # The refcount of the small-int singleton must never drop below its
    # value before the operation is applied repeatedly.
    start_count = sys.getrefcount(one)
    for _ in range(15):
        res = op(arr, other)
        assert_(sys.getrefcount(one) >= start_count)
        del res  # for pyflakes
+
+
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
                    err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    tolerance.

    The test is equivalent to ``allclose(actual, desired, rtol, atol)``:
    the difference between `actual` and `desired` is compared against
    ``atol + rtol * abs(desired)``.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp

    Examples
    --------
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
    header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)

    def _comparator(a, b):
        # Element-wise closeness test, same semantics as np.isclose.
        return np.core.numeric.isclose(a, b, rtol=rtol, atol=atol,
                                       equal_nan=equal_nan)

    assert_array_compare(_comparator, actual, desired, err_msg=str(err_msg),
                         verbose=verbose, header=header, equal_nan=equal_nan)
+
+
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose amplitude
    is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see Notes).
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is larger
        than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)

    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
    Traceback (most recent call last):
      ...
    AssertionError: X and Y are not equal to 1 ULP (max is 2)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ax, ay = np.abs(x), np.abs(y)
    # Tolerance: nulp times the float spacing at the larger magnitude.
    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
    if np.all(np.abs(x-y) <= ref):
        return
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        msg = "X and Y are not equal to %d ULP" % nulp
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
    raise AssertionError(msg)
+
+
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a` and
        `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers between
        items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.

    Examples
    --------
    >>> a = np.linspace(0., 1., 100)
    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ulps = nulp_diff(a, b, dtype)
    if np.all(ulps <= maxulp):
        return ulps
    raise AssertionError("Arrays are not almost equal up to %g ULP" % maxulp)
+
+
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such as 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    if dtype:
        x, y = np.array(x, dtype=dtype), np.array(y, dtype=dtype)
    else:
        x, y = np.array(x), np.array(y)

    # Promote both inputs to a common float type before the bit-level
    # comparison; complex values are not supported.
    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if x.shape != y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    # The difference of the sign-magnitude integer representations counts
    # the representable floats between the two values.
    rx = integer_repr(x)
    ry = integer_repr(y)
    return np.abs(np.array(rx - ry, dtype=t))
+
+
+def _integer_repr(x, vdt, comp):
+ # Reinterpret binary representation of the float as sign-magnitude:
+ # take into account two-complement representation
+ # See also
+ # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+ rx = x.view(vdt)
+ if not (rx.size == 1):
+ rx[rx < 0] = comp - rx[rx < 0]
+ else:
+ if rx < 0:
+ rx = comp - rx
+
+ return rx
+
+
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation
    of x.

    Parameters
    ----------
    x : ndarray
        Array of IEEE-754 floats (float16, float32 or float64).

    Returns
    -------
    ndarray
        Integer view of `x`, ordered so that the difference between two
        values counts the representable floats (ULPs) between them.

    Raises
    ------
    ValueError
        If ``x.dtype`` is not one of the supported float types.
    """
    import numpy as np
    if x.dtype == np.float16:
        # Generalization: IEEE binary16 is 16 bits wide, so the same
        # sign-magnitude conversion works with an int16 view and an
        # offset of -2**15.
        return _integer_repr(x, np.int16, np.int16(-2**15))
    elif x.dtype == np.float32:
        return _integer_repr(x, np.int32, np.int32(-2**31))
    elif x.dtype == np.float64:
        return _integer_repr(x, np.int64, np.int64(-2**63))
    else:
        raise ValueError("Unsupported dtype %s" % x.dtype)
+
+
+# The following two classes are copied from python 2.6 warnings module (context
+# manager)
class WarningMessage(object):

    """
    Holds the result of a single showwarning() call.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningMessage` is copied from the Python 2.6 warnings module,
    so it can be used in NumPy with older Python versions.

    """

    # Attribute names mirroring the arguments of warnings.showwarning().
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
+
+
class WarningManager(object):
    """
    A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of ``warnings.showwarning()`` and be appended to a
    list returned by the context manager. Otherwise None is returned by the
    context manager. The objects appended to the list are arguments whose
    attributes mirror the arguments to ``showwarning()``.

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningManager` is a copy of the ``catch_warnings`` context manager
    from the Python 2.6 warnings module, with slight modifications.
    It is copied so it can be used in NumPy with older Python versions.

    """

    def __init__(self, record=False, module=None):
        self._record = record
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the filter list and replace it with a copy, so any filters
        # added inside the context are discarded on exit.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []

            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None

    def __exit__(self, *exc_info):
        # Bug fix: accept the (exc_type, exc_value, traceback) arguments the
        # with-statement passes on exit.  The previous zero-argument
        # signature raised TypeError whenever this manager was actually used
        # in a ``with`` block.  ``*exc_info`` stays compatible with manual
        # zero-argument ``__exit__()`` calls.
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
+
+
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    # Context manager backing assert_warns: record matching warnings while
    # the body runs and fail if none were emitted.
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings() as sup:
        log = sup.record(warning_class)
        yield
        if len(log) == 0:
            suffix = " when calling %s" % name if name is not None else ""
            raise AssertionError("No warning raised" + suffix)
+
+
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class warning_class should be thrown by the callable when
    invoked with arguments args and keyword arguments kwargs.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may be
    used as a context manager:

        with assert_warns(SomeWarning):
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    # With no callable supplied, hand back the bare context manager.
    if not args:
        return _assert_warns_context(warning_class)

    func, rest = args[0], args[1:]
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*rest, **kwargs)
+
+
+@contextlib.contextmanager
+def _assert_no_warnings_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
+ yield
+ if len(l) > 0:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError("Got warnings%s: %s" % (name_str, l))
+
+
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context manager:

        with assert_no_warnings():
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    # With no callable supplied, hand back the bare context manager.
    if not args:
        return _assert_no_warnings_context()

    func, rest = args[0], args[1:]
    with _assert_no_warnings_context(name=func.__name__):
        return func(*rest, **kwargs)
+
+
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
    """
    generator producing data with different alignment and offsets
    to test simd vectorization

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations, creates one input
        and output array
        'binary': create data for binary operations, creates two input
        and output array
    max_size : integer
        maximum size of data to produce

    Returns
    -------
    if type is 'unary' yields one output, one input array and a message
    containing information on the data
    if type is 'binary' yields one output array, two input array and a message
    containing information on the data

    """
    # Format strings describing each yielded case (offsets, size, dtype).
    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
    # o: element offset into the base buffer, used to vary alignment.
    for o in range(3):
        # s: allocation size; slicing [o:] below yields arrays of s - o
        # elements at differing alignments.
        for s in range(o + 2, max(o + 3, max_size)):
            if type == 'unary':
                # inp is a factory so aliased cases below get fresh buffers.
                inp = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
                d = inp()
                yield d, d, ufmt % (o, o, s, dtype, 'in place')
                # Shifted variants: output and input at different offsets,
                # plus overlapping (aliased) input/output pairs.
                yield out[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'out of place')
                yield inp()[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'aliased')
                yield inp()[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'aliased')
            if type == 'binary':
                # Two independent input factories for the binary case.
                inp1 = lambda: arange(s, dtype=dtype)[o:]
                inp2 = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp1(), inp2(), bfmt % \
                    (o, o, o, s, dtype, 'out of place')
                # In-place variants: output aliases one of the inputs.
                d = inp1()
                yield d, d, inp2(), bfmt % \
                    (o, o, o, s, dtype, 'in place1')
                d = inp2()
                yield d, inp1(), d, bfmt % \
                    (o, o, o, s, dtype, 'in place2')
                # Shifted and aliased variants as in the unary case.
                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'out of place')
                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
class IgnoreException(Exception):
    """Ignoring this exception due to disabled feature"""
+
+
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager to provide a temporary test folder.

    All arguments are passed on to the underlying tempfile.mkdtemp
    function.

    """
    path = mkdtemp(*args, **kwargs)
    try:
        yield path
    finally:
        # Remove the folder and everything in it once the context exits.
        shutil.rmtree(path)
+
+
@contextlib.contextmanager
def temppath(*args, **kwargs):
    """Context manager for temporary files.

    Context manager that returns the path to a closed temporary file. Its
    parameters are the same as for tempfile.mkstemp and are passed directly
    to that function. The underlying file is removed when the context is
    exited, so it should be closed at that time.

    Windows does not allow a temporary file to be opened if it is already
    open, so the underlying file must be closed after opening before it
    can be opened again.

    """
    # Close the OS-level handle immediately; only the path is handed out.
    fd, path = mkstemp(*args, **kwargs)
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
+
+
class clear_and_catch_warnings(warnings.catch_warnings):
    """ Context manager that resets warning registry for catching warnings

    Warnings can be slippery, because, whenever a warning is triggered, Python
    adds a ``__warningregistry__`` member to the *calling* module. This makes
    it impossible to retrigger the warning in this module, whatever you put in
    the warnings filters. This context manager accepts a sequence of `modules`
    as a keyword argument to its constructor and:

    * stores and removes any ``__warningregistry__`` entries in given `modules`
      on entry;
    * resets ``__warningregistry__`` to its previous state on exit.

    This makes it possible to trigger any warning afresh inside the context
    manager without disturbing the state of warnings outside.

    For compatibility with Python 3.0, please consider all arguments to be
    keyword-only.

    Parameters
    ----------
    record : bool, optional
        Specifies whether warnings should be captured by a custom
        implementation of ``warnings.showwarning()`` and be appended to a list
        returned by the context manager. Otherwise None is returned by the
        context manager. The objects appended to the list are arguments whose
        attributes mirror the arguments to ``showwarning()``.
    modules : sequence, optional
        Sequence of modules for which to reset warnings registry on entry and
        restore on exit. To work correctly, all 'ignore' filters should
        filter by one of these modules.

    Examples
    --------
    >>> import warnings
    >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
    ...     warnings.simplefilter('always')
    ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
    ...     # do something that raises a warning but ignore those in
    ...     # np.core.fromnumeric
    """
    # Subclasses may pre-populate this with modules that are always cleared.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        self._warnreg_copies = {}
        super(clear_and_catch_warnings, self).__init__(record=record)

    def __enter__(self):
        # Stash a copy of each module's __warningregistry__ and empty it so
        # previously-triggered warnings can fire again inside the context.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                saved = mod.__warningregistry__
                self._warnreg_copies[mod] = saved.copy()
                saved.clear()
        return super(clear_and_catch_warnings, self).__enter__()

    def __exit__(self, *exc_info):
        super(clear_and_catch_warnings, self).__exit__(*exc_info)
        # Restore each registry to the state saved on entry.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                registry = mod.__warningregistry__
                registry.clear()
                if mod in self._warnreg_copies:
                    registry.update(self._warnreg_copies[mod])
+
+
class suppress_warnings(object):
    """
    Context manager and decorator doing much the same as
    ``warnings.catch_warnings``.

    However, it also provides a filter mechanism to work around
    http://bugs.python.org/issue4180.

    This bug causes Python before 3.4 to not reliably show warnings again
    after they have been ignored once (even within catch_warnings). It
    means that no "ignore" filter can be used easily, since following
    tests might need to see the warning. Additionally it allows easier
    specificity for testing warnings and can be nested.

    Parameters
    ----------
    forwarding_rule : str, optional
        One of "always", "once", "module", or "location". Analogous to
        the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outmost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", match by exact
        location the warning originated from.

    Notes
    -----
    Filters added inside the context manager will be discarded again
    when leaving it. Upon entering all filters defined outside a
    context will be applied automatically.

    When a recording filter is added, matching warnings are stored in the
    ``log`` attribute as well as in the list returned by ``record``.

    If filters are added and the ``module`` keyword is given, the
    warning registry of this module will additionally be cleared when
    applying it, entering the context, or exiting it. This could cause
    warnings to appear a second time after leaving the context if they
    were configured to be printed once (default) and were already
    printed before the context was entered.

    Nesting this context manager will work as expected when the
    forwarding rule is "always" (default). Unfiltered and unrecorded
    warnings will be passed out and be matched by the outer level.
    On the outmost level they will be printed (or caught by another
    warnings context). The forwarding rule argument can modify this
    behaviour.

    Like ``catch_warnings`` this context manager is not threadsafe.

    Examples
    --------
    >>> with suppress_warnings() as sup:
    ...     sup.filter(DeprecationWarning, "Some text")
    ...     sup.filter(module=np.ma.core)
    ...     log = sup.record(FutureWarning, "Does this occur?")
    ...     command_giving_warnings()
    ...     # The FutureWarning was given once, the filtered warnings were
    ...     # ignored. All other warnings abide outside settings (may be
    ...     # printed/error)
    ...     assert_(len(log) == 1)
    ...     assert_(len(sup.log) == 1)  # also stored in log attribute

    Or as a decorator:

    >>> sup = suppress_warnings()
    >>> sup.filter(module=np.ma.core)  # module must match exact
    >>> @sup
    >>> def some_function():
    ...     # do something which causes a warning in np.ma.core
    ...     pass
    """
    def __init__(self, forwarding_rule="always"):
        self._entered = False

        # Suppressions are either instance or defined inside one with block:
        self._suppressions = []

        if forwarding_rule not in {"always", "module", "once", "location"}:
            raise ValueError("unsupported forwarding rule.")
        self._forwarding_rule = forwarding_rule

    def _clear_registries(self):
        # Invalidate cached "already warned" state so suppressed warnings
        # can be triggered again.
        if hasattr(warnings, "_filters_mutated"):
            # clearing the registry should not be necessary on new pythons,
            # instead the filters should be mutated.
            warnings._filters_mutated()
            return
        # Simply clear the registry, this should normally be harmless,
        # note that on new pythons it would be invalidated anyway.
        for module in self._tmp_modules:
            if hasattr(module, "__warningregistry__"):
                module.__warningregistry__.clear()

    def _filter(self, category=Warning, message="", module=None, record=False):
        # Shared implementation of filter() and record(): register a
        # suppression tuple and, if already entered, apply it immediately.
        if record:
            record = []  # The log where to store warnings
        else:
            record = None
        if self._entered:
            if module is None:
                warnings.filterwarnings(
                    "always", category=category, message=message)
            else:
                # Match the module by its fully qualified name.
                module_regex = module.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=category, message=message,
                    module=module_regex)
                self._tmp_modules.add(module)
            self._clear_registries()

            # Filters added inside the context are temporary; they are
            # discarded when the context exits.
            self._tmp_suppressions.append(
                (category, message, re.compile(message, re.I), module, record))
        else:
            self._suppressions.append(
                (category, message, re.compile(message, re.I), module, record))

        return record

    def filter(self, category=Warning, message="", module=None):
        """
        Add a new suppressing filter or apply it if the state is entered.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        self._filter(category=category, message=message, module=module,
                     record=False)

    def record(self, category=Warning, message="", module=None):
        """
        Append a new recording filter or apply it if the state is entered.

        All warnings matching will be appended to the ``log`` attribute.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Returns
        -------
        log : list
            A list which will be filled with all matched warnings.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        return self._filter(category=category, message=message, module=module,
                            record=True)

    def __enter__(self):
        if self._entered:
            raise RuntimeError("cannot enter suppress_warnings twice.")

        # Save global warning state; it is restored verbatim in __exit__.
        self._orig_show = warnings.showwarning
        self._filters = warnings.filters
        warnings.filters = self._filters[:]

        self._entered = True
        self._tmp_suppressions = []
        self._tmp_modules = set()
        self._forwarded = set()

        self.log = []  # reset global log (no need to keep same list)

        # Re-apply every instance-level suppression for this context.
        for cat, mess, _, mod, log in self._suppressions:
            if log is not None:
                del log[:]  # clear the log
            if mod is None:
                warnings.filterwarnings(
                    "always", category=cat, message=mess)
            else:
                module_regex = mod.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=cat, message=mess,
                    module=module_regex)
                self._tmp_modules.add(mod)
        warnings.showwarning = self._showwarning
        self._clear_registries()

        return self

    def __exit__(self, *exc_info):
        # Restore the original handler and filter list saved in __enter__.
        warnings.showwarning = self._orig_show
        warnings.filters = self._filters
        self._clear_registries()
        self._entered = False
        del self._orig_show
        del self._filters

    def _showwarning(self, message, category, filename, lineno,
                     *args, **kwargs):
        # Replacement for warnings.showwarning while the context is active:
        # decide whether the warning is recorded, suppressed, or forwarded.
        use_warnmsg = kwargs.pop("use_warnmsg", None)
        # Newest-first, so filters added inside the context take precedence
        # over instance-level ones.
        for cat, _, pattern, mod, rec in (
                self._suppressions + self._tmp_suppressions)[::-1]:
            if (issubclass(category, cat) and
                    pattern.match(message.args[0]) is not None):
                if mod is None:
                    # Message and category match, either recorded or ignored
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return
                # Use startswith, because warnings strips the c or o from
                # .pyc/.pyo files.
                elif mod.__file__.startswith(filename):
                    # The message and module (filename) match
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return

        # There is no filter in place, so pass to the outside handler
        # unless we should only pass it once
        if self._forwarding_rule == "always":
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno,
                                *args, **kwargs)
            else:
                # NOTE(review): self._orig_showmsg is never assigned anywhere
                # in this class, so this branch would raise AttributeError if
                # use_warnmsg were ever passed -- confirm against the newer
                # upstream implementation before relying on this path.
                self._orig_showmsg(use_warnmsg)
            return

        # Deduplicate forwarded warnings according to the forwarding rule.
        if self._forwarding_rule == "once":
            signature = (message.args, category)
        elif self._forwarding_rule == "module":
            signature = (message.args, category, filename)
        elif self._forwarding_rule == "location":
            signature = (message.args, category, filename, lineno)

        if signature in self._forwarded:
            return
        self._forwarded.add(signature)
        if use_warnmsg is None:
            self._orig_show(message, category, filename, lineno, *args,
                            **kwargs)
        else:
            # NOTE(review): same unassigned-attribute concern as above.
            self._orig_showmsg(use_warnmsg)

    def __call__(self, func):
        """
        Function decorator to apply certain suppressions to a whole
        function.
        """
        @wraps(func)
        def new_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return new_func
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index ee9d1b4df..563ed14ea 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -1,340 +1,6 @@
-# These classes implement a doctest runner plugin for nose, a "known failure"
-# error class, and a customized TestProgram for NumPy.
+"""
+Back-compatibility shim for the noseclasses module; it imports the
+appropriate set of tools from ``numpy.testing.nose_tools``.
-# Because this module imports nose directly, it should not
-# be used except by nosetester.py to avoid a general NumPy
-# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
-import os
-import doctest
-import inspect
-
-import nose
-from nose.plugins import doctests as npd
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-from nose.plugins.base import Plugin
-from nose.util import src
-import numpy
-from .nosetester import get_package_name
-from .utils import KnownFailureException, KnownFailureTest
-
-
-# Some of the classes in this module begin with 'Numpy' to clearly distinguish
-# them from the plethora of very similar names from nose/unittest/doctest
-
-#-----------------------------------------------------------------------------
-# Modified version of the one in the stdlib, that fixes a python bug (doctests
-# not found in extension modules, http://bugs.python.org/issue3158)
-class NumpyDocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
- return True # [XX] no way not be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
-
- doctest.DocTestFinder._find(self, tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
- from inspect import (
- isroutine, isclass, ismodule, isfunction, ismethod
- )
-
- # Look for tests in a module's contained objects.
- if ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val)):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((isfunction(val) or isclass(val) or
- ismethod(val) or isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
-# second-chance checker; if the default comparison doesn't
-# pass, then see if the expected output string contains flags that
-# tell us to ignore the output
-class NumpyOutputChecker(doctest.OutputChecker):
- def check_output(self, want, got, optionflags):
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret:
- if "#random" in want:
- return True
-
- # it would be useful to normalize endianness so that
- # bigendian machines don't fail all the tests (and there are
- # actually some bigendian examples in the doctests). Let's try
- # making them all little endian
- got = got.replace("'>", "'<")
- want = want.replace("'>", "'<")
-
- # try to normalize out 32 and 64 bit default int sizes
- for sz in [4, 8]:
- got = got.replace("'<i%d'" % sz, "int")
- want = want.replace("'<i%d'" % sz, "int")
-
- ret = doctest.OutputChecker.check_output(self, want,
- got, optionflags)
-
- return ret
-
-
-# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
-# its constructor that blocks non-default arguments from being passed
-# down into doctest.DocTestCase
-class NumpyDocTestCase(npd.DocTestCase):
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- self._nose_obj = obj
- doctest.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
-
-
-print_state = numpy.get_printoptions()
-
-class NumpyDoctest(npd.Doctest):
- name = 'numpydoctest' # call nosetests with --with-numpydoctest
- score = 1000 # load late, after doctest builtin
-
- # always use whitespace and ellipsis options for doctests
- doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- # files that should be ignored for doctests
- doctest_ignore = ['generate_numpy_api.py',
- 'setup.py']
-
- # Custom classes; class variables to allow subclassing
- doctest_case_class = NumpyDocTestCase
- out_check_class = NumpyOutputChecker
- test_finder_class = NumpyDocTestFinder
-
- # Don't use the standard doctest option handler; hard-code the option values
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- # Test doctests in 'test' files / directories. Standard plugin default
- # is False
- self.doctest_tests = True
- # Variable name; if defined, doctest results stored in this variable in
- # the top-level namespace. None is the standard default
- self.doctest_result_var = None
-
- def configure(self, options, config):
- # parent method sets enabled flag from command line --with-numpydoctest
- Plugin.configure(self, options, config)
- self.finder = self.test_finder_class()
- self.parser = doctest.DocTestParser()
- if self.enabled:
- # Pull standard doctest out of plugin list; there's no reason to run
- # both. In practice the Unplugger plugin above would cover us when
- # run from a standard numpy.test() call; this is just in case
- # someone wants to run our plugin outside the numpy.test() machinery
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
-
- def set_test_context(self, test):
- """ Configure `test` object to set test context
-
- We set the numpy / scipy standard doctest namespace
-
- Parameters
- ----------
- test : test object
- with ``globs`` dictionary defining namespace
-
- Returns
- -------
- None
-
- Notes
- -----
- `test` object modified in place
- """
- # set the namespace for tests
- pkg_name = get_package_name(os.path.dirname(test.filename))
-
- # Each doctest should execute in an environment equivalent to
- # starting Python and executing "import numpy as np", and,
- # for SciPy packages, an additional import of the local
- # package (so that scipy.linalg.basic.py's doctests have an
- # implicit "from scipy import linalg" as well.
- #
- # Note: __file__ allows the doctest in NoseTester to run
- # without producing an error
- test.globs = {'__builtins__':__builtins__,
- '__file__':'__main__',
- '__name__':'__main__',
- 'np':numpy}
- # add appropriate scipy import for SciPy tests
- if 'scipy' in pkg_name:
- p = pkg_name.split('.')
- p2 = p[-1]
- test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
-
- # Override test loading to customize test context (with set_test_context
- # method), set standard docstring options, and install our own test output
- # checker
- def loadTestsFromModule(self, module):
- if not self.matches(module.__name__):
- npd.log.debug("Doctest doesn't want module %s", module)
- return
- try:
- tests = self.finder.find(module)
- except AttributeError:
- # nose allows module.__test__ = False; doctest does not and
- # throws AttributeError
- return
- if not tests:
- return
- tests.sort()
- module_file = src(module.__file__)
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
- # Set test namespace; test altered in place
- self.set_test_context(test)
- yield self.doctest_case_class(test,
- optionflags=self.doctest_optflags,
- checker=self.out_check_class(),
- result_var=self.doctest_result_var)
-
- # Add an afterContext method to nose.plugins.doctests.Doctest in order
- # to restore print options to the original state after each doctest
- def afterContext(self):
- numpy.set_printoptions(**print_state)
-
- # Ignore NumPy-specific build files that shouldn't be searched for tests
- def wantFile(self, file):
- bn = os.path.basename(file)
- if bn in self.doctest_ignore:
- return False
- return npd.Doctest.wantFile(self, file)
-
-
-class Unplugger(object):
- """ Nose plugin to remove named plugin late in loading
-
- By default it removes the "doctest" plugin.
- """
- name = 'unplugger'
- enabled = True # always enabled
- score = 4000 # load late in order to be after builtins
-
- def __init__(self, to_unplug='doctest'):
- self.to_unplug = to_unplug
-
- def options(self, parser, env):
- pass
-
- def configure(self, options, config):
- # Pull named plugin out of plugins list
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != self.to_unplug]
-
-
-class KnownFailurePlugin(ErrorClassPlugin):
- '''Plugin that installs a KNOWNFAIL error class for the
- KnownFailureClass exception. When KnownFailure is raised,
- the exception will be logged in the knownfail attribute of the
- result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
- exception will not be counted as an error or failure.'''
- enabled = True
- knownfail = ErrorClass(KnownFailureException,
- label='KNOWNFAIL',
- isfailure=False)
-
- def options(self, parser, env=os.environ):
- env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
- parser.add_option('--no-knownfail', action='store_true',
- dest='noKnownFail', default=env.get(env_opt, False),
- help='Disable special handling of KnownFailure '
- 'exceptions')
-
- def configure(self, options, conf):
- if not self.can_configure:
- return
- self.conf = conf
- disable = getattr(options, 'noKnownFail', False)
- if disable:
- self.enabled = False
-
-KnownFailure = KnownFailurePlugin # backwards compat
-
-
-# Class allows us to save the results of the tests in runTests - see runTests
-# method docstring for details
-class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
-
- Because nose currently discards the test result object, but we need
- to return it to the user, override TestProgram.runTests to retain
- the result
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
+"""
+from .nose_tools.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 3d9616ed8..b726684c9 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -1,523 +1,10 @@
"""
-Nose test running.
-
-This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+Back-compatibility shim for the nosetester module; it imports the
+appropriate set of tools from ``numpy.testing.nose_tools``.
"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-from numpy.compat import basestring
-import numpy as np
-
-from .utils import import_nose, suppress_warnings
-
-
-def get_package_name(filepath):
- """
- Given a path where a package is installed, determine its name.
-
- Parameters
- ----------
- filepath : str
- Path to a file. If the determination fails, "numpy" is returned.
-
- Examples
- --------
- >>> np.testing.nosetester.get_package_name('nonsense')
- 'numpy'
-
- """
-
- fullpath = filepath[:]
- pkg_name = []
- while 'site-packages' in filepath or 'dist-packages' in filepath:
- filepath, p2 = os.path.split(filepath)
- if p2 in ('site-packages', 'dist-packages'):
- break
- pkg_name.append(p2)
-
- # if package name determination failed, just default to numpy/scipy
- if not pkg_name:
- if 'scipy' in fullpath:
- return 'scipy'
- else:
- return 'numpy'
-
- # otherwise, reverse to get correct order and return
- pkg_name.reverse()
-
- # don't include the outer egg directory
- if pkg_name[0].endswith('.egg'):
- pkg_name.pop(0)
-
- return '.'.join(pkg_name)
-
-
-def run_module_suite(file_to_run=None, argv=None):
- """
- Run a test module.
-
- Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
- the command line
-
- Parameters
- ----------
- file_to_run : str, optional
- Path to test module, or None.
- By default, run the module from which this function is called.
- argv : list of strings
- Arguments to be passed to the nose test runner. ``argv[0]`` is
- ignored. All command line arguments accepted by ``nosetests``
- will work. If it is the default value None, sys.argv is used.
-
- .. versionadded:: 1.9.0
-
- Examples
- --------
- Adding the following::
-
- if __name__ == "__main__" :
- run_module_suite(argv=sys.argv)
-
- at the end of a test module will run the tests when that module is
- called in the python interpreter.
-
- Alternatively, calling::
-
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
-
- from an interpreter will run all the test routine in 'test_matlib.py'.
- """
- if file_to_run is None:
- f = sys._getframe(1)
- file_to_run = f.f_locals.get('__file__', None)
- if file_to_run is None:
- raise AssertionError
-
- if argv is None:
- argv = sys.argv + [file_to_run]
- else:
- argv = argv + [file_to_run]
-
- nose = import_nose()
- from .noseclasses import KnownFailurePlugin
- nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
-
-
-class NoseTester(object):
- """
- Nose test runner.
-
- This class is made available as numpy.testing.Tester, and a test function
- is typically added to a package's __init__.py like so::
-
- from numpy.testing import Tester
- test = Tester().test
-
- Calling this test function finds and runs all tests associated with the
- package and all its sub-packages.
-
- Attributes
- ----------
- package_path : str
- Full path to the package to test.
- package_name : str
- Name of the package to test.
-
- Parameters
- ----------
- package : module, str or None, optional
- The package to test. If a string, this should be the full path to
- the package. If None (default), `package` is set to the module from
- which `NoseTester` is initialized.
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- Default is "release".
- depth : int, optional
- If `package` is None, then this can be used to initialize from the
- module of the caller of (the caller of (...)) the code that
- initializes `NoseTester`. Default of 0 means the module of the
- immediate caller; higher values are useful for utility routines that
- want to initialize `NoseTester` objects on behalf of other code.
-
- """
- def __init__(self, package=None, raise_warnings="release", depth=0):
- # Back-compat: 'None' used to mean either "release" or "develop"
- # depending on whether this was a release or develop version of
- # numpy. Those semantics were fine for testing numpy, but not so
- # helpful for downstream projects like scipy that use
- # numpy.testing. (They want to set this based on whether *they* are a
- # release or develop version, not whether numpy is.) So we continue to
- # accept 'None' for back-compat, but it's now just an alias for the
- # default "release".
- if raise_warnings is None:
- raise_warnings = "release"
-
- package_name = None
- if package is None:
- f = sys._getframe(1 + depth)
- package_path = f.f_locals.get('__file__', None)
- if package_path is None:
- raise AssertionError
- package_path = os.path.dirname(package_path)
- package_name = f.f_locals.get('__name__', None)
- elif isinstance(package, type(os)):
- package_path = os.path.dirname(package.__file__)
- package_name = getattr(package, '__name__', None)
- else:
- package_path = str(package)
-
- self.package_path = package_path
-
- # Find the package name under test; this name is used to limit coverage
- # reporting (if enabled).
- if package_name is None:
- package_name = get_package_name(package_path)
- self.package_name = package_name
-
- # Set to "release" in constructor in maintenance branches.
- self.raise_warnings = raise_warnings
-
- def _test_argv(self, label, verbose, extra_argv):
- ''' Generate argv for nosetest command
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- see ``test`` docstring
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- argv : list
- command line arguments that will be passed to nose
- '''
- argv = [__file__, self.package_path, '-s']
- if label and label != 'full':
- if not isinstance(label, basestring):
- raise TypeError('Selection label should be a string')
- if label == 'fast':
- label = 'not slow'
- argv += ['-A', label]
- argv += ['--verbosity', str(verbose)]
-
- # When installing with setuptools, and also in some other cases, the
- # test_*.py files end up marked +x executable. Nose, by default, does
- # not run files marked with +x as they might be scripts. However, in
- # our case nose only looks for test_*.py files under the package
- # directory, which should be safe.
- argv += ['--exe']
-
- if extra_argv:
- argv += extra_argv
- return argv
-
- def _show_system_info(self):
- nose = import_nose()
-
- import numpy
- print("NumPy version %s" % numpy.__version__)
- relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
- npdir = os.path.dirname(numpy.__file__)
- print("NumPy is installed in %s" % npdir)
-
- if 'scipy' in self.package_name:
- import scipy
- print("SciPy version %s" % scipy.__version__)
- spdir = os.path.dirname(scipy.__file__)
- print("SciPy is installed in %s" % spdir)
-
- pyversion = sys.version.replace('\n', '')
- print("Python version %s" % pyversion)
- print("nose version %d.%d.%d" % nose.__versioninfo__)
-
- def _get_custom_doctester(self):
- """ Return instantiated plugin for doctests
-
- Allows subclassing of this class to override doctester
-
- A return value of None means use the nose builtin doctest plugin
- """
- from .noseclasses import NumpyDoctest
- return NumpyDoctest()
-
- def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False):
- """
- Run tests for module using nose.
-
- This method does the heavy lifting for the `test` method. It takes all
- the same arguments, for details see `test`.
-
- See Also
- --------
- test
-
- """
- # fail with nice error message if nose is not present
- import_nose()
- # compile argv
- argv = self._test_argv(label, verbose, extra_argv)
- # our way of doing coverage
- if coverage:
- argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
- '--cover-tests', '--cover-erase']
- # construct list of plugins
- import nose.plugins.builtin
- from .noseclasses import KnownFailurePlugin, Unplugger
- plugins = [KnownFailurePlugin()]
- plugins += [p() for p in nose.plugins.builtin.plugins]
- # add doctesting if required
- doctest_argv = '--with-doctest' in argv
- if doctests == False and doctest_argv:
- doctests = True
- plug = self._get_custom_doctester()
- if plug is None:
- # use standard doctesting
- if doctests and not doctest_argv:
- argv += ['--with-doctest']
- else: # custom doctesting
- if doctest_argv: # in fact the unplugger would take care of this
- argv.remove('--with-doctest')
- plugins += [Unplugger('doctest'), plug]
- if doctests:
- argv += ['--with-' + plug.name]
- return argv, plugins
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None):
- """
- Run tests for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the tests to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow tests as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
- doctests : bool, optional
- If True, run doctests in module. Default is False.
- coverage : bool, optional
- If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module:
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- The default is to use the class initialization value.
-
- Returns
- -------
- result : object
- Returns the result of running the tests as a
- ``nose.result.TextTestResult`` object.
-
- Notes
- -----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
-
- >>> np.lib.test() #doctest: +SKIP
-
- Examples
- --------
- >>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
- ...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
- """
-
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- from . import utils
- utils.verbose = verbose
-
- if doctests:
- print("Running unit tests and doctests for %s" % self.package_name)
- else:
- print("Running unit tests for %s" % self.package_name)
-
- self._show_system_info()
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
- if raise_warnings is None:
- raise_warnings = self.raise_warnings
-
- _warn_opts = dict(develop=(Warning,),
- release=())
- if isinstance(raise_warnings, basestring):
- raise_warnings = _warn_opts[raise_warnings]
-
- with suppress_warnings("location") as sup:
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # Set all warnings to 'warn', this is because the default 'once'
- # has the bad property of possibly shadowing later warnings.
- warnings.filterwarnings('always')
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- sup.filter(message='Not importing directory')
- sup.filter(message="numpy.dtype size changed")
- sup.filter(message="numpy.ufunc size changed")
- sup.filter(category=np.ModuleDeprecationWarning)
- # Filter out boolean '-' deprecation messages. This allows
- # older versions of scipy to test without a flood of messages.
- sup.filter(message=".*boolean negative.*")
- sup.filter(message=".*boolean subtract.*")
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- from ..distutils import cpuinfo
- sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
- # Filter out some deprecation warnings inside nose 1.3.7 when run
- # on python 3.5b2. See
- # https://github.com/nose-devs/nose/issues/929
- # Note: it is hard to filter based on module for sup (lineno could
- # be implemented).
- warnings.filterwarnings("ignore", message=".*getargspec.*",
- category=DeprecationWarning,
- module=r"nose\.")
-
- from .noseclasses import NumpyTestProgram
-
- argv, plugins = self.prepare_test_args(
- label, verbose, extra_argv, doctests, coverage)
-
- t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
-
- return t.result
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- """
- Run benchmarks for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the benchmarks to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow benchmarks as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- success : bool
- Returns True if running the benchmarks works, False if an error
- occurred.
-
- Notes
- -----
- Benchmarks are like tests, but have names starting with "bench" instead
- of "test", and can be found under the "benchmarks" sub-directory of the
- module.
-
- Each NumPy module exposes `bench` in its namespace to run all benchmarks
- for it.
-
- Examples
- --------
- >>> success = np.lib.bench() #doctest: +SKIP
- Running benchmarks for numpy.lib
- ...
- using 562341 items:
- unique:
- 0.11
- unique1d:
- 0.11
- ratio: 1.0
- nUnique: 56230 == 56230
- ...
- OK
-
- >>> success #doctest: +SKIP
- True
-
- """
-
- print("Running benchmarks for %s" % self.package_name)
- self._show_system_info()
-
- argv = self._test_argv(label, verbose, extra_argv)
- argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
-
- # import nose or make informative error
- nose = import_nose()
-
- # get plugin to disable doctests
- from .noseclasses import Unplugger
- add_plugins = [Unplugger('doctest')]
-
- return nose.run(argv=argv, addplugins=add_plugins)
-
+from .nose_tools.nosetester import *
-def _numpy_tester():
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- mode = "develop"
- else:
- mode = "release"
- return NoseTester(raise_warnings=mode, depth=1)
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+ '_numpy_tester', 'import_nose',
+ 'suppress_warnings']
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 7c1c237b9..a5e9656a3 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -6,6 +6,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
+ config.add_subpackage('nose_tools')
config.add_data_dir('tests')
return config
diff --git a/numpy/testing/tests/__init__.py b/numpy/testing/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/testing/tests/__init__.py
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index 02cd9fb88..1258a9296 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -1,3 +1,7 @@
+"""
+Test the decorators from ``testing.decorators``.
+
+"""
from __future__ import division, absolute_import, print_function
import warnings
@@ -13,6 +17,7 @@ def test_slow():
assert_(slow_func.slow)
+
def test_setastest():
@dec.setastest()
def f_default(a):
@@ -30,6 +35,7 @@ def test_setastest():
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
+
class DidntSkipException(Exception):
pass
@@ -182,5 +188,13 @@ def test_deprecated():
assert_raises(AssertionError, deprecated_func3)
+@dec.parametrize('base, power, expected',
+ [(1, 1, 1),
+ (2, 1, 2),
+ (2, 2, 4)])
+def test_parametrize(base, power, expected):
+ assert_(base**power == expected)
+
+
if __name__ == '__main__':
run_module_suite()
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index e2c105245..493c538af 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -61,7 +61,7 @@ class _GenericTest(object):
def test_objarray(self):
"""Test object arrays."""
- a = np.array([1, 1], dtype=np.object)
+ a = np.array([1, 1], dtype=object)
self._test_equal(a, 1)
def test_array_likes(self):
@@ -134,14 +134,14 @@ class TestArrayEqual(_GenericTest, unittest.TestCase):
def test_recarrays(self):
"""Test record arrays."""
- a = np.empty(2, [('floupi', np.float), ('floupa', np.float)])
+ a = np.empty(2, [('floupi', float), ('floupa', float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
- c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)])
+ c = np.empty(2, [('floupipi', float), ('floupa', float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index f54995870..7ecb68f47 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -1,29 +1,8 @@
"""
-Utility function to facilitate testing.
+Back-compatibility shim for the utils module; it imports the
+appropriate set of testing tools.
"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import re
-import operator
-import warnings
-from functools import partial, wraps
-import shutil
-import contextlib
-from tempfile import mkdtemp, mkstemp
-from unittest.case import SkipTest
-
-from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
@@ -34,2195 +13,8 @@ __all__ = [
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
- 'HAS_REFCOUNT', 'suppress_warnings'
+ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+ '_assert_valid_refcount', '_gen_alignment_data',
]
-
-class KnownFailureException(Exception):
- '''Raise this exception to mark a test as a known failing test.'''
- pass
-
-
-KnownFailureTest = KnownFailureException # backwards compat
-verbose = 0
-
-IS_PYPY = '__pypy__' in sys.modules
-HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
-
-
-def import_nose():
- """ Import nose only when needed.
- """
- nose_is_good = True
- minimum_nose_version = (1, 0, 0)
- try:
- import nose
- except ImportError:
- nose_is_good = False
- else:
- if nose.__versioninfo__ < minimum_nose_version:
- nose_is_good = False
-
- if not nose_is_good:
- msg = ('Need nose >= %d.%d.%d for tests - see '
- 'http://nose.readthedocs.io' %
- minimum_nose_version)
- raise ImportError(msg)
-
- return nose
-
-
-def assert_(val, msg=''):
- """
- Assert that works in release mode.
- Accepts callable msg to allow deferring evaluation until failure.
-
- The Python built-in ``assert`` does not work when executing code in
- optimized mode (the ``-O`` flag) - no byte-code is generated for it.
-
- For documentation on usage, refer to the Python documentation.
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if not val:
- try:
- smsg = msg()
- except TypeError:
- smsg = msg
- raise AssertionError(smsg)
-
-
-def gisnan(x):
- """like isnan, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isnan and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isnan
- st = isnan(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isnan not supported for this type")
- return st
-
-
-def gisfinite(x):
- """like isfinite, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isfinite and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, errstate
- with errstate(invalid='ignore'):
- st = isfinite(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isfinite not supported for this type")
- return st
-
-
-def gisinf(x):
- """like isinf, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isinf and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, errstate
- with errstate(invalid='ignore'):
- st = isinf(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isinf not supported for this type")
- return st
-
-
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
-
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
-elif sys.platform[:5] == 'linux':
-
- def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """
- Return virtual memory size in bytes of the running python.
-
- """
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[22])
- except:
- return
-else:
- def memusage():
- """
- Return memory usage of running python. [Not implemented]
-
- """
- raise NotImplementedError
-
-
-if sys.platform[:5] == 'linux':
- def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
- _load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[13])
- except:
- return int(100*(time.time()-_load_time[0]))
-else:
- # os.getpid is not in all platforms available.
- # Using time is safe but inaccurate, especially when process
- # was suspended or sleeping.
- def jiffies(_load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- return int(100*(time.time()-_load_time[0]))
-
-
-def build_err_msg(arrays, err_msg, header='Items are not equal:',
- verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
- msg = ['\n' + header]
- if err_msg:
- if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
- msg = [msg[0] + ' ' + err_msg]
- else:
- msg.append(err_msg)
- if verbose:
- for i, a in enumerate(arrays):
-
- if isinstance(a, ndarray):
- # precision argument is only needed if the objects are ndarrays
- r_func = partial(array_repr, precision=precision)
- else:
- r_func = repr
-
- try:
- r = r_func(a)
- except Exception as exc:
- r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
- if r.count('\n') > 3:
- r = '\n'.join(r.splitlines()[:3])
- r += '...'
- msg.append(' %s: %s' % (names[i], r))
- return '\n'.join(msg)
-
-
-def assert_equal(actual, desired, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal.
-
- Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
- check that all elements of these objects are equal. An exception is raised
- at the first conflicting values.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal.
-
- Examples
- --------
- >>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- item=1
- ACTUAL: 5
- DESIRED: 6
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if isinstance(desired, dict):
- if not isinstance(actual, dict):
- raise AssertionError(repr(type(actual)))
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k, i in desired.items():
- if k not in actual:
- raise AssertionError(repr(k))
- assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
- return
- if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in range(len(desired)):
- assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
- return
- from numpy.core import ndarray, isscalar, signbit
- from numpy.lib import iscomplexobj, real, imag
- if isinstance(actual, ndarray) or isinstance(desired, ndarray):
- return assert_array_equal(actual, desired, err_msg, verbose)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_equal(actualr, desiredr)
- assert_equal(actuali, desiredi)
- except AssertionError:
- raise AssertionError(msg)
-
- # isscalar test to check cases such as [np.nan] != np.nan
- if isscalar(desired) != isscalar(actual):
- raise AssertionError(msg)
-
- # Inf/nan/negative zero handling
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan or isactnan:
- if not (isdesnan and isactnan):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- elif desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- # If both are NaT (and have the same dtype -- datetime or timedelta)
- # they are considered equal.
- if (isnat(desired) == isnat(actual) and
- array(desired).dtype.type == array(actual).dtype.type):
- return
- else:
- raise AssertionError(msg)
-
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- # Explicitly use __eq__ for comparison, ticket #2552
- if not (desired == actual):
- raise AssertionError(msg)
-
-
-def print_assert_equal(test_string, actual, desired):
- """
- Test if two objects are equal, and print an error message if test fails.
-
- The test is performed with ``actual == desired``.
-
- Parameters
- ----------
- test_string : str
- The message supplied to AssertionError.
- actual : object
- The object to test for equality against `desired`.
- desired : object
- The expected result.
-
- Examples
- --------
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
- Traceback (most recent call last):
- ...
- AssertionError: Test XYZ of func xyz failed
- ACTUAL:
- [0, 1]
- DESIRED:
- [0, 2]
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import pprint
-
- if not (actual == desired):
- msg = StringIO()
- msg.write(test_string)
- msg.write(' failed\nACTUAL: \n')
- pprint.pprint(actual, msg)
- msg.write('DESIRED: \n')
- pprint.pprint(desired, msg)
- raise AssertionError(msg.getvalue())
-
-
-def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies that the elements of ``actual`` and ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation in `assert_array_almost_equal` did up to rounding
- vagaries. An exception is raised at conflicting values. For ndarrays this
- delegates to assert_array_almost_equal
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- decimal : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> import numpy.testing as npt
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
-
- >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
- ... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import ndarray
- from numpy.lib import iscomplexobj, real, imag
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- def _build_err_msg():
- header = ('Arrays are not almost equal to %d decimals' % decimal)
- return build_err_msg([actual, desired], err_msg, verbose=verbose,
- header=header)
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_almost_equal(actualr, desiredr, decimal=decimal)
- assert_almost_equal(actuali, desiredi, decimal=decimal)
- except AssertionError:
- raise AssertionError(_build_err_msg())
-
- if isinstance(actual, (ndarray, tuple, list)) \
- or isinstance(desired, (ndarray, tuple, list)):
- return assert_array_almost_equal(actual, desired, decimal, err_msg)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(_build_err_msg())
- else:
- if not desired == actual:
- raise AssertionError(_build_err_msg())
- return
- except (NotImplementedError, TypeError):
- pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
- raise AssertionError(_build_err_msg())
-
-
-def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to significant
- digits.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- Given two numbers, check that they are approximately equal.
- Approximately equal is defined as the number of significant digits
- that agree.
-
- Parameters
- ----------
- actual : scalar
- The object to check.
- desired : scalar
- The expected object.
- significant : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- significant=8)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- significant=8)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
-
- the evaluated condition that raises the exception is
-
- >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
- True
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- (actual, desired) = map(float, (actual, desired))
- if desired == actual:
- return
- # Normalized the numbers to be in range (-10.0,10.0)
- # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- with np.errstate(invalid='ignore'):
- scale = 0.5*(np.abs(desired) + np.abs(actual))
- scale = np.power(10, np.floor(np.log10(scale)))
- try:
- sc_desired = desired/scale
- except ZeroDivisionError:
- sc_desired = 0.0
- try:
- sc_actual = actual/scale
- except ZeroDivisionError:
- sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- except (TypeError, NotImplementedError):
- pass
- if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
- raise AssertionError(msg)
-
-
-def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
- header='', precision=6, equal_nan=True,
- equal_inf=True):
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, isinf, any, inf
- x = array(x, copy=False, subok=True)
- y = array(y, copy=False, subok=True)
-
- def isnumber(x):
- return x.dtype.char in '?bhilqpBHILQPefdgFDG'
-
- def istime(x):
- return x.dtype.char in "Mm"
-
- def chk_same_position(x_id, y_id, hasval='nan'):
- """Handling nan/inf: check that x and y have the nan/inf at the same
- locations."""
- try:
- assert_array_equal(x_id, y_id)
- except AssertionError:
- msg = build_err_msg([x, y],
- err_msg + '\nx and y %s location mismatch:'
- % (hasval), verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- try:
- cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
- if not cond:
- msg = build_err_msg([x, y],
- err_msg
- + '\n(shapes %s, %s mismatch)' % (x.shape,
- y.shape),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- if isnumber(x) and isnumber(y):
- has_nan = has_inf = False
- if equal_nan:
- x_isnan, y_isnan = isnan(x), isnan(y)
- # Validate that NaNs are in the same place
- has_nan = any(x_isnan) or any(y_isnan)
- if has_nan:
- chk_same_position(x_isnan, y_isnan, hasval='nan')
-
- if equal_inf:
- x_isinf, y_isinf = isinf(x), isinf(y)
- # Validate that infinite values are in the same place
- has_inf = any(x_isinf) or any(y_isinf)
- if has_inf:
- # Check +inf and -inf separately, since they are different
- chk_same_position(x == +inf, y == +inf, hasval='+inf')
- chk_same_position(x == -inf, y == -inf, hasval='-inf')
-
- if has_nan and has_inf:
- x = x[~(x_isnan | x_isinf)]
- y = y[~(y_isnan | y_isinf)]
- elif has_nan:
- x = x[~x_isnan]
- y = y[~y_isnan]
- elif has_inf:
- x = x[~x_isinf]
- y = y[~y_isinf]
-
- # Only do the comparison if actual values are left
- if x.size == 0:
- return
-
- elif istime(x) and istime(y):
- # If one is datetime64 and the other timedelta64 there is no point
- if equal_nan and x.dtype.type == y.dtype.type:
- x_isnat, y_isnat = isnat(x), isnat(y)
-
- if any(x_isnat) or any(y_isnat):
- chk_same_position(x_isnat, y_isnat, hasval="NaT")
-
- if any(x_isnat) or any(y_isnat):
- x = x[~x_isnat]
- y = y[~y_isnat]
-
- val = comparison(x, y)
-
- if isinstance(val, bool):
- cond = val
- reduced = [0]
- else:
- reduced = val.ravel()
- cond = reduced.all()
- reduced = reduced.tolist()
- if not cond:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- if not cond:
- raise AssertionError(msg)
- except ValueError:
- import traceback
- efmt = traceback.format_exc()
- header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
-
- msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise ValueError(msg)
-
-
-def assert_array_equal(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not equal.
-
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if
- both objects have NaNs in the same positions.
-
- The usual caution for verifying equality with floating point numbers is
- advised.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- The first assert does not raise an exception:
-
- >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
- ... [np.exp(0),2.33333, np.nan])
-
- Assert fails with numerical inprecision with floats:
-
- >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- AssertionError:
- Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
-
- Use `assert_allclose` or one of the nulp (number of floating point values)
- functions for these cases instead:
-
- >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan],
- ... rtol=1e-10, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
- verbose=verbose, header='Arrays are not equal')
-
-
-def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies identical shapes and that the elements of ``actual`` and
- ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation did up to rounding vagaries. An exception is raised
- at shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if both
- objects have NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- decimal : int, optional
- Desired precision, default is 6.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- the first assert does not raise an exception
-
- >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- [1.0,2.333,np.nan])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
- AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
- from numpy.core.numerictypes import issubdtype
- from numpy.core.fromnumeric import any as npany
-
- def compare(x, y):
- try:
- if npany(gisinf(x)) or npany( gisinf(y)):
- xinfid = gisinf(x)
- yinfid = gisinf(y)
- if not (xinfid == yinfid).all():
- return False
- # if one item, x and y is +- inf
- if x.size == y.size == 1:
- return x == y
- x = x[~xinfid]
- y = y[~yinfid]
- except (TypeError, NotImplementedError):
- pass
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x - y)
-
- if not issubdtype(z.dtype, number):
- z = z.astype(float_) # handle object arrays
-
- return z < 1.5 * 10.0**(-decimal)
-
- assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
- header=('Arrays are not almost equal to %d decimals' % decimal),
- precision=decimal)
-
-
-def assert_array_less(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not ordered by less
- than.
-
- Given two array_like objects, check that the shape is equal and all
- elements of the first object are strictly smaller than those of the
- second object. An exception is raised at shape mismatch or incorrectly
- ordered values. Shape mismatch does not raise if an object has zero
- dimension. In contrast to the standard usage in numpy, NaNs are
- compared, no assertion is raised if both objects have NaNs in the same
- positions.
-
-
-
- Parameters
- ----------
- x : array_like
- The smaller object to check.
- y : array_like
- The larger object to compare.
- err_msg : string
- The error message to be printed in case of failure.
- verbose : bool
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_array_equal: tests objects for equality
- assert_array_almost_equal: test objects for equality up to precision
-
-
-
- Examples
- --------
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
-
- >>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
- y: array(3)
-
- >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
- y: array([4])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
- verbose=verbose,
- header='Arrays are not less-ordered',
- equal_inf=False)
-
-
-def runstring(astr, dict):
- exec(astr, dict)
-
-
-def assert_string_equal(actual, desired):
- """
- Test if two strings are equal.
-
- If the given strings are equal, `assert_string_equal` does nothing.
- If they are not equal, an AssertionError is raised, and the diff
- between the strings is shown.
-
- Parameters
- ----------
- actual : str
- The string to test for equality against the expected string.
- desired : str
- The expected string.
-
- Examples
- --------
- >>> np.testing.assert_string_equal('abc', 'abc')
- >>> np.testing.assert_string_equal('abc', 'abcd')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ...
- AssertionError: Differences in strings:
- - abc+ abcd? +
-
- """
- # delay import of difflib to reduce startup time
- __tracebackhide__ = True # Hide traceback for py.test
- import difflib
-
- if not isinstance(actual, str):
- raise AssertionError(repr(type(actual)))
- if not isinstance(desired, str):
- raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
- return
-
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
- diff_list = []
- while diff:
- d1 = diff.pop(0)
- if d1.startswith(' '):
- continue
- if d1.startswith('- '):
- l = [d1]
- d2 = diff.pop(0)
- if d2.startswith('? '):
- l.append(d2)
- d2 = diff.pop(0)
- if not d2.startswith('+ '):
- raise AssertionError(repr(d2))
- l.append(d2)
- if diff:
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
- continue
- diff_list.extend(l)
- continue
- raise AssertionError(repr(d1))
- if not diff_list:
- return
- msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
- if actual != desired:
- raise AssertionError(msg)
-
-
def rundocs(filename=None, raise_on_error=True):
    """
    Run doctests found in the given file.

    By default `rundocs` raises an AssertionError on failure.

    Parameters
    ----------
    filename : str, optional
        The path to the file whose doctests are run. Defaults to the file
        of the caller (looked up via the calling frame's ``__file__``).
    raise_on_error : bool, optional
        Whether to raise an AssertionError when a doctest fails. Default is
        True.

    Notes
    -----
    The doctests can be run by the user/developer by adding the ``doctests``
    argument to the ``test()`` call. For example, to run all tests (including
    doctests) for `numpy.lib`:

    >>> np.lib.test(doctests=True) #doctest: +SKIP
    """
    from numpy.compat import npy_load_module
    import doctest

    if filename is None:
        # Default to the caller's own file.
        filename = sys._getframe(1).f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    module = npy_load_module(name, filename)

    # Collect runner output only when we intend to report it in the error.
    messages = []
    collect = messages.append if raise_on_error else None

    runner = doctest.DocTestRunner(verbose=False)
    for test in doctest.DocTestFinder().find(module):
        runner.run(test, out=collect)

    if raise_on_error and runner.failures > 0:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(messages))
-
-
def raises(*args, **kwargs):
    """Nose test decorator: the wrapped test passes only if it raises one of
    the given exception classes (thin wrapper around ``nose.tools.raises``).
    """
    return import_nose().tools.raises(*args, **kwargs)
-
-
def assert_raises(*args, **kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    assert_raises(exception_class)

    Fail unless an exception of class `exception_class` is raised by the
    callable when invoked with the given positional and keyword arguments.
    An exception of any other type is not caught, so the test case is
    reported as an error rather than a failure, exactly as for an
    unexpected exception.

    Alternatively, `assert_raises` can be used as a context manager:

    >>> from numpy.testing import assert_raises
    >>> with assert_raises(ZeroDivisionError):
    ...     1 / 0

    is equivalent to

    >>> def div(x, y):
    ...     return x / y
    >>> assert_raises(ZeroDivisionError, div, 1, 0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    return import_nose().tools.assert_raises(*args, **kwargs)
-
-
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """
    assert_raises_regex(exception_class, expected_regexp, callable, *args,
                        **kwargs)
    assert_raises_regex(exception_class, expected_regexp)

    Fail unless an exception of class `exception_class` whose message matches
    `expected_regexp` is raised by the callable when invoked with the given
    positional and keyword arguments.

    Alternatively, can be used as a context manager like `assert_raises`.

    The name of this function follows the Python 3.2+ spelling, but it works
    on all supported versions down to 2.6.

    Notes
    -----
    .. versionadded:: 1.9.0

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    nose = import_nose()

    # unittest (and hence nose) renamed assert_raises_regexp to
    # assert_raises_regex in Python 3; dispatch on the interpreter.
    if sys.version_info.major >= 3:
        impl = nose.tools.assert_raises_regex
    else:
        # Only present in Python 2.7, missing from unittest in 2.6
        impl = nose.tools.assert_raises_regexp

    return impl(exception_class, expected_regexp, *args, **kwargs)
-
-
-def decorate_methods(cls, decorator, testmatch=None):
- """
- Apply a decorator to all methods in a class matching a regular expression.
-
- The given decorator is applied to all public methods of `cls` that are
- matched by the regular expression `testmatch`
- (``testmatch.search(methodname)``). Methods that are private, i.e. start
- with an underscore, are ignored.
-
- Parameters
- ----------
- cls : class
- Class whose methods to decorate.
- decorator : function
- Decorator to apply to methods
- testmatch : compiled regexp or str, optional
- The regular expression. Default value is None, in which case the
- nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
- is used.
- If `testmatch` is a string, it is compiled to a regular expression
- first.
-
- """
- if testmatch is None:
- testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
- else:
- testmatch = re.compile(testmatch)
- cls_attr = cls.__dict__
-
- # delayed import to reduce startup time
- from inspect import isfunction
-
- methods = [_m for _m in cls_attr.values() if isfunction(_m)]
- for function in methods:
- try:
- if hasattr(function, 'compat_func_name'):
- funcname = function.compat_func_name
- else:
- funcname = function.__name__
- except AttributeError:
- # not a function
- continue
- if testmatch.search(funcname) and not funcname.startswith('_'):
- setattr(cls, funcname, decorator(function))
- return
-
-
def measure(code_str, times=1, label=None):
    """
    Return elapsed time for executing code in the namespace of the caller.

    The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milli-seconds. If the code will execute
    fast on this timescale, it can be executed many times to get reasonable
    timing accuracy.

    Parameters
    ----------
    code_str : str
        The code to be timed.
    times : int, optional
        The number of times the code is executed. Default is 1. The code is
        only compiled once.
    label : str, optional
        A label to identify `code_str` with. This is passed into ``compile``
        as the second argument (for run-time error messages).

    Returns
    -------
    elapsed : float
        Total elapsed time in seconds for executing `code_str` `times` times.

    Examples
    --------
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
    ...                            times=times)
    >>> print("Time for a single execution : ", etime / times, "s")
    Time for a single execution :  0.005 s

    """
    # Run in the caller's namespace so the snippet can see its variables.
    caller = sys._getframe(1)
    locs, globs = caller.f_locals, caller.f_globals

    code = compile(code_str, 'Test name: %s ' % label, 'exec')
    start = jiffies()
    for _ in range(times):
        exec(code, globs, locs)
    elapsed = jiffies() - start
    # jiffies are hundredths of a second.
    return 0.01 * elapsed
-
-
def _assert_valid_refcount(op):
    """
    Check that ufuncs don't mishandle refcount of object `1`.
    Used in a few regression tests.
    """
    if not HAS_REFCOUNT:
        # Refcounts are not observable on this interpreter (e.g. PyPy);
        # nothing meaningful to check.
        return True
    import numpy as np

    b = np.arange(100*100).reshape(100, 100)
    c = b
    i = 1

    # Baseline refcount of the small int `1`; a buggy ufunc that wrongly
    # decrefs cached objects would drive it below this value.
    rc = sys.getrefcount(i)
    for j in range(15):
        d = op(b, c)
        assert_(sys.getrefcount(i) >= rc)
        del d  # for pyflakes
-
-
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
                    err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    tolerance.

    The test is equivalent to ``allclose(actual, desired, rtol, atol)``:
    the difference between `actual` and `desired` is compared against
    ``atol + rtol * abs(desired)``.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp

    Examples
    --------
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    def comparison(x, y):
        # Element-wise closeness predicate handed to assert_array_compare.
        return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
                                       equal_nan=equal_nan)

    actual = np.asanyarray(actual)
    desired = np.asanyarray(desired)
    header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
    assert_array_compare(comparison, actual, desired, err_msg=str(err_msg),
                         verbose=verbose, header=header, equal_nan=equal_nan)
-
-
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose amplitude
    is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see Notes).
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is larger
        than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)

    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
    Traceback (most recent call last):
      ...
    AssertionError: X and Y are not equal to 1 ULP (max is 2)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    abs_x = np.abs(x)
    abs_y = np.abs(y)
    # Allowed deviation: `nulp` spacings at the larger magnitude.
    # (np.where, not np.maximum, to keep the original NaN handling.)
    tol = nulp * np.spacing(np.where(abs_x > abs_y, abs_x, abs_y))
    if np.all(np.abs(x - y) <= tol):
        return
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        msg = "X and Y are not equal to %d ULP" % nulp
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
    raise AssertionError(msg)
-
-
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a` and
        `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers between
        items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.

    Examples
    --------
    >>> a = np.linspace(0., 1., 100)
    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    diffs = nulp_diff(a, b, dtype)
    # `not all(<=)` rather than `any(>)` so that NaN differences fail too.
    if not np.all(diffs <= maxulp):
        raise AssertionError("Arrays are not almost equal up to %g ULP" %
                             maxulp)
    return diffs
-
-
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    point values between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such as 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np
    if dtype:
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
    else:
        x = np.array(x)
        y = np.array(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if x.shape != y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    # In the sign-magnitude integer encoding, adjacent floats differ by
    # exactly 1, so the ULP distance is just the integer difference.
    return np.abs(np.array(integer_repr(x) - integer_repr(y), dtype=t))
-
-
def _integer_repr(x, vdt, comp):
    """Reinterpret the float bits of `x` as signed integers of type `vdt`,
    converting two's-complement negatives to a sign-magnitude encoding so
    that consecutive floats map to consecutive integers.

    See also
    http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
    """
    bits = x.view(vdt)
    if bits.size == 1:
        if bits < 0:
            bits = comp - bits
    else:
        # NOTE: mutates the view (and hence x's buffer) in place.
        bits[bits < 0] = comp - bits[bits < 0]

    return bits
-
-
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation
    of x (float32 or float64 arrays only)."""
    import numpy as np
    dt = x.dtype
    if dt == np.float32:
        return _integer_repr(x, np.int32, np.int32(-2**31))
    if dt == np.float64:
        return _integer_repr(x, np.int64, np.int64(-2**63))
    raise ValueError("Unsupported dtype %s" % dt)
-
-
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
class WarningMessage(object):

    """
    Holds the result of a single showwarning() call.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningMessage` is copied from the Python 2.6 warnings module,
    so it can be used in NumPy with older Python versions.

    """

    # Attribute names mirroring the arguments of warnings.showwarning().
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
-
-
-class WarningManager(object):
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
-
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
-
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
- def __exit__(self):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    """Context manager raising AssertionError if no `warning_class` warning
    was issued inside the block; `name` (if given) is mentioned in the error.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings() as sup:
        recorded = sup.record(warning_class)
        yield
        if len(recorded) == 0:
            suffix = " when calling %s" % name if name is not None else ""
            raise AssertionError("No warning raised" + suffix)
-
-
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class `warning_class` should be thrown by the callable when
    invoked with arguments `args` and keyword arguments `kwargs`.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may be
    used as a context manager:

        with assert_warns(SomeWarning):
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    if not args:
        # Context-manager form: assert_warns(SomeWarning).
        return _assert_warns_context(warning_class)

    func, rest = args[0], args[1:]
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*rest, **kwargs)
-
-
@contextlib.contextmanager
def _assert_no_warnings_context(name=None):
    """Context manager raising AssertionError if any warning is issued inside
    the block; `name` (if given) is mentioned in the error message.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        yield
        if len(caught) > 0:
            suffix = " when calling %s" % name if name is not None else ""
            raise AssertionError("Got warnings%s: %s" % (suffix, caught))
-
-
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context manager:

        with assert_no_warnings():
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    if not args:
        # Context-manager form: assert_no_warnings().
        return _assert_no_warnings_context()

    func, rest = args[0], args[1:]
    with _assert_no_warnings_context(name=func.__name__):
        return func(*rest, **kwargs)
-
-
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
    """
    generator producing data with different alignment and offsets
    to test simd vectorization

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations, creates one input
                 and output array
        'binary': create data for binary operations, creates two input
                 and output array
    max_size : integer
        maximum size of data to produce

    Returns
    -------
    if type is 'unary' yields one output, one input array and a message
    containing information on the data
    if type is 'binary' yields one output array, two input array and a message
    containing information on the data

    """
    # Format strings describing each yielded case: offsets, size, dtype, kind.
    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
    # o is the element offset into a freshly allocated buffer; slicing off
    # the first o elements shifts the data's alignment in memory.
    for o in range(3):
        for s in range(o + 2, max(o + 3, max_size)):
            if type == 'unary':
                # inp is a factory so aliased/in-place cases get fresh or
                # shared buffers as appropriate.
                inp = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
                d = inp()
                yield d, d, ufmt % (o, o, s, dtype, 'in place')
                # Shifted views exercise mismatched input/output alignment.
                yield out[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'out of place')
                # Overlapping views of the same buffer ('aliased').
                yield inp()[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'aliased')
                yield inp()[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'aliased')
            if type == 'binary':
                inp1 = lambda: arange(s, dtype=dtype)[o:]
                inp2 = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp1(), inp2(), bfmt % \
                    (o, o, o, s, dtype, 'out of place')
                d = inp1()
                yield d, d, inp2(), bfmt % \
                    (o, o, o, s, dtype, 'in place1')
                d = inp2()
                yield d, inp1(), d, bfmt % \
                    (o, o, o, s, dtype, 'in place2')
                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'out of place')
                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'aliased')
-
-
class IgnoreException(Exception):
    """Ignoring this exception due to disabled feature"""
-
-
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager to provide a temporary test folder.

    All arguments are passed through to the underlying tempfile.mkdtemp
    function. The directory and its contents are removed on exit.

    """
    path = mkdtemp(*args, **kwargs)
    try:
        yield path
    finally:
        # Always clean up, even when the body raised.
        shutil.rmtree(path)
-
-
@contextlib.contextmanager
def temppath(*args, **kwargs):
    """Context manager for temporary files.

    Yields the path to a closed temporary file; parameters are the same as
    for tempfile.mkstemp and are passed directly to that function. The
    underlying file is removed when the context is exited, so it should be
    closed at that time.

    Windows does not allow a temporary file to be opened if it is already
    open, so the underlying file must be closed after opening before it
    can be opened again.

    """
    fd, path = mkstemp(*args, **kwargs)
    # Close the OS-level handle immediately so the caller can reopen the file
    # (required on Windows).
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
-
-
class clear_and_catch_warnings(warnings.catch_warnings):
    """ Context manager that resets warning registry for catching warnings

    Warnings can be slippery, because, whenever a warning is triggered, Python
    adds a ``__warningregistry__`` member to the *calling* module. This makes
    it impossible to retrigger the warning in this module, whatever you put in
    the warnings filters. This context manager accepts a sequence of `modules`
    as a keyword argument to its constructor and:

    * stores and removes any ``__warningregistry__`` entries in given `modules`
      on entry;
    * resets ``__warningregistry__`` to its previous state on exit.

    This makes it possible to trigger any warning afresh inside the context
    manager without disturbing the state of warnings outside.

    For compatibility with Python 3.0, please consider all arguments to be
    keyword-only.

    Parameters
    ----------
    record : bool, optional
        Specifies whether warnings should be captured by a custom
        implementation of ``warnings.showwarning()`` and be appended to a list
        returned by the context manager. Otherwise None is returned by the
        context manager. The objects appended to the list are arguments whose
        attributes mirror the arguments to ``showwarning()``.
    modules : sequence, optional
        Sequence of modules for which to reset warnings registry on entry and
        restore on exit. To work correctly, all 'ignore' filters should
        filter by one of these modules.

    Examples
    --------
    >>> import warnings
    >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
    ...     warnings.simplefilter('always')
    ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
    ...     # do something that raises a warning but ignore those in
    ...     # np.core.fromnumeric
    """
    # Subclasses may pre-populate this with modules whose registries should
    # always be managed, in addition to those passed to __init__.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        # Saved copies of each module's __warningregistry__, restored on exit.
        self._warnreg_copies = {}
        super(clear_and_catch_warnings, self).__init__(record=record)

    def __enter__(self):
        # Stash and clear each managed module's registry so previously-raised
        # warnings can trigger again inside the context.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                mod_reg = mod.__warningregistry__
                self._warnreg_copies[mod] = mod_reg.copy()
                mod_reg.clear()
        return super(clear_and_catch_warnings, self).__enter__()

    def __exit__(self, *exc_info):
        super(clear_and_catch_warnings, self).__exit__(*exc_info)
        # Drop registry entries accumulated inside the context, then restore
        # the pre-context state saved in __enter__.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                mod.__warningregistry__.clear()
            if mod in self._warnreg_copies:
                mod.__warningregistry__.update(self._warnreg_copies[mod])
-
-
class suppress_warnings(object):
    """
    Context manager and decorator doing much the same as
    ``warnings.catch_warnings``.

    However, it also provides a filter mechanism to work around
    http://bugs.python.org/issue4180.

    This bug causes Python before 3.4 to not reliably show warnings again
    after they have been ignored once (even within catch_warnings). It
    means that no "ignore" filter can be used easily, since following
    tests might need to see the warning. Additionally it allows easier
    specificity for testing warnings and can be nested.

    Parameters
    ----------
    forwarding_rule : str, optional
        One of "always", "once", "module", or "location". Analogous to
        the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outmost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", match by exact
        location the warning warning originated from.

    Notes
    -----
    Filters added inside the context manager will be discarded again
    when leaving it. Upon entering all filters defined outside a
    context will be applied automatically.

    When a recording filter is added, matching warnings are stored in the
    ``log`` attribute as well as in the list returned by ``record``.

    If filters are added and the ``module`` keyword is given, the
    warning registry of this module will additionally be cleared when
    applying it, entering the context, or exiting it. This could cause
    warnings to appear a second time after leaving the context if they
    were configured to be printed once (default) and were already
    printed before the context was entered.

    Nesting this context manager will work as expected when the
    forwarding rule is "always" (default). Unfiltered and unrecorded
    warnings will be passed out and be matched by the outer level.
    On the outmost level they will be printed (or caught by another
    warnings context). The forwarding rule argument can modify this
    behaviour.

    Like ``catch_warnings`` this context manager is not threadsafe.

    Examples
    --------
    >>> with suppress_warnings() as sup:
    ...     sup.filter(DeprecationWarning, "Some text")
    ...     sup.filter(module=np.ma.core)
    ...     log = sup.record(FutureWarning, "Does this occur?")
    ...     command_giving_warnings()
    ...     # The FutureWarning was given once, the filtered warnings were
    ...     # ignored. All other warnings abide outside settings (may be
    ...     # printed/error)
    ...     assert_(len(log) == 1)
    ...     assert_(len(sup.log) == 1)  # also stored in log attribute

    Or as a decorator:

    >>> sup = suppress_warnings()
    >>> sup.filter(module=np.ma.core)  # module must match exact
    >>> @sup
    >>> def some_function():
    ...     # do something which causes a warning in np.ma.core
    ...     pass
    """
    def __init__(self, forwarding_rule="always"):
        self._entered = False

        # Suppressions are either instance or defined inside one with block:
        self._suppressions = []

        if forwarding_rule not in {"always", "module", "once", "location"}:
            raise ValueError("unsupported forwarding rule.")
        self._forwarding_rule = forwarding_rule

    def _clear_registries(self):
        # Make previously-raised warnings eligible to trigger again.
        if hasattr(warnings, "_filters_mutated"):
            # clearing the registry should not be necessary on new pythons,
            # instead the filters should be mutated.
            warnings._filters_mutated()
            return
        # Simply clear the registry, this should normally be harmless,
        # note that on new pythons it would be invalidated anyway.
        for module in self._tmp_modules:
            if hasattr(module, "__warningregistry__"):
                module.__warningregistry__.clear()

    def _filter(self, category=Warning, message="", module=None, record=False):
        # Shared implementation behind filter() and record(); returns the
        # recording list when record=True, else None.
        if record:
            record = []  # The log where to store warnings
        else:
            record = None
        if self._entered:
            # Already inside the context: apply the filter immediately and
            # remember it only for the duration of the context.
            if module is None:
                warnings.filterwarnings(
                    "always", category=category, message=message)
            else:
                module_regex = module.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=category, message=message,
                    module=module_regex)
                self._tmp_modules.add(module)
                self._clear_registries()

            self._tmp_suppressions.append(
                (category, message, re.compile(message, re.I), module, record))
        else:
            self._suppressions.append(
                (category, message, re.compile(message, re.I), module, record))

        return record

    def filter(self, category=Warning, message="", module=None):
        """
        Add a new suppressing filter or apply it if the state is entered.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        self._filter(category=category, message=message, module=module,
                     record=False)

    def record(self, category=Warning, message="", module=None):
        """
        Append a new recording filter or apply it if the state is entered.

        All warnings matching will be appended to the ``log`` attribute.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Returns
        -------
        log : list
            A list which will be filled with all matched warnings.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        return self._filter(category=category, message=message, module=module,
                            record=True)

    def __enter__(self):
        if self._entered:
            raise RuntimeError("cannot enter suppress_warnings twice.")

        # Save the global showwarning hook and filter list; work on a copy
        # of the filters so __exit__ can restore the originals.
        self._orig_show = warnings.showwarning
        self._filters = warnings.filters
        warnings.filters = self._filters[:]

        self._entered = True
        self._tmp_suppressions = []
        self._tmp_modules = set()
        self._forwarded = set()

        self.log = []  # reset global log (no need to keep same list)

        # Apply every suppression defined before entering.
        for cat, mess, _, mod, log in self._suppressions:
            if log is not None:
                del log[:]  # clear the log
            if mod is None:
                warnings.filterwarnings(
                    "always", category=cat, message=mess)
            else:
                module_regex = mod.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=cat, message=mess,
                    module=module_regex)
                self._tmp_modules.add(mod)
        warnings.showwarning = self._showwarning
        self._clear_registries()

        return self

    def __exit__(self, *exc_info):
        # Restore the hook and filters saved in __enter__.
        warnings.showwarning = self._orig_show
        warnings.filters = self._filters
        self._clear_registries()
        self._entered = False
        del self._orig_show
        del self._filters

    def _showwarning(self, message, category, filename, lineno,
                     *args, **kwargs):
        # Installed as warnings.showwarning while entered: record or drop
        # matching warnings, forward the rest per the forwarding rule.
        use_warnmsg = kwargs.pop("use_warnmsg", None)
        for cat, _, pattern, mod, rec in (
                self._suppressions + self._tmp_suppressions)[::-1]:
            if (issubclass(category, cat) and
                    pattern.match(message.args[0]) is not None):
                if mod is None:
                    # Message and category match, either recorded or ignored
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return
                # Use startswith, because warnings strips the c or o from
                # .pyc/.pyo files.
                elif mod.__file__.startswith(filename):
                    # The message and module (filename) match
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return

        # There is no filter in place, so pass to the outside handler
        # unless we should only pass it once
        if self._forwarding_rule == "always":
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno,
                                *args, **kwargs)
            else:
                # NOTE(review): self._orig_showmsg is never assigned anywhere
                # in this class, and use_warnmsg is never passed by this file;
                # this branch appears unreachable here — confirm before use.
                self._orig_showmsg(use_warnmsg)
            return

        # Forward each distinct warning only once, where "distinct" depends
        # on the configured forwarding rule.
        if self._forwarding_rule == "once":
            signature = (message.args, category)
        elif self._forwarding_rule == "module":
            signature = (message.args, category, filename)
        elif self._forwarding_rule == "location":
            signature = (message.args, category, filename, lineno)

        if signature in self._forwarded:
            return
        self._forwarded.add(signature)
        if use_warnmsg is None:
            self._orig_show(message, category, filename, lineno, *args,
                            **kwargs)
        else:
            # NOTE(review): see the unreachable-branch note above.
            self._orig_showmsg(use_warnmsg)

    def __call__(self, func):
        """
        Function decorator to apply certain suppressions to a whole
        function.
        """
        @wraps(func)
        def new_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return new_func
+from .nose_tools.utils import *
diff --git a/numpy/tests/__init__.py b/numpy/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/tests/__init__.py
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 2c58f1184..e8043d057 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from numpy.distutils.misc_util import get_shared_lib_extension
-from numpy.testing import TestCase, run_module_suite, dec
+from numpy.testing import run_module_suite, assert_, assert_raises, dec
try:
cdll = None
@@ -20,7 +20,7 @@ try:
except ImportError:
_HAS_CTYPE = False
-class TestLoadLibrary(TestCase):
+class TestLoadLibrary(object):
@dec.skipif(not _HAS_CTYPE,
"ctypes not available on this python installation")
@dec.knownfailureif(sys.platform ==
@@ -53,65 +53,65 @@ class TestLoadLibrary(TestCase):
" (import error was: %s)" % str(e))
print(msg)
-class TestNdpointer(TestCase):
+class TestNdpointer(object):
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.array([1], dt)))
+ assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.array([1], dt)))
+ assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
- self.assertRaises(TypeError, p.from_param,
+ assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.zeros((10,), dt)))
+ assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
- self.assertTrue(p.from_param(np.zeros((10,), dt)))
+ assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
- self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
+ assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
- self.assertTrue(p.from_param(np.zeros((10,), dt2)))
+ assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
- self.assertTrue(p.from_param(np.array(1)))
- self.assertRaises(TypeError, p.from_param, np.array([1]))
+ assert_(p.from_param(np.array(1)))
+ assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
- self.assertRaises(TypeError, p.from_param, np.array(1))
- self.assertTrue(p.from_param(np.array([1])))
+ assert_raises(TypeError, p.from_param, np.array(1))
+ assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
- self.assertTrue(p.from_param(np.array([[1]])))
+ assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
- self.assertTrue(p.from_param(np.array([[1, 2]])))
- self.assertRaises(TypeError, p.from_param, np.array([[1], [2]]))
+ assert_(p.from_param(np.array([[1, 2]])))
+ assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
- self.assertTrue(p.from_param(np.array(1)))
+ assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
- self.assertTrue(p.from_param(x))
+ assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
- self.assertRaises(TypeError, p.from_param, x)
+ assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
- self.assertTrue(p.from_param(x))
- self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+ assert_(p.from_param(x))
+ assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
a1 = ndpointer(dtype=np.float64)
a2 = ndpointer(dtype=np.float64)
- self.assertEqual(a1, a2)
+ assert_(a1 == a2)
if __name__ == "__main__":
diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py
index 3ff6cd7ed..11227b19a 100644
--- a/numpy/tests/test_matlib.py
+++ b/numpy/tests/test_matlib.py
@@ -24,7 +24,7 @@ def test_zeros():
assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]]))
def test_identity():
- x = numpy.matlib.identity(2, dtype=np.int)
+ x = numpy.matlib.identity(2, dtype=int)
assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
def test_eye():
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 489e0c6e5..675fe6575 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -11,8 +11,7 @@ from subprocess import Popen, PIPE
import numpy as np
from numpy.compat.py3k import basestring
from nose.tools import assert_equal
-from numpy.testing.decorators import skipif
-from numpy.testing import assert_
+from numpy.testing import assert_, dec
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
@@ -59,7 +58,7 @@ def run_command(cmd, check_code=True):
return proc.returncode, stdout, stderr
-@skipif(is_inplace)
+@dec.skipif(is_inplace)
def test_f2py():
# test that we can run f2py script
if sys.platform == 'win32':
@@ -87,7 +86,7 @@ def test_f2py():
assert_equal(stdout.strip(), b'2')
success = True
break
- except:
+ except Exception:
pass
msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds
assert_(success, msg)
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index c5818d21c..7f22794ec 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -13,9 +13,7 @@ if sys.version_info >= (3, 4):
import ast
import tokenize
import numpy
- from numpy.testing import run_module_suite
- from numpy.testing.decorators import slow
-
+ from numpy.testing import run_module_suite, dec
class ParseCall(ast.NodeVisitor):
def __init__(self):
@@ -63,7 +61,7 @@ if sys.version_info >= (3, 4):
"{} on line {}".format(self.__filename, node.lineno))
- @slow
+ @dec.slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(numpy.__file__).parent