summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.mailmap3
-rw-r--r--.travis.yml6
-rw-r--r--MANIFEST.in1
-rw-r--r--README.md2
-rw-r--r--benchmarks/benchmarks/bench_core.py2
-rw-r--r--benchmarks/benchmarks/bench_reduce.py4
-rw-r--r--benchmarks/benchmarks/bench_shape_base.py89
-rw-r--r--benchmarks/benchmarks/bench_ufunc.py2
-rw-r--r--doc/CAPI.rst.txt5
-rw-r--r--doc/Makefile17
-rwxr-xr-xdoc/cdoc/numpyfilter.py2
-rw-r--r--doc/changelog/1.12.0-changelog.rst573
-rw-r--r--doc/changelog/1.12.1-changelog.rst39
-rw-r--r--doc/changelog/1.13.0-changelog.rst426
-rw-r--r--doc/changelog/1.13.1-changelog.rst44
-rw-r--r--doc/f2py/BUGS.txt55
-rw-r--r--doc/f2py/FAQ.txt603
-rw-r--r--doc/f2py/HISTORY.txt1043
-rw-r--r--doc/f2py/Makefile76
-rw-r--r--doc/f2py/OLDNEWS.txt93
-rw-r--r--doc/f2py/README.txt415
-rw-r--r--doc/f2py/Release-1.x.txt27
-rw-r--r--doc/f2py/Release-2.x.txt77
-rw-r--r--doc/f2py/Release-3.x.txt87
-rw-r--r--doc/f2py/Release-4.x.txt91
-rw-r--r--doc/f2py/TESTING.txt108
-rw-r--r--doc/f2py/THANKS.txt63
-rw-r--r--doc/f2py/TODO.txt67
-rw-r--r--doc/f2py/apps.tex71
-rw-r--r--doc/f2py/bugs.tex109
-rwxr-xr-xdoc/f2py/collectinput.py83
-rw-r--r--doc/f2py/commands.tex20
-rw-r--r--doc/f2py/default.css180
-rw-r--r--doc/f2py/docutils.conf16
-rw-r--r--doc/f2py/ex1/arr.f4
-rw-r--r--doc/f2py/ex1/bar.f4
-rw-r--r--doc/f2py/ex1/foo.f5
-rw-r--r--doc/f2py/ex1/foobar-smart.f9024
-rw-r--r--doc/f2py/ex1/foobar.f9016
-rw-r--r--doc/f2py/ex1/foobarmodule.tex36
-rwxr-xr-xdoc/f2py/ex1/runme18
-rw-r--r--doc/f2py/f2py.1209
-rw-r--r--doc/f2py/f2py2e.tex50
-rw-r--r--doc/f2py/f2python9-final/README.txt38
-rw-r--r--doc/f2py/f2python9-final/aerostructure.jpgbin72247 -> 0 bytes
-rw-r--r--doc/f2py/f2python9-final/flow.jpgbin13266 -> 0 bytes
-rwxr-xr-xdoc/f2py/f2python9-final/mk_html.sh13
-rwxr-xr-xdoc/f2py/f2python9-final/mk_pdf.sh13
-rwxr-xr-xdoc/f2py/f2python9-final/mk_ps.sh14
-rw-r--r--doc/f2py/f2python9-final/src/examples/exp1.f26
-rw-r--r--doc/f2py/f2python9-final/src/examples/exp1mess.txt17
-rw-r--r--doc/f2py/f2python9-final/src/examples/exp1session.txt20
-rw-r--r--doc/f2py/f2python9-final/src/examples/foo.pyf13
-rw-r--r--doc/f2py/f2python9-final/src/examples/foom.pyf14
-rw-r--r--doc/f2py/f2python9-final/structure.jpgbin17860 -> 0 bytes
-rw-r--r--doc/f2py/fortranobject.tex574
-rw-r--r--doc/f2py/hello.f7
-rw-r--r--doc/f2py/index.html264
-rw-r--r--doc/f2py/intro.tex158
-rw-r--r--doc/f2py/multiarray/array_from_pyobj.c323
-rw-r--r--doc/f2py/multiarray/bar.c15
-rw-r--r--doc/f2py/multiarray/foo.f13
-rw-r--r--doc/f2py/multiarray/fortran_array_from_pyobj.txt284
-rw-r--r--doc/f2py/multiarray/fun.pyf89
-rw-r--r--doc/f2py/multiarray/run.pyf91
-rw-r--r--doc/f2py/multiarray/transpose.txt1126
-rw-r--r--doc/f2py/multiarrays.txt119
-rw-r--r--doc/f2py/notes.tex310
-rw-r--r--doc/f2py/oldnews.html121
-rw-r--r--doc/f2py/options.tex63
-rw-r--r--doc/f2py/pyforttest.pyf5
-rw-r--r--doc/f2py/pytest.py12
-rw-r--r--doc/f2py/python9.tex1044
-rw-r--r--doc/f2py/signaturefile.tex368
-rw-r--r--doc/f2py/simple.f13
-rw-r--r--doc/f2py/simple_session.dat51
-rw-r--r--doc/f2py/using_F_compiler.txt147
-rw-r--r--doc/f2py/win32_notes.txt84
-rw-r--r--doc/neps/ufunc-overrides.rst19
-rw-r--r--doc/release/1.10.0-notes.rst95
-rw-r--r--doc/release/1.10.1-notes.rst3
-rw-r--r--doc/release/1.10.2-notes.rst9
-rw-r--r--doc/release/1.10.3-notes.rst3
-rw-r--r--doc/release/1.10.4-notes.rst3
-rw-r--r--doc/release/1.11.0-notes.rst55
-rw-r--r--doc/release/1.11.1-notes.rst3
-rw-r--r--doc/release/1.11.2-notes.rst3
-rw-r--r--doc/release/1.12.0-notes.rst573
-rw-r--r--doc/release/1.12.1-notes.rst52
-rw-r--r--doc/release/1.13.0-notes.rst22
-rw-r--r--doc/release/1.13.1-notes.rst60
-rw-r--r--doc/release/1.14.0-notes.rst296
-rw-r--r--doc/release/1.3.0-notes.rst43
-rw-r--r--doc/release/1.4.0-notes.rst31
-rw-r--r--doc/release/1.5.0-notes.rst23
-rw-r--r--doc/release/1.6.0-notes.rst31
-rw-r--r--doc/release/1.6.1-notes.rst3
-rw-r--r--doc/release/1.6.2-notes.rst15
-rw-r--r--doc/release/1.7.0-notes.rst33
-rw-r--r--doc/release/1.7.1-notes.rst3
-rw-r--r--doc/release/1.7.2-notes.rst3
-rw-r--r--doc/release/1.8.0-notes.rst61
-rw-r--r--doc/release/1.8.1-notes.rst9
-rw-r--r--doc/release/1.8.2-notes.rst3
-rw-r--r--doc/release/1.9.0-notes.rst101
-rw-r--r--doc/release/1.9.1-notes.rst3
-rw-r--r--doc/release/1.9.2-notes.rst3
-rw-r--r--doc/source/about.rst3
-rw-r--r--doc/source/conf.py14
-rw-r--r--doc/source/dev/gitwash/development_setup.rst17
-rw-r--r--doc/source/dev/governance/people.rst12
-rw-r--r--doc/source/f2py/index.rst32
-rw-r--r--doc/source/reference/arrays.datetime.rst3
-rw-r--r--doc/source/reference/c-api.array.rst44
-rw-r--r--doc/source/reference/internals.code-explanations.rst6
-rw-r--r--doc/source/reference/maskedarray.generic.rst4
-rw-r--r--doc/source/reference/routines.linalg.rst1
-rw-r--r--doc/source/reference/routines.logic.rst1
-rw-r--r--doc/source/reference/routines.math.rst1
-rw-r--r--doc/source/reference/routines.polynomials.classes.rst48
-rw-r--r--doc/source/reference/routines.testing.rst2
-rw-r--r--doc/source/reference/ufuncs.rst18
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst92
-rw-r--r--doc/source/user/building.rst4
-rw-r--r--doc/source/user/c-info.beyond-basics.rst4
-rw-r--r--doc/source/user/c-info.ufunc-tutorial.rst2
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst20
-rw-r--r--doc/source/user/quickstart.rst34
-rw-r--r--numpy/__init__.py6
-rw-r--r--numpy/_globals.py2
-rw-r--r--numpy/_import_tools.py3
-rw-r--r--numpy/add_newdocs.py82
-rw-r--r--numpy/compat/tests/__init__.py0
-rw-r--r--numpy/compat/tests/test_compat.py2
-rw-r--r--numpy/conftest.py54
-rw-r--r--numpy/core/__init__.py2
-rw-r--r--numpy/core/_internal.py21
-rw-r--r--numpy/core/arrayprint.py271
-rw-r--r--numpy/core/code_generators/cversions.txt1
-rw-r--r--numpy/core/code_generators/genapi.py7
-rw-r--r--numpy/core/code_generators/generate_numpy_api.py9
-rw-r--r--numpy/core/code_generators/generate_umath.py4
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py42
-rw-r--r--numpy/core/einsumfunc.py165
-rw-r--r--numpy/core/fromnumeric.py32
-rw-r--r--numpy/core/function_base.py10
-rw-r--r--numpy/core/getlimits.py42
-rw-r--r--numpy/core/include/numpy/npy_cpu.h6
-rw-r--r--numpy/core/include/numpy/npy_endian.h6
-rw-r--r--numpy/core/include/numpy/numpyconfig.h2
-rw-r--r--numpy/core/numeric.py327
-rw-r--r--numpy/core/numerictypes.py112
-rw-r--r--numpy/core/records.py8
-rw-r--r--numpy/core/setup.py8
-rw-r--r--numpy/core/setup_common.py1
-rw-r--r--numpy/core/shape_base.py14
-rw-r--r--numpy/core/src/multiarray/_datetime.h3
-rw-r--r--numpy/core/src/multiarray/alloc.c11
-rw-r--r--numpy/core/src/multiarray/alloc.h12
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c3
-rw-r--r--numpy/core/src/multiarray/arrayobject.c199
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src272
-rw-r--r--numpy/core/src/multiarray/cblasfuncs.c3
-rw-r--r--numpy/core/src/multiarray/compiled_base.c22
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c5
-rw-r--r--numpy/core/src/multiarray/convert.c9
-rw-r--r--numpy/core/src/multiarray/ctors.c12
-rw-r--r--numpy/core/src/multiarray/datetime.c94
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c3
-rw-r--r--numpy/core/src/multiarray/descriptor.c30
-rw-r--r--numpy/core/src/multiarray/descriptor.h4
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c411
-rw-r--r--numpy/core/src/multiarray/einsum.c.src12
-rw-r--r--numpy/core/src/multiarray/getset.c133
-rw-r--r--numpy/core/src/multiarray/item_selection.c39
-rw-r--r--numpy/core/src/multiarray/iterators.c68
-rw-r--r--numpy/core/src/multiarray/mapping.c366
-rw-r--r--numpy/core/src/multiarray/methods.c60
-rw-r--r--numpy/core/src/multiarray/multiarray_tests.c.src156
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c306
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c20
-rw-r--r--numpy/core/src/multiarray/number.c65
-rw-r--r--numpy/core/src/multiarray/number.h1
-rw-r--r--numpy/core/src/multiarray/scalarapi.c2
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src145
-rw-r--r--numpy/core/src/multiarray/shape.c46
-rw-r--r--numpy/core/src/multiarray/strfuncs.c200
-rw-r--r--numpy/core/src/multiarray/strfuncs.h13
-rw-r--r--numpy/core/src/multiarray/temp_elide.c8
-rw-r--r--numpy/core/src/private/mem_overlap.c7
-rw-r--r--numpy/core/src/private/npy_config.h13
-rw-r--r--numpy/core/src/private/ufunc_override.c7
-rw-r--r--numpy/core/src/umath/extobj.c318
-rw-r--r--numpy/core/src/umath/extobj.h32
-rw-r--r--numpy/core/src/umath/loops.c.src59
-rw-r--r--numpy/core/src/umath/loops.h.src3
-rw-r--r--numpy/core/src/umath/override.c130
-rw-r--r--numpy/core/src/umath/reduction.c15
-rw-r--r--numpy/core/src/umath/reduction.h4
-rw-r--r--numpy/core/src/umath/test_rational.c.src9
-rw-r--r--numpy/core/src/umath/ufunc_object.c419
-rw-r--r--numpy/core/src/umath/ufunc_object.h3
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c141
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h9
-rw-r--r--numpy/core/src/umath/umath_tests.c.src6
-rw-r--r--numpy/core/tests/__init__.py0
-rw-r--r--numpy/core/tests/test_abc.py29
-rw-r--r--numpy/core/tests/test_arrayprint.py26
-rw-r--r--numpy/core/tests/test_datetime.py24
-rw-r--r--numpy/core/tests/test_defchararray.py76
-rw-r--r--numpy/core/tests/test_deprecations.py36
-rw-r--r--numpy/core/tests/test_dtype.py174
-rw-r--r--numpy/core/tests/test_einsum.py57
-rw-r--r--numpy/core/tests/test_errstate.py4
-rw-r--r--numpy/core/tests/test_extint128.py2
-rw-r--r--numpy/core/tests/test_function_base.py8
-rw-r--r--numpy/core/tests/test_getlimits.py24
-rw-r--r--numpy/core/tests/test_half.py9
-rw-r--r--numpy/core/tests/test_indexerrors.py4
-rw-r--r--numpy/core/tests/test_indexing.py40
-rw-r--r--numpy/core/tests/test_item_selection.py6
-rw-r--r--numpy/core/tests/test_longdouble.py6
-rw-r--r--numpy/core/tests/test_machar.py13
-rw-r--r--numpy/core/tests/test_memmap.py24
-rw-r--r--numpy/core/tests/test_multiarray.py922
-rw-r--r--numpy/core/tests/test_nditer.py107
-rw-r--r--numpy/core/tests/test_numeric.py286
-rw-r--r--numpy/core/tests/test_numerictypes.py136
-rw-r--r--numpy/core/tests/test_print.py6
-rw-r--r--numpy/core/tests/test_records.py33
-rw-r--r--numpy/core/tests/test_regression.py371
-rw-r--r--numpy/core/tests/test_scalarinherit.py40
-rw-r--r--numpy/core/tests/test_scalarmath.py54
-rw-r--r--numpy/core/tests/test_scalarprint.py4
-rw-r--r--numpy/core/tests/test_shape_base.py54
-rw-r--r--numpy/core/tests/test_ufunc.py148
-rw-r--r--numpy/core/tests/test_umath.py330
-rw-r--r--numpy/core/tests/test_umath_complex.py100
-rw-r--r--numpy/core/tests/test_unicode.py110
-rw-r--r--numpy/ctypeslib.py2
-rw-r--r--numpy/distutils/ccompiler.py22
-rw-r--r--numpy/distutils/command/build_clib.py102
-rw-r--r--numpy/distutils/command/build_ext.py172
-rw-r--r--numpy/distutils/command/config.py2
-rw-r--r--numpy/distutils/cpuinfo.py8
-rw-r--r--numpy/distutils/fcompiler/__init__.py37
-rw-r--r--numpy/distutils/fcompiler/gnu.py196
-rw-r--r--numpy/distutils/fcompiler/intel.py6
-rw-r--r--numpy/distutils/intelccompiler.py4
-rw-r--r--numpy/distutils/mingw32ccompiler.py22
-rw-r--r--numpy/distutils/misc_util.py51
-rw-r--r--numpy/distutils/msvc9compiler.py10
-rw-r--r--numpy/distutils/system_info.py65
-rw-r--r--numpy/distutils/tests/__init__.py0
-rw-r--r--numpy/distutils/tests/test_exec_command.py84
-rw-r--r--numpy/distutils/tests/test_fcompiler_gnu.py9
-rw-r--r--numpy/distutils/tests/test_fcompiler_intel.py6
-rw-r--r--numpy/distutils/tests/test_misc_util.py10
-rw-r--r--numpy/distutils/tests/test_npy_pkg_config.py42
-rw-r--r--numpy/distutils/tests/test_system_info.py35
-rw-r--r--numpy/doc/basics.py70
-rw-r--r--numpy/doc/creation.py2
-rw-r--r--numpy/doc/glossary.py17
-rw-r--r--numpy/doc/indexing.py2
-rw-r--r--numpy/doc/misc.py3
-rw-r--r--numpy/doc/subclassing.py4
-rw-r--r--numpy/f2py/__init__.py2
-rw-r--r--numpy/f2py/auxfuncs.py2
-rw-r--r--numpy/f2py/capi_maps.py4
-rw-r--r--numpy/f2py/cfuncs.py1000
-rwxr-xr-xnumpy/f2py/crackfortran.py66
-rw-r--r--numpy/f2py/f2py_testing.py2
-rw-r--r--numpy/f2py/src/fortranobject.c119
-rw-r--r--numpy/f2py/tests/__init__.py0
-rw-r--r--numpy/f2py/tests/src/common/block.f11
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py16
-rw-r--r--numpy/f2py/tests/test_assumed_shape.py2
-rw-r--r--numpy/f2py/tests/test_block_docstring.py23
-rw-r--r--numpy/f2py/tests/test_callback.py2
-rw-r--r--numpy/f2py/tests/test_common.py26
-rw-r--r--numpy/f2py/tests/test_kind.py2
-rw-r--r--numpy/f2py/tests/test_mixed.py2
-rw-r--r--numpy/f2py/tests/test_parameter.py2
-rw-r--r--numpy/f2py/tests/test_regression.py2
-rw-r--r--numpy/f2py/tests/test_return_character.py2
-rw-r--r--numpy/f2py/tests/test_return_complex.py2
-rw-r--r--numpy/f2py/tests/test_return_integer.py2
-rw-r--r--numpy/f2py/tests/test_return_logical.py2
-rw-r--r--numpy/f2py/tests/test_return_real.py2
-rw-r--r--numpy/f2py/tests/test_size.py11
-rw-r--r--numpy/f2py/tests/test_string.py2
-rw-r--r--numpy/f2py/tests/util.py2
-rw-r--r--numpy/fft/__init__.py2
-rw-r--r--numpy/fft/tests/__init__.py0
-rw-r--r--numpy/fft/tests/test_fftpack.py14
-rw-r--r--numpy/fft/tests/test_helper.py34
-rw-r--r--numpy/lib/__init__.py2
-rw-r--r--numpy/lib/_iotools.py2
-rw-r--r--numpy/lib/arraypad.py30
-rw-r--r--numpy/lib/arraysetops.py13
-rw-r--r--numpy/lib/format.py51
-rw-r--r--numpy/lib/function_base.py170
-rw-r--r--numpy/lib/index_tricks.py4
-rw-r--r--numpy/lib/nanfunctions.py107
-rw-r--r--numpy/lib/npyio.py29
-rw-r--r--numpy/lib/recfunctions.py165
-rw-r--r--numpy/lib/shape_base.py25
-rw-r--r--numpy/lib/stride_tricks.py7
-rw-r--r--numpy/lib/tests/__init__.py0
-rw-r--r--numpy/lib/tests/test__datasource.py70
-rw-r--r--numpy/lib/tests/test__iotools.py13
-rw-r--r--numpy/lib/tests/test_arraypad.py48
-rw-r--r--numpy/lib/tests/test_arraysetops.py26
-rw-r--r--numpy/lib/tests/test_financial.py6
-rw-r--r--numpy/lib/tests/test_format.py9
-rw-r--r--numpy/lib/tests/test_function_base.py268
-rw-r--r--numpy/lib/tests/test_index_tricks.py20
-rw-r--r--numpy/lib/tests/test_io.py82
-rw-r--r--numpy/lib/tests/test_mixins.py5
-rw-r--r--numpy/lib/tests/test_nanfunctions.py18
-rw-r--r--numpy/lib/tests/test_polynomial.py4
-rw-r--r--numpy/lib/tests/test_recfunctions.py139
-rw-r--r--numpy/lib/tests/test_regression.py71
-rw-r--r--numpy/lib/tests/test_shape_base.py73
-rw-r--r--numpy/lib/tests/test_stride_tricks.py8
-rw-r--r--numpy/lib/tests/test_twodim_base.py121
-rw-r--r--numpy/lib/tests/test_type_check.py46
-rw-r--r--numpy/lib/tests/test_ufunclike.py5
-rw-r--r--numpy/lib/twodim_base.py7
-rw-r--r--numpy/lib/type_check.py40
-rw-r--r--numpy/lib/utils.py6
-rw-r--r--numpy/linalg/__init__.py2
-rw-r--r--numpy/linalg/linalg.py169
-rw-r--r--numpy/linalg/tests/__init__.py0
-rw-r--r--numpy/linalg/tests/test_build.py6
-rw-r--r--numpy/linalg/tests/test_linalg.py44
-rw-r--r--numpy/linalg/tests/test_regression.py59
-rw-r--r--numpy/ma/__init__.py2
-rw-r--r--numpy/ma/core.py318
-rw-r--r--numpy/ma/extras.py8
-rw-r--r--numpy/ma/mrecords.py6
-rw-r--r--numpy/ma/tests/__init__.py0
-rw-r--r--numpy/ma/tests/test_core.py613
-rw-r--r--numpy/ma/tests/test_deprecations.py6
-rw-r--r--numpy/ma/tests/test_extras.py73
-rw-r--r--numpy/ma/tests/test_mrecords.py87
-rw-r--r--numpy/ma/tests/test_old_ma.py578
-rw-r--r--numpy/ma/tests/test_regression.py21
-rw-r--r--numpy/ma/tests/test_subclassing.py112
-rw-r--r--numpy/ma/testutils.py12
-rw-r--r--numpy/ma/timer_comparison.py2
-rw-r--r--numpy/matrixlib/__init__.py2
-rw-r--r--numpy/matrixlib/defmatrix.py6
-rw-r--r--numpy/matrixlib/tests/__init__.py0
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py35
-rw-r--r--numpy/matrixlib/tests/test_multiarray.py4
-rw-r--r--numpy/matrixlib/tests/test_numeric.py4
-rw-r--r--numpy/matrixlib/tests/test_regression.py21
-rw-r--r--numpy/polynomial/__init__.py2
-rw-r--r--numpy/polynomial/_polybase.py34
-rw-r--r--numpy/polynomial/chebyshev.py149
-rw-r--r--numpy/polynomial/hermite.py34
-rw-r--r--numpy/polynomial/hermite_e.py34
-rw-r--r--numpy/polynomial/laguerre.py34
-rw-r--r--numpy/polynomial/legendre.py40
-rw-r--r--numpy/polynomial/polynomial.py16
-rw-r--r--numpy/polynomial/polyutils.py4
-rw-r--r--numpy/polynomial/tests/__init__.py0
-rw-r--r--numpy/polynomial/tests/test_chebyshev.py52
-rw-r--r--numpy/polynomial/tests/test_classes.py25
-rw-r--r--numpy/polynomial/tests/test_hermite.py25
-rw-r--r--numpy/polynomial/tests/test_hermite_e.py25
-rw-r--r--numpy/polynomial/tests/test_laguerre.py25
-rw-r--r--numpy/polynomial/tests/test_legendre.py25
-rw-r--r--numpy/polynomial/tests/test_polynomial.py21
-rw-r--r--numpy/polynomial/tests/test_polyutils.py9
-rw-r--r--numpy/polynomial/tests/test_printing.py54
-rw-r--r--numpy/random/__init__.py2
-rw-r--r--numpy/random/mtrand/distributions.c4
-rw-r--r--numpy/random/mtrand/mtrand.pyx59
-rw-r--r--numpy/random/mtrand/numpy.pxd1
-rw-r--r--numpy/random/mtrand/randomkit.c26
-rw-r--r--numpy/random/tests/__init__.py0
-rw-r--r--numpy/random/tests/test_random.py88
-rw-r--r--numpy/random/tests/test_regression.py7
-rw-r--r--numpy/testing/__init__.py4
-rw-r--r--numpy/testing/decorators.py265
-rw-r--r--numpy/testing/nose_tools/__init__.py0
-rw-r--r--numpy/testing/nose_tools/decorators.py282
-rw-r--r--numpy/testing/nose_tools/noseclasses.py366
-rw-r--r--numpy/testing/nose_tools/nosetester.py560
-rw-r--r--numpy/testing/nose_tools/parameterized.py489
-rw-r--r--numpy/testing/nose_tools/utils.py2229
-rw-r--r--numpy/testing/noseclasses.py344
-rw-r--r--numpy/testing/nosetester.py525
-rwxr-xr-xnumpy/testing/setup.py1
-rw-r--r--numpy/testing/tests/__init__.py0
-rw-r--r--numpy/testing/tests/test_decorators.py14
-rw-r--r--numpy/testing/tests/test_utils.py6
-rw-r--r--numpy/testing/utils.py2218
-rw-r--r--numpy/tests/__init__.py0
-rw-r--r--numpy/tests/test_ctypeslib.py46
-rw-r--r--numpy/tests/test_matlib.py2
-rw-r--r--numpy/tests/test_scripts.py7
-rw-r--r--numpy/tests/test_warnings.py6
-rw-r--r--pavement.py31
-rwxr-xr-xruntests.py31
-rwxr-xr-xsetup.py3
-rw-r--r--tools/allocation_tracking/track_allocations.py4
-rw-r--r--tools/npy_tempita/__init__.py52
-rwxr-xr-xtools/travis-test.sh41
411 files changed, 15067 insertions, 20122 deletions
diff --git a/.mailmap b/.mailmap
index 675c5e386..92bc79b7b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -18,6 +18,8 @@ Alex Griffing <argriffi@ncsu.edu> alex <argriffi@ncsu.edu>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffi@ncsu.edu>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@gmail.com>
Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@users.noreply.github.com>
+Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
+Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
Alok Singhal <gandalf013@gmail.com> Alok Singhal <alok@merfinllc.com>
Amir Sarabadani <ladsgroup@gmail.com> amir <ladsgroup@gmail.com>
@@ -54,6 +56,7 @@ Daniel Müllner <Daniel Müllner muellner@math.stanford.edu> dmuellner <Daniel M
Daniel Rasmussen <daniel.rasmussen@appliedbrainresearch.com> drasmuss <daniel.rasmussen@appliedbrainresearch.com>
David Huard <david.huard@gmail.com> dhuard <dhuard@localhost>
David M Cooke <cookedm@localhost> cookedm <cookedm@localhost>
+David Nicholson <davidjn@google.com> davidjn <dnic12345@gmail.com>
David Ochoa <ochoadavid@gmail.com> ochoadavid <ochoadavid@gmail.com>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeier <dhomeie@gwdg.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeir <derek@astro.phsik.uni-goettingen.de>
diff --git a/.travis.yml b/.travis.yml
index 416a306fa..8dd5385d6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -38,9 +38,7 @@ python:
matrix:
include:
- python: 2.7
- env: PY3_COMPATIBILITY_CHECK=1
- - python: 2.7
- env: USE_CHROOT=1 ARCH=i386 DIST=yakkety PYTHON=2.7
+ env: USE_CHROOT=1 ARCH=i386 DIST=zesty PYTHON=2.7
sudo: true
dist: trusty
addons:
@@ -69,7 +67,7 @@ matrix:
- PYTHONOPTIMIZE=2
- USE_ASV=1
- python: 2.7
- env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1
+ env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OPTS="-3 -OO"
- python: 2.7
env: USE_WHEEL=1 NPY_RELAXED_STRIDES_DEBUG=1
- python: 2.7
diff --git a/MANIFEST.in b/MANIFEST.in
index 4e5206b94..0e11cb4f8 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -25,6 +25,5 @@ recursive-include doc/source *
recursive-include doc/sphinxext *
recursive-include tools/swig *
recursive-include doc/scipy-sphinx-theme *
-recursive-include doc/f2py *
global-exclude *.pyc *.pyo *.pyd
diff --git a/README.md b/README.md
index 39f6f44ef..2b9540dd5 100644
--- a/README.md
+++ b/README.md
@@ -2,6 +2,8 @@
<img src="http://www.numpy.org/_static/numpy_logo.png"><br>
</div>
+[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
+
-----------------
| **`Travis CI Status`** |
|-------------------|
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index a0de81054..26cffcab1 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -139,7 +139,7 @@ class CountNonzero(Benchmark):
class PackBits(Benchmark):
param_names = ['dtype']
- params = [[np.bool, np.uintp]]
+ params = [[bool, np.uintp]]
def setup(self, dtype):
self.d = np.ones(10000, dtype=dtype)
self.d2 = np.ones((200, 1000), dtype=dtype)
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index 704023528..353eb980c 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -29,8 +29,8 @@ class AddReduceSeparate(Benchmark):
class AnyAll(Benchmark):
def setup(self):
- self.zeros = np.zeros(100000, np.bool)
- self.ones = np.ones(100000, np.bool)
+ self.zeros = np.zeros(100000, bool)
+ self.ones = np.ones(100000, bool)
def time_all_fast(self):
self.zeros.all()
diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py
new file mode 100644
index 000000000..9d0f0ae04
--- /dev/null
+++ b/benchmarks/benchmarks/bench_shape_base.py
@@ -0,0 +1,89 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+
+class Block(Benchmark):
+ params = [1, 10, 100]
+ param_names = ['size']
+
+ def setup(self, n):
+ self.a_2d = np.ones((2 * n, 2 * n))
+ self.b_1d = np.ones(2 * n)
+ self.b_2d = 2 * self.a_2d
+
+ self.a = np.ones(3 * n)
+ self.b = np.ones(3 * n)
+
+ self.one_2d = np.ones((1 * n, 3 * n))
+ self.two_2d = np.ones((1 * n, 3 * n))
+ self.three_2d = np.ones((1 * n, 6 * n))
+ self.four_1d = np.ones(6 * n)
+ self.five_0d = np.ones(1 * n)
+ self.six_1d = np.ones(5 * n)
+ self.zero_2d = np.zeros((2 * n, 6 * n))
+
+ self.one = np.ones(3 * n)
+ self.two = 2 * np.ones((3, 3 * n))
+ self.three = 3 * np.ones(3 * n)
+ self.four = 4 * np.ones(3 * n)
+ self.five = 5 * np.ones(1 * n)
+ self.six = 6 * np.ones(5 * n)
+ self.zero = np.zeros((2 * n, 6 * n))
+
+ self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
+
+ self.a100 = np.ones((3 * n, 2 * n, 2 * n), int) * 2
+ self.a010 = np.ones((2 * n, 3 * n, 2 * n), int) * 3
+ self.a001 = np.ones((2 * n, 2 * n, 3 * n), int) * 4
+
+ self.a011 = np.ones((2 * n, 3 * n, 3 * n), int) * 5
+ self.a101 = np.ones((3 * n, 2 * n, 3 * n), int) * 6
+ self.a110 = np.ones((3 * n, 3 * n, 2 * n), int) * 7
+
+ self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
+
+ def time_block_simple_row_wise(self, n):
+ np.block([self.a_2d, self.b_2d])
+
+ def time_block_simple_column_wise(self, n):
+ np.block([[self.a_2d], [self.b_2d]])
+
+ def time_block_complicated(self, n):
+ np.block([[self.one_2d, self.two_2d],
+ [self.three_2d],
+ [self.four_1d],
+ [self.five_0d, self.six_1d],
+ [self.zero_2d]])
+
+ def time_nested(self, n):
+ np.block([
+ [
+ np.block([
+ [self.one],
+ [self.three],
+ [self.four]
+ ]),
+ self.two
+ ],
+ [self.five, self.six],
+ [self.zero]
+ ])
+
+ def time_3d(self, n):
+ np.block([
+ [
+ [self.a000, self.a001],
+ [self.a010, self.a011],
+ ],
+ [
+ [self.a100, self.a101],
+ [self.a110, self.a111],
+ ]
+ ])
+
+ def time_no_lists(self, n):
+ np.block(1)
+ np.block(np.eye(3 * n))
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 1baee1340..8f7d638b5 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -62,7 +62,7 @@ class UFunc(Benchmark):
class Custom(Benchmark):
def setup(self):
- self.b = np.ones(20000, dtype=np.bool)
+ self.b = np.ones(20000, dtype=bool)
def time_nonzero(self):
np.nonzero(self.b)
diff --git a/doc/CAPI.rst.txt b/doc/CAPI.rst.txt
index c586dd4ce..9656abf5c 100644
--- a/doc/CAPI.rst.txt
+++ b/doc/CAPI.rst.txt
@@ -178,8 +178,7 @@ function calls still remain but they are loose wrappers around the
- ``ENSURECOPY``: always copy the array. Returned arrays always
have ``CONTIGUOUS``, ``ALIGNED``, and ``WRITEABLE`` set.
- - ``ENSUREARRAY``: ensure the returned array is an ndarray (or a
- bigndarray if ``op`` is one).
+ - ``ENSUREARRAY``: ensure the returned array is an ndarray.
- ``FORCECAST``: cause a cast to occur regardless of whether or
not it is safe.
@@ -194,7 +193,7 @@ function calls still remain but they are loose wrappers around the
equivalent to ``PyArray_ContiguousFromObject(...)`` (which is still
available), except it will return the subclass if op is already a
subclass of the ndarray. The ``ContiguousFromObject`` version will
-always return an ndarray (or a bigndarray).
+always return an ndarray.
Passing Data Type information to C-code
=======================================
diff --git a/doc/Makefile b/doc/Makefile
index 52840be92..d414d26d7 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -90,19 +90,18 @@ upload:
# SSH must be correctly configured for this to work.
# Assumes that ``make dist`` was already run
# Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1``
- ssh $(USERNAME)@new.scipy.org mkdir $(UPLOAD_DIR)
- scp build/dist.tar.gz $(USERNAME)@new.scipy.org:$(UPLOAD_DIR)
- ssh $(USERNAME)@new.scipy.org tar xvC $(UPLOAD_DIR) \
+ ssh $(USERNAME)@docs.scipy.org mkdir $(UPLOAD_DIR)
+ scp build/dist.tar.gz $(USERNAME)@docs.scipy.org:$(UPLOAD_DIR)
+ ssh $(USERNAME)@docs.scipy.org tar xvC $(UPLOAD_DIR) \
-zf $(UPLOAD_DIR)/dist.tar.gz
- ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-ref.pdf \
+ ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-ref.pdf \
$(UPLOAD_DIR)/numpy-ref-$(RELEASE).pdf
- ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-user.pdf \
+ ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-user.pdf \
$(UPLOAD_DIR)/numpy-user-$(RELEASE).pdf
- ssh $(USERNAME)@new.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \
+ ssh $(USERNAME)@docs.scipy.org mv $(UPLOAD_DIR)/numpy-html.zip \
$(UPLOAD_DIR)/numpy-html-$(RELEASE).zip
- ssh $(USERNAME)@new.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
- ssh $(USERNAME)@new.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
- ssh $(USERNAME)@new.scipy.org /srv/bin/fixperm-scipy_org.sh
+ ssh $(USERNAME)@docs.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
+ ssh $(USERNAME)@docs.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
#------------------------------------------------------------------------------
# Basic Sphinx generation rules for different formats
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
index 32c6dffcb..614c50771 100755
--- a/doc/cdoc/numpyfilter.py
+++ b/doc/cdoc/numpyfilter.py
@@ -75,7 +75,7 @@ def load_cache():
f = open(CACHE_FILE, 'rb')
try:
cache = pickle.load(f)
- except:
+ except Exception:
cache = {}
finally:
f.close()
diff --git a/doc/changelog/1.12.0-changelog.rst b/doc/changelog/1.12.0-changelog.rst
new file mode 100644
index 000000000..b607f70fc
--- /dev/null
+++ b/doc/changelog/1.12.0-changelog.rst
@@ -0,0 +1,573 @@
+=========
+Changelog
+=========
+
+Contributors
+============
+
+A total of 139 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aditya Panchal +
+* Ales Erjavec +
+* Alex Griffing
+* Alexandr Shadchin +
+* Alistair Muldal
+* Allan Haldane
+* Amit Aronovitch +
+* Andrei Kucharavy +
+* Antony Lee
+* Antti Kaihola +
+* Arne de Laat +
+* Auke Wiggers +
+* AustereCuriosity +
+* Badhri Narayanan Krishnakumar +
+* Ben North +
+* Ben Rowland +
+* Bertrand Lefebvre
+* Boxiang Sun
+* CJ Carey
+* Charles Harris
+* Christoph Gohlke
+* Daniel Ching +
+* Daniel Rasmussen +
+* Daniel Smith +
+* David Schaich +
+* Denis Alevi +
+* Devin Jeanpierre +
+* Dmitry Odzerikho
+* Dongjoon Hyun +
+* Edward Richards +
+* Ekaterina Tuzova +
+* Emilien Kofman +
+* Endolith
+* Eren Sezener +
+* Eric Moore
+* Eric Quintero +
+* Eric Wieser +
+* Erik M. Bray
+* Frederic Bastien
+* Friedrich Dunne +
+* Gerrit Holl
+* Golnaz Irannejad +
+* Graham Markall +
+* Greg Knoll +
+* Greg Young
+* Gustavo Serra Scalet +
+* Ines Wichert +
+* Irvin Probst +
+* Jaime Fernandez
+* James Sanders +
+* Jan David Mol +
+* Jan Schlüter
+* Jeremy Tuloup +
+* John Kirkham
+* John Zwinck +
+* Jonathan Helmus
+* Joseph Fox-Rabinovitz
+* Josh Wilson +
+* Joshua Warner +
+* Julian Taylor
+* Ka Wo Chen +
+* Kamil Rytarowski +
+* Kelsey Jordahl +
+* Kevin Deldycke +
+* Khaled Ben Abdallah Okuda +
+* Lion Krischer +
+* Loïc Estève +
+* Luca Mussi +
+* Mads Ohm Larsen +
+* Manoj Kumar +
+* Mario Emmenlauer +
+* Marshall Bockrath-Vandegrift +
+* Marshall Ward +
+* Marten van Kerkwijk
+* Mathieu Lamarre +
+* Matthew Brett
+* Matthew Harrigan +
+* Matthias Geier
+* Matti Picus +
+* Meet Udeshi +
+* Michael Felt +
+* Michael Goerz +
+* Michael Martin +
+* Michael Seifert +
+* Mike Nolta +
+* Nathaniel Beaver +
+* Nathaniel J. Smith
+* Naveen Arunachalam +
+* Nick Papior
+* Nikola Forró +
+* Oleksandr Pavlyk +
+* Olivier Grisel
+* Oren Amsalem +
+* Pauli Virtanen
+* Pavel Potocek +
+* Pedro Lacerda +
+* Peter Creasey +
+* Phil Elson +
+* Philip Gura +
+* Phillip J. Wolfram +
+* Pierre de Buyl +
+* Raghav RV +
+* Ralf Gommers
+* Ray Donnelly +
+* Rehas Sachdeva
+* Rob Malouf +
+* Robert Kern
+* Samuel St-Jean
+* Sanchez Gonzalez Alvaro +
+* Saurabh Mehta +
+* Scott Sanderson +
+* Sebastian Berg
+* Shayan Pooya +
+* Shota Kawabuchi +
+* Simon Conseil
+* Simon Gibbons
+* Sorin Sbarnea +
+* Stefan van der Walt
+* Stephan Hoyer
+* Steven J Kern +
+* Stuart Archibald
+* Tadeu Manoel +
+* Takuya Akiba +
+* Thomas A Caswell
+* Tom Bird +
+* Tony Kelman +
+* Toshihiro Kamishima +
+* Valentin Valls +
+* Varun Nayyar
+* Victor Stinner +
+* Warren Weckesser
+* Wendell Smith
+* Wojtek Ruszczewski +
+* Xavier Abellan Ecija +
+* Yaroslav Halchenko
+* Yash Shah +
+* Yinon Ehrlich +
+* Yu Feng +
+* nevimov +
+
+Pull requests merged
+====================
+
+A total of 418 pull requests were merged for this release.
+
+* `#4073 <https://github.com/numpy/numpy/pull/4073>`__: BUG: change real output checking to test if all imaginary parts...
+* `#4619 <https://github.com/numpy/numpy/pull/4619>`__: BUG : np.sum silently drops keepdims for sub-classes of ndarray
+* `#5488 <https://github.com/numpy/numpy/pull/5488>`__: ENH: add `contract`: optimizing numpy's einsum expression
+* `#5706 <https://github.com/numpy/numpy/pull/5706>`__: ENH: make some masked array methods behave more like ndarray...
+* `#5822 <https://github.com/numpy/numpy/pull/5822>`__: Allow many distributions to have a scale of 0.
+* `#6054 <https://github.com/numpy/numpy/pull/6054>`__: WIP: MAINT: Add deprecation warning to views of multi-field indexes
+* `#6298 <https://github.com/numpy/numpy/pull/6298>`__: Check lower base limit in base_repr.
+* `#6430 <https://github.com/numpy/numpy/pull/6430>`__: Fix issues with zero-width string fields
+* `#6656 <https://github.com/numpy/numpy/pull/6656>`__: ENH: usecols now accepts an int when only one column has to be...
+* `#6660 <https://github.com/numpy/numpy/pull/6660>`__: Added pathlib support for several functions
+* `#6872 <https://github.com/numpy/numpy/pull/6872>`__: ENH: linear interpolation of complex values in lib.interp
+* `#6997 <https://github.com/numpy/numpy/pull/6997>`__: MAINT: Simplify mtrand.pyx helpers
+* `#7003 <https://github.com/numpy/numpy/pull/7003>`__: BUG: Fix string copying for np.place
+* `#7026 <https://github.com/numpy/numpy/pull/7026>`__: DOC: Clarify behavior in np.random.uniform
+* `#7055 <https://github.com/numpy/numpy/pull/7055>`__: BUG: One Element Array Inputs Return Scalars in np.random
+* `#7063 <https://github.com/numpy/numpy/pull/7063>`__: REL: Update master branch after 1.11.x branch has been made.
+* `#7073 <https://github.com/numpy/numpy/pull/7073>`__: DOC: Update the 1.11.0 release notes.
+* `#7076 <https://github.com/numpy/numpy/pull/7076>`__: MAINT: Update the git .mailmap file.
+* `#7082 <https://github.com/numpy/numpy/pull/7082>`__: TST, DOC: Added Broadcasting Tests in test_random.py
+* `#7087 <https://github.com/numpy/numpy/pull/7087>`__: BLD: fix compilation on non glibc-Linuxes
+* `#7088 <https://github.com/numpy/numpy/pull/7088>`__: BUG: Have `norm` cast non-floating point arrays to 64-bit float...
+* `#7090 <https://github.com/numpy/numpy/pull/7090>`__: ENH: Added 'doane' and 'sqrt' estimators to np.histogram in numpy.function_base
+* `#7091 <https://github.com/numpy/numpy/pull/7091>`__: Revert "BLD: fix compilation on non glibc-Linuxes"
+* `#7092 <https://github.com/numpy/numpy/pull/7092>`__: BLD: fix compilation on non glibc-Linuxes
+* `#7099 <https://github.com/numpy/numpy/pull/7099>`__: TST: Suppressed warnings
+* `#7102 <https://github.com/numpy/numpy/pull/7102>`__: MAINT: Removed conditionals that are always false in datetime_strings.c
+* `#7105 <https://github.com/numpy/numpy/pull/7105>`__: DEP: Deprecate as_strided returning a writable array as default
+* `#7109 <https://github.com/numpy/numpy/pull/7109>`__: DOC: update Python versions requirements in the install docs
+* `#7114 <https://github.com/numpy/numpy/pull/7114>`__: MAINT: Fix typos in docs
+* `#7116 <https://github.com/numpy/numpy/pull/7116>`__: TST: Fixed f2py test for win32 virtualenv
+* `#7118 <https://github.com/numpy/numpy/pull/7118>`__: TST: Fixed f2py test for non-versioned python executables
+* `#7119 <https://github.com/numpy/numpy/pull/7119>`__: BUG: Fixed mingw.lib error
+* `#7125 <https://github.com/numpy/numpy/pull/7125>`__: DOC: Updated documentation wording and examples for np.percentile.
+* `#7129 <https://github.com/numpy/numpy/pull/7129>`__: BUG: Fixed 'midpoint' interpolation of np.percentile in odd cases.
+* `#7131 <https://github.com/numpy/numpy/pull/7131>`__: Fix setuptools sdist
+* `#7133 <https://github.com/numpy/numpy/pull/7133>`__: ENH: savez: temporary file alongside with target file and improve...
+* `#7134 <https://github.com/numpy/numpy/pull/7134>`__: MAINT: Fix some typos in a code string and comments
+* `#7141 <https://github.com/numpy/numpy/pull/7141>`__: BUG: Unpickled void scalars should be contiguous
+* `#7144 <https://github.com/numpy/numpy/pull/7144>`__: MAINT: Change `call_fortran` into `callfortran` in comments.
+* `#7145 <https://github.com/numpy/numpy/pull/7145>`__: BUG: Fixed regressions in np.piecewise in ref to #5737 and #5729.
+* `#7147 <https://github.com/numpy/numpy/pull/7147>`__: Temporarily disable __numpy_ufunc__
+* `#7148 <https://github.com/numpy/numpy/pull/7148>`__: ENH,TST: Bump stacklevel and add tests for warnings
+* `#7149 <https://github.com/numpy/numpy/pull/7149>`__: TST: Add missing suffix to temppath manager
+* `#7152 <https://github.com/numpy/numpy/pull/7152>`__: BUG: mode kwargs passed as unicode to np.pad raises an exception
+* `#7156 <https://github.com/numpy/numpy/pull/7156>`__: BUG: Reascertain that linspace respects ndarray subclasses in...
+* `#7167 <https://github.com/numpy/numpy/pull/7167>`__: DOC: Update Wikipedia references for mtrand.pyx
+* `#7171 <https://github.com/numpy/numpy/pull/7171>`__: TST: Fixed f2py test for Anaconda non-win32
+* `#7174 <https://github.com/numpy/numpy/pull/7174>`__: DOC: Fix broken pandas link in release notes
+* `#7177 <https://github.com/numpy/numpy/pull/7177>`__: ENH: added axis param for np.count_nonzero
+* `#7178 <https://github.com/numpy/numpy/pull/7178>`__: BUG: Fix binary_repr for negative numbers
+* `#7180 <https://github.com/numpy/numpy/pull/7180>`__: BUG: Fixed previous attempt to fix dimension mismatch in nanpercentile
+* `#7181 <https://github.com/numpy/numpy/pull/7181>`__: DOC: Updated minor typos in function_base.py and test_function_base.py
+* `#7191 <https://github.com/numpy/numpy/pull/7191>`__: DOC: add vstack, hstack, dstack reference to stack documentation.
+* `#7193 <https://github.com/numpy/numpy/pull/7193>`__: MAINT: Removed spurious assert in histogram estimators
+* `#7194 <https://github.com/numpy/numpy/pull/7194>`__: BUG: Raise a quieter `MaskedArrayFutureWarning` for mask changes.
+* `#7195 <https://github.com/numpy/numpy/pull/7195>`__: STY: Drop some trailing spaces in `numpy.ma.core`.
+* `#7196 <https://github.com/numpy/numpy/pull/7196>`__: Revert "DOC: add vstack, hstack, dstack reference to stack documentation."
+* `#7197 <https://github.com/numpy/numpy/pull/7197>`__: TST: Pin virtualenv used on Travis CI.
+* `#7198 <https://github.com/numpy/numpy/pull/7198>`__: ENH: Unlock the GIL for gufuncs
+* `#7199 <https://github.com/numpy/numpy/pull/7199>`__: MAINT: Cleanup for histogram bin estimator selection
+* `#7201 <https://github.com/numpy/numpy/pull/7201>`__: Raise IOError on not a file in python2
+* `#7202 <https://github.com/numpy/numpy/pull/7202>`__: MAINT: Made `iterable` return a boolean
+* `#7209 <https://github.com/numpy/numpy/pull/7209>`__: TST: Bump `virtualenv` to 14.0.6
+* `#7211 <https://github.com/numpy/numpy/pull/7211>`__: DOC: Fix fmin examples
+* `#7215 <https://github.com/numpy/numpy/pull/7215>`__: MAINT: Use PySlice_GetIndicesEx instead of custom reimplementation
+* `#7229 <https://github.com/numpy/numpy/pull/7229>`__: ENH: implement __complex__
+* `#7231 <https://github.com/numpy/numpy/pull/7231>`__: MRG: allow distributors to run custom init
+* `#7232 <https://github.com/numpy/numpy/pull/7232>`__: BLD: Switch order of test for lapack_mkl and openblas_lapack
+* `#7239 <https://github.com/numpy/numpy/pull/7239>`__: DOC: Removed residual merge markup from previous commit
+* `#7240 <https://github.com/numpy/numpy/pull/7240>`__: Change 'pubic' to 'public'.
+* `#7241 <https://github.com/numpy/numpy/pull/7241>`__: MAINT: update doc/sphinxext to numpydoc 0.6.0, and fix up some...
+* `#7243 <https://github.com/numpy/numpy/pull/7243>`__: ENH: Adding support to the range keyword for estimation of the...
+* `#7246 <https://github.com/numpy/numpy/pull/7246>`__: DOC: mention writeable keyword in as_strided in release notes
+* `#7247 <https://github.com/numpy/numpy/pull/7247>`__: TST: Fail quickly on AppVeyor for superseded PR builds
+* `#7248 <https://github.com/numpy/numpy/pull/7248>`__: DOC: remove link to documentation wiki editor from HOWTO_DOCUMENT.
+* `#7250 <https://github.com/numpy/numpy/pull/7250>`__: DOC,REL: Update 1.11.0 notes.
+* `#7251 <https://github.com/numpy/numpy/pull/7251>`__: BUG: only benchmark complex256 if it exists
+* `#7252 <https://github.com/numpy/numpy/pull/7252>`__: Forward port a fix and enhancement from 1.11.x
+* `#7253 <https://github.com/numpy/numpy/pull/7253>`__: DOC: note in h/v/dstack points users to stack/concatenate
+* `#7254 <https://github.com/numpy/numpy/pull/7254>`__: BUG: Enforce dtype for randint singletons
+* `#7256 <https://github.com/numpy/numpy/pull/7256>`__: MAINT: Use `is None` or `is not None` instead of `== None` or...
+* `#7257 <https://github.com/numpy/numpy/pull/7257>`__: DOC: Fix mismatched variable names in docstrings.
+* `#7258 <https://github.com/numpy/numpy/pull/7258>`__: ENH: Make numpy floor_divide and remainder agree with Python...
+* `#7260 <https://github.com/numpy/numpy/pull/7260>`__: BUG/TST: Fix #7259, do not "force scalar" for already scalar...
+* `#7261 <https://github.com/numpy/numpy/pull/7261>`__: Added self to mailmap
+* `#7266 <https://github.com/numpy/numpy/pull/7266>`__: BUG: Segfault for classes with deceptive __len__
+* `#7268 <https://github.com/numpy/numpy/pull/7268>`__: ENH: add geomspace function
+* `#7274 <https://github.com/numpy/numpy/pull/7274>`__: BUG: Preserve array order in np.delete
+* `#7275 <https://github.com/numpy/numpy/pull/7275>`__: DEP: Warn about assigning 'data' attribute of ndarray
+* `#7276 <https://github.com/numpy/numpy/pull/7276>`__: DOC: apply_along_axis missing whitespace inserted (before colon)
+* `#7278 <https://github.com/numpy/numpy/pull/7278>`__: BUG: Make returned unravel_index arrays writeable
+* `#7279 <https://github.com/numpy/numpy/pull/7279>`__: TST: Fixed elements being shuffled
+* `#7280 <https://github.com/numpy/numpy/pull/7280>`__: MAINT: Remove redundant trailing semicolons.
+* `#7285 <https://github.com/numpy/numpy/pull/7285>`__: BUG: Make Randint Backwards Compatible with Pandas
+* `#7286 <https://github.com/numpy/numpy/pull/7286>`__: MAINT: Fix typos in docs/comments of `ma` and `polynomial` modules.
+* `#7292 <https://github.com/numpy/numpy/pull/7292>`__: Clarify error on repr failure in assert_equal.
+* `#7294 <https://github.com/numpy/numpy/pull/7294>`__: ENH: add support for BLIS to numpy.distutils
+* `#7295 <https://github.com/numpy/numpy/pull/7295>`__: DOC: understanding code and getting started section to dev doc
+* `#7296 <https://github.com/numpy/numpy/pull/7296>`__: Revert part of #3907 which incorrectly propagated MaskedArray...
+* `#7299 <https://github.com/numpy/numpy/pull/7299>`__: DOC: Fix mismatched variable names in docstrings.
+* `#7300 <https://github.com/numpy/numpy/pull/7300>`__: DOC: dev: stop recommending keeping local master updated with...
+* `#7301 <https://github.com/numpy/numpy/pull/7301>`__: DOC: Update release notes
+* `#7305 <https://github.com/numpy/numpy/pull/7305>`__: BUG: Remove data race in mtrand: two threads could mutate the...
+* `#7307 <https://github.com/numpy/numpy/pull/7307>`__: DOC: Missing some characters in link.
+* `#7308 <https://github.com/numpy/numpy/pull/7308>`__: BUG: Incrementing the wrong reference on return
+* `#7310 <https://github.com/numpy/numpy/pull/7310>`__: STY: Fix GitHub rendering of ordered lists >9
+* `#7311 <https://github.com/numpy/numpy/pull/7311>`__: ENH: Make _pointer_type_cache functional
+* `#7313 <https://github.com/numpy/numpy/pull/7313>`__: DOC: corrected grammatical error in quickstart doc
+* `#7325 <https://github.com/numpy/numpy/pull/7325>`__: BUG, MAINT: Improve fromnumeric.py interface for downstream compatibility
+* `#7328 <https://github.com/numpy/numpy/pull/7328>`__: DEP: Deprecated using a float index in linspace
+* `#7331 <https://github.com/numpy/numpy/pull/7331>`__: Add comment, TST: fix MemoryError on win32
+* `#7332 <https://github.com/numpy/numpy/pull/7332>`__: Check for no solution in np.irr Fixes #6744
+* `#7338 <https://github.com/numpy/numpy/pull/7338>`__: TST: Install `pytz` in the CI.
+* `#7340 <https://github.com/numpy/numpy/pull/7340>`__: DOC: Fixed math rendering in tensordot docs.
+* `#7341 <https://github.com/numpy/numpy/pull/7341>`__: TST: Add test for #6469
+* `#7344 <https://github.com/numpy/numpy/pull/7344>`__: DOC: Fix more typos in docs and comments.
+* `#7346 <https://github.com/numpy/numpy/pull/7346>`__: Generalized flip
+* `#7347 <https://github.com/numpy/numpy/pull/7347>`__: ENH Generalized rot90
+* `#7348 <https://github.com/numpy/numpy/pull/7348>`__: Maint: Removed extra space from `ureduce`
+* `#7349 <https://github.com/numpy/numpy/pull/7349>`__: MAINT: Hide nan warnings for masked internal MA computations
+* `#7350 <https://github.com/numpy/numpy/pull/7350>`__: BUG: MA ufuncs should set mask to False, not array([False])
+* `#7351 <https://github.com/numpy/numpy/pull/7351>`__: TST: Fix some MA tests to avoid looking at the .data attribute
+* `#7358 <https://github.com/numpy/numpy/pull/7358>`__: BUG: pull request related to the issue #7353
+* `#7359 <https://github.com/numpy/numpy/pull/7359>`__: Update 7314, DOC: Clarify valid integer range for random.seed...
+* `#7361 <https://github.com/numpy/numpy/pull/7361>`__: MAINT: Fix copy and paste oversight.
+* `#7363 <https://github.com/numpy/numpy/pull/7363>`__: ENH: Make no unshare mask future warnings less noisy
+* `#7366 <https://github.com/numpy/numpy/pull/7366>`__: TST: fix #6542, add tests to check non-iterable argument raises...
+* `#7373 <https://github.com/numpy/numpy/pull/7373>`__: ENH: Add bitwise_and identity
+* `#7378 <https://github.com/numpy/numpy/pull/7378>`__: added NumPy logo and separator
+* `#7382 <https://github.com/numpy/numpy/pull/7382>`__: MAINT: cleanup np.average
+* `#7385 <https://github.com/numpy/numpy/pull/7385>`__: DOC: note about wheels / windows wheels for pypi
+* `#7386 <https://github.com/numpy/numpy/pull/7386>`__: Added label icon to Travis status
+* `#7397 <https://github.com/numpy/numpy/pull/7397>`__: BUG: incorrect type for objects whose __len__ fails
+* `#7398 <https://github.com/numpy/numpy/pull/7398>`__: DOC: fix typo
+* `#7404 <https://github.com/numpy/numpy/pull/7404>`__: Use PyMem_RawMalloc on Python 3.4 and newer
+* `#7406 <https://github.com/numpy/numpy/pull/7406>`__: ENH ufunc called on memmap return a ndarray
+* `#7407 <https://github.com/numpy/numpy/pull/7407>`__: BUG: Fix decref before incref for in-place accumulate
+* `#7410 <https://github.com/numpy/numpy/pull/7410>`__: DOC: add nanprod to the list of math routines
+* `#7414 <https://github.com/numpy/numpy/pull/7414>`__: Tweak corrcoef
+* `#7415 <https://github.com/numpy/numpy/pull/7415>`__: DOC: Documentation fixes
+* `#7416 <https://github.com/numpy/numpy/pull/7416>`__: BUG: Incorrect handling of range in `histogram` with automatic...
+* `#7418 <https://github.com/numpy/numpy/pull/7418>`__: DOC: Minor typo fix, hermefik -> hermefit.
+* `#7421 <https://github.com/numpy/numpy/pull/7421>`__: ENH: adds np.nancumsum and np.nancumprod
+* `#7423 <https://github.com/numpy/numpy/pull/7423>`__: BUG: Ongoing fixes to PR#7416
+* `#7430 <https://github.com/numpy/numpy/pull/7430>`__: DOC: Update 1.11.0-notes.
+* `#7433 <https://github.com/numpy/numpy/pull/7433>`__: MAINT: FutureWarning for changes to np.average subclass handling
+* `#7437 <https://github.com/numpy/numpy/pull/7437>`__: np.full now defaults to the filling value's dtype.
+* `#7438 <https://github.com/numpy/numpy/pull/7438>`__: Allow rolling multiple axes at the same time.
+* `#7439 <https://github.com/numpy/numpy/pull/7439>`__: BUG: Do not try sequence repeat unless necessary
+* `#7442 <https://github.com/numpy/numpy/pull/7442>`__: MANT: Simplify diagonal length calculation logic
+* `#7445 <https://github.com/numpy/numpy/pull/7445>`__: BUG: reference count leak in bincount, fixes #6805
+* `#7446 <https://github.com/numpy/numpy/pull/7446>`__: DOC: ndarray typo fix
+* `#7447 <https://github.com/numpy/numpy/pull/7447>`__: BUG: scalar integer negative powers gave wrong results.
+* `#7448 <https://github.com/numpy/numpy/pull/7448>`__: DOC: array "See also" link to full and full_like instead of fill
+* `#7456 <https://github.com/numpy/numpy/pull/7456>`__: BUG: int overflow in reshape, fixes #7455, fixes #7293
+* `#7463 <https://github.com/numpy/numpy/pull/7463>`__: BUG: fix array too big error for wide dtypes.
+* `#7466 <https://github.com/numpy/numpy/pull/7466>`__: BUG: segfault inplace object reduceat, fixes #7465
+* `#7468 <https://github.com/numpy/numpy/pull/7468>`__: BUG: more on inplace reductions, fixes #615
+* `#7469 <https://github.com/numpy/numpy/pull/7469>`__: MAINT: Update git .mailmap
+* `#7472 <https://github.com/numpy/numpy/pull/7472>`__: MAINT: Update .mailmap.
+* `#7477 <https://github.com/numpy/numpy/pull/7477>`__: MAINT: Yet more .mailmap updates for recent contributors.
+* `#7481 <https://github.com/numpy/numpy/pull/7481>`__: BUG: Fix segfault in PyArray_OrderConverter
+* `#7482 <https://github.com/numpy/numpy/pull/7482>`__: BUG: Memory Leak in _GenericBinaryOutFunction
+* `#7489 <https://github.com/numpy/numpy/pull/7489>`__: Faster real_if_close.
+* `#7491 <https://github.com/numpy/numpy/pull/7491>`__: DOC: Update subclassing doc regarding downstream compatibility
+* `#7496 <https://github.com/numpy/numpy/pull/7496>`__: BUG: don't use pow for integer power ufunc loops.
+* `#7504 <https://github.com/numpy/numpy/pull/7504>`__: DOC: remove "arr" from keepdims docstrings
+* `#7505 <https://github.com/numpy/numpy/pull/7505>`__: MAIN: fix to #7382, make scl in np.average writeable
+* `#7507 <https://github.com/numpy/numpy/pull/7507>`__: MAINT: Remove nose.SkipTest import.
+* `#7508 <https://github.com/numpy/numpy/pull/7508>`__: DOC: link frompyfunc and vectorize
+* `#7511 <https://github.com/numpy/numpy/pull/7511>`__: numpy.power(0, 0) should return 1
+* `#7515 <https://github.com/numpy/numpy/pull/7515>`__: BUG: MaskedArray.count treats negative axes incorrectly
+* `#7518 <https://github.com/numpy/numpy/pull/7518>`__: BUG: Extend glibc complex trig functions blacklist to glibc <...
+* `#7521 <https://github.com/numpy/numpy/pull/7521>`__: DOC: rephrase writeup of memmap changes
+* `#7522 <https://github.com/numpy/numpy/pull/7522>`__: BUG: Fixed iteration over additional bad commands
+* `#7526 <https://github.com/numpy/numpy/pull/7526>`__: DOC: Removed an extra `:const:`
+* `#7529 <https://github.com/numpy/numpy/pull/7529>`__: BUG: Floating exception with invalid axis in np.lexsort
+* `#7534 <https://github.com/numpy/numpy/pull/7534>`__: MAINT: Update setup.py to reflect supported python versions.
+* `#7536 <https://github.com/numpy/numpy/pull/7536>`__: MAINT: Always use PyCapsule instead of PyCObject in mtrand.pyx
+* `#7539 <https://github.com/numpy/numpy/pull/7539>`__: MAINT: Cleanup of random stuff
+* `#7549 <https://github.com/numpy/numpy/pull/7549>`__: BUG: allow graceful recovery for no Linux compiler
+* `#7562 <https://github.com/numpy/numpy/pull/7562>`__: BUG: Fix test_from_object_array_unicode (test_defchararray.TestBasic)…
+* `#7565 <https://github.com/numpy/numpy/pull/7565>`__: BUG: Fix test_ctypeslib and test_indexing for debug interpreter
+* `#7566 <https://github.com/numpy/numpy/pull/7566>`__: MAINT: use manylinux1 wheel for cython
+* `#7568 <https://github.com/numpy/numpy/pull/7568>`__: Fix a false positive OverflowError in Python 3.x when value above...
+* `#7579 <https://github.com/numpy/numpy/pull/7579>`__: DOC: clarify purpose of Attributes section
+* `#7584 <https://github.com/numpy/numpy/pull/7584>`__: BUG: fixes #7572, percent in path
+* `#7586 <https://github.com/numpy/numpy/pull/7586>`__: Make np.ma.take works on scalars
+* `#7587 <https://github.com/numpy/numpy/pull/7587>`__: BUG: linalg.norm(): Don't convert object arrays to float
+* `#7598 <https://github.com/numpy/numpy/pull/7598>`__: Cast array size to int64 when loading from archive
+* `#7602 <https://github.com/numpy/numpy/pull/7602>`__: DOC: Remove isreal and iscomplex from ufunc list
+* `#7605 <https://github.com/numpy/numpy/pull/7605>`__: DOC: fix incorrect Gamma distribution parameterization comments
+* `#7609 <https://github.com/numpy/numpy/pull/7609>`__: BUG: Fix TypeError when raising TypeError
+* `#7611 <https://github.com/numpy/numpy/pull/7611>`__: ENH: expose test runner raise_warnings option
+* `#7614 <https://github.com/numpy/numpy/pull/7614>`__: BLD: Avoid using os.spawnve in favor of os.spawnv in exec_command
+* `#7618 <https://github.com/numpy/numpy/pull/7618>`__: BUG: distance arg of np.gradient must be scalar, fix docstring
+* `#7626 <https://github.com/numpy/numpy/pull/7626>`__: DOC: RST definition list fixes
+* `#7627 <https://github.com/numpy/numpy/pull/7627>`__: MAINT: unify tup processing, move tup use to after all PyTuple_SetItem...
+* `#7630 <https://github.com/numpy/numpy/pull/7630>`__: MAINT: add ifdef around PyDictProxy_Check macro
+* `#7631 <https://github.com/numpy/numpy/pull/7631>`__: MAINT: linalg: fix comment, simplify math
+* `#7634 <https://github.com/numpy/numpy/pull/7634>`__: BLD: correct C compiler customization in system_info.py Closes...
+* `#7635 <https://github.com/numpy/numpy/pull/7635>`__: BUG: ma.median alternate fix for #7592
+* `#7636 <https://github.com/numpy/numpy/pull/7636>`__: MAINT: clean up testing.assert_raises_regexp, 2.6-specific code...
+* `#7637 <https://github.com/numpy/numpy/pull/7637>`__: MAINT: clearer exception message when importing multiarray fails.
+* `#7639 <https://github.com/numpy/numpy/pull/7639>`__: TST: fix a set of test errors in master.
+* `#7643 <https://github.com/numpy/numpy/pull/7643>`__: DOC : minor changes to linspace docstring
+* `#7651 <https://github.com/numpy/numpy/pull/7651>`__: BUG: one to any power is still 1. Broken edgecase for int arrays
+* `#7655 <https://github.com/numpy/numpy/pull/7655>`__: BLD: Remove Intel compiler flag -xSSE4.2
+* `#7658 <https://github.com/numpy/numpy/pull/7658>`__: BUG: fix incorrect printing of 1D masked arrays
+* `#7659 <https://github.com/numpy/numpy/pull/7659>`__: BUG: Temporary fix for str(mvoid) for object field types
+* `#7664 <https://github.com/numpy/numpy/pull/7664>`__: BUG: Fix unicode with byte swap transfer and copyswap
+* `#7667 <https://github.com/numpy/numpy/pull/7667>`__: Restore histogram consistency
+* `#7668 <https://github.com/numpy/numpy/pull/7668>`__: ENH: Do not check the type of module.__dict__ explicit in test.
+* `#7669 <https://github.com/numpy/numpy/pull/7669>`__: BUG: boolean assignment no GIL release when transfer needs API
+* `#7673 <https://github.com/numpy/numpy/pull/7673>`__: DOC: Create Numpy 1.11.1 release notes.
+* `#7675 <https://github.com/numpy/numpy/pull/7675>`__: BUG: fix handling of right edge of final bin.
+* `#7678 <https://github.com/numpy/numpy/pull/7678>`__: BUG: Fix np.clip bug NaN handling for Visual Studio 2015
+* `#7679 <https://github.com/numpy/numpy/pull/7679>`__: MAINT: Fix up C++ comment in arraytypes.c.src.
+* `#7681 <https://github.com/numpy/numpy/pull/7681>`__: DOC: Update 1.11.1 release notes.
+* `#7686 <https://github.com/numpy/numpy/pull/7686>`__: ENH: Changing FFT cache to a bounded LRU cache
+* `#7688 <https://github.com/numpy/numpy/pull/7688>`__: DOC: fix broken genfromtxt examples in user guide. Closes gh-7662.
+* `#7689 <https://github.com/numpy/numpy/pull/7689>`__: BENCH: add correlate/convolve benchmarks.
+* `#7696 <https://github.com/numpy/numpy/pull/7696>`__: DOC: update wheel build / upload instructions
+* `#7699 <https://github.com/numpy/numpy/pull/7699>`__: BLD: preserve library order
+* `#7704 <https://github.com/numpy/numpy/pull/7704>`__: ENH: Add bits attribute to np.finfo
+* `#7712 <https://github.com/numpy/numpy/pull/7712>`__: BUG: Fix race condition with new FFT cache
+* `#7715 <https://github.com/numpy/numpy/pull/7715>`__: BUG: Remove memory leak in np.place
+* `#7719 <https://github.com/numpy/numpy/pull/7719>`__: BUG: Fix segfault in np.random.shuffle for arrays of different...
+* `#7723 <https://github.com/numpy/numpy/pull/7723>`__: Change mkl_info.dir_env_var from MKL to MKLROOT
+* `#7727 <https://github.com/numpy/numpy/pull/7727>`__: DOC: Corrections in Datetime Units-arrays.datetime.rst
+* `#7729 <https://github.com/numpy/numpy/pull/7729>`__: DOC: fix typo in savetxt docstring (closes #7620)
+* `#7733 <https://github.com/numpy/numpy/pull/7733>`__: Update 7525, DOC: Fix order='A' docs of np.array.
+* `#7734 <https://github.com/numpy/numpy/pull/7734>`__: Update 7542, ENH: Add `polyrootval` to numpy.polynomial
+* `#7735 <https://github.com/numpy/numpy/pull/7735>`__: BUG: fix issue on OS X with Python 3.x where npymath.ini was...
+* `#7739 <https://github.com/numpy/numpy/pull/7739>`__: DOC: Mention the changes of #6430 in the release notes.
+* `#7740 <https://github.com/numpy/numpy/pull/7740>`__: DOC: add reference to poisson rng
+* `#7743 <https://github.com/numpy/numpy/pull/7743>`__: Update 7476, DEP: deprecate Numeric-style typecodes, closes #2148
+* `#7744 <https://github.com/numpy/numpy/pull/7744>`__: DOC: Remove "ones_like" from ufuncs list (it is not)
+* `#7746 <https://github.com/numpy/numpy/pull/7746>`__: DOC: Clarify the effect of rcond in numpy.linalg.lstsq.
+* `#7747 <https://github.com/numpy/numpy/pull/7747>`__: Update 7672, BUG: Make sure we don't divide by zero
+* `#7748 <https://github.com/numpy/numpy/pull/7748>`__: DOC: Update float32 mean example in docstring
+* `#7754 <https://github.com/numpy/numpy/pull/7754>`__: Update 7612, ENH: Add broadcast.ndim to match code elsewhere.
+* `#7757 <https://github.com/numpy/numpy/pull/7757>`__: Update 7175, BUG: Invalid read of size 4 in PyArray_FromFile
+* `#7759 <https://github.com/numpy/numpy/pull/7759>`__: BUG: Fix numpy.i support for numpy API < 1.7.
+* `#7760 <https://github.com/numpy/numpy/pull/7760>`__: ENH: Make assert_almost_equal & assert_array_almost_equal consistent.
+* `#7766 <https://github.com/numpy/numpy/pull/7766>`__: fix an English typo
+* `#7771 <https://github.com/numpy/numpy/pull/7771>`__: DOC: link geomspace from logspace
+* `#7773 <https://github.com/numpy/numpy/pull/7773>`__: DOC: Remove a redundant the
+* `#7777 <https://github.com/numpy/numpy/pull/7777>`__: DOC: Update Numpy 1.11.1 release notes.
+* `#7785 <https://github.com/numpy/numpy/pull/7785>`__: DOC: update wheel building procedure for release
+* `#7789 <https://github.com/numpy/numpy/pull/7789>`__: MRG: add note of 64-bit wheels on Windows
+* `#7791 <https://github.com/numpy/numpy/pull/7791>`__: f2py.compile issues (#7683)
+* `#7799 <https://github.com/numpy/numpy/pull/7799>`__: "lambda" is not allowed to use as keyword arguments in a sample...
+* `#7803 <https://github.com/numpy/numpy/pull/7803>`__: BUG: interpret 'c' PEP3118/struct type as 'S1'.
+* `#7807 <https://github.com/numpy/numpy/pull/7807>`__: DOC: Misplaced parens in formula
+* `#7817 <https://github.com/numpy/numpy/pull/7817>`__: BUG: Make sure npy_mul_with_overflow_<type> detects overflow.
+* `#7818 <https://github.com/numpy/numpy/pull/7818>`__: numpy/distutils/misc_util.py fix for #7809: check that _tmpdirs...
+* `#7820 <https://github.com/numpy/numpy/pull/7820>`__: MAINT: Allocate fewer bytes for empty arrays.
+* `#7823 <https://github.com/numpy/numpy/pull/7823>`__: BUG: Fixed masked array behavior for scalar inputs to np.ma.atleast_*d
+* `#7834 <https://github.com/numpy/numpy/pull/7834>`__: DOC: Added an example
+* `#7839 <https://github.com/numpy/numpy/pull/7839>`__: Pypy fixes
+* `#7840 <https://github.com/numpy/numpy/pull/7840>`__: Fix ATLAS version detection
+* `#7842 <https://github.com/numpy/numpy/pull/7842>`__: Fix versionadded tags
+* `#7848 <https://github.com/numpy/numpy/pull/7848>`__: MAINT: Fix remaining uses of deprecated Python imp module.
+* `#7853 <https://github.com/numpy/numpy/pull/7853>`__: BUG: Make sure numpy globals keep identity after reload.
+* `#7863 <https://github.com/numpy/numpy/pull/7863>`__: ENH: turn quicksort into introsort
+* `#7866 <https://github.com/numpy/numpy/pull/7866>`__: Document runtests extra argv
+* `#7871 <https://github.com/numpy/numpy/pull/7871>`__: BUG: handle introsort depth limit properly
+* `#7879 <https://github.com/numpy/numpy/pull/7879>`__: DOC: fix typo in documentation of loadtxt (closes #7878)
+* `#7885 <https://github.com/numpy/numpy/pull/7885>`__: Handle NetBSD specific <sys/endian.h>
+* `#7889 <https://github.com/numpy/numpy/pull/7889>`__: DOC: #7881. Fix link to record arrays
+* `#7894 <https://github.com/numpy/numpy/pull/7894>`__: fixup-7790, BUG: construct ma.array from np.array which contains...
+* `#7898 <https://github.com/numpy/numpy/pull/7898>`__: Spelling and grammar fix.
+* `#7903 <https://github.com/numpy/numpy/pull/7903>`__: BUG: fix float16 type not being called due to wrong ordering
+* `#7908 <https://github.com/numpy/numpy/pull/7908>`__: BLD: Fixed detection for recent MKL versions
+* `#7911 <https://github.com/numpy/numpy/pull/7911>`__: BUG: fix for issue#7835 (ma.median of 1d)
+* `#7912 <https://github.com/numpy/numpy/pull/7912>`__: ENH: skip or avoid gc/objectmodel differences btwn pypy and cpython
+* `#7918 <https://github.com/numpy/numpy/pull/7918>`__: ENH: allow numpy.apply_along_axis() to work with ndarray subclasses
+* `#7922 <https://github.com/numpy/numpy/pull/7922>`__: ENH: Add ma.convolve and ma.correlate for #6458
+* `#7925 <https://github.com/numpy/numpy/pull/7925>`__: Monkey-patch _msvccompile.gen_lib_option like any other compilers
+* `#7931 <https://github.com/numpy/numpy/pull/7931>`__: BUG: Check for HAVE_LDOUBLE_DOUBLE_DOUBLE_LE in npy_math_complex.
+* `#7936 <https://github.com/numpy/numpy/pull/7936>`__: ENH: improve duck typing inside iscomplexobj
+* `#7937 <https://github.com/numpy/numpy/pull/7937>`__: BUG: Guard against buggy comparisons in generic quicksort.
+* `#7938 <https://github.com/numpy/numpy/pull/7938>`__: DOC: add cbrt to math summary page
+* `#7941 <https://github.com/numpy/numpy/pull/7941>`__: BUG: Make sure numpy globals keep identity after reload.
+* `#7943 <https://github.com/numpy/numpy/pull/7943>`__: DOC: #7927. Remove deprecated note for memmap relevant for Python...
+* `#7952 <https://github.com/numpy/numpy/pull/7952>`__: BUG: Use keyword arguments to initialize Extension base class.
+* `#7956 <https://github.com/numpy/numpy/pull/7956>`__: BLD: remove __NUMPY_SETUP__ from builtins at end of setup.py
+* `#7963 <https://github.com/numpy/numpy/pull/7963>`__: BUG: MSVCCompiler grows 'lib' & 'include' env strings exponentially.
+* `#7965 <https://github.com/numpy/numpy/pull/7965>`__: BUG: cannot modify tuple after use
+* `#7976 <https://github.com/numpy/numpy/pull/7976>`__: DOC: Fixed documented dimension of return value
+* `#7977 <https://github.com/numpy/numpy/pull/7977>`__: DOC: Create 1.11.2 release notes.
+* `#7979 <https://github.com/numpy/numpy/pull/7979>`__: DOC: Corrected allowed keywords in ``add_installed_library``
+* `#7980 <https://github.com/numpy/numpy/pull/7980>`__: ENH: Add ability to runtime select ufunc loops, add AVX2 integer...
+* `#7985 <https://github.com/numpy/numpy/pull/7985>`__: Rebase 7763, ENH: Add new warning suppression/filtering context
+* `#7987 <https://github.com/numpy/numpy/pull/7987>`__: DOC: See also np.load and np.memmap in np.lib.format.open_memmap
+* `#7988 <https://github.com/numpy/numpy/pull/7988>`__: DOC: Include docstring for cbrt, spacing and fabs in documentation
+* `#7999 <https://github.com/numpy/numpy/pull/7999>`__: ENH: add inplace cases to fast ufunc loop macros
+* `#8006 <https://github.com/numpy/numpy/pull/8006>`__: DOC: Update 1.11.2 release notes.
+* `#8008 <https://github.com/numpy/numpy/pull/8008>`__: MAINT: Remove leftover imp module imports.
+* `#8009 <https://github.com/numpy/numpy/pull/8009>`__: DOC: Fixed three typos in the c-info.ufunc-tutorial
+* `#8011 <https://github.com/numpy/numpy/pull/8011>`__: DOC: Update 1.11.2 release notes.
+* `#8014 <https://github.com/numpy/numpy/pull/8014>`__: BUG: Fix fid.close() to use os.close(fid)
+* `#8016 <https://github.com/numpy/numpy/pull/8016>`__: BUG: Fix numpy.ma.median.
+* `#8018 <https://github.com/numpy/numpy/pull/8018>`__: BUG: Fixes return for np.ma.count if keepdims is True and axis...
+* `#8021 <https://github.com/numpy/numpy/pull/8021>`__: DOC: change all non-code instances of Numpy to NumPy
+* `#8027 <https://github.com/numpy/numpy/pull/8027>`__: ENH: Add platform independent lib dir to PYTHONPATH
+* `#8028 <https://github.com/numpy/numpy/pull/8028>`__: DOC: Update 1.11.2 release notes.
+* `#8030 <https://github.com/numpy/numpy/pull/8030>`__: BUG: fix np.ma.median with only one non-masked value and an axis...
+* `#8038 <https://github.com/numpy/numpy/pull/8038>`__: MAINT: Update error message in rollaxis.
+* `#8040 <https://github.com/numpy/numpy/pull/8040>`__: Update add_newdocs.py
+* `#8042 <https://github.com/numpy/numpy/pull/8042>`__: BUG: core: fix bug in NpyIter buffering with discontinuous arrays
+* `#8045 <https://github.com/numpy/numpy/pull/8045>`__: DOC: Update 1.11.2 release notes.
+* `#8050 <https://github.com/numpy/numpy/pull/8050>`__: remove refcount semantics, now a.resize() almost always requires...
+* `#8051 <https://github.com/numpy/numpy/pull/8051>`__: Clear signaling NaN exceptions
+* `#8054 <https://github.com/numpy/numpy/pull/8054>`__: ENH: add signature argument to vectorize for vectorizing like...
+* `#8057 <https://github.com/numpy/numpy/pull/8057>`__: BUG: lib: Simplify (and fix) pad's handling of the pad_width
+* `#8061 <https://github.com/numpy/numpy/pull/8061>`__: BUG : financial.pmt modifies input (issue #8055)
+* `#8064 <https://github.com/numpy/numpy/pull/8064>`__: MAINT: Add PMIP files to .gitignore
+* `#8065 <https://github.com/numpy/numpy/pull/8065>`__: BUG: Assert fromfile ending earlier in pyx_processing
+* `#8066 <https://github.com/numpy/numpy/pull/8066>`__: BUG, TST: Fix python3-dbg bug in Travis script
+* `#8071 <https://github.com/numpy/numpy/pull/8071>`__: MAINT: Add Tempita to randint helpers
+* `#8075 <https://github.com/numpy/numpy/pull/8075>`__: DOC: Fix description of isinf in nan_to_num
+* `#8080 <https://github.com/numpy/numpy/pull/8080>`__: BUG: non-integers can end up in dtype offsets
+* `#8081 <https://github.com/numpy/numpy/pull/8081>`__: Update outdated Nose URL to nose.readthedocs.io
+* `#8083 <https://github.com/numpy/numpy/pull/8083>`__: ENH: Deprecation warnings for `/` integer division when running...
+* `#8084 <https://github.com/numpy/numpy/pull/8084>`__: DOC: Fix erroneous return type description for np.roots.
+* `#8087 <https://github.com/numpy/numpy/pull/8087>`__: BUG: financial.pmt modifies input #8055
+* `#8088 <https://github.com/numpy/numpy/pull/8088>`__: MAINT: Remove duplicate randint helpers code.
+* `#8093 <https://github.com/numpy/numpy/pull/8093>`__: MAINT: fix assert_raises_regex when used as a context manager
+* `#8096 <https://github.com/numpy/numpy/pull/8096>`__: ENH: Vendorize tempita.
+* `#8098 <https://github.com/numpy/numpy/pull/8098>`__: DOC: Enhance description/usage for np.linalg.eig*h
+* `#8103 <https://github.com/numpy/numpy/pull/8103>`__: Pypy fixes
+* `#8104 <https://github.com/numpy/numpy/pull/8104>`__: Fix test code on cpuinfo's main function
+* `#8107 <https://github.com/numpy/numpy/pull/8107>`__: BUG: Fix array printing with precision=0.
+* `#8109 <https://github.com/numpy/numpy/pull/8109>`__: Fix bug in ravel_multi_index for big indices (Issue #7546)
+* `#8110 <https://github.com/numpy/numpy/pull/8110>`__: BUG: distutils: fix issue with rpath in fcompiler/gnu.py
+* `#8111 <https://github.com/numpy/numpy/pull/8111>`__: ENH: Add a tool for release authors and PRs.
+* `#8112 <https://github.com/numpy/numpy/pull/8112>`__: DOC: Fix "See also" links in linalg.
+* `#8114 <https://github.com/numpy/numpy/pull/8114>`__: BUG: core: add missing error check after PyLong_AsSsize_t
+* `#8121 <https://github.com/numpy/numpy/pull/8121>`__: DOC: Improve histogram2d() example.
+* `#8122 <https://github.com/numpy/numpy/pull/8122>`__: BUG: Fix broken pickle in MaskedArray when dtype is object (Return...
+* `#8124 <https://github.com/numpy/numpy/pull/8124>`__: BUG: Fixed build break
+* `#8125 <https://github.com/numpy/numpy/pull/8125>`__: Rebase, BUG: Fixed deepcopy of F-order object arrays.
+* `#8127 <https://github.com/numpy/numpy/pull/8127>`__: BUG: integers to a negative integer powers should error.
+* `#8141 <https://github.com/numpy/numpy/pull/8141>`__: improve configure checks for broken systems
+* `#8142 <https://github.com/numpy/numpy/pull/8142>`__: BUG: np.ma.mean and var should return scalar if no mask
+* `#8148 <https://github.com/numpy/numpy/pull/8148>`__: BUG: import full module path in npy_load_module
+* `#8153 <https://github.com/numpy/numpy/pull/8153>`__: MAINT: Expose void-scalar "base" attribute in python
+* `#8156 <https://github.com/numpy/numpy/pull/8156>`__: DOC: added example with empty indices for a scalar, #8138
+* `#8160 <https://github.com/numpy/numpy/pull/8160>`__: BUG: fix _array2string for structured array (issue #5692)
+* `#8164 <https://github.com/numpy/numpy/pull/8164>`__: MAINT: Update mailmap for NumPy 1.12.0
+* `#8165 <https://github.com/numpy/numpy/pull/8165>`__: Fixup 8152, BUG: assert_allclose(..., equal_nan=False) doesn't...
+* `#8167 <https://github.com/numpy/numpy/pull/8167>`__: Fixup 8146, DOC: Clarify when PyArray_{Max, Min, Ptp} return...
+* `#8168 <https://github.com/numpy/numpy/pull/8168>`__: DOC: Minor spelling fix in genfromtxt() docstring.
+* `#8173 <https://github.com/numpy/numpy/pull/8173>`__: BLD: Enable build on AIX
+* `#8174 <https://github.com/numpy/numpy/pull/8174>`__: DOC: warn that dtype.descr is only for use in PEP3118
+* `#8177 <https://github.com/numpy/numpy/pull/8177>`__: MAINT: Add python 3.6 support to suppress_warnings
+* `#8178 <https://github.com/numpy/numpy/pull/8178>`__: MAINT: Fix ResourceWarning new in Python 3.6.
+* `#8180 <https://github.com/numpy/numpy/pull/8180>`__: FIX: protect stolen ref by PyArray_NewFromDescr in array_empty
+* `#8181 <https://github.com/numpy/numpy/pull/8181>`__: ENH: Improve announce to find github squash-merge commits.
+* `#8182 <https://github.com/numpy/numpy/pull/8182>`__: MAINT: Update .mailmap
+* `#8183 <https://github.com/numpy/numpy/pull/8183>`__: MAINT: Ediff1d performance
+* `#8184 <https://github.com/numpy/numpy/pull/8184>`__: MAINT: make `assert_allclose` behavior on nans match pre 1.12
+* `#8188 <https://github.com/numpy/numpy/pull/8188>`__: DOC: 'highest' is exclusive for randint()
+* `#8189 <https://github.com/numpy/numpy/pull/8189>`__: BUG: setfield should raise if arr is not writeable
+* `#8190 <https://github.com/numpy/numpy/pull/8190>`__: ENH: Add a float_power function with at least float64 precision.
+* `#8197 <https://github.com/numpy/numpy/pull/8197>`__: DOC: Add missing arguments to np.ufunc.outer
+* `#8198 <https://github.com/numpy/numpy/pull/8198>`__: DEP: Deprecate the keepdims argument to accumulate
+* `#8199 <https://github.com/numpy/numpy/pull/8199>`__: MAINT: change path to env in distutils.system_info. Closes gh-8195.
+* `#8200 <https://github.com/numpy/numpy/pull/8200>`__: BUG: Fix structured array format functions
+* `#8202 <https://github.com/numpy/numpy/pull/8202>`__: ENH: specialize name of dev package by interpreter
+* `#8205 <https://github.com/numpy/numpy/pull/8205>`__: DOC: change development instructions from SSH to HTTPS access.
+* `#8216 <https://github.com/numpy/numpy/pull/8216>`__: DOC: Patch doc errors for atleast_nd and frombuffer
+* `#8218 <https://github.com/numpy/numpy/pull/8218>`__: BUG: ediff1d should return subclasses
+* `#8219 <https://github.com/numpy/numpy/pull/8219>`__: DOC: Turn SciPy references into links.
+* `#8222 <https://github.com/numpy/numpy/pull/8222>`__: ENH: Make numpy.mean() do more precise computation
+* `#8227 <https://github.com/numpy/numpy/pull/8227>`__: BUG: Better check for invalid bounds in np.random.uniform.
+* `#8231 <https://github.com/numpy/numpy/pull/8231>`__: ENH: Refactor numpy ** operators for numpy scalar integer powers
+* `#8234 <https://github.com/numpy/numpy/pull/8234>`__: DOC: Clarified when a copy is made in numpy.asarray
+* `#8236 <https://github.com/numpy/numpy/pull/8236>`__: DOC: Fix documentation pull requests.
+* `#8238 <https://github.com/numpy/numpy/pull/8238>`__: MAINT: Update pavement.py
+* `#8239 <https://github.com/numpy/numpy/pull/8239>`__: ENH: Improve announce tool.
+* `#8240 <https://github.com/numpy/numpy/pull/8240>`__: REL: Prepare for 1.12.x branch
+* `#8243 <https://github.com/numpy/numpy/pull/8243>`__: BUG: Update operator `**` tests for new behavior.
+* `#8246 <https://github.com/numpy/numpy/pull/8246>`__: REL: Reset strides for RELAXED_STRIDE_CHECKING for 1.12 releases.
+* `#8265 <https://github.com/numpy/numpy/pull/8265>`__: BUG: np.piecewise not working for scalars
+* `#8272 <https://github.com/numpy/numpy/pull/8272>`__: TST: Path test should resolve symlinks when comparing
+* `#8282 <https://github.com/numpy/numpy/pull/8282>`__: DOC: Update 1.12.0 release notes.
+* `#8286 <https://github.com/numpy/numpy/pull/8286>`__: BUG: Fix pavement.py write_release_task.
+* `#8296 <https://github.com/numpy/numpy/pull/8296>`__: BUG: Fix iteration over reversed subspaces in mapiter_@name@.
+* `#8304 <https://github.com/numpy/numpy/pull/8304>`__: BUG: Fix PyPy crash in PyUFunc_GenericReduction.
+* `#8319 <https://github.com/numpy/numpy/pull/8319>`__: BLD: blacklist powl (longdouble power function) on OS X.
+* `#8320 <https://github.com/numpy/numpy/pull/8320>`__: BUG: do not link to Accelerate if OpenBLAS, MKL or BLIS are found.
+* `#8322 <https://github.com/numpy/numpy/pull/8322>`__: BUG: fixed kind specifications for parameters
+* `#8336 <https://github.com/numpy/numpy/pull/8336>`__: BUG: fix packbits and unpackbits to correctly handle empty arrays
+* `#8338 <https://github.com/numpy/numpy/pull/8338>`__: BUG: fix test_api test that fails intermittently in python 3
+* `#8339 <https://github.com/numpy/numpy/pull/8339>`__: BUG: Fix ndarray.tofile large file corruption in append mode.
+* `#8359 <https://github.com/numpy/numpy/pull/8359>`__: BUG: Fix suppress_warnings (again) for Python 3.6.
+* `#8372 <https://github.com/numpy/numpy/pull/8372>`__: BUG: Fixes for ma.median and nanpercentile.
+* `#8373 <https://github.com/numpy/numpy/pull/8373>`__: BUG: correct letter case
+* `#8379 <https://github.com/numpy/numpy/pull/8379>`__: DOC: Update 1.12.0-notes.rst.
+* `#8390 <https://github.com/numpy/numpy/pull/8390>`__: ENH: retune apply_along_axis nanmedian cutoff in 1.12
+* `#8391 <https://github.com/numpy/numpy/pull/8391>`__: DEP: Fix escaped string characters deprecated in Python 3.6.
+* `#8394 <https://github.com/numpy/numpy/pull/8394>`__: DOC: create 1.11.3 release notes.
+* `#8399 <https://github.com/numpy/numpy/pull/8399>`__: BUG: Fix author search in announce.py
+* `#8402 <https://github.com/numpy/numpy/pull/8402>`__: DOC, MAINT: Update 1.12.0 notes and mailmap.
+* `#8418 <https://github.com/numpy/numpy/pull/8418>`__: BUG: Fix ma.median even elements for 1.12
+* `#8424 <https://github.com/numpy/numpy/pull/8424>`__: DOC: Fix tools and release notes to be more markdown compatible.
+* `#8427 <https://github.com/numpy/numpy/pull/8427>`__: BUG: Add a lock to assert_equal and other testing functions
+* `#8431 <https://github.com/numpy/numpy/pull/8431>`__: BUG: Fix apply_along_axis() for when func1d() returns a non-ndarray.
+* `#8432 <https://github.com/numpy/numpy/pull/8432>`__: BUG: Let linspace accept input that has an array_interface.
+* `#8437 <https://github.com/numpy/numpy/pull/8437>`__: TST: Update 3.6-dev tests to 3.6 after Python final release.
+* `#8439 <https://github.com/numpy/numpy/pull/8439>`__: DOC: Update 1.12.0 release notes.
+* `#8466 <https://github.com/numpy/numpy/pull/8466>`__: MAINT: Update mailmap entries.
+* `#8467 <https://github.com/numpy/numpy/pull/8467>`__: DOC: Back-port the missing part of gh-8464.
+* `#8476 <https://github.com/numpy/numpy/pull/8476>`__: DOC: Update 1.12.0 release notes.
+* `#8477 <https://github.com/numpy/numpy/pull/8477>`__: DOC: Update 1.12.0 release notes.
diff --git a/doc/changelog/1.12.1-changelog.rst b/doc/changelog/1.12.1-changelog.rst
new file mode 100644
index 000000000..afa5fa686
--- /dev/null
+++ b/doc/changelog/1.12.1-changelog.rst
@@ -0,0 +1,39 @@
+=========
+Changelog
+=========
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Greg Young
+* Joerg Behrmann +
+* John Kirkham
+* Julian Taylor
+* Marten van Kerkwijk
+* Matthew Brett
+* Shota Kawabuchi
+* Jean Utke +
+
+Pull requests merged
+====================
+
+* `#8483 <https://github.com/numpy/numpy/pull/8483>`__: BUG: Fix wrong future nat warning and equiv type logic error...
+* `#8489 <https://github.com/numpy/numpy/pull/8489>`__: BUG: Fix wrong masked median for some special cases
+* `#8490 <https://github.com/numpy/numpy/pull/8490>`__: DOC: Place np.average in inline code
+* `#8491 <https://github.com/numpy/numpy/pull/8491>`__: TST: Work around isfinite inconsistency on i386
+* `#8494 <https://github.com/numpy/numpy/pull/8494>`__: BUG: Guard against replacing constants without '_' spec in f2py.
+* `#8524 <https://github.com/numpy/numpy/pull/8524>`__: BUG: Fix mean for float 16 non-array inputs for 1.12
+* `#8571 <https://github.com/numpy/numpy/pull/8571>`__: BUG: Fix calling python api with error set and minor leaks for...
+* `#8602 <https://github.com/numpy/numpy/pull/8602>`__: BUG: Make iscomplexobj compatible with custom dtypes again
+* `#8618 <https://github.com/numpy/numpy/pull/8618>`__: BUG: Fix undefined behaviour induced by bad __array_wrap__
+* `#8648 <https://github.com/numpy/numpy/pull/8648>`__: BUG: Fix MaskedArray.__setitem__
+* `#8659 <https://github.com/numpy/numpy/pull/8659>`__: BUG: PPC64el machines are POWER for Fortran in f2py
+* `#8665 <https://github.com/numpy/numpy/pull/8665>`__: BUG: Look up methods on MaskedArray in `_frommethod`
+* `#8674 <https://github.com/numpy/numpy/pull/8674>`__: BUG: Remove extra digit in binary_repr at limit
+* `#8704 <https://github.com/numpy/numpy/pull/8704>`__: BUG: Fix deepcopy regression for empty arrays.
+* `#8707 <https://github.com/numpy/numpy/pull/8707>`__: BUG: Fix ma.median for empty ndarrays
diff --git a/doc/changelog/1.13.0-changelog.rst b/doc/changelog/1.13.0-changelog.rst
new file mode 100644
index 000000000..2ea0177b4
--- /dev/null
+++ b/doc/changelog/1.13.0-changelog.rst
@@ -0,0 +1,426 @@
+=========
+Changelog
+=========
+
+Contributors
+============
+
+A total of 102 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* A. Jesse Jiryu Davis +
+* Alessandro Pietro Bardelli +
+* Alex Rothberg +
+* Alexander Shadchin
+* Allan Haldane
+* Andres Guzman-Ballen +
+* Antoine Pitrou
+* Antony Lee
+* B R S Recht +
+* Baurzhan Muftakhidinov +
+* Ben Rowland
+* Benda Xu +
+* Blake Griffith
+* Bradley Wogsland +
+* Brandon Carter +
+* CJ Carey
+* Charles Harris
+* Christoph Gohlke
+* Danny Hermes +
+* David Hagen +
+* Duke Vijitbenjaronk +
+* Egor Klenin +
+* Elliott Forney +
+* Elliott M Forney +
+* Endolith
+* Eric Wieser
+* Erik M. Bray
+* Eugene +
+* Evan Limanto +
+* Felix Berkenkamp +
+* François Bissey +
+* Frederic Bastien
+* Greg Young
+* Gregory R. Lee
+* Importance of Being Ernest +
+* Jaime Fernandez
+* Jakub Wilk +
+* James Cowgill +
+* James Sanders
+* Jean Utke +
+* Jesse Thoren +
+* Jim Crist +
+* Joerg Behrmann +
+* John Kirkham
+* Jonathan Helmus
+* Jonathan L Long
+* Jonathan Tammo Siebert +
+* Joseph Fox-Rabinovitz
+* Joshua Loyal +
+* Juan Nunez-Iglesias +
+* Julian Taylor
+* Kirill Balunov +
+* Likhith Chitneni +
+* Loïc Estève
+* Mads Ohm Larsen
+* Marein Könings +
+* Marten van Kerkwijk
+* Martin Thoma
+* Martino Sorbaro +
+* Marvin Schmidt +
+* Matthew Brett
+* Matthias Bussonnier +
+* Matthias C. M. Troffaes +
+* Matti Picus
+* Michael Seifert
+* Mikhail Pak +
+* Mortada Mehyar
+* Nathaniel J. Smith
+* Nick Papior
+* Oscar Villellas +
+* Pauli Virtanen
+* Pavel Potocek
+* Pete Peeradej Tanruangporn +
+* Philipp A +
+* Ralf Gommers
+* Robert Kern
+* Roland Kaufmann +
+* Ronan Lamy
+* Sami Salonen +
+* Sanchez Gonzalez Alvaro
+* Sebastian Berg
+* Shota Kawabuchi
+* Simon Gibbons
+* Stefan Otte
+* Stefan Peterson +
+* Stephan Hoyer
+* Søren Fuglede Jørgensen +
+* Takuya Akiba
+* Tom Boyd +
+* Ville Skyttä +
+* Warren Weckesser
+* Wendell Smith
+* Yu Feng
+* Zixu Zhao +
+* Zè Vinícius +
+* aha66 +
+* davidjn +
+* drabach +
+* drlvk +
+* jsh9 +
+* solarjoe +
+* zengi +
+
+Pull requests merged
+====================
+
+A total of 309 pull requests were merged for this release.
+
+* `#3861 <https://github.com/numpy/numpy/pull/3861>`__: ENH: Make it possible to NpyIter_RemoveAxis an empty dimension
+* `#5302 <https://github.com/numpy/numpy/pull/5302>`__: Fixed meshgrid to return arrays with same dtype as arguments.
+* `#5726 <https://github.com/numpy/numpy/pull/5726>`__: BUG, API: np.random.multivariate_normal behavior with bad covariance...
+* `#6632 <https://github.com/numpy/numpy/pull/6632>`__: TST/BUG: fromfile - fix test and expose bug with io class argument
+* `#6659 <https://github.com/numpy/numpy/pull/6659>`__: BUG: Let linspace accept input that has an array_interface.
+* `#7742 <https://github.com/numpy/numpy/pull/7742>`__: Add `axis` argument to numpy.unique
+* `#7862 <https://github.com/numpy/numpy/pull/7862>`__: BLD: rewrite np.distutils.exec_command.exec_command()
+* `#7997 <https://github.com/numpy/numpy/pull/7997>`__: ENH: avoid temporary arrays in expressions (again)
+* `#8043 <https://github.com/numpy/numpy/pull/8043>`__: ENH: umath: ensure ufuncs are well-defined with memory overlapping...
+* `#8106 <https://github.com/numpy/numpy/pull/8106>`__: DOC: Document release procedure with a walkthrough.
+* `#8194 <https://github.com/numpy/numpy/pull/8194>`__: BUG: np.piecewise not working for scalars
+* `#8235 <https://github.com/numpy/numpy/pull/8235>`__: BUG: add checks for some invalid structured dtypes. Fixes #2865.
+* `#8241 <https://github.com/numpy/numpy/pull/8241>`__: MAINT: Prepare for 1.13.0 after 1.12.x branch
+* `#8242 <https://github.com/numpy/numpy/pull/8242>`__: BUG: Update operator `**` tests for new behavior.
+* `#8244 <https://github.com/numpy/numpy/pull/8244>`__: DOC: fix typos in arrayprint docstrings.
+* `#8247 <https://github.com/numpy/numpy/pull/8247>`__: ENH: Add `__array_ufunc__`
+* `#8251 <https://github.com/numpy/numpy/pull/8251>`__: MAINT: Cleaned up mailmap
+* `#8267 <https://github.com/numpy/numpy/pull/8267>`__: DOC: Changed shape assignment example to reshape. Elaborated...
+* `#8271 <https://github.com/numpy/numpy/pull/8271>`__: TST: Path test should resolve symlinks when comparing
+* `#8277 <https://github.com/numpy/numpy/pull/8277>`__: DOC: improve comment in prepare_index
+* `#8279 <https://github.com/numpy/numpy/pull/8279>`__: BUG: bool(dtype) is True
+* `#8281 <https://github.com/numpy/numpy/pull/8281>`__: DOC: Update 1.12.0 release notes.
+* `#8284 <https://github.com/numpy/numpy/pull/8284>`__: BUG: Fix iteration over reversed subspaces in mapiter_@name@
+* `#8285 <https://github.com/numpy/numpy/pull/8285>`__: BUG: Fix pavement.py write_release_task.
+* `#8287 <https://github.com/numpy/numpy/pull/8287>`__: DOC: Update 1.13.0 release notes.
+* `#8290 <https://github.com/numpy/numpy/pull/8290>`__: MAINT: let average preserve subclass information.
+* `#8297 <https://github.com/numpy/numpy/pull/8297>`__: DEP: Handle expired deprecations.
+* `#8299 <https://github.com/numpy/numpy/pull/8299>`__: BUG: Make f2py respect kind specifications for real parameters
+* `#8302 <https://github.com/numpy/numpy/pull/8302>`__: BUG: Fix PyPy crash in PyUFunc_GenericReduction.
+* `#8308 <https://github.com/numpy/numpy/pull/8308>`__: BUG: do not link to Accelerate if OpenBLAS, MKL or BLIS are found.
+* `#8312 <https://github.com/numpy/numpy/pull/8312>`__: DEP: Drop deprecated boolean indexing behavior and update to...
+* `#8318 <https://github.com/numpy/numpy/pull/8318>`__: BLD: blacklist powl (longdouble power function) on OS X.
+* `#8326 <https://github.com/numpy/numpy/pull/8326>`__: ENH: Vectorize packbits with SSE2
+* `#8327 <https://github.com/numpy/numpy/pull/8327>`__: BUG: Fix packbits to correctly handle empty arrays
+* `#8335 <https://github.com/numpy/numpy/pull/8335>`__: BUG: Fix ndarray.tofile large file corruption in append mode
+* `#8337 <https://github.com/numpy/numpy/pull/8337>`__: BUG: fix test_api test that fails intermittently in python 3
+* `#8343 <https://github.com/numpy/numpy/pull/8343>`__: TST: Ellipsis indexing creates a view
+* `#8348 <https://github.com/numpy/numpy/pull/8348>`__: ENH: Allow bincount(..., minlength=0).
+* `#8349 <https://github.com/numpy/numpy/pull/8349>`__: BUG: Apply more robust string converts in loadtxt
+* `#8351 <https://github.com/numpy/numpy/pull/8351>`__: BUG: correct letter case
+* `#8354 <https://github.com/numpy/numpy/pull/8354>`__: BUG: Fix suppress_warnings (again) for Python 3.6.
+* `#8355 <https://github.com/numpy/numpy/pull/8355>`__: Fix building extensions with MinGW for Python 3.5
+* `#8356 <https://github.com/numpy/numpy/pull/8356>`__: Allow extensions to be built with MinGW in a virtualenv
+* `#8360 <https://github.com/numpy/numpy/pull/8360>`__: MAINT: Drop special case code for python2 < 2.7 and python3 <...
+* `#8364 <https://github.com/numpy/numpy/pull/8364>`__: BUG: handle unmasked NaN in ma.median like normal median
+* `#8366 <https://github.com/numpy/numpy/pull/8366>`__: BUG: fix nanpercentile not returning scalar with axis argument
+* `#8367 <https://github.com/numpy/numpy/pull/8367>`__: xlocale.h is not available in newlib / Cygwin
+* `#8368 <https://github.com/numpy/numpy/pull/8368>`__: ENH: Implement most linalg operations for 0x0 matrices
+* `#8369 <https://github.com/numpy/numpy/pull/8369>`__: TST: Fix various incorrect linalg tests
+* `#8374 <https://github.com/numpy/numpy/pull/8374>`__: DOC: Fixed minor typo in William Gosset's name
+* `#8377 <https://github.com/numpy/numpy/pull/8377>`__: Switch to the PyPI version of plex to generate lapack_lite
+* `#8380 <https://github.com/numpy/numpy/pull/8380>`__: DOC: Update 1.12.0-notes.rst.
+* `#8381 <https://github.com/numpy/numpy/pull/8381>`__: MAINT: Rebuild lapack lite
+* `#8382 <https://github.com/numpy/numpy/pull/8382>`__: DEP: Fix escaped string characters deprecated in Python 3.6.
+* `#8384 <https://github.com/numpy/numpy/pull/8384>`__: ENH: Add tool to check for deprecated escaped characters.
+* `#8388 <https://github.com/numpy/numpy/pull/8388>`__: API: Return scalars for scalar inputs to np.real/imag
+* `#8389 <https://github.com/numpy/numpy/pull/8389>`__: ENH: retune apply_along_axis nanmedian cutoff
+* `#8395 <https://github.com/numpy/numpy/pull/8395>`__: DOC: create 1.11.3 release notes.
+* `#8398 <https://github.com/numpy/numpy/pull/8398>`__: BUG: Fix author search in announce.py
+* `#8400 <https://github.com/numpy/numpy/pull/8400>`__: Fix `corrcoef` and `cov` rowvar param handling
+* `#8401 <https://github.com/numpy/numpy/pull/8401>`__: DOC, MAINT: Update 1.12.0 notes and mailmap.
+* `#8410 <https://github.com/numpy/numpy/pull/8410>`__: BUG: Fixed behavior of assert_array_less for +/-inf
+* `#8414 <https://github.com/numpy/numpy/pull/8414>`__: BUG: fixed failure of np.ma.median for 1-D even arrays.
+* `#8416 <https://github.com/numpy/numpy/pull/8416>`__: BUG operations involving MaskedArray with output given do not...
+* `#8421 <https://github.com/numpy/numpy/pull/8421>`__: ENH: Add isnat function and make comparison tests NAT specific
+* `#8423 <https://github.com/numpy/numpy/pull/8423>`__: Adding isin function for multidimensional arrays
+* `#8426 <https://github.com/numpy/numpy/pull/8426>`__: BUG: Fix apply_along_axis() for when func1d() returns a non-ndarray
+* `#8434 <https://github.com/numpy/numpy/pull/8434>`__: TST: Update 3.6-dev tests to 3.6 after Python final release.
+* `#8441 <https://github.com/numpy/numpy/pull/8441>`__: BUG: Fix crash on 0d return value in apply_along_axis
+* `#8443 <https://github.com/numpy/numpy/pull/8443>`__: BUG: fix set memmap offset attribute correctly when offset is...
+* `#8445 <https://github.com/numpy/numpy/pull/8445>`__: BUG: correct norm='ortho' scaling for rfft when n != None
+* `#8446 <https://github.com/numpy/numpy/pull/8446>`__: ENH: gradient support for unevenly spaced data
+* `#8448 <https://github.com/numpy/numpy/pull/8448>`__: TST: remove a duplicate test. Closes gh-8447.
+* `#8452 <https://github.com/numpy/numpy/pull/8452>`__: BUG: assert_almost_equal fails on subclasses that cannot handle...
+* `#8454 <https://github.com/numpy/numpy/pull/8454>`__: MAINT: Fix building extensions with MinGW in WinPython 3.4
+* `#8464 <https://github.com/numpy/numpy/pull/8464>`__: [DOC]Small release doc fix
+* `#8468 <https://github.com/numpy/numpy/pull/8468>`__: BUG: Ensure inf/nan removal in assert_array_compare is matrix-safe.
+* `#8470 <https://github.com/numpy/numpy/pull/8470>`__: DOC: Add example to np.savez_compressed
+* `#8474 <https://github.com/numpy/numpy/pull/8474>`__: MAINT: use env in shebang instead of absolute path to python
+* `#8475 <https://github.com/numpy/numpy/pull/8475>`__: DOC: improve clip docstring
+* `#8478 <https://github.com/numpy/numpy/pull/8478>`__: MAINT: Forward port accumulated changes from the 1.12.0 release.
+* `#8482 <https://github.com/numpy/numpy/pull/8482>`__: TST: switch to ubuntu yakkety for i386 testing
+* `#8483 <https://github.com/numpy/numpy/pull/8483>`__: BUG: fix wrong future nat warning and equiv type logic error
+* `#8486 <https://github.com/numpy/numpy/pull/8486>`__: BUG: Prevent crash for length-0 input to fromrecords
+* `#8488 <https://github.com/numpy/numpy/pull/8488>`__: ENH: Improve the alignment of `recarray.__repr__`
+* `#8489 <https://github.com/numpy/numpy/pull/8489>`__: BUG: fix wrong masked median for some special cases
+* `#8490 <https://github.com/numpy/numpy/pull/8490>`__: DOC: Place np.average in inline code
+* `#8491 <https://github.com/numpy/numpy/pull/8491>`__: TST: work around isfinite inconsistency on i386
+* `#8494 <https://github.com/numpy/numpy/pull/8494>`__: BUG: guard against replacing constants without `'_'` spec
+* `#8496 <https://github.com/numpy/numpy/pull/8496>`__: Update LICENSE.txt to 2017
+* `#8497 <https://github.com/numpy/numpy/pull/8497>`__: BUG: Fix creating a np.matrix from string syntax involving booleans
+* `#8501 <https://github.com/numpy/numpy/pull/8501>`__: Changing spurious Legendre reference to Chebyshev in chebfit...
+* `#8504 <https://github.com/numpy/numpy/pull/8504>`__: ENH: hard-code finfo parameters for known types
+* `#8508 <https://github.com/numpy/numpy/pull/8508>`__: BUG: Fix loss of dimensionality of np.ma.masked in ufunc
+* `#8524 <https://github.com/numpy/numpy/pull/8524>`__: BUG: fix mean for float 16 non-array inputs
+* `#8527 <https://github.com/numpy/numpy/pull/8527>`__: DOC: fix return value for PyArray_Resize
+* `#8539 <https://github.com/numpy/numpy/pull/8539>`__: BUG: core: in dot(), make copies if out has memory overlap with...
+* `#8540 <https://github.com/numpy/numpy/pull/8540>`__: DOC: Update arrays.ndarray.rst
+* `#8541 <https://github.com/numpy/numpy/pull/8541>`__: DOC: Revert 8540 patch 1
+* `#8542 <https://github.com/numpy/numpy/pull/8542>`__: MAINT: typo in histogram docstring
+* `#8551 <https://github.com/numpy/numpy/pull/8551>`__: DOC: Missing backticks
+* `#8555 <https://github.com/numpy/numpy/pull/8555>`__: Fixing docstring error in polyvander2d
+* `#8558 <https://github.com/numpy/numpy/pull/8558>`__: DOC: Improve documentation of None as interval bounds in clip.
+* `#8567 <https://github.com/numpy/numpy/pull/8567>`__: TST: core: use aligned memory for dot() out= arrays
+* `#8568 <https://github.com/numpy/numpy/pull/8568>`__: TST: re-enable PPC longdouble spacing tests
+* `#8569 <https://github.com/numpy/numpy/pull/8569>`__: ENH: Add missing `__tracebackhide__` to testing functions.
+* `#8570 <https://github.com/numpy/numpy/pull/8570>`__: BUG: fix issue #8250 when np.array gets called on an invalid...
+* `#8571 <https://github.com/numpy/numpy/pull/8571>`__: BUG: fix calling python api with error set and minor leaks
+* `#8572 <https://github.com/numpy/numpy/pull/8572>`__: MAINT: remove ma out= workaround
+* `#8575 <https://github.com/numpy/numpy/pull/8575>`__: DOC: fix several typos #8537.
+* `#8584 <https://github.com/numpy/numpy/pull/8584>`__: MAINT: Use the same exception for all bad axis requests
+* `#8586 <https://github.com/numpy/numpy/pull/8586>`__: MAINT: PyPy3 compatibility: sys.getsizeof()
+* `#8590 <https://github.com/numpy/numpy/pull/8590>`__: BUG MaskedArray `__eq__` wrong for masked scalar, multi-d recarray
+* `#8591 <https://github.com/numpy/numpy/pull/8591>`__: BUG: make np.squeeze always return an array, never a scalar
+* `#8592 <https://github.com/numpy/numpy/pull/8592>`__: MAINT: Remove `__setslice__` and `__getslice__`
+* `#8594 <https://github.com/numpy/numpy/pull/8594>`__: BUG: Fix `MaskedArray.__setitem__`
+* `#8596 <https://github.com/numpy/numpy/pull/8596>`__: BUG: match hard-coded finfo to calculated MachAr
+* `#8602 <https://github.com/numpy/numpy/pull/8602>`__: BUG: Make iscomplexobj compatible with custom dtypes again
+* `#8605 <https://github.com/numpy/numpy/pull/8605>`__: DOC: gradient uses 1st order central difference in the interior
+* `#8606 <https://github.com/numpy/numpy/pull/8606>`__: Revert "DOC: gradient uses 1st order central difference in the...
+* `#8610 <https://github.com/numpy/numpy/pull/8610>`__: Revert "BUG: make np.squeeze always return an array, never a...
+* `#8611 <https://github.com/numpy/numpy/pull/8611>`__: DOC: The axis argument of average can be a tuple of ints
+* `#8612 <https://github.com/numpy/numpy/pull/8612>`__: MAINT: Decrease merge conflicts in release notes
+* `#8614 <https://github.com/numpy/numpy/pull/8614>`__: BUG: Don't leak internal exceptions when given an empty array
+* `#8617 <https://github.com/numpy/numpy/pull/8617>`__: BUG: Copy meshgrid after broadcasting
+* `#8618 <https://github.com/numpy/numpy/pull/8618>`__: BUG: Fix undefined behaviour induced by bad `__array_wrap__`
+* `#8619 <https://github.com/numpy/numpy/pull/8619>`__: BUG: blas_info should record include_dirs
+* `#8625 <https://github.com/numpy/numpy/pull/8625>`__: DOC: Create 1.12.1 release notes.
+* `#8629 <https://github.com/numpy/numpy/pull/8629>`__: ENH: Improve the efficiency of indices
+* `#8631 <https://github.com/numpy/numpy/pull/8631>`__: Fix typo in fill_diagonal docstring.
+* `#8633 <https://github.com/numpy/numpy/pull/8633>`__: DOC: Mention boolean arrays in the ix_ documentation.
+* `#8636 <https://github.com/numpy/numpy/pull/8636>`__: MAINT: ensure benchmark suite is importable on old numpy versions
+* `#8638 <https://github.com/numpy/numpy/pull/8638>`__: BUG: fix wrong odd determination in packbits
+* `#8643 <https://github.com/numpy/numpy/pull/8643>`__: BUG: Fix double-wrapping of object scalars
+* `#8645 <https://github.com/numpy/numpy/pull/8645>`__: MAINT: Use getmask where possible
+* `#8646 <https://github.com/numpy/numpy/pull/8646>`__: ENH: Allow for an in-place nan_to_num conversion
+* `#8647 <https://github.com/numpy/numpy/pull/8647>`__: Fix various bugs in np.ma.where
+* `#8649 <https://github.com/numpy/numpy/pull/8649>`__: Upgrade to Lapack lite 3.2.2
+* `#8650 <https://github.com/numpy/numpy/pull/8650>`__: DOC: Fix obsolete data in readme
+* `#8651 <https://github.com/numpy/numpy/pull/8651>`__: MAINT: Split lapack_lite more logically across files
+* `#8652 <https://github.com/numpy/numpy/pull/8652>`__: TST: Improve testing of read-only mmaps
+* `#8655 <https://github.com/numpy/numpy/pull/8655>`__: MAINT: Squelch parenthesis warnings from GCC
+* `#8656 <https://github.com/numpy/numpy/pull/8656>`__: BUG: allow for precision > 17 in longdouble repr test
+* `#8658 <https://github.com/numpy/numpy/pull/8658>`__: BUG: fix denormal linspace test for longdouble
+* `#8659 <https://github.com/numpy/numpy/pull/8659>`__: BUG: PPC64el machines are POWER for Fortran
+* `#8663 <https://github.com/numpy/numpy/pull/8663>`__: ENH: Fix alignment of repr for array subclasses
+* `#8665 <https://github.com/numpy/numpy/pull/8665>`__: BUG: Look up methods on MaskedArray in _frommethod
+* `#8667 <https://github.com/numpy/numpy/pull/8667>`__: BUG: Preserve identity of dtypes in make_mask_descr
+* `#8668 <https://github.com/numpy/numpy/pull/8668>`__: DOC: Add more examples for `np.c_`
+* `#8669 <https://github.com/numpy/numpy/pull/8669>`__: MAINT: Warn users when calling np.ma.MaskedArray.partition function.
+* `#8672 <https://github.com/numpy/numpy/pull/8672>`__: BUG: Use int for axes, not intp
+* `#8674 <https://github.com/numpy/numpy/pull/8674>`__: BUG: Remove extra digit in binary_repr at limit
+* `#8675 <https://github.com/numpy/numpy/pull/8675>`__: BUG: Fix problems detecting runtime for MSYS2 compiler on Windows
+* `#8677 <https://github.com/numpy/numpy/pull/8677>`__: MAINT: We can now rely on itertools.izip_longest existing
+* `#8678 <https://github.com/numpy/numpy/pull/8678>`__: BUG: Fix argsort vs sort in Masked arrays
+* `#8680 <https://github.com/numpy/numpy/pull/8680>`__: DOC: Removed broken link
+* `#8682 <https://github.com/numpy/numpy/pull/8682>`__: ENH: allow argument to matrix_rank to be stacked
+* `#8685 <https://github.com/numpy/numpy/pull/8685>`__: ENH: add dtype.ndim
+* `#8688 <https://github.com/numpy/numpy/pull/8688>`__: DOC: Added note to np.diff
+* `#8692 <https://github.com/numpy/numpy/pull/8692>`__: MAINT: Fix deprecated escape sequences
+* `#8694 <https://github.com/numpy/numpy/pull/8694>`__: BUG: missing comma disabled some header checks
+* `#8695 <https://github.com/numpy/numpy/pull/8695>`__: MAINT: Remove numpy-macosx-installer and win32build directories.
+* `#8698 <https://github.com/numpy/numpy/pull/8698>`__: DOC: fix incorrect mask value when value was changed
+* `#8702 <https://github.com/numpy/numpy/pull/8702>`__: DOC: Fixed small mistakes in numpy.copy documentation.
+* `#8704 <https://github.com/numpy/numpy/pull/8704>`__: BUG: Fix deepcopy regression for empty arrays.
+* `#8705 <https://github.com/numpy/numpy/pull/8705>`__: BUG: fix ma.median for empty ndarrays
+* `#8709 <https://github.com/numpy/numpy/pull/8709>`__: DOC: Fixed minor typos in temp_elide.c
+* `#8713 <https://github.com/numpy/numpy/pull/8713>`__: BUG: Don't signal FP exceptions in np.absolute
+* `#8716 <https://github.com/numpy/numpy/pull/8716>`__: MAINT: Mark some tests with slow decorator
+* `#8718 <https://github.com/numpy/numpy/pull/8718>`__: BUG: Fix assert statements in random.choice tests
+* `#8729 <https://github.com/numpy/numpy/pull/8729>`__: DOC: Add float_power to routines.math documentation autosummary
+* `#8731 <https://github.com/numpy/numpy/pull/8731>`__: DOC: added linalg.multi_dot to doc
+* `#8737 <https://github.com/numpy/numpy/pull/8737>`__: DOC: Mention that expand_dims and squeeze are inverses
+* `#8744 <https://github.com/numpy/numpy/pull/8744>`__: MAINT: Remove files and constants that were only needed for Bento.
+* `#8745 <https://github.com/numpy/numpy/pull/8745>`__: TST: Remove unused env from tox
+* `#8746 <https://github.com/numpy/numpy/pull/8746>`__: DOC: Update 1.12.1 release notes.
+* `#8749 <https://github.com/numpy/numpy/pull/8749>`__: DOC: Add 1.12.1 release notes to documentation.
+* `#8750 <https://github.com/numpy/numpy/pull/8750>`__: BUG: Fix np.average for object arrays
+* `#8754 <https://github.com/numpy/numpy/pull/8754>`__: ENH: Allows building npy_math with static inlining
+* `#8756 <https://github.com/numpy/numpy/pull/8756>`__: BUG: Correct lapack ld* args
+* `#8759 <https://github.com/numpy/numpy/pull/8759>`__: BUG: Add HOME to the git environment.
+* `#8761 <https://github.com/numpy/numpy/pull/8761>`__: MAINT: better warning message when running build_src from sdist
+* `#8762 <https://github.com/numpy/numpy/pull/8762>`__: BUG: Prevent crash in `poly1d.__eq__`
+* `#8781 <https://github.com/numpy/numpy/pull/8781>`__: BUG: Revert gh-8570.
+* `#8788 <https://github.com/numpy/numpy/pull/8788>`__: BUG: Fix scipy incompatibility with cleanup to poly1d
+* `#8792 <https://github.com/numpy/numpy/pull/8792>`__: DOC: Fix typos
+* `#8793 <https://github.com/numpy/numpy/pull/8793>`__: DOC: fix minor docstring typos
+* `#8795 <https://github.com/numpy/numpy/pull/8795>`__: ENH: Add the 'heaviside' ufunc.
+* `#8796 <https://github.com/numpy/numpy/pull/8796>`__: BUG: fix regex of determineexprtype_re_3 in numpy/f2py/crackfortran.py
+* `#8799 <https://github.com/numpy/numpy/pull/8799>`__: DOC: Include np. prefix in meshgrid examples
+* `#8801 <https://github.com/numpy/numpy/pull/8801>`__: BUG: fix the error msg of empty hstack input
+* `#8806 <https://github.com/numpy/numpy/pull/8806>`__: BUG: Raise TypeError on ternary power
+* `#8807 <https://github.com/numpy/numpy/pull/8807>`__: TST: Prove that poly1d coeffs are immutable
+* `#8813 <https://github.com/numpy/numpy/pull/8813>`__: MAINT: tidy up some of npyio
+* `#8816 <https://github.com/numpy/numpy/pull/8816>`__: BUG: `np.lib.index_tricks.r_` mutates its own state
+* `#8820 <https://github.com/numpy/numpy/pull/8820>`__: DOC: Add 'heaviside' to the ufunc documentation.
+* `#8822 <https://github.com/numpy/numpy/pull/8822>`__: DOC: Use gray and hsv colormaps in examples
+* `#8824 <https://github.com/numpy/numpy/pull/8824>`__: MAINT: a couple distutils cleanups
+* `#8825 <https://github.com/numpy/numpy/pull/8825>`__: STY: Fix bad style in umath_linalg
+* `#8828 <https://github.com/numpy/numpy/pull/8828>`__: DOC: Add missing release note for #8584
+* `#8830 <https://github.com/numpy/numpy/pull/8830>`__: DOC: added a whitespace so that sphinx directive displays correctly
+* `#8832 <https://github.com/numpy/numpy/pull/8832>`__: MAINT: Remove python <2.7,<3.3 string/unicode workarounds
+* `#8834 <https://github.com/numpy/numpy/pull/8834>`__: BENCH: use initialized memory for count_nonzero benchmark
+* `#8835 <https://github.com/numpy/numpy/pull/8835>`__: DOC: Include nextafter and spacing function in documentation.
+* `#8836 <https://github.com/numpy/numpy/pull/8836>`__: DOC: Several documentation fixes (broken links, incorrect sphinx...
+* `#8837 <https://github.com/numpy/numpy/pull/8837>`__: DOC: Spell out note for `hstack`
+* `#8840 <https://github.com/numpy/numpy/pull/8840>`__: DOC: update docs and comments for move of mailing list to python.org
+* `#8843 <https://github.com/numpy/numpy/pull/8843>`__: MAINT: Use AxisError in more places
+* `#8844 <https://github.com/numpy/numpy/pull/8844>`__: DOC: Spell out note for `dstack`
+* `#8845 <https://github.com/numpy/numpy/pull/8845>`__: DOC: Add release note about np.real and np.conj
+* `#8846 <https://github.com/numpy/numpy/pull/8846>`__: BUG: Buttress handling of extreme values in randint
+* `#8847 <https://github.com/numpy/numpy/pull/8847>`__: DOC: Preliminary edit of 1.13.0 release notes.
+* `#8850 <https://github.com/numpy/numpy/pull/8850>`__: DOC: Updated doc of nonzero()
+* `#8852 <https://github.com/numpy/numpy/pull/8852>`__: MAINT: restore auto-vectorization of inplace operations
+* `#8854 <https://github.com/numpy/numpy/pull/8854>`__: MAINT: Remove manual expansion of template loop for some ufuncs
+* `#8857 <https://github.com/numpy/numpy/pull/8857>`__: DOC: remove empty jargon reference in glossary
+* `#8859 <https://github.com/numpy/numpy/pull/8859>`__: DOC: Fixed README formatting
+* `#8861 <https://github.com/numpy/numpy/pull/8861>`__: MAINT: Include the function name in all argument error messages
+* `#8862 <https://github.com/numpy/numpy/pull/8862>`__: BUG: do not memcpy ptr to freed object
+* `#8870 <https://github.com/numpy/numpy/pull/8870>`__: TST: Respect compiler customizations
+* `#8871 <https://github.com/numpy/numpy/pull/8871>`__: DOC: Replace line that was errantly removed in #8850
+* `#8873 <https://github.com/numpy/numpy/pull/8873>`__: BUG: Make runtests.py --shell behave better on windows
+* `#8874 <https://github.com/numpy/numpy/pull/8874>`__: TST: Use explicit NaT in test_structure_format
+* `#8876 <https://github.com/numpy/numpy/pull/8876>`__: MAINT: Minor ufunc cleanup
+* `#8883 <https://github.com/numpy/numpy/pull/8883>`__: BUG: Ensure Errors are correctly checked when PyFloat_AsDouble...
+* `#8884 <https://github.com/numpy/numpy/pull/8884>`__: BUG: Check for errors when PyInt_AsLong is called in np.random
+* `#8885 <https://github.com/numpy/numpy/pull/8885>`__: ENH: add support for python3.6 memory tracing
+* `#8886 <https://github.com/numpy/numpy/pull/8886>`__: ENH: add np.block to improve upon np.bmat
+* `#8888 <https://github.com/numpy/numpy/pull/8888>`__: BUG: Don't modify types after PyType_Ready
+* `#8890 <https://github.com/numpy/numpy/pull/8890>`__: DOC: proposed fixes for issues #7622 and #7914
+* `#8894 <https://github.com/numpy/numpy/pull/8894>`__: MAINT: Use PyArray_FROM_* macros
+* `#8895 <https://github.com/numpy/numpy/pull/8895>`__: BUG: return values of exec_command were swapped
+* `#8896 <https://github.com/numpy/numpy/pull/8896>`__: ENH: do integer**2. inplace
+* `#8897 <https://github.com/numpy/numpy/pull/8897>`__: ENH: don't rebuild unchanged files
+* `#8898 <https://github.com/numpy/numpy/pull/8898>`__: BUG: Move ctypes ImportError catching to appropriate place
+* `#8900 <https://github.com/numpy/numpy/pull/8900>`__: Fix typos.
+* `#8903 <https://github.com/numpy/numpy/pull/8903>`__: BUG: Fix setitem on UNICODE, STRING, and LONGDOUBLE
+* `#8905 <https://github.com/numpy/numpy/pull/8905>`__: BUG: Correctly distinguish between 0d arrays and scalars in `MaskedArray.__getitem__`
+* `#8907 <https://github.com/numpy/numpy/pull/8907>`__: COMPAT: notify garbage collector when memory is allocated
+* `#8911 <https://github.com/numpy/numpy/pull/8911>`__: BUG: check_api_dict does not correctly handle tuple values
+* `#8914 <https://github.com/numpy/numpy/pull/8914>`__: DOC: Replace reference to np.swapaxis with np.swapaxes
+* `#8918 <https://github.com/numpy/numpy/pull/8918>`__: DEP: deprecate calling ma.argsort without an axis
+* `#8919 <https://github.com/numpy/numpy/pull/8919>`__: MAINT, TST: Remove duplicated code for testing the two types...
+* `#8921 <https://github.com/numpy/numpy/pull/8921>`__: MAINT: avoid memcpy when i == j
+* `#8925 <https://github.com/numpy/numpy/pull/8925>`__: DOC: Fix incorrect call to set_printoptions
+* `#8928 <https://github.com/numpy/numpy/pull/8928>`__: BUG: runtests --bench fails on windows
+* `#8929 <https://github.com/numpy/numpy/pull/8929>`__: BENCH: Masked array benchmarks
+* `#8939 <https://github.com/numpy/numpy/pull/8939>`__: DEP: Deprecate `np.ma.MaskedArray.mini`
+* `#8942 <https://github.com/numpy/numpy/pull/8942>`__: DOC: stop refering to 'S' dtype as string
+* `#8948 <https://github.com/numpy/numpy/pull/8948>`__: DEP: Deprecate NPY_CHAR
+* `#8949 <https://github.com/numpy/numpy/pull/8949>`__: REL: add `python_requires` to setup.py
+* `#8951 <https://github.com/numpy/numpy/pull/8951>`__: ENH: Add ufunc.identity for hypot and logical_xor
+* `#8953 <https://github.com/numpy/numpy/pull/8953>`__: DEP: Add back `ndarray.__[sg]etslice__`, but deprecate it
+* `#8959 <https://github.com/numpy/numpy/pull/8959>`__: DEP: Remove alter/restore dot methods
+* `#8961 <https://github.com/numpy/numpy/pull/8961>`__: MAINT: Update Intel compiler options.
+* `#8962 <https://github.com/numpy/numpy/pull/8962>`__: DOC: Wrong return type of np.random.choice and wrong variable...
+* `#8963 <https://github.com/numpy/numpy/pull/8963>`__: BUG: Prevent crash on repr of recursive array
+* `#8964 <https://github.com/numpy/numpy/pull/8964>`__: BUG: don't create array with invalid memory in where
+* `#8967 <https://github.com/numpy/numpy/pull/8967>`__: ENH: add np.positive ufunc
+* `#8971 <https://github.com/numpy/numpy/pull/8971>`__: BUG: do not change size 0 description when viewing data
+* `#8976 <https://github.com/numpy/numpy/pull/8976>`__: BUG: Prevent VOID_copyswapn ignoring strides
+* `#8978 <https://github.com/numpy/numpy/pull/8978>`__: TST: enable shadowed test
+* `#8980 <https://github.com/numpy/numpy/pull/8980>`__: DOC: Correct shape of edges in np.histogram2d
+* `#8988 <https://github.com/numpy/numpy/pull/8988>`__: DOC: Explain the behavior of diff on unsigned types
+* `#8989 <https://github.com/numpy/numpy/pull/8989>`__: ENH: Print object arrays containing lists unambiguously
+* `#8996 <https://github.com/numpy/numpy/pull/8996>`__: BUG/DEP: Make ufunclike functions more ufunc-like
+* `#8997 <https://github.com/numpy/numpy/pull/8997>`__: TST: fix io test that doesn't close file
+* `#8998 <https://github.com/numpy/numpy/pull/8998>`__: DOC: Use ` instead of * to refer to a function parameter.
+* `#8999 <https://github.com/numpy/numpy/pull/8999>`__: TST: Enable NPY_RELAXED_STRIDES_DEBUG environment variable.
+* `#9002 <https://github.com/numpy/numpy/pull/9002>`__: MAINT: Document ufunc(where=...) as defaulting to True
+* `#9012 <https://github.com/numpy/numpy/pull/9012>`__: MAINT: Set the `__name__` of generated methods
+* `#9013 <https://github.com/numpy/numpy/pull/9013>`__: BUG: Fix np.lib.nanfunctions on object arrays
+* `#9014 <https://github.com/numpy/numpy/pull/9014>`__: BUG: `__array_ufunc__= None` -> TypeError
+* `#9015 <https://github.com/numpy/numpy/pull/9015>`__: ENH: Use `__array_ufunc__ = None` in polynomial convenience classes.
+* `#9021 <https://github.com/numpy/numpy/pull/9021>`__: BUG: Make ndarray inplace operators forward calls when needed.
+* `#9024 <https://github.com/numpy/numpy/pull/9024>`__: DOC: Correct default stop index value for negative stepping.
+* `#9026 <https://github.com/numpy/numpy/pull/9026>`__: ENH: Show full PEP 457 argument lists for ufuncs
+* `#9027 <https://github.com/numpy/numpy/pull/9027>`__: DOC: update binary-op / ufunc interactions and recommendations...
+* `#9038 <https://github.com/numpy/numpy/pull/9038>`__: BUG: check compiler flags to determine the need for a rebuild
+* `#9039 <https://github.com/numpy/numpy/pull/9039>`__: DOC: actually produce docs for as_strided
+* `#9050 <https://github.com/numpy/numpy/pull/9050>`__: BUG: distutils, add compatiblity python parallelization
+* `#9054 <https://github.com/numpy/numpy/pull/9054>`__: BUG: Various fixes to _dtype_from_pep3118
+* `#9058 <https://github.com/numpy/numpy/pull/9058>`__: MAINT: Update FutureWarning message.
+* `#9060 <https://github.com/numpy/numpy/pull/9060>`__: DEP: deprecate ndarray.conjugate's no-op fall through for non-numeric...
+* `#9061 <https://github.com/numpy/numpy/pull/9061>`__: BUG: ndarray.conjugate broken for custom dtypes (unlike np.conjugate)
+* `#9062 <https://github.com/numpy/numpy/pull/9062>`__: STY: two blank lines between classes per PEP8
+* `#9063 <https://github.com/numpy/numpy/pull/9063>`__: ENH: add np.divmod ufunc
+* `#9070 <https://github.com/numpy/numpy/pull/9070>`__: BUG: Preserve field order in join_by, avoids FutureWarning
+* `#9072 <https://github.com/numpy/numpy/pull/9072>`__: BUG: if importing multiarray fails, don't discard the error message
+* `#9074 <https://github.com/numpy/numpy/pull/9074>`__: MAINT: Python 3.6 invalid escape sequence deprecation fixes
+* `#9075 <https://github.com/numpy/numpy/pull/9075>`__: ENH: Spelling fixes
+* `#9077 <https://github.com/numpy/numpy/pull/9077>`__: BUG: Prevent stackoverflow on self-containing arrays
+* `#9080 <https://github.com/numpy/numpy/pull/9080>`__: MAINT, DOC: Update 1.13.0 release notes and .mailmap
+* `#9087 <https://github.com/numpy/numpy/pull/9087>`__: BUG: `__array_ufunc__` should always be looked up on the type,...
+* `#9091 <https://github.com/numpy/numpy/pull/9091>`__: MAINT: refine error message for `__array_ufunc__` not implemented
+* `#9093 <https://github.com/numpy/numpy/pull/9093>`__: BUG remove memory leak in array ufunc override.
+* `#9097 <https://github.com/numpy/numpy/pull/9097>`__: TST: fix test_basic failure on Windows
+* `#9111 <https://github.com/numpy/numpy/pull/9111>`__: BUG: Array ufunc reduce out tuple
+* `#9123 <https://github.com/numpy/numpy/pull/9123>`__: DOC: update 1.13 release note for MaskedArray, masked constants...
+* `#9124 <https://github.com/numpy/numpy/pull/9124>`__: BUG: Do not elide complex abs() for 1.13
+* `#9129 <https://github.com/numpy/numpy/pull/9129>`__: BUG: `ndarray.__pow__` does not check result of fast_scalar_power
+* `#9133 <https://github.com/numpy/numpy/pull/9133>`__: DEP: Deprecate incorrect behavior of expand_dims.
+* `#9135 <https://github.com/numpy/numpy/pull/9135>`__: BUG: delay calls of array repr in getlimits
+* `#9136 <https://github.com/numpy/numpy/pull/9136>`__: BUG: Compilation crashes in MSVC when LIB or INCLUDE is not set
+* `#9173 <https://github.com/numpy/numpy/pull/9173>`__: BUG: have as_strided() keep custom dtypes
+* `#9175 <https://github.com/numpy/numpy/pull/9175>`__: BUG: ensure structured `ndarray.__eq__,__ne__` defer when appropriate.
+* `#9196 <https://github.com/numpy/numpy/pull/9196>`__: BUG: pull request 9087 modifies a tuple after use
+* `#9199 <https://github.com/numpy/numpy/pull/9199>`__: DOC: Update bincount docs to reflect gh-8348 (backport)
diff --git a/doc/changelog/1.13.1-changelog.rst b/doc/changelog/1.13.1-changelog.rst
new file mode 100644
index 000000000..0357c26ef
--- /dev/null
+++ b/doc/changelog/1.13.1-changelog.rst
@@ -0,0 +1,44 @@
+
+Contributors
+============
+
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andras Deak +
+* Bob Eldering +
+* Charles Harris
+* Daniel Hrisca +
+* Eric Wieser
+* Joshua Leahy +
+* Julian Taylor
+* Michael Seifert
+* Pauli Virtanen
+* Ralf Gommers
+* Roland Kaufmann
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 19 pull requests were merged for this release.
+
+* `#9240 <https://github.com/numpy/numpy/pull/9240>`__: DOC: BLD: fix lots of Sphinx warnings/errors.
+* `#9255 <https://github.com/numpy/numpy/pull/9255>`__: Revert "DEP: Raise TypeError for subtract(bool_, bool_)."
+* `#9261 <https://github.com/numpy/numpy/pull/9261>`__: BUG: don't elide into readonly and updateifcopy temporaries for...
+* `#9262 <https://github.com/numpy/numpy/pull/9262>`__: BUG: fix missing keyword rename for common block in numpy.f2py
+* `#9263 <https://github.com/numpy/numpy/pull/9263>`__: BUG: handle resize of 0d array
+* `#9267 <https://github.com/numpy/numpy/pull/9267>`__: DOC: update f2py front page and some doc build metadata.
+* `#9299 <https://github.com/numpy/numpy/pull/9299>`__: BUG: Fix Intel compilation on Unix.
+* `#9317 <https://github.com/numpy/numpy/pull/9317>`__: BUG: fix wrong ndim used in empty where check
+* `#9319 <https://github.com/numpy/numpy/pull/9319>`__: BUG: Make extensions compilable with MinGW on Py2.7
+* `#9339 <https://github.com/numpy/numpy/pull/9339>`__: BUG: Prevent crash if ufunc doc string is null
+* `#9340 <https://github.com/numpy/numpy/pull/9340>`__: BUG: umath: un-break ufunc where= when no out= is given
+* `#9371 <https://github.com/numpy/numpy/pull/9371>`__: DOC: Add isnat/positive ufunc to documentation
+* `#9372 <https://github.com/numpy/numpy/pull/9372>`__: BUG: Fix error in fromstring function from numpy.core.records...
+* `#9373 <https://github.com/numpy/numpy/pull/9373>`__: BUG: ')' is printed at the end pointer of the buffer in numpy.f2py.
+* `#9374 <https://github.com/numpy/numpy/pull/9374>`__: DOC: Create NumPy 1.13.1 release notes.
+* `#9376 <https://github.com/numpy/numpy/pull/9376>`__: BUG: Prevent hang traversing ufunc userloop linked list
+* `#9377 <https://github.com/numpy/numpy/pull/9377>`__: DOC: Use x1 and x2 in the heaviside docstring.
+* `#9378 <https://github.com/numpy/numpy/pull/9378>`__: DOC: Add $PARAMS to the isnat docstring
+* `#9379 <https://github.com/numpy/numpy/pull/9379>`__: DOC: Update the 1.13.1 release notes
diff --git a/doc/f2py/BUGS.txt b/doc/f2py/BUGS.txt
deleted file mode 100644
index ee08863bb..000000000
--- a/doc/f2py/BUGS.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-December 1, 2002:
-
-C FILE: STRING.F
- SUBROUTINE FOO
- END
-C END OF FILE STRING.F
-does not build with
- f2py -c -m string string.f
-Cause: string is mapped to string_bn
-**************************************************************************
-August 16, 2001:
-1) re in Python 2.x is **three** times slower than the re in Python 1.5.
-**************************************************************************
-HP-UX B.10.20 A 9000/780:
-Fortran function returning character*(*) (id=7) ... failed(core dump)
-Fortran function returning logical*8 (id=21) ... expected .true. but got 0
-Callback function returning real (id=45) ... expected 34.56 but got 14087495680.0
-Callback function returning real*4 (id=46) ... expected 34.56 but got 14087495680.0
-Callback function returning logical*8 (id=55) ... expected .true. but got 0
- C compiler: gcc ('gcc 2.x.x' 2.95.2) (from .f2py_get_compiler_CC)
- Fortran compiler: g77 ('g77 2.x.x' 2.95.2) (from .f2py_get_compiler_FC)
- Linker: ld ('HP-UX ld' 92453-07 linker linker ld B.10.24 961204) (from .f2py_get_compiler_LD)
-**************************************************************************
-Linux 2.2.13-0.9 #1 Thu Dec 9 17:03:57 EST 1999 alpha unknown:
-Fortran function returning character*(*) (id=7) ... expected 'abcdefgh' but got 'abcdefgh \201' (o?k)
-Callback function returning complex (id=48) ... failed(core dump)
- Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
-Callback function returning complex*8 (id=49) ... failed(core dump)
- Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
-Callback function returning complex*16 (id=50) ... failed(core dump)
- Trying with -DF2PY_CB_RETURNCOMPLEX ... failed(core dump)
- C compiler: cc ('Compaq C' V6.2-002) (from .f2py_get_compiler_CC)
- Fortran compiler: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_FC)
- Linker: fort ('Compaq Fortran' V1.0-920) (from .f2py_get_compiler_LD)
-**************************************************************************
-Linux 2.2.14-15mdk #1 Tue Jan 4 22:24:20 CET 2000 i686 unknown:
-Callback function returning logical*8 (id=55) ... failed
- C compiler: cc ('gcc 2.x.x' 2.95.2)
- Fortran compiler: f90 ('Absoft F90' 3.0)
- Linker: ld ('GNU ld' 2.9.5)
-**************************************************************************
-IRIX64 6.5 04151556 IP30:
-Testing integer, intent(inout) ...failed # not f2py problem
-Testing integer, intent(inout,out) ...failed
-Testing integer*1, intent(inout) ...failed
-Testing integer*1, intent(inout,out) ...failed
-Testing integer*8, intent(inout) ...failed
-Testing integer*8, intent(inout,out) ...failed
-cc-1140 cc: WARNING File = genmodule.c, Line = 114
- A value of type "void *" cannot be used to initialize an entity of type
- "void (*)()".
- {"foo",-1,{-1},0,(char *)F_FUNC(foo,FOO),(void *)gen_foo,doc_gen_foo},
- C compiler: cc ('MIPSpro 7 Compilers' 7.30)
- Fortran compiler: f77 ('MIPSpro 7 Compilers' 7.30)
- Linker: ld ('Linker for MIPSpro 7 Compilers' 7.30.)
diff --git a/doc/f2py/FAQ.txt b/doc/f2py/FAQ.txt
deleted file mode 100644
index 979a20179..000000000
--- a/doc/f2py/FAQ.txt
+++ /dev/null
@@ -1,603 +0,0 @@
-
-======================================================================
- F2PY Frequently Asked Questions
-======================================================================
-
-.. contents::
-
-General information
-===================
-
-Q: How to get started?
-----------------------
-
-First, install__ F2PY. Then check that F2PY installation works
-properly (see below__). Try out a `simple example`__.
-
-Read `F2PY Users Guide and Reference Manual`__. It contains lots
-of complete examples.
-
-If you have any questions/problems when using F2PY, don't hesitate to
-turn to `F2PY users mailing list`__ or directly to me.
-
-__ index.html#installation
-__ #testing
-__ index.html#usage
-__ usersguide/index.html
-__ index.html#mailing-list
-
-Q: When to report bugs?
------------------------
-
-* If F2PY scanning fails on Fortran sources that otherwise compile
- fine.
-
-* After checking that you have the latest version of F2PY from its
- CVS. It is possible that a bug has been fixed already. See also the
- log entries in the file `HISTORY.txt`_ (`HISTORY.txt in CVS`_).
-
-* After checking that your Python and Numerical Python installations
- work correctly.
-
-* After checking that your C and Fortran compilers work correctly.
-
-Q: How to report bugs?
-----------------------
-
-F2PY is part of NumPy. Report bugs on the NumPy issue tracker at
-__ https://github.com/numpy/numpy/issues
-Please, include information about your platform (operating system,
-version) and compilers/linkers, e.g. the output (both stdout/stderr) of
-::
-
- python -c 'import numpy.f2py.diagnose;numpy.f2py.diagnose.run()'
-
-Feel free to add any other relevant information. However, avoid
-sending the output of F2PY generated ``.pyf`` files (unless they are
-manually modified) or any binary files like shared libraries or object
-codes.
-
-N.B. You may notice that other F2PY issues are tagged 'f2py'. Only the
-admins can add tags to issues, don't waste time trying to work out how
-to tag it yourself.
-
-While reporting bugs, you may find the following notes useful:
-
-* `How To Ask Questions The Smart Way`__ by E. S. Raymond and R. Moen.
-
-* `How to Report Bugs Effectively`__ by S. Tatham.
-
-__ http://www.catb.org/~esr/faqs/smart-questions.html
-__ http://www.chiark.greenend.org.uk/~sgtatham/bugs.html
-
-Installation
-============
-
-Q: How to use F2PY with different Python versions?
---------------------------------------------------
-
-Run the installation command using the corresponding Python
-executable. For example,
-::
-
- python2.1 setup.py install
-
-installs the ``f2py`` script as ``f2py2.1``.
-
-See `Distutils User Documentation`__ for more information how to
-install Python modules to non-standard locations.
-
-__ http://www.python.org/sigs/distutils-sig/doc/inst/inst.html
-
-
-Q: Why F2PY is not working after upgrading?
--------------------------------------------
-
-If upgrading from F2PY version 2.3.321 or earlier then remove all f2py
-specific files from ``/path/to/python/bin`` directory before
-running installation command.
-
-Q: How to get/upgrade numpy and F2PY from git?
----------------------------------------------------------------
-
-The numpy code repository is hosted on GitHub at
-__ http://github.com/numpy/numpy
-
-You can check it out with
-::
- git clone git://github.com/numpy/numpy.git numpy
-
-Installation information is at
-__ http://www.scipy.org/scipylib/download.html
-
-Information for developers is at
-__ http://www.scipy.org/scipylib/dev-zone.html
-
-
-Testing
-=======
-
-Q: How to test if F2PY is installed correctly?
-----------------------------------------------
-
-Run
-::
-
- f2py
-
-without arguments. If F2PY is installed correctly then it should print
-the usage information for f2py.
-
-Q: How to test if F2PY is working correctly?
---------------------------------------------
-
-For a quick test, try out an example problem from Usage__
-section in `README.txt`_.
-
-__ index.html#usage
-
-For running F2PY unit tests, see `TESTING.txt`_.
-
-
-Compiler/Platform-specific issues
-=================================
-
-Q: What are supported platforms and compilers?
-----------------------------------------------
-
-F2PY is developed on Linux system with a GCC compiler (versions
-2.95.x, 3.x). Fortran 90 related hooks are tested against Intel
-Fortran Compiler. F2PY should work under any platform where Python and
-Numeric are installed and a supported Fortran compiler is installed.
-
-To see a list of supported compilers, execute::
-
- f2py -c --help-fcompiler
-
-Example output::
-
- List of available Fortran compilers:
- --fcompiler=gnu GNU Fortran Compiler (3.3.4)
- --fcompiler=intel Intel Fortran Compiler for 32-bit apps (8.0)
- List of unavailable Fortran compilers:
- --fcompiler=absoft Absoft Corp Fortran Compiler
- --fcompiler=compaq Compaq Fortran Compiler
- --fcompiler=compaqv DIGITAL|Compaq Visual Fortran Compiler
- --fcompiler=hpux HP Fortran 90 Compiler
- --fcompiler=ibm IBM XL Fortran Compiler
- --fcompiler=intele Intel Fortran Compiler for Itanium apps
- --fcompiler=intelev Intel Visual Fortran Compiler for Itanium apps
- --fcompiler=intelv Intel Visual Fortran Compiler for 32-bit apps
- --fcompiler=lahey Lahey/Fujitsu Fortran 95 Compiler
- --fcompiler=mips MIPSpro Fortran Compiler
- --fcompiler=nag NAGWare Fortran 95 Compiler
- --fcompiler=pg Portland Group Fortran Compiler
- --fcompiler=sun Sun|Forte Fortran 95 Compiler
- --fcompiler=vast Pacific-Sierra Research Fortran 90 Compiler
- List of unimplemented Fortran compilers:
- --fcompiler=f Fortran Company/NAG F Compiler
- For compiler details, run 'config_fc --verbose' setup command.
-
-
-Q: How to use the F compiler in F2PY?
--------------------------------------
-
-Read `f2py2e/doc/using_F_compiler.txt`__. It describes why the F
-compiler cannot be used in a normal way (i.e. using ``-c`` switch) to
-build F2PY generated modules. It also gives a workaround to this
-problem.
-
-__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/doc/using_F_compiler.txt?rev=HEAD&content-type=text/vnd.viewcvs-markup
-
-Q: How to use F2PY under Windows?
----------------------------------
-
-F2PY can be used both within Cygwin__ and MinGW__ environments under
-Windows, F2PY can be used also in Windows native terminal.
-See the section `Setting up environment`__ for Cygwin and MinGW.
-
-__ http://cygwin.com/
-__ http://www.mingw.org/
-__ http://cens.ioc.ee/~pearu/numpy/BUILD_WIN32.html#setting-up-environment
-
-Install numpy_distutils and F2PY. Win32 installers of these packages
-are provided in `F2PY Download`__ section.
-
-__ http://cens.ioc.ee/projects/f2py2e/#download
-
-Use ``--compiler=`` and ``--fcompiler`` F2PY command line switches
-to specify which C and Fortran compilers F2PY should use, respectively.
-
-Under MinGW environment, ``mingw32`` is default for a C compiler.
-
-Supported and Unsupported Features
-==================================
-
-Q: Does F2PY support ``ENTRY`` statements?
-------------------------------------------
-
-Yes, starting at F2PY version higher than 2.39.235_1706.
-
-Q: Does F2PY support derived types in F90 code?
------------------------------------------------
-
-Not yet. However I do have plans to implement support for F90 TYPE
-constructs in the future. But note that the task is non-trivial and may
-require the next edition of F2PY for which I don't have resources to
-work with at the moment.
-
-Jeffrey Hagelberg from LLNL has made progress on adding
-support for derived types to f2py. He writes:
-
- At this point, I have a version of f2py that supports derived types
- for most simple cases. I have multidimensional arrays of derived
- types and allocatable arrays of derived types working. I'm just now
- starting to work on getting nested derived types to work. I also
- haven't tried putting complex number in derived types yet.
-
-Hopefully he can contribute his changes to f2py soon.
-
-Q: Does F2PY support pointer data in F90 code?
------------------------------------------------
-
-No. I have never needed it and I haven't studied if there are any
-obstacles to add pointer data support to F2PY.
-
-Q: What if Fortran 90 code uses ``<type spec>(kind=KIND(..))``?
----------------------------------------------------------------
-
-Currently, F2PY can handle only ``<type spec>(kind=<kindselector>)``
-declarations where ``<kindselector>`` is a numeric integer (e.g. 1, 2,
-4,...) but not a function call ``KIND(..)`` or any other
-expression. F2PY needs to know what would be the corresponding C type
-and a general solution for that would be too complicated to implement.
-
-However, F2PY provides a hook to overcome this difficulty, namely,
-users can define their own <Fortran type> to <C type> maps. For
-example, if Fortran 90 code contains::
-
- REAL(kind=KIND(0.0D0)) ...
-
-then create a file ``.f2py_f2cmap`` (into the working directory)
-containing a Python dictionary::
-
- {'real':{'KIND(0.0D0)':'double'}}
-
-for instance.
-
-Or more generally, the file ``.f2py_f2cmap`` must contain a dictionary
-with items::
-
- <Fortran typespec> : {<selector_expr>:<C type>}
-
-that defines mapping between Fortran type::
-
- <Fortran typespec>([kind=]<selector_expr>)
-
-and the corresponding ``<C type>``. ``<C type>`` can be one of the
-following::
-
- char
- signed_char
- short
- int
- long_long
- float
- double
- long_double
- complex_float
- complex_double
- complex_long_double
- string
-
-For more information, see ``f2py2e/capi_maps.py``.
-
-Related software
-================
-
-Q: How F2PY distinguishes from Pyfort?
---------------------------------------
-
-F2PY and Pyfort have very similar aims and ideology of how they are
-targeted. Both projects started to evolve in the same year 1999
-independently. When we discovered each other's projects, a discussion
-started to join the projects but that unfortunately failed for
-various reasons, e.g. both projects had evolved too far that merging
-the tools would have been impractical and giving up the efforts that
-the developers of both projects have made was unacceptable to both
-parties. And so, nowadays we have two tools for connecting Fortran
-with Python and this fact will hardly change in near future. To decide
-which one to choose is a matter of taste, I can only recommend to try
-out both to make up your choice.
-
-At the moment F2PY can handle more wrapping tasks than Pyfort,
-e.g. with F2PY one can wrap Fortran 77 common blocks, Fortran 90
-module routines, Fortran 90 module data (including allocatable
-arrays), one can call Python from Fortran, etc etc. F2PY scans Fortran
-codes to create signature (.pyf) files. F2PY is free from most of the
-limitations listed in `the corresponding section of Pyfort
-Reference Manual`__.
-
-__ http://pyfortran.sourceforge.net/pyfort/pyfort_reference.htm#pgfId-296925
-
-There is a conceptual difference on how F2PY and Pyfort handle the
-issue of different data ordering in Fortran and C multi-dimensional
-arrays. Pyfort generated wrapper functions have optional arguments
-TRANSPOSE and MIRROR that can be used to control explicitly how the array
-arguments and their dimensions are passed to Fortran routine in order
-to deal with the C/Fortran data ordering issue. F2PY generated wrapper
-functions hide the whole issue from an end-user so that translation
-between Fortran and C/Python loops and array element access codes is
-one-to-one. How the F2PY generated wrappers deal with the issue is
-determined by a person who creates a signature file via using
-attributes like ``intent(c)``, ``intent(copy|overwrite)``,
-``intent(inout|in,out|inplace)`` etc.
-
-For example, let's consider a typical usage of both F2PY and Pyfort
-when wrapping the following simple Fortran code:
-
-.. include:: simple.f
- :literal:
-
-The comment lines starting with ``cf2py`` are read by F2PY (so that we
-don't need to generate/handwrite an intermediate signature file in
-this simple case) while for a Fortran compiler they are just comment
-lines.
-
-And here is a Python version of the Fortran code:
-
-.. include:: pytest.py
- :literal:
-
-To generate a wrapper for subroutine ``foo`` using F2PY, execute::
-
- $ f2py -m f2pytest simple.f -c
-
-that will generate an extension module ``f2pytest`` into the current
-directory.
-
-To generate a wrapper using Pyfort, create the following file
-
-.. include:: pyforttest.pyf
- :literal:
-
-and execute::
-
- $ pyfort pyforttest
-
-In Pyfort GUI add ``simple.f`` to the list of Fortran sources and
-check that the signature file is in free format. And then copy
-``pyforttest.so`` from the build directory to the current directory.
-
-Now, in Python
-
-.. include:: simple_session.dat
- :literal:
-
-Q: Can Pyfort .pyf files be used with F2PY and vice versa?
-----------------------------------------------------------
-
-After some simple modifications, yes. You should take into account the
-following differences in Pyfort and F2PY .pyf files.
-
-+ F2PY signature file contains ``python module`` and ``interface``
- blocks that are equivalent to Pyfort ``module`` block usage.
-
-+ F2PY attribute ``intent(inplace)`` is equivalent to Pyfort
- ``intent(inout)``. F2PY ``intent(inout)`` is a strict (but safe)
- version of ``intent(inplace)``, any mismatch in arguments with
-  expected type, size, or contiguousness will trigger an exception
- while ``intent(inplace)`` (dangerously) modifies arguments
- attributes in-place.
-
-Misc
-====
-
-Q: How to establish which Fortran compiler F2PY will use?
----------------------------------------------------------
-
-This question may be relevant when using F2PY in Makefiles. Here
-follows a script demonstrating how to determine which Fortran compiler
-and flags F2PY will use::
-
- # Using post-0.2.2 numpy_distutils
- from numpy_distutils.fcompiler import new_fcompiler
- compiler = new_fcompiler() # or new_fcompiler(compiler='intel')
- compiler.dump_properties()
-
- # Using pre-0.2.2 numpy_distutils
- import os
- from numpy_distutils.command.build_flib import find_fortran_compiler
- def main():
- fcompiler = os.environ.get('FC_VENDOR')
- fcompiler_exec = os.environ.get('F77')
- f90compiler_exec = os.environ.get('F90')
- fc = find_fortran_compiler(fcompiler,
- fcompiler_exec,
- f90compiler_exec,
- verbose = 0)
- print 'FC=',fc.f77_compiler
- print 'FFLAGS=',fc.f77_switches
- print 'FOPT=',fc.f77_opt
- if __name__ == "__main__":
- main()
-
-Users feedback
-==============
-
-Q: Where to find additional information on using F2PY?
-------------------------------------------------------
-
-There are several F2PY related tutorials, slides, papers, etc
-available:
-
-+ `Fortran to Python Interface Generator with an Application to
- Aerospace Engineering`__ by P. Peterson, J. R. R. A. Martins, and
- J. J. Alonso in `In Proceedings of the 9th International Python
- Conference`__, Long Beach, California, 2001.
-
-__ http://www.python9.org/p9-cdrom/07/index.htm
-__ http://www.python9.org/
-
-+ Section `Adding Fortran90 code`__ in the UG of `The Bolometer Data
- Analysis Project`__.
-
-__ http://www.astro.rub.de/laboca/download/boa_master_doc/7_4Adding_Fortran90_code.html
-__ http://www.openboa.de/
-
-+ Powerpoint presentation `Python for Scientific Computing`__ by Eric
- Jones in `The Ninth International Python Conference`__.
-
-__ http://www.python9.org/p9-jones.ppt
-__ http://www.python9.org/
-
-+ Paper `Scripting a Large Fortran Code with Python`__ by Alvaro Caceres
- Calleja in `International Workshop on Software Engineering for High
- Performance Computing System Applications`__.
-
-__ http://csdl.ics.hawaii.edu/se-hpcs/pdf/calleja.pdf
-__ http://csdl.ics.hawaii.edu/se-hpcs/
-
-+ Section `Automatic building of C/Fortran extension for Python`__ by
- Simon Lacoste-Julien in `Summer 2002 Report about Hybrid Systems
- Modelling`__.
-
-__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html#tth_sEc3.4
-__ http://moncs.cs.mcgill.ca/people/slacoste/research/report/SummerReport.html
-
-+ `Scripting for Computational Science`__ by Hans Petter Langtangen
- (see the `Mixed language programming`__ and `NumPy array programming`__
- sections for examples on using F2PY).
-
-__ http://www.ifi.uio.no/~inf3330/lecsplit/
-__ http://www.ifi.uio.no/~inf3330/lecsplit/slide662.html
-__ http://www.ifi.uio.no/~inf3330/lecsplit/slide718.html
-
-+ Chapters 5 and 9 of `Python Scripting for Computational Science`__
- by H. P. Langtangen for case studies on using F2PY.
-
-__ http://www.springeronline.com/3-540-43508-5
-
-+ Section `Fortran Wrapping`__ in `Continuity`__, a computational tool
- for continuum problems in bioengineering and physiology.
-
-__ http://www.continuity.ucsd.edu/cont6_html/docs_fram.html
-__ http://www.continuity.ucsd.edu/
-
-+ Presentation `PYFORT and F2PY: 2 ways to bind C and Fortran with Python`__
- by Reiner Vogelsang.
-
-__ http://www.prism.enes.org/WPs/WP4a/Slides/pyfort/pyfort.html
-
-+ Lecture slides of `Extending Python: speed it up`__.
-
-__ http://www.astro.uni-bonn.de/~heith/lecture_pdf/friedrich5.pdf
-
-+ Wiki topics on `Wrapping Tools`__ and `Wrapping Benchmarks`__ for Climate
- System Center at the University of Chicago.
-
-__ https://geodoc.uchicago.edu/climatewiki/DiscussWrappingTools
-__ https://geodoc.uchicago.edu/climatewiki/WrappingBenchmarks
-
-+ `Performance Python with Weave`__ by Prabhu Ramachandran.
-
-__ http://www.numpy.org/documentation/weave/weaveperformance.html
-
-+ `How To Install py-f2py on Mac OSX`__
-
-__ http://py-f2py.darwinports.com/
-
-Please, let me know if there are any other sites that document F2PY
-usage in one or another way.
-
-Q: What projects use F2PY?
---------------------------
-
-+ `SciPy: Scientific tools for Python`__
-
-__ http://www.numpy.org/
-
-+ `The Bolometer Data Analysis Project`__
-
-__ http://www.openboa.de/
-
-+ `pywavelet`__
-
-__ http://www.met.wau.nl/index.html?http://www.met.wau.nl/medewerkers/moenea/python/pywavelet.html
-
-+ `PyARTS: an ARTS related Python package`__.
-
-__ http://www.met.ed.ac.uk/~cory/PyARTS/
-
-+ `Python interface to PSPLINE`__, a collection of Spline and
- Hermite interpolation tools for 1D, 2D, and 3D datasets on
- rectilinear grids.
-
-__ http://pypspline.sourceforge.net
-
-+ `Markovian Analysis Package for Python`__.
-
-__ http://pymc.sourceforge.net
-
-+ `Modular toolkit for Data Processing (MDP)`__
-
-__ http://mdp-toolkit.sourceforge.net/
-
-
-Please, send me a note if you are using F2PY in your project.
-
-Q: What people think about F2PY?
---------------------------------
-
-*F2PY is GOOD*:
-
-Here are some comments people have posted to f2py mailing list and c.l.py:
-
-+ Ryan Krauss: I really appreciate f2py. It seems weird to say, but I
- am excited about relearning FORTRAN to compliment my python stuff.
-
-+ Fabien Wahl: f2py is great, and is used extensively over here...
-
-+ Fernando Perez: Anyway, many many thanks for this amazing tool.
-
- I haven't used pyfort, but I can definitely vouch for the amazing quality of
- f2py. And since f2py is actively used by numpy, it won't go unmaintained.
- It's quite impressive, and very easy to use.
-
-+ Kevin Mueller: First off, thanks to those responsible for F2PY;
- its been an integral tool of my research for years now.
-
-+ David Linke: Best regards and thanks for the great tool!
-
-+ Perrin Meyer: F2Py is really useful!
-
-+ Hans Petter Langtangen: First of all, thank you for developing
- F2py. This is a very important contribution to the scientific
- computing community. We are using F2py a lot and are very happy with
- it.
-
-+ Berthold Höllmann: Thank's alot. It seems it is also working in my
- 'real' application :-)
-
-+ John Hunter: At first I wrapped them with f2py (unbelievably easy!)...
-
-+ Cameron Laird: Among many other features, Python boasts a mature
- f2py, which makes it particularly rewarding to yoke Fortran- and
- Python-coded modules into finished applications.
-
-+ Ryan Gutenkunst: f2py is sweet magic.
-
-*F2PY is BAD*:
-
-+ `Is it worth using on a large scale python drivers for Fortran
- subroutines, interfaced with f2py?`__
-
-__ http://sepwww.stanford.edu/internal/computing/python.html
-
-Additional comments on F2PY, good or bad, are welcome!
-
-.. References:
-.. _README.txt: index.html
-.. _HISTORY.txt: HISTORY.html
-.. _HISTORY.txt in CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup
-.. _TESTING.txt: TESTING.html
diff --git a/doc/f2py/HISTORY.txt b/doc/f2py/HISTORY.txt
deleted file mode 100644
index 44dc8b916..000000000
--- a/doc/f2py/HISTORY.txt
+++ /dev/null
@@ -1,1043 +0,0 @@
-.. -*- rest -*-
-
-=========================
- F2PY History
-=========================
-
-:Author: Pearu Peterson <pearu@cens.ioc.ee>
-:Web-site: http://cens.ioc.ee/projects/f2py2e/
-:Date: $Date: 2005/09/16 08:36:45 $
-:Revision: $Revision: 1.191 $
-
-.. Contents::
-
-Release 2.46.243
-=====================
-
-* common_rules.py
-
- - Fixed compiler warnings.
-
-* fortranobject.c
-
- - Fixed another dims calculation bug.
- - Fixed dims calculation bug and added the corresponding check.
- - Accept higher dimensional arrays if their effective rank matches.
- Effective rank is multiplication of non-unit dimensions.
-
-* f2py2e.py
-
- - Added support for numpy.distutils version 0.4.0.
-
-* Documentation
-
- - Added example about ``intent(callback,hide)`` usage. Updates.
- - Updated FAQ.
-
-* cb_rules.py
-
- - Fixed missing need kw error.
- - Fixed getting callback non-existing extra arguments.
- - External callback functions and extra_args can be set via
- ext.module namespace.
- - Avoid crash when external callback function is not set.
-
-* rules.py
-
- - Enabled ``intent(out)`` for ``intent(aux)`` non-complex scalars.
- - Fixed splitting lines in F90 fixed form mode.
- - Fixed FORTRANAME typo, relevant when wrapping scalar functions with
- ``--no-wrap-functions``.
- - Improved failure handling for callback functions.
- - Fixed bug in writing F90 wrapper functions when a line length
- is exactly 66.
-
-* cfuncs.py
-
- - Fixed dependency issue with typedefs.
- - Introduced ``-DUNDERSCORE_G77`` that cause extra underscore to be
- used for external names that contain an underscore.
-
-* capi_maps.py
-
- - Fixed typos.
- - Fixed using complex cb functions.
-
-* crackfortran.py
-
- - Introduced parent_block key. Get ``use`` statements recursively
- from parent blocks.
- - Apply parameter values to kindselectors.
- - Fixed bug evaluating ``selected_int_kind`` function.
- - Ignore Name and Syntax errors when evaluating scalars.
- - Treat ``<int>_intType`` as ``<int>`` in get_parameters.
- - Added support for F90 line continuation in fix format mode.
- - Include optional attribute of external to signature file.
- - Add ``entry`` arguments to variable lists.
- - Treat \xa0 character as space.
- - Fixed bug where __user__ callback subroutine was added to its
- argument list.
- - In strict 77 mode read only the first 72 columns.
- - Fixed parsing ``v(i) = func(r)``.
- - Fixed parsing ``integer*4::``.
- - Fixed parsing ``1.d-8`` when used as a parameter value.
-
-Release 2.45.241_1926
-=====================
-
-* diagnose.py
-
- - Clean up output.
-
-* cb_rules.py
-
- - Fixed ``_cpointer`` usage for subroutines.
- - Fortran function ``_cpointer`` can be used for callbacks.
-
-* func2subr.py
-
- - Use result name when wrapping functions with subroutines.
-
-* f2py2e.py
-
- - Fixed ``--help-link`` switch.
- - Fixed ``--[no-]lower`` usage with ``-c`` option.
- - Added support for ``.pyf.src`` template files.
-
-* __init__.py
-
- - Using ``exec_command`` in ``compile()``.
-
-* setup.py
-
- - Clean up.
- - Disabled ``need_numpy_distutils`` function. From now on it is assumed
- that proper version of ``numpy_distutils`` is already installed.
-
-* capi_maps.py
-
- - Added support for wrapping unsigned integers. In a .pyf file
- ``integer(-1)``, ``integer(-2)``, ``integer(-4)`` correspond to
- ``unsigned char``, ``unsigned short``, ``unsigned`` C types,
- respectively.
-
-* tests/c/return_real.py
-
- - Added tests to wrap C functions returning float/double.
-
-* fortranobject.c
-
- - Added ``_cpointer`` attribute to wrapped objects.
-
-* rules.py
-
- - ``_cpointer`` feature for wrapped module functions is not
- functional at the moment.
- - Introduced ``intent(aux)`` attribute. Useful to save a value
- of a parameter to auxiliary C variable. Note that ``intent(aux)``
- implies ``intent(c)``.
- - Added ``usercode`` section. When ``usercode`` is used in ``python
-    module`` block twice then the contents of the second multi-line
- block is inserted after the definition of external routines.
- - Call-back function arguments can be CObjects.
-
-* cfuncs.py
-
- - Allow call-back function arguments to be fortran objects.
- - Allow call-back function arguments to be built-in functions.
-
-* crackfortran.py
-
- - Fixed detection of a function signature from usage example.
- - Cleaned up -h output for intent(callback) variables.
- - Repair malformed argument list (missing argument name).
- - Warn on the usage of multiple attributes without type specification.
- - Evaluate only scalars ``<initexpr>`` (e.g. not of strings).
- - Evaluate ``<initexpr>`` using parameters name space.
- - Fixed resolving `<name>(<args>)[result(<result>)]` pattern.
- - ``usercode`` can be used more than once in the same context.
-
-Release 2.43.239_1831
-=====================
-
-* auxfuncs.py
-
- - Made ``intent(in,inplace)`` to mean ``intent(inplace)``.
-
-* f2py2e.py
-
-  - Introduced ``--help-link`` and ``--link-<resource>``
- switches to link generated extension module with system
- ``<resource>`` as defined by numpy_distutils/system_info.py.
-
-* fortranobject.c
-
- - Patch to make PyArray_CanCastSafely safe on 64-bit machines.
- Fixes incorrect results when passing ``array('l')`` to
- ``real*8 intent(in,out,overwrite)`` arguments.
-
-* rules.py
-
- - Avoid empty continuation lines in Fortran wrappers.
-
-* cfuncs.py
-
- - Adding ``\0`` at the end of a space-padded string, fixes tests
- on 64-bit Gentoo.
-
-* crackfortran.py
-
- - Fixed splitting lines with string parameters.
-
-Release 2.43.239_1806
-=====================
-
-* Tests
-
- - Fixed test site that failed after padding strings with spaces
- instead of zeros.
-
-* Documentation
-
- - Documented ``intent(inplace)`` attribute.
- - Documented ``intent(callback)`` attribute.
- - Updated FAQ, added Users Feedback section.
-
-* cfuncs.py
-
- - Padding longer (than provided from Python side) strings with spaces
- (that is Fortran behavior) instead of nulls (that is C strncpy behavior).
-
-* f90mod_rules.py
-
- - Undoing rmbadnames in Python and Fortran layers.
-
-* common_rules.py
-
- - Renaming common block items that have names identical to C keywords.
- - Fixed wrapping blank common blocks.
-
-* fortranobject.h
-
- - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller).
-
-* fortranobject.c
-
- - Introduced ``intent(inplace)`` feature.
- - Fix numarray reference counts (patch by Todd).
- - Updated numarray (0.9, 1.0, 1.1) support (patch by Todd Miller).
- - Enabled F2PY_REPORT_ON_ARRAY_COPY for Numarray.
-
-* capi_maps.py
-
- - Always normalize .f2py_f2cmap keys to lower case.
-
-* rules.py
-
- - Disabled ``index`` macro as it conflicts with the one defined
- in string.h.
- - Moved ``externroutines`` up to make it visible to ``usercode``.
- - Fixed bug in f90 code generation: no empty line continuation is
- allowed.
- - Fixed undefined symbols failure when ``fortranname`` is used
- to rename a wrapped function.
- - Support for ``entry`` statement.
-
-* auxfuncs.py
-
- - Made is* functions more robust with respect to parameters that
- have no typespec specified.
- - Using ``size_t`` instead of ``int`` as the type of string
- length. Fixes issues on 64-bit platforms.
-
-* setup.py
-
- - Fixed bug of installing ``f2py`` script as ``.exe`` file.
-
-* f2py2e.py
-
- - ``--compiler=`` and ``--fcompiler=`` can be specified at the same time.
-
-* crackfortran.py
-
- - Fixed dependency detection for non-intent(in|inout|inplace) arguments.
- They must depend on their dimensions, not vice-versa.
- - Don't match ``!!f2py`` as a start of f2py directive.
- - Only effective intent attributes will be output to ``-h`` target.
- - Introduced ``intent(callback)`` to build interface between Python
- functions and Fortran external routines.
- - Avoid including external arguments to __user__ modules.
- - Initial hooks to evaluate ``kind`` and ``selected_int_kind``.
- - Evaluating parameters in {char,kind}selectors and applying rmbadname.
- - Evaluating parameters using also module parameters. Fixed the order
- of parameter evaluation.
- - Fixed silly bug: when block name was not lower cased, it was not
- recognized correctly.
- - Applying mapping '.false.'->'False', '.true.'->'True' to logical
- parameters. TODO: Support for logical expressions is needed.
- - Added support for multiple statements in one line (separated with semicolon).
- - Impl. get_useparameters function for using parameter values from
- other f90 modules.
- - Applied Bertholds patch to fix bug in evaluating expressions
- like ``1.d0/dvar``.
- - Fixed bug in reading string parameters.
- - Evaluating parameters in charselector. Code cleanup.
- - Using F90 module parameters to resolve kindselectors.
- - Made the evaluation of module data init-expression more robust.
- - Support for ``entry`` statement.
- - Fixed ``determineexprtype`` that in the case of parameters
- returned non-dictionary objects.
- - Use ``-*- fix -*-`` to specify that a file is in fixed format.
-
-Release 2.39.235_1693
-=====================
-
-* fortranobject.{h,c}
-
- - Support for allocatable string arrays.
-
-* cfuncs.py
-
- - Call-back arguments can now be also instances that have ``__call__`` method
- as well as instance methods.
-
-* f2py2e.py
-
- - Introduced ``--include_paths <path1>:<path2>:..`` command line
- option.
- - Added ``--compiler=`` support to change the C/C++ compiler from
- f2py command line.
-
-* capi_maps.py
-
- - Handle ``XDY`` parameter constants.
-
-* crackfortran.py
-
- - Handle ``XDY`` parameter constants.
-
- - Introduced formatpattern to workaround a corner case where reserved
- keywords are used in format statement. Other than that, format pattern
- has no use.
-
- - Parameters are now fully evaluated.
-
-* More splitting of documentation strings.
-
-* func2subr.py - fixed bug for function names that f77 compiler
- would set ``integer`` type.
-
-Release 2.39.235_1660
-=====================
-
-* f2py2e.py
-
- - Fixed bug in using --f90flags=..
-
-* f90mod_rules.py
-
- - Split generated documentation strings (to avoid MSVC issue when
- string length>2k)
-
- - Ignore ``private`` module data.
-
-Release 2.39.235_1644
-=====================
-
-:Date:24 February 2004
-
-* Character arrays:
-
- - Finished complete support for character arrays and arrays of strings.
- - ``character*n a(m)`` is treated like ``character a(m,n)`` with ``intent(c)``.
- - Character arrays are now considered as ordinary arrays (not as arrays
- of strings which actually didn't work).
-
-* docs
-
- - Initial f2py manpage file f2py.1.
- - Updated usersguide and other docs when using numpy_distutils 0.2.2
- and up.
-
-* capi_maps.py
-
- - Try harder to use .f2py_f2cmap mappings when kind is used.
-
-* crackfortran.py
-
-  - Included files are first searched in the current directory and
-    then from the source file directory.
- - Ignoring dimension and character selector changes.
- - Fixed bug in Fortran 90 comments of fixed format.
- - Warn when .pyf signatures contain undefined symbols.
- - Better detection of source code formats. Using ``-*- fortran -*-``
- or ``-*- f90 -*-`` in the first line of a Fortran source file is
- recommended to help f2py detect the format, fixed or free,
- respectively, correctly.
-
-* cfuncs.py
-
- - Fixed intent(inout) scalars when typecode=='l'.
- - Fixed intent(inout) scalars when not using numarray.
- - Fixed intent(inout) scalars when using numarray.
-
-* diagnose.py
-
- - Updated for numpy_distutils 0.2.2 and up.
- - Added numarray support to diagnose.
-
-* fortranobject.c
-
- - Fixed nasty bug with intent(in,copy) complex slice arrays.
- - Applied Todd's patch to support numarray's byteswapped or
- misaligned arrays, requires numarray-0.8 or higher.
-
-* f2py2e.py
-
- - Applying new hooks for numpy_distutils 0.2.2 and up, keeping
-    backward compatibility with deprecation messages.
- - Using always os.system on non-posix platforms in f2py2e.compile
- function.
-
-* rules.py
-
- - Changed the order of buildcallback and usercode junks.
-
-* setup.cfg
-
- - Added so that docs/ and tests/ directories are included to RPMs.
-
-* setup.py
-
- - Installing f2py.py instead of f2py.bat under NT.
- - Introduced ``--with-numpy_distutils`` that is useful when making
- f2py tar-ball with numpy_distutils included.
-
-Release 2.37.233-1545
-=====================
-
-:Date: 11 September 2003
-
-* rules.py
-
- - Introduced ``interface_usercode`` replacement. When ``usercode``
- statement is used inside the first interface block, its contents
- will be inserted at the end of initialization function of a F2PY
- generated extension module (feature request: Berthold Höllmann).
- - Introduced auxiliary function ``as_column_major_storage`` that
- converts input array to an array with column major storage order
- (feature request: Hans Petter Langtangen).
-
-* crackfortran.py
-
- - Introduced ``pymethoddef`` statement.
-
-* cfuncs.py
-
- - Fixed "#ifdef in #define TRYPYARRAYTEMPLATE" bug (patch thanks
- to Bernhard Gschaider)
-
-* auxfuncs.py
-
- - Introduced ``getpymethod`` function.
- - Enabled multi-line blocks in ``callprotoargument`` statement.
-
-* f90mod_rules.py
-
- - Undone "Fixed Warning 43 emitted by Intel Fortran compiler" that
-    causes (curious) segfaults.
-
-* fortranobject.c
-
- - Fixed segfaults (that were introduced with recent memory leak
- fixes) when using allocatable arrays.
- - Introduced F2PY_REPORT_ON_ARRAY_COPY CPP macro int-variable. If defined
- then a message is printed to stderr whenever a copy of an array is
- made and arrays size is larger than F2PY_REPORT_ON_ARRAY_COPY.
-
-Release 2.35.229-1505
-=====================
-
-:Date: 5 August 2003
-
-* General
-
- - Introduced ``usercode`` statement (dropped ``c_code`` hooks).
-
-* setup.py
-
- - Updated the CVS location of numpy_distutils.
-
-* auxfuncs.py
-
- - Introduced ``isint1array(var)`` for fixing ``integer*1 intent(out)``
- support.
-
-* tests/f77/callback.py
-
- Introduced some basic tests.
-
-* src/fortranobject.{c,h}
-
- - Fixed memory leaks when getting/setting allocatable arrays.
- (Bug report by Bernhard Gschaider)
-
- - Initial support for numarray (Todd Miller's patch). Use -DNUMARRAY
- on the f2py command line to enable numarray support. Note that
- there is no character arrays support and these hooks are not
- tested with F90 compilers yet.
-
-* cfuncs.py
-
- - Fixed reference counting bug that appeared when constructing extra
- argument list to callback functions.
- - Added ``NPY_LONG != NPY_INT`` test.
-
-* f2py2e.py
-
- Undocumented ``--f90compiler``.
-
-* crackfortran.py
-
- - Introduced ``usercode`` statement.
- - Fixed newlines when outputting multi-line blocks.
- - Optimized ``getlincoef`` loop and ``analyzevars`` for cases where
- len(vars) is large.
- - Fixed callback string argument detection.
- - Fixed evaluating expressions: only int|float expressions are
- evaluated successfully.
-
-* docs
-
- Documented -DF2PY_REPORT_ATEXIT feature.
-
-* diagnose.py
-
- Added CPU information and sys.prefix printout.
-
-* tests/run_all.py
-
- Added cwd to PYTHONPATH.
-
-* tests/f??/return_{real,complex}.py
-
- Pass "infinity" check in SunOS.
-
-* rules.py
-
- - Fixed ``integer*1 intent(out)`` support
- - Fixed free format continuation of f2py generated F90 files.
-
-* tests/mixed/
-
- Introduced tests for mixing Fortran 77, Fortran 90 fixed and free
- format codes in one module.
-
-* f90mod_rules.py
-
- - Fixed non-prototype warnings.
- - Fixed Warning 43 emitted by Intel Fortran compiler.
- - Avoid long lines in Fortran codes to reduce possible problems with
- continuations of lines.
-
-Public Release 2.32.225-1419
-============================
-
-:Date: 8 December 2002
-
-* docs/usersguide/
-
- Complete revision of F2PY Users Guide
-
-* tests/run_all.py
-
- - New file. A Python script to run all f2py unit tests.
-
-* Removed files: buildmakefile.py, buildsetup.py.
-
-* tests/f77/
-
- - Added intent(out) scalar tests.
-
-* f2py_testing.py
-
- - Introduced. It contains jiffies, memusage, run, cmdline functions
- useful for f2py unit tests site.
-
-* setup.py
-
- - Install numpy_distutils only if it is missing or is too old
- for f2py.
-
-* f90modrules.py
-
- - Fixed wrapping f90 module data.
- - Fixed wrapping f90 module subroutines.
- - Fixed f90 compiler warnings for wrapped functions by using interface
- instead of external stmt for functions.
-
-* tests/f90/
-
- - Introduced return_*.py tests.
-
-* func2subr.py
-
- - Added optional signature argument to createfuncwrapper.
- - In f2pywrappers routines, declare external, scalar, remaining
- arguments in that order. Fixes compiler error 'Invalid declaration'
- for::
-
- real function foo(a,b)
- integer b
- real a(b)
- end
-
-* crackfortran.py
-
- - Removed first-line comment information support.
- - Introduced multiline block. Currently usable only for
- ``callstatement`` statement.
- - Improved array length calculation in getarrlen(..).
- - "From sky" program group is created only if ``groupcounter<1``.
- See TODO.txt.
- - Added support for ``dimension(n:*)``, ``dimension(*:n)``. They are
- treated as ``dimension(*)`` by f2py.
- - Fixed parameter substitution (this fixes TODO item by Patrick
- LeGresley, 22 Aug 2001).
-
-* f2py2e.py
-
- - Disabled all makefile, setup, manifest file generation hooks.
- - Disabled --[no]-external-modroutines option. All F90 module
- subroutines will have Fortran/C interface hooks.
- - --build-dir can be used with -c option.
- - only/skip modes can be used with -c option.
- - Fixed and documented `-h stdout` feature.
- - Documented extra options.
- - Introduced --quiet and --verbose flags.
-
-* cb_rules.py
-
- - Fixed debugcapi hooks for intent(c) scalar call-back arguments
- (bug report: Pierre Schnizer).
- - Fixed intent(c) for scalar call-back arguments.
- - Improved failure reports.
-
-* capi_maps.py
-
- - Fixed complex(kind=..) to C type mapping bug. The following hold
- complex==complex(kind=4)==complex*8, complex(kind=8)==complex*16
- - Using signed_char for integer*1 (bug report: Steve M. Robbins).
- - Fixed logical*8 function bug: changed its C correspondence to
- long_long.
- - Fixed memory leak when returning complex scalar.
-
-* __init__.py
-
- - Introduced a new function (for f2py test site, but could be useful
- in general) ``compile(source[,modulename,extra_args])`` for
- compiling fortran source codes directly from Python.
-
-* src/fortranobject.c
-
- - Multi-dimensional common block members and allocatable arrays
- are returned as Fortran-contiguous arrays.
- - Fixed NULL return to Python without exception.
- - Fixed memory leak in getattr(<fortranobj>,'__doc__').
- - <fortranobj>.__doc__ is saved to <fortranobj>.__dict__ (previously
- it was generated each time when requested).
- - Fixed a nasty typo from the previous item that caused data
- corruption and occasional SEGFAULTs.
- - array_from_pyobj accepts arbitrary rank arrays if the last dimension
- is undefined. E.g. dimension(3,*) accepts a(3,4,5) and the result is
- array with dimension(3,20).
- - Fixed (void*) casts to make g++ happy (bug report: eric).
- - Changed the interface of ARR_IS_NULL macro to avoid "``NULL used in
- arithmetics``" warnings from g++.
-
-* src/fortranobject.h
-
- - Undone previous item. Defining NO_IMPORT_ARRAY for
- src/fortranobject.c (bug report: travis)
- - Ensured that PY_ARRAY_UNIQUE_SYMBOL is defined only for
- src/fortranobject.c (bug report: eric).
-
-* rules.py
-
- - Introduced dummy routine feature.
- - F77 and F90 wrapper subroutines (if any) as saved to different
- files, <modulename>-f2pywrappers.f and <modulename>-f2pywrappers2.f90,
- respectively. Therefore, wrapping F90 requires numpy_distutils >=
- 0.2.0_alpha_2.229.
- - Fixed compiler warnings about meaningless ``const void (*f2py_func)(..)``.
- - Improved error messages for ``*_from_pyobj``.
- - Changed __CPLUSPLUS__ macros to __cplusplus (bug report: eric).
- - Changed (void*) casts to (f2py_init_func) (bug report: eric).
- - Removed unnecessary (void*) cast for f2py_has_column_major_storage
- in f2py_module_methods definition (bug report: eric).
- - Changed the interface of f2py_has_column_major_storage function:
- removed const from the 1st argument.
-
-* cfuncs.py
-
- - Introduced -DPREPEND_FORTRAN.
- - Fixed bus error on SGI by using PyFloat_AsDouble when ``__sgi`` is defined.
 -    This seems to be a `known bug`__ with Python 2.1 and SGI.
 -  - string_from_pyobj accepts only arrays whose element size==sizeof(char).
- - logical scalars (intent(in),function) are normalized to 0 or 1.
- - Removed NUMFROMARROBJ macro.
- - (char|short)_from_pyobj now use int_from_pyobj.
- - (float|long_double)_from_pyobj now use double_from_pyobj.
- - complex_(float|long_double)_from_pyobj now use complex_double_from_pyobj.
- - Rewrote ``*_from_pyobj`` to be more robust. This fixes segfaults if
- getting * from a string. Note that int_from_pyobj differs
- from PyNumber_Int in that it accepts also complex arguments
- (takes the real part) and sequences (takes the 1st element).
- - Removed unnecessary void* casts in NUMFROMARROBJ.
- - Fixed casts in ``*_from_pyobj`` functions.
- - Replaced CNUMFROMARROBJ with NUMFROMARROBJ.
-
-.. __: http://sourceforge.net/tracker/index.php?func=detail&aid=435026&group_id=5470&atid=105470
-
-* auxfuncs.py
-
- - Introduced isdummyroutine().
- - Fixed islong_* functions.
- - Fixed isintent_in for intent(c) arguments (bug report: Pierre Schnizer).
- - Introduced F2PYError and throw_error. Using throw_error, f2py
- rejects illegal .pyf file constructs that otherwise would cause
- compilation failures or python crashes.
- - Fixed islong_long(logical*8)->True.
- - Introduced islogical() and islogicalfunction().
- - Fixed prototype string argument (bug report: eric).
-
-* Updated README.txt and doc strings. Starting to use docutils.
-
-* Speed up for ``*_from_pyobj`` functions if obj is a sequence.
-
-* Fixed SegFault (reported by M.Braun) due to invalid ``Py_DECREF``
- in ``GETSCALARFROMPYTUPLE``.
-
-Older Releases
-==============
-
-::
-
- *** Fixed missing includes when wrapping F90 module data.
- *** Fixed typos in docs of build_flib options.
- *** Implemented prototype calculator if no callstatement or
- callprotoargument statements are used. A warning is issued if
- callstatement is used without callprotoargument.
- *** Fixed transposing issue with array arguments in callback functions.
- *** Removed -pyinc command line option.
- *** Complete tests for Fortran 77 functions returning scalars.
- *** Fixed returning character bug if --no-wrap-functions.
- *** Described how to wrap F compiled Fortran F90 module procedures
- with F2PY. See doc/using_F_compiler.txt.
- *** Fixed the order of build_flib options when using --fcompiler=...
- *** Recognize .f95 and .F95 files as Fortran sources with free format.
- *** Cleaned up the output of 'f2py -h': removed obsolete items,
- added build_flib options section.
- *** Added --help-compiler option: it lists available Fortran compilers
- as detected by numpy_distutils/command/build_flib.py. This option
- is available only with -c option.
-
-
-:Release: 2.13.175-1250
-:Date: 4 April 2002
-
-::
-
- *** Fixed copying of non-contiguous 1-dimensional arrays bug.
- (Thanks to Travis O.).
-
-
-:Release: 2.13.175-1242
-:Date: 26 March 2002
-
-::
-
- *** Fixed ignoring type declarations.
- *** Turned F2PY_REPORT_ATEXIT off by default.
- *** Made MAX,MIN macros available by default so that they can be
- always used in signature files.
- *** Disabled F2PY_REPORT_ATEXIT for FreeBSD.
-
-
-:Release: 2.13.175-1233
-:Date: 13 March 2002
-
-::
-
- *** Fixed Win32 port when using f2py.bat. (Thanks to Erik Wilsher).
- *** F2PY_REPORT_ATEXIT is disabled for MACs.
- *** Fixed incomplete dependency calculator.
-
-
-:Release: 2.13.175-1222
-:Date: 3 March 2002
-
-::
-
- *** Plugged a memory leak for intent(out) arrays with overwrite=0.
- *** Introduced CDOUBLE_to_CDOUBLE,.. functions for copy_ND_array.
- These cast functions probably work incorrectly in Numeric.
-
-
-:Release: 2.13.175-1212
-:Date: 23 February 2002
-
-::
-
- *** Updated f2py for the latest numpy_distutils.
- *** A nasty bug with multi-dimensional Fortran arrays is fixed
- (intent(out) arrays had wrong shapes). (Thanks to Eric for
- pointing out this bug).
- *** F2PY_REPORT_ATEXIT is disabled by default for __WIN32__.
-
-
-:Release: 2.11.174-1161
-:Date: 14 February 2002
-
-::
-
- *** Updated f2py for the latest numpy_distutils.
- *** Fixed raise error when f2py missed -m flag.
- *** Script name `f2py' now depends on the name of python executable.
- For example, `python2.2 setup.py install' will create a f2py
- script with a name `f2py2.2'.
- *** Introduced 'callprotoargument' statement so that proper prototypes
- can be declared. This is crucial when wrapping C functions as it
- will fix segmentation faults when these wrappers use non-pointer
- arguments (thanks to R. Clint Whaley for explaining this to me).
- Note that in f2py generated wrapper, the prototypes have
- the following forms:
- extern #rtype# #fortranname#(#callprotoargument#);
- or
- extern #rtype# F_FUNC(#fortranname#,#FORTRANNAME#)(#callprotoargument#);
- *** Cosmetic fixes to F2PY_REPORT_ATEXIT feature.
-
-
-:Release: 2.11.174-1146
-:Date: 3 February 2002
-
-::
-
- *** Reviewed reference counting in call-back mechanism. Fixed few bugs.
- *** Enabled callstatement for complex functions.
- *** Fixed bug with initializing capi_overwrite_<varname>
- *** Introduced intent(overwrite) that is similar to intent(copy) but
- has opposite effect. Renamed copy_<name>=1 to overwrite_<name>=0.
- intent(overwrite) will make default overwrite_<name>=1.
- *** Introduced intent(in|inout,out,out=<name>) attribute that renames
- arguments name when returned. This renaming has effect only in
- documentation strings.
- *** Introduced 'callstatement' statement to pyf file syntax. With this
- one can specify explicitly how wrapped function should be called
- from the f2py generated module. WARNING: this is a dangerous feature
- and should be used with care. It is introduced to provide a hack
- to construct wrappers that may have very different signature
- pattern from the wrapped function. Currently 'callstatement' can
- be used only inside a subroutine or function block (it should be enough
- though) and must be only in one continuous line. The syntax of the
- statement is: callstatement <C-expression>;
-
-
-:Release: 2.11.174
-:Date: 18 January 2002
-
-::
-
- *** Fixed memory-leak for PyFortranObject.
- *** Introduced extra keyword argument copy_<varname> for intent(copy)
- variables. It defaults to 1 and forces to make a copy for
- intent(in) variables when passing on to wrapped functions (in case
- they undesirably change the variable in-situ).
- *** Introduced has_column_major_storage member function for all f2py
- generated extension modules. It is equivalent to Python call
- 'transpose(obj).iscontiguous()' but very efficient.
- *** Introduced -DF2PY_REPORT_ATEXIT. If this is used when compiling,
- a report is printed to stderr as python exits. The report includes
- the following timings:
- 1) time spent in all wrapped function calls;
- 2) time spent in f2py generated interface around the wrapped
- functions. This gives a hint whether one should worry
- about storing data in proper order (C or Fortran).
- 3) time spent in Python functions called by wrapped functions
- through call-back interface.
- 4) time spent in f2py generated call-back interface.
- For now, -DF2PY_REPORT_ATEXIT is enabled by default. Use
- -DF2PY_REPORT_ATEXIT_DISABLE to disable it (I am not sure if
- Windows has needed tools, let me know).
- Also, I appreciate if you could send me the output of 'F2PY
- performance report' (with CPU and platform information) so that I
- could optimize f2py generated interfaces for future releases.
- *** Extension modules can be linked with dmalloc library. Use
- -DDMALLOC when compiling.
- *** Moved array_from_pyobj to fortranobject.c.
- *** Usage of intent(inout) arguments is made more strict -- only
- with proper type contiguous arrays are accepted. In general,
- you should avoid using intent(inout) attribute as it makes
- wrappers of C and Fortran functions asymmetric. I recommend using
- intent(in,out) instead.
- *** intent(..) has new keywords: copy,cache.
- intent(copy,in) - forces a copy of an input argument; this
- may be useful for cases where the wrapped function changes
- the argument in situ and this may not be desired side effect.
- Otherwise, it is safe to not use intent(copy) for the sake
- of a better performance.
- intent(cache,hide|optional) - just creates a junk of memory.
- It does not care about proper storage order. Can be also
- intent(in) but then the corresponding argument must be a
- contiguous array with a proper elsize.
- *** intent(c) can be used also for subroutine names so that
- -DNO_APPEND_FORTRAN can be avoided for C functions.
-
- *** IMPORTANT BREAKING GOOD ... NEWS!!!:
-
- From now on you don't have to worry about the proper storage order
- in multi-dimensional arrays that was earlier a real headache when
- wrapping Fortran functions. Now f2py generated modules take care
 -    of the proper conversions when needed. I have carefully designed
- and optimized this interface to avoid any unnecessary memory usage
- or copying of data. However, it is wise to use input arrays that
- has proper storage order: for C arguments it is row-major and for
- Fortran arguments it is column-major. But you don't need to worry
- about that when developing your programs. The optimization of
- initializing the program with proper data for possibly better
- memory usage can be safely postponed until the program is working.
-
- This change also affects the signatures in .pyf files. If you have
- created wrappers that take multi-dimensional arrays in arguments,
- it is better to let f2py re-generate these files. Or you have to
- manually do the following changes: reverse the axes indices in all
- 'shape' macros. For example, if you have defined an array A(n,m)
- and n=shape(A,1), m=shape(A,0) then you must change the last
- statements to n=shape(A,0), m=shape(A,1).
-
-
-:Release: 2.8.172
-:Date: 13 January 2002
-
-::
-
- *** Fixed -c process. Removed pyf_extensions function and pyf_file class.
- *** Reorganized setup.py. It generates f2py or f2py.bat scripts
- depending on the OS and the location of the python executable.
- *** Started to use update_version from numpy_distutils that makes
- f2py startup faster. As a side effect, the version number system
- changed.
- *** Introduced test-site/test_f2py2e.py script that runs all
- tests.
- *** Fixed global variables initialization problem in crackfortran
- when run_main is called several times.
- *** Added 'import Numeric' to C/API init<module> function.
- *** Fixed f2py.bat in setup.py.
- *** Switched over to numpy_distutils and dropped fortran_support.
- *** On Windows create f2py.bat file.
- *** Introduced -c option: read fortran or pyf files, construct extension
- modules, build, and save them to current directory.
- In one word: do-it-all-in-one-call.
- *** Introduced pyf_extensions(sources,f2py_opts) function. It simplifies
- the extension building process considerably. Only for internal use.
- *** Converted tests to use numpy_distutils in order to improve portability:
- a,b,c
- *** f2py2e.run_main() returns a pyf_file class instance containing
- information about f2py generated files.
- *** Introduced `--build-dir <dirname>' command line option.
- *** Fixed setup.py for bdist_rpm command.
- *** Added --numpy-setup command line option.
- *** Fixed crackfortran that did not recognized capitalized type
- specification with --no-lower flag.
- *** `-h stdout' writes signature to stdout.
- *** Fixed incorrect message for check() with empty name list.
-
-
-:Release: 2.4.366
-:Date: 17 December 2001
-
-::
-
- *** Added command line option --[no-]manifest.
- *** `make test' should run on Windows, but the results are not truthful.
- *** Reorganized f2py2e.py a bit. Introduced run_main(comline_list) function
- that can be useful when running f2py from another Python module.
- *** Removed command line options -f77,-fix,-f90 as the file format
- is determined from the extension of the fortran file
- or from its header (first line starting with `!%' and containing keywords
 -        free, fix, or f77). The latter overrides the former.
- *** Introduced command line options --[no-]makefile,--[no-]latex-doc.
- Users must explicitly use --makefile,--latex-doc if Makefile-<modulename>,
- <modulename>module.tex is desired. --setup is default. Use --no-setup
- to disable setup_<modulename>.py generation. --overwrite-makefile
- will set --makefile.
- *** Added `f2py_rout_' to #capiname# in rules.py.
- *** intent(...) statement with empty namelist forces intent(...) attribute for
- all arguments.
- *** Dropped DL_IMPORT and DL_EXPORT in fortranobject.h.
- *** Added missing PyFortran_Type.ob_type initialization.
- *** Added gcc-3.0 support.
- *** Raising non-existing/broken Numeric as a FatalError exception.
- *** Fixed Python 2.x specific += construct in fortran_support.py.
- *** Fixed copy_ND_array for 1-rank arrays that used to call calloc(0,..)
- and caused core dump with a non-gcc compiler (Thanks to Pierre Schnizer
- for reporting this bug).
- *** Fixed "warning: variable `..' might be clobbered by `longjmp' or `vfork'":
- - Reorganized the structure of wrapper functions to get rid of
- `goto capi_fail' statements that caused the above warning.
-
-
-:Release: 2.3.343
-:Date: 12 December 2001
-
-::
-
- *** Issues with the Win32 support (thanks to Eric Jones and Tiffany Kamm):
- - Using DL_EXPORT macro for init#modulename#.
- - Changed PyObject_HEAD_INIT(&PyType_Type) to PyObject_HEAD_INIT(0).
- - Initializing #name#_capi=NULL instead of Py_None in cb hooks.
- *** Fixed some 'warning: function declaration isn't a prototype', mainly
- in fortranobject.{c,h}.
- *** Fixed 'warning: missing braces around initializer'.
- *** Fixed reading a line containing only a label.
- *** Fixed nonportable 'cp -fv' to shutil.copy in f2py2e.py.
- *** Replaced PyEval_CallObject with PyObject_CallObject in cb_rules.
- *** Replaced Py_DECREF with Py_XDECREF when freeing hidden arguments.
- (Reason: Py_DECREF caused segfault when an error was raised)
- *** Impl. support for `include "file"' (in addition to `include 'file'')
- *** Fixed bugs (buildsetup.py missing in Makefile, in generated MANIFEST.in)
-
-
-:Release: 2.3.327
-:Date: 4 December 2001
-
-::
-
- *** Sending out the third public release of f2py.
- *** Support for Intel(R) Fortran Compiler (thanks to Patrick LeGresley).
- *** Introduced `threadsafe' statement to pyf-files (or to be used with
- the 'f2py' directive in fortran codes) to force
- Py_BEGIN|END_ALLOW_THREADS block around the Fortran subroutine
- calling statement in Python C/API. `threadsafe' statement has
- an effect only inside a subroutine block.
- *** Introduced `fortranname <name>' statement to be used only within
- pyf-files. This is useful when the wrapper (Python C/API) function
- has different name from the wrapped (Fortran) function.
- *** Introduced `intent(c)' directive and statement. It is useful when
- wrapping C functions. Use intent(c) for arguments that are
- scalars (not pointers) or arrays (with row-ordering of elements).
-
-
-:Release: 2.3.321
-:Date: 3 December 2001
-
-::
-
- *** f2py2e can be installed using distutils (run `python setup.py install').
- *** f2py builds setup_<modulename>.py. Use --[no-]setup to control this
- feature. setup_<modulename>.py uses fortran_support module (from SciPy),
- but for your convenience it is included also with f2py as an additional
- package. Note that it has not as many compilers supported as with
- using Makefile-<modulename>, but new compilers should be added to
- fortran_support module, not to f2py2e package.
- *** Fixed some compiler warnings about else statements.
diff --git a/doc/f2py/Makefile b/doc/f2py/Makefile
deleted file mode 100644
index 2dd168fad..000000000
--- a/doc/f2py/Makefile
+++ /dev/null
@@ -1,76 +0,0 @@
-# Makefile for compiling f2py2e documentation (dvi, ps, html)
-# Pearu Peterson <pearu@ioc.ee>
-
-REL=4
-TOP = usersguide
-LATEXSRC = bugs.tex commands.tex f2py2e.tex intro.tex notes.tex signaturefile.tex
-MAINLATEX = f2py2e
-
-LATEX = latex
-PDFLATEX = pdflatex
-
-COLLECTINPUT = ./collectinput.py
-INSTALLDATA = install -m 644 -c
-
-TTH = tth
-TTHFILTER = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -L$(MAINLATEX) -i
-TTHFILTER2 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lpython9 -i
-TTHFILTER3 = sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | $(TTH) -Lfortranobject -i
-TTHMISSING = "\
-***************************************************************\n\
-Warning: Could not find tth (a TeX to HTML translator) \n\
- or an error was arisen by tth\n\
-You can download tth from http://hutchinson.belmont.ma.us/tth/ \n\
-or\n\
-use your favorite LaTeX to HTML translator on file tmp_main.tex\n\
-***************************************************************\
-"
-
-all: dvi ps html clean
-$(MAINLATEX).dvi: $(LATEXSRC)
- $(LATEX) $(MAINLATEX).tex
- $(LATEX) $(MAINLATEX).tex
- $(LATEX) $(MAINLATEX).tex
- $(PDFLATEX) $(MAINLATEX).tex
-$(TOP).dvi: $(MAINLATEX).dvi
- cp -f $(MAINLATEX).dvi $(TOP).dvi
- mv -f $(MAINLATEX).pdf $(TOP).pdf
-$(TOP).ps: $(TOP).dvi
- dvips $(TOP).dvi -o
-$(TOP).html: $(LATEXSRC)
- $(COLLECTINPUT) < $(MAINLATEX).tex > tmp_$(MAINLATEX).tex
- @test `which $(TTH)` && cat tmp_$(MAINLATEX).tex | $(TTHFILTER) > $(TOP).html\
- || echo -e $(TTHMISSING)
-dvi: $(TOP).dvi
-ps: $(TOP).ps
- gzip -f $(TOP).ps
-html: $(TOP).html
-
-python9:
- cp -f python9.tex f2python9-final/src/
- cd f2python9-final && mk_html.sh
- cd f2python9-final && mk_ps.sh
- cd f2python9-final && mk_pdf.sh
-pyfobj:
- $(LATEX) fortranobject.tex
- $(LATEX) fortranobject.tex
- $(LATEX) fortranobject.tex
- @test `which $(TTH)` && cat fortranobject.tex | $(TTHFILTER3) > pyfobj.html\
- || echo -e $(TTHMISSING)
- dvips fortranobject.dvi -o pyfobj.ps
- gzip -f pyfobj.ps
- pdflatex fortranobject.tex
- mv fortranobject.pdf pyfobj.pdf
-
-WWWDIR=/net/cens/home/www/unsecure/projects/f2py2e/
-wwwpage: all
- $(INSTALLDATA) index.html $(TOP).html $(TOP).ps.gz $(TOP).dvi $(TOP).pdf \
- Release-$(REL).x.txt ../NEWS.txt win32_notes.txt $(WWWDIR)
- $(INSTALLDATA) pyfobj.{ps.gz,pdf,html} $(WWWDIR)
- $(INSTALLDATA) f2python9-final/f2python9.{ps.gz,pdf,html} f2python9-final/{flow,structure,aerostructure}.jpg $(WWWDIR)
-clean:
- rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc}
-distclean:
- rm -f tmp_$(MAINLATEX).* $(MAINLATEX).{aux,dvi,log,toc}
- rm -f $(TOP).{ps,dvi,html,pdf,ps.gz}
- rm -f *~
diff --git a/doc/f2py/OLDNEWS.txt b/doc/f2py/OLDNEWS.txt
deleted file mode 100644
index 7b094951c..000000000
--- a/doc/f2py/OLDNEWS.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-
-.. topic:: Old F2PY NEWS
-
- January 30, 2005
-
- Latest F2PY release (version 2.45.241_1926).
- New features: wrapping unsigned integers, support for ``.pyf.src`` template files,
- callback arguments can now be CObjects, fortran objects, built-in functions.
- Introduced ``intent(aux)`` attribute. Wrapped objects have ``_cpointer``
- attribute holding C pointer to wrapped functions or variables.
- Many bug fixes and improvements, updated documentation.
- `Differences with the previous release (version 2.43.239_1831)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.163&r2=1.137&f=h
-
- October 4, 2004
- F2PY bug fix release (version 2.43.239_1831).
- Better support for 64-bit platforms.
- Introduced ``--help-link`` and ``--link-<resource>`` options.
- Bug fixes.
- `Differences with the previous release (version 2.43.239_1806)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.137&r2=1.131&f=h
-
- September 25, 2004
- Latest F2PY release (version 2.43.239_1806).
- Support for ``ENTRY`` statement. New attributes:
- ``intent(inplace)``, ``intent(callback)``. Supports Numarray 1.1.
- Introduced ``-*- fix -*-`` header content. Improved ``PARAMETER`` support.
- Documentation updates. `Differences with the previous release
- (version 2.39.235-1693)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.131&r2=1.98&f=h
-
- March 30, 2004
- F2PY bug fix release (version 2.39.235-1693). Two new command line switches:
- ``--compiler`` and ``--include_paths``. Support for allocatable string arrays.
- Callback arguments may now be arbitrary callable objects. Win32 installers
- for F2PY and Scipy_core are provided.
- `Differences with the previous release (version 2.37.235-1660)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.98&r2=1.87&f=h
-
- March 9, 2004
- F2PY bug fix release (version 2.39.235-1660).
- `Differences with the previous release (version 2.37.235-1644)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.87&r2=1.83&f=h
-
- February 24, 2004
- Latest F2PY release (version 2.39.235-1644).
- Support for numpy_distutils 0.2.2 and up (e.g. compiler flags can be
- changed via f2py command line options). Implemented support for
- character arrays and arrays of strings (e.g. ``character*(*) a(m,..)``).
- *Important bug fixes regarding complex arguments, upgrading is
- highly recommended*. Documentation updates.
- `Differences with the previous release (version 2.37.233-1545)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.83&r2=1.58&f=h
-
- September 11, 2003
- Latest F2PY release (version 2.37.233-1545).
- New statements: ``pymethoddef`` and ``usercode`` in interface blocks.
- New function: ``as_column_major_storage``.
- New CPP macro: ``F2PY_REPORT_ON_ARRAY_COPY``.
- Bug fixes.
- `Differences with the previous release (version 2.35.229-1505)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.58&r2=1.49&f=h
-
- August 2, 2003
- Latest F2PY release (version 2.35.229-1505).
- `Differences with the previous release (version 2.32.225-1419)`__.
-
- __ http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt.diff?r1=1.49&r2=1.28&f=h
-
- April 2, 2003
- Initial support for Numarray_ (thanks to Todd Miller).
-
- December 8, 2002
- Sixth public release of F2PY (version 2.32.225-1419). Comes with
- revised `F2PY Users Guide`__, `new testing site`__, lots of fixes
- and other improvements, see `HISTORY.txt`_ for details.
-
- __ usersguide/index.html
- __ TESTING.txt_
-
-.. References
- ==========
-
-.. _HISTORY.txt: HISTORY.html
-.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray
-.. _TESTING.txt: TESTING.html
diff --git a/doc/f2py/README.txt b/doc/f2py/README.txt
deleted file mode 100644
index 76e1fed97..000000000
--- a/doc/f2py/README.txt
+++ /dev/null
@@ -1,415 +0,0 @@
-.. -*- rest -*-
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- F2PY: Fortran to Python interface generator
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-:Author: Pearu Peterson <pearu@cens.ioc.ee>
-:License: NumPy License
-:Web-site: http://cens.ioc.ee/projects/f2py2e/
-:Discussions to: `f2py-users mailing list`_
-:Documentation: `User's Guide`__, FAQ__
-:Platforms: All
-:Date: $Date: 2005/01/30 18:54:53 $
-
-.. _f2py-users mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/
-__ usersguide/index.html
-__ FAQ.html
-
-.. Contents::
-
-==============
- Introduction
-==============
-
-The purpose of the F2PY --*Fortran to Python interface generator*--
-project is to provide connection between Python_ and Fortran
-languages. F2PY is a Python extension tool for creating Python C/API
-modules from (handwritten or F2PY generated) signature files (or
-directly from Fortran sources). The generated extension modules
-facilitate:
-
-* Calling Fortran 77/90/95, Fortran 90/95 module, and C functions from
- Python.
-
-* Accessing Fortran 77 ``COMMON`` blocks and Fortran 90/95 module
- data (including allocatable arrays) from Python.
-
-* Calling Python functions from Fortran or C (call-backs).
-
-* Automatically handling the difference in the data storage order of
- multi-dimensional Fortran and Numerical Python (i.e. C) arrays.
-
-In addition, F2PY can build the generated extension modules to shared
-libraries with one command. F2PY uses the ``numpy_distutils`` module
-from SciPy_ that supports number of major Fortran compilers.
-
-..
- (see `COMPILERS.txt`_ for more information).
-
-F2PY generated extension modules depend on NumPy_ package that
-provides fast multi-dimensional array language facility to Python.
-
-
----------------
- Main features
----------------
-
-Here follows a more detailed list of F2PY features:
-
-* F2PY scans real Fortran codes to produce the so-called signature
- files (.pyf files). The signature files contain all the information
- (function names, arguments and their types, etc.) that is needed to
- construct Python bindings to Fortran (or C) functions.
-
- The syntax of signature files is borrowed from the
- Fortran 90/95 language specification and has some F2PY specific
- extensions. The signature files can be modified to dictate how
- Fortran (or C) programs are called from Python:
-
- + F2PY solves dependencies between arguments (this is relevant for
- the order of initializing variables in extension modules).
-
- + Arguments can be specified to be optional or hidden that
- simplifies calling Fortran programs from Python considerably.
-
- + In principle, one can design any Python signature for a given
- Fortran function, e.g. change the order arguments, introduce
- auxiliary arguments, hide the arguments, process the arguments
- before passing to Fortran, return arguments as output of F2PY
- generated functions, etc.
-
-* F2PY automatically generates __doc__ strings (and optionally LaTeX
- documentation) for extension modules.
-
-* F2PY generated functions accept arbitrary (but sensible) Python
- objects as arguments. The F2PY interface automatically takes care of
- type-casting and handling of non-contiguous arrays.
-
-* The following Fortran constructs are recognized by F2PY:
-
- + All basic Fortran types::
-
- integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ]
- integer*([ -1 | -2 | -4 | -8 ])
- character[ | *(*) | *1 | *2 | *3 | ... ]
- real[ | *4 | *8 | *16 ], double precision
- complex[ | *8 | *16 | *32 ]
-
- Negative ``integer`` kinds are used to wrap unsigned integers.
-
- + Multi-dimensional arrays of all basic types with the following
- dimension specifications::
-
- <dim> | <start>:<end> | * | :
-
- + Attributes and statements::
-
- intent([ in | inout | out | hide | in,out | inout,out | c |
- copy | cache | callback | inplace | aux ])
- dimension(<dimspec>)
- common, parameter
- allocatable
- optional, required, external
- depend([<names>])
- check([<C-booleanexpr>])
- note(<LaTeX text>)
- usercode, callstatement, callprotoargument, threadsafe, fortranname
- pymethoddef
- entry
-
-* Because there are only little (and easily handleable) differences
- between calling C and Fortran functions from F2PY generated
- extension modules, then F2PY is also well suited for wrapping C
- libraries to Python.
-
-* Practice has shown that F2PY generated interfaces (to C or Fortran
- functions) are less error prone and even more efficient than
- handwritten extension modules. The F2PY generated interfaces are
- easy to maintain and any future optimization of F2PY generated
- interfaces transparently apply to extension modules by just
- regenerating them with the latest version of F2PY.
-
-* `F2PY Users Guide and Reference Manual`_
-
-
-===============
- Prerequisites
-===============
-
-F2PY requires the following software installed:
-
-* Python_ (versions 1.5.2 or later; 2.1 and up are recommended).
- You must have python-dev package installed.
-* NumPy_ (versions 13 or later; 20.x, 21.x, 22.x, 23.x are recommended)
-* Numarray_ (version 0.9 and up), optional, partial support.
-* Scipy_distutils (version 0.2.2 and up are recommended) from SciPy_
- project. Get it from Scipy CVS or download it below.
-
-Python 1.x users also need distutils_.
-
-Of course, to build extension modules, you'll need also working C
-and/or Fortran compilers installed.
-
-==========
- Download
-==========
-
-You can download the sources for the latest F2PY and numpy_distutils
-releases as:
-
-* `2.x`__/`F2PY-2-latest.tar.gz`__
-* `2.x`__/`numpy_distutils-latest.tar.gz`__
-
-Windows users might be interested in Win32 installer for F2PY and
-Scipy_distutils (these installers are built using Python 2.3):
-
-* `2.x`__/`F2PY-2-latest.win32.exe`__
-* `2.x`__/`numpy_distutils-latest.win32.exe`__
-
-Older releases are also available in the directories
-`rel-0.x`__, `rel-1.x`__, `rel-2.x`__, `rel-3.x`__, `rel-4.x`__, `rel-5.x`__,
-if you need them.
-
-.. __: 2.x/
-.. __: 2.x/F2PY-2-latest.tar.gz
-.. __: 2.x/
-.. __: 2.x/numpy_distutils-latest.tar.gz
-.. __: 2.x/
-.. __: 2.x/F2PY-2-latest.win32.exe
-.. __: 2.x/
-.. __: 2.x/numpy_distutils-latest.win32.exe
-.. __: rel-0.x
-.. __: rel-1.x
-.. __: rel-2.x
-.. __: rel-3.x
-.. __: rel-4.x
-.. __: rel-5.x
-
-Development version of F2PY from CVS is available as `f2py2e.tar.gz`__.
-
-__ http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/f2py2e.tar.gz?tarball=1
-
-Debian Sid users can simply install ``python-f2py`` package.
-
-==============
- Installation
-==============
-
-Unpack the source file, change to directrory ``F2PY-?-???/`` and run
-(you may need to become a root)::
-
- python setup.py install
-
-The F2PY installation installs a Python package ``f2py2e`` to your
-Python ``site-packages`` directory and a script ``f2py`` to your
-Python executable path.
-
-See also Installation__ section in `F2PY FAQ`_.
-
-.. __: FAQ.html#installation
-
-Similarly, to install ``numpy_distutils``, unpack its tar-ball and run::
-
- python setup.py install
-
-=======
- Usage
-=======
-
-To check if F2PY is installed correctly, run
-::
-
- f2py
-
-without any arguments. This should print out the usage information of
-the ``f2py`` program.
-
-Next, try out the following three steps:
-
-1) Create a Fortran file `hello.f`__ that contains::
-
- C File hello.f
- subroutine foo (a)
- integer a
- print*, "Hello from Fortran!"
- print*, "a=",a
- end
-
-__ hello.f
-
-2) Run
-
- ::
-
- f2py -c -m hello hello.f
-
- This will build an extension module ``hello.so`` (or ``hello.sl``,
- or ``hello.pyd``, etc. depending on your platform) into the current
- directory.
-
-3) Now in Python try::
-
- >>> import hello
- >>> print hello.__doc__
- >>> print hello.foo.__doc__
- >>> hello.foo(4)
- Hello from Fortran!
- a= 4
- >>>
-
-If the above works, then you can try out more thorough
-`F2PY unit tests`__ and read the `F2PY Users Guide and Reference Manual`_.
-
-__ FAQ.html#q-how-to-test-if-f2py-is-working-correctly
-
-===============
- Documentation
-===============
-
-The documentation of the F2PY project is collected in ``f2py2e/docs/``
-directory. It contains the following documents:
-
-`README.txt`_ (on GitHub__)
- The first thing to read about F2PY -- this document.
-
-__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/README.txt
-
-`usersguide/index.txt`_, `usersguide/f2py_usersguide.pdf`_
- F2PY Users Guide and Reference Manual. Contains lots of examples.
-
-`FAQ.txt`_ (on GitHub__)
- F2PY Frequently Asked Questions.
-
-__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/FAQ.txt
-
-`TESTING.txt`_ (on GitHub__)
- About F2PY testing site. What tests are available and how to run them.
-
-__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/TESTING.txt
-
-`HISTORY.txt`_ (on GitHub__)
- A list of latest changes in F2PY. This is the most up-to-date
- document on F2PY.
-
-__ https://github.com/numpy/numpy/blob/master/numpy/f2py/docs/HISTORY.txt
-
-`THANKS.txt`_
- Acknowledgments.
-
-..
- `COMPILERS.txt`_
- Compiler and platform specific notes.
-
-===============
- Mailing list
-===============
-
-A mailing list f2py-users@cens.ioc.ee is open for F2PY related
-discussion/questions/etc.
-
-* `Subscribe..`__
-* `Archives..`__
-
-__ http://cens.ioc.ee/mailman/listinfo/f2py-users
-__ http://cens.ioc.ee/pipermail/f2py-users
-
-
-=====
- CVS
-=====
-
-F2PY is being developed under CVS_. The CVS version of F2PY can be
-obtained as follows:
-
-1) First you need to login (the password is ``guest``)::
-
- cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
-
-2) and then do the checkout::
-
- cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
-
-3) You can update your local F2PY tree ``f2py2e/`` by executing::
-
- cvs -z6 update -P -d
-
-You can browse the `F2PY CVS`_ repository.
-
-===============
- Contributions
-===============
-
-* `A short introduction to F2PY`__ by Pierre Schnizer.
-
-* `F2PY notes`__ by Fernando Perez.
-
-* `Debian packages of F2PY`__ by José Fonseca. [OBSOLETE, Debian Sid
- ships python-f2py package]
-
-__ http://fubphpc.tu-graz.ac.at/~pierre/f2py_tutorial.tar.gz
-__ http://cens.ioc.ee/pipermail/f2py-users/2003-April/000472.html
-__ http://jrfonseca.dyndns.org/debian/
-
-
-===============
- Related sites
-===============
-
-* `Numerical Python`_ -- adds a fast array facility to the Python language.
-* Pyfort_ -- A Python-Fortran connection tool.
-* SciPy_ -- An open source library of scientific tools for Python.
-* `Scientific Python`_ -- A collection of Python modules that are
- useful for scientific computing.
-* `The Fortran Company`_ -- A place to find products, services, and general
- information related to the Fortran programming language.
-* `American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978`__
-* `J3`_ -- The US Fortran standards committee.
-* SWIG_ -- A software development tool that connects programs written
- in C and C++ with a variety of high-level programming languages.
-* `Mathtools.net`_ -- A technical computing portal for all scientific
- and engineering needs.
-
-.. __: http://www.fortran.com/fortran/F77_std/rjcnf.html
-
-.. References
- ==========
-
-
-.. _F2PY Users Guide and Reference Manual: usersguide/index.html
-.. _usersguide/index.txt: usersguide/index.html
-.. _usersguide/f2py_usersguide.pdf: usersguide/f2py_usersguide.pdf
-.. _README.txt: README.html
-.. _COMPILERS.txt: COMPILERS.html
-.. _F2PY FAQ:
-.. _FAQ.txt: FAQ.html
-.. _HISTORY.txt: HISTORY.html
-.. _HISTORY.txt from CVS: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/docs/HISTORY.txt?rev=HEAD&content-type=text/x-cvsweb-markup
-.. _THANKS.txt: THANKS.html
-.. _TESTING.txt: TESTING.html
-.. _F2PY CVS2: http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/
-.. _F2PY CVS: http://cens.ioc.ee/cgi-bin/viewcvs.cgi/python/f2py2e/
-
-.. _CVS: http://www.cvshome.org/
-.. _Python: http://www.python.org/
-.. _SciPy: http://www.numpy.org/
-.. _NumPy: http://www.numpy.org/
-.. _Numarray: http://www.stsci.edu/resources/software_hardware/numarray
-.. _docutils: http://docutils.sourceforge.net/
-.. _distutils: http://www.python.org/sigs/distutils-sig/
-.. _Numerical Python: http://www.numpy.org/
-.. _Pyfort: http://pyfortran.sourceforge.net/
-.. _Scientific Python:
- http://starship.python.net/crew/hinsen/scientific.html
-.. _The Fortran Company: http://www.fortran.com/fortran/
-.. _J3: http://www.j3-fortran.org/
-.. _Mathtools.net: http://www.mathtools.net/
-.. _SWIG: http://www.swig.org/
-
-..
- Local Variables:
- mode: indented-text
- indent-tabs-mode: nil
- sentence-end-double-space: t
- fill-column: 70
- End:
diff --git a/doc/f2py/Release-1.x.txt b/doc/f2py/Release-1.x.txt
deleted file mode 100644
index 46d6fbf09..000000000
--- a/doc/f2py/Release-1.x.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-
-I am pleased to announce the first public release of f2py 1.116:
-
-Writing Python C/API wrappers for Fortran routines can be a very
-tedious task, especially if a Fortran routine takes more than 20
-arguments but only few of them are relevant for the problems that they
-solve.
-
-The Fortran to Python Interface Generator, or FPIG for short, is a
-command line tool (f2py) for generating Python C/API modules for
-wrapping Fortran 77 routines, accessing common blocks from Python, and
-calling Python functions from Fortran (call-backs).
-
-The tool can be downloaded from
-
- http://cens.ioc.ee/projects/f2py2e/
-
-where you can find also information about f2py features and its User's
-Guide.
-
-f2py is released under the LGPL license.
-
-With regards,
- Pearu Peterson <pearu@ioc.ee>
-
-<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 1.116</A> - The
-Fortran to Python Interface Generator (25-Jan-00)
diff --git a/doc/f2py/Release-2.x.txt b/doc/f2py/Release-2.x.txt
deleted file mode 100644
index 2085cb1be..000000000
--- a/doc/f2py/Release-2.x.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-
-FPIG - Fortran to Python Interface Generator
-
-I am pleased to announce the second public release of f2py
-(version 2.264):
-
- http://cens.ioc.ee/projects/f2py2e/
-
-f2py is a command line tool for binding Python and Fortran codes. It
-scans Fortran 77/90/95 codes and generates a Python C/API module that
-makes it possible to call Fortran routines from Python. No Fortran or
-C expertise is required for using this tool.
-
-Features include:
-
- *** All basic Fortran types are supported:
- integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
- character[ | *(*) | *1 | *2 | *3 | ... ]
- real[ | *4 | *8 | *16 ], double precision,
- complex[ | *8 | *16 | *32 ]
-
- *** Multi-dimensional arrays of (almost) all basic types.
- Dimension specifications:
- <dim> | <start>:<end> | * | :
-
- *** Supported attributes:
- intent([ in | inout | out | hide | in,out | inout,out ])
- dimension(<dimspec>)
- depend([<names>])
- check([<C-booleanexpr>])
- note(<LaTeX text>)
- optional, required, external
-
- *** Calling Fortran 77/90/95 subroutines and functions. Also
- Fortran 90/95 module routines. Internal initialization of
- optional arguments.
-
- *** Accessing COMMON blocks from Python. Accessing Fortran 90/95
- module data coming soon.
-
- *** Call-back functions: calling Python functions from Fortran with
- very flexible hooks.
-
- *** In Python, arguments of the interfaced functions may be of
- different type - necessary type conversations are done
- internally in C level.
-
- *** Automatically generates documentation (__doc__,LaTeX) for
- interface functions.
-
- *** Automatically generates signature files --- user has full
- control over the interface constructions. Automatically
- detects the signatures of call-back functions, solves argument
- dependencies, etc.
-
- *** Automatically generates Makefile for compiling Fortran and C
- codes and linking them to a shared module. Many compilers are
- supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
- F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
- Linux, HP-UX, IRIX64.
-
- *** Complete User's Guide in various formats (html,ps,pdf,dvi).
-
- *** f2py users list is available for support, feedback, etc.
-
-More information about f2py, see
-
- http://cens.ioc.ee/projects/f2py2e/
-
-f2py is released under the LGPL license.
-
-Sincerely,
- Pearu Peterson <pearu@ioc.ee>
- September 12, 2000
-
-<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.264</A> - The
-Fortran to Python Interface Generator (12-Sep-00)
diff --git a/doc/f2py/Release-3.x.txt b/doc/f2py/Release-3.x.txt
deleted file mode 100644
index ddb93b9fd..000000000
--- a/doc/f2py/Release-3.x.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-
-F2PY - Fortran to Python Interface Generator
-
-I am pleased to announce the third public release of f2py
-(version 2.3.321):
-
- http://cens.ioc.ee/projects/f2py2e/
-
-f2py is a command line tool for binding Python and Fortran codes. It
-scans Fortran 77/90/95 codes and generates a Python C/API module that
-makes it possible to call Fortran subroutines from Python. No Fortran or
-C expertise is required for using this tool.
-
-Features include:
-
- *** All basic Fortran types are supported:
- integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
- character[ | *(*) | *1 | *2 | *3 | ... ]
- real[ | *4 | *8 | *16 ], double precision,
- complex[ | *8 | *16 | *32 ]
-
- *** Multi-dimensional arrays of (almost) all basic types.
- Dimension specifications:
- <dim> | <start>:<end> | * | :
-
- *** Supported attributes and statements:
- intent([ in | inout | out | hide | in,out | inout,out ])
- dimension(<dimspec>)
- depend([<names>])
- check([<C-booleanexpr>])
- note(<LaTeX text>)
- optional, required, external
-NEW: intent(c), threadsafe, fortranname
-
- *** Calling Fortran 77/90/95 subroutines and functions. Also
- Fortran 90/95 module subroutines are supported. Internal
- initialization of optional arguments.
-
- *** Accessing COMMON blocks from Python.
-NEW: Accessing Fortran 90/95 module data.
-
- *** Call-back functions: calling Python functions from Fortran with
- very flexible hooks.
-
- *** In Python, arguments of the interfaced functions may be of
- different type - necessary type conversations are done
- internally in C level.
-
- *** Automatically generates documentation (__doc__,LaTeX) for
- interfaced functions.
-
- *** Automatically generates signature files --- user has full
- control over the interface constructions. Automatically
- detects the signatures of call-back functions, solves argument
- dependencies, etc.
-
-NEW: * Automatically generates setup_<modulename>.py for building
- extension modules using tools from distutils and
- fortran_support module (SciPy).
-
- *** Automatically generates Makefile for compiling Fortran and C
- codes and linking them to a shared module. Many compilers are
- supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
- F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
- Linux, HP-UX, IRIX64.
-
- *** Complete User's Guide in various formats (html,ps,pdf,dvi).
-
- *** f2py users list is available for support, feedback, etc.
-
-NEW: * Installation with distutils.
-
- *** And finally, many bugs are fixed.
-
-More information about f2py, see
-
- http://cens.ioc.ee/projects/f2py2e/
-
-LICENSE:
- f2py is released under the LGPL.
-
-Sincerely,
- Pearu Peterson <pearu@cens.ioc.ee>
- December 4, 2001
-
-<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.3.321</A> - The
-Fortran to Python Interface Generator (04-Dec-01)
diff --git a/doc/f2py/Release-4.x.txt b/doc/f2py/Release-4.x.txt
deleted file mode 100644
index d490dcb7a..000000000
--- a/doc/f2py/Release-4.x.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-
-F2PY - Fortran to Python Interface Generator
-
-I am pleased to announce the fourth public release of f2py
-(version 2.4.366):
-
- http://cens.ioc.ee/projects/f2py2e/
-
-f2py is a command line tool for binding Python and Fortran codes. It
-scans Fortran 77/90/95 codes and generates a Python C/API module that
-makes it possible to call Fortran subroutines from Python. No Fortran or
-C expertise is required for using this tool.
-
-New features:
- *** Win32 support.
- *** Better Python C/API generated code (-Wall is much less verbose).
-
-Features include:
-
- *** All basic Fortran types are supported:
- integer[ | *1 | *2 | *4 | *8 ], logical[ | *1 | *2 | *4 | *8 ],
- character[ | *(*) | *1 | *2 | *3 | ... ]
- real[ | *4 | *8 | *16 ], double precision,
- complex[ | *8 | *16 | *32 ]
-
- *** Multi-dimensional arrays of (almost) all basic types.
- Dimension specifications:
- <dim> | <start>:<end> | * | :
-
- *** Supported attributes and statements:
- intent([ in | inout | out | hide | in,out | inout,out ])
- dimension(<dimspec>)
- depend([<names>])
- check([<C-booleanexpr>])
- note(<LaTeX text>)
- optional, required, external
- intent(c), threadsafe, fortranname
-
- *** Calling Fortran 77/90/95 subroutines and functions. Also
- Fortran 90/95 module subroutines are supported. Internal
- initialization of optional arguments.
-
- *** Accessing COMMON blocks from Python.
- Accessing Fortran 90/95 module data.
-
- *** Call-back functions: calling Python functions from Fortran with
- very flexible hooks.
-
- *** In Python, arguments of the interfaced functions may be of
- different type - necessary type conversations are done
- internally in C level.
-
- *** Automatically generates documentation (__doc__,LaTeX) for
- interfaced functions.
-
- *** Automatically generates signature files --- user has full
- control over the interface constructions. Automatically
- detects the signatures of call-back functions, solves argument
- dependencies, etc.
-
- *** Automatically generates setup_<modulename>.py for building
- extension modules using tools from distutils and
- fortran_support module (SciPy).
-
- *** Automatically generates Makefile for compiling Fortran and C
- codes and linking them to a shared module. Many compilers are
- supported: gcc, Compaq Fortran, VAST/f90 Fortran, Absoft
- F77/F90, MIPSpro 7 Compilers, etc. Platforms: Intel/Alpha
- Linux, HP-UX, IRIX64.
-
- *** Complete User's Guide in various formats (html,ps,pdf,dvi).
-
- *** f2py users list is available for support, feedback, etc.
-
- *** Installation with distutils.
-
- *** And finally, many bugs are fixed.
-
-More information about f2py, see
-
- http://cens.ioc.ee/projects/f2py2e/
-
-LICENSE:
- f2py is released under the LGPL.
-
-Sincerely,
- Pearu Peterson <pearu@cens.ioc.ee>
- December 17, 2001
-
-<P><A HREF="http://cens.ioc.ee/projects/f2py2e/">f2py 2.4.366</A> - The
-Fortran to Python Interface Generator (17-Dec-01)
diff --git a/doc/f2py/TESTING.txt b/doc/f2py/TESTING.txt
deleted file mode 100644
index 00817e48f..000000000
--- a/doc/f2py/TESTING.txt
+++ /dev/null
@@ -1,108 +0,0 @@
-
-=======================================================
- F2PY unit testing site
-=======================================================
-
-.. Contents::
-
-Tests
------
-
-* To run all F2PY unit tests in one command::
-
- cd tests
- python run_all.py [<options>]
-
- For example::
-
- localhost:~/src_cvs/f2py2e/tests$ python2.2 run_all.py 100 --quiet
- **********************************************
- Running '/usr/bin/python2.2 f77/return_integer.py 100 --quiet'
- run 1000 tests in 1.87 seconds
- initial virtual memory size: 3952640 bytes
- current virtual memory size: 3952640 bytes
- ok
- **********************************************
- Running '/usr/bin/python2.2 f77/return_logical.py 100 --quiet'
- run 1000 tests in 1.47 seconds
- initial virtual memory size: 3952640 bytes
- current virtual memory size: 3952640 bytes
- ok
- ...
-
- If some tests fail, try to run the failing tests separately (without
- the ``--quiet`` option) as described below to get more information
- about the failure.
-
-* Test intent(in), intent(out) scalar arguments,
- scalars returned by F77 functions
- and F90 module functions::
-
- tests/f77/return_integer.py
- tests/f77/return_real.py
- tests/f77/return_logical.py
- tests/f77/return_complex.py
- tests/f77/return_character.py
- tests/f90/return_integer.py
- tests/f90/return_real.py
- tests/f90/return_logical.py
- tests/f90/return_complex.py
- tests/f90/return_character.py
-
- Change to tests/ directory and run::
-
- python f77/return_<type>.py [<options>]
- python f90/return_<type>.py [<options>]
-
- where ``<type>`` is integer, real, logical, complex, or character.
- Test scripts options are described below.
-
- A test is considered successful if the last printed line is "ok".
-
- If you get import errors like::
-
- ImportError: No module named f77_ext_return_integer
-
- but ``f77_ext_return_integer.so`` exists in the current directory then
- it means that the current directory is not included in to `sys.path`
- in your Python installation. As a fix, prepend ``.`` to ``PYTHONPATH``
- environment variable and rerun the tests. For example::
-
- PYTHONPATH=. python f77/return_integer.py
-
-* Test mixing Fortran 77, Fortran 90 fixed and free format codes::
-
- tests/mixed/run.py
-
-* Test basic callback hooks::
-
- tests/f77/callback.py
-
-Options
--------
-
-You may want to use the following options when running the test
-scripts:
-
-``<integer>``
- Run tests ``<integer>`` times. Useful for detecting memory leaks. Under
- Linux tests scripts output virtual memory size state of the process
- before and after calling the wrapped functions.
-
-``--quiet``
- Suppress all messages. On success only "ok" should be displayed.
-
-``--fcompiler=<Gnu|Intel|...>``
- Use::
-
- f2py -c --help-fcompiler
-
- to find out what compilers are available (or more precisely, which
- ones are recognized by ``numpy_distutils``).
-
-Reporting failures
-------------------
-
-XXX: (1) make sure that failures are due to f2py and (2) send full
-stdout/stderr messages to me. Also add compiler,python,platform
-information.
diff --git a/doc/f2py/THANKS.txt b/doc/f2py/THANKS.txt
deleted file mode 100644
index 636540687..000000000
--- a/doc/f2py/THANKS.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-
-=================
- Acknowledgments
-=================
-
-F2PY__ is an open source Python package and command line tool developed and
-maintained by Pearu Peterson (me__).
-
-.. __: http://cens.ioc.ee/projects/f2py2e/
-.. __: http://cens.ioc.ee/~pearu/
-
-Many people have contributed to the F2PY project in terms of interest,
-encouragement, suggestions, criticism, bug reports, code
-contributions, and keeping me busy with developing F2PY. For all that
-I thank
-
- James Amundson, John Barnard, David Beazley, Frank Bertoldi, Roman
- Bertle, James Boyle, Moritz Braun, Rolv Erlend Bredesen, John
- Chaffer, Fred Clare, Adam Collard, Ben Cornett, Jose L Gomez Dans,
- Jaime D. Perea Duarte, Paul F Dubois, Thilo Ernst, Bonilla Fabian,
- Martin Gelfand, Eduardo A. Gonzalez, Siegfried Gonzi, Bernhard
- Gschaider, Charles Doutriaux, Jeff Hagelberg, Janko Hauser, Thomas
- Hauser, Heiko Henkelmann, William Henney, Yueqiang Huang, Asim
- Hussain, Berthold Höllmann, Vladimir Janku, Henk Jansen, Curtis
- Jensen, Eric Jones, Tiffany Kamm, Andrey Khavryuchenko, Greg
- Kochanski, Jochen Küpper, Simon Lacoste-Julien, Tim Lahey, Hans
- Petter Langtangen, Jeff Layton, Matthew Lewis, Patrick LeGresley,
- Joaquim R R A Martins, Paul Magwene Lionel Maziere, Craig McNeile,
- Todd Miller, David C. Morrill, Dirk Muders, Kevin Mueller, Andrew
- Mullhaupt, Vijayendra Munikoti, Travis Oliphant, Kevin O'Mara, Arno
- Paehler, Fernando Perez, Didrik Pinte, Todd Alan Pitts, Prabhu
- Ramachandran, Brad Reisfeld, Steve M. Robbins, Theresa Robinson,
- Pedro Rodrigues, Les Schaffer, Christoph Scheurer, Herb Schilling,
- Pierre Schnizer, Kevin Smith, Paulo Teotonio Sobrinho, José Rui
- Faustino de Sousa, Andrew Swan, Dustin Tang, Charlie Taylor, Paul le
- Texier, Michael Tiller, Semen Trygubenko, Ravi C Venkatesan, Peter
- Verveer, Nils Wagner, R. Clint Whaley, Erik Wilsher, Martin
- Wiechert, Gilles Zerah, SungPil Yoon.
-
-(This list may not be complete. Please forgive me if I have left you
-out and let me know, I'll add your name.)
-
-Special thanks are due to ...
-
-Eric Jones - he and Travis O. are responsible for starting the
-numpy_distutils project that allowed to move most of the platform and
-compiler specific codes out from F2PY. This simplified maintaining the
-F2PY project considerably.
-
-Joaquim R R A Martins - he made possible for me to test F2PY on IRIX64
-platform. He also presented our paper about F2PY in the 9th Python
-Conference that I planned to attend but had to cancel in very last
-minutes.
-
-Travis Oliphant - his knowledge and experience on Numerical Python
-C/API has been invaluable in early development of the F2PY program.
-His major contributions are call-back mechanism and copying N-D arrays
-of arbitrary types.
-
-Todd Miller - he is responsible for Numarray support in F2PY.
-
-Thanks!
- Pearu
diff --git a/doc/f2py/TODO.txt b/doc/f2py/TODO.txt
deleted file mode 100644
index a883f75d0..000000000
--- a/doc/f2py/TODO.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-Determine fixed/free format Fortran 90 dialect from the
-contents of Fortran files. See numpy_distutils/command/build_flib.py.
-
-[DONE]
-========================================================================
-Wrapping F90 code as follows:
-
-subroutine foo
-print*,"In foo"
-end subroutine foo
-subroutine bar(func)
- interface aa ! bug: this interface block is ignored
- subroutine foo
- end subroutine foo
- end interface
- !external foo
- external func
- call func(foo)
-end subroutine bar
-subroutine gun(a)
- external a
- call a()
-end subroutine gun
-subroutine fun
- call bar(gun)
-end subroutine fun
-
-=========================================================================
-Users Guide needs major revision.
-
-[DONE]
-=========================================================================
-On Thu, 27 Sep 2001, José Luis Gómez Dans wrote:
-
-> Hi,
-> just one question: does f2py supporte derived types in F90 code?
-> Stuff like something%or and things like that.
-
-Not yet.
-
-=========================================================================
-Date: Tue, 28 Aug 2001 22:23:04 -0700
-From: Patrick LeGresley <plegresl@ape.stanford.edu>
-To: f2py-users@cens.ioc.ee
-Subject: [f2py] Strange initialization of allocatable arrays
-
-I've noticed an odd behavior when setting an allocatable, multidimensional
-array in a module. If the rank of the array is odd, the initialization is
-fine. However, if the rank is even only the first element of the array is
-set properly. See the attached sample code for example.
-
-=========================================================================
-On Wed, 22 Aug 2001, Patrick LeGresley wrote:
-
-> I've noticed that if a parameter is defined in terms of another parameter,
-> that the parameter is replaced not by a number but by another parameter
-> (try the attached subroutine for example). Is there any way to have f2py
-> automatically recognize the dependencies and generate a signature file
-> without parameter variables ?
-
-It is certainly possible. In fact, f2py has only a basic support for
-PARAMETER statements and it fails in your 'advanced' example to produce a
-robust signature file.
-I am sorry but you have to wait until I'll get back from my travel tour
-(somewhere in the middle of September) and get a chance to work on it.
-
-[DONE]
diff --git a/doc/f2py/apps.tex b/doc/f2py/apps.tex
deleted file mode 100644
index bd88d09bf..000000000
--- a/doc/f2py/apps.tex
+++ /dev/null
@@ -1,71 +0,0 @@
-
-\section{Applications}
-\label{sec:apps}
-
-
-\subsection{Example: wrapping C library \texttt{fftw}}
-\label{sec:wrapfftw}
-
-Here follows a simple example how to use \fpy to generate a wrapper
-for C functions. Let us create a FFT code using the functions in FFTW
-library. I'll assume that the library \texttt{fftw} is configured with
-\texttt{-{}-enable-shared} option.
-
-Here is the wrapper for the typical usage of FFTW:
-\begin{verbatim}
-/* File: wrap_dfftw.c */
-#include <dfftw.h>
-
-extern void dfftw_one(fftw_complex *in,fftw_complex *out,int *n) {
- fftw_plan p;
- p = fftw_create_plan(*n,FFTW_FORWARD,FFTW_ESTIMATE);
- fftw_one(p,in,out);
- fftw_destroy_plan(p);
-}
-\end{verbatim}
-and here follows the corresponding signature file (created manually):
-\begin{verbatim}
-!%f90
-! File: fftw.f90
-module fftw
- interface
- subroutine dfftw_one(in,out,n)
- integer n
- complex*16 in(n),out(n)
- intent(out) out
- intent(hide) n
- end subroutine dfftw_one
- end interface
-end module fftw
-\end{verbatim}
-
-Now let us generate the Python C/API module with \fpy:
-\begin{verbatim}
-f2py fftw.f90
-\end{verbatim}
-and compile it
-\begin{verbatim}
-gcc -shared -I/numeric/include -I`f2py -I` -L/numeric/lib -ldfftw \
- -o fftwmodule.so -DNO_APPEND_FORTRAN fftwmodule.c wrap_dfftw.c
-\end{verbatim}
-
-In Python:
-\begin{verbatim}
->>> from Numeric import *
->>> from fftw import *
->>> print dfftw_one.__doc__
-Function signature:
- out = dfftw_one(in)
-Required arguments:
- in : input rank-1 array('D') with bounds (n)
-Return objects:
- out : rank-1 array('D') with bounds (n)
->>> print dfftw_one([1,2,3,4])
-[ 10.+0.j -2.+2.j -2.+0.j -2.-2.j]
->>>
-\end{verbatim}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
diff --git a/doc/f2py/bugs.tex b/doc/f2py/bugs.tex
deleted file mode 100644
index bbfce0f9a..000000000
--- a/doc/f2py/bugs.tex
+++ /dev/null
@@ -1,109 +0,0 @@
-
-\section{Bugs, Plans, and Feedback}
-\label{sec:bugs}
-
-Currently no bugs have found that I was not able to fix. I will be
-happy to receive bug reports from you (so that I could fix them and
-keep the first sentence of this paragraph as true as possible ;-).
-Note that \fpy is developed to work properly with gcc/g77
-compilers.
-\begin{description}
-\item[NOTE:] Wrapping callback functions returning \texttt{COMPLEX}
- may fail on some systems. Workaround: avoid it by using callback
- subroutines.
-\end{description}
-
-Here follows a list of things that I plan to implement in (near) future:
-\begin{enumerate}
-\item recognize file types by their extension (signatures:
- \texttt{*.pyf}, Fortran 77, Fortran 90 fixed: \texttt{*.f, *.for, *.F, *.FOR},
- Fortran 90 free: \texttt{*.F90, *.f90, *.m, *.f95, *.F95}); [DONE]
-\item installation using \texttt{distutils} (when it will be stable);
-\item put out to the web examples of \fpy usages in real situations:
- wrapping \texttt{vode}, for example;
-\item implement support for \texttt{PARAMETER} statement; [DONE]
-\item rewrite test-site;
-\item ...
-\end{enumerate}
-and here are things that I plan to do in future:
-\begin{enumerate}
-\item implement \texttt{intent(cache)} attribute for an optional work
- arrays with a feature of allocating additional memory if needed;
-\item use \fpy for wrapping Fortran 90/95 codes. \fpy should scan
- Fortran 90/95 codes with no problems, what needs to be done is find
- out how to call a Fortran 90/95 function (from a module) from
- C. Anybody there willing to test \fpy with Fortran 90/95 modules? [DONE]
-\item implement support for Fortran 90/95 module data; [DONE]
-\item implement support for \texttt{BLOCK DATA} blocks (if needed);
-\item test/document \fpy for \texttt{CHARACTER} arrays;
-\item decide whether internal transposition of multi-dimensional
- arrays is reasonable (need efficient code then), even if this is
- controlled by the user trough some additional keyword; need
- consistent and safe policy here;
-\item use \fpy for generating wrapper functions also for C programs (a
- kind of SWIG, only between Python and C). For that \fpy needs a
- command line switch to inform itself that C scalars are passed in by
- their value, not by their reference, for instance;
-\item introduce a counter that counts the number of inefficient usages
- of wrapper functions (copying caused by type-casting, non-contiguous
- arrays);
-\item if needed, make \texttt{DATA} statement to work properly for
- arrays;
-\item rewrite \texttt{COMMON} wrapper; [DONE]
-\item ...
-\end{enumerate}
-I'll appreciate any feedback that will improve \fpy (bug reports,
-suggestions, etc). If you find a correct Fortran code that fails with
-\fpy, try to send me a minimal version of it so that I could track
-down the cause of the failure. Note also that there is no sense to
-send me files that are auto-generated with \fpy (I can generate them
-myself); the version of \fpy that you are using (run \texttt{\fpy\
- -v}), and the relevant fortran codes or modified signature files
-should be enough information to fix the bugs. Also add some
-information on compilers and linkers that you use to the bug report.
-
-
-\section{History of \fpy}
-\label{sec:history}
-
-\begin{enumerate}
-\item I was driven to start developing a tool such as \fpy after I had
- wrote several Python C/API modules for interfacing various Fortran
- routines from the Netlib. This work was tedious (some of functions
- had more than 20 arguments, only few of them made sense for the
- problems that they solved). I realized that most of the writing
- could be done automatically.
-\item On 9th of July, 1999, the first lines of the tool was written. A
- prototype of the tool was ready to use in only three weeks. During
- this time Travis Oliphant joined to the project and shared his
- valuable knowledge and experience; the call-back mechanism is his
- major contribution. Then I gave the tool to public under the name
- FPIG --- \emph{Fortran to Python Interface Generator}. The tool contained
- only one file \texttt{f2py.py}.
-\item By autumn, it was clear that a better implementation was needed
- as the debugging process became very tedious. So, I reserved some
- time and rewrote the tool from scratch. The most important result of
- this rewriting was the code that reads real Fortran codes and
- determines the signatures of the Fortran routines. The main
- attention was paid in particular to this part so that the tool
- could read arbitrary Fortran~77/90/95 codes. As a result, the other
- side of the tools task, that is, generating Python C/API functions,
- was not so great. In public, this version of the tool was called
- \texttt{f2py2e} --- \emph{Fortran to Python C/API generator, the
- Second Edition}.
-\item So, a month before The New Year 2000, I started the third
- iteration of the \fpy development. Now the main attention was to
- have a good C/API module constructing code. By 21st of January,
- 2000, the tool of generating wrapper functions for Fortran routines
- was ready. It had many new features and was more robust than ever.
-\item In 25th of January, 2000, the first public release of \fpy was
- announced (version 1.116).
-\item In 12th of September, 2000, the second public release of \fpy was
- announced (version 2.264). It now has among other changes a support
- for Fortran 90/95 module routines.
-\end{enumerate}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
diff --git a/doc/f2py/collectinput.py b/doc/f2py/collectinput.py
deleted file mode 100755
index 818c759f4..000000000
--- a/doc/f2py/collectinput.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-r"""
-collectinput - Collects all files that are included to a main Latex document
- with \input or \include commands. These commands must be
- in separate lines.
-
-Copyright 1999 Pearu Peterson all rights reserved,
-Pearu Peterson <pearu@ioc.ee>
-Permission to use, modify, and distribute this software is given under the
-terms of the NumPy License
-
-NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
-
-Pearu Peterson
-
-Usage:
- collectinput <infile> <outfile>
- collectinput <infile> # <outfile>=inputless_<infile>
- collectinput # in and out are stdin and stdout
-
-"""
-from __future__ import division, absolute_import, print_function
-
-__version__ = "0.0"
-
-stdoutflag=0
-import sys
-import fileinput
-import re
-
-if sys.version_info[0] >= 3:
- from subprocess import getoutput
-else:
- from commands import getoutput
-
-try: fn=sys.argv[2]
-except:
- try: fn='inputless_'+sys.argv[1]
- except: stdoutflag=1
-try: fi=sys.argv[1]
-except: fi=()
-if not stdoutflag:
- sys.stdout=open(fn, 'w')
-
-nonverb=r'[\w\s\\&=\^\*\.\{\(\)\[\?\+\$/]*(?!\\verb.)'
-input=re.compile(nonverb+r'\\(input|include)\*?\s*\{?.*}?')
-comment=re.compile(r'[^%]*%')
-
-for l in fileinput.input(fi):
- l=l[:-1]
- l1=''
- if comment.match(l):
- m=comment.match(l)
- l1=l[m.end()-1:]
- l=l[:m.end()-1]
- m=input.match(l)
- if m:
- l=l.strip()
- if l[-1]=='}': l=l[:-1]
- i=m.end()-2
- sys.stderr.write('>>>>>>')
- while i>-1 and (l[i] not in [' ', '{']): i=i-1
- if i>-1:
- fn=l[i+1:]
- try: f=open(fn, 'r'); flag=1; f.close()
- except:
- try: f=open(fn+'.tex', 'r'); flag=1;fn=fn+'.tex'; f.close()
- except: flag=0
- if flag==0:
- sys.stderr.write('Could not open a file: '+fn+'\n')
- print(l+l1)
- continue
- elif flag==1:
- sys.stderr.write(fn+'\n')
- print('%%%%% Begin of '+fn)
- print(getoutput(sys.argv[0]+' < '+fn))
- print('%%%%% End of '+fn)
- else:
- sys.stderr.write('Could not extract a file name from: '+l)
- print(l+l1)
- else:
- print(l+l1)
-sys.stdout.close()
diff --git a/doc/f2py/commands.tex b/doc/f2py/commands.tex
deleted file mode 100644
index 5101a9ff5..000000000
--- a/doc/f2py/commands.tex
+++ /dev/null
@@ -1,20 +0,0 @@
-\usepackage{xspace}
-\usepackage{verbatim}
-
-%%tth:\newcommand{\xspace}{ }
-
-\newcommand{\fpy}{\texttt{f2py}\xspace}
-
-\newcommand{\bs}{\symbol{`\\}}
-% need bs here:
-%%tth:\newcommand{\bs}{\texttt{<backslash>}}
-
-\newcommand{\shell}[1]{\hspace*{1em}\texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
-
-
diff --git a/doc/f2py/default.css b/doc/f2py/default.css
deleted file mode 100644
index 9289e2826..000000000
--- a/doc/f2py/default.css
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
-:Author: David Goodger
-:Contact: goodger@users.sourceforge.net
-:date: $Date: 2002/08/01 20:52:44 $
-:version: $Revision: 1.1 $
-:copyright: This stylesheet has been placed in the public domain.
-
-Default cascading style sheet for the HTML output of Docutils.
-*/
-
-body {
- background: #FFFFFF ;
- color: #000000
-}
-
-a.footnote-reference {
- font-size: smaller ;
- vertical-align: super }
-
-a.target {
- color: blue }
-
-a.toc-backref {
- text-decoration: none ;
- color: black }
-
-dd {
- margin-bottom: 0.5em }
-
-div.abstract {
- margin: 2em 5em }
-
-div.abstract p.topic-title {
- font-weight: bold ;
- text-align: center }
-
-div.attention, div.caution, div.danger, div.error, div.hint,
-div.important, div.note, div.tip, div.warning {
- margin: 2em ;
- border: medium outset ;
- padding: 1em }
-
-div.attention p.admonition-title, div.caution p.admonition-title,
-div.danger p.admonition-title, div.error p.admonition-title,
-div.warning p.admonition-title {
- color: red ;
- font-weight: bold ;
- font-family: sans-serif }
-
-div.hint p.admonition-title, div.important p.admonition-title,
-div.note p.admonition-title, div.tip p.admonition-title {
- font-weight: bold ;
- font-family: sans-serif }
-
-div.dedication {
- margin: 2em 5em ;
- text-align: center ;
- font-style: italic }
-
-div.dedication p.topic-title {
- font-weight: bold ;
- font-style: normal }
-
-div.figure {
- margin-left: 2em }
-
-div.footer, div.header {
- font-size: smaller }
-
-div.system-messages {
- margin: 5em }
-
-div.system-messages h1 {
- color: red }
-
-div.system-message {
- border: medium outset ;
- padding: 1em }
-
-div.system-message p.system-message-title {
- color: red ;
- font-weight: bold }
-
-div.topic {
- margin: 2em }
-
-h1.title {
- text-align: center }
-
-h2.subtitle {
- text-align: center }
-
-hr {
- width: 75% }
-
-ol.simple, ul.simple {
- margin-bottom: 1em }
-
-ol.arabic {
- list-style: decimal }
-
-ol.loweralpha {
- list-style: lower-alpha }
-
-ol.upperalpha {
- list-style: upper-alpha }
-
-ol.lowerroman {
- list-style: lower-roman }
-
-ol.upperroman {
- list-style: upper-roman }
-
-p.caption {
- font-style: italic }
-
-p.credits {
- font-style: italic ;
- font-size: smaller }
-
-p.first {
- margin-top: 0 }
-
-p.label {
- white-space: nowrap }
-
-p.topic-title {
- font-weight: bold }
-
-pre.literal-block, pre.doctest-block {
- margin-left: 2em ;
- margin-right: 2em ;
- background-color: #eeeeee }
-
-span.classifier {
- font-family: sans-serif ;
- font-style: oblique }
-
-span.classifier-delimiter {
- font-family: sans-serif ;
- font-weight: bold }
-
-span.field-argument {
- font-style: italic }
-
-span.interpreted {
- font-family: sans-serif }
-
-span.option-argument {
- font-style: italic }
-
-span.problematic {
- color: red }
-
-table {
- margin-top: 0.5em ;
- margin-bottom: 0.5em }
-
-table.citation {
- border-left: solid thin gray ;
- padding-left: 0.5ex }
-
-table.docinfo {
- margin: 2em 4em }
-
-table.footnote {
- border-left: solid thin black ;
- padding-left: 0.5ex }
-
-td, th {
- padding-left: 0.5em ;
- padding-right: 0.5em ;
- vertical-align: baseline }
-
-td.docinfo-name {
- font-weight: bold ;
- text-align: right }
-
-td.field-name {
- font-weight: bold }
diff --git a/doc/f2py/docutils.conf b/doc/f2py/docutils.conf
deleted file mode 100644
index 4e5a8425b..000000000
--- a/doc/f2py/docutils.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-[general]
-
-# These entries affect all processing:
-#source-link: 1
-datestamp: %Y-%m-%d %H:%M UTC
-generator: 1
-
-# These entries affect HTML output:
-#stylesheet-path: pearu_style.css
-output-encoding: latin-1
-
-# These entries affect reStructuredText-style PEPs:
-#pep-template: pep-html-template
-#pep-stylesheet-path: stylesheets/pep.css
-#python-home: http://www.python.org
-#no-random: 1
diff --git a/doc/f2py/ex1/arr.f b/doc/f2py/ex1/arr.f
deleted file mode 100644
index c4e49988f..000000000
--- a/doc/f2py/ex1/arr.f
+++ /dev/null
@@ -1,4 +0,0 @@
- subroutine arr(l,m,n,a)
- integer l,m,n
- real*8 a(l,m,n)
- end
diff --git a/doc/f2py/ex1/bar.f b/doc/f2py/ex1/bar.f
deleted file mode 100644
index c723b5af1..000000000
--- a/doc/f2py/ex1/bar.f
+++ /dev/null
@@ -1,4 +0,0 @@
- function bar(a,b)
- integer a,b,bar
- bar = a + b
- end
diff --git a/doc/f2py/ex1/foo.f b/doc/f2py/ex1/foo.f
deleted file mode 100644
index cdcac4103..000000000
--- a/doc/f2py/ex1/foo.f
+++ /dev/null
@@ -1,5 +0,0 @@
- subroutine foo(a)
- integer a
-cf2py intent(in,out) :: a
- a = a + 5
- end
diff --git a/doc/f2py/ex1/foobar-smart.f90 b/doc/f2py/ex1/foobar-smart.f90
deleted file mode 100644
index 61385a685..000000000
--- a/doc/f2py/ex1/foobar-smart.f90
+++ /dev/null
@@ -1,24 +0,0 @@
-!%f90
-module foobar ! in
- note(This module contains two examples that are used in &
- \texttt{f2py} documentation.) foobar
- interface ! in :foobar
- subroutine foo(a) ! in :foobar:foo.f
- note(Example of a wrapper function of a Fortran subroutine.) foo
- integer intent(inout),&
- note(5 is added to the variable {{}\verb@a@{}} ``in place''.) :: a
- end subroutine foo
- function bar(a,b) result (ab) ! in :foobar:bar.f
- integer :: a
- integer :: b
- integer :: ab
- note(The first value.) a
- note(The second value.) b
- note(Add two values.) bar
- note(The result.) ab
- end function bar
- end interface
-end module foobar
-
-! This file was auto-generated with f2py (version:0.95).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/ex1/foobar.f90 b/doc/f2py/ex1/foobar.f90
deleted file mode 100644
index 53ac5b506..000000000
--- a/doc/f2py/ex1/foobar.f90
+++ /dev/null
@@ -1,16 +0,0 @@
-!%f90
-module foobar ! in
- interface ! in :foobar
- subroutine foo(a) ! in :foobar:foo.f
- integer intent(inout) :: a
- end subroutine foo
- function bar(a,b) ! in :foobar:bar.f
- integer :: a
- integer :: b
- integer :: bar
- end function bar
- end interface
-end module foobar
-
-! This file was auto-generated with f2py (version:0.95).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/ex1/foobarmodule.tex b/doc/f2py/ex1/foobarmodule.tex
deleted file mode 100644
index 32411ec03..000000000
--- a/doc/f2py/ex1/foobarmodule.tex
+++ /dev/null
@@ -1,36 +0,0 @@
-% This file is auto-generated with f2py (version:2.266)
-\section{Module \texttt{foobar}}
-
-This module contains two examples that are used in \texttt{f2py} documentation.
-
-\subsection{Wrapper function \texttt{foo}}
-
-
-\noindent{{}\verb@foo@{}}\texttt{(a)}
---- Example of a wrapper function of a Fortran subroutine.
-
-\noindent Required arguments:
-\begin{description}
-\item[]{{}\verb@a : in/output rank-0 array(int,'i')@{}}
---- 5 is added to the variable {{}\verb@a@{}} ``in place''.
-\end{description}
-
-\subsection{Wrapper function \texttt{bar}}
-
-
-\noindent{{}\verb@bar = bar@{}}\texttt{(a, b)}
---- Add two values.
-
-\noindent Required arguments:
-\begin{description}
-\item[]{{}\verb@a : input int@{}}
---- The first value.
-\item[]{{}\verb@b : input int@{}}
---- The second value.
-\end{description}
-\noindent Return objects:
-\begin{description}
-\item[]{{}\verb@bar : int@{}}
---- See elsewhere.
-\end{description}
-
diff --git a/doc/f2py/ex1/runme b/doc/f2py/ex1/runme
deleted file mode 100755
index 2aac6158e..000000000
--- a/doc/f2py/ex1/runme
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/bin/sh
-
-f2py2e='python ../../f2py2e.py'
-PYINC=`$f2py2e -pyinc`
-$f2py2e foobar-smart.pyf --short-latex --overwrite-makefile -makefile foo.f bar.f
-gmake -f Makefile-foobar
-#gcc -O3 -I$PYINC -I$PYINC/Numeric -shared -o foobarmodule.so foobarmodule.c foo.f bar.f
-python -c '
-import foobar
-print foobar.__doc__
-print foobar.bar(2,3)
-from Numeric import *
-a=array(3)
-print a,foobar.foo(a),a
-print foobar.foo.__doc__
-print foobar.bar.__doc__
-print "ok"
-'
diff --git a/doc/f2py/f2py.1 b/doc/f2py/f2py.1
deleted file mode 100644
index 7f51ea29d..000000000
--- a/doc/f2py/f2py.1
+++ /dev/null
@@ -1,209 +0,0 @@
-.TH "F2PY" 1
-.SH NAME
-f2py \- Fortran to Python interface generator
-.SH SYNOPSIS
-(1) To construct extension module sources:
-
-.B f2py
-[<options>] <fortran files> [[[only:]||[skip:]] <fortran functions> ] [: <fortran files> ...]
-
-(2) To compile fortran files and build extension modules:
-
-.B f2py
-\-c [<options>, <config_fc options>, <extra options>] <fortran files>
-
-(3) To generate signature files:
-
-.B f2py
-\-h <filename.pyf> ...< same options as in (1) >
-.SH DESCRIPTION
-This program generates a Python C/API file (<modulename>module.c)
-that contains wrappers for given Fortran or C functions so that they
-can be called from Python.
-With the \-c option the corresponding
-extension modules are built.
-.SH OPTIONS
-.TP
-.B \-h <filename>
-Write signatures of the fortran routines to file <filename> and
-exit. You can then edit <filename> and use it instead of <fortran
-files>. If <filename>==stdout then the signatures are printed to
-stdout.
-.TP
-.B <fortran functions>
-Names of fortran routines for which Python C/API functions will be
-generated. Default is all that are found in <fortran files>.
-.TP
-.B skip:
-Ignore fortran functions that follow until `:'.
-.TP
-.B only:
-Use only fortran functions that follow until `:'.
-.TP
-.B :
-Get back to <fortran files> mode.
-.TP
-.B \-m <modulename>
-Name of the module; f2py generates a Python/C API file
-<modulename>module.c or extension module <modulename>. Default is
-\'untitled\'.
-.TP
-.B \-\-[no\-]lower
-Do [not] lower the cases in <fortran files>. By default, \-\-lower is
-assumed with \-h key, and \-\-no\-lower without \-h key.
-.TP
-.B \-\-build\-dir <dirname>
-All f2py generated files are created in <dirname>. Default is tempfile.mkdtemp().
-.TP
-.B \-\-overwrite\-signature
-Overwrite existing signature file.
-.TP
-.B \-\-[no\-]latex\-doc
-Create (or not) <modulename>module.tex. Default is \-\-no\-latex\-doc.
-.TP
-.B \-\-short\-latex
-Create 'incomplete' LaTeX document (without commands \\documentclass,
-\\tableofcontents, and \\begin{document}, \\end{document}).
-.TP
-.B \-\-[no\-]rest\-doc
-Create (or not) <modulename>module.rst. Default is \-\-no\-rest\-doc.
-.TP
-.B \-\-debug\-capi
-Create C/API code that reports the state of the wrappers during
-runtime. Useful for debugging.
-.TP
-.B \-include\'<includefile>\'
-Add CPP #include statement to the C/API code. <includefile> should be
-in the format of either `"filename.ext"' or `<filename.ext>'. As a
-result <includefile> will be included just before wrapper functions
-part in the C/API code. The option is depreciated, use `usercode`
-statement in signature files instead.
-.TP
-.B \-\-[no\-]wrap\-functions
-Create Fortran subroutine wrappers to Fortran 77
-functions. \-\-wrap\-functions is default because it ensures maximum
-portability/compiler independence.
-.TP
-.B \-\-help\-link [..]
-List system resources found by system_info.py. [..] may contain
-a list of resources names. See also \-\-link\-<resource> switch below.
-.TP
-.B \-\-quiet
-Run quietly.
-.TP
-.B \-\-verbose
-Run with extra verbosity.
-.TP
-.B \-v
-Print f2py version ID and exit.
-.TP
-.B \-\-include_paths path1:path2:...
-Search include files (that f2py will scan) from the given directories.
-.SH "CONFIG_FC OPTIONS"
-The following options are effective only when \-c switch is used.
-.TP
-.B \-\-help-compiler
-List available Fortran compilers [DEPRECIATED].
-.TP
-.B \-\-fcompiler=<name>
-Specify Fortran compiler type by vendor.
-.TP
-.B \-\-compiler=<name>
-Specify C compiler type (as defined by distutils)
-.TP
-.B \-\-fcompiler-exec=<path>
-Specify the path to F77 compiler [DEPRECIATED].
-.TP
-.B \-\-f90compiler\-exec=<path>
-Specify the path to F90 compiler [DEPRECIATED].
-.TP
-.B \-\-help\-fcompiler
-List available Fortran compilers and exit.
-.TP
-.B \-\-f77exec=<path>
-Specify the path to F77 compiler.
-.TP
-.B \-\-f90exec=<path>
-Specify the path to F90 compiler.
-.TP
-.B \-\-f77flags="..."
-Specify F77 compiler flags.
-.TP
-.B \-\-f90flags="..."
-Specify F90 compiler flags.
-.TP
-.B \-\-opt="..."
-Specify optimization flags.
-.TP
-.B \-\-arch="..."
-Specify architecture specific optimization flags.
-.TP
-.B \-\-noopt
-Compile without optimization.
-.TP
-.B \-\-noarch
-Compile without arch-dependent optimization.
-.TP
-.B \-\-debug
-Compile with debugging information.
-.SH "EXTRA OPTIONS"
-The following options are effective only when \-c switch is used.
-.TP
-.B \-\-link-<resource>
-Link extension module with <resource> as defined by
-numpy_distutils/system_info.py. E.g. to link with optimized LAPACK
-libraries (vecLib on MacOSX, ATLAS elsewhere), use
-\-\-link\-lapack_opt. See also \-\-help\-link switch.
-
-.TP
-.B -L/path/to/lib/ -l<libname>
-.TP
-.B -D<define> -U<name> -I/path/to/include/
-.TP
-.B <filename>.o <filename>.so <filename>.a
-
-.TP
-.B -DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN -DUNDERSCORE_G77
-Macros that might be required with non-gcc Fortran compilers.
-
-.TP
-.B -DF2PY_REPORT_ATEXIT
-To print out a performance report of F2PY interface when python
-exits. Available for Linux.
-
-.TP
-.B -DF2PY_REPORT_ON_ARRAY_COPY=<int>
-To send a message to stderr whenever F2PY interface makes a copy of an
-array. Integer <int> sets the threshold for array sizes when a message
-should be shown.
-
-.SH REQUIREMENTS
-Python 1.5.2 or higher (2.x is supported).
-
-Numerical Python 13 or higher (20.x,21.x,22.x,23.x are supported).
-
-Optional Numarray 0.9 or higher partially supported.
-
-numpy_distutils from Scipy (can be downloaded from F2PY homepage)
-.SH "SEE ALSO"
-python(1)
-.SH BUGS
-For instructions on reporting bugs, see
-
- http://cens.ioc.ee/projects/f2py2e/FAQ.html
-.SH AUTHOR
-Pearu Peterson <pearu@cens.ioc.ee>
-.SH "INTERNET RESOURCES"
-Main website: http://cens.ioc.ee/projects/f2py2e/
-
-User's Guide: http://cens.ioc.ee/projects/f2py2e/usersguide/
-
-Mailing list: http://cens.ioc.ee/mailman/listinfo/f2py-users/
-
-Scipy website: http://www.numpy.org
-.SH COPYRIGHT
-Copyright (c) 1999, 2000, 2001, 2002, 2003, 2004, 2005 Pearu Peterson
-.SH LICENSE
-NumPy License
-.SH VERSION
-2.45.241
diff --git a/doc/f2py/f2py2e.tex b/doc/f2py/f2py2e.tex
deleted file mode 100644
index 6e3e9d68c..000000000
--- a/doc/f2py/f2py2e.tex
+++ /dev/null
@@ -1,50 +0,0 @@
-\documentclass{article}
-\usepackage{a4wide}
-
-\input commands
-
-\title{\fpy\\Fortran to Python Interface Generator\\{\large Second Edition}}
-\author{Pearu Peterson \texttt{<pearu@ioc.ee>}}
-\date{$Revision: 1.16 $\\\today}
-\begin{document}
-\special{html: <font size=-1>If equations does not show Greek letters or large
- brackets correctly, then your browser configuration needs some
- adjustment. Read the notes for <A
- href=http://hutchinson.belmont.ma.us/tth/Xfonts.html>Enabling Symbol
- Fonts in Netscape under X </A>. In addition, the browser must be set
- to use document fonts. </font>
-}
-
-\maketitle
-\begin{abstract}
- \fpy is a Python program that generates Python C/API modules for
- wrapping Fortran~77/90/95 codes to Python. The user can influence the
- process by modifying the signature files that \fpy generates when
- scanning the Fortran codes. This document describes the syntax of
- the signature files and the ways how the user can dictate the tool
- to produce wrapper functions with desired Python signatures. Also
- how to call the wrapper functions from Python is discussed.
-
- See \texttt{http://cens.ioc.ee/projects/f2py2e/} for updates of this
- document and the tool.
-\end{abstract}
-
-\tableofcontents
-
-\input intro
-\input signaturefile
-\input notes
-\input options
-\input bugs
-
-\appendix
-\input ex1/foobarmodule
-\input apps
-\end{document}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
-
-
diff --git a/doc/f2py/f2python9-final/README.txt b/doc/f2py/f2python9-final/README.txt
deleted file mode 100644
index 2ce8e393a..000000000
--- a/doc/f2py/f2python9-final/README.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-
-This directory contains the source of the paper
-
- "Fortran to Python Interface Generator with an Application
- to Aerospace Engineering"
-
-by
- Pearu Peterson <pearu@cens.ioc.ee> (the corresponding author)
- Joaquim R. R. A. Martins <joaquim.martins@stanford.edu>
- Juan J. Alonso <jjalonso@stanford.edu>
-
-for The 9th International Python Conference, March 5-8, 2001, Long Beach, California.
-
-The paper is provided here is in the HTML format:
-
- f2python9.html (size=48151 bytes)
-
-Note that this file includes the following JPG images
-
- flow.jpg (size=13266)
- structure.jpg (size=17860)
- aerostructure.jpg (size=72247)
-
-PS:
-The HTML file f2python9.html is generated using TTH (http://hutchinson.belmont.ma.us/tth/)
-from the LaTeX source file `python9.tex'. The source can be found in the
- src/
-directory. This directory contains also the following EPS files
- flow.eps
- structure.eps
- aerostructure.eps
-and the text files
- examples/{exp1.f,exp1mess.txt,exp1session.txt,foo.pyf,foom.pyf}
-that are used by the LaTeX source python9.tex.
-
-Regards,
- Pearu
-January 15, 2001
diff --git a/doc/f2py/f2python9-final/aerostructure.jpg b/doc/f2py/f2python9-final/aerostructure.jpg
deleted file mode 100644
index 896ad6e12..000000000
--- a/doc/f2py/f2python9-final/aerostructure.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/f2py/f2python9-final/flow.jpg b/doc/f2py/f2python9-final/flow.jpg
deleted file mode 100644
index cfe0f85f3..000000000
--- a/doc/f2py/f2python9-final/flow.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/f2py/f2python9-final/mk_html.sh b/doc/f2py/f2python9-final/mk_html.sh
deleted file mode 100755
index 944110e93..000000000
--- a/doc/f2py/f2python9-final/mk_html.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-cd src
-
-test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps
-test -f flow.eps || convert ../flow.jpg flow.eps
-test -f structure.eps || convert ../structure.jpg structure.eps
-
-latex python9.tex
-latex python9.tex
-latex python9.tex
-
-test `which tth` && cat python9.tex | sed -e "s/{{}\\\verb@/\\\texttt{/g" | sed -e "s/@{}}/}/g" | tth -Lpython9 -i > ../f2python9.html
-cd ..
diff --git a/doc/f2py/f2python9-final/mk_pdf.sh b/doc/f2py/f2python9-final/mk_pdf.sh
deleted file mode 100755
index b773028b7..000000000
--- a/doc/f2py/f2python9-final/mk_pdf.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/sh
-cd src
-
-test -f aerostructure.pdf || convert ../aerostructure.jpg aerostructure.pdf
-test -f flow.pdf || convert ../flow.jpg flow.pdf
-test -f structure.pdf || convert ../structure.jpg structure.pdf
-
-cat python9.tex | sed -e "s/eps,/pdf,/g" > python9pdf.tex
-pdflatex python9pdf.tex
-pdflatex python9pdf.tex
-pdflatex python9pdf.tex
-
-mv python9pdf.pdf ../f2python9.pdf \ No newline at end of file
diff --git a/doc/f2py/f2python9-final/mk_ps.sh b/doc/f2py/f2python9-final/mk_ps.sh
deleted file mode 100755
index 4b0863fcd..000000000
--- a/doc/f2py/f2python9-final/mk_ps.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh
-cd src
-
-test -f aerostructure.eps || convert ../aerostructure.jpg aerostructure.eps
-test -f flow.eps || convert ../flow.jpg flow.eps
-test -f structure.eps || convert ../structure.jpg structure.eps
-
-latex python9.tex
-latex python9.tex
-latex python9.tex
-
-dvips python9.dvi -o ../f2python9.ps
-cd ..
-gzip -f f2python9.ps
diff --git a/doc/f2py/f2python9-final/src/examples/exp1.f b/doc/f2py/f2python9-final/src/examples/exp1.f
deleted file mode 100644
index 36bee50b0..000000000
--- a/doc/f2py/f2python9-final/src/examples/exp1.f
+++ /dev/null
@@ -1,26 +0,0 @@
- subroutine exp1(l,u,n)
-C Input: n is number of iterations
-C Output: l,u are such that
-C l(1)/l(2) < exp(1) < u(1)/u(2)
-C
-Cf2py integer*4 :: n = 1
-Cf2py intent(out) l,u
- integer*4 n,i
- real*8 l(2),u(2),t,t1,t2,t3,t4
- l(2) = 1
- l(1) = 0
- u(2) = 0
- u(1) = 1
- do 10 i=0,n
- t1 = 4 + 32*(1+i)*i
- t2 = 11 + (40+32*i)*i
- t3 = 3 + (24+32*i)*i
- t4 = 8 + 32*(1+i)*i
- t = u(1)
- u(1) = l(1)*t1 + t*t2
- l(1) = l(1)*t3 + t*t4
- t = u(2)
- u(2) = l(2)*t1 + t*t2
- l(2) = l(2)*t3 + t*t4
- 10 continue
- end
diff --git a/doc/f2py/f2python9-final/src/examples/exp1mess.txt b/doc/f2py/f2python9-final/src/examples/exp1mess.txt
deleted file mode 100644
index d4188a91b..000000000
--- a/doc/f2py/f2python9-final/src/examples/exp1mess.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Reading fortran codes...
- Reading file 'exp1.f'
-Post-processing...
- Block: foo
- Block: exp1
-Creating 'Makefile-foo'...
- Linker: ld ('GNU ld' 2.9.5)
- Fortran compiler: f77 ('g77 2.x.x' 2.95.2)
- C compiler: cc ('gcc 2.x.x' 2.95.2)
-Building modules...
- Building module "foo"...
- Constructing wrapper function "exp1"...
- l,u = exp1([n])
- Wrote C/API module "foo" to file "foomodule.c"
- Documentation is saved to file "foomodule.tex"
-Run GNU make to build shared modules:
- gmake -f Makefile-<modulename> [test]
diff --git a/doc/f2py/f2python9-final/src/examples/exp1session.txt b/doc/f2py/f2python9-final/src/examples/exp1session.txt
deleted file mode 100644
index 5ae75ebd1..000000000
--- a/doc/f2py/f2python9-final/src/examples/exp1session.txt
+++ /dev/null
@@ -1,20 +0,0 @@
->>> import foo,Numeric
->>> print foo.exp1.__doc__
-exp1 - Function signature:
- l,u = exp1([n])
-Optional arguments:
- n := 1 input int
-Return objects:
- l : rank-1 array('d') with bounds (2)
- u : rank-1 array('d') with bounds (2)
-
->>> l,u = foo.exp1()
->>> print l,u
-[ 1264. 465.] [ 1457. 536.]
->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1]
-2.71827956989 2.25856657199e-06
->>> l,u = foo.exp1(2)
->>> print l,u
-[ 517656. 190435.] [ 566827. 208524.]
->>> print l[0]/l[1], u[0]/u[1]-l[0]/l[1]
-2.71828182845 1.36437527942e-11
diff --git a/doc/f2py/f2python9-final/src/examples/foo.pyf b/doc/f2py/f2python9-final/src/examples/foo.pyf
deleted file mode 100644
index 516bb292f..000000000
--- a/doc/f2py/f2python9-final/src/examples/foo.pyf
+++ /dev/null
@@ -1,13 +0,0 @@
-!%f90 -*- f90 -*-
-python module foo
- interface
- subroutine exp1(l,u,n)
- real*8 dimension(2) :: l
- real*8 dimension(2) :: u
- integer*4 :: n
- end subroutine exp1
- end interface
-end python module foo
-! This file was auto-generated with f2py
-! (version:2.298).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/f2python9-final/src/examples/foom.pyf b/doc/f2py/f2python9-final/src/examples/foom.pyf
deleted file mode 100644
index 6392ebc95..000000000
--- a/doc/f2py/f2python9-final/src/examples/foom.pyf
+++ /dev/null
@@ -1,14 +0,0 @@
-!%f90 -*- f90 -*-
-python module foo
- interface
- subroutine exp1(l,u,n)
- real*8 dimension(2) :: l
- real*8 dimension(2) :: u
- intent(out) l,u
- integer*4 optional :: n = 1
- end subroutine exp1
- end interface
-end python module foo
-! This file was auto-generated with f2py
-! (version:2.298) and modified by pearu.
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/f2python9-final/structure.jpg b/doc/f2py/f2python9-final/structure.jpg
deleted file mode 100644
index 9aa691339..000000000
--- a/doc/f2py/f2python9-final/structure.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/f2py/fortranobject.tex b/doc/f2py/fortranobject.tex
deleted file mode 100644
index a30b4b6c9..000000000
--- a/doc/f2py/fortranobject.tex
+++ /dev/null
@@ -1,574 +0,0 @@
-\documentclass{article}
-
-\headsep=0pt
-\topmargin=0pt
-\headheight=0pt
-\oddsidemargin=0pt
-\textwidth=6.5in
-\textheight=9in
-
-\usepackage{xspace}
-\usepackage{verbatim}
-\newcommand{\fpy}{\texttt{f2py}\xspace}
-\newcommand{\bs}{\symbol{`\\}}
-\newcommand{\email}[1]{\special{html:<A href="mailto:#1">}\texttt{<#1>}\special{html:</A>}}
-\title{\texttt{PyFortranObject} --- example usages}
-\author{
-\large Pearu Peterson\\
-\small \email{pearu@cens.ioc.ee}
-}
-
-\begin{document}
-
-\maketitle
-
-\special{html: Other formats of this document:
-<A href=pyfobj.ps.gz>Gzipped PS</A>,
-<A href=pyfobj.pdf>PDF</A>
-}
-
-\tableofcontents
-
-\section{Introduction}
-\label{sec:intro}
-
-Fortran language defines the following concepts that we would like to
-access from Python: functions, subroutines, data in \texttt{COMMON} blocks,
-F90 module functions and subroutines, F90 module data (both static and
-allocatable arrays).
-
-In the following we shall assume that we know the signatures (full
-specifications of routine arguments and variables) of these concepts
-from their Fortran source codes. Now, in order to call or use them
-from C, one needs to have pointers to the corresponding objects. The
-pointers to Fortran 77 objects (routines, data in \texttt{COMMON}
-blocks) are readily available to C codes (there are various sources
-available about mixing Fortran 77 and C codes). On the other hand, F90
-module specifications are highly compiler dependent and sometimes it
-is not even possible to access F90 module objects from C (at least,
-not directly, see remark about MIPSPro 7 Compilers). But using some
-tricks (described below), the pointers to F90 module objects can be
-determined in runtime providing a compiler independent solution.
-
-To use Fortran objects from Python in unified manner, \fpy introduces
-\texttt{PyFortranObject} to hold pointers of the Fortran objects and
-the corresponding wrapper functions. In fact, \texttt{PyFortranObject}
-does much more: it generates documentation strings in run-time (for
-items in \texttt{COMMON} blocks and data in F90 modules), provides
-methods for accessing Fortran data and for calling Fortran routines,
-etc.
-
-\section{\texttt{PyFortranObject}}
-\label{sec:pyfortobj}
-
-\texttt{PyFortranObject} is defined as follows
-\begin{verbatim}
-typedef struct {
- PyObject_HEAD
- int len; /* Number of attributes */
- FortranDataDef *defs; /* An array of FortranDataDef's */
- PyObject *dict; /* Fortran object attribute dictionary */
-} PyFortranObject;
-\end{verbatim}
-where \texttt{FortranDataDef} is
-\begin{verbatim}
-typedef struct {
- char *name; /* attribute (array||routine) name */
- int rank; /* array rank, 0 for scalar, max is F2PY_MAX_DIMS,
- || rank=-1 for Fortran routine */
- struct {int d[F2PY_MAX_DIMS];} dims; /* dimensions of the array, || not used */
- int type; /* NPY_<type> || not used */
- char *data; /* pointer to array || Fortran routine */
- void (*func)(); /* initialization function for
- allocatable arrays:
- func(&rank,dims,set_ptr_func,name,len(name))
- || C/API wrapper for Fortran routine */
- char *doc; /* documentation string; only recommended
- for routines. */
-} FortranDataDef;
-\end{verbatim}
-In the following we demonstrate typical usages of
-\texttt{PyFortranObject}. Just relevant code fragments will be given.
-
-
-\section{Fortran 77 subroutine}
-\label{sec:f77subrout}
-
-Consider Fortran 77 subroutine
-\begin{verbatim}
-subroutine bar()
-end
-\end{verbatim}
-The corresponding \texttt{PyFortranObject} is defined in C as follows:
-\begin{verbatim}
-static char doc_bar[] = "bar()";
-static PyObject *c_bar(PyObject *self, PyObject *args,
- PyObject *keywds, void (*f2py_func)()) {
- static char *capi_kwlist[] = {NULL};
- if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist))
- return NULL;
- (*f2py_func)();
- return Py_BuildValue("");
-}
-extern void F_FUNC(bar,BAR)();
-static FortranDataDef f2py_routines_def[] = {
- {"bar",-1, {-1}, 0, (char *)F_FUNC(bar,BAR),(void*)c_bar,doc_bar},
- {NULL}
-};
-void initfoo() {
- <snip>
- d = PyModule_GetDict(m);
- PyDict_SetItemString(d, f2py_routines_def[0].name,
- PyFortranObject_NewAsAttr(&f2py_routines_def[0]));
-}
-\end{verbatim}
-where CPP macro \texttt{F\_FUNC} defines how Fortran 77 routines are
-seen in C.
-In Python, Fortran subroutine \texttt{bar} is called as follows
-\begin{verbatim}
->>> import foo
->>> foo.bar()
-\end{verbatim}
-
-\section{Fortran 77 function}
-\label{sec:f77func}
-Consider Fortran 77 function
-\begin{verbatim}
-function bar()
-complex bar
-end
-\end{verbatim}
-The corresponding \texttt{PyFortranObject} is defined in C as in
-previous example but with the following changes:
-\begin{verbatim}
-static char doc_bar[] = "bar = bar()";
-static PyObject *c_bar(PyObject *self, PyObject *args,
- PyObject *keywds, void (*f2py_func)()) {
- complex_float bar;
- static char *capi_kwlist[] = {NULL};
- if (!PyArg_ParseTupleAndKeywords(args,keywds,"|:bar",capi_kwlist))
- return NULL;
- (*f2py_func)(&bar);
- return Py_BuildValue("O",pyobj_from_complex_float1(bar));
-}
-extern void F_WRAPPEDFUNC(bar,BAR)();
-static FortranDataDef f2py_routines_def[] = {
- {"bar",-1,{-1},0,(char *)F_WRAPPEDFUNC(bar,BAR),(void *)c_bar,doc_bar},
- {NULL}
-};
-\end{verbatim}
-where CPP macro \texttt{F\_WRAPPEDFUNC} gives the pointer to the following
-Fortran 77 subroutine:
-\begin{verbatim}
-subroutine f2pywrapbar (barf2pywrap)
-external bar
-complex bar, barf2pywrap
-barf2pywrap = bar()
-end
-\end{verbatim}
-With these hooks, calling Fortran functions returning composed types
-becomes platform/compiler independent.
-
-
-\section{\texttt{COMMON} block data}
-\label{sec:commondata}
-
-Consider Fortran 77 \texttt{COMMON} block
-\begin{verbatim}
-integer i
-COMMON /bar/ i
-\end{verbatim}
-In order to access the variable \texttt{i} from Python,
-\texttt{PyFortranObject} is defined as follows:
-\begin{verbatim}
-static FortranDataDef f2py_bar_def[] = {
- {"i",0,{-1},NPY_INT},
- {NULL}
-};
-static void f2py_setup_bar(char *i) {
- f2py_bar_def[0].data = i;
-}
-extern void F_FUNC(f2pyinitbar,F2PYINITBAR)();
-static void f2py_init_bar() {
- F_FUNC(f2pyinitbar,F2PYINITBAR)(f2py_setup_bar);
-}
-void initfoo() {
- <snip>
- PyDict_SetItemString(d, "bar", PyFortranObject_New(f2py_bar_def,f2py_init_bar));
-}
-\end{verbatim}
-where auxiliary Fortran function \texttt{f2pyinitbar} is defined as follows
-\begin{verbatim}
-subroutine f2pyinitbar(setupfunc)
-external setupfunc
-integer i
-common /bar/ i
-call setupfunc(i)
-end
-\end{verbatim}
-and it is called in \texttt{PyFortranObject\_New}.
-
-
-\section{Fortran 90 module subroutine}
-\label{sec:f90modsubrout}
-
-Consider
-\begin{verbatim}
-module fun
- subroutine bar()
- end subroutine bar
-end module fun
-\end{verbatim}
-\texttt{PyFortranObject} is defined as follows
-\begin{verbatim}
-static char doc_fun_bar[] = "fun.bar()";
-static PyObject *c_fun_bar(PyObject *self, PyObject *args,
- PyObject *keywds, void (*f2py_func)()) {
- static char *kwlist[] = {NULL};
- if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist))
- return NULL;
- (*f2py_func)();
- return Py_BuildValue("");
-}
-static FortranDataDef f2py_fun_def[] = {
- {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar},
- {NULL}
-};
-static void f2py_setup_fun(char *bar) {
- f2py_fun_def[0].data = bar;
-}
-extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
-static void f2py_init_fun() {
- F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
-}
-void initfoo () {
- <snip>
- PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
-}
-\end{verbatim}
-where auxiliary Fortran function \texttt{f2pyinitfun} is defined as
-follows
-\begin{verbatim}
-subroutine f2pyinitfun(f2pysetupfunc)
-use fun
-external f2pysetupfunc
-call f2pysetupfunc(bar)
-end subroutine f2pyinitfun
-\end{verbatim}
-The following Python session demonstrates how to call Fortran 90
-module function \texttt{bar}:
-\begin{verbatim}
->>> import foo
->>> foo.fun.bar()
-\end{verbatim}
-
-\section{Fortran 90 module function}
-\label{sec:f90modfunc}
-
-Consider
-\begin{verbatim}
-module fun
- function bar()
- complex bar
- end subroutine bar
-end module fun
-\end{verbatim}
-\texttt{PyFortranObject} is defined as follows
-\begin{verbatim}
-static char doc_fun_bar[] = "bar = fun.bar()";
-static PyObject *c_fun_bar(PyObject *self, PyObject *args,
- PyObject *keywds, void (*f2py_func)()) {
- complex_float bar;
- static char *kwlist[] = {NULL};
- if (!PyArg_ParseTupleAndKeywords(args,keywds,"",kwlist))
- return NULL;
- (*f2py_func)(&bar);
- return Py_BuildValue("O",pyobj_from_complex_float1(bar));
-}
-static FortranDataDef f2py_fun_def[] = {
- {"bar",-1,{-1},0,NULL,(void *)c_fun_bar,doc_fun_bar},
- {NULL}
-};
-static void f2py_setup_fun(char *bar) {
- f2py_fun_def[0].data = bar;
-}
-extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
-static void f2py_init_fun() {
- F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
-}
-void initfoo() {
- <snip>
- PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
-}
-\end{verbatim}
-where
-\begin{verbatim}
-subroutine f2pywrap_fun_bar (barf2pywrap)
-use fun
-complex barf2pywrap
-barf2pywrap = bar()
-end
-
-subroutine f2pyinitfun(f2pysetupfunc)
-external f2pysetupfunc,f2pywrap_fun_bar
-call f2pysetupfunc(f2pywrap_fun_bar)
-end
-\end{verbatim}
-
-
-\section{Fortran 90 module data}
-\label{sec:f90moddata}
-
-Consider
-\begin{verbatim}
-module fun
- integer i
-end module fun
-\end{verbatim}
-Then
-\begin{verbatim}
-static FortranDataDef f2py_fun_def[] = {
- {"i",0,{-1},NPY_INT},
- {NULL}
-};
-static void f2py_setup_fun(char *i) {
- f2py_fun_def[0].data = i;
-}
-extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
-static void f2py_init_fun() {
- F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
-}
-void initfoo () {
- <snip>
- PyDict_SetItemString(d, "fun",
- PyFortranObject_New(f2py_fun_def,f2py_init_fun));
-}
-\end{verbatim}
-where
-\begin{verbatim}
-subroutine f2pyinitfun(f2pysetupfunc)
-use fun
-external f2pysetupfunc
-call f2pysetupfunc(i)
-end subroutine f2pyinitfun
-\end{verbatim}
-Example usage in Python:
-\begin{verbatim}
->>> import foo
->>> foo.fun.i = 4
-\end{verbatim}
-
-\section{Fortran 90 module allocatable array}
-\label{sec:f90modallocarr}
-
-Consider
-\begin{verbatim}
-module fun
- real, allocatable :: r(:)
-end module fun
-\end{verbatim}
-Then
-\begin{verbatim}
-static FortranDataDef f2py_fun_def[] = {
- {"r",1,{-1},NPY_FLOAT},
- {NULL}
-};
-static void f2py_setup_fun(void (*r)()) {
- f2py_fun_def[0].func = r;
-}
-extern void F_FUNC(f2pyinitfun,F2PYINITFUN)();
-static void f2py_init_fun() {
- F_FUNC(f2pyinitfun,F2PYINITFUN)(f2py_setup_fun);
-}
-void initfoo () {
- <snip>
- PyDict_SetItemString(d, "fun", PyFortranObject_New(f2py_fun_def,f2py_init_fun));
-}
-\end{verbatim}
-where
-\begin{verbatim}
-subroutine f2py_fun_getdims_r(r,s,f2pysetdata)
-use fun, only: d => r
-external f2pysetdata
-logical ns
-integer s(*),r,i,j
-ns = .FALSE.
-if (allocated(d)) then
- do i=1,r
- if ((size(d,r-i+1).ne.s(i)).and.(s(i).ge.0)) then
- ns = .TRUE.
- end if
- end do
- if (ns) then
- deallocate(d)
- end if
-end if
-if ((.not.allocated(d)).and.(s(1).ge.1)) then
- allocate(d(s(1)))
-end if
-if (allocated(d)) then
- do i=1,r
- s(i) = size(d,r-i+1)
- end do
-end if
-call f2pysetdata(d,allocated(d))
-end subroutine f2py_fun_getdims_r
-
-subroutine f2pyinitfun(f2pysetupfunc)
-use fun
-external f2pysetupfunc,f2py_fun_getdims_r
-call f2pysetupfunc(f2py_fun_getdims_r)
-end subroutine f2pyinitfun
-\end{verbatim}
-Usage in Python:
-\begin{verbatim}
->>> import foo
->>> foo.fun.r = [1,2,3,4]
-\end{verbatim}
-
-\section{Callback subroutine}
-\label{sec:cbsubr}
-
-Thanks to Travis Oliphant for working out the basic idea of the
-following callback mechanism.
-
-Consider
-\begin{verbatim}
-subroutine fun(bar)
-external bar
-call bar(1)
-end
-\end{verbatim}
-Then
-\begin{verbatim}
-static char doc_foo8_fun[] = "
-Function signature:
- fun(bar,[bar_extra_args])
-Required arguments:
- bar : call-back function
-Optional arguments:
- bar_extra_args := () input tuple
-Call-back functions:
- def bar(e_1_e): return
- Required arguments:
- e_1_e : input int";
-static PyObject *foo8_fun(PyObject *capi_self, PyObject *capi_args,
- PyObject *capi_keywds, void (*f2py_func)()) {
- PyObject *capi_buildvalue = NULL;
- PyObject *bar_capi = Py_None;
- PyTupleObject *bar_xa_capi = NULL;
- PyTupleObject *bar_args_capi = NULL;
- jmp_buf bar_jmpbuf;
- int bar_jmpbuf_flag = 0;
- int bar_nofargs_capi = 0;
- static char *capi_kwlist[] = {"bar","bar_extra_args",NULL};
-
- if (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\
- "O!|O!:foo8.fun",\
- capi_kwlist,&PyFunction_Type,&bar_capi,&PyTuple_Type,&bar_xa_capi))
- goto capi_fail;
-
- bar_nofargs_capi = cb_bar_in_fun__user__routines_nofargs;
- if (create_cb_arglist(bar_capi,bar_xa_capi,1,0,
- &cb_bar_in_fun__user__routines_nofargs,&bar_args_capi)) {
- if ((PyErr_Occurred())==NULL)
- PyErr_SetString(foo8_error,"failed in processing argument list for call-back bar." );
- goto capi_fail;
- }
-
- SWAP(bar_capi,cb_bar_in_fun__user__routines_capi,PyObject);
- SWAP(bar_args_capi,cb_bar_in_fun__user__routines_args_capi,PyTupleObject);
- memcpy(&bar_jmpbuf,&cb_bar_in_fun__user__routines_jmpbuf,sizeof(jmp_buf));
- bar_jmpbuf_flag = 1;
-
- if ((setjmp(cb_bar_in_fun__user__routines_jmpbuf))) {
- if ((PyErr_Occurred())==NULL)
- PyErr_SetString(foo8_error,"Failure of a callback function");
- goto capi_fail;
- } else
- (*f2py_func)(cb_bar_in_fun__user__routines);
-
- capi_buildvalue = Py_BuildValue("");
-capi_fail:
-
- if (bar_jmpbuf_flag) {
- cb_bar_in_fun__user__routines_capi = bar_capi;
- Py_DECREF(cb_bar_in_fun__user__routines_args_capi);
- cb_bar_in_fun__user__routines_args_capi = bar_args_capi;
- cb_bar_in_fun__user__routines_nofargs = bar_nofargs_capi;
- memcpy(&cb_bar_in_fun__user__routines_jmpbuf,&bar_jmpbuf,sizeof(jmp_buf));
- bar_jmpbuf_flag = 0;
- }
- return capi_buildvalue;
-}
-extern void F_FUNC(fun,FUN)();
-static FortranDataDef f2py_routine_defs[] = {
- {"fun",-1,{-1},0,(char *)F_FUNC(fun,FUN),(void *)foo8_fun,doc_foo8_fun},
- {NULL}
-};
-void initfoo8 () {
- <snip>
- PyDict_SetItemString(d, f2py_routine_defs[0].name,
- PyFortranObject_NewAsAttr(&f2py_routine_defs[0]));
-}
-\end{verbatim}
-where
-\begin{verbatim}
-PyObject *cb_bar_in_fun__user__routines_capi = Py_None;
-PyTupleObject *cb_bar_in_fun__user__routines_args_capi = NULL;
-int cb_bar_in_fun__user__routines_nofargs = 0;
-jmp_buf cb_bar_in_fun__user__routines_jmpbuf;
-static void cb_bar_in_fun__user__routines (int *e_1_e_cb_capi) {
- PyTupleObject *capi_arglist = cb_bar_in_fun__user__routines_args_capi;
- PyObject *capi_return = NULL;
- PyObject *capi_tmp = NULL;
- int capi_j,capi_i = 0;
-
- int e_1_e=(*e_1_e_cb_capi);
- if (capi_arglist == NULL)
- goto capi_fail;
- if (cb_bar_in_fun__user__routines_nofargs>capi_i)
- if (PyTuple_SetItem((PyObject *)capi_arglist,capi_i++,pyobj_from_int1(e_1_e)))
- goto capi_fail;
-
- capi_return = PyEval_CallObject(cb_bar_in_fun__user__routines_capi,
- (PyObject *)capi_arglist);
-
- if (capi_return == NULL)
- goto capi_fail;
- if (capi_return == Py_None) {
- Py_DECREF(capi_return);
- capi_return = Py_BuildValue("()");
- }
- else if (!PyTuple_Check(capi_return)) {
- capi_tmp = capi_return;
- capi_return = Py_BuildValue("(O)",capi_tmp);
- Py_DECREF(capi_tmp);
- }
- capi_j = PyTuple_Size(capi_return);
- capi_i = 0;
- goto capi_return_pt;
-capi_fail:
- fprintf(stderr,"Call-back cb_bar_in_fun__user__routines failed.\n");
- Py_XDECREF(capi_return);
- longjmp(cb_bar_in_fun__user__routines_jmpbuf,-1);
-capi_return_pt:
- ;
-}
-\end{verbatim}
-Usage in Python:
-\begin{verbatim}
->>> import foo8 as foo
->>> def bar(i): print 'In bar i=',i
-...
->>> foo.fun(bar)
-In bar i= 1
-\end{verbatim}
-
-\end{document}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
diff --git a/doc/f2py/hello.f b/doc/f2py/hello.f
deleted file mode 100644
index 3e0dc6d21..000000000
--- a/doc/f2py/hello.f
+++ /dev/null
@@ -1,7 +0,0 @@
-C File hello.f
- subroutine foo (a)
- integer a
- print*, "Hello from Fortran!"
- print*, "a=",a
- end
-
diff --git a/doc/f2py/index.html b/doc/f2py/index.html
deleted file mode 100644
index f155b1c97..000000000
--- a/doc/f2py/index.html
+++ /dev/null
@@ -1,264 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
-<HTML>
-<HEAD>
-<META name="Author" content="Pearu Peterson">
-<!-- You may add here some keywords (comma separated list) -->
-<META name="Keywords" content="fortran,python,interface,f2py,f2py2e,wrapper,fpig">
-<TITLE>F2PY - Fortran to Python Interface Generator</TITLE>
-<LINK rel="stylesheet" type="text/css" href="/styles/userstyle.css">
-</HEAD>
-
-<BODY>
-<!-- Begin of user text -->
-<H1>F2PY &shy; Fortran to Python Interface Generator</H1>
-by <em>Pearu Peterson</em>
-
-<h2>What's new?</h2>
-
-See <a href="NEWS.txt">NEWS.txt</a> for the latest changes in <code>f2py</code>.
-<dl>
- <dt> July ??, 2002
- <dd> Implemented prototype calculator, complete tests for scalar F77
- functions, --help-compiler option. Fixed number of bugs and
- removed obsolete features.
- <dt> April 4, 2002
- <dd> Fixed a nasty bug of copying one-dimensional non-contiguous arrays.
- (Thanks to Travis O. for pointing this out).
- <dt> March 26, 2002
- <dd> Bug fixes, turned off F2PY_REPORT_ATEXIT by default.
- <dt> March 13, 2002
- <dd> MAC support, fixed incomplete dependency calculator, minor bug fixes.
- <dt> March 3, 2002
- <dd> Fixed memory leak and copying of multi-dimensional complex arrays.
- <dt> <a href="oldnews.html">Old news</a>.
-</dl>
-
-<h2>Introduction</h2>
-
-Writing Python C/API wrappers for Fortran routines can be a very
-tedious task, especially if a Fortran routine takes more than 20
-arguments but only few of them are relevant for the problems that they
-solve. So, I have developed a tool that generates the C/API modules
-containing wrapper functions of Fortran routines. I call this
-tool as <em>F2PY &shy; Fortran to Python Interface Generator</em>.
-It is completely written in <a href="http://www.python.org">Python</a>
-language and can be called from the command line as <code>f2py</code>.
-<em>F2PY</em> (in NumPy) is released under the terms of the NumPy License.
-
-
-<h2><code>f2py</code>, Second Edition</h2>
-
-The development of <code>f2py</code> started in summer of 1999.
-For now (January, 2000) it has reached to stage of being a
-complete tool: it scans real Fortran code, creates signature file
-that the user can modify, constructs C/API module that can be
-complied and imported to Python, and it creates LaTeX documentation
-for wrapper functions. Below is a bit longer list of
-<code>f2py</code> features:
-<ol>
- <li> <code>f2py</code> scans real Fortran codes and produces the signature files.
- The syntax of the signature files is borrowed from the Fortran 90/95
- language specification with some extensions.
- <li> <code>f2py</code> generates a GNU Makefile that can be used
- for building shared modules (see below for a list of supported
- platforms/compilers). Starting from the third release,
- <code>f2py</code> generates <code>setup_modulename.py</code> for
- building extension modules using <code>distutils</code> tools.
- <li> <code>f2py</code> uses the signature files to produce the wrappers for
- Fortran 77 routines and their <code>COMMON</code> blocks.
- <li> For <code>external</code> arguments <code>f2py</code> constructs a very flexible
- call-back mechanism so that Python functions can be called from
- Fortran.
- <li> You can pass in almost arbitrary Python objects to wrapper
- functions. If needed, <code>f2py</code> takes care of type-casting and
- non-contiguous arrays.
- <li> You can modify the signature files so that <code>f2py</code> will generate
- wrapper functions with desired signatures. <code>depend()</code>
- attribute is introduced to control the initialization order of the
- variables. <code>f2py</code> introduces <code>intent(hide)</code>
- attribute to remove
- the particular argument from the argument list of the wrapper
- function and <code>intent(c)</code> that is useful for wrapping C
-libraries. In addition, <code>optional</code> and
-<code>required</code>
- attributes are introduced and employed.
- <li> <code>f2py</code> supports almost all standard Fortran 77/90/95 constructs
- and understands all basic Fortran types, including
- (multi-dimensional, complex) arrays and character strings with
- adjustable and assumed sizes/lengths.
- <li> <code>f2py</code> generates a LaTeX document containing the
- documentations of the wrapped functions (argument types, dimensions,
- etc). The user can easily add some human readable text to the
- documentation by inserting <code>note(&lt;LaTeX text&gt;)</code> attribute to
- the definition of routine signatures.
- <li> With <code>f2py</code> one can access also Fortran 90/95
- module subroutines from Python.
-</ol>
-
-For more information, see the <a href="usersguide.html">User's
-Guide</a> of the tool. Windows users should also take a look at
-<a href="win32_notes.txt">f2py HOWTO for Win32</a> (its latest version
-can be found <a
-href="http://www.numpy.org/Members/eric/f2py_win32">here</a>).
-
-<h3>Requirements</h3>
-<ol>
- <li> You'll need <a
- href="http://www.python.org/download/">Python</a>
- (1.5.2 or later, 2.2 is recommended) to run <code>f2py</code>
- (because it uses exchanged module <code>re</code>).
- To build generated extension modules with distutils setup script,
- you'll need Python 2.x.
- <li> You'll need <a
- href="http://sourceforge.net/project/?group_id=1369">Numerical
- Python</a>
- (version 13 or later, 20.3 is recommended) to compile
- C/API modules (because they use function
- <code>PyArray_FromDimsAndDataAndDescr</code>)
-</ol>
-
-<h3>Download</h3>
-
-<dl>
- <dt> User's Guide:
- <dd> <a href="usersguide.html">usersguide.html</a>,
- <a href="usersguide.pdf">usersguide.pdf</a>,
- <a href="usersguide.ps.gz">usersguide.ps.gz</a>,
- <a href="usersguide.dvi">usersguide.dvi</a>.
- <dt> Snapshots of the fifth public release:
- <dd> <a href="2.x">2.x</a>/<a href="2.x/F2PY-2-latest.tar.gz">F2PY-2-latest.tar.gz</a>
- <dt> Snapshots of earlier releases:
- <dd> <a href="rel-5.x">rel-5.x</a>, <a href="rel-4.x">rel-4.x</a>,
- <a href="rel-3.x">rel-3.x</a>,
- <a href="rel-2.x">rel-2.x</a>,<a href="rel-1.x">rel-1.x</a>,
- <a href="rel-0.x">rel-0.x</a>
-</dl>
-
-<h3>Installation</h3>
-
-Unpack the source file, change to directory <code>f2py-?-???</code>
-and run <code>python setup.py install</code>. That's it!
-
-<h3>Platform/Compiler Related Notes</h3>
-
-<code>f2py</code> has been successfully tested on
-<ul>
- <li> Intel Linux (MD7.0,RH6.1,RH4.2,Debian woody), Athlon Linux (RH6.1), Alpha Linux (RH5.2,RH6.1) with <a
-href="http://gcc.gnu.org/">gcc</a> (versions egcs-2.91.60,egcs-2.91.66, and 2.95.2).
- <li> Intel Linux (MD7.0) with <a
- href="http://www.psrv.com/index.html">Pacific-Sierra
- Research</a> <a href="http://www.psrv.com/lnxf90.html">Personal
- Linux VAST/f90 Fortran 90 compiler</a> (version V3.4N5).
- <li> Intel Linux (RH6.1) with <a href="http://www.absoft.com/">Absoft F77/F90</a> compilers for Linux.
- <li> IRIX64 with <a href="http://gcc.gnu.org/">gcc</a> (2.95.2) and <a
-href="http://www.sgi.com/developers/devtools/languages/mipspro.html">MIPSpro
-7 Compilers</a> (f77,f90,cc versions 7.30).
- <li> Alpha Linux (RH5.2,RH6.1) with <a href="http://www.digital.com/fortran/linux/">Compaq Fortran </a> compiler (version V1.0-920).
- <li> Linux with <a href="http://www.nag.co.uk/">NAGWare</a> Fortran
- 95 compiler.
- <li> <a href="http://developer.intel.com/software/products/compilers/f50/linux/">
- Intel(R) Fortran Compiler for Linux</a>
- <li> Windows 2000 with <a href="http://www.mingw.org">mingw32</a>.
-</ul>
-<code>f2py</code> will probably run on other UN*X systems as
-well. Additions to the list of platforms/compilers where
-<code>f2py</code> has been successfully used are most welcome.
-<P>
-<em>Note:</em>
-Using Compaq Fortran
-compiler on Alpha Linux is successful unless when
-wrapping Fortran callback functions returning
-<code>COMPLEX</code>. This applies also for IRIX64.
-<P>
-<em>Note:</em>
-Fortran 90/95 module support is currently tested with Absoft F90, VAST/f90, Intel F90 compilers on Linux (MD7.0,Debian woody).
-
-
-<h3><a name="f2py-users">Mailing list</a></h3>
-
-There is a mailing list <a
-href="http://cens.ioc.ee/pipermail/f2py-users/">f2py-users</a>
-available for the users of the <code>f2py</code>
-program and it is open for discussion, questions, and answers. You can subscribe
-the list <a href="http://cens.ioc.ee/mailman/listinfo/f2py-users">here</a>.
-
-<h3><a href="http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/">CVS Repository</a></h3>
-
-<code>f2py</code> is being developed under <a href="http://www.sourcegear.com/CVS">CVS</a> and those who are
-interested in the really latest version of <code>f2py</code> (possibly
-unstable) can get it from the repository as follows:
-<ol>
- <li> First you need to login (the password is <code>guest</code>):
-<pre>
-> cvs -d :pserver:anonymous@cens.ioc.ee:/home/cvs login
-</pre>
- <li> and then do the checkout:
-<pre>
-> cvs -z6 -d :pserver:anonymous@cens.ioc.ee:/home/cvs checkout f2py2e
-</pre>
- <li> In the directory <code>f2py2e</code> you can get the updates by hitting
-<pre>
-> cvs -z6 update -P -d
-</pre>
-</ol>
-You can browse <code>f2py</code> CVS repository <a href="http://cens.ioc.ee/cgi-bin/cvsweb/python/f2py2e/">here</a>.
-
-<h2>Related sites</h2>
-
-<ol>
- <li> <a href="http://pfdubois.com/numpy/" target="_top">Numerical Python</a>.
- <li> <a href="http://pyfortran.sourceforge.net/" target="_top">Pyfort</a> -- The Python-Fortran connection tool.
- <li> <a href="http://starship.python.net/crew/hinsen/scientific.html" target="_top">Scientific Python</a>.
- <li> <a href="http://numpy.org/" target="_top">SciPy</a> -- Scientific tools for Python (includes Multipack).
- <li> <a href="http://www.fortran.com/fortran/" target="_top">The Fortran Company</a>.
- <li> <a href="http://www.j3-fortran.org/" target="_top">Fortran Standards</a>.
-
- <li> <a href="http://www.fortran.com/fortran/F77_std/rjcnf.html">American National Standard Programming Language FORTRAN ANSI(R) X3.9-1978</a>
- <li> <a href="http://www.mathtools.net" target="_top">Mathtools.net</a> -- A technical computing portal for all scientific and engineering needs.
-
-</ol>
-
-<!-- End of user text -->
-<HR>
-<ADDRESS>
-<A href="http://validator.w3.org/"><IMG border=0 align=right src="/icons/vh40.gif" alt="Valid HTML 4.0!" height=31 width=88></A>
-<A href="http://cens.ioc.ee/~pearu/" target="_top">Pearu Peterson</A>
-<A href="mailto:pearu(at)ioc.ee">&lt;pearu(at)ioc.ee&gt;</A><BR>
-<!-- hhmts start -->
-Last modified: Fri Jan 20 14:55:12 MST 2006
-<!-- hhmts end -->
-</ADDRESS>
-<!-- You may want to comment the following line out when the document is final-->
-<!-- Check that the reference is right -->
-<!--A href="http://validator.w3.org/check?uri=http://cens.ioc.ee/projects/f2py2e/index.html;ss"> Submit this page for validation</A-->
-
-<p>
-<center>
-This <a href="http://www.ctv.es/USERS/irmina/pythonring.html">Python
-ring</a> site owned by <a href="mailto:pearu(at)ioc.ee">Pearu Peterson</a>.
-<br>
-[
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;prev5">Previous 5 Sites</a>
-|
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;prev">Previous</a>
-|
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;next">Next</a>
-|
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;id=12;next5">Next 5 Sites</a>
-|
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;random">Random Site</a>
-|
- <a href="http://nav.webring.org/cgi-bin/navcgi?ring=python_ring;list">List Sites</a>
-]
-</center>
-<p>
-
-
-
-</BODY>
-
-
-</HTML>
-
-
-
diff --git a/doc/f2py/intro.tex b/doc/f2py/intro.tex
deleted file mode 100644
index d9625b09c..000000000
--- a/doc/f2py/intro.tex
+++ /dev/null
@@ -1,158 +0,0 @@
-
-\section{Introduction}
-\label{sec:intro}
-
-\fpy is a command line tool that generates Python C/API modules for
-interfacing Fortran~77/90/95 codes and Fortran~90/95 modules from
-Python. In general, using \fpy an
-interface is produced in three steps:
-\begin{itemize}
-\item[(i)] \fpy scans Fortran sources and creates the so-called
- \emph{signature} file; the signature file contains the signatures of
- Fortran routines; the signatures are given in the free format of the
- Fortran~90/95 language specification. Latest version of \fpy
- generates also a make file for building shared module.
- About currently supported compilers see the \fpy home page
-\item[(ii)] Optionally, the signature files can be modified manually
- in order to dictate how the Fortran routines should be called or
- seemed from the Python environment.
-\item[(iii)] \fpy reads the signature files and generates Python C/API
- modules that can be compiled and imported to Python code. In
- addition, a LaTeX document is generated that contains the
- documentation of wrapped functions.
-\end{itemize}
-(Note that if you are satisfied with the default signature that \fpy
-generates in step (i), all three steps can be covered with just
-one call to \fpy --- by not specifying `\texttt{-h}' flag).
-Latest versions of \fpy support so-called \fpy directive that allows
-inserting various information about wrapping directly to Fortran
-source code as comments (\texttt{<comment char>f2py <signature statement>}).
-
-The following diagram illustrates the usage of the tool:
-\begin{verbatim}
-! Fortran file foo.f:
- subroutine foo(a)
- integer a
- a = a + 5
- end
-\end{verbatim}
-\begin{verbatim}
-! Fortran file bar.f:
- function bar(a,b)
- integer a,b,bar
- bar = a + b
- end
-\end{verbatim}
-\begin{itemize}
-\item[(i)] \shell{\fpy foo.f bar.f -m foobar -h foobar.pyf}
-\end{itemize}
-\begin{verbatim}
-!%f90
-! Signature file: foobar.pyf
-python module foobar ! in
- interface ! in :foobar
- subroutine foo(a) ! in :foobar:foo.f
- integer intent(inout) :: a
- end subroutine foo
- function bar(a,b) ! in :foobar:bar.f
- integer :: a
- integer :: b
- integer :: bar
- end function bar
- end interface
-end python module foobar
-\end{verbatim}
-\begin{itemize}
-\item[(ii)] Edit the signature file (here I made \texttt{foo}s
- argument \texttt{a} to be \texttt{intent(inout)}, see
- Sec.~\ref{sec:attributes}).
-\item[(iii)] \shell{\fpy foobar.pyf}
-\end{itemize}
-\begin{verbatim}
-/* Python C/API module: foobarmodule.c */
-...
-\end{verbatim}
-\begin{itemize}
-\item[(iv)] \shell{make -f Makefile-foobar}
-%\shell{gcc -shared -I/usr/include/python1.5/ foobarmodule.c\bs\\
-%foo.f bar.f -o foobarmodule.so}
-\end{itemize}
-\begin{verbatim}
-Python shared module: foobarmodule.so
-\end{verbatim}
-\begin{itemize}
-\item[(v)] Usage in Python:
-\end{itemize}
-\vspace*{-4ex}
-\begin{verbatim}
->>> import foobar
->>> print foobar.__doc__
-This module 'foobar' is auto-generated with f2py (version:1.174).
-The following functions are available:
- foo(a)
- bar = bar(a,b)
-.
->>> print foobar.bar(2,3)
-5
->>> from Numeric import *
->>> a = array(3)
->>> print a,foobar.foo(a),a
-3 None 8
-\end{verbatim}
-Information about how to call \fpy (steps (i) and (iii)) can be
-obtained by executing\\
-\shell{\fpy}\\
-This will print the usage instructions.
- Step (iv) is system dependent
-(compiler and the locations of the header files \texttt{Python.h} and
-\texttt{arrayobject.h}), and so you must know how to compile a shared
-module for Python in you system.
-
-The next Section describes the step (ii) in more detail in order to
-explain how you can influence to the process of interface generation
-so that the users can enjoy more writing Python programs using your
-wrappers that call Fortran routines. Step (v) is covered in
-Sec.~\ref{sec:notes}.
-
-
-\subsection{Features}
-\label{sec:features}
-
-\fpy has the following features:
-\begin{enumerate}
-\item \fpy scans real Fortran codes and produces the signature files.
- The syntax of the signature files is borrowed from the Fortran~90/95
- language specification with some extensions.
-\item \fpy uses the signature files to produce the wrappers for
- Fortran~77 routines and their \texttt{COMMON} blocks.
-\item For \texttt{external} arguments \fpy constructs a very flexible
- call-back mechanism so that Python functions can be called from
- Fortran.
-\item You can pass in almost arbitrary Python objects to wrapper
- functions. If needed, \fpy takes care of type-casting and
- non-contiguous arrays.
-\item You can modify the signature files so that \fpy will generate
- wrapper functions with desired signatures. \texttt{depend()}
- attribute is introduced to control the initialization order of the
- variables. \fpy introduces \texttt{intent(hide)} attribute to remove
- the particular argument from the argument list of the wrapper
- function. In addition, \texttt{optional} and \texttt{required}
- attributes are introduced and employed.
-\item \fpy supports almost all standard Fortran~77/90/95 constructs
- and understands all basic Fortran types, including
- (multi-dimensional, complex) arrays and character strings with
- adjustable and assumed sizes/lengths.
-\item \fpy generates a LaTeX document containing the
- documentations of the wrapped functions (argument types, dimensions,
- etc). The user can easily add some human readable text to the
- documentation by inserting \texttt{note(<LaTeX text>)} attribute to
- the definition of routine signatures.
-\item \fpy generates a GNU make file that can be used for building
- shared modules calling Fortran functions.
-\item \fpy supports wrapping Fortran 90/95 module routines.
-\end{enumerate}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
diff --git a/doc/f2py/multiarray/array_from_pyobj.c b/doc/f2py/multiarray/array_from_pyobj.c
deleted file mode 100644
index fba14ac71..000000000
--- a/doc/f2py/multiarray/array_from_pyobj.c
+++ /dev/null
@@ -1,323 +0,0 @@
-/*
- * File: array_from_pyobj.c
- *
- * Description:
- * ------------
- * Provides array_from_pyobj function that returns a contiguous array
- * object with the given dimensions and required storage order, either
- * in row-major (C) or column-major (Fortran) order. The function
- * array_from_pyobj is very flexible about its Python object argument
- * that can be any number, list, tuple, or array.
- *
- * array_from_pyobj is used in f2py generated Python extension
- * modules.
- *
- * Author: Pearu Peterson <pearu@cens.ioc.ee>
- * Created: 13-16 January 2002
- * $Id: array_from_pyobj.c,v 1.1 2002/01/16 18:57:33 pearu Exp $
- */
-
-
-#define ARR_IS_NULL(arr,mess) \
-if (arr==NULL) { \
- fprintf(stderr,"array_from_pyobj:" mess); \
- return NULL; \
-}
-
-#define CHECK_DIMS_DEFINED(rank,dims,mess) \
-if (count_nonpos(rank,dims)) { \
- fprintf(stderr,"array_from_pyobj:" mess); \
- return NULL; \
-}
-
-#define HAS_PROPER_ELSIZE(arr,type_num) \
- ((PyArray_DescrFromType(type_num)->elsize) == (arr)->descr->elsize)
-
-/* static */
-/* void f2py_show_args(const int type_num, */
-/* const int *dims, */
-/* const int rank, */
-/* const int intent) { */
-/* int i; */
-/* fprintf(stderr,"array_from_pyobj:\n\ttype_num=%d\n\trank=%d\n\tintent=%d\n",\ */
-/* type_num,rank,intent); */
-/* for (i=0;i<rank;++i) */
-/* fprintf(stderr,"\tdims[%d]=%d\n",i,dims[i]); */
-/* } */
-
-static
-int count_nonpos(const int rank,
- const int *dims) {
- int i=0,r=0;
- while (i<rank) {
- if (dims[i] <= 0) ++r;
- ++i;
- }
- return r;
-}
-
-static void lazy_transpose(PyArrayObject* arr);
-static int check_and_fix_dimensions(const PyArrayObject* arr,
- const int rank,
- int *dims);
-static
-int array_has_column_major_storage(const PyArrayObject *ap);
-
-static
-PyArrayObject* array_from_pyobj(const int type_num,
- int *dims,
- const int rank,
- const int intent,
- PyObject *obj) {
- /* Note about reference counting
- -----------------------------
- If the caller returns the array to Python, it must be done with
- Py_BuildValue("N",arr).
- Otherwise, if obj!=arr then the caller must call Py_DECREF(arr).
- */
-
-/* f2py_show_args(type_num,dims,rank,intent); */
-
- if (intent & F2PY_INTENT_CACHE) {
- /* Don't expect correct storage order or anything reasonable when
- returning cache array. */
- if ((intent & F2PY_INTENT_HIDE)
- || (obj==Py_None)) {
- PyArrayObject *arr = NULL;
- CHECK_DIMS_DEFINED(rank,dims,"optional,intent(cache) must"
- " have defined dimensions.\n");
- arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
- ARR_IS_NULL(arr,"FromDims failed: optional,intent(cache)\n");
- if (intent & F2PY_INTENT_OUT)
- Py_INCREF(arr);
- return arr;
- }
- if (PyArray_Check(obj)
- && ISCONTIGUOUS((PyArrayObject *)obj)
- && HAS_PROPER_ELSIZE((PyArrayObject *)obj,type_num)
- ) {
- if (check_and_fix_dimensions((PyArrayObject *)obj,rank,dims))
- return NULL; /*XXX: set exception */
- if (intent & F2PY_INTENT_OUT)
- Py_INCREF(obj);
- return (PyArrayObject *)obj;
- }
- ARR_IS_NULL(NULL,"intent(cache) must be contiguous array with a proper elsize.\n");
- }
-
- if (intent & F2PY_INTENT_HIDE) {
- PyArrayObject *arr = NULL;
- CHECK_DIMS_DEFINED(rank,dims,"intent(hide) must have defined dimensions.\n");
- arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
- ARR_IS_NULL(arr,"FromDims failed: intent(hide)\n");
- if (intent & F2PY_INTENT_OUT) {
- if ((!(intent & F2PY_INTENT_C)) && (rank>1)) {
- lazy_transpose(arr);
- arr->flags &= ~NPY_CONTIGUOUS;
- }
- Py_INCREF(arr);
- }
- return arr;
- }
-
- if (PyArray_Check(obj)) { /* here we have always intent(in) or
- intent(inout) */
-
- PyArrayObject *arr = (PyArrayObject *)obj;
- int is_cont = (intent & F2PY_INTENT_C) ?
- (ISCONTIGUOUS(arr)) : (array_has_column_major_storage(arr));
-
- if (check_and_fix_dimensions(arr,rank,dims))
- return NULL; /*XXX: set exception */
-
- if ((intent & F2PY_INTENT_COPY)
- || (! (is_cont
- && HAS_PROPER_ELSIZE(arr,type_num)
- && PyArray_CanCastSafely(arr->descr->type_num,type_num)))) {
- PyArrayObject *tmp_arr = NULL;
- if (intent & F2PY_INTENT_INOUT) {
- ARR_IS_NULL(NULL,"intent(inout) array must be contiguous and"
- " with a proper type and size.\n")
- }
- if ((rank>1) && (! (intent & F2PY_INTENT_C)))
- lazy_transpose(arr);
- if (PyArray_CanCastSafely(arr->descr->type_num,type_num)) {
- tmp_arr = (PyArrayObject *)PyArray_CopyFromObject(obj,type_num,0,0);
- ARR_IS_NULL(arr,"CopyFromObject failed: array.\n");
- } else {
- tmp_arr = (PyArrayObject *)PyArray_FromDims(arr->nd,
- arr->dimensions,
- type_num);
- ARR_IS_NULL(tmp_arr,"FromDims failed: array with unsafe cast.\n");
- if (copy_ND_array(arr,tmp_arr))
- ARR_IS_NULL(NULL,"copy_ND_array failed: array with unsafe cast.\n");
- }
- if ((rank>1) && (! (intent & F2PY_INTENT_C))) {
- lazy_transpose(arr);
- lazy_transpose(tmp_arr);
- tmp_arr->flags &= ~NPY_CONTIGUOUS;
- }
- arr = tmp_arr;
- }
- if (intent & F2PY_INTENT_OUT)
- Py_INCREF(arr);
- return arr;
- }
-
- if ((obj==Py_None) && (intent & F2PY_OPTIONAL)) {
- PyArrayObject *arr = NULL;
- CHECK_DIMS_DEFINED(rank,dims,"optional must have defined dimensions.\n");
- arr = (PyArrayObject *)PyArray_FromDims(rank,dims,type_num);
- ARR_IS_NULL(arr,"FromDims failed: optional.\n");
- if (intent & F2PY_INTENT_OUT) {
- if ((!(intent & F2PY_INTENT_C)) && (rank>1)) {
- lazy_transpose(arr);
- arr->flags &= ~NPY_CONTIGUOUS;
- }
- Py_INCREF(arr);
- }
- return arr;
- }
-
- if (intent & F2PY_INTENT_INOUT) {
- ARR_IS_NULL(NULL,"intent(inout) argument must be an array.\n");
- }
-
- {
- PyArrayObject *arr = (PyArrayObject *) \
- PyArray_ContiguousFromObject(obj,type_num,0,0);
- ARR_IS_NULL(arr,"ContiguousFromObject failed: not a sequence.\n");
- if (check_and_fix_dimensions(arr,rank,dims))
- return NULL; /*XXX: set exception */
- if ((rank>1) && (! (intent & F2PY_INTENT_C))) {
- PyArrayObject *tmp_arr = NULL;
- lazy_transpose(arr);
- arr->flags &= ~NPY_CONTIGUOUS;
- tmp_arr = (PyArrayObject *) PyArray_CopyFromObject((PyObject *)arr,type_num,0,0);
- Py_DECREF(arr);
- arr = tmp_arr;
- ARR_IS_NULL(arr,"CopyFromObject(Array) failed: intent(fortran)\n");
- lazy_transpose(arr);
- arr->flags &= ~NPY_CONTIGUOUS;
- }
- if (intent & F2PY_INTENT_OUT)
- Py_INCREF(arr);
- return arr;
- }
-
-}
-
- /*****************************************/
- /* Helper functions for array_from_pyobj */
- /*****************************************/
-
-static
-int array_has_column_major_storage(const PyArrayObject *ap) {
- /* array_has_column_major_storage(a) is equivalent to
- transpose(a).iscontiguous() but more efficient.
-
- This function can be used in order to decide whether to use a
- Fortran or C version of a wrapped function. This is relevant, for
- example, in choosing a clapack or flapack function depending on
- the storage order of array arguments.
- */
- int sd;
- int i;
- sd = ap->descr->elsize;
- for (i=0;i<ap->nd;++i) {
- if (ap->dimensions[i] == 0) return 1;
- if (ap->strides[i] != sd) return 0;
- sd *= ap->dimensions[i];
- }
- return 1;
-}
-
-static
-void lazy_transpose(PyArrayObject* arr) {
- /*
- Changes the order of array strides and dimensions. This
- corresponds to the lazy transpose of a Numeric array in-situ.
- Note that this function is assumed to be used even times for a
- given array. Otherwise, the caller should set flags &= ~NPY_CONTIGUOUS.
- */
- int rank,i,s,j;
- rank = arr->nd;
- if (rank < 2) return;
-
- for(i=0,j=rank-1;i<rank/2;++i,--j) {
- s = arr->strides[i];
- arr->strides[i] = arr->strides[j];
- arr->strides[j] = s;
- s = arr->dimensions[i];
- arr->dimensions[i] = arr->dimensions[j];
- arr->dimensions[j] = s;
- }
-}
-
-static
-int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,int *dims) {
- /*
- This function fills in blanks (that are -1's) in dims list using
- the dimensions from arr. It also checks that non-blank dims will
- match with the corresponding values in arr dimensions.
- */
- const int arr_size = (arr->nd)?PyArray_Size((PyObject *)arr):1;
-
- if (rank > arr->nd) { /* [1,2] -> [[1],[2]]; 1 -> [[1]] */
- int new_size = 1;
- int free_axe = -1;
- int i;
- /* Fill dims where -1 or 0; check dimensions; calc new_size; */
- for(i=0;i<arr->nd;++i) {
- if (dims[i] >= 0) {
- if (dims[i]!=arr->dimensions[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
- i,dims[i],arr->dimensions[i]);
- return 1;
- }
- if (!dims[i]) dims[i] = 1;
- } else {
- dims[i] = arr->dimensions[i] ? arr->dimensions[i] : 1;
- }
- new_size *= dims[i];
- }
- for(i=arr->nd;i<rank;++i)
- if (dims[i]>1) {
- fprintf(stderr,"%d-th dimension must be %d but got 0 (not defined).\n",
- i,dims[i]);
- return 1;
- } else if (free_axe<0)
- free_axe = i;
- else
- dims[i] = 1;
- if (free_axe>=0) {
- dims[free_axe] = arr_size/new_size;
- new_size *= dims[free_axe];
- }
- if (new_size != arr_size) {
- fprintf(stderr,"confused: new_size=%d, arr_size=%d (maybe too many free"
- " indices)\n",new_size,arr_size);
- return 1;
- }
- } else {
- int i;
- for (i=rank;i<arr->nd;++i)
- if (arr->dimensions[i]>1) {
- fprintf(stderr,"too many axes: %d, expected rank=%d\n",arr->nd,rank);
- return 1;
- }
- for (i=0;i<rank;++i)
- if (dims[i]>=0) {
- if (arr->dimensions[i]!=dims[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %d but got %d\n",
- i,dims[i],arr->dimensions[i]);
- return 1;
- }
- if (!dims[i]) dims[i] = 1;
- } else
- dims[i] = arr->dimensions[i];
- }
- return 0;
-}
-
-/* End of file: array_from_pyobj.c */
diff --git a/doc/f2py/multiarray/bar.c b/doc/f2py/multiarray/bar.c
deleted file mode 100644
index 350636ea6..000000000
--- a/doc/f2py/multiarray/bar.c
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#include <stdio.h>
-
-void bar(int *a,int m,int n) {
- int i,j;
- printf("C:");
- printf("m=%d, n=%d\n",m,n);
- for (i=0;i<m;++i) {
- printf("Row %d:\n",i+1);
- for (j=0;j<n;++j)
- printf("a(i=%d,j=%d)=%d\n",i,j,a[n*i+j]);
- }
- if (m*n)
- a[0] = 7777;
-}
diff --git a/doc/f2py/multiarray/foo.f b/doc/f2py/multiarray/foo.f
deleted file mode 100644
index f8c39c4d1..000000000
--- a/doc/f2py/multiarray/foo.f
+++ /dev/null
@@ -1,13 +0,0 @@
- subroutine foo(a,m,n)
- integer a(m,n), m,n,i,j
- print*, "F77:"
- print*, "m=",m,", n=",n
- do 100,i=1,m
- print*, "Row ",i,":"
- do 50,j=1,n
- print*, "a(i=",i,",j=",j,") = ",a(i,j)
- 50 continue
- 100 continue
- if (m*n.gt.0) a(1,1) = 77777
- end
-
diff --git a/doc/f2py/multiarray/fortran_array_from_pyobj.txt b/doc/f2py/multiarray/fortran_array_from_pyobj.txt
deleted file mode 100644
index c7ca2d582..000000000
--- a/doc/f2py/multiarray/fortran_array_from_pyobj.txt
+++ /dev/null
@@ -1,284 +0,0 @@
-
- _____________________________________________________________
- / Proposed internal structure for f2py generated extension \
- < modules regarding the issues with different storage-orders >
- \ of multi-dimensional matrices in Fortran and C. /
- =============================================================
-
-Author: Pearu Peterson
-Date: 14 January, 2001
-
-Definitions:
-============
-
-In the following I will use the following definitions:
-
-1) A matrix is a mathematical object that represents a collection of
- objects (elements), usually visualized in a table form, and one can
- define a set of various (algebraic,etc) operations for matrices.
- One can think of a matrix as a definition of a certain mapping:
- (i) |--> A(i)
- where i belongs to the set of indices (an index itself can be a
- sequence of objects, for example, a sequence of integers) and A(i)
- is an element from a specified set, for example, a set of fruits.
- Symbol A then denotes a matrix of fruits.
-
-2) An array is a storage object that represents a collection of
- objects stored in a certain systematic way, for example, as an
- ordered sequence in computer memory.
-
-In order to manipulate matrices using computers, one must store matrix
-elements in computer memory. In the following, I will assume that the
-elements of a matrix is stored as an array. There is no unique way in
-which order one should save matrix elements in the array. However, in
-C and Fortran programming languages, two, unfortunately different,
-conventions are used.
-
-Aim:
-====
-
-The purpose of this writing is to work out an interface for Python
-language so that C and Fortran routines can be called without
-bothering about how multi-dimensional matrices are stored in memory.
-For example, accessing a matrix element A[i,j] in Python will be
-equivalent to accessing the same matrix in C, using A[i][j], or in
-Fortran, using A(i,j).
-
-External conditions:
-====================
-
-In C programming language, it is custom to think that matrices are
-stored in the so-called row-major order, that is, a matrix is stored
-row by row, each row is as a contiguous array in computer memory.
-
-In Fortran programming language, matrices are stored in the
-column-major order: each column is a contiguous array in computer
-memory.
-
-In Python programming language, matrices can be stored using Python
-Numeric array() function that uses internally C approach, that is,
-elements of matrices are stored in row-major order. For example,
-A = array([[1,2,3],[4,5,6]]) represents a 2-by-3 matrix
-
- / 1 2 3 \
- | |
- \ 4 5 6 /
-
-and its elements are stored in computer memory as the following array:
-
- 1 2 3 4 5 6
-
-The same matrix, if used in Fortran, would be stored in computer
-memory as the following array:
-
- 1 4 2 5 3 6
-
-Problem and solution:
-=====================
-
-A problem arises if one wants to use the same matrix both in C and in
-Fortran functions. Then the difference in storage order of a matrix
-elements must be taken into account. This technical detail can be very
-confusing even for an experienced programmer. This is because when
-passing a matrix to a Fortran subroutine, you must (mentally or
-programmically) transpose the matrix and when the subroutine returns,
-you must transpose it back.
-
-As will be discussed below, there is a way to overcome these
-difficulties in Python by creating an interface between Python and
-Fortran code layers that takes care of this transition internally. So
-that if you will read the manual pages of the Fortran codes, then you
-need not to think about how matrices are actually stored, the storage
-order will be the same, seemingly.
-
-Python / C / Fortran interface:
-===============================
-
-The interface between Python and Fortran codes will use the following
-Python Numeric feature: transposing a Numeric array does not involve
-copying of its data but just permuting the dimensions and strides of
-the array (the so-called lazy transpose).
-
-However, when passing a Numeric array data pointer to Fortran or C
-function, the data must be contiguous in memory. If it is not, then
-data is rearranged inplace. I don't think that it can be avoided.
-This is certainly a penalty hit to performance. However, one can
-easily avoid it by creating a Numeric array with the right storage
-order, so that after transposing, the array data will be contiguous in
-memory and the data pointer can safely passed on to the Fortran
-subroutine. This lazy-transpose operation will be done within the
-interface and users need not to bother about this detail anymore (that
-is, after they initialize Numeric array with matrix elements using the
-proper order. Of course, the proper order depends on the target
-function: C or Fortran). The interface should be smart enough to
-minimize the need of real-transpose operations and the need to
-additional memory storage as well.
-
-Statement of the problem:
-=========================
-
-Consider a M-by-N matrix A of integers, where M and N are the number A
-rows and columns, respectively.
-
-In Fortran language, the storing array of this matrix can be defined
-as follows:
-
- integer A(M,N)
-
-in C:
-
- int A[M][N];
-
-and in Python:
-
- A = Numeric.zeros((M,N),'i')
-
-Consider also the corresponding Fortran and C functions that
-that use matrix arguments:
-
-Fortran:
- subroutine FUN(A,M,N)
- integer A(M,N)
- ...
- end
-C:
- void cun(int *a,int m,int n) {
- ...
- }
-
-and the corresponding Python interface signatures:
-
- def py_fun(a):
- ...
- def py_cun(a):
- ...
-
-Main goal:
-==========
-
-Our goal is to generate Python C/API functions py_fun and py_cun such
-that their usage in Python would be identical. The crucial part of
-their implementation are in functions that take a PyObject and
-return a PyArrayObject such that it is contiguous and its data pointer
-is suitable for passing on to the arguments of C or Fortran functions.
-The prototypes of these functions are:
-
-PyArrayObject* fortran_array_from_pyobj(
- int typecode,
- int *dims,
- int rank,
- int intent,
- PyObject *obj);
-
-and
-
-PyArrayObject* c_array_from_pyobj(
- int typecode,
- int *dims,
- int rank,
- int intent,
- PyObject *obj);
-
-for wrapping Fortran and C functions, respectively.
-
-Pseudo-code for fortran_array_from_pyobj:
-=========================================
-
-if type(obj) is ArrayType:
- #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
- if obj.typecode is typecode:
- if is_contiguous(obj):
- transpose_data_inplace(obj) # real-transpose
- set_transpose_strides(obj) # lazy-transpose
- Py_INCREF(obj);
- return obj
- set_transpose_strides(obj)
- if is_contiguous(obj):
- set_transpose_strides(obj)
- Py_INCREF(obj);
- return obj
- else:
- tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
- swap_datapointer_and_typeinfo(obj,tmp_obj)
- Py_DECREF(tmp_obj);
- set_transpose_strides(obj)
- Py_INCREF(obj);
- return obj
- else:
- tmp_obj = PyArray_FromDims(rank,dims,typecode)
- set_transpose_strides(tmp_obj)
- if intent in [in,inout]:
- copy_ND_array(obj,tmp_obj)
- swap_datapointer_and_typeinfo(obj,tmp_obj)
- Py_DECREF(tmp_obj);
- Py_INCREF(obj);
- return obj
-elif obj is None: # happens when only intent is 'hide'
- tmp_obj = PyArray_FromDims(rank,dims,typecode)
- if intent is out:
- set_transpose_strides(tmp_obj)
- # otherwise tmp_obj->data is used as a work array
- Py_INCREF(tmp_obj)
- return tmp_obj
-else:
- tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
- #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
- set_transpose_strides(tmp_obj)
- transpose_data_inplace(tmp_obj)
- Py_INCREF(tmp_obj)
- return tmp_obj
-
-Notes:
- 1) CPU expensive tasks are in transpose_data_inplace and
- copy_ND_array, PyArray_ContiguousFromObject.
- 2) Memory expensive tasks are in PyArray_FromDims,
- PyArray_ContiguousFromObject
- 3) Side-effects are expected when set_transpose_strides and
- transpose_data_inplace are used. For example:
- >>> a = Numeric([[1,2,3],[4,5,6]],'d')
- >>> a.is_contiguous()
- 1
- >>> py_fun(a)
- >>> a.typecode()
- 'i'
- >>> a.is_contiguous()
- 0
- >>> transpose(a).is_contiguous()
- 1
-
-Pseudo-code for c_array_from_pyobj:
-===================================
-
-if type(obj) is ArrayType:
- #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
- if obj.typecode is typecode:
- if is_contiguous(obj):
- Py_INCREF(obj);
- return obj
- else:
- tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
- swap_datapointer_and_typeinfo(obj,tmp_obj)
- Py_DECREF(tmp_obj);
- Py_INCREF(obj);
- return obj
- else:
- tmp_obj = PyArray_FromDims(rank,dims,typecode)
- if intent in [in,inout]:
- copy_ND_array(obj,tmp_obj)
- swap_datapointer_and_typeinfo(obj,tmp_obj)
- Py_DECREF(tmp_obj);
- Py_INCREF(obj);
- return obj
-elif obj is None: # happens when only intent is 'hide'
- tmp_obj = PyArray_FromDims(rank,dims,typecode)
- Py_INCREF(tmp_obj)
- return tmp_obj
-else:
- tmp_obj = PyArray_ContiguousFromObject(obj,typecode,0,0)
- #raise not check(len(ravel(obj)) >= dims[0]*dims[1]*...*dims[rank-1])
- Py_INCREF(tmp_obj)
- return tmp_obj
-
-
-14 January, 2002
-Pearu Peterson <pearu@cens.ioc.ee>
diff --git a/doc/f2py/multiarray/fun.pyf b/doc/f2py/multiarray/fun.pyf
deleted file mode 100644
index ed5d1923f..000000000
--- a/doc/f2py/multiarray/fun.pyf
+++ /dev/null
@@ -1,89 +0,0 @@
-!%f90 -*- f90 -*-
-
-! Example:
-! Using f2py for wrapping multi-dimensional Fortran and C arrays
-! [NEW APPROACH, use it with f2py higher than 2.8.x]
-! $Id: fun.pyf,v 1.3 2002/01/18 10:06:50 pearu Exp $
-
-! Usage (with gcc compiler):
-! f2py -c fun.pyf foo.f bar.c
-
-python module fun ! in
- interface ! in :fun
-
-! >>> from Numeric import *
-! >>> import fun
-! >>> a=array([[1,2,3],[4,5,6]])
-
- subroutine foo(a,m,n) ! in :fun:foo.f
- integer dimension(m,n) :: a
- intent(in,out,copy) :: a
- integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
- integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
- end subroutine foo
-
-! >>> print fun.foo.__doc__
-! foo - Function signature:
-! a = foo(a,[m,n])
-! Required arguments:
-! a : input rank-2 array('i') with bounds (m,n)
-! Optional arguments:
-! m := shape(a,0) input int
-! n := shape(a,1) input int
-! Return objects:
-! a : rank-2 array('i') with bounds (m,n)
-
-! >>> print fun.foo(a)
-! F77:
-! m= 2, n= 3
-! Row 1:
-! a(i= 1,j= 1) = 1
-! a(i= 1,j= 2) = 2
-! a(i= 1,j= 3) = 3
-! Row 2:
-! a(i= 2,j= 1) = 4
-! a(i= 2,j= 2) = 5
-! a(i= 2,j= 3) = 6
-! [[77777 2 3]
-! [ 4 5 6]]
-
-
- subroutine bar(a,m,n)
- intent(c)
- intent(c) bar
- integer dimension(m,n) :: a
- intent(in,out) :: a
- integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
- integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
- intent(in) m,n
- end subroutine bar
-
-! >>> print fun.bar.__doc__
-! bar - Function signature:
-! a = bar(a,[m,n])
-! Required arguments:
-! a : input rank-2 array('i') with bounds (m,n)
-! Optional arguments:
-! m := shape(a,0) input int
-! n := shape(a,1) input int
-! Return objects:
-! a : rank-2 array('i') with bounds (m,n)
-
-! >>> print fun.bar(a)
-! C:m=2, n=3
-! Row 1:
-! a(i=0,j=0)=1
-! a(i=0,j=1)=2
-! a(i=0,j=2)=3
-! Row 2:
-! a(i=1,j=0)=4
-! a(i=1,j=1)=5
-! a(i=1,j=2)=6
-! [[7777 2 3]
-! [ 4 5 6]]
-
- end interface
-end python module fun
-
-! This file was auto-generated with f2py (version:2.9.166).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/multiarray/run.pyf b/doc/f2py/multiarray/run.pyf
deleted file mode 100644
index bb12a439b..000000000
--- a/doc/f2py/multiarray/run.pyf
+++ /dev/null
@@ -1,91 +0,0 @@
-!%f90 -*- f90 -*-
-
-! Example:
-! Using f2py for wrapping multi-dimensional Fortran and C arrays
-! [OLD APPROACH, do not use it with f2py higher than 2.8.x]
-! $Id: run.pyf,v 1.1 2002/01/14 15:49:46 pearu Exp $
-
-! Usage (with gcc compiler):
-! f2py -c run.pyf foo.f bar.c -DNO_APPEND_FORTRAN
-
-python module run ! in
- interface ! in :run
-
-! >>> from Numeric import *
-! >>> import run
-! >>> a=array([[1,2,3],[4,5,6]],'i')
-
- subroutine foo(a,m,n)
- fortranname foo_
- integer dimension(m,n) :: a
- integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
- integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
- end subroutine foo
-
-! >>> print run.foo.__doc__
-! foo - Function signature:
-! foo(a,[m,n])
-! Required arguments:
-! a : input rank-2 array('i') with bounds (n,m)
-! Optional arguments:
-! m := shape(a,1) input int
-! n := shape(a,0) input int
-
-! >>> run.foo(a)
-! F77:
-! m= 3, n= 2
-! Row 1:
-! a(i= 1,j= 1) = 1
-! a(i= 1,j= 2) = 4
-! Row 2:
-! a(i= 2,j= 1) = 2
-! a(i= 2,j= 2) = 5
-! Row 3:
-! a(i= 3,j= 1) = 3
-! a(i= 3,j= 2) = 6
-
-! >>> run.foo(transpose(a))
-! F77:
-! m= 2, n= 3
-! Row 1:
-! a(i= 1,j= 1) = 1
-! a(i= 1,j= 2) = 2
-! a(i= 1,j= 3) = 3
-! Row 2:
-! a(i= 2,j= 1) = 4
-! a(i= 2,j= 2) = 5
-! a(i= 2,j= 3) = 6
-
- subroutine bar(a,m,n)
- intent(c)
- integer dimension(m,n) :: a
- integer optional,check(shape(a,0)==m),depend(a) :: m=shape(a,0)
- integer optional,check(shape(a,1)==n),depend(a) :: n=shape(a,1)
- end subroutine bar
-
-! >>> print run.bar.__doc__
-! bar - Function signature:
-! bar(a,[m,n])
-! Required arguments:
-! a : rank-2 array('i') with bounds (m,n)
-! Optional arguments:
-! m := shape(a,0) int
-! n := shape(a,1) int
-
-! >>> run.bar(a)
-! C:m=2, n=3
-! Row 1:
-! a(i=0,j=0)=1
-! a(i=0,j=1)=2
-! a(i=0,j=2)=3
-! Row 2:
-! a(i=1,j=0)=4
-! a(i=1,j=1)=5
-! a(i=1,j=2)=6
-
-
- end interface
-end python module run
-
-! This file was auto-generated with f2py (version:2.8.172).
-! See http://cens.ioc.ee/projects/f2py2e/
diff --git a/doc/f2py/multiarray/transpose.txt b/doc/f2py/multiarray/transpose.txt
deleted file mode 100644
index b04152418..000000000
--- a/doc/f2py/multiarray/transpose.txt
+++ /dev/null
@@ -1,1126 +0,0 @@
-From: Phil Garner (garner@signal.dra.hmg.gb)
- Subject: In place matrix transpose
- Newsgroups: sci.math.num-analysis
- Date: 1993-08-05 06:35:06 PST
-
-
-Someone was talking about matrix transposes earlier on. It's a
-curious subject. I found that an in-place transpose is about 12 times
-slower than the trivial copying method.
-
-Here's somthing I nicked from netlib and translated into C to do the
-in-place one for those that are interested: (matrix must be in one
-block)
-
-
-typedef float scalar; /* float -> double for double precision */
-
-/*
- * In Place Matrix Transpose
- * From: Algorithm 380 collected algorithms from ACM.
- * Converted to C by Phil Garner
- *
- * Algorithm appeared in comm. ACM, vol. 13, no. 05,
- * p. 324.
- */
-int trans(scalar *a, unsigned m, unsigned n, int *move, int iwrk)
-{
- scalar b;
- int i, j, k, i1, i2, ia, ib, ncount, kmi, Max, mn;
-
- /*
- * a is a one-dimensional array of length mn=m*n, which
- * contains the m by n matrix to be transposed.
- * move is a one-dimensional array of length iwrk
- * used to store information to speed up the process. the
- * value iwrk=(m+n)/2 is recommended. Return val indicates the
- * success or failure of the routine.
- * normal return = 0
- * errors
- * -2, iwrk negative or zero.
- * ret > 0, (should never occur). in this case
- * we set ret equal to the final value of i when the search
- * is completed but some loops have not been moved.
- * check arguments and initialise
- */
-
- /* Function Body */
- if (n < 2 || m < 2)
- return 0;
- if (iwrk < 1)
- return -2;
-
- /* If matrix is square, exchange elements a(i,j) and a(j,i). */
- if (n == m)
- {
- for (i = 0; i < m - 1; ++i)
- for (j = i + 1; j < m; ++j)
- {
- i1 = i + j * m;
- i2 = j + i * m;
- b = a[i1];
- a[i1] = a[i2];
- a[i2] = b;
- } return 0;
- }
-
- /* Non square matrix */
- ncount = 2;
- for (i = 0; i < iwrk; ++i)
- move[i] = 0;
-
- if (n > 2)
- /* Count number,ncount, of single points. */
- for (ia = 1; ia < n - 1; ++ia)
- {
- ib = ia * (m - 1) / (n - 1);
- if (ia * (m - 1) != ib * (n - 1))
- continue;
- ++ncount;
- i = ia * m + ib;
- if (i > iwrk)
- continue;
- move[i] = 1;
- }
-
- /* Set initial values for search. */
- mn = m * n;
- k = mn - 1;
- kmi = k - 1;
- Max = mn;
- i = 1;
-
- while (1)
- {
- /* Rearrange elements of a loop. */
- /* At least one loop must be re-arranged. */
- i1 = i;
- while (1)
- {
- b = a[i1];
- while (1)
- {
- i2 = n * i1 - k * (i1 / m);
- if (i1 <= iwrk)
- move[i1 - 1] = 2;
- ++ncount;
- if (i2 == i || i2 >= kmi)
- {
- if (Max == kmi || i2 == i)
- break;
- Max = kmi;
- }
- a[i1] = a[i2];
- i1 = i2;
- }
-
- /* Test for symmetric pair of loops. */
- a[i1] = b;
- if (ncount >= mn)
- return 0;
- if (i2 == Max || Max == kmi)
- break;
- Max = kmi;
- i1 = Max;
- }
-
- /* Search for loops to be rearranged. */
- while (1)
- {
- Max = k - i;
- ++i;
- kmi = k - i;
- if (i > Max)
- return i;
- if (i <= iwrk)
- {
- if (move[i-1] < 1)
- break;
- continue;
- }
- if (i == n * i - k * (i / m))
- continue;
- i1 = i;
- while (1)
- {
- i2 = n * i1 - k * (i1 / m);
- if (i2 <= i || i2 >= Max)
- break;
- i1 = i2;
- }
- if (i2 == i)
- break;
- }
- } /* End never reached */
-}
-
---
- ,----------------------------- ______
- ____ | Phil Garner. \___| |/ \ \ ____
-/__/ `--, _L__L\_ | garner@signal.dra.hmg.gb | _|`---' \_/__/ `--,
-`-0---0-' `-0--0-' `--OO-------------------O-----' `---0---' `-0---0-'
-
- From: Murray Dow (mld900@anusf.anu.edu.au)
- Subject: Re: In place matrix transpose
- Newsgroups: sci.math.num-analysis
- Date: 1993-08-09 19:45:57 PST
-
-
-In article <23qmp3INN3gl@mentor.dra.hmg.gb>, garner@signal.dra.hmg.gb (Phil Garner) writes:
-|> Someone was talking about matrix transposes earlier on. It's a
-|> curious subject. I found that an in-place transpose is about 12 times
-|> slower than the trivial copying method.
-|>
-
-Algorithm 380 from CACM is sloweer than ALG 467. Here are my times
-from a VP2200 vector computer. Note that the CACM algorithms are scalar.
-Times are in seconds, for a 900*904 matrix:
-
-380 NAG 467 disc copy
-1.03 1.14 .391 .177
-
-Compare two vector algorithms, one I wrote and the second a matrix
-copy:
-
-My Alg Matrix copy
-.0095 .0097
-
-Conclusions: dont use Alg 380 from Netlib. If you have the available memory,
-do a matrix copy. If you don't have the memory, I will send you my algorithm
-when I have published it.
---
-Murray Dow GPO Box 4 Canberra ACT 2601 Australia
-Supercomputer Facility Phone: +61 6 2495028
-Australian National University Fax: +61 6 2473425
-mld900@anusf.anu.edu.au
-
-=============================================================================
-
-From: Mark Smotherman (mark@hubcap.clemson.edu)
- Subject: Matrix transpose benchmark [was Re: MIPS R8000 == TFP?]
- Newsgroups: comp.arch, comp.benchmarks, comp.sys.super
- Date: 1994-07-01 06:35:51 PST
-
-
-mccalpin@perelandra.cms.udel.edu (John D. McCalpin) writes:
-
->
->Of course, these results are all for the naive algorithm. I would be
->interested to see what an efficient blocked algorithm looks like.
->Anyone care to offer one? There is clearly a lot of performance
->to be gained by the effort....
-
-Here is a matrix transpose benchmark generator. Enter something like
-
- 10d10eij;
-
-and you get a benchmark program with tiles of size 10 for the i and j
-inner loops. Please email code improvements and flames.
-
-Enjoy!
-
-
-/*---------------------------------------------------------------------------
-
- Matrix Transpose Generator
-
- Copyright 1993, Dept. of Computer Science, Clemson University
-
- Permission to use, copy, modify, and distribute this software and
- its documentation for any purpose and without fee is hereby granted,
- provided that the above copyright notice appears in all copies.
-
- Clemson University and its Dept. of Computer Science make no
- representations about the suitability of this software for any
- purpose. It is provided "as is" without express or implied warranty.
-
- Original author: Mark Smotherman
-
- -------------------------------------------------------------------------*/
-
-
-/* tpgen.c version 1.0
- *
- * generate a matrix transpose loop nest, with tiling and unrolling
- * (timing code using getrusage is included in the generated program)
- *
- * mark smotherman
- * mark@cs.clemson.edu
- * clemson university
- * 9 july 1993
- *
- * a loop nest can be described by the order of its loop indices, so
- * this program takes as input a simple language describing these indices:
- * <number>d ==> generate tiling loop for index i with step size of <number>
- * <number>e ==> generate tiling loop for index j with step size of <number>
- * <number>i ==> generate loop for index i with unrolling factor of <number>
- * <number>j ==> generate loop for index j with unrolling factor of <number>
- * ; ==> input terminator (required)
- * rules are:
- * i,j tokens must appear
- * if d appears, it must appear before i
- * if e appears, it must appear before j
- * ; must appear
- * matrix size is controlled by #define N in this program.
- *
- * this code was adapted from mmgen.c v1.2 and extended to generate pre-
- * condition loops for unrolling factors that do not evenly divide the
- * matrix size (or the tiling step size for loop nests with a tiling loop).
- * note that this program only provides a preconditioning loop for the
- * innermost loop. unrolling factors for non-innermost loops that do not
- * evenly divide the matrix size (or step size) are not supported.
- *
- * my interest in this program generator is to hook it to a sentence
- * generator and a minimum execution time finder, that is
- * while((sentence=sgen())!=NULL){
- * genprogram=tpgen(sentence);
- * system("cc -O4 genprogram.c");
- * system("a.out >> tpresults");
- * }
- * findmintime(tpresults);
- * this will find the optimum algorithm for the host system via an
- * exhaustive search.
- *
- * please report bugs and suggestions for enhancements to me.
- */
-
-#include <stdio.h>
-#include <string.h>
-#include <ctype.h>
-#define N 500
-
-#define ALLOC1 temp1=(struct line *)malloc(sizeof(struct line));\
-temp1->indentcnt=indentcnt;
-
-#define LINK1 temp1->next=insertbefore;\
-insertafter->next=temp1;\
-insertafter=temp1;
-
-#define INSERT1 temp1->next=start;\
-start=temp1;
-
-#define ALLOC2 temp1=(struct line *)malloc(sizeof(struct line));\
-temp2=(struct line *)malloc(sizeof(struct line));\
-temp1->indentcnt=indentcnt;\
-temp2->indentcnt=indentcnt++;
-
-#define LINK2 temp1->next=temp2;\
-temp2->next=insertbefore;\
-insertafter->next=temp1;\
-insertafter=temp1;\
-insertbefore=temp2;
-
-struct line{ int indentcnt; char line[256]; struct line *next; };
-
-int indentcnt;
-int iflag,jflag;
-int ijflag,jiflag;
-int dflag,eflag;
-int counter;
-int iistep,jjstep;
-int iunroll,junroll;
-int precond;
-
-char c;
-int i,ttp,nt;
-char *p0;
-char tptype[80];
-char number[10];
-
-struct line *start,*head,*insertafter,*insertbefore,*temp1,*temp2;
-
-void processloop();
-void processstmt();
-
-main(){
-
- indentcnt=0;
- iflag=jflag=0;
- ijflag=jiflag=0;
- dflag=eflag=0;
- iunroll=junroll=0;
- counter=1;
- precond=0;
- ttp=0;
-
- start=NULL;
- ALLOC2
- sprintf(temp1->line,"/* begin */\nt_start=second();\n");
- sprintf(temp2->line,"/* end */\nt_end = second();\n");
- head=temp1; temp1->next=temp2; temp2->next=NULL;
- insertafter=temp1; insertbefore=temp2;
-
- while((c=getchar())!=';'){
- tptype[ttp++]=c;
- if(isdigit(c)){
- nt=0;
- while(isdigit(c)){
- number[nt++]=c;
- c=getchar();
- if(c==';'){ fprintf(stderr,"unexpected ;!\n"); exit(1); }
- tptype[ttp++]=c;
- }
- number[nt]='\0';
- sscanf(number,"%d",&counter);
- }
- switch(c){
- case 'd':
- if(iflag){ fprintf(stderr,"d cannot appear after i!\n"); exit(1); }
- dflag++;
- ALLOC1
- sprintf(temp1->line,"#define IISTEP %d\n",counter);
- INSERT1
- iistep=counter;
- counter=1;
- ALLOC2
- sprintf(temp1->line,"for(ii=0;ii<%d;ii+=IISTEP){\n",N);
- sprintf(temp2->line,"}\n",N);
- LINK2
- ALLOC1
- sprintf(temp1->line,"it=min(ii+IISTEP,%d);\n",N);
- LINK1
- break;
- case 'e':
- if(jflag){ fprintf(stderr,"e cannot appear after j!\n"); exit(1); }
- eflag++;
- ALLOC1
- sprintf(temp1->line,"#define JJSTEP %d\n",counter);
- INSERT1
- jjstep=counter;
- counter=1;
- ALLOC2
- sprintf(temp1->line,"for(jj=0;jj<%d;jj+=JJSTEP){\n",N);
- sprintf(temp2->line,"}\n",N);
- LINK2
- ALLOC1
- sprintf(temp1->line,"jt=min(jj+JJSTEP,%d);\n",N);
- LINK1
- break;
- case 'i':
- iunroll=counter;
- counter=1;
- iflag++; if(jflag) jiflag++;
- if(dflag) precond=iistep%iunroll; else precond=N%iunroll;
- if(precond&&(jiflag==0)){
- fprintf(stderr,"unrolling factor for outer loop i\n");
- fprintf(stderr," does not evenly divide matrix/step size!\n");
- exit(1);
- }
- if(dflag&&(iunroll>1)&&(N%iistep)){
- fprintf(stderr,"with unrolling of i, step size for tiled loop ii\n");
- fprintf(stderr," does not evenly divide matrix size!\n");
- exit(1);
- }
- processloop('i',dflag,iunroll,precond,junroll);
- break;
- case 'j':
- junroll=counter;
- counter=1;
- jflag++; if(iflag) ijflag++;
- if(eflag) precond=jjstep%junroll; else precond=N%junroll;
- if(precond&&(ijflag==0)){
- fprintf(stderr,"unrolling factor for outer loop j\n");
- fprintf(stderr," does not evenly divide matrix/step size!\n");
- exit(1);
- }
- if(eflag&&(junroll>1)&&(N%jjstep)){
- fprintf(stderr,"with unrolling of j, step size for tiled loop jj\n");
- fprintf(stderr," does not evenly divide matrix size!\n");
- exit(1);
- }
- processloop('j',eflag,junroll,precond,iunroll);
- break;
- default: break;
- }
- }
- processstmt();
-
- tptype[ttp++]=c;
-
- if((iflag==0)||(jflag==0)){
- fprintf(stderr,
- "one of the loops (i,j) was not specified!\n");
- exit(1);
- }
-
- temp1=start;
- while(temp1!=NULL){
- printf("%s",temp1->line);
- temp1=temp1->next;
- }
- printf("#include <stdio.h>\n");
- printf("#include <sys/time.h>\n");
- printf("#include <sys/resource.h>\n");
- if(dflag|eflag) printf("#define min(a,b) ((a)<=(b)?(a):(b))\n");
- printf("double second();\n");
- printf("double t_start,t_end,t_total;\n");
- printf("int times;\n");
- printf("\ndouble b[%d][%d],dummy[10000],bt[%d][%d];\n\nmain(){\n"
- ,N,N,N,N);
- if(precond) printf(" int i,j,n;\n"); else printf(" int i,j;\n");
- if(dflag) printf(" int ii,it;\n");
- if(eflag) printf(" int jj,jt;\n");
- printf("/* set coefficients so that result matrix should have \n");
- printf(" * column entries equal to column index\n");
- printf(" */\n");
- printf(" for (i=0;i<%d;i++){\n",N);
- printf(" for (j=0;j<%d;j++){\n",N);
- printf(" b[i][j] = (double) i;\n");
- printf(" }\n");
- printf(" }\n");
- printf("\n t_total=0.0;\n for(times=0;times<10;times++){\n\n",N);
- printf("/* try to flush cache */\n");
- printf(" for(i=0;i<10000;i++){\n",N);
- printf(" dummy[i] = 0.0;\n");
- printf(" }\n");
- printf("%s",head->line);
- temp1=head->next;
- while(temp1!=NULL){
- for(i=0;i<temp1->indentcnt;i++) printf(" ");
- while((p0=strstr(temp1->line,"+0"))!=NULL){
- *p0++=' '; *p0=' ';
- }
- printf("%s",temp1->line);
- temp1=temp1->next;
- }
- printf("\n t_total+=t_end-t_start;\n }\n");
- printf("/* check result */\n");
- printf(" for (j=0;j<%d;j++){\n",N);
- printf(" for (i=0;i<%d;i++){\n",N);
- printf(" if (bt[i][j]!=((double)j)){\n");
- printf(" fprintf(stderr,\"error in bt[%cd][%cd]",'%','%');
- printf("\\n\",i,j);\n");
- printf(" fprintf(stderr,\" for %s\\n\");\n",tptype);
- printf(" exit(1);\n");
- printf(" }\n");
- printf(" }\n");
- printf(" }\n");
- tptype[ttp]='\0';
- printf(" printf(\"%c10.2f secs\",t_total);\n",'%');
- printf(" printf(\" for 10 runs of %s\\n\");\n",tptype);
- printf("}\n");
- printf("double second(){\n");
- printf(" void getrusage();\n");
- printf(" struct rusage ru;\n");
- printf(" double t;\n");
- printf(" getrusage(RUSAGE_SELF,&ru);\n");
- printf(" t = ((double)ru.ru_utime.tv_sec) +\n");
- printf(" ((double)ru.ru_utime.tv_usec)/1.0e6;\n");
- printf(" return t;\n");
- printf("}\n");
-
-}
-
-void processloop(index,flag,unroll,precond,unroll2)
-char index;
-int flag,unroll,precond,unroll2;
-{
- char build[80],temp[40];
- int n;
- if(precond){
- ALLOC1
- sprintf(temp1->line,"/* preconditioning loop for unrolling factor */\n");
- LINK1
- if(unroll2==1){
- build[0]='\0';
- if(flag){
- if(index='i')
- sprintf(temp,"n=IISTEP%c%d; ",'%',unroll);
- else
- sprintf(temp,"n=JJSTEP%c%d; ",'%',unroll);
- strcat(build,temp);
- sprintf(temp,"for(%c=%c%c;%c<%c%c+n;%c++) ",index,index,index,
- index,index,index,index);
- strcat(build,temp);
- }else{
- sprintf(temp,"n=%d%c%d; ",N,'%',unroll);
- strcat(build,temp);
- sprintf(temp,"for(%c=0;%c<n;%c++) ",index,index,index);
- strcat(build,temp);
- }
- sprintf(temp,"bt[i][j]=b[j][i];\n");
- strcat(build,temp);
- ALLOC1
- sprintf(temp1->line,"%s\n",build);
- LINK1
- }else{
- if(flag){
- ALLOC1
- if(index=='i')
- sprintf(temp1->line,"n=IISTEP%c%d;\n",'%',unroll);
- else
- sprintf(temp1->line,"n=JJSTEP%c%d;\n",'%',unroll);
- LINK1
- ALLOC1
- sprintf(temp1->line,"for(%c=%c%c;%c<%c%c+n;%c++){\n",index,index,index,
- index,index,index,index);
- LINK1
- }else{
- ALLOC1
- sprintf(temp1->line,"n=%d%c%d;\n",N,'%',unroll);
- LINK1
- ALLOC1
- sprintf(temp1->line,"for(%c=0;%c<n;%c++){\n",index,index,index);
- LINK1
- }
- if(index=='i'){
- for(n=0;n<unroll2;n++){
- ALLOC1
- sprintf(temp1->line," bt[i][j+%d]=b[j+%d][i];\n",n,n);
- LINK1
- }
- }else{
- for(n=0;n<unroll2;n++){
- ALLOC1
- sprintf(temp1->line," bt[i+%d][j]=b[j][i+%d];\n",n,n);
- LINK1
- }
- }
- ALLOC1
- sprintf(temp1->line,"}\n");
- LINK1
- }
- ALLOC2
- if(flag){
- sprintf(temp1->line,"for(%c=%c%c+n;%c<%ct;%c+=%d){\n",index,index,index,
- index,index,index,unroll);
- }else{
- sprintf(temp1->line,"for(%c=n;%c<%d;%c+=%d){\n",index,index,N,index,
- unroll);
- }
- sprintf(temp2->line,"}\n",N);
- LINK2
- }else{
- ALLOC2
- if(unroll==1){
- if(flag){
- sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c++){\n",index,index,index,
- index,index,index);
- }else{
- sprintf(temp1->line,"for(%c=0;%c<%d;%c++){\n",index,index,N,index);
- }
- }else{
- if(flag){
- sprintf(temp1->line,"for(%c=%c%c;%c<%ct;%c+=%d){\n",index,index,index,
- index,index,index,unroll);
- }else{
- sprintf(temp1->line,"for(%c=0;%c<%d;%c+=%d){\n",index,index,N,index,
- unroll);
- }
- }
- sprintf(temp2->line,"}\n",N);
- LINK2
- }
-}
-
-void processstmt()
-{
- int i,j;
- for(i=0;i<iunroll;i++){
- for(j=0;j<junroll;j++){
- ALLOC1
- sprintf(temp1->line,"bt[i+%d][j+%d]=b[j+%d][i+%d];\n",i,j,j,i);
- LINK1
- }
- }
-}
---
-Mark Smotherman, Computer Science Dept., Clemson University, Clemson, SC
-
-=======================================================================
-From: has (h.genceli@bre.com)
- Subject: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-If I have a matrix nrows x ncols, I can store it in a vector.
-so A(i,j) is really a[i*ncols+j]. So really TRANS of A
-(say B) is really is also a vector B where
-
-0<=i b[j*nrows+i] <nrows, 0<=j<ncols
-b[j*nrows+i] = a[i*ncols+j].
-
-Fine but I want to use only one array a to do this transformation.
-
-i.e a[j*nrows+i] = a[i*ncols+j]. this will itself
-erase some elements so each time a swap is necessary in a loop.
-
-temp = a[j*nrows+i]
-a[j*nrows+i] = a[i*ncols+j]
-a[i*ncols+j] = temp
-
-but still this will lose some info as it is, so indexing
-should have more intelligence in it ???? anybody
-can give me a lead here, thanks.
-
-Has
-
- From: wei-choon ng (wng@ux8.cso.uiuc.edu)
- Subject: Re: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-has <h.genceli@bre.com> wrote:
-> If I have a matrix nrows x ncols, I can store it in a vector.
-> so A(i,j) is really a[i*ncols+j]. So really TRANS of A
-> (say B) is really is also a vector B where
-
-[snip]
-
-Hey, if you just want to do a transpose-matrix vector multiply, there is
-no need to explicitly store the transpose matrix in another array and
-doubling the storage!
-
-W.C.
---
-
- From: Robin Becker (robin@jessikat.fsnet.co.uk)
- Subject: Re: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-In article <snr532fo3j1180@corp.supernews.com>, has <h.genceli@bre.com>
-writes
->If I have a matrix nrows x ncols, I can store it in a vector.
->so A(i,j) is really a[i*ncols+j]. So really TRANS of A
->(say B) is really is also a vector B where
->
->0<=i b[j*nrows+i] <nrows, 0<=j<ncols
->b[j*nrows+i] = a[i*ncols+j].
->
->Fine but I want to use only one array a to do this transformation.
->
->i.e a[j*nrows+i] = a[i*ncols+j]. this will itself
->erase some elements so each time a swap is necessary in a loop.
->
->temp = a[j*nrows+i]
->a[j*nrows+i] = a[i*ncols+j]
->a[i*ncols+j] = temp
->
->but still this will lose some info as it is, so indexing
->should have more intelligence in it ???? anybody
->can give me a lead here, thanks.
->
->Has
->
->
->
-
-void dmx_transpose(unsigned n, unsigned m, double* a, double* b)
-{
- unsigned size = m*n;
- if(b!=a){
- real *bmn, *aij, *anm;
- bmn = b + size; /*b+n*m*/
- anm = a + size;
- while(b<bmn) for(aij=a++;aij<anm; aij+=n ) *b++ = *aij;
- }
- else if(size>3){
- unsigned i,row,column,current;
- for(i=1, size -= 2;i<size;i++){
- current = i;
- do {
- /*current = row+n*column*/
- column = current/m;
- row = current%m;
- current = n*row + column;
- } while(current < i);
-
- if (current >i) {
- real temp = a[i];
- a[i] = a[current];
- a[current] = temp;
- }
- }
- }
-}
---
-Robin Becker
-
- From: E. Robert Tisdale (edwin@netwood.net)
- Subject: Re: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-Take a look at
-The C++ Scalar, Vector, Matrix and Tensor class library
-
- http://www.netwood.net/~edwin/svmt/
-
-<Type><System>SubVector&
- <Type><System>SubVector::transpose(Extent p, Extent q) {
- <Type><System>SubVector&
- v = *this;
- if (1 < p && 1 < q) {
- // A vector v of extent n = qp is viewed as a q by p matrix U and
- // a p by q matrix V where U_{ij} = v_{p*i+j} and V_{ij} = v_{q*i+j}.
- // The vector v is modified in-place so that V is the transpose of U.
- // The algorithm searches for every sequence k_s of S indices
- // such that a circular shift of elements v_{k_s} <-- v_{k_{s+1}}
- // and v_{k_{S-1}} <-- v_{k_0} effects an in-place transpose.
- Extent n = q*p;
- Extent m = 0; // count up to n-2
- Offset l = 0; // 1 <= l <= n-2
- while (++l < n-1 && m < n-2) {
- Offset k = l;
- Offset j = k;
- while (l < (k = (j%p)*q + j/p)) { // Search backward for k < l.
- j = k;
- }
- // If a sequence of indices beginning with l has any index k < l,
- // it has already been transposed. The sequence length S = 1
- // and diagonal element v_k is its own transpose if k = j.
- // Skip every index sequence that has already been transposed.
- if (k == l) { // a new sequence
- if (k < j) { // with 1 < S
- TYPE x = v[k]; // save v_{k_0}
- do {
- v[k] = v[j]; // v_{k_{s}} <-- v_{k_{s+1}}
- k = j;
- ++m;
- } while (l < (j = (k%q)*p + k/q));
- v[k] = x; // v_{k_{S-1}} <-- v_{k_0}
- }
- ++m;
- }
- }
- } return v;
- }
-
-
-
-<Type><System>SubVector&
-
-Read the rest of this message... (50 more lines)
-
- From: Victor Eijkhout (eijkhout@disco.cs.utk.edu)
- Subject: Re: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-"Alan Miller" <amiller @ vic.bigpond.net.au> writes:
-
-> The attached routine does an in situ transpose.
-> begin 666 Dtip.f90
-> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM
-
-Hm. F90? You're not silently allocating a temporary I hope?
-
-(Why did you have to encode this? Now I have to save, this decode, ...
-and all for plain ascii?)
-
---
-Victor Eijkhout
-"When I was coming up, [..] we knew exactly who the they were. It was us
-versus them, and it was clear who the them was were. Today, we are not
-so sure who the they are, but we know they're there." [G.W. Bush]
-
- From: Alan Miller (amiller_@_vic.bigpond.net.au)
- Subject: Re: transpose of a nxm matrix stored in a vector !!!
- Newsgroups: sci.math.num-analysis
- Date: 2000/07/25
-
-
-Victor Eijkhout wrote in message ...
->"Alan Miller" <amiller @ vic.bigpond.net.au> writes:
->
->> The attached routine does an in situ transpose.
->> begin 666 Dtip.f90
->> M4U5"4D]55$E.12!D=&EP("AA+"!N,2P@;C(L(&YD:6TI#0HA("TM+2TM+2TM
->
->Hm. F90? You're not silently allocating a temporary I hope?
->
->(Why did you have to encode this? Now I have to save, this decode, ...
->and all for plain ascii?)
->
-
-I know the problem.
-I sometimes use a Unix system, and have to use decode64 to read
-attachments. On the other hand, Windows wraps lines around,
-formats then and generally makes the code unreadable.
-
-The straight code for dtip (double transpose in place) is attached
-this time.
-
->--
->Victor Eijkhout
-
-
---
-Alan Miller, Retired Scientist (Statistician)
-CSIRO Mathematical & Information Sciences
-Alan.Miller -at- vic.cmis.csiro.au
-http://www.ozemail.com.au/~milleraj
-http://users.bigpond.net.au/amiller/
-
-
-=================================================================
-
-From: Darran Edmundson (dedmunds@sfu.ca)
- Subject: array reordering algorithm?
- Newsgroups: sci.math.num-analysis
- Date: 1995/04/30
-
-
-A code I've written refers to a complex array as two separate real arrays.
-However, I have a canned subroutine which expects a single array where the
-real and imaginary values alternate. Essentially I have a case of mismatched
-data structures, yet for reasons that I'd rather not go into, I'm stuck with them.
-
-Assuming that the two real arrays A and B are sequential in memory, and
-that the single array of alternating real/imaginary values C shares the same
-space, what I need is a porting subroutine that remaps the data from one format
-to the other - using as little space as possible.
-
-I think of the problem as follows. Imagine an array of dimension 10 containing
-the values 1,3,5,7,9,2,4,6,8,10 in this order.
-
- A(1) / 1 \ C(1)
- A(2) | 3 | C(2)
- A(3) | 5 | C(3)
- A(4) | 7 | C(4)
- A(5) \ 9 | C(5)
- |
- B(1) / 2 | C(6)
- B(2) | 4 | C(7)
- B(3) | 6 | C(8)
- B(4) | 8 | C(9)
- B(5) \ 10 / C(10)
-
-Given that I know this initial pattern, I want to sort the array C in-place *without
-making comparisons*. That is, the algorithm can only depend on the initial
-knowledge of the pattern. Do you see what a sort is going to do? It will
-make the A and B arrays alternate, i.e. C(1)=A(1), C(2)=B(1), C(3)=A(2),
-C(4)=B(2), etc. It's not a real sort though because I can't actually refer to the
-values above (i.e. no comparisons) because A and B will be holding real data,
-not this contrived pattern. The pattern above exists though - it's the
-natural ordering in memory of A and B.
-
-Either pair swapping only or a small amount of workspace can be used. The
-in-place is important - imagine scaling this problem up to an
-array of 32 or 64 million double precision values and you can easily see how
-duplicating the array is not a feasible solution.
-
-Any ideas? I've been stumped on this for a day and a half now.
-
-Darran Edmundson
-dedmunds@sfu.ca
-
- From: Roger Critchlow (rec@elf115.elf.org)
- Subject: Re: array reordering algorithm?
- Newsgroups: sci.math.num-analysis
- Date: 1995/04/30
-
-
- Any ideas? I've been stumped on this for a day and a half now.
-
-Here's some code for in situ permutations of arrays that I wrote
-a few years ago. It all started from the in situ transposition
-algorithms in the Collected Algorithms of the ACM, the references
-for which always get lost during the decryption from fortran.
-
-This is the minimum space algorithm. All you need to supply is
-a function which computes the new order array index from the old
-order array index.
-
-If you can spare n*m bits to record the indexes of elements which
-have been permuted, then you can speed things up.
-
--- rec --
-
-------------------------------------------------------------------------
-/*
-** Arbitrary in situ permutations of an m by n array of base type TYPE.
-** Copyright 1995 by Roger E Critchlow Jr, rec@elf.org, San Francisco, CA.
-** Fair use permitted, caveat emptor.
-*/
-typedef int TYPE;
-
-int transposition(int ij, int m, int n) /* transposition about diagonal from upper left to lower right */
-{ return ((ij%m)*n+ (ij/m)); }
-
-int countertrans(int ij, int m, int n) /* transposition about diagonal from upper right to lower left */
-{ return ((m-1-(ij%m))*n+ (n-1-(ij/m))); }
-
-int rotate90cw(int ij, int m, int n) /* 90 degree clockwise rotation */
-{ return ((m-1-(ij%m))*n+ (ij/m)); }
-
-int rotate90ccw(int ij, int m, int n) /* 90 degree counter clockwise rotation */
-{ return ((ij%m)*n+ (n-1-(ij/m))); }
-
-int rotate180(int ij, int m, int n) /* 180 degree rotation */
-{ return ((m-1-(ij/n))*n+ (n-1-(ij%n))); }
-
-int reflecth(int ij, int m, int n) /* reflection across horizontal plane */
-{ return ((m-1-(ij/n))*n+ (ij%n)); }
-
-int reflectv(int ij, int m, int n) /* reflection across vertical plane */
-{ return ((ij/n)*n+ (n-1-(ij%n))); }
-
-int in_situ_permutation(TYPE a[], int m, int n, int (*origination)(int ij, int m, int n))
-{
- int ij, oij, dij, n_to_do;
- TYPE b;
- n_to_do = m*n;
- for (ij = 0; ij < m*n && n_to_do > 0; ij += 1) {
- /* Test for previously permuted */
- for (oij = origination(ij,m,n); oij > ij; oij = origination(oij,m,n))
- ;
- if (oij < ij)
- continue;
- /* Chase the cycle */
- dij = ij;
- b = a[ij];
- for (oij = origination(dij,m,n); oij != ij; oij = origination(dij,m,n)) {
- a[dij] = a[oij];
- dij = oij;
- n_to_do -= 1;
- }
- a[dij] = b;
- n_to_do -= 1;
- } return 0;
-}
-
-#define TESTING 1
-#if TESTING
-
-/* fill a matrix with sequential numbers, row major ordering */
-void fill_matrix_rows(a, m, n) TYPE *a; int m, n;
-{
- int i, j;
- for (i = 0; i < m; i += 1)
- for (j = 0; j < n; j += 1)
- a[i*n+j] = i*n+j;
-}
-
-/* fill a matrix with sequential numbers, column major ordering */
-void fill_matrix_cols(a, m, n) TYPE *a; int m, n;
-{
- int i, j;
- for (i = 0; i < m; i += 1)
- for (j = 0; j < n; j += 1)
- a[i*n+j] = j*m+i;
-}
-
-/* test a matrix for sequential numbers, row major ordering */
-int test_matrix_rows(a, m, n) TYPE *a; int m, n;
-{
- int i, j, o;
- for (o = i = 0; i < m; i += 1)
- for (j = 0; j < n; j += 1)
- o += a[i*n+j] != i*n+j;
- return o;
-}
-
-/* test a matrix for sequential numbers, column major ordering */
-int test_matrix_cols(a, m, n) TYPE *a; int m, n;
-{
- int i, j, o;
- for (o = i = 0; i < m; i += 1)
- for (j = 0; j < n; j += 1)
- o += a[i*n+j] != j*m+i;
- return o;
-}
-
-/* print a matrix */
-void print_matrix(a, m, n) TYPE *a; int m, n;
-{
- char *format;
- int i, j;
- if (m*n < 10) format = "%2d";
- if (m*n < 100) format = "%3d";
- if (m*n < 1000) format = "%4d";
- if (m*n < 10000) format = "%5d";
- for (i = 0; i < m; i += 1) {
- for (j = 0; j < n; j += 1)
- printf(format, a[i*n+j]);
- printf("\n");
- }
-}
-
-#if TEST_TRANSPOSE
-#define MAXSIZE 1000
-
-main()
-{
- int i, j, m, n, o;
- TYPE a[MAXSIZE];
- for (m = 1; m < sizeof(a)/sizeof(a[0]); m += 1)
- for (n = 1; m*n < sizeof(a)/sizeof(a[0]); n += 1) {
- fill_matrix_rows(a, m, n); /* {0 1} {2 3} */
- if (o = transpose(a, m, n))
- printf(">> transpose returned %d for a[%d][%d], row major\n", o, m, n);
- if ((o = test_matrix_cols(a, n, m)) != 0) /* {0 2} {1 3} */
- printf(">> transpose made %d mistakes for a[%d][%d], row major\n", o, m, n);
- /* column major */
- fill_matrix_rows(a, m, n);
- if (o = transpose(a, m, n))
- printf(">> transpose returned %d for a[%d][%d], column major\n", o, m, n);
- if ((o = test_matrix_cols(a, n, m)) != 0)
- printf(">> transpose made %d mistakes for a[%d][%d], column major\n", o, m, n);
- } return 0;
-}
-#endif /* TEST_TRANSPOSE */
-
-
-#define TEST_DISPLAY 1
-#if TEST_DISPLAY
-main(argc, argv) int argc; char *argv[];
-{
- TYPE *a;
- int m = 5, n = 5;
- extern void *malloc();
- if (argc > 1) {
- m = atoi(argv[1]);
- if (argc > 2)
- n = atoi(argv[2]);
- }
- a = malloc(m*n*sizeof(TYPE));
-
- printf("matrix\n");
- fill_matrix_rows(a, m, n);
- print_matrix(a, m, n);
- printf("transposition\n");
- in_situ_permutation(a, m, n, transposition);
- print_matrix(a, n, m);
-
- printf("counter transposition\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, countertrans);
- print_matrix(a, n, m);
-
- printf("rotate 90 degrees clockwise\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, rotate90cw);
- print_matrix(a, n, m);
-
- printf("rotate 90 degrees counterclockwise\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, rotate90ccw);
- print_matrix(a, n, m);
-
- printf("rotate 180 degrees\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, rotate180);
- print_matrix(a, m, n);
-
- printf("reflect across horizontal\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, reflecth);
- print_matrix(a, m, n);
-
- printf("reflect across vertical\n");
- fill_matrix_rows(a, m, n);
- in_situ_permutation(a, m, n, reflectv);
- print_matrix(a, m, n);
-
- return 0;
-}
-
-#endif
-#endif
diff --git a/doc/f2py/multiarrays.txt b/doc/f2py/multiarrays.txt
deleted file mode 100644
index a4bd72cb5..000000000
--- a/doc/f2py/multiarrays.txt
+++ /dev/null
@@ -1,119 +0,0 @@
-From pearu@ioc.ee Thu Dec 30 09:58:01 1999
-Date: Fri, 26 Nov 1999 12:02:42 +0200 (EET)
-From: Pearu Peterson <pearu@ioc.ee>
-To: Users of f2py2e -- Curtis Jensen <cjensen@be-research.ucsd.edu>,
- Vladimir Janku <vjanku@kvet.sk>,
- Travis Oliphant <Oliphant.Travis@mayo.edu>
-Subject: Multidimensional arrays in f2py2e
-
-
-Hi!
-
-Below I will describe how f2py2e wraps Fortran multidimensional arrays as
-it constantly causes confusion. As for example, consider Fortran code
-
- subroutine foo(l,m,n,a)
- integer l,m,n
- real*8 a(l,m,n)
- ..
- end
-Running f2py2e with -h flag, it generates the following signature
-
-subroutine foo(l,m,n,a)
- integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2)
- integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
- integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
- real*8 dimension(l,m,n),check(rank(a)==3) :: a
-end subroutine foo
-
-where parameters l,m,n are considered optional and they are initialized in
-Python C/API code using the array a. Note that a can be also a proper
-list, that is, asarray(a) should result in a rank-3 array. But then there
-is an automatic restriction that elements of a (in Python) are not
-changeable (in place) even if Fortran subroutine changes the array a (in
-C,Fortran).
-
-Hint: you can attribute the array a with 'intent(out)' which causes foo to
-return the array a (in Python) if you are to lazy to define a=asarray(a)
-before the call to foo (in Python).
-
-Calling f2py2e without the switch -h, a Python C/API module will be
-generated. After compiling it and importing it to Python
->>> print foo.__doc__
-shows
-None = foo(a,l=shape(a,2),m=shape(a,1),n=shape(a,0))
-
-You will notice that f2py2e has changed the order of arguments putting the
-optional ones at the end of the argument list.
-Now, you have to be careful when specifying the parameters l,m,n (though
-situations where you need this should be rare). A proper definition
-of the array a should be, say
-
- a = zeros(n,m,l)
-
-Note that the dimensions l,m,n are in reverse, that is, the array a should
-be transposed when feeding it to the wrapper.
-
-Hint (and a performance hit): To be always consistent with fortran
-arrays, you can define, for example
- a = zeros(l,m,n)
-and call from Python
- foo(transpose(a),l,m,n)
-which is equivalent with the given Fortran call
- call foo(l,m,n,a)
-
-Another hint (not recommended, though): If you don't like optional
-arguments feature at all and want to be strictly consistent with Fortran
-signature, that is, you want to call foo from Python as
- foo(l,m,n,a)
-then you should edit the signature to
-subroutine foo(l,m,n,a)
- integer :: l
- integer :: m
- integer :: n
- real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), &
- check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a
-end
-Important! Note that now the array a should depend on l,m,n
-so that the checks can be performed in the proper order.
-(you cannot check, say, shape(a,2)==l before initializing a or l)
-(There are other ways to edit the signature in order to get the same
-effect but they are not so safe and I will not discuss about them here).
-
-Hint: If the array a should be a work array (as used frequently in
-Fortran) and you a too lazy (its good laziness;) to provide it (in Python)
-then you can define it as optional by ediding the signature:
-subroutine foo(l,m,n,a)
- integer :: l
- integer :: m
- integer :: n
- real*8 dimension(l,m,n),check(rank(a)==3),depend(l,m,n), &
- check(shape(a,2)==l,shape(a,1)==m,shape(a,0)==n):: a
- optional a
-end
-Note again that the array a must depend on l,m,n. Then the array a will be
-allocated in the Python C/API module. Not also that
->>> print foo.__doc__
-shows then
-None = foo(l,m,n,a=)
-Performance hint: If you call the given foo lots of times from Python then
-you don't want to allocate/deallocate the memory in each call. So, it is
-then recommended to define a temporary array in Python, for instance
->>> tmp = zeros(n,m,l)
->>> for i in ...:
->>> foo(l,m,n,a=tmp)
-
-Important! It is not good at all to define
- >>> tmp = transpose(zeros(l,m,n))
-because tmp will be then a noncontiguous array and there will be a
-huge performance hit as in Python C/API a new array will be allocated and
-also a copying of arrays will be performed elementwise!
-But
- >>> tmp = asarray(transpose(zeros(l,m,n)))
-is still ok.
-
-I hope that the above answers lots of your (possible) questions about
-wrapping Fortran multidimensional arrays with f2py2e.
-
-Regards,
- Pearu
diff --git a/doc/f2py/notes.tex b/doc/f2py/notes.tex
deleted file mode 100644
index 2746b049d..000000000
--- a/doc/f2py/notes.tex
+++ /dev/null
@@ -1,310 +0,0 @@
-
-\section{Calling wrapper functions from Python}
-\label{sec:notes}
-
-\subsection{Scalar arguments}
-\label{sec:scalars}
-
-In general, for a scalar argument you can pass in, in
-addition to ordinary Python scalars (like integers, floats, complex
-values) also arbitrary sequence objects (lists, arrays, strings) ---
-then the first element of a sequence is passed in to the Fortran routine.
-
-It is recommended that you always pass in scalars of required type. This
-ensures the correctness as no type-casting is needed.
-However, no exception is raised if type-casting would produce
-inaccurate or incorrect results! For example, in place of an expected
-complex value you can give an integer, or vice-versa (in the latter case only
-a rounded real part of the complex value will be used).
-
-If the argument is \texttt{intent(inout)} then Fortran routine can change the
-value ``in place'' only if you pass in a sequence object, for
-instance, rank-0 array. Also make sure that the type of an array is of
-correct type. Otherwise type-casting will be performed and you may
-get inaccurate or incorrect results. The following example illustrates this
-\begin{verbatim}
->>> a = array(0)
->>> calculate_pi(a)
->>> print a
-3
-\end{verbatim}
-
-If you pass in an ordinary Python scalar in place of
-\texttt{intent(inout)} variable, it will be used as an input argument
-since
-Python
-scalars cannot be changed ``in place'' (all Python scalars
-are immutable objects).
-
-\subsection{String arguments}
-\label{sec:strings}
-
-You can pass in strings of arbitrary length. If the length is greater than
-required, only a required part of the string is used. If the length
-is smaller than required, additional memory is allocated and filled
-with `\texttt{\bs0}'s.
-
-Because Python strings are immutable, \texttt{intent(inout)} argument
-expects an array version of a string --- an array of chars:
-\texttt{array("<string>")}.
-Otherwise, the change ``in place'' has no effect.
-
-
-\subsection{Array arguments}
-\label{sec:arrays}
-
-If the size of an array is relatively large, it is \emph{highly
- recommended} that you pass in arrays of required type. Otherwise,
-type-casting will be performed which includes the creation of new
-arrays and their copying. If the argument is also
-\texttt{intent(inout)}, the wasted time is doubled. So, pass in arrays
-of required type!
-
-On the other hand, there are situations where it is perfectly all
-right to ignore this recommendation: if the size of an array is
-relatively small or the actual time spent in Fortran routine takes
-much longer than copying an array. Anyway, if you want to optimize
-your Python code, start using arrays of required types.
-
-Another source of performance hit is when you use non-contiguous
-arrays. The performance hit will be exactly the same as when using
-incorrect array types. This is because a contiguous copy is created
-to be passed in to the Fortran routine.
-
-\fpy provides a feature such that the ranks of array arguments need
-not to match --- only the correct total size matters. For example, if
-the wrapper function expects a rank-1 array \texttt{array([...])},
-then it is correct to pass in rank-2 (or higher) arrays
-\texttt{array([[...],...,[...]])} assuming that the sizes will match.
-This is especially useful when the arrays should contain only one
-element (size is 1). Then you can pass in arrays \texttt{array(0)},
-\texttt{array([0])}, \texttt{array([[0]])}, etc and all cases are
-handled correctly. In this case it is correct to pass in a Python
-scalar in place of an array (but then ``change in place'' is ignored,
-of course).
-
-\subsubsection{Multidimensional arrays}
-
-If you are using rank-2 or higher rank arrays, you must always
-remember that indexing in Fortran starts from the lowest dimension
-while in Python (and in C) the indexing starts from the highest
-dimension (though some compilers have switches to change this). As a
-result, if you pass in a 2-dimensional array then the Fortran routine
-sees it as the transposed version of the array (in multi-dimensional
-case the indexes are reversed).
-
-You must take this matter into account also when modifying the
-signature file and interpreting the generated Python signatures:
-
-\begin{itemize}
-\item First, when initializing an array using \texttt{init\_expr}, the index
-vector \texttt{\_i[]} changes accordingly to Fortran convention.
-\item Second, the result of CPP-macro \texttt{shape(<array>,0)}
- corresponds to the last dimension of the Fortran array, etc.
-\end{itemize}
-Let me illustrate this with the following example:\\
-\begin{verbatim}
-! Fortran file: arr.f
- subroutine arr(l,m,n,a)
- integer l,m,n
- real*8 a(l,m,n)
- ...
- end
-\end{verbatim}
-\fpy will generate the following signature file:\\
-\begin{verbatim}
-!%f90
-! Signature file: arr.f90
-python module arr ! in
- interface ! in :arr
- subroutine arr(l,m,n,a) ! in :arr:arr.f
- integer optional,check(shape(a,2)==l),depend(a) :: l=shape(a,2)
- integer optional,check(shape(a,1)==m),depend(a) :: m=shape(a,1)
- integer optional,check(shape(a,0)==n),depend(a) :: n=shape(a,0)
- real*8 dimension(l,m,n) :: a
- end subroutine arr
- end interface
-end python module arr
-\end{verbatim}
-and the following wrapper function will be produced
-\begin{verbatim}
-None = arr(a,l=shape(a,2),m=shape(a,1),n=shape(a,0))
-\end{verbatim}
-
-In general, I would suggest not to specify the given optional
-variables \texttt{l,m,n} when calling the wrapper function --- let the
-interface find the values of the variables \texttt{l,m,n}. But there
-are occasions when you need to specify the dimensions in Python.
-
-So, in Python a proper way to create an array from the given
-dimensions is
-\begin{verbatim}
->>> a = zeros(n,m,l,'d')
-\end{verbatim}
-(note that the dimensions are reversed and correct type is specified),
-and then a complete call to \texttt{arr} is
-\begin{verbatim}
->>> arr(a,l,m,n)
-\end{verbatim}
-
-From the performance point of view, always be consistent with Fortran
-indexing convention, that is, use transposed arrays. But if you do the
-following
-\begin{verbatim}
->>> a = transpose(zeros(l,m,n,'d'))
->>> arr(a)
-\end{verbatim}
-then you will get a performance hit! The reason is that here the
-transposition is not actually performed. Instead, the array \texttt{a}
-will be non-contiguous which means that before calling a Fortran
-routine, internally a contiguous array is created which
-includes memory allocation and copying. In addition, if
-the argument array is also \texttt{intent(inout)}, the results are
-copied back to the initial array which doubles the
-performance hit!
-
-So, to improve the performance: always pass in
-arrays that are contiguous.
-
-\subsubsection{Work arrays}
-
-Often Fortran routines use the so-called work arrays. The
-corresponding arguments can be declared as optional arguments, but be
-sure that all dimensions are specified (bounded) and defined before
-the initialization (dependence relations).
-
-On the other hand, if you call the Fortran routine many times then you
-don't want to allocate/deallocate the memory of the work arrays on
-every call. In this case it is recommended that you create temporary
-arrays with proper sizes in Python and use them as work arrays. But be
-careful when specifying the required type and be sure that the
-temporary arrays are contiguous. Otherwise the performance hit would
-be even harder than the hit when not using the temporary arrays from
-Python!
-
-
-
-\subsection{Call-back arguments}
-\label{sec:cbargs}
-
-\fpy builds a very flexible call-back mechanism for call-back
-arguments. If the wrapper function expects a call-back function \texttt{fun}
-with the following Python signature to be passed in
-\begin{verbatim}
-def fun(a_1,...,a_n):
- ...
- return x_1,...,x_k
-\end{verbatim}
-but the user passes in a function \texttt{gun} with the signature
-\begin{verbatim}
-def gun(b_1,...,b_m):
- ...
- return y_1,...,y_l
-\end{verbatim}
-and the following extra arguments (specified as additional optional
-argument for the wrapper function):
-\begin{verbatim}
-fun_extra_args = (e_1,...,e_p)
-\end{verbatim}
-then the actual call-back is constructed accordingly to the following rules:
-\begin{itemize}
-\item if \texttt{p==0} then \texttt{gun(a\_1,...,a\_q)}, where
- \texttt{q=min(m,n)};
-\item if \texttt{n+p<=m} then \texttt{gun(a\_1,...,a\_n,e\_1,...,e\_p)};
-\item if \texttt{p<=m<n+p} then \texttt{gun(a\_1,...,a\_q,e\_1,...,e\_p)},
- where \texttt{q=m-p};
-\item if \texttt{p>m} then \texttt{gun(e\_1,...,e\_m)};
-\item if \texttt{n+p} is less than the number of required arguments
- of the function \texttt{gun}, an exception is raised.
-\end{itemize}
-
-A call-back function \texttt{gun} may return any number of objects as a tuple:
-if \texttt{k<l}, then objects \texttt{y\_k+1,...,y\_l} are ignored;
-if \texttt{k>l}, then only objects \texttt{x\_1,...,x\_l} are set.
-
-
-\subsection{Obtaining information on wrapper functions}
-\label{sec:info}
-
-From the previous sections we learned that it is useful for the
-performance to pass in arguments of expected type, if possible. To
-know what are the expected types, \fpy generates a complete
-documentation strings for all wrapper functions. You can read them
-from Python by printing out \texttt{\_\_doc\_\_} attributes of the
-wrapper functions. For the example in Sec.~\ref{sec:intro}:
-\begin{verbatim}
->>> print foobar.foo.__doc__
-Function signature:
- foo(a)
-Required arguments:
- a : in/output rank-0 array(int,'i')
->>> print foobar.bar.__doc__
-Function signature:
- bar = bar(a,b)
-Required arguments:
- a : input int
- b : input int
-Return objects:
- bar : int
-\end{verbatim}
-
-In addition, \fpy generates a LaTeX document
-(\texttt{<modulename>module.tex}) containing a bit more information on
-the wrapper functions. See for example Appendix that contains a result
-of the documentation generation for the example module
-\texttt{foobar}. Here the file \texttt{foobar-smart.f90} (modified
-version of \texttt{foobar.f90}) is used --- it contains
-\texttt{note(<LaTeX text>)} attributes for specifying some additional
-information.
-
-\subsection{Wrappers for common blocks}
-\label{sec:wrapcomblock}
-
-[See examples \texttt{test-site/e/runme*}]
-
-What follows is obsolete for \fpy versions higher than 2.264.
-
-\fpy generates wrapper functions for common blocks. For every common
-block with a name \texttt{<commonname>} a function
-\texttt{get\_<commonname>()} is constructed that takes no arguments
-and returns a dictionary. The dictionary represents maps between the
-names of common block fields and the arrays containing the common
-block fields (multi-dimensional arrays are transposed). So, in order
-to access to the common block fields, you must first obtain the
-references
-\begin{verbatim}
-commonblock = get_<commonname>()
-\end{verbatim}
-and then the fields are available through the arrays
-\texttt{commonblock["<fieldname>"]}.
-To change the values of common block fields, you can use for scalars
-\begin{verbatim}
-commonblock["<fieldname>"][0] = <new value>
-\end{verbatim}
-and for arrays
-\begin{verbatim}
-commonblock["<fieldname>"][:] = <new array>
-\end{verbatim}
-for example.
-
-For more information on the particular common block wrapping, see
-\texttt{get\_<commonname>.\_\_doc\_\_}.
-
-\subsection{Wrappers for F90/95 module data and routines}
-\label{sec:wrapf90modules}
-
-[See example \texttt{test-site/mod/runme\_mod}]
-
-\subsection{Examples}
-\label{sec:examples}
-
-Examples on various aspects of wrapping Fortran routines to Python can
-be found in directories \texttt{test-site/d/} and
-\texttt{test-site/e/}: study the shell scripts \texttt{runme\_*}. See
-also files in \texttt{doc/ex1/}.
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
diff --git a/doc/f2py/oldnews.html b/doc/f2py/oldnews.html
deleted file mode 100644
index 054643174..000000000
--- a/doc/f2py/oldnews.html
+++ /dev/null
@@ -1,121 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN" "http://www.w3.org/TR/REC-html40/loose.dtd">
-<HTML>
-<HEAD>
-<META name="Author" content="Pearu Peterson">
-<!-- You may add here some keywords (comma separated list) -->
-<META name="Keywords" content="fortran,python,interface,f2py,f2py2e,wrapper,fpig">
-<TITLE>F2PY - Fortran to Python Interface Generator</TITLE>
-<LINK rel="stylesheet" type="text/css" href="/styles/userstyle.css">
-</HEAD>
-
-<body>
-<h2><a href="http://cens.ioc.ee/projects/f2py2e">F2PY</a> old news.</h2>
-
-<dl>
- <dt> February 23, 2002
- <dd> Fixed a bug of incorrect shapes of multi-dimensional arrays
- when returning from Fortran routine (thanks to Eric for pointing
- this out).
- <code>F2PY_REPORT_ATEXIT</code> is disabled by default under Win32.
- <dt> February 14, 2002
- <dd> Introduced <code>callprotoargument</code> statement so that
- proper prototypes can be specified (this fixes SEGFAULTs when
- wrapping C functions with <code>f2py</code>, see <a
- href="NEWS.txt">NEWS.txt</a> for more details). Updated for the
- latest <code>numpy_distutils</code>. Fixed few bugs.
- <dt> February 3, 2002
- <dd> Introduced <code>intent(overwrite),intent(out=name)</code>
- attributes, <code>callstatement C-expr;</code> statement, and
- reviewed reference counting in callback mechanism. Fixed bugs.
- <dt> January 18, 2002
- <dd> Introduced extra keyword argument <code>copy_#varname#=1</code>
- for <code>intent(copy)</code> variables,
- <code>-DF2PY_REPORT_ATEXIT</code> for reporting <code>f2py</code>
- performance,
- <code>has_column_major_storage</code> member function for generated
- modules, and <a href="http://dmalloc.com/">dmalloc</a> support.
- <dt> January 16, 2002
- <dd> BREAKING NEWS! Solved long lasted dilemma of wrapping
- multi-dimensional arrays where different
- storage orders in C and Fortran come into account. From now on
- this difference is dealt automatically by the f2py generated
- module and in a very efficient way. For example, the corresponding
- element A(i,j) of a Fortran array can be accessed in Python as
- A[i,j].
- <dt> January 13, 2002
- <dd> Fifth Public Release is coming soon..., a snapshot is available
- for download, now with updates.
- <dt> December 17, 2001
- <dd> <a href="Release-4.x.txt">Fourth Public Release</a>: Win32 support.
- <dd> Making <code>f2py2e</code> a module. Currently it has only one
- member function <code>run_main(comline_list)</code>.
- <dd> Removed command line arguments <code>-fix,-f90,-f77</code>
- and introduced many new ones. See <a href="NEWS.txt">NEWS.txt</a>.
- <dd> <code>intent(..)</code> statement with empty name list defines
- default <code>intent(..)</code> attribute for all routine arguments.
- <dd> Refinements in Win32 support. Eric Jones has provided a f2py
- HOWTO for Windows users. See <a href="win32_notes.txt">win32_notes.txt</a>.
- <dd> Major rewrite of the code generator to achieve
- a higher quality of generated C/API modules (-Wall messages are
- considerably reduced, especially for callback functions).
- <dd> Many bugs were fixed.
- <dt> December 12, 2001
- <dd> Win32 support (thanks to Eric Jones and Tiffany Kamm). Minor
- cleanups and fixes.
- <dt> December 4, 2001
- <dd> <a href="Release-3.x.txt">Third Public Release</a>: <code>f2py</code> supports <code>distutils</code>. It can be
- installed with one command and it generates <code>setup_modulename.py</code>
- to be used for building Python extension modules.
- <dd> Introduced <code>threadsafe</code>, <code>fortranname</code>,
- and <code>intent(c)</code> statements.
- <dt> August 13, 2001
- <dd> Changed the name FPIG to F2PY for avoiding confusion with project names.
- <dd> Updated <code>f2py</code> for use with Numeric version 20.x.
- <dt> January 12, 2001
- <dd> Example usages of <a href="pyfobj.html"><code>PyFortranObject</code></a>.
- Fixed bugs. Updated the
- <a href="f2python9.html">Python 9 Conference paper</a> (F2PY paper).
- <dt> December 9, 2000
- <dd> Implemented support for <code>PARAMETER</code> statement.
- <dt> November 6, 2000
- <dd> Submitted a paper for 9th Python Conference (accepted). It is available in <a
- href="f2python9.html">html</a>, <a href="f2python9.pdf">PDF</a>,
- and <a href="f2python9.ps.gz">Gzipped PS</a> formats.
- <dt> September 17, 2000
- <dd> Support for F90/95 module data and routines. COMMON block
- wrapping is rewritten. New signature file syntax:
- <code>pythonmodule</code>. Signature files generated with
- f2py-2.264 or earlier, are incompatible (need replacement
- <code>module</code> with
- <code>pythonmodule</code>).
- <dt> September 12, 2000
- <dd> The second public release of <code>f2py</code> is out. See <a
- href="Release-2.x.txt">Release notes</a>.
- <dt> September 11, 2000
- <dd> Now <code>f2py</code> supports wrapping Fortran 90/95 module routines
- (support for F90/95 module data coming soon)
- <dt> June 12, 2000
- <dd> Now <code>f2py</code> has a mailing list <a
-href="#f2py-users">f2py-users</a> open for discussion.
-
-</dl>
-
-
-<!-- End of user text -->
-<HR>
-<ADDRESS>
-<A href="http://validator.w3.org/"><IMG border=0 align=right src="/icons/vh40.gif" alt="Valid HTML 4.0!" height=31 width=88></A>
-<A href="http://cens.ioc.ee/~pearu/" target="_top">Pearu Peterson</A>
-<A href="mailto:pearu (at) ioc.ee">&lt;pearu(at)ioc.ee&gt;</A><BR>
-<!-- hhmts start -->
-Last modified: Mon Dec 3 19:40:26 EET 2001
-<!-- hhmts end -->
-</ADDRESS>
-<!-- You may want to comment the following line out when the document is final-->
-<!-- Check that the reference is right -->
-<!--A href="http://validator.w3.org/check?uri=http://cens.ioc.ee/projects/f2py2e/index.html;ss"> Submit this page for validation</A-->
-
-</BODY>
-
-
-</HTML>
diff --git a/doc/f2py/options.tex b/doc/f2py/options.tex
deleted file mode 100644
index 4e67fb162..000000000
--- a/doc/f2py/options.tex
+++ /dev/null
@@ -1,63 +0,0 @@
-
-\section{\fpy command line options}
-\label{sec:opts}
-
-\fpy has the following command line syntax (run \fpy without arguments
-to get up to date options!!!):
-\begin{verbatim}
-f2py [<options>] <fortran files> [[[only:]||[skip:]] <fortran functions> ]\
- [: <fortran files> ...]
-\end{verbatim}
-where
-\begin{description}
-\item[\texttt{<options>}] --- the following options are available:
- \begin{description}
- \item[\texttt{-f77}] --- \texttt{<fortran files>} are in Fortran~77
- fixed format (default).
- \item[\texttt{-f90}] --- \texttt{<fortran files>} are in
- Fortran~90/95 free format (default for signature files).
- \item[\texttt{-fix}] --- \texttt{<fortran files>} are in
- Fortran~90/95 fixed format.
- \item[\texttt{-h <filename>}] --- after scanning the
- \texttt{<fortran files>} write the signatures of Fortran routines
- to file \texttt{<filename>} and exit. If \texttt{<filename>}
- exists, \fpy quits without overwriting the file. Use
- \texttt{-{}-overwrite-signature} to overwrite.
- \item[\texttt{-m <modulename>}] --- specify the name of the module
- when scanning Fortran~77 codes for the first time. \fpy will
- generate Python C/API module source \texttt{<modulename>module.c}.
- \item[\texttt{-{}-lower/-{}-no-lower}] --- lower/do not lower the cases
- when scanning the \texttt{<fortran files>}. Default when
- \texttt{-h} flag is specified/unspecified (that is for Fortran~77
- codes/signature files).
- \item[\texttt{-{}-short-latex}] --- use this flag when you want to
- include the generated LaTeX document to another LaTeX document.
- \item[\texttt{-{}-debug-capi}] --- create a very verbose C/API
- code. Useful for debugging.
-% \item[\texttt{-{}-h-force}] --- if \texttt{-h <filename>} is used then
-% overwrite the file \texttt{<filename>} (if it exists) and continue
-% with constructing the C/API module source.
- \item[\texttt{-makefile <options>}] --- run \fpy without arguments
- for more information.
- \item[\texttt{-{}-use-libs}] --- see \texttt{-makefile}.
- \item[\texttt{-{}-overwrite-makefile}] --- overwrite existing
- \texttt{Makefile-<modulename>}.
- \item[\texttt{-v}] --- print \fpy version number and exit.
- \item[\texttt{-pyinc}] --- print Python include path and exit.
- \end{description}
-\item[\texttt{<fortran files>}] --- are the paths to Fortran files or
- to signature files that will be scanned for \texttt{<fortran
- functions>} in order to determine their signatures.
-\item[\texttt{<fortran functions>}] --- are the names of Fortran
- routines for which Python C/API wrapper functions will be generated.
- Default is all that are found in \texttt{<fortran files>}.
-\item[\texttt{only:}/\texttt{skip:}] --- are flags for filtering
- in/out the names of fortran routines to be wrapped. Run \fpy without
- arguments for more information about the usage of these flags.
-\end{description}
-
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
diff --git a/doc/f2py/pyforttest.pyf b/doc/f2py/pyforttest.pyf
deleted file mode 100644
index 79a9ae205..000000000
--- a/doc/f2py/pyforttest.pyf
+++ /dev/null
@@ -1,5 +0,0 @@
-subroutine foo(a,m,n)
-integer m = size(a,1)
-integer n = size(a,2)
-real, intent(inout) :: a(m,n)
-end subroutine foo
diff --git a/doc/f2py/pytest.py b/doc/f2py/pytest.py
deleted file mode 100644
index bf4ef917f..000000000
--- a/doc/f2py/pytest.py
+++ /dev/null
@@ -1,12 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-#File: pytest.py
-import Numeric
-def foo(a):
- a = Numeric.array(a)
- m, n = a.shape
- for i in range(m):
- for j in range(n):
- a[i, j] = a[i, j] + 10*(i+1) + (j+1)
- return a
-#eof
diff --git a/doc/f2py/python9.tex b/doc/f2py/python9.tex
deleted file mode 100644
index 524d61113..000000000
--- a/doc/f2py/python9.tex
+++ /dev/null
@@ -1,1044 +0,0 @@
-\documentclass[twocolumn]{article}
-\usepackage{epsfig}
-\usepackage{xspace}
-\usepackage{verbatim}
-
-
-\headsep=0pt
-\topmargin=0pt
-\headheight=0pt
-\oddsidemargin=0pt
-\textwidth=6.5in
-\textheight=9in
-%%tth:\newcommand{\xspace}{ }
-\newcommand{\fpy}{\texttt{f2py}\xspace}
-\newcommand{\bs}{\symbol{`\\}}
-% need bs here:
-%%tth:\newcommand{\bs}{\texttt{<backslash>}}
-
-\newcommand{\tthhide}[1]{#1}
-\newcommand{\latexhide}[1]{}
-%%tth:\newcommand{\tthhide}[1]{}
-%%tth:\newcommand{\latexhide}[1]{#1}
-
-\newcommand{\shell}[1]{
-\latexhide{
- \special{html:
-<BLOCKQUOTE>
-<pre>
-sh> #1
-</pre>
-</BLOCKQUOTE>}
-}
-\tthhide{
- \\[1ex]
- \hspace*{1em}
- \texttt{sh> \begin{minipage}[t]{0.8\textwidth}#1\end{minipage}}\\[1ex]
-}
-}
-
-\newcommand{\email}[1]{\special{html:<A href="mailto:#1">}\texttt{<#1>}\special{html:</A>}}
-\newcommand{\wwwsite}[1]{\special{html:<A href="#1">}{#1}\special{html:</A>}}
-\title{Fortran to Python Interface Generator with
-an Application to Aerospace Engineering}
-\author{
-\large Pearu Peterson\\
-\small \email{pearu@cens.ioc.ee}\\
-\small Center of Nonlinear Studies\\
-\small Institute of Cybernetics at TTU\\
-\small Akadeemia Rd 21, 12618 Tallinn, ESTONIA\\[2ex]
-\large Joaquim R. R. A. Martins and Juan J. Alonso\\
-\small \email{joaquim.martins@stanford.edu}, \email{jjalonso@stanford.edu}\\
-\small Department of Aeronautics and Astronautics\\
-\small Stanford University, CA
-}
-\date{$Revision: 1.17 $\\\today}
-\begin{document}
-
-\maketitle
-
-\special{html: Other formats of this document:
-<A href=f2python9.ps.gz>Gzipped PS</A>,
-<A href=f2python9.pdf>PDF</A>
-}
-
-\begin{abstract}
- FPIG --- Fortran to Python Interface Generator --- is a tool for
- generating Python C/API extension modules that interface
- Fortran~77/90/95 codes with Python. This tool automates the process
- of interface generation by scanning the Fortran source code to
- determine the signatures of Fortran routines and creating a
- Python C/API module that contains the corresponding interface
- functions. FPIG also attempts to find dependence relations between
- the arguments of a Fortran routine call (e.g. an array and its
- dimensions) and constructs interface functions with potentially
- fewer arguments. The tool is extremely flexible since the user has
- control over the generation process of the interface by specifying the
- desired function signatures. The home page for FPIG can be found at
- \wwwsite{http://cens.ioc.ee/projects/f2py2e/}.
-
- FPIG has been used successfully to wrap a large number of Fortran
- programs and libraries. Advances in computational science have led
- to large improvements in the modeling of physical systems which are
- often a result of the coupling of a variety of physical models that
- were typically run in isolation. Since a majority of the available
- physical models have been previously written in Fortran, the
- importance of FPIG in accomplishing these couplings cannot be
- understated. In this paper, we present an application of FPIG to
- create an object-oriented framework for aero-structural analysis and
- design of aircraft.
-\end{abstract}
-
-%%tth:
-\tableofcontents
-
-\section{Preface}
-\label{sec:preface}
-
-The use of high-performance computing has made it possible to tackle
-many important problems and discover new physical phenomena in science
-and engineering. These accomplishments would not have been achieved
-without the computer's ability to process large amounts of data in a
-reasonably short time. It can safely be said that the computer has
-become an essential tool for scientists and engineers. However, the
-diversity of problems in science and engineering has left its mark as
-computer programs have been developed in different programming
-languages, including languages developed to describe certain specific
-classes of problems.
-
-In interdisciplinary fields it is not uncommon for scientists and
-engineers to face problems that have already been solved in a
-different programming environment from the one they are familiar with.
-Unfortunately, researchers may not have the time or willingness to
-learn a new programming language and typically end up developing the
-corresponding tools in the language that they normally use. This
-approach to the development of new software can substantially impact
-the time to develop and the quality of the resulting product: firstly,
-it usually takes longer to develop and test a new tool than to learn a
-new programming environment, and secondly it is very unlikely that a
-non-specialist in a given field can produce a program that is more
-efficient than more established tools.
-
-To avoid situations such as the one described above, one alternative
-would be to provide automatic or semi-automatic interfaces between programming
-languages. Another possibility would be to provide language
-translators, but these obviously require more work than interface
-generators --- a translator must understand all language constructs
-while an interface generator only needs to understand a subset of these
-constructs. With an automatic interface between two languages, scientists or
-engineers can effectively use programs written in other programming
-languages without ever having to learn them.
-
-Although it is clear that it is impossible to interface arbitrary programming
-languages with each other, there is no reason for doing so. Low-level languages such as C and Fortran are well known for
-their speed and are therefore suitable for applications where
-performance is critical. High-level scripting languages, on the other
-hand, are generally slower but much easier to learn and use,
-especially when performing interactive analysis. Therefore, it makes
-sense to create interfaces only in one direction: from lower-level
-languages to higher-level languages.
-
-In an ideal world, scientists and engineers would use higher-level
-languages for the manipulation of the mathematical formulas in a problem
-rather than having to struggle with tedious programming details. For tasks
-that are computationally demanding, they would use interfaces to
-high-performance routines that are written in a lower-level language
-optimized for execution speed.
-
-
-\section{Introduction}
-\label{sec:intro}
-
-This paper presents a tool that has been developed for the creation of
-interfaces between Fortran and Python.
-
-
-The Fortran language is popular in
-scientific computing, and is used mostly in applications that use
-extensive matrix manipulations (e.g. linear algebra). Since Fortran
- has been the standard language among scientists and engineers for
- at least three decades, there is a large number of legacy codes available that
- perform a variety of tasks using very sophisticated algorithms (see
-e.g. \cite{netlib}).
-
-The Python language \cite{python}, on the other hand, is a relatively
-new programming language. It is a very high-level scripting language
-that supports object-oriented programming. What makes Python
-especially appealing is its very clear and natural syntax, which makes it
-easy to learn and use. With Python one can implement relatively
-complicated algorithms and tasks in a short time with very compact
-source code.
-
-Although there are ongoing projects for extending Python's usage in
-scientific computation, it lacks reliable tools that are common in
-scientific and engineering computing, such as ODE integrators, equation solvers,
-tools for FEM, etc. The implementation of all of these tools in Python
-would be not only too time-consuming but also inefficient. On the
-other hand, these tools are already developed in other,
-computationally more efficient languages such as Fortran or C.
-Therefore, the perfect role for Python in the context of scientific
-computing would be that of a ``gluing'' language. That is, the role
-of providing high-level interfaces to C, C++ and Fortran libraries.
-
-There are a number of widely-used tools that can be used for interfacing
-software libraries to Python. For binding C libraries with various
-scripting languages, including Python, the tool most often used is
-SWIG \cite{swig}. Wrapping Fortran routines with Python is less
-popular, mainly because there are many platform and compiler-specific
-issues that need to be addressed. Nevertheless, there is great
-interest in interfacing Fortran libraries because they provide
-invaluable tools for scientific computing. At LLNL, for example, a tool
-called PyFort has been developed for connecting Fortran and
-Python~\cite{pyfort}.
-
-The tools mentioned above require an input file describing signatures
-of functions to be interfaced. To create these input files, one needs
-to have a good knowledge of either C or Fortran. In addition,
-binding libraries that have thousands of routines can certainly constitute a
-very tedious task, even with these tools.
-
-The tool that is introduced in this paper, FPIG (Fortran to Python
-Interface Generator)~\cite{fpig}, automatically generates interfaces
-between Fortran and Python. It is different from the tools mentioned
-above in that FPIG can create signature files automatically by
-scanning the source code of the libraries and then construct Python
-C/API extension modules. Note that the user need not be experienced
-in C or even Fortran. In addition, FPIG is designed to wrap large
-Fortran libraries containing many routines with only one or two
-commands. This process is very flexible since one can always modify
-the generated signature files to insert additional attributes in order
-to achieve more sophisticated interface functions such as taking care
-of optional arguments, predicting the sizes of array arguments and
-performing various checks on the correctness of the input arguments.
-
-The organization of this paper is as follows. First, a simple example
-of FPIG usage is given. Then FPIG's basic features are described and
-solutions to platform and compiler specific issues are discussed.
-Unsolved problems and future work on FPIG's development are also
-addressed. Finally, an application to a large aero-structural solver
-is presented as a real-world example of FPIG's usage.
-
-\section{Getting Started}
-\label{sec:getstart}
-
-To get acquainted with FPIG, let us consider the simple Fortran~77
-subroutine shown in Fig. \ref{fig:exp1.f}.
-\begin{figure}[htb]
- \latexhide{\label{fig:exp1.f}}
- \special{html:<BLOCKQUOTE>}
- \verbatiminput{examples/exp1.f}
- \special{html:</BLOCKQUOTE>}
- \caption{Example Fortran code \texttt{exp1.f}. This routine calculates
- the simplest rational lower and upper approximations to $e$ (for
- details of
- the algorithm see \cite{graham-etal}, p.122)}
- \tthhide{\label{fig:exp1.f}}
-\end{figure}
-In the sections that follow, two ways of creating interfaces to this
-Fortran subroutine are described. The first and simplest way is
-suitable for Fortran codes that are developed in connection with \fpy.
-The second and not much more difficult method, is suitable for
-interfacing existing Fortran libraries which might have been developed
-by other programmers.
-
-Numerical Python~\cite{numpy} is needed in order to compile extension
-modules generated by FPIG.
-
-\subsection{Interfacing Simple Routines}
-\label{sec:example1}
-
-In order to call the Fortran routine \texttt{exp1} from Python, let us
-create an interface to it by using \fpy (FPIG's front-end program). In
-order to do this, we issue the following command, \shell{f2py -m foo
-exp1.f} where the option \texttt{-m foo} sets the name of the Python
-C/API extension module that \fpy will create to
-\texttt{foo}. To learn more about the \fpy command line options, run \fpy
-without arguments.
-
-The output messages in Fig. \ref{fig:f2pyoutmess}
-illustrate the procedure followed by \fpy:
- (i) it scans the Fortran source code specified in the command line,
- (ii) it analyses and determines the routine signatures,
- (iii) it constructs the corresponding Python C/API extension modules,
- (iv) it writes documentation to a LaTeX file, and
- (v) it creates a GNU Makefile for building the shared modules.
-\begin{figure}[htb]
- \latexhide{\label{fig:f2pyoutmess}}
- \special{html:<BLOCKQUOTE>}
- {\tthhide{\small}
- \verbatiminput{examples/exp1mess.txt}
- }
- \special{html:</BLOCKQUOTE>}
- \caption{Output messages of \texttt{f2py -m foo exp1.f}.}
- \tthhide{\label{fig:f2pyoutmess}}
-\end{figure}
-
-Now we can build the \texttt{foo} module:
-\shell{make -f Makefile-foo}
-
-Figure \ref{fig:exp1session} illustrates a sample session for
- calling the Fortran routine \texttt{exp1} from Python.
-\begin{figure}[htb]
- \latexhide{\label{fig:exp1session}}
- \special{html:<BLOCKQUOTE>}
- \verbatiminput{examples/exp1session.txt}
- \special{html:</BLOCKQUOTE>}
- \caption{Calling Fortran routine \texttt{exp1} from Python. Here
- \texttt{l[0]/l[1]} gives an estimate to $e$ with absolute error
- less than \texttt{u[0]/u[1]-l[0]/l[1]} (this value may depend on
- the platform and compiler used).}
- \tthhide{\label{fig:exp1session}}
-\end{figure}
-
-Note the difference between the signatures of the Fortran routine
-\texttt{exp1(l,u,n)} and the corresponding wrapper function
-\texttt{l,u=exp1([n])}. Clearly, the latter is more informative to
-the user: \texttt{exp1} takes one optional argument \texttt{n} and it
-returns \texttt{l}, \texttt{u}. This exchange of signatures is
-achieved by special comment lines (starting with \texttt{Cf2py}) in
-the Fortran source code --- these lines are interpreted by \fpy as
-normal Fortran code. Therefore, in the given example the line \texttt{Cf2py
- integer*4 :: n = 1} informs \fpy that the variable \texttt{n} is
-optional with a default value equal to one. The line \texttt{Cf2py
- intent(out) l,u} informs \fpy that the variables \texttt{l,u} are to be
-returned to Python after calling Fortran function \texttt{exp1}.
-
-\subsection{Interfacing Libraries}
-\label{sec:example2}
-
-In our example the Fortran source \texttt{exp1.f} contains \fpy
-specific information, though only as comments. When interfacing
-libraries from other parties, it is not recommended to modify their
-source. Instead, one should use a special auxiliary file to collect
-the signatures of all Fortran routines and insert \fpy specific
-declaration and attribute statements in that file. This auxiliary file
-is called a \emph{signature file} and is identified by the extension
-\texttt{.pyf}.
-
-We can use \fpy to generate these signature files by using the
-\texttt{-h <filename>.pyf} option.
-In our example, \fpy could have been called as follows,
-\shell{f2py -m foo -h foo.pyf exp1.f}
-where the option \texttt{-h foo.pyf} requests \fpy to read the
-routine signatures, save them to the file \texttt{foo.pyf}, and then
-exit.
-If \texttt{exp1.f} in Fig.~\ref{fig:exp1.f} were to
-contain no lines starting with \texttt{Cf2py}, the corresponding
-signature file \texttt{foo.pyf} would be as shown in Fig.~\ref{fig:foo.pyf}.
-In order to obtain the exchanged and more convenient signature
-\texttt{l,u=foo.exp1([n])}, we would edit \texttt{foo.pyf} as shown in
-Fig.~\ref{fig:foom.pyf}.
-The Python C/API extension module \texttt{foo} can be constructed by
-applying \fpy to the signature file with the following command:
-\shell{f2py foo.pyf}
-The procedure for building the corresponding shared module and using
-it in Python is identical to the one described in the previous section.
-
-\begin{figure}[htb]
- \latexhide{\label{fig:foo.pyf}}
- \special{html:<BLOCKQUOTE>}
- \verbatiminput{examples/foo.pyf}
- \special{html:</BLOCKQUOTE>}
- \caption{Raw signature file \texttt{foo.pyf} generated with
- \texttt{f2py -m foo -h foo.pyf exp1.f}}
- \tthhide{\label{fig:foo.pyf}}
-\end{figure}
-\begin{figure}[htb]
- \latexhide{\label{fig:foom.pyf}}
- \special{html:<BLOCKQUOTE>}
- \verbatiminput{examples/foom.pyf}
- \special{html:</BLOCKQUOTE>}
- \caption{Modified signature file \texttt{foo.pyf}}
- \tthhide{\label{fig:foom.pyf}}
-\end{figure}
-
-As we can see, the syntax of the signature file is an
-extension of the Fortran~90/95 syntax. This means that only a few new
-constructs are introduced for \fpy in addition to all standard Fortran
-constructs; signature files can even be written in fixed form. A
-complete set of constructs that are used when creating interfaces, is
-described in the \fpy User's Guide \cite{f2py-ug}.
-
-
-\section{Basic Features}
-\label{sec:features}
-
-In this section a short overview of \fpy features is given.
-\begin{enumerate}
-\item All basic Fortran types are supported. They include
-the following type specifications:
-\begin{verbatim}
-integer[ | *1 | *2 | *4 | *8 ]
-logical[ | *1 | *2 | *4 | *8 ]
-real[ | *4 | *8 | *16 ]
-complex[ | *8 | *16 | *32 ]
-double precision, double complex
-character[ |*(*)|*1|*2|*3|...]
-\end{verbatim}
-In addition, they can all be in the kind-selector form
-(e.g. \texttt{real(kind=8)}) or char-selector form
-(e.g. \texttt{character(len=5)}).
-\item Arrays of all basic types are supported. Dimension
- specifications can be of form \texttt{<dimension>} or
- \texttt{<start>:<end>}. In addition, \texttt{*} and \texttt{:}
- dimension specifications can be used for input arrays.
-  Dimension specifications may also contain \texttt{PARAMETER}'s.
-\item The following attributes are supported:
- \begin{itemize}
- \item
- \texttt{intent(in)}: used for input-only arguments.
- \item
- \texttt{intent(inout)}: used for arguments that are changed in
- place.
- \item
- \texttt{intent(out)}: used for return arguments.
- \item
- \texttt{intent(hide)}: used for arguments to be removed from
- the signature of the Python function.
- \item
- \texttt{intent(in,out)}, \texttt{intent(inout,out)}: used for
- arguments with combined behavior.
- \item
- \texttt{dimension(<dimspec>)}
- \item
- \texttt{depend([<names>])}: used
- for arguments that depend on other arguments in \texttt{<names>}.
- \item
- \texttt{check([<C booleanexpr>])}: used for checking the
- correctness of input arguments.
- \item
- \texttt{note(<LaTeX text>)}: used for
- adding notes to the module documentation.
- \item
- \texttt{optional}, \texttt{required}
- \item
- \texttt{external}: used for call-back arguments.
- \item
- \texttt{allocatable}: used for Fortran 90/95 allocatable arrays.
- \end{itemize}
-\item Using \fpy one can call arbitrary Fortran~77/90/95 subroutines
- and functions from Python, including Fortran 90/95 module routines.
-\item Using \fpy one can access data in Fortran~77 COMMON blocks and
- variables in Fortran 90/95 modules, including allocatable arrays.
-\item Using \fpy one can call Python functions from Fortran (call-back
- functions). \fpy supports very flexible hooks for call-back functions.
-\item Wrapper functions perform the necessary type conversions for their
- arguments resulting in contiguous Numeric arrays that are suitable for
- passing to Fortran routines.
-\item \fpy generates documentation strings
-for \texttt{\_\_doc\_\_} attributes of the wrapper functions automatically.
-\item \fpy scans Fortran codes and creates the signature
- files. It automatically detects the signatures of call-back functions,
- solves argument dependencies, decides the order of initialization of
- optional arguments, etc.
-\item \fpy automatically generates GNU Makefiles for compiling Fortran
- and C codes, and linking them to a shared module.
- \fpy detects available Fortran and C compilers. The
- supported compilers include the GNU project C Compiler (gcc), Compaq
- Fortran, VAST/f90 Fortran, Absoft F77/F90, and MIPSpro 7 Compilers, etc.
- \fpy has been tested to work on the following platforms: Intel/Alpha
- Linux, HP-UX, IRIX64.
-\item Finally, the complete \fpy User's Guide is available in various
- formats (ps, pdf, html, dvi). A mailing list,
- \email{f2py-users@cens.ioc.ee}, is open for support and feedback. See
- the FPIG's home page for more information \cite{fpig}.
-\end{enumerate}
-
-
-\section{Implementation Issues}
-\label{sec:impl}
-
-The Fortran to Python interface can be thought of as a three layer
-``sandwich'' of different languages: Python, C, and Fortran. This
-arrangement has two interfaces: Python-C and C-Fortran. Since Python
-itself is written in C, there are no basic difficulties in
-implementing the Python-C interface~\cite{python-doc:ext}. The C-Fortran
-interface, on the other hand, results in many platform and compiler specific
-issues that have to be dealt with. We will now discuss these issues
-in some detail and describe how they are solved in FPIG.
-
-\subsection{Mapping Fortran Types to C Types}
-\label{sec:mapF2Ctypes}
-
-Table \ref{tab:mapf2c} defines how Fortran types are mapped to C types
-in \fpy.
-\begin{table}[htb]
- \begin{center}
- \begin{tabular}[c]{l|l}
- Fortran type & C type \\\hline
- \texttt{integer *1} & \texttt{char}\\
- \texttt{byte} & \texttt{char}\\
- \texttt{integer *2} & \texttt{short}\\
- \texttt{integer[ | *4]} & \texttt{int}\\
- \texttt{integer *8} & \texttt{long long}\\
- \texttt{logical *1} & \texttt{char}\\
- \texttt{logical *2} & \texttt{short}\\
- \texttt{logical[ | *4]} & \texttt{int}\\
- \texttt{logical *8} & \texttt{int}\\
- \texttt{real[ | *4]} & \texttt{float}\\
- \texttt{real *8} & \texttt{double}\\
- \texttt{real *16} & \texttt{long double}\\
- \texttt{complex[ | *8]} & \texttt{struct \{float r,i;\}}\\
- \texttt{complex *16} & \texttt{struct \{double r,i;\}}\\
- \texttt{complex *32} & \texttt{struct \{long double r,i;\}}\\
- \texttt{character[*...]} & \texttt{char *}\\
- \end{tabular}
- \caption{Mapping Fortran types to C types.}
- \label{tab:mapf2c}
- \end{center}
-\end{table}
-Users may redefine these mappings by creating a \texttt{.f2py\_f2cmap}
-file in the working directory. This file should contain a Python
-dictionary of dictionaries, e.g. \texttt{\{'real':\{'low':'float'\}\}},
-that informs \fpy to map Fortran type \texttt{real(low)}
-to C type \texttt{float} (here \texttt{PARAMETER low = ...}).
-
-
-\subsection{Calling Fortran (Module) Routines}
-\label{sec:callrout}
-
-When mixing Fortran and C codes, one has to know how function names
-are mapped to low-level symbols in their object files. Different
-compilers may use different conventions for this purpose. For example, gcc
-appends the underscore \texttt{\_} to a Fortran routine name. Other
-compilers may use upper case names, prepend or append different
-symbols to Fortran routine names or both. In any case, if the
-low-level symbols corresponding to Fortran routines are valid for the
-C language specification, compiler specific issues can be solved by
-using CPP macro features.
-
-Unfortunately, there are Fortran compilers that use symbols in
-constructing low-level routine names that are not valid for C. For
-example, the (IRIX64) MIPSpro 7 Compilers use `\$' character in the
-low-level names of module routines which makes it impossible (at
-least directly) to call such routines from C when using the MIPSpro 7
-C Compiler.
-
-In order to overcome this difficulty, FPIG introduces a unique
-solution: instead of using low-level symbols for calling Fortran
-module routines from C, the references to such routines are determined
-at run-time by using special wrappers. These wrappers are called once
-during the initialization of an extension module. They are simple
-Fortran subroutines that use a Fortran module and call another C
-function with Fortran module routines as arguments in order to save
-their references to C global variables that are later used for calling
-the corresponding Fortran module routines. This arrangement is
-set up as follows. Consider the following Fortran 90 module with the
-subroutine \texttt{bar}:
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
-module fun
- subroutine bar()
- end
-end
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
-Figure \ref{fig:capi-sketch} illustrates a Python C/API extension
-module for accessing the F90 module subroutine \texttt{bar} from Python.
-When the Python module \texttt{foo} is loaded, \texttt{finitbar} is
-called. \texttt{finitbar} calls \texttt{init\_bar} by passing the
-reference of the Fortran 90 module subroutine \texttt{bar} to C where it is
-saved to the variable \texttt{bar\_ptr}. Now, when one executes \texttt{foo.bar()}
-from Python, \texttt{bar\_ptr} is used in \texttt{bar\_capi} to call
-the F90 module subroutine \texttt{bar}.
-\begin{figure}[htb]
- \latexhide{\label{fig:capi-sketch}}
- \special{html:<BLOCKQUOTE>}
-\begin{verbatim}
-#include "Python.h"
-...
-char *bar_ptr;
-void init_bar(char *bar) {
- bar_ptr = bar;
-}
-static PyObject *
-bar_capi(PyObject *self,PyObject *args) {
- ...
-  ((void (*)(void))bar_ptr)();
- ...
-}
-static PyMethodDef
-foo_module_methods[] = {
- {"bar",bar_capi,METH_VARARGS},
- {NULL,NULL}
-};
-extern void finitbar_; /* GCC convention */
-void initfoo() {
- ...
- finitbar_(init_bar);
- Py_InitModule("foo",foo_module_methods);
- ...
-}
-\end{verbatim}
- \special{html:</BLOCKQUOTE>}
- \caption{Sketch of Python C/API for accessing F90 module subroutine
- \texttt{bar}. The Fortran function \texttt{finitbar} is defined in
- Fig.~\ref{fig:wrapbar}.}
- \tthhide{\label{fig:capi-sketch}}
-\end{figure}
-\begin{figure}[ht]
- \latexhide{\label{fig:wrapbar}}
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
- subroutine finitbar(cinit)
- use fun
-      external cinit
- call cinit(bar)
- end
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
- \caption{Wrapper for passing the reference of \texttt{bar} to C code.}
- \tthhide{\label{fig:wrapbar}}
-\end{figure}
-
-Surprisingly, mixing C code and Fortran modules in this way is as
-portable and compiler independent as mixing C and ordinary Fortran~77
-code.
-
-Note that extension modules generated by \fpy actually use
-\texttt{PyFortranObject} that implements the above-described scheme with
-exchanged functionalities (see Section \ref{sec:PFO}).
-
-
-\subsection{Wrapping Fortran Functions}
-\label{sec:wrapfunc}
-
-The Fortran language has two types of routines: subroutines and
-functions. When a Fortran function returns a composed type such as
-\texttt{COMPLEX} or \texttt{CHARACTER}-array then calling this
-function directly from C may not work for all compilers, as C
-functions are not supposed to return such references. In order to
-avoid this, FPIG constructs an additional Fortran wrapper subroutine
-for each such Fortran function. These wrappers call just the
-corresponding functions in the Fortran layer and return the result to
-C through its first argument.
-
-
-\subsection{Accessing Fortran Data}
-\label{sec:accsdata}
-
-In Fortran one can use \texttt{COMMON} blocks and Fortran module
-variables to save data that is accessible from other routines. Using
-FPIG, one can also access these data containers from Python. To achieve
-this, FPIG uses special wrapper functions (similar to the ones used
-for wrapping Fortran module routines) to save the references to these
-data containers so that they can later be used from C.
-
-FPIG can also handle \texttt{allocatable} arrays. For example, if a
-Fortran array is not yet allocated, then by assigning it in Python,
-the Fortran to Python interface will allocate and initialize the
-array. For example, the F90 module allocatable array \texttt{bar}
-defined in
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
-module fun
- integer, allocatable :: bar(:)
-end module
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
-can be allocated from Python as follows
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
->>> import foo
->>> foo.fun.bar = [1,2,3,4]
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
-
-\subsection{\texttt{PyFortranObject}}
-\label{sec:PFO}
-
-In general, we would like to access from Python the following Fortran
-objects:
-\begin{itemize}
-\item subroutines and functions,
-\item F90 module subroutines and functions,
-\item items in COMMON blocks,
-\item F90 module data.
-\end{itemize}
-Assuming that the Fortran source is available, we can determine the signatures
-of these objects (the full specification of routine arguments, the
-layout of Fortran data, etc.). In fact, \fpy gets this information
-while scanning the Fortran source.
-
-In order to access these Fortran objects from C, we need to determine
-their references. Note that the direct access of F90 module objects is
-extremely compiler dependent and in some cases even impossible.
-Therefore, FPIG uses various wrapper functions for obtaining the
-references to Fortran objects. These wrapper functions are ordinary
-F77 subroutines that can easily access objects from F90 modules and
-that pass the references to Fortran objects as C variables.
-
-
-\fpy generated Python C/API extension modules use
-\texttt{PyFortranObject} to store the references of Fortran objects.
-In addition to the storing functionality, the \texttt{PyFortranObject}
-also provides methods for accessing/calling Fortran objects from
-Python in a user-friendly manner. For example, the item \texttt{a} in
-\texttt{COMMON /bar/ a(2)} can be accessed from Python as
-\texttt{foo.bar.a}.
-
-Detailed examples of \texttt{PyFortranObject} usage can be found in
-\cite{PFO}.
-
-\subsection{Callback Functions}
-\label{sec:callback}
-
-Fortran routines may have arguments specified as \texttt{external}.
-These arguments are function or subroutine names that the receiving Fortran routine
-will call from its body. For such arguments FPIG
-constructs a call-back mechanism (originally contributed by Travis
-Oliphant) that allows Fortran routines to call Python functions. This
-is actually realized using a C layer between Python and
-Fortran. Currently, the call-back mechanism is compiler independent
-unless a call-back function needs to return a composed type
-(e.g. \texttt{COMPLEX}).
-
-The signatures of call-back functions are determined when \fpy scans
-the Fortran source code. To illustrate this, consider the following
-example:
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
- subroutine foo(bar, fun, boo)
- integer i
- real r
- external bar,fun,boo
- call bar(i, 1.2)
- r = fun()
- call sun(boo)
- end
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
-\fpy recognizes the signatures of the user routines \texttt{bar} and
-\texttt{fun} using the information contained in the lines \texttt{call
- bar(i, 1.2)} and \texttt{r = fun()}:
-\special{html:<BLOCKQUOTE>}
-\begin{verbatim}
-subroutine bar(a,b)
- integer a
- real b
-end
-function fun()
- real fun
-end
-\end{verbatim}
-\special{html:</BLOCKQUOTE>}
-But \fpy cannot determine the signature of the user routine
-\texttt{boo} because the source contains no information at all about
-the \texttt{boo} specification. Here the user needs to provide the
-signature of \texttt{boo} manually.
-
-\section{Future Work}
-\label{sec:future}
-
-FPIG can be used to wrap almost any Fortran code. However, there are
-still issues that need to be resolved. Some of them are listed below:
-\begin{enumerate}
-\item One of the FPIG's goals is to become as platform and compiler
- independent as possible. Currently FPIG can be used on
- any UN*X platform that has gcc installed in it. In the future, FPIG
- should be also tested on Windows systems.
-\item Another goal of FPIG is to become as simple to use as
- possible. To achieve that, FPIG should start using the facilities of
- \texttt{distutils}, the new Python standard to distribute and build
- Python modules. Therefore, a contribution to \texttt{distutils}
- that can handle Fortran extensions should be developed.
-\item Currently users must be aware of
- the fact that multi-dimensional arrays are stored differently in C
- and Fortran (they must provide transposed multi-dimensional arrays
- to wrapper functions). In the future a solution should be found such
- that users do not need to worry about this rather
- confusing and technical detail.
-\item Finally, a repository of signature files for widely-used Fortran
- libraries (e.g. BLAS, LAPACK, MINPACK, ODEPACK, EISPACK, LINPACK) should be
- provided.
-\end{enumerate}
-
-
-\section{Application to a Large Aero-Structural Analysis Framework}
-\label{sec:app}
-
-
-\subsection{The Need for Python and FPIG}
-\label{sec:appsub1}
-
-As a demonstration of the power and usefulness of FPIG, we will
-present work that has been done at the Aerospace Computing Laboratory
-at Stanford University. The focus of the research is on aircraft
-design optimization using high-fidelity analysis tools such as
-Computational Fluid Dynamics (CFD) and Computational Structural
-Mechanics (CSM)~\cite{reno99}.
-
-The group's analysis programs are written mainly in Fortran and are the result
-of many years of development. Until now, any researcher that needed
-to use these tools would have to learn a less than user-friendly
-interface and become relatively familiar with the inner workings of
-the codes before starting the research itself. The need to
-couple analyses of different disciplines revealed the additional
-inconvenience of gluing and scripting the different codes with
-Fortran.
-
-It was therefore decided that the existing tools should be wrapped
-using an object-oriented language in order to improve their ease of
-use and versatility. The use of several different languages such as
-C++, Java and Perl was investigated but Python seemed to provide the
-best solution. The fact that it combines scripting capability
-with a fully-featured object-oriented programming language, and that
-it has a clean syntax were factors that determined our choice. The
-introduction of tools that greatly facilitate the task of wrapping
-Fortran with Python provided the final piece needed to realize our
-objective.
-
-\subsection{Wrapping the Fortran Programs}
-
-In theory, it would have been possible to wrap our Fortran programs
-with C and then with Python by hand. However, this would have been a
-labor intensive task that would detract from our research. The use of
-tools that automate the task of wrapping has been extremely useful.
-
-The first such tool that we used was PyFort. This tool created the C
-wrappers and Python modules automatically, based on signature files
-(\texttt{.pyf}) provided by the user. Although it made the task of
-wrapping considerably easier, PyFort was limited by the fact that any
-Fortran data that was needed at the Python level had to be passed in
-the argument list of the Fortran subroutine. Since the bulk of the
-data in our programs is shared by using Fortran~77 common blocks and
-Fortran~90 modules, this required adding many more arguments to the
-subroutine headers. Furthermore, since Fortran does not allow common
-block variables or module data to be specified in a subroutine
-argument list, a dummy pointer for each desired variable had to be
-created and initialized.
-
-The search for a better solution to this problem led us to \fpy.
-Since \fpy provides a solution for accessing common block and module
-variables, there was no need to change the Fortran source anymore,
-making the wrapping process even easier. With \fpy we also
-experienced an increased level of automation since it produces the
-signature files automatically, as well as a Makefile for the joint
-compilation of the original Fortran and C wrapper codes. This increased
-automation did not detract from its flexibility since it was always
-possible to edit the signature files to provide different functionality.
-
-Once Python interfaces were created for each Fortran application
-by running \fpy, it was just a matter of using Python to achieve the
-final objective of developing an object-oriented framework for our
-multidisciplinary solvers. The Python modules that we designed are
-discussed in the following section.
-
-
-\subsection{Module Design}
-\label{ssec:module}
-
-The first objective of this effort was to design the classes for each
-type of analysis, each representing an independent Python module. In
-our case, we are interested in performing aero-structural analysis and
-optimization of aircraft wings. We therefore needed an analysis tool
-for the flow (CFD), another for analyzing the structure (CSM), as well
-as a geometry database. In addition, we needed to interface these two
-tools in order to analyze the coupled system. The object design for
-each of these modules should be general enough that the underlying
-analysis code in Fortran can be changed without changing the Python
-interface. Another requirement was that the modules be usable on
-their own for single discipline analysis.
-
-\subsubsection{Geometry}
-
-The \emph{Geometry} class provides a database for the outer mold
-geometry of the aircraft. This database needs to be accessed by both
-the flow and structural solvers. It contains a parametric description
-of the aircraft's surface as well as methods that extract and update
-this information.
-
-
-\subsubsection{Flow}
-
-The flow solver was wrapped in a class called \emph{Flow}. The class
-was designed so that it can wrap any type of CFD solver. It contains
-two main objects: the computational mesh and a solver object. A graph
-showing the hierarchy of the objects in \emph{Flow} is shown in
-Fig.~\ref{fig:flow}.
-\tthhide{
-\begin{figure}[h]
- \centering
- \epsfig{file=./flow.eps, angle=0, width=.7\linewidth}
- \caption{The \emph{Flow} container class.}
- \label{fig:flow}
-\end{figure}
-}
-\latexhide{
-\begin{figure}[h]
- \label{fig:flow}
-\special{html:
-<CENTER>
- <IMG SRC="flow.jpg" WIDTH="400">
-</CENTER>
-}
- \caption{The \emph{Flow} container class.}
-\end{figure}
-}
-Methods in the flow class include those used for the initialization of
-all the class components as well as methods that write the current
-solution to a file.
-
-
-\subsubsection{Structure}
-
-The \emph{Structure} class wraps a structural analysis code. The class
-stores the information about the structure itself in an object called
-\emph{Model} which also provides methods for changing and exporting
-its information. A list of the objects contained in this class can be
-seen in Fig.~\ref{fig:structure}.
-\tthhide{
-\begin{figure}[h]
- \centering
- \epsfig{file=./structure.eps, angle=0, width=.7\linewidth}
- \caption{The \emph{Structure} container class.}
- \label{fig:structure}
-\end{figure}
-}
-\latexhide{
-\begin{figure}[h]
- \label{fig:structure}
-\special{html:
-<CENTER>
- <IMG SRC="structure.jpg" WIDTH="400">
-</CENTER>
-}
- \caption{The \emph{Structure} container class.}
-\end{figure}
-}
-Since the \emph{Structure} class contains a
-dictionary of \emph{LoadCase} objects, it is able to store and solve
-multiple load cases, a capability that the original Fortran code
-does not have.
-
-
-\subsubsection{Aerostructure}
-
-The \emph{Aerostructure} class is the main class in the
-aero-structural analysis module and contains a \emph{Geometry}, a
-\emph{Flow} and a \emph{Structure}. In addition, the class defines
-all the functions that are necessary to translate aerodynamic
-loads to structural loads and structural displacements to
-geometry surface deformations.
-
-One of the main methods of this class is the one that solves the
-aeroelastic system. This method is printed below:
-\begin{verbatim}
-def Iterate(self, load_case):
- """Iterates the aero-structural solution."""
- self.flow.Iterate()
- self._UpdateStructuralLoads()
- self.structure.CalcDisplacements(load_case)
- self.structure.CalcStresses(load_case)
- self._UpdateFlowMesh()
- return
-\end{verbatim}
-This is indeed a very readable script, thanks to Python, and any
-high-level changes to the solution procedure can be easily
-implemented.
-The \emph{Aerostructure} class also contains methods that export all
-the information on the current solution for visualization, an example
-of which is shown in the next section.
-
-
-\subsection{Results}
-
-In order to visualize results, and because we needed to view results
-from multiple disciplines simultaneously, we selected OpenDX. Output
-files in DX format are written at the Python level and the result can
-be seen in Fig.~\ref{fig:aerostructure} for the case of a transonic
-airliner configuration.
-\tthhide{
-\begin{figure*}[t]
- \centering
- \epsfig{file=./aerostructure.eps, angle=-90, width=\linewidth}
- \caption{Aero-structural model and results.}
- \label{fig:aerostructure}
-\end{figure*}
-}
-\latexhide{
-\begin{figure}[h]
- \label{fig:aerostructure}
-\special{html:
-<CENTER>
- <IMG SRC="aerostructure.jpg" WIDTH="600">
-</CENTER>
-}
- \caption{Aero-structural model and results.}
-\end{figure}
-}
-
-
-The figure illustrates the multidisciplinary nature of the
-problem. The grid pictured in the background is the mesh used by the
-flow solver and is colored by the pressure values computed at the
-cell centers. The wing in the foreground and its outer surface is
-clipped to show the internal structural components which are colored
-by their stress value.
-
-In conclusion, \fpy and Python have been extremely useful tools in our
-pursuit for increasing the usability and flexibility of existing Fortran
-tools.
-
-
-\begin{thebibliography}{99}
-\bibitem{netlib}
-\newblock Netlib repository at UTK and ORNL.
-\newblock \\\wwwsite{http://www.netlib.org/}
-\bibitem{python}
-Python language.
-\newblock \\\wwwsite{http://www.python.org/}
-\bibitem{swig}
-SWIG --- Simplified Wrapper and Interface Generator.
-\newblock \\\wwwsite{http://www.swig.org/}
-\bibitem{pyfort}
-PyFort --- The Python-Fortran connection tool.
-\newblock \\\wwwsite{http://pyfortran.sourceforge.net/}
-\bibitem{fpig}
-FPIG --- Fortran to Python Interface Generator.
-\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/}
-\bibitem{numpy}
-Numerical Extension to Python.
-\newblock \\\wwwsite{http://numpy.sourceforge.net/}
-\bibitem{graham-etal}
-R. L. Graham, D. E. Knuth, and O. Patashnik.
-\newblock {\em {C}oncrete {M}athematics: a foundation for computer science.}
-\newblock Addison-Wesley, 1988
-\bibitem{f2py-ug}
-P. Peterson.
-\newblock {\em {\tt f2py} - Fortran to Python Interface Generator. Second Edition.}
-\newblock 2000
-\newblock
-\\\wwwsite{http://cens.ioc.ee/projects/f2py2e/usersguide.html}
-\bibitem{python-doc:ext}
-Python Documentation: Extending and Embedding.
-\newblock \\\wwwsite{http://www.python.org/doc/ext/}
-\bibitem{PFO}
-P. Peterson. {\em {\tt PyFortranObject} example usages.}
-\newblock 2001
-\newblock \\\wwwsite{http://cens.ioc.ee/projects/f2py2e/pyfobj.html}
-\bibitem{reno99}
-Reuther, J., J. J. Alonso, J. R. R. A. Martins, and
-S. C. Smith.
-\newblock ``A Coupled Aero-Structural Optimization Method for
- Complete Aircraft Configurations'',
-\newblock {\em Proceedings of the 37th Aerospace Sciences Meeting},
-\newblock AIAA Paper 1999-0187. Reno, NV, January, 1999
-\end{thebibliography}
-
-%\end{multicols}
-
-%\begin{figure}[htbp]
-% \begin{center}
-% \epsfig{file=aerostructure2b.ps,width=0.75\textwidth}
-% \end{center}
-%\end{figure}
-
-
-
-\end{document}
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: t
-%%% End:
diff --git a/doc/f2py/signaturefile.tex b/doc/f2py/signaturefile.tex
deleted file mode 100644
index 3cd16d890..000000000
--- a/doc/f2py/signaturefile.tex
+++ /dev/null
@@ -1,368 +0,0 @@
-
-\section{Signature file}
-\label{sec:signaturefile}
-
-The syntax of a signature file is borrowed from the Fortran~90/95
-language specification. Almost all Fortran~90/95 standard constructs
-are understood. Recall that Fortran~77 is a subset of Fortran~90/95.
-This tool introduces also some new attributes that are used for
-controlling the process of Fortran to Python interface construction.
-In the following, a short overview of the constructs
-used in signature files will be given.
-
-
-\subsection{Module block}
-\label{sec:moduleblock}
-
-A signature file contains one or more \texttt{pythonmodule} blocks. A
-\texttt{pythonmodule} block has the following structure:
-\begin{verbatim}
-python module <modulename>
- interface
- <routine signatures>
- end [interface]
- interface
- module <F90/95 modulename>
- <F90 module data type declarations>
- <F90 module routine signatures>
- end [module [<F90/95 modulename>]]
- end [interface]
-end [pythonmodule [<modulename>]]
-\end{verbatim}
-For each \texttt{pythonmodule} block \fpy will generate a C-file
-\texttt{<modulename>module.c} (see step (iii)). (This is not true if
-\texttt{<modulename>} contains substring \texttt{\_\_user\_\_}, see
-Sec.~\ref{sec:cbmodule} and \texttt{external} attribute).
-
-\subsection{Signatures of Fortran routines and Python functions}
-\label{sec:routineblock}
-
-
-The signature of a Fortran routine has the following structure:
-\begin{verbatim}
-[<typespec>] function|subroutine <routine name> [([<arguments>])] \
- [result (<entityname>)]
- [<argument type declarations>]
- [<argument attribute statements>]
- [<use statements>]
- [<common block statements>]
- [<other statements>]
-end [function|subroutine [<routine name>]]
-\end{verbatim}
-
-Let us introduce also the signature of the corresponding wrapper
-function:
-\begin{verbatim}
-def <routine name>(<required arguments>[,<optional arguments>]):
- ...
- return <return variables>
-\end{verbatim}
-
-Before you edit the signature file, you should first decide what is the
-desired signature of the corresponding Python function. \fpy offers
-many possibilities to control the interface construction process: you
-may want to insert/change/remove various attributes in the
-declarations of the arguments in order to change the appearance
-of the arguments in the Python wrapper function.
-
-\begin{itemize}
-\item
-The definition of the \texttt{<argument type declaration>} is
-\begin{verbatim}
-<typespec> [[<attrspec>]::] <entitydecl>
-\end{verbatim}
-where
-\begin{verbatim}
-<typespec> := byte | character[<charselector>]
- | complex[<kindselector>] | real[<kindselector>]
- | double complex | double precision
- | integer[<kindselector>] | logical[<kindselector>]
-\end{verbatim}
-\begin{verbatim}
-<charselector> := *<charlen> | ([len=]<len>[,[kind]<kind>])
- | (kind=<kind>[,len=<len>])
-<kindselector> := *<intlen> | ([kind=]<kind>)
-\end{verbatim}
-(there is no sense to modify \texttt{<typespec>}s generated by \fpy).
-\texttt{<attrspec>} is a comma separated list of attributes (see
-Sec.~\ref{sec:attributes});
-\begin{verbatim}
-<entitydecl> := <name> [[*<charlen>][(<arrayspec>)]
- | [(<arrayspec>)]*<charlen>]
- | [/<init_expr>/ | =<init_expr>] [,<entitydecl>]
-\end{verbatim}
-where \texttt{<arrayspec>} is a comma separated list of dimension
-bounds; \texttt{<init\_expr>} is a C-expression (see
-Sec.~\ref{sec:C-expr}). If an argument is not defined with
-\texttt{<argument type declaration>}, its type is determined by
-applying \texttt{implicit} rules (if it is not specifyied, then
-standard rules are applied).
-
-\item The definition of the \texttt{<argument attribute statement>} is
-a short form of the \texttt{<argument type declaration>}:
-\begin{verbatim}
-<attrspec> <entitydecl>
-\end{verbatim}
-
-\item \texttt{<use statement>} is defined as follows
-\begin{verbatim}
-use <modulename> [,<rename_list> | ,ONLY:<only_list>]
-<rename_list> := local_name=>use_name [,<rename_list>]
-\end{verbatim}
- Currently the \texttt{use} statement is used to link call-back
- modules (Sec.~\ref{sec:cbmodule}) and the \texttt{external}
- arguments (call-back functions).
-
-\item \texttt{<common block statement>} is defined as follows
-\begin{verbatim}
-common /<commonname>/ <shortentitydecl>
-\end{verbatim}
-where
-\begin{verbatim}
-<shortentitydecl> := <name> [(<arrayspec>)] [,<shortentitydecl>]
-\end{verbatim}
-One \texttt{module} block should not contain two or more
-\texttt{common} blocks with the same name. Otherwise, the later ones
-are ignored. The types of variables in \texttt{<shortentitydecl>} can
-be defined in \texttt{<argument type declarations>}. Note that there
-you can specify also the array specifications; then you don't need to
-do that in \texttt{<shortentitydecl>}.
-\end{itemize}
-
-\subsection{Attributes}
-\label{sec:attributes}
-
-The following attributes are used by \fpy:
-\begin{description}
-\item[\texttt{optional}] --- the variable is moved to the end of
- optional argument list of the wrapper function. Default value of an
- optional argument can be specified using \texttt{<init\_expr>} in
- \texttt{entitydecl}. You can use \texttt{optional} attribute also for
- \texttt{external} arguments (call-back functions), but it is your
- responsibility to ensure that it is given by the user if Fortran
- routine wants to call it.
-\item[\texttt{required}] --- the variable is considered as a required
- argument (that is default). You will need this in order to overwrite
- the \texttt{optional} attribute that is automatically set when
- \texttt{<init\_expr>} is used. However, usage of this attribute
- should be rare.
-\item[\texttt{dimension(<arrayspec>)}] --- used when the variable is
- an array. For unbounded dimensions symbols `\texttt{*}' or
- `\texttt{:}' can be used (then internally the corresponding
- dimensions are set to -1; you'll notice this when certain exceptions
- are raised).
-\item[\texttt{external}] --- the variable is a call-back function. \fpy will
- construct a call-back mechanism for this function. Also call-back
- functions must be defined by their signatures, and there are several
- ways to do that. In most cases, \fpy will be able to determine the signatures
- of call-back functions from the Fortran source code; then it
- builds an additional \texttt{module} block with a name containing
- string `\texttt{\_\_user\_\_}' (see Sec.~\ref{sec:cbmodule}) and
- includes \texttt{use} statement to the routines signature. Anyway,
- you should check that the generated signature is correct.
-
- Alternatively, you can specify the signature by inserting to the
- routines block a ``model'' how the call-back function would be called
- from Fortran. For subroutines you should use\\
- \hspace*{2em}\texttt{call <call-back name>(<arguments>)}\\
- and for functions\\%
- \hspace*{2em}\texttt{<return value> = <call-back name>(<arguments>)}\\
- The variables in \texttt{<arguments>} and \texttt{<return value>}
- must be defined as well. You can use the arguments of the main
- routine, for instance.
-\item[\texttt{intent(<intentspec>)}] --- this specifies the
- ``intention'' of the variable. \texttt{<intentspec>} is a comma
- separated list of the following specifications:
- \begin{description}
- \item[\texttt{in}] --- the variable is considered to be an input
- variable (default). It means that the Fortran function uses only
- the value(s) of the variable and is assumed not to change it.
- \item[\texttt{inout}] --- the variable is considered to be an
- input/output variable which means that Fortran routine may change
- the value(s) of the variable. Note that in Python only array
- objects can be changed ``in place''. (\texttt{intent(outin)} is
- \texttt{intent(inout)}.)
- \item[\texttt{out}] --- the value of the (output) variable is
- returned by the wrapper function: it is appended to the list of
- \texttt{<returned variables>}. If \texttt{out} is specified alone,
- also \texttt{hide} is assumed.
- \item[\texttt{hide}] --- use this if the variable \emph{should not}
- or \emph{need not} to be in the list of wrapper function arguments
- (not even in optional ones). For example, this is assumed if
- \texttt{intent(out)} is used. You can ``hide'' an argument if it
- has always a constant value specified in \texttt{<init\_expr>},
- for instance.
- \end{description}
- The following rules apply:
- \begin{itemize}
- \item if no \texttt{intent} attribute is specified, \texttt{intent(in)} is
- assumed;
- \item \texttt{intent(in,inout)} is \texttt{intent(in)};
- \item \texttt{intent(in,hide)}, \texttt{intent(inout,hide)} are \texttt{intent(hide)};
- \item \texttt{intent(out)} is \texttt{intent(out,hide)};
-\item \texttt{intent(inout)} is NOT \texttt{intent(in,out)}.
- \end{itemize}
- In conclusion, the following combinations are ``minimal'':
- \texttt{intent(in)}, \texttt{intent(inout)}, \texttt{intent(out)},
- \texttt{intent(hide)}, \texttt{intent(in,out)}, and
- \texttt{intent(inout,out)}.
-\item[\texttt{check([<C-booleanexpr>])}] --- if
- \texttt{<C-booleanexpr>} evaluates to zero, an exception is raised
- about incorrect value or size or any other incorrectness of the
- variable. If \texttt{check()} or \texttt{check} is used then \fpy
- will not try to guess the checks automatically.
-\item[\texttt{depend([<names>])}] --- the variable depends on other
- variables listed in \texttt{<names>}. These dependence relations
- determine the order of internal initialization of the variables. If
- you need to change these relations then be careful not to break the
- dependence relations of other relevant variables. If
- \texttt{depend()} or \texttt{depend} is used then \fpy will not try
- to guess the dependence relations automatically.
-\item[\texttt{note(<LaTeX text>)}] --- with this attribute you can
- include human readable documentation strings to the LaTeX document
- that \fpy generates. Do not insert here information that \fpy can
- establish by itself, such as, types, sizes, lengths of the
- variables. Here you can insert almost arbitrary LaTeX text. Note
- that \texttt{<LaTeX text>} is mainly used inside the LaTeX
- \texttt{description} environment. Hint: you can use
- \texttt{\bs{}texttt\{<name>\}} for typesetting variable \texttt{<name>}
- in LaTeX. In order to get a new line to the LaTeX document, use
- \texttt{\bs{}n} followed by a space. For longer text, you may want
- to use line continuation feature of Fortran 90/95 language: set
- \texttt{\&} (ampersand)
- to be the last character in a line.
-\item[\texttt{parameter}] --- the variable is parameter and it must
- have a value. If the parameter is used in dimension specification,
- it is replaced by its value. (Are there any other usages of
- parameters except in dimension specifications? Let me know and I'll
- add support for it).
-\end{description}
-
-
-\subsection{C-expressions}
-\label{sec:C-expr}
-
-The signature of a routine may contain C-expressions in
-\begin{itemize}
-\item \texttt{<init\_expr>} for initializing particular variable, or in
-\item \texttt{<C-booleanexpr>} of the \texttt{check} attribute, or in
-\item \texttt{<arrayspec>} of the \texttt{dimension} attribute.
-\end{itemize}
-A C-expression may contain
-\begin{itemize}
-\item standard C-statement,
-\item functions offered in \texttt{math.h},
-\item previously initialized variables (study
-the dependence relations) from the argument list, and
-\item the following CPP-macros:
- \begin{description}
- \item[\texttt{len(<name>)}] --- the length of an array \texttt{<name>};
- \item[\texttt{shape(<name>,<n>)}] --- the $n$-th dimension of an array
- \texttt{<name>};
- \item[\texttt{rank(<name>)}] --- the rank of an array \texttt{<name>};
- \item[\texttt{slen(<name>)}] --- the length of a string \texttt{<name>}.
- \end{description}
-\end{itemize}
-
-
-In addition, when initializing arrays, an index vector \texttt{int
- \_i[rank(<name>)];}
-is available: \texttt{\_i[0]} refers to
-the index of the first dimension, \texttt{\_i[1]} to the index of
-the second dimension, etc. For example, the argument type declaration\\
-\hspace*{2em}\texttt{integer a(10) = \_i[0]}\\
-is equivalent with the following Python statement\\
-\hspace*{2em}\texttt{a = array(range(10))}
-
-
-\subsection{Required/optional arguments}
-\label{sec:reqoptargs}
-
-When \texttt{optional} attribute is used (including the usage of
-\texttt{<init\_expr>} without the \texttt{required} attribute), the
-corresponding variable in the argument list of a Fortran routine is
-appended to the optional argument list of the wrapper function.
-
-For optional array argument all dimensions must be bounded (not
-\texttt{(*)} or \texttt{(:)}) and defined at the time of
-initialization (dependence relations).
-
-If the \texttt{None} object is passed in in place of a required array
-argument, it will be considered as optional: that is, the memory is
-allocated (of course, if it has unbounded dimensions, an exception
-will be raised), and if \texttt{<init\_expr>} is defined,
-initialization is carried out.
-
-
-\subsection{Internal checks}
-\label{sec:intchecks}
-
-All array arguments are checked against the correctness of their rank.
-If there is a mismatch, \fpy attempts to fix that by constructing an
-array with a correct rank from the given array argument (there will be
-no performance hit as no data is copied). The freedom to do so is
-given only if some dimensions are unbounded or their value is 1. An
-exception is raised when the sizes will not match.
-
-All bounded dimensions of an array are checked to be larger or equal
-to the dimensions specified in the signature.
-
-So, you don't need to give explicit \texttt{check} attributes to check
-these internal checks.
-
-
-\subsection{Call-back modules}
-\label{sec:cbmodule}
-
-A Fortran routine may have \texttt{external} arguments (call-back
-functions). The signatures of the call-back functions must be defined
-in a call-back \texttt{module} block (its name contains
-\texttt{\_\_user\_\_}), in general; other possibilities are described
-in the \texttt{external} attribute specification (see
-Sec.~\ref{sec:attributes}). For the signatures of call-back
-functions the following restrictions apply:
-\begin{itemize}
-\item Attributes \texttt{external}, \texttt{check(...)}, and
- initialization statements are ignored.
-\item Attribute \texttt{optional} is used only for changing the order
- of the arguments.
-\item For arrays all dimension bounds must be specified. They may be
- C-expressions containing variables from the argument list.
- Note that here CPP-macros \texttt{len}, \texttt{shape},
- \texttt{rank}, and \texttt{slen} are not available.
-\end{itemize}
-
-
-\subsection{Common blocks}
-\label{sec:commonblocks}
-
-All fields in a common block are mapped to arrays of appropriate sizes
-and types. Scalars are mapped to rank-0 arrays. For multi-dimensional
-fields the corresponding arrays are transposed. In the type
-declarations of the variables representing the common block fields,
-only \texttt{dimension(<arrayspec>)}, \texttt{intent(hide)}, and
-\texttt{note(<LaTeX text>)} attributes are used, others are ignored.
-
-\subsection{Including files}
-\label{sec:include}
-
-You can include files to the signature file using
-\begin{verbatim}
-include '<filename>'
-\end{verbatim}
-statement. It can be used in any part of the signature file.
-If the file \texttt{<filename>} does not exists or it is not in the path,
-the \texttt{include} line is ignored.
-
-\subsection{\fpy directives}
-\label{sec:directives}
-
-You can insert signature statements directly to Fortran source codes
-as comments. Anything that follows \texttt{<comment char>f2py} is
-regarded as normal statement for \fpy.
-
-%%% Local Variables:
-%%% mode: latex
-%%% TeX-master: "f2py2e"
-%%% End:
-
diff --git a/doc/f2py/simple.f b/doc/f2py/simple.f
deleted file mode 100644
index ba468a509..000000000
--- a/doc/f2py/simple.f
+++ /dev/null
@@ -1,13 +0,0 @@
-cFile: simple.f
- subroutine foo(a,m,n)
- integer m,n,i,j
- real a(m,n)
-cf2py intent(in,out) a
-cf2py intent(hide) m,n
- do i=1,m
- do j=1,n
- a(i,j) = a(i,j) + 10*i+j
- enddo
- enddo
- end
-cEOF
diff --git a/doc/f2py/simple_session.dat b/doc/f2py/simple_session.dat
deleted file mode 100644
index 10d9dc962..000000000
--- a/doc/f2py/simple_session.dat
+++ /dev/null
@@ -1,51 +0,0 @@
->>> import pytest
->>> import f2pytest
->>> import pyforttest
->>> print f2pytest.foo.__doc__
-foo - Function signature:
- a = foo(a)
-Required arguments:
- a : input rank-2 array('f') with bounds (m,n)
-Return objects:
- a : rank-2 array('f') with bounds (m,n)
-
->>> print pyforttest.foo.__doc__
-foo(a)
-
->>> pytest.foo([[1,2],[3,4]])
-array([[12, 14],
- [24, 26]])
->>> f2pytest.foo([[1,2],[3,4]]) # F2PY can handle arbitrary input sequences
-array([[ 12., 14.],
- [ 24., 26.]],'f')
->>> pyforttest.foo([[1,2],[3,4]])
-Traceback (most recent call last):
- File "<stdin>", line 1, in ?
-pyforttest.error: foo, argument A: Argument intent(inout) must be an array.
-
->>> import Numeric
->>> a=Numeric.array([[1,2],[3,4]],'f')
->>> f2pytest.foo(a)
-array([[ 12., 14.],
- [ 24., 26.]],'f')
->>> a # F2PY makes a copy when input array is not Fortran contiguous
-array([[ 1., 2.],
- [ 3., 4.]],'f')
->>> a=Numeric.transpose(Numeric.array([[1,3],[2,4]],'f'))
->>> a
-array([[ 1., 2.],
- [ 3., 4.]],'f')
->>> f2pytest.foo(a)
-array([[ 12., 14.],
- [ 24., 26.]],'f')
->>> a # F2PY passes Fortran contiguous input array directly to Fortran
-array([[ 12., 14.],
- [ 24., 26.]],'f')
-# See intent(copy), intent(overwrite), intent(inplace), intent(inout)
-# attributes documentation to enhance the above behavior.
-
->>> a=Numeric.array([[1,2],[3,4]],'f')
->>> pyforttest.foo(a)
->>> a # Huh? Pyfort 8.5 gives wrong results..
-array([[ 12., 23.],
- [ 15., 26.]],'f')
diff --git a/doc/f2py/using_F_compiler.txt b/doc/f2py/using_F_compiler.txt
deleted file mode 100644
index 63bb0d68c..000000000
--- a/doc/f2py/using_F_compiler.txt
+++ /dev/null
@@ -1,147 +0,0 @@
-
-Title: Wrapping F compiled Fortran 90 modules with F2PY
- ================================================
-
-Rationale: The F compiler does not support external procedures which
- makes it impossible to use it in F2PY in a normal way.
- This document describes a workaround to this problem so
- that F compiled codes can be still wrapped with F2PY.
-
-Author: Pearu Peterson
-Date: May 8, 2002
-
-Acknowledgement: Thanks to Siegfried Gonzi who hammered me to produce
- this document.
-
-Normally wrapping Fortran 90 modules to Python using F2PY is carried
-out with the following command
-
- f2py -c -m fun foo.f90
-
-where file foo.f90 contains, for example,
-
-module foo
- public :: bar
- contains
- subroutine bar (a)
- integer,intent(inout) :: a
- print *,"Hello from foo.bar"
- print *,"a=",a
- a = a + 5
- print *,"a=",a
- end subroutine bar
-end module foo
-
-Then with a supported F90 compiler (running `f2py -c --help-compiler'
-will display the found compilers) f2py will generate an extension
-module fun.so into the current directory and the Fortran module foo
-subroutine bar can be called from Python as follows
-
->>> import fun
->>> print fun.foo.bar.__doc__
-bar - Function signature:
- bar(a)
-Required arguments:
- a : in/output rank-0 array(int,'i')
-
->>> from Numeric import array
->>> a = array(3)
->>> fun.foo.bar(a)
- Hello from foo.bar
- a= 3
- a= 8
->>> a
-8
->>>
-
-This works nicely with all supported Fortran compilers.
-
-However, the F compiler (http://www.fortran.com/F/compilers.html) is
-an exception. Namely, the F compiler is designed to recognize only
-module procedures (and main programs, of course) but F2PY needs to
-compile also the so-called external procedures that it generates to
-facilitate accessing Fortran F90 module procedures from C and
-subsequently from Python. As a result, wrapping F compiled Fortran
-procedures to Python is _not_ possible using the simple procedure as
-described above. But, there is a workaround that I'll describe below
-in five steps.
-
-1) Compile foo.f90:
-
- F -c foo.f90
-
-This creates an object file foo.o into the current directory.
-
-2) Create the signature file:
-
- f2py foo.f90 -h foo.pyf
-
-This creates a file foo.pyf containing
-
-module foo ! in foo.f90
- real public :: bar
- subroutine bar(a) ! in foo.f90:foo
- integer intent(inout) :: a
- end subroutine bar
-end module foo
-
-3) Open the file foo.pyf with your favorite text editor and change the
- above signature to
-
-python module foo
- interface
- subroutine bar(a)
- fortranname foo_MP_bar
- intent(c) bar
- integer intent(in,out) :: a
- end subroutine bar
- end interface
-end python module foo
-
-The most important modifications are
-
- a) adding `python' keyword everywhere before the `module' keyword
-
- b) including an `interface' block around the all subroutine blocks.
-
- c) specifying the real symbol name of the subroutine using
- `fortranname' statement. F generated symbol names are in the form
- <module name>_MP_<subroutine name>
-
- d) specifying that subroutine is `intent(c)'.
-
-Notice that the `intent(inout)' attribute is changed to
-`intent(in,out)' that instructs the wrapper to return the modified
-value of `a'.
-
-4) Build the extension module
-
- f2py -c foo.pyf foo.o --fcompiler=Gnu /opt/F/lib/quickfit.o \
- /opt/F/lib/libf96.a
-
-This will create the extension module foo.so into the current
-directory. Notice that you must use Gnu compiler (gcc) for linking.
-And the paths to F specific object files and libraries may differ for
-your F installation.
-
-5) Finally, we can call the module subroutine `bar' from Python
-
->>> import foo
->>> print foo.bar.__doc__
-bar - Function signature:
- a = bar(a)
-Required arguments:
- a : input int
-Return objects:
- a : int
-
->>> foo.bar(3)
-8
->>>
-
-Notice that the F compiled module procedures are called as ordinary
-external procedures. Also I/O seems to be lacking for F compiled
-Fortran modules.
-
-Enjoy,
- Pearu
diff --git a/doc/f2py/win32_notes.txt b/doc/f2py/win32_notes.txt
deleted file mode 100644
index 40b7d549a..000000000
--- a/doc/f2py/win32_notes.txt
+++ /dev/null
@@ -1,84 +0,0 @@
-The following notes are from Eric Jones.
-
-My Setup:
-
-For Python/Fortran development, I run Windows 2000 and use the mingw32
-(www.mingw.org) set of gcc/g77 compilers and tools (gcc 2.95.2) to build python
-extensions. I'll also occasionally use MSVC for extension development, but
-rarely on projects that include Fortran code. This short HOWTO describes how
-I use f2py in the Windows environment. Pretty much everything is done from
-a CMD (DOS) prompt, so you'll need to be familiar with using shell commands.
-
-Installing f2py:
-
-Before installing f2py, you'll need to install python. I use python2.1 (maybe
-python2.2 will be out by the time you read this). Any version of Python beyond
-version 1.52 should be fine. See www.python.org for info on installing Python.
-
-You'll also need Numeric which is available at
-http://sourceforge.net/projects/numpy/. The latest version is 20.3.
-
-Since Pearu has moved to a setup.py script, installation is pretty easy. You
-can download f2py from http://cens.ioc.ee/projects/f2py2e/. The latest public
-release is http://cens.ioc.ee/projects/f2py2e/rel-3.x/f2py-3.latest.tgz. Even
-though this is a .tgz file instead of a .zip file, most standard compression
-utilities such as WinZip (www.winzip.com) handle unpacking .tgz files
-automatically. Here are the download steps:
-
- 1. Download the latest version of f2py and save it to disk.
-
- 2. Use WinZip or some other tool to open the "f2py.xxx.tgz" file.
- a. When WinZip says archive contains one file, "f2py.xxx.tar"
- and ask if it should open it, respond with "yes".
- b. Extract (use the extract button at the top) all the files
- in the archive into a file. I'll use c:\f2py2e
-
- 3. Open a cmd prompt by clicking start->run and typing "cmd.exe".
- Now type the following commands.
-
- C:\WINDOWS\SYSTEM32> cd c:\f2py2e
- C:\F2PY2E> python setup.py install
-
- This will install f2py in the c:\python21\f2py2e directory. It
- also copies a few scripts into the c:\python21\Scripts directory.
- That's all there is to installing f2py. Now let's set up the
- environment so that f2py is easy to use.
-
- 4. You need to set up a couple of environment variables. The path
- "c:\python21\Scripts" needs to be added to your path variables.
- To do this, go to the environment variables settings page. This is
- where it is on windows 2000:
-
- Desktop->(right click)My Computer->Properties->Advanced->
- Environment Variables
-
- a. Add "c:\python21\Scripts" to the end of the Path variable.
- b. If it isn't already there, add ".py" to the PATHEXT variable.
- This tells the OS to execute f2py.py even when just "f2py" is
- typed at a command prompt.
-
- 5. Well, there actually isn't anything to be done here. The Python
- installation should have taken care of associating .py files with
- Python for execution, so you shouldn't have to do anything to
- registry settings.
-
-To test your installation, open a new cmd prompt, and type the following:
-
- C:\WINDOWS\SYSTEM32> f2py
- Usage:
- f2py [<options>] <fortran files> [[[only:]||[skip:]] \
- <fortran functions> ] \
- [: <fortran files> ...]
- ...
-
-This prints out the usage information for f2py. If it doesn't, there is
-something wrong with the installation.
-
-Testing:
-The f2py test scripts are kinda Unix-centric, so they don't work under windows.
-
-XXX include test script XXX.
-
-Compiler and setup.py issues:
-
-XXX
diff --git a/doc/neps/ufunc-overrides.rst b/doc/neps/ufunc-overrides.rst
index 451b55a62..90869e1ac 100644
--- a/doc/neps/ufunc-overrides.rst
+++ b/doc/neps/ufunc-overrides.rst
@@ -166,7 +166,8 @@ Hence, the arguments are normalized: only the required input arguments
passed on as a dict of keyword arguments (``kwargs``). In particular, if
there are output arguments, positional are otherwise, that are not
:obj:`None`, they are passed on as a tuple in the ``out`` keyword
-argument.
+argument (even for the ``reduce``, ``accumulate``, and ``reduceat`` methods
+where in all current cases only a single output makes sense).
The function dispatch proceeds as follows:
@@ -664,13 +665,13 @@ Symbol Operator NumPy Ufunc(s)
``//`` ``floordiv`` :func:`floor_divide`
``%`` ``mod`` :func:`remainder`
NA ``divmod`` :func:`divmod`
-``**`` ``pow`` :func:`power`
+``**`` ``pow`` :func:`power` [10]_
``<<`` ``lshift`` :func:`left_shift`
``>>`` ``rshift`` :func:`right_shift`
``&`` ``and_`` :func:`bitwise_and`
``^`` ``xor_`` :func:`bitwise_xor`
``|`` ``or_`` :func:`bitwise_or`
-``@`` ``matmul`` Not yet implemented as a ufunc [10]_
+``@`` ``matmul`` Not yet implemented as a ufunc [11]_
====== ============ =========================================
And here is the list of unary operators:
@@ -679,16 +680,22 @@ And here is the list of unary operators:
Symbol Operator NumPy Ufunc(s)
====== ============ =========================================
``-`` ``neg`` :func:`negative`
-``+`` ``pos`` :func:`positive` [11]_
+``+`` ``pos`` :func:`positive` [12]_
NA ``abs`` :func:`absolute`
``~`` ``invert`` :func:`invert`
====== ============ =========================================
-.. [10] Because NumPy's :func:`matmul` is not a ufunc, it is
+.. [10] class :`ndarray` takes short cuts for ``__pow__`` for the
+ cases where the power equals ``1`` (:func:`positive`),
+ ``-1`` (:func:`reciprocal`), ``2`` (:func:`square`), ``0`` (an
+ otherwise private ``_ones_like`` ufunc), and ``0.5``
+ (:func:`sqrt`), and the array is float or complex (or integer
+ for square).
+.. [11] Because NumPy's :func:`matmul` is not a ufunc, it is
`currently not possible <https://github.com/numpy/numpy/issues/9028>`_
to override ``numpy_array @ other`` with ``other`` taking precedence
if ``other`` implements ``__array_func__``.
-.. [11] :class:`ndarray` currently does a copy instead of using this ufunc.
+.. [12] :class:`ndarray` currently does a copy instead of using this ufunc.
Future extensions to other functions
------------------------------------
diff --git a/doc/release/1.10.0-notes.rst b/doc/release/1.10.0-notes.rst
index 35e967f44..88062e463 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/release/1.10.0-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.10.0 Release Notes
-**************************
+==========================
This release supports Python 2.6 - 2.7 and 3.2 - 3.5.
@@ -59,7 +60,7 @@ Compatibility notes
===================
Default casting rule change
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
Default casting for inplace operations has changed to ``'same_kind'``. For
instance, if n is an array of integers, and f is an array of floats, then
``n += f`` will result in a ``TypeError``, whereas in previous Numpy
@@ -69,13 +70,13 @@ compatible way by rewriting it as ``np.add(n, f, out=n, casting='unsafe')``.
The old ``'unsafe'`` default has been deprecated since Numpy 1.7.
numpy version string
-~~~~~~~~~~~~~~~~~~~~
+--------------------
The numpy version string for development builds has been changed from
``x.y.z.dev-githash`` to ``x.y.z.dev0+githash`` (note the +) in order to comply
with PEP 440.
relaxed stride checking
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
NPY_RELAXED_STRIDE_CHECKING is now true by default.
UPDATE: In 1.10.2 the default value of NPY_RELAXED_STRIDE_CHECKING was
@@ -85,12 +86,12 @@ dimension changing views of f_contiguous not c_contiguous arrays was also
added.
Concatenation of 1d arrays along any but ``axis=0`` raises ``IndexError``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------------------
Using axis != 0 has raised a DeprecationWarning since NumPy 1.7, it now
raises an error.
*np.ravel*, *np.diagonal* and *np.diag* now preserve subtypes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------
There was inconsistent behavior between *x.ravel()* and *np.ravel(x)*, as
well as between *x.diagonal()* and *np.diagonal(x)*, with the methods
preserving subtypes while the functions did not. This has been fixed and
@@ -100,13 +101,13 @@ compatibility and still return 1-D arrays as before. If you need to
preserve the matrix subtype, use the methods instead of the functions.
*rollaxis* and *swapaxes* always return a view
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
Previously, a view was returned except when no change was made in the order
of the axes, in which case the input array was returned. A view is now
returned in all cases.
*nonzero* now returns base ndarrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
Previously, an inconsistency existed between 1-D inputs (returning a
base ndarray) and higher dimensional ones (which preserved subclasses).
Behavior has been unified, and the return will now be a base ndarray.
@@ -114,7 +115,7 @@ Subclasses can still override this behavior by providing their own
*nonzero* method.
C API
-~~~~~
+-----
The changes to *swapaxes* also apply to the *PyArray_SwapAxes* C function,
which now returns a view in all cases.
@@ -128,7 +129,7 @@ The change to the concatenation function DeprecationWarning also affects
PyArray_ConcatenateArrays,
recarray field return types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
Previously the returned types for recarray fields accessed by attribute and by
index were inconsistent, and fields of string type were returned as chararrays.
Now, fields accessed by either attribute or indexing will return an ndarray for
@@ -138,14 +139,14 @@ whitespace is trimmed from chararrays but kept in ndarrays of string type.
Also, the dtype.type of nested structured fields is now inherited.
recarray views
-~~~~~~~~~~~~~~
+--------------
Viewing an ndarray as a recarray now automatically converts the dtype to
np.record. See new record array documentation. Additionally, viewing a recarray
with a non-structured dtype no longer converts the result's type to ndarray -
the result will remain a recarray.
'out' keyword argument of ufuncs now accepts tuples of arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------
When using the 'out' keyword argument of a ufunc, a tuple of arrays, one per
ufunc output, can be provided. For ufuncs with a single output a single array
is also a valid 'out' keyword argument. Previously a single array could be
@@ -154,24 +155,24 @@ output for ufuncs with multiple outputs, is deprecated, and will result in a
`DeprecationWarning` now and an error in the future.
byte-array indices now raises an IndexError
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Indexing an ndarray using a byte-string in Python 3 now raises an IndexError
instead of a ValueError.
Masked arrays containing objects with arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
For such (rare) masked arrays, getting a single masked item no longer returns a
corrupted masked array, but a fully masked version of the item.
Median warns and returns nan when invalid values are encountered
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------------------
Similar to mean, median and percentile now emit a RuntimeWarning and
return `NaN` in slices where a `NaN` is present.
To compute the median or percentile while ignoring invalid values use the
new `nanmedian` or `nanpercentile` functions.
Functions available from numpy.ma.testutils have changed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------
All functions from numpy.testing were once available from
numpy.ma.testutils but not all of them were redefined to work with masked
arrays. Most of those functions have now been removed from
@@ -184,7 +185,7 @@ New Features
============
Reading extra flags from site.cfg
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
Previously customization of compilation of dependency libraries and numpy
itself was only accomplishable via code changes in the distutils package.
Now numpy.distutils reads in the following extra flags from each group of the
@@ -198,34 +199,34 @@ Now numpy.distutils reads in the following extra flags from each group of the
This should, at least partially, complete user customization.
*np.cbrt* to compute cube root for real floats
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
*np.cbrt* wraps the C99 cube root function *cbrt*.
Compared to *np.power(x, 1./3.)* it is well defined for negative real floats
and a bit faster.
numpy.distutils now allows parallel compilation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------
By passing *--parallel=n* or *-j n* to *setup.py build* the compilation of
extensions is now performed in *n* parallel processes.
The parallelization is limited to files within one extension so projects using
Cython will not profit because it builds extensions from single files.
*genfromtxt* has a new ``max_rows`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
A ``max_rows`` argument has been added to *genfromtxt* to limit the
number of rows read in a single call. Using this functionality, it is
possible to read in multiple arrays stored in a single file by making
repeated calls to the function.
New function *np.broadcast_to* for invoking array broadcasting
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------------
*np.broadcast_to* manually broadcasts an array to a given shape according to
numpy's broadcasting rules. The functionality is similar to broadcast_arrays,
which in fact has been rewritten to use broadcast_to internally, but only a
single array is necessary.
New context manager *clear_and_catch_warnings* for testing warnings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------------
When Python emits a warning, it records that this warning has been emitted in
the module that caused the warning, in a module attribute
``__warningregistry__``. Once this has happened, it is not possible to emit
@@ -237,7 +238,7 @@ you will not be able to emit the warning or test it. The context manager
and resets them on exit, meaning that warnings can be re-raised.
*cov* has new ``fweights`` and ``aweights`` arguments
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------
The ``fweights`` and ``aweights`` arguments add new functionality to
covariance calculations by applying two types of weighting to observation
vectors. An array of ``fweights`` indicates the number of repeats of each
@@ -245,7 +246,7 @@ observation vector, and an array of ``aweights`` provides their relative
importance or probability.
Support for the '@' operator in Python 3.5+
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Python 3.5 adds support for a matrix multiplication operator '@' proposed
in PEP465. Preliminary support for that has been implemented, and an
equivalent function ``matmul`` has also been added for testing purposes and
@@ -253,7 +254,7 @@ use in earlier Python versions. The function is preliminary and the order
and number of its optional arguments can be expected to change.
New argument ``norm`` to fft functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
@@ -265,21 +266,21 @@ Improvements
============
*np.digitize* using binary search
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
*np.digitize* is now implemented in terms of *np.searchsorted*. This means
that a binary search is used to bin the values, which scales much better
for larger number of bins than the previous linear search. It also removes
the requirement for the input array to be 1-dimensional.
*np.poly* now casts integer inputs to float
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
*np.poly* will now cast 1-dimensional input arrays of integer type to double
precision floating point, to prevent integer overflow when computing the monic
polynomial. It is still possible to obtain higher precision results by
passing in an array of object type, filled e.g. with Python ints.
*np.interp* can now be used with periodic functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------
*np.interp* now has a new parameter *period* that supplies the period of the
input data *xp*. In such case, the input data is properly normalized to the
given period and one end point is added to each extremity of *xp* in order to
@@ -287,19 +288,19 @@ close the previous and the next period cycles, resulting in the correct
interpolation behavior.
*np.pad* supports more input types for ``pad_width`` and ``constant_values``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------------------------------
``constant_values`` parameters now accepts NumPy arrays and float values.
NumPy arrays are supported as input for ``pad_width``, and an exception is
raised if its values are not of integral type.
*np.argmax* and *np.argmin* now support an ``out`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------------
The ``out`` parameter was added to *np.argmax* and *np.argmin* for consistency
with *ndarray.argmax* and *ndarray.argmin*. The new parameter behaves exactly
as it does in those methods.
More system C99 complex functions detected and used
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------
All of the functions ``in complex.h`` are now detected. There are new
fallback implementations of the following functions.
@@ -312,31 +313,31 @@ As a result of these improvements, there will be some small changes in
returned values, especially for corner cases.
*np.loadtxt* support for the strings produced by the ``float.hex`` method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------------------
The strings produced by ``float.hex`` look like ``0x1.921fb54442d18p+1``,
so this is not the hex used to represent unsigned integer types.
*np.isclose* properly handles minimal values of integer dtypes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------------
In order to properly handle minimal values of integer types, *np.isclose* will
now cast to the float dtype during comparisons. This aligns its behavior with
what was provided by *np.allclose*.
*np.allclose* uses *np.isclose* internally.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
*np.allclose* now uses *np.isclose* internally and inherits the ability to
compare NaNs as equal by setting ``equal_nan=True``. Subclasses, such as
*np.ma.MaskedArray*, are also preserved now.
*np.genfromtxt* now handles large integers correctly
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------
*np.genfromtxt* now correctly handles integers larger than ``2**31-1`` on
32-bit systems and larger than ``2**63-1`` on 64-bit systems (it previously
crashed with an ``OverflowError`` in these cases). Integers larger than
``2**63-1`` are converted to floating-point values.
*np.load*, *np.save* have pickle backward compatibility flags
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------
The functions *np.load* and *np.save* have additional keyword
arguments for controlling backward compatibility of pickled Python
@@ -344,7 +345,7 @@ objects. This enables Numpy on Python 3 to load npy files containing
object arrays that were generated on Python 2.
MaskedArray support for more complicated base classes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------
Built-in assumptions that the baseclass behaved like a plain array are being
removed. In particular, setting and getting elements and ranges will respect
baseclass overrides of ``__setitem__`` and ``__getitem__``, and arithmetic
@@ -354,13 +355,13 @@ Changes
=======
dotblas functionality moved to multiarray
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------
The cblas versions of dot, inner, and vdot have been integrated into
the multiarray module. In particular, vdot is now a multiarray function,
which it was not before.
stricter check of gufunc signature compliance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------
Inputs to generalized universal functions are now more strictly checked
against the function's signature: all core dimensions are now required to
be present in input arrays; core dimensions with the same label must have
@@ -368,12 +369,12 @@ the exact same size; and output core dimension's must be specified, either
by a same label input core dimension or by a passed-in output array.
views returned from *np.einsum* are writeable
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------
Views returned by *np.einsum* will now be writeable whenever the input
array is writeable.
*np.argmin* skips NaT values
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
*np.argmin* now skips NaT values in datetime64 and timedelta64 arrays,
making it consistent with *np.min*, *np.argmax* and *np.max*.
@@ -383,7 +384,7 @@ Deprecations
============
Array comparisons involving strings or structured dtypes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------
Normally, comparison operations on arrays perform elementwise
comparisons and return arrays of booleans. But in some corner cases,
@@ -418,21 +419,21 @@ comparison operations, e.g.::
# -> [False, False]
SafeEval
-~~~~~~~~
+--------
The SafeEval class in numpy/lib/utils.py is deprecated and will be removed
in the next release.
alterdot, restoredot
-~~~~~~~~~~~~~~~~~~~~
+--------------------
The alterdot and restoredot functions no longer do anything, and are
deprecated.
pkgload, PackageLoader
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
These ways of loading packages are now deprecated.
bias, ddof arguments to corrcoef
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------
The values for the ``bias`` and ``ddof`` arguments to the ``corrcoef``
function canceled in the division implied by the correlation coefficient and
@@ -447,7 +448,7 @@ as its position will change with the removal of ``bias``. ``allow_masked``
will in due course become a keyword-only argument.
dtype string representation changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
Since 1.6, creating a dtype object from its string representation, e.g.
``'f4'``, would issue a deprecation warning if the size did not correspond
to an existing type, and default to creating a dtype of the default size
diff --git a/doc/release/1.10.1-notes.rst b/doc/release/1.10.1-notes.rst
index 9096f6c15..4e541d279 100644
--- a/doc/release/1.10.1-notes.rst
+++ b/doc/release/1.10.1-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.10.1 Release Notes
-**************************
+==========================
This release deals with a few build problems that showed up in 1.10.0. Most
users would not have seen these problems. The differences are:
diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index 02e756474..8c26b463c 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.10.2 Release Notes
-**************************
+==========================
This release deals with a number of bugs that turned up in 1.10.1 and
adds various build and release improvements.
@@ -11,20 +12,20 @@ Compatibility notes
===================
Relaxed stride checking is no longer the default
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------
There were back compatibility problems involving views changing the dtype of
multidimensional Fortran arrays that need to be dealt with over a longer
timeframe.
Fix swig bug in ``numpy.i``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
Relaxed stride checking revealed a bug in ``array_is_fortran(a)``, that was
using PyArray_ISFORTRAN to check for Fortran contiguity instead of
PyArray_IS_F_CONTIGUOUS. You may want to regenerate swigged files using the
updated numpy.i
Deprecate views changing dimensions in fortran order
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------
This deprecates assignment of a new descriptor to the dtype attribute of
a non-C-contiguous array if it results in changing the shape. This
effectively bars viewing a multidimensional Fortran array using a dtype
diff --git a/doc/release/1.10.3-notes.rst b/doc/release/1.10.3-notes.rst
index 036827274..0d4df4ce6 100644
--- a/doc/release/1.10.3-notes.rst
+++ b/doc/release/1.10.3-notes.rst
@@ -1,4 +1,5 @@
+==========================
NumPy 1.10.3 Release Notes
-**************************
+==========================
N/A this release did not happen due to various screwups involving PyPi.
diff --git a/doc/release/1.10.4-notes.rst b/doc/release/1.10.4-notes.rst
index 7de732a22..481928ca7 100644
--- a/doc/release/1.10.4-notes.rst
+++ b/doc/release/1.10.4-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.10.4 Release Notes
-**************************
+==========================
This release is a bugfix source release motivated by a segfault regression.
No windows binaries are provided for this release, as there appear to be
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index 02222a5ab..2c1870c8f 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.11.0 Release Notes
-**************************
+==========================
This release supports Python 2.6 - 2.7 and 3.2 - 3.5 and contains a number
of enhancements and improvements. Note also the build system changes listed
@@ -78,7 +79,7 @@ Compatibility notes
===================
datetime64 changes
-~~~~~~~~~~~~~~~~~~
+------------------
In prior versions of NumPy the experimental datetime64 type always stored
times in UTC. By default, creating a datetime64 object from a string or
printing it would convert from or to local time::
@@ -112,24 +113,24 @@ with date units and datetimes with time units. With timezone naive datetimes,
the rule for casting from dates to times is no longer ambiguous.
``linalg.norm`` return type changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
The return type of the ``linalg.norm`` function is now floating point without
exception. Some of the norm types previously returned integers.
polynomial fit changes
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
The various fit functions in the numpy polynomial package no longer accept
non-integers for degree specification.
*np.dot* now raises ``TypeError`` instead of ``ValueError``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------------
This behaviour mimics that of other functions such as ``np.inner``. If the two
arguments cannot be cast to a common type, it could have raised a ``TypeError``
or ``ValueError`` depending on their order. Now, ``np.dot`` will always
raise a ``TypeError``.
FutureWarning to changed behavior
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
* In ``np.lib.split`` an empty array in the result always had dimension
``(0,)`` no matter the dimensions of the array being split. This
@@ -139,7 +140,7 @@ FutureWarning to changed behavior
already preserved.
``%`` and ``//`` operators
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
These operators are implemented with the ``remainder`` and ``floor_divide``
functions respectively. Those functions are now based around ``fmod`` and are
computed together so as to be compatible with each other and with the Python
@@ -152,7 +153,7 @@ is always returned for both functions when the divisor is zero,
``divmod(-1.0, inf)`` returns ``(-1.0, inf)``.
C API
-~~~~~
+-----
Removed the ``check_return`` and ``inner_loop_selector`` members of
the ``PyUFuncObject`` struct (replacing them with ``reserved`` slots
@@ -162,7 +163,7 @@ mention it here for completeness.
object dtype detection for old-style classes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
In python 2, objects which are instances of old-style user-defined classes no
longer automatically count as 'object' type in the dtype-detection handler.
@@ -232,17 +233,17 @@ Improvements
============
``np.gradient`` now supports an ``axis`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------
The ``axis`` parameter was added to ``np.gradient`` for consistency. It
allows specifying the axes over which the gradient is calculated.
``np.lexsort`` now supports arrays with object data-type
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------
The function now internally calls the generic ``npy_amergesort`` when the
type does not implement a merge-sort kind of ``argsort`` method.
``np.ma.core.MaskedArray`` now supports an ``order`` argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------
When constructing a new ``MaskedArray`` instance, it can be configured with
an ``order`` argument analogous to the one when calling ``np.ndarray``. The
addition of this argument allows for the proper processing of an ``order``
@@ -250,19 +251,19 @@ argument in several MaskedArray-related utility functions such as
``np.ma.core.array`` and ``np.ma.core.asarray``.
Memory and speed improvements for masked arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------
Creating a masked array with ``mask=True`` (resp. ``mask=False``) now uses
``np.ones`` (resp. ``np.zeros``) to create the mask, which is faster and
avoid a big memory peak. Another optimization was done to avoid a memory
peak and useless computations when printing a masked array.
``ndarray.tofile`` now uses fallocate on linux
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
The function now uses the fallocate system call to reserve sufficient
disk space on file systems that support it.
Optimizations for operations of the form ``A.T @ A`` and ``A @ A.T``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------------------
Previously, ``gemm`` BLAS operations were used for all matrix products. Now,
if the matrix product is between a matrix and its transpose, it will use
``syrk`` BLAS operations for a performance boost. This optimization has been
@@ -271,11 +272,11 @@ extended to ``@``, ``numpy.dot``, ``numpy.inner``, and ``numpy.matmul``.
**Note:** Requires the transposed and non-transposed matrices to share data.
``np.testing.assert_warns`` can now be used as a context manager
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------------------
This matches the behavior of ``assert_raises``.
Speed improvement for np.random.shuffle
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------
``np.random.shuffle`` is now much faster for 1d ndarrays.
@@ -283,14 +284,14 @@ Changes
=======
Pyrex support was removed from ``numpy.distutils``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
The method ``build_src.generate_a_pyrex_source`` will remain available; it
has been monkeypatched by users to support Cython instead of Pyrex. It's
recommended to switch to a better supported method of building Cython
extensions though.
``np.broadcast`` can now be called with a single argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------------
The resulting object in that case will simply mimic iteration over
a single array. This change obsoletes distinctions like
@@ -302,20 +303,20 @@ a single array. This change obsoletes distinctions like
Instead, ``np.broadcast`` can be used in all cases.
``np.trace`` now respects array subclasses
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------
This behaviour mimics that of other functions such as ``np.diagonal`` and
ensures, e.g., that for masked arrays ``np.trace(ma)`` and ``ma.trace()`` give
the same result.
``np.dot`` now raises ``TypeError`` instead of ``ValueError``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------
This behaviour mimics that of other functions such as ``np.inner``. If the two
arguments cannot be cast to a common type, it could have raised a ``TypeError``
or ``ValueError`` depending on their order. Now, ``np.dot`` will always
raise a ``TypeError``.
``linalg.norm`` return type changes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------
The ``linalg.norm`` function now does all its computations in floating point
and returns floating results. This change fixes bugs due to integer overflow
and the failure of abs with signed integers of minimum value, e.g., int8(-128).
@@ -326,7 +327,7 @@ Deprecations
============
Views of arrays in Fortran order
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------
The F_CONTIGUOUS flag was used to signal that views using a dtype that
changed the element size would change the first index. This was always
problematical for arrays that were both F_CONTIGUOUS and C_CONTIGUOUS
@@ -340,7 +341,7 @@ added to the view method to explicitly ask for Fortran order views, but
that will not be backward compatible.
Invalid arguments for array ordering
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
It is currently possible to pass in arguments for the ``order``
parameter in methods like ``array.flatten`` or ``array.ravel``
that were not one of the following: 'C', 'F', 'A', 'K' (note that
@@ -348,14 +349,14 @@ all of these possible values are both unicode and case insensitive).
Such behavior will not be allowed in future releases.
Random number generator in the ``testing`` namespace
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------
The Python standard library random number generator was previously exposed
in the ``testing`` namespace as ``testing.rand``. Using this generator is
not recommended and it will be removed in a future release. Use generators
from ``numpy.random`` namespace instead.
Random integer generation on a closed interval
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
In accordance with the Python C API, which gives preference to the half-open
interval over the closed one, ``np.random.random_integers`` is being
deprecated in favor of calling ``np.random.randint``, which has been
@@ -367,7 +368,7 @@ FutureWarnings
==============
Assigning to slices/views of ``MaskedArray``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
Currently a slice of a masked array contains a view of the original data and a
copy-on-write view of the mask. Consequently, any changes to the slice's mask
will result in a copy of the original mask being made and that new mask being
diff --git a/doc/release/1.11.1-notes.rst b/doc/release/1.11.1-notes.rst
index 37a6e300b..6303c32f0 100644
--- a/doc/release/1.11.1-notes.rst
+++ b/doc/release/1.11.1-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.11.1 Release Notes
-**************************
+==========================
Numpy 1.11.1 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
regressions found in Numpy 1.11.0 and includes several build related
diff --git a/doc/release/1.11.2-notes.rst b/doc/release/1.11.2-notes.rst
index f57afb778..c954089d5 100644
--- a/doc/release/1.11.2-notes.rst
+++ b/doc/release/1.11.2-notes.rst
@@ -1,5 +1,6 @@
+==========================
NumPy 1.11.2 Release Notes
-**************************
+==========================
Numpy 1.11.2 supports Python 2.6 - 2.7 and 3.2 - 3.5. It fixes bugs and
regressions found in Numpy 1.11.1 and includes several build related
diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
index 58a570b54..229593ed9 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/release/1.12.0-notes.rst
@@ -182,7 +182,7 @@ The following functions are changed: ``sum``, ``product``,
``bitwise_and`` identity changed
--------------------------------
-The previous identity was 1, it is now -1. See entry in `Improvements`_ for
+The previous identity was 1, it is now -1. See entry in Improvements for
more explanation.
ma.median warns and returns nan when unmasked invalid values are encountered
@@ -493,574 +493,3 @@ This causes warnings with the "default" or "module" filter to be shown once
for every offending user code line or user module instead of only once. On
python versions before 3.4, this can cause warnings to appear that were falsely
ignored before, which may be surprising especially in test suits.
-
-
-Contributors
-============
-
-A total of 139 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Aditya Panchal +
-* Ales Erjavec +
-* Alex Griffing
-* Alexandr Shadchin +
-* Alistair Muldal
-* Allan Haldane
-* Amit Aronovitch +
-* Andrei Kucharavy +
-* Antony Lee
-* Antti Kaihola +
-* Arne de Laat +
-* Auke Wiggers +
-* AustereCuriosity +
-* Badhri Narayanan Krishnakumar +
-* Ben North +
-* Ben Rowland +
-* Bertrand Lefebvre
-* Boxiang Sun
-* CJ Carey
-* Charles Harris
-* Christoph Gohlke
-* Daniel Ching +
-* Daniel Rasmussen +
-* Daniel Smith +
-* David Schaich +
-* Denis Alevi +
-* Devin Jeanpierre +
-* Dmitry Odzerikho
-* Dongjoon Hyun +
-* Edward Richards +
-* Ekaterina Tuzova +
-* Emilien Kofman +
-* Endolith
-* Eren Sezener +
-* Eric Moore
-* Eric Quintero +
-* Eric Wieser +
-* Erik M. Bray
-* Frederic Bastien
-* Friedrich Dunne +
-* Gerrit Holl
-* Golnaz Irannejad +
-* Graham Markall +
-* Greg Knoll +
-* Greg Young
-* Gustavo Serra Scalet +
-* Ines Wichert +
-* Irvin Probst +
-* Jaime Fernandez
-* James Sanders +
-* Jan David Mol +
-* Jan Schlüter
-* Jeremy Tuloup +
-* John Kirkham
-* John Zwinck +
-* Jonathan Helmus
-* Joseph Fox-Rabinovitz
-* Josh Wilson +
-* Joshua Warner +
-* Julian Taylor
-* Ka Wo Chen +
-* Kamil Rytarowski +
-* Kelsey Jordahl +
-* Kevin Deldycke +
-* Khaled Ben Abdallah Okuda +
-* Lion Krischer +
-* Loïc Estève +
-* Luca Mussi +
-* Mads Ohm Larsen +
-* Manoj Kumar +
-* Mario Emmenlauer +
-* Marshall Bockrath-Vandegrift +
-* Marshall Ward +
-* Marten van Kerkwijk
-* Mathieu Lamarre +
-* Matthew Brett
-* Matthew Harrigan +
-* Matthias Geier
-* Matti Picus +
-* Meet Udeshi +
-* Michael Felt +
-* Michael Goerz +
-* Michael Martin +
-* Michael Seifert +
-* Mike Nolta +
-* Nathaniel Beaver +
-* Nathaniel J. Smith
-* Naveen Arunachalam +
-* Nick Papior
-* Nikola Forró +
-* Oleksandr Pavlyk +
-* Olivier Grisel
-* Oren Amsalem +
-* Pauli Virtanen
-* Pavel Potocek +
-* Pedro Lacerda +
-* Peter Creasey +
-* Phil Elson +
-* Philip Gura +
-* Phillip J. Wolfram +
-* Pierre de Buyl +
-* Raghav RV +
-* Ralf Gommers
-* Ray Donnelly +
-* Rehas Sachdeva
-* Rob Malouf +
-* Robert Kern
-* Samuel St-Jean
-* Sanchez Gonzalez Alvaro +
-* Saurabh Mehta +
-* Scott Sanderson +
-* Sebastian Berg
-* Shayan Pooya +
-* Shota Kawabuchi +
-* Simon Conseil
-* Simon Gibbons
-* Sorin Sbarnea +
-* Stefan van der Walt
-* Stephan Hoyer
-* Steven J Kern +
-* Stuart Archibald
-* Tadeu Manoel +
-* Takuya Akiba +
-* Thomas A Caswell
-* Tom Bird +
-* Tony Kelman +
-* Toshihiro Kamishima +
-* Valentin Valls +
-* Varun Nayyar
-* Victor Stinner +
-* Warren Weckesser
-* Wendell Smith
-* Wojtek Ruszczewski +
-* Xavier Abellan Ecija +
-* Yaroslav Halchenko
-* Yash Shah +
-* Yinon Ehrlich +
-* Yu Feng +
-* nevimov +
-
-Pull requests merged
-====================
-
-A total of 418 pull requests were merged for this release.
-
-* `#4073 <https://github.com/numpy/numpy/pull/4073>`__: BUG: change real output checking to test if all imaginary parts...
-* `#4619 <https://github.com/numpy/numpy/pull/4619>`__: BUG : np.sum silently drops keepdims for sub-classes of ndarray
-* `#5488 <https://github.com/numpy/numpy/pull/5488>`__: ENH: add `contract`: optimizing numpy's einsum expression
-* `#5706 <https://github.com/numpy/numpy/pull/5706>`__: ENH: make some masked array methods behave more like ndarray...
-* `#5822 <https://github.com/numpy/numpy/pull/5822>`__: Allow many distributions to have a scale of 0.
-* `#6054 <https://github.com/numpy/numpy/pull/6054>`__: WIP: MAINT: Add deprecation warning to views of multi-field indexes
-* `#6298 <https://github.com/numpy/numpy/pull/6298>`__: Check lower base limit in base_repr.
-* `#6430 <https://github.com/numpy/numpy/pull/6430>`__: Fix issues with zero-width string fields
-* `#6656 <https://github.com/numpy/numpy/pull/6656>`__: ENH: usecols now accepts an int when only one column has to be...
-* `#6660 <https://github.com/numpy/numpy/pull/6660>`__: Added pathlib support for several functions
-* `#6872 <https://github.com/numpy/numpy/pull/6872>`__: ENH: linear interpolation of complex values in lib.interp
-* `#6997 <https://github.com/numpy/numpy/pull/6997>`__: MAINT: Simplify mtrand.pyx helpers
-* `#7003 <https://github.com/numpy/numpy/pull/7003>`__: BUG: Fix string copying for np.place
-* `#7026 <https://github.com/numpy/numpy/pull/7026>`__: DOC: Clarify behavior in np.random.uniform
-* `#7055 <https://github.com/numpy/numpy/pull/7055>`__: BUG: One Element Array Inputs Return Scalars in np.random
-* `#7063 <https://github.com/numpy/numpy/pull/7063>`__: REL: Update master branch after 1.11.x branch has been made.
-* `#7073 <https://github.com/numpy/numpy/pull/7073>`__: DOC: Update the 1.11.0 release notes.
-* `#7076 <https://github.com/numpy/numpy/pull/7076>`__: MAINT: Update the git .mailmap file.
-* `#7082 <https://github.com/numpy/numpy/pull/7082>`__: TST, DOC: Added Broadcasting Tests in test_random.py
-* `#7087 <https://github.com/numpy/numpy/pull/7087>`__: BLD: fix compilation on non glibc-Linuxes
-* `#7088 <https://github.com/numpy/numpy/pull/7088>`__: BUG: Have `norm` cast non-floating point arrays to 64-bit float...
-* `#7090 <https://github.com/numpy/numpy/pull/7090>`__: ENH: Added 'doane' and 'sqrt' estimators to np.histogram in numpy.function_base
-* `#7091 <https://github.com/numpy/numpy/pull/7091>`__: Revert "BLD: fix compilation on non glibc-Linuxes"
-* `#7092 <https://github.com/numpy/numpy/pull/7092>`__: BLD: fix compilation on non glibc-Linuxes
-* `#7099 <https://github.com/numpy/numpy/pull/7099>`__: TST: Suppressed warnings
-* `#7102 <https://github.com/numpy/numpy/pull/7102>`__: MAINT: Removed conditionals that are always false in datetime_strings.c
-* `#7105 <https://github.com/numpy/numpy/pull/7105>`__: DEP: Deprecate as_strided returning a writable array as default
-* `#7109 <https://github.com/numpy/numpy/pull/7109>`__: DOC: update Python versions requirements in the install docs
-* `#7114 <https://github.com/numpy/numpy/pull/7114>`__: MAINT: Fix typos in docs
-* `#7116 <https://github.com/numpy/numpy/pull/7116>`__: TST: Fixed f2py test for win32 virtualenv
-* `#7118 <https://github.com/numpy/numpy/pull/7118>`__: TST: Fixed f2py test for non-versioned python executables
-* `#7119 <https://github.com/numpy/numpy/pull/7119>`__: BUG: Fixed mingw.lib error
-* `#7125 <https://github.com/numpy/numpy/pull/7125>`__: DOC: Updated documentation wording and examples for np.percentile.
-* `#7129 <https://github.com/numpy/numpy/pull/7129>`__: BUG: Fixed 'midpoint' interpolation of np.percentile in odd cases.
-* `#7131 <https://github.com/numpy/numpy/pull/7131>`__: Fix setuptools sdist
-* `#7133 <https://github.com/numpy/numpy/pull/7133>`__: ENH: savez: temporary file alongside with target file and improve...
-* `#7134 <https://github.com/numpy/numpy/pull/7134>`__: MAINT: Fix some typos in a code string and comments
-* `#7141 <https://github.com/numpy/numpy/pull/7141>`__: BUG: Unpickled void scalars should be contiguous
-* `#7144 <https://github.com/numpy/numpy/pull/7144>`__: MAINT: Change `call_fortran` into `callfortran` in comments.
-* `#7145 <https://github.com/numpy/numpy/pull/7145>`__: BUG: Fixed regressions in np.piecewise in ref to #5737 and #5729.
-* `#7147 <https://github.com/numpy/numpy/pull/7147>`__: Temporarily disable __numpy_ufunc__
-* `#7148 <https://github.com/numpy/numpy/pull/7148>`__: ENH,TST: Bump stacklevel and add tests for warnings
-* `#7149 <https://github.com/numpy/numpy/pull/7149>`__: TST: Add missing suffix to temppath manager
-* `#7152 <https://github.com/numpy/numpy/pull/7152>`__: BUG: mode kwargs passed as unicode to np.pad raises an exception
-* `#7156 <https://github.com/numpy/numpy/pull/7156>`__: BUG: Reascertain that linspace respects ndarray subclasses in...
-* `#7167 <https://github.com/numpy/numpy/pull/7167>`__: DOC: Update Wikipedia references for mtrand.pyx
-* `#7171 <https://github.com/numpy/numpy/pull/7171>`__: TST: Fixed f2py test for Anaconda non-win32
-* `#7174 <https://github.com/numpy/numpy/pull/7174>`__: DOC: Fix broken pandas link in release notes
-* `#7177 <https://github.com/numpy/numpy/pull/7177>`__: ENH: added axis param for np.count_nonzero
-* `#7178 <https://github.com/numpy/numpy/pull/7178>`__: BUG: Fix binary_repr for negative numbers
-* `#7180 <https://github.com/numpy/numpy/pull/7180>`__: BUG: Fixed previous attempt to fix dimension mismatch in nanpercentile
-* `#7181 <https://github.com/numpy/numpy/pull/7181>`__: DOC: Updated minor typos in function_base.py and test_function_base.py
-* `#7191 <https://github.com/numpy/numpy/pull/7191>`__: DOC: add vstack, hstack, dstack reference to stack documentation.
-* `#7193 <https://github.com/numpy/numpy/pull/7193>`__: MAINT: Removed supurious assert in histogram estimators
-* `#7194 <https://github.com/numpy/numpy/pull/7194>`__: BUG: Raise a quieter `MaskedArrayFutureWarning` for mask changes.
-* `#7195 <https://github.com/numpy/numpy/pull/7195>`__: STY: Drop some trailing spaces in `numpy.ma.core`.
-* `#7196 <https://github.com/numpy/numpy/pull/7196>`__: Revert "DOC: add vstack, hstack, dstack reference to stack documentation."
-* `#7197 <https://github.com/numpy/numpy/pull/7197>`__: TST: Pin virtualenv used on Travis CI.
-* `#7198 <https://github.com/numpy/numpy/pull/7198>`__: ENH: Unlock the GIL for gufuncs
-* `#7199 <https://github.com/numpy/numpy/pull/7199>`__: MAINT: Cleanup for histogram bin estimator selection
-* `#7201 <https://github.com/numpy/numpy/pull/7201>`__: Raise IOError on not a file in python2
-* `#7202 <https://github.com/numpy/numpy/pull/7202>`__: MAINT: Made `iterable` return a boolean
-* `#7209 <https://github.com/numpy/numpy/pull/7209>`__: TST: Bump `virtualenv` to 14.0.6
-* `#7211 <https://github.com/numpy/numpy/pull/7211>`__: DOC: Fix fmin examples
-* `#7215 <https://github.com/numpy/numpy/pull/7215>`__: MAINT: Use PySlice_GetIndicesEx instead of custom reimplementation
-* `#7229 <https://github.com/numpy/numpy/pull/7229>`__: ENH: implement __complex__
-* `#7231 <https://github.com/numpy/numpy/pull/7231>`__: MRG: allow distributors to run custom init
-* `#7232 <https://github.com/numpy/numpy/pull/7232>`__: BLD: Switch order of test for lapack_mkl and openblas_lapack
-* `#7239 <https://github.com/numpy/numpy/pull/7239>`__: DOC: Removed residual merge markup from previous commit
-* `#7240 <https://github.com/numpy/numpy/pull/7240>`__: Change 'pubic' to 'public'.
-* `#7241 <https://github.com/numpy/numpy/pull/7241>`__: MAINT: update doc/sphinxext to numpydoc 0.6.0, and fix up some...
-* `#7243 <https://github.com/numpy/numpy/pull/7243>`__: ENH: Adding support to the range keyword for estimation of the...
-* `#7246 <https://github.com/numpy/numpy/pull/7246>`__: DOC: metion writeable keyword in as_strided in release notes
-* `#7247 <https://github.com/numpy/numpy/pull/7247>`__: TST: Fail quickly on AppVeyor for superseded PR builds
-* `#7248 <https://github.com/numpy/numpy/pull/7248>`__: DOC: remove link to documentation wiki editor from HOWTO_DOCUMENT.
-* `#7250 <https://github.com/numpy/numpy/pull/7250>`__: DOC,REL: Update 1.11.0 notes.
-* `#7251 <https://github.com/numpy/numpy/pull/7251>`__: BUG: only benchmark complex256 if it exists
-* `#7252 <https://github.com/numpy/numpy/pull/7252>`__: Forward port a fix and enhancement from 1.11.x
-* `#7253 <https://github.com/numpy/numpy/pull/7253>`__: DOC: note in h/v/dstack points users to stack/concatenate
-* `#7254 <https://github.com/numpy/numpy/pull/7254>`__: BUG: Enforce dtype for randint singletons
-* `#7256 <https://github.com/numpy/numpy/pull/7256>`__: MAINT: Use `is None` or `is not None` instead of `== None` or...
-* `#7257 <https://github.com/numpy/numpy/pull/7257>`__: DOC: Fix mismatched variable names in docstrings.
-* `#7258 <https://github.com/numpy/numpy/pull/7258>`__: ENH: Make numpy floor_divide and remainder agree with Python...
-* `#7260 <https://github.com/numpy/numpy/pull/7260>`__: BUG/TST: Fix #7259, do not "force scalar" for already scalar...
-* `#7261 <https://github.com/numpy/numpy/pull/7261>`__: Added self to mailmap
-* `#7266 <https://github.com/numpy/numpy/pull/7266>`__: BUG: Segfault for classes with deceptive __len__
-* `#7268 <https://github.com/numpy/numpy/pull/7268>`__: ENH: add geomspace function
-* `#7274 <https://github.com/numpy/numpy/pull/7274>`__: BUG: Preserve array order in np.delete
-* `#7275 <https://github.com/numpy/numpy/pull/7275>`__: DEP: Warn about assigning 'data' attribute of ndarray
-* `#7276 <https://github.com/numpy/numpy/pull/7276>`__: DOC: apply_along_axis missing whitespace inserted (before colon)
-* `#7278 <https://github.com/numpy/numpy/pull/7278>`__: BUG: Make returned unravel_index arrays writeable
-* `#7279 <https://github.com/numpy/numpy/pull/7279>`__: TST: Fixed elements being shuffled
-* `#7280 <https://github.com/numpy/numpy/pull/7280>`__: MAINT: Remove redundant trailing semicolons.
-* `#7285 <https://github.com/numpy/numpy/pull/7285>`__: BUG: Make Randint Backwards Compatible with Pandas
-* `#7286 <https://github.com/numpy/numpy/pull/7286>`__: MAINT: Fix typos in docs/comments of `ma` and `polynomial` modules.
-* `#7292 <https://github.com/numpy/numpy/pull/7292>`__: Clarify error on repr failure in assert_equal.
-* `#7294 <https://github.com/numpy/numpy/pull/7294>`__: ENH: add support for BLIS to numpy.distutils
-* `#7295 <https://github.com/numpy/numpy/pull/7295>`__: DOC: understanding code and getting started section to dev doc
-* `#7296 <https://github.com/numpy/numpy/pull/7296>`__: Revert part of #3907 which incorrectly propogated MaskedArray...
-* `#7299 <https://github.com/numpy/numpy/pull/7299>`__: DOC: Fix mismatched variable names in docstrings.
-* `#7300 <https://github.com/numpy/numpy/pull/7300>`__: DOC: dev: stop recommending keeping local master updated with...
-* `#7301 <https://github.com/numpy/numpy/pull/7301>`__: DOC: Update release notes
-* `#7305 <https://github.com/numpy/numpy/pull/7305>`__: BUG: Remove data race in mtrand: two threads could mutate the...
-* `#7307 <https://github.com/numpy/numpy/pull/7307>`__: DOC: Missing some characters in link.
-* `#7308 <https://github.com/numpy/numpy/pull/7308>`__: BUG: Incrementing the wrong reference on return
-* `#7310 <https://github.com/numpy/numpy/pull/7310>`__: STY: Fix GitHub rendering of ordered lists >9
-* `#7311 <https://github.com/numpy/numpy/pull/7311>`__: ENH: Make _pointer_type_cache functional
-* `#7313 <https://github.com/numpy/numpy/pull/7313>`__: DOC: corrected grammatical error in quickstart doc
-* `#7325 <https://github.com/numpy/numpy/pull/7325>`__: BUG, MAINT: Improve fromnumeric.py interface for downstream compatibility
-* `#7328 <https://github.com/numpy/numpy/pull/7328>`__: DEP: Deprecated using a float index in linspace
-* `#7331 <https://github.com/numpy/numpy/pull/7331>`__: Add comment, TST: fix MemoryError on win32
-* `#7332 <https://github.com/numpy/numpy/pull/7332>`__: Check for no solution in np.irr Fixes #6744
-* `#7338 <https://github.com/numpy/numpy/pull/7338>`__: TST: Install `pytz` in the CI.
-* `#7340 <https://github.com/numpy/numpy/pull/7340>`__: DOC: Fixed math rendering in tensordot docs.
-* `#7341 <https://github.com/numpy/numpy/pull/7341>`__: TST: Add test for #6469
-* `#7344 <https://github.com/numpy/numpy/pull/7344>`__: DOC: Fix more typos in docs and comments.
-* `#7346 <https://github.com/numpy/numpy/pull/7346>`__: Generalized flip
-* `#7347 <https://github.com/numpy/numpy/pull/7347>`__: ENH Generalized rot90
-* `#7348 <https://github.com/numpy/numpy/pull/7348>`__: Maint: Removed extra space from `ureduce`
-* `#7349 <https://github.com/numpy/numpy/pull/7349>`__: MAINT: Hide nan warnings for masked internal MA computations
-* `#7350 <https://github.com/numpy/numpy/pull/7350>`__: BUG: MA ufuncs should set mask to False, not array([False])
-* `#7351 <https://github.com/numpy/numpy/pull/7351>`__: TST: Fix some MA tests to avoid looking at the .data attribute
-* `#7358 <https://github.com/numpy/numpy/pull/7358>`__: BUG: pull request related to the issue #7353
-* `#7359 <https://github.com/numpy/numpy/pull/7359>`__: Update 7314, DOC: Clarify valid integer range for random.seed...
-* `#7361 <https://github.com/numpy/numpy/pull/7361>`__: MAINT: Fix copy and paste oversight.
-* `#7363 <https://github.com/numpy/numpy/pull/7363>`__: ENH: Make no unshare mask future warnings less noisy
-* `#7366 <https://github.com/numpy/numpy/pull/7366>`__: TST: fix #6542, add tests to check non-iterable argument raises...
-* `#7373 <https://github.com/numpy/numpy/pull/7373>`__: ENH: Add bitwise_and identity
-* `#7378 <https://github.com/numpy/numpy/pull/7378>`__: added NumPy logo and separator
-* `#7382 <https://github.com/numpy/numpy/pull/7382>`__: MAINT: cleanup np.average
-* `#7385 <https://github.com/numpy/numpy/pull/7385>`__: DOC: note about wheels / windows wheels for pypi
-* `#7386 <https://github.com/numpy/numpy/pull/7386>`__: Added label icon to Travis status
-* `#7397 <https://github.com/numpy/numpy/pull/7397>`__: BUG: incorrect type for objects whose __len__ fails
-* `#7398 <https://github.com/numpy/numpy/pull/7398>`__: DOC: fix typo
-* `#7404 <https://github.com/numpy/numpy/pull/7404>`__: Use PyMem_RawMalloc on Python 3.4 and newer
-* `#7406 <https://github.com/numpy/numpy/pull/7406>`__: ENH ufunc called on memmap return a ndarray
-* `#7407 <https://github.com/numpy/numpy/pull/7407>`__: BUG: Fix decref before incref for in-place accumulate
-* `#7410 <https://github.com/numpy/numpy/pull/7410>`__: DOC: add nanprod to the list of math routines
-* `#7414 <https://github.com/numpy/numpy/pull/7414>`__: Tweak corrcoef
-* `#7415 <https://github.com/numpy/numpy/pull/7415>`__: DOC: Documention fixes
-* `#7416 <https://github.com/numpy/numpy/pull/7416>`__: BUG: Incorrect handling of range in `histogram` with automatic...
-* `#7418 <https://github.com/numpy/numpy/pull/7418>`__: DOC: Minor typo fix, hermefik -> hermefit.
-* `#7421 <https://github.com/numpy/numpy/pull/7421>`__: ENH: adds np.nancumsum and np.nancumprod
-* `#7423 <https://github.com/numpy/numpy/pull/7423>`__: BUG: Ongoing fixes to PR#7416
-* `#7430 <https://github.com/numpy/numpy/pull/7430>`__: DOC: Update 1.11.0-notes.
-* `#7433 <https://github.com/numpy/numpy/pull/7433>`__: MAINT: FutureWarning for changes to np.average subclass handling
-* `#7437 <https://github.com/numpy/numpy/pull/7437>`__: np.full now defaults to the filling value's dtype.
-* `#7438 <https://github.com/numpy/numpy/pull/7438>`__: Allow rolling multiple axes at the same time.
-* `#7439 <https://github.com/numpy/numpy/pull/7439>`__: BUG: Do not try sequence repeat unless necessary
-* `#7442 <https://github.com/numpy/numpy/pull/7442>`__: MANT: Simplify diagonal length calculation logic
-* `#7445 <https://github.com/numpy/numpy/pull/7445>`__: BUG: reference count leak in bincount, fixes #6805
-* `#7446 <https://github.com/numpy/numpy/pull/7446>`__: DOC: ndarray typo fix
-* `#7447 <https://github.com/numpy/numpy/pull/7447>`__: BUG: scalar integer negative powers gave wrong results.
-* `#7448 <https://github.com/numpy/numpy/pull/7448>`__: DOC: array "See also" link to full and full_like instead of fill
-* `#7456 <https://github.com/numpy/numpy/pull/7456>`__: BUG: int overflow in reshape, fixes #7455, fixes #7293
-* `#7463 <https://github.com/numpy/numpy/pull/7463>`__: BUG: fix array too big error for wide dtypes.
-* `#7466 <https://github.com/numpy/numpy/pull/7466>`__: BUG: segfault inplace object reduceat, fixes #7465
-* `#7468 <https://github.com/numpy/numpy/pull/7468>`__: BUG: more on inplace reductions, fixes #615
-* `#7469 <https://github.com/numpy/numpy/pull/7469>`__: MAINT: Update git .mailmap
-* `#7472 <https://github.com/numpy/numpy/pull/7472>`__: MAINT: Update .mailmap.
-* `#7477 <https://github.com/numpy/numpy/pull/7477>`__: MAINT: Yet more .mailmap updates for recent contributors.
-* `#7481 <https://github.com/numpy/numpy/pull/7481>`__: BUG: Fix segfault in PyArray_OrderConverter
-* `#7482 <https://github.com/numpy/numpy/pull/7482>`__: BUG: Memory Leak in _GenericBinaryOutFunction
-* `#7489 <https://github.com/numpy/numpy/pull/7489>`__: Faster real_if_close.
-* `#7491 <https://github.com/numpy/numpy/pull/7491>`__: DOC: Update subclassing doc regarding downstream compatibility
-* `#7496 <https://github.com/numpy/numpy/pull/7496>`__: BUG: don't use pow for integer power ufunc loops.
-* `#7504 <https://github.com/numpy/numpy/pull/7504>`__: DOC: remove "arr" from keepdims docstrings
-* `#7505 <https://github.com/numpy/numpy/pull/7505>`__: MAIN: fix to #7382, make scl in np.average writeable
-* `#7507 <https://github.com/numpy/numpy/pull/7507>`__: MAINT: Remove nose.SkipTest import.
-* `#7508 <https://github.com/numpy/numpy/pull/7508>`__: DOC: link frompyfunc and vectorize
-* `#7511 <https://github.com/numpy/numpy/pull/7511>`__: numpy.power(0, 0) should return 1
-* `#7515 <https://github.com/numpy/numpy/pull/7515>`__: BUG: MaskedArray.count treats negative axes incorrectly
-* `#7518 <https://github.com/numpy/numpy/pull/7518>`__: BUG: Extend glibc complex trig functions blacklist to glibc <...
-* `#7521 <https://github.com/numpy/numpy/pull/7521>`__: DOC: rephrase writeup of memmap changes
-* `#7522 <https://github.com/numpy/numpy/pull/7522>`__: BUG: Fixed iteration over additional bad commands
-* `#7526 <https://github.com/numpy/numpy/pull/7526>`__: DOC: Removed an extra `:const:`
-* `#7529 <https://github.com/numpy/numpy/pull/7529>`__: BUG: Floating exception with invalid axis in np.lexsort
-* `#7534 <https://github.com/numpy/numpy/pull/7534>`__: MAINT: Update setup.py to reflect supported python versions.
-* `#7536 <https://github.com/numpy/numpy/pull/7536>`__: MAINT: Always use PyCapsule instead of PyCObject in mtrand.pyx
-* `#7539 <https://github.com/numpy/numpy/pull/7539>`__: MAINT: Cleanup of random stuff
-* `#7549 <https://github.com/numpy/numpy/pull/7549>`__: BUG: allow graceful recovery for no Liux compiler
-* `#7562 <https://github.com/numpy/numpy/pull/7562>`__: BUG: Fix test_from_object_array_unicode (test_defchararray.TestBasic)…
-* `#7565 <https://github.com/numpy/numpy/pull/7565>`__: BUG: Fix test_ctypeslib and test_indexing for debug interpreter
-* `#7566 <https://github.com/numpy/numpy/pull/7566>`__: MAINT: use manylinux1 wheel for cython
-* `#7568 <https://github.com/numpy/numpy/pull/7568>`__: Fix a false positive OverflowError in Python 3.x when value above...
-* `#7579 <https://github.com/numpy/numpy/pull/7579>`__: DOC: clarify purpose of Attributes section
-* `#7584 <https://github.com/numpy/numpy/pull/7584>`__: BUG: fixes #7572, percent in path
-* `#7586 <https://github.com/numpy/numpy/pull/7586>`__: Make np.ma.take works on scalars
-* `#7587 <https://github.com/numpy/numpy/pull/7587>`__: BUG: linalg.norm(): Don't convert object arrays to float
-* `#7598 <https://github.com/numpy/numpy/pull/7598>`__: Cast array size to int64 when loading from archive
-* `#7602 <https://github.com/numpy/numpy/pull/7602>`__: DOC: Remove isreal and iscomplex from ufunc list
-* `#7605 <https://github.com/numpy/numpy/pull/7605>`__: DOC: fix incorrect Gamma distribution parameterization comments
-* `#7609 <https://github.com/numpy/numpy/pull/7609>`__: BUG: Fix TypeError when raising TypeError
-* `#7611 <https://github.com/numpy/numpy/pull/7611>`__: ENH: expose test runner raise_warnings option
-* `#7614 <https://github.com/numpy/numpy/pull/7614>`__: BLD: Avoid using os.spawnve in favor of os.spawnv in exec_command
-* `#7618 <https://github.com/numpy/numpy/pull/7618>`__: BUG: distance arg of np.gradient must be scalar, fix docstring
-* `#7626 <https://github.com/numpy/numpy/pull/7626>`__: DOC: RST definition list fixes
-* `#7627 <https://github.com/numpy/numpy/pull/7627>`__: MAINT: unify tup processing, move tup use to after all PyTuple_SetItem...
-* `#7630 <https://github.com/numpy/numpy/pull/7630>`__: MAINT: add ifdef around PyDictProxy_Check macro
-* `#7631 <https://github.com/numpy/numpy/pull/7631>`__: MAINT: linalg: fix comment, simplify math
-* `#7634 <https://github.com/numpy/numpy/pull/7634>`__: BLD: correct C compiler customization in system_info.py Closes...
-* `#7635 <https://github.com/numpy/numpy/pull/7635>`__: BUG: ma.median alternate fix for #7592
-* `#7636 <https://github.com/numpy/numpy/pull/7636>`__: MAINT: clean up testing.assert_raises_regexp, 2.6-specific code...
-* `#7637 <https://github.com/numpy/numpy/pull/7637>`__: MAINT: clearer exception message when importing multiarray fails.
-* `#7639 <https://github.com/numpy/numpy/pull/7639>`__: TST: fix a set of test errors in master.
-* `#7643 <https://github.com/numpy/numpy/pull/7643>`__: DOC : minor changes to linspace docstring
-* `#7651 <https://github.com/numpy/numpy/pull/7651>`__: BUG: one to any power is still 1. Broken edgecase for int arrays
-* `#7655 <https://github.com/numpy/numpy/pull/7655>`__: BLD: Remove Intel compiler flag -xSSE4.2
-* `#7658 <https://github.com/numpy/numpy/pull/7658>`__: BUG: fix incorrect printing of 1D masked arrays
-* `#7659 <https://github.com/numpy/numpy/pull/7659>`__: BUG: Temporary fix for str(mvoid) for object field types
-* `#7664 <https://github.com/numpy/numpy/pull/7664>`__: BUG: Fix unicode with byte swap transfer and copyswap
-* `#7667 <https://github.com/numpy/numpy/pull/7667>`__: Restore histogram consistency
-* `#7668 <https://github.com/numpy/numpy/pull/7668>`__: ENH: Do not check the type of module.__dict__ explicit in test.
-* `#7669 <https://github.com/numpy/numpy/pull/7669>`__: BUG: boolean assignment no GIL release when transfer needs API
-* `#7673 <https://github.com/numpy/numpy/pull/7673>`__: DOC: Create Numpy 1.11.1 release notes.
-* `#7675 <https://github.com/numpy/numpy/pull/7675>`__: BUG: fix handling of right edge of final bin.
-* `#7678 <https://github.com/numpy/numpy/pull/7678>`__: BUG: Fix np.clip bug NaN handling for Visual Studio 2015
-* `#7679 <https://github.com/numpy/numpy/pull/7679>`__: MAINT: Fix up C++ comment in arraytypes.c.src.
-* `#7681 <https://github.com/numpy/numpy/pull/7681>`__: DOC: Update 1.11.1 release notes.
-* `#7686 <https://github.com/numpy/numpy/pull/7686>`__: ENH: Changing FFT cache to a bounded LRU cache
-* `#7688 <https://github.com/numpy/numpy/pull/7688>`__: DOC: fix broken genfromtxt examples in user guide. Closes gh-7662.
-* `#7689 <https://github.com/numpy/numpy/pull/7689>`__: BENCH: add correlate/convolve benchmarks.
-* `#7696 <https://github.com/numpy/numpy/pull/7696>`__: DOC: update wheel build / upload instructions
-* `#7699 <https://github.com/numpy/numpy/pull/7699>`__: BLD: preserve library order
-* `#7704 <https://github.com/numpy/numpy/pull/7704>`__: ENH: Add bits attribute to np.finfo
-* `#7712 <https://github.com/numpy/numpy/pull/7712>`__: BUG: Fix race condition with new FFT cache
-* `#7715 <https://github.com/numpy/numpy/pull/7715>`__: BUG: Remove memory leak in np.place
-* `#7719 <https://github.com/numpy/numpy/pull/7719>`__: BUG: Fix segfault in np.random.shuffle for arrays of different...
-* `#7723 <https://github.com/numpy/numpy/pull/7723>`__: Change mkl_info.dir_env_var from MKL to MKLROOT
-* `#7727 <https://github.com/numpy/numpy/pull/7727>`__: DOC: Corrections in Datetime Units-arrays.datetime.rst
-* `#7729 <https://github.com/numpy/numpy/pull/7729>`__: DOC: fix typo in savetxt docstring (closes #7620)
-* `#7733 <https://github.com/numpy/numpy/pull/7733>`__: Update 7525, DOC: Fix order='A' docs of np.array.
-* `#7734 <https://github.com/numpy/numpy/pull/7734>`__: Update 7542, ENH: Add `polyrootval` to numpy.polynomial
-* `#7735 <https://github.com/numpy/numpy/pull/7735>`__: BUG: fix issue on OS X with Python 3.x where npymath.ini was...
-* `#7739 <https://github.com/numpy/numpy/pull/7739>`__: DOC: Mention the changes of #6430 in the release notes.
-* `#7740 <https://github.com/numpy/numpy/pull/7740>`__: DOC: add reference to poisson rng
-* `#7743 <https://github.com/numpy/numpy/pull/7743>`__: Update 7476, DEP: deprecate Numeric-style typecodes, closes #2148
-* `#7744 <https://github.com/numpy/numpy/pull/7744>`__: DOC: Remove "ones_like" from ufuncs list (it is not)
-* `#7746 <https://github.com/numpy/numpy/pull/7746>`__: DOC: Clarify the effect of rcond in numpy.linalg.lstsq.
-* `#7747 <https://github.com/numpy/numpy/pull/7747>`__: Update 7672, BUG: Make sure we don't divide by zero
-* `#7748 <https://github.com/numpy/numpy/pull/7748>`__: DOC: Update float32 mean example in docstring
-* `#7754 <https://github.com/numpy/numpy/pull/7754>`__: Update 7612, ENH: Add broadcast.ndim to match code elsewhere.
-* `#7757 <https://github.com/numpy/numpy/pull/7757>`__: Update 7175, BUG: Invalid read of size 4 in PyArray_FromFile
-* `#7759 <https://github.com/numpy/numpy/pull/7759>`__: BUG: Fix numpy.i support for numpy API < 1.7.
-* `#7760 <https://github.com/numpy/numpy/pull/7760>`__: ENH: Make assert_almost_equal & assert_array_almost_equal consistent.
-* `#7766 <https://github.com/numpy/numpy/pull/7766>`__: fix an English typo
-* `#7771 <https://github.com/numpy/numpy/pull/7771>`__: DOC: link geomspace from logspace
-* `#7773 <https://github.com/numpy/numpy/pull/7773>`__: DOC: Remove a redundant the
-* `#7777 <https://github.com/numpy/numpy/pull/7777>`__: DOC: Update Numpy 1.11.1 release notes.
-* `#7785 <https://github.com/numpy/numpy/pull/7785>`__: DOC: update wheel building procedure for release
-* `#7789 <https://github.com/numpy/numpy/pull/7789>`__: MRG: add note of 64-bit wheels on Windows
-* `#7791 <https://github.com/numpy/numpy/pull/7791>`__: f2py.compile issues (#7683)
-* `#7799 <https://github.com/numpy/numpy/pull/7799>`__: "lambda" is not allowed to use as keyword arguments in a sample...
-* `#7803 <https://github.com/numpy/numpy/pull/7803>`__: BUG: interpret 'c' PEP3118/struct type as 'S1'.
-* `#7807 <https://github.com/numpy/numpy/pull/7807>`__: DOC: Misplaced parens in formula
-* `#7817 <https://github.com/numpy/numpy/pull/7817>`__: BUG: Make sure npy_mul_with_overflow_<type> detects overflow.
-* `#7818 <https://github.com/numpy/numpy/pull/7818>`__: numpy/distutils/misc_util.py fix for #7809: check that _tmpdirs...
-* `#7820 <https://github.com/numpy/numpy/pull/7820>`__: MAINT: Allocate fewer bytes for empty arrays.
-* `#7823 <https://github.com/numpy/numpy/pull/7823>`__: BUG: Fixed masked array behavior for scalar inputs to np.ma.atleast_*d
-* `#7834 <https://github.com/numpy/numpy/pull/7834>`__: DOC: Added an example
-* `#7839 <https://github.com/numpy/numpy/pull/7839>`__: Pypy fixes
-* `#7840 <https://github.com/numpy/numpy/pull/7840>`__: Fix ATLAS version detection
-* `#7842 <https://github.com/numpy/numpy/pull/7842>`__: Fix versionadded tags
-* `#7848 <https://github.com/numpy/numpy/pull/7848>`__: MAINT: Fix remaining uses of deprecated Python imp module.
-* `#7853 <https://github.com/numpy/numpy/pull/7853>`__: BUG: Make sure numpy globals keep identity after reload.
-* `#7863 <https://github.com/numpy/numpy/pull/7863>`__: ENH: turn quicksort into introsort
-* `#7866 <https://github.com/numpy/numpy/pull/7866>`__: Document runtests extra argv
-* `#7871 <https://github.com/numpy/numpy/pull/7871>`__: BUG: handle introsort depth limit properly
-* `#7879 <https://github.com/numpy/numpy/pull/7879>`__: DOC: fix typo in documentation of loadtxt (closes #7878)
-* `#7885 <https://github.com/numpy/numpy/pull/7885>`__: Handle NetBSD specific <sys/endian.h>
-* `#7889 <https://github.com/numpy/numpy/pull/7889>`__: DOC: #7881. Fix link to record arrays
-* `#7894 <https://github.com/numpy/numpy/pull/7894>`__: fixup-7790, BUG: construct ma.array from np.array which contains...
-* `#7898 <https://github.com/numpy/numpy/pull/7898>`__: Spelling and grammar fix.
-* `#7903 <https://github.com/numpy/numpy/pull/7903>`__: BUG: fix float16 type not being called due to wrong ordering
-* `#7908 <https://github.com/numpy/numpy/pull/7908>`__: BLD: Fixed detection for recent MKL versions
-* `#7911 <https://github.com/numpy/numpy/pull/7911>`__: BUG: fix for issue#7835 (ma.median of 1d)
-* `#7912 <https://github.com/numpy/numpy/pull/7912>`__: ENH: skip or avoid gc/objectmodel differences btwn pypy and cpython
-* `#7918 <https://github.com/numpy/numpy/pull/7918>`__: ENH: allow numpy.apply_along_axis() to work with ndarray subclasses
-* `#7922 <https://github.com/numpy/numpy/pull/7922>`__: ENH: Add ma.convolve and ma.correlate for #6458
-* `#7925 <https://github.com/numpy/numpy/pull/7925>`__: Monkey-patch _msvccompile.gen_lib_option like any other compilators
-* `#7931 <https://github.com/numpy/numpy/pull/7931>`__: BUG: Check for HAVE_LDOUBLE_DOUBLE_DOUBLE_LE in npy_math_complex.
-* `#7936 <https://github.com/numpy/numpy/pull/7936>`__: ENH: improve duck typing inside iscomplexobj
-* `#7937 <https://github.com/numpy/numpy/pull/7937>`__: BUG: Guard against buggy comparisons in generic quicksort.
-* `#7938 <https://github.com/numpy/numpy/pull/7938>`__: DOC: add cbrt to math summary page
-* `#7941 <https://github.com/numpy/numpy/pull/7941>`__: BUG: Make sure numpy globals keep identity after reload.
-* `#7943 <https://github.com/numpy/numpy/pull/7943>`__: DOC: #7927. Remove deprecated note for memmap relevant for Python...
-* `#7952 <https://github.com/numpy/numpy/pull/7952>`__: BUG: Use keyword arguments to initialize Extension base class.
-* `#7956 <https://github.com/numpy/numpy/pull/7956>`__: BLD: remove __NUMPY_SETUP__ from builtins at end of setup.py
-* `#7963 <https://github.com/numpy/numpy/pull/7963>`__: BUG: MSVCCompiler grows 'lib' & 'include' env strings exponentially.
-* `#7965 <https://github.com/numpy/numpy/pull/7965>`__: BUG: cannot modify tuple after use
-* `#7976 <https://github.com/numpy/numpy/pull/7976>`__: DOC: Fixed documented dimension of return value
-* `#7977 <https://github.com/numpy/numpy/pull/7977>`__: DOC: Create 1.11.2 release notes.
-* `#7979 <https://github.com/numpy/numpy/pull/7979>`__: DOC: Corrected allowed keywords in add_(installed_)library
-* `#7980 <https://github.com/numpy/numpy/pull/7980>`__: ENH: Add ability to runtime select ufunc loops, add AVX2 integer...
-* `#7985 <https://github.com/numpy/numpy/pull/7985>`__: Rebase 7763, ENH: Add new warning suppression/filtering context
-* `#7987 <https://github.com/numpy/numpy/pull/7987>`__: DOC: See also np.load and np.memmap in np.lib.format.open_memmap
-* `#7988 <https://github.com/numpy/numpy/pull/7988>`__: DOC: Include docstring for cbrt, spacing and fabs in documentation
-* `#7999 <https://github.com/numpy/numpy/pull/7999>`__: ENH: add inplace cases to fast ufunc loop macros
-* `#8006 <https://github.com/numpy/numpy/pull/8006>`__: DOC: Update 1.11.2 release notes.
-* `#8008 <https://github.com/numpy/numpy/pull/8008>`__: MAINT: Remove leftover imp module imports.
-* `#8009 <https://github.com/numpy/numpy/pull/8009>`__: DOC: Fixed three typos in the c-info.ufunc-tutorial
-* `#8011 <https://github.com/numpy/numpy/pull/8011>`__: DOC: Update 1.11.2 release notes.
-* `#8014 <https://github.com/numpy/numpy/pull/8014>`__: BUG: Fix fid.close() to use os.close(fid)
-* `#8016 <https://github.com/numpy/numpy/pull/8016>`__: BUG: Fix numpy.ma.median.
-* `#8018 <https://github.com/numpy/numpy/pull/8018>`__: BUG: Fixes return for np.ma.count if keepdims is True and axis...
-* `#8021 <https://github.com/numpy/numpy/pull/8021>`__: DOC: change all non-code instances of Numpy to NumPy
-* `#8027 <https://github.com/numpy/numpy/pull/8027>`__: ENH: Add platform indepedent lib dir to PYTHONPATH
-* `#8028 <https://github.com/numpy/numpy/pull/8028>`__: DOC: Update 1.11.2 release notes.
-* `#8030 <https://github.com/numpy/numpy/pull/8030>`__: BUG: fix np.ma.median with only one non-masked value and an axis...
-* `#8038 <https://github.com/numpy/numpy/pull/8038>`__: MAINT: Update error message in rollaxis.
-* `#8040 <https://github.com/numpy/numpy/pull/8040>`__: Update add_newdocs.py
-* `#8042 <https://github.com/numpy/numpy/pull/8042>`__: BUG: core: fix bug in NpyIter buffering with discontinuous arrays
-* `#8045 <https://github.com/numpy/numpy/pull/8045>`__: DOC: Update 1.11.2 release notes.
-* `#8050 <https://github.com/numpy/numpy/pull/8050>`__: remove refcount semantics, now a.resize() almost always requires...
-* `#8051 <https://github.com/numpy/numpy/pull/8051>`__: Clear signaling NaN exceptions
-* `#8054 <https://github.com/numpy/numpy/pull/8054>`__: ENH: add signature argument to vectorize for vectorizing like...
-* `#8057 <https://github.com/numpy/numpy/pull/8057>`__: BUG: lib: Simplify (and fix) pad's handling of the pad_width
-* `#8061 <https://github.com/numpy/numpy/pull/8061>`__: BUG : financial.pmt modifies input (issue #8055)
-* `#8064 <https://github.com/numpy/numpy/pull/8064>`__: MAINT: Add PMIP files to .gitignore
-* `#8065 <https://github.com/numpy/numpy/pull/8065>`__: BUG: Assert fromfile ending earlier in pyx_processing
-* `#8066 <https://github.com/numpy/numpy/pull/8066>`__: BUG, TST: Fix python3-dbg bug in Travis script
-* `#8071 <https://github.com/numpy/numpy/pull/8071>`__: MAINT: Add Tempita to randint helpers
-* `#8075 <https://github.com/numpy/numpy/pull/8075>`__: DOC: Fix description of isinf in nan_to_num
-* `#8080 <https://github.com/numpy/numpy/pull/8080>`__: BUG: non-integers can end up in dtype offsets
-* `#8081 <https://github.com/numpy/numpy/pull/8081>`__: Update outdated Nose URL to nose.readthedocs.io
-* `#8083 <https://github.com/numpy/numpy/pull/8083>`__: ENH: Deprecation warnings for `/` integer division when running...
-* `#8084 <https://github.com/numpy/numpy/pull/8084>`__: DOC: Fix erroneous return type description for np.roots.
-* `#8087 <https://github.com/numpy/numpy/pull/8087>`__: BUG: financial.pmt modifies input #8055
-* `#8088 <https://github.com/numpy/numpy/pull/8088>`__: MAINT: Remove duplicate randint helpers code.
-* `#8093 <https://github.com/numpy/numpy/pull/8093>`__: MAINT: fix assert_raises_regex when used as a context manager
-* `#8096 <https://github.com/numpy/numpy/pull/8096>`__: ENH: Vendorize tempita.
-* `#8098 <https://github.com/numpy/numpy/pull/8098>`__: DOC: Enhance description/usage for np.linalg.eig*h
-* `#8103 <https://github.com/numpy/numpy/pull/8103>`__: Pypy fixes
-* `#8104 <https://github.com/numpy/numpy/pull/8104>`__: Fix test code on cpuinfo's main function
-* `#8107 <https://github.com/numpy/numpy/pull/8107>`__: BUG: Fix array printing with precision=0.
-* `#8109 <https://github.com/numpy/numpy/pull/8109>`__: Fix bug in ravel_multi_index for big indices (Issue #7546)
-* `#8110 <https://github.com/numpy/numpy/pull/8110>`__: BUG: distutils: fix issue with rpath in fcompiler/gnu.py
-* `#8111 <https://github.com/numpy/numpy/pull/8111>`__: ENH: Add a tool for release authors and PRs.
-* `#8112 <https://github.com/numpy/numpy/pull/8112>`__: DOC: Fix "See also" links in linalg.
-* `#8114 <https://github.com/numpy/numpy/pull/8114>`__: BUG: core: add missing error check after PyLong_AsSsize_t
-* `#8121 <https://github.com/numpy/numpy/pull/8121>`__: DOC: Improve histogram2d() example.
-* `#8122 <https://github.com/numpy/numpy/pull/8122>`__: BUG: Fix broken pickle in MaskedArray when dtype is object (Return...
-* `#8124 <https://github.com/numpy/numpy/pull/8124>`__: BUG: Fixed build break
-* `#8125 <https://github.com/numpy/numpy/pull/8125>`__: Rebase, BUG: Fixed deepcopy of F-order object arrays.
-* `#8127 <https://github.com/numpy/numpy/pull/8127>`__: BUG: integers to a negative integer powers should error.
-* `#8141 <https://github.com/numpy/numpy/pull/8141>`__: improve configure checks for broken systems
-* `#8142 <https://github.com/numpy/numpy/pull/8142>`__: BUG: np.ma.mean and var should return scalar if no mask
-* `#8148 <https://github.com/numpy/numpy/pull/8148>`__: BUG: import full module path in npy_load_module
-* `#8153 <https://github.com/numpy/numpy/pull/8153>`__: MAINT: Expose void-scalar "base" attribute in python
-* `#8156 <https://github.com/numpy/numpy/pull/8156>`__: DOC: added example with empty indices for a scalar, #8138
-* `#8160 <https://github.com/numpy/numpy/pull/8160>`__: BUG: fix _array2string for structured array (issue #5692)
-* `#8164 <https://github.com/numpy/numpy/pull/8164>`__: MAINT: Update mailmap for NumPy 1.12.0
-* `#8165 <https://github.com/numpy/numpy/pull/8165>`__: Fixup 8152, BUG: assert_allclose(..., equal_nan=False) doesn't...
-* `#8167 <https://github.com/numpy/numpy/pull/8167>`__: Fixup 8146, DOC: Clarify when PyArray_{Max, Min, Ptp} return...
-* `#8168 <https://github.com/numpy/numpy/pull/8168>`__: DOC: Minor spelling fix in genfromtxt() docstring.
-* `#8173 <https://github.com/numpy/numpy/pull/8173>`__: BLD: Enable build on AIX
-* `#8174 <https://github.com/numpy/numpy/pull/8174>`__: DOC: warn that dtype.descr is only for use in PEP3118
-* `#8177 <https://github.com/numpy/numpy/pull/8177>`__: MAINT: Add python 3.6 support to suppress_warnings
-* `#8178 <https://github.com/numpy/numpy/pull/8178>`__: MAINT: Fix ResourceWarning new in Python 3.6.
-* `#8180 <https://github.com/numpy/numpy/pull/8180>`__: FIX: protect stolen ref by PyArray_NewFromDescr in array_empty
-* `#8181 <https://github.com/numpy/numpy/pull/8181>`__: ENH: Improve announce to find github squash-merge commits.
-* `#8182 <https://github.com/numpy/numpy/pull/8182>`__: MAINT: Update .mailmap
-* `#8183 <https://github.com/numpy/numpy/pull/8183>`__: MAINT: Ediff1d performance
-* `#8184 <https://github.com/numpy/numpy/pull/8184>`__: MAINT: make `assert_allclose` behavior on `nan`s match pre 1.12
-* `#8188 <https://github.com/numpy/numpy/pull/8188>`__: DOC: 'highest' is exclusive for randint()
-* `#8189 <https://github.com/numpy/numpy/pull/8189>`__: BUG: setfield should raise if arr is not writeable
-* `#8190 <https://github.com/numpy/numpy/pull/8190>`__: ENH: Add a float_power function with at least float64 precision.
-* `#8197 <https://github.com/numpy/numpy/pull/8197>`__: DOC: Add missing arguments to np.ufunc.outer
-* `#8198 <https://github.com/numpy/numpy/pull/8198>`__: DEP: Deprecate the keepdims argument to accumulate
-* `#8199 <https://github.com/numpy/numpy/pull/8199>`__: MAINT: change path to env in distutils.system_info. Closes gh-8195.
-* `#8200 <https://github.com/numpy/numpy/pull/8200>`__: BUG: Fix structured array format functions
-* `#8202 <https://github.com/numpy/numpy/pull/8202>`__: ENH: specialize name of dev package by interpreter
-* `#8205 <https://github.com/numpy/numpy/pull/8205>`__: DOC: change development instructions from SSH to HTTPS access.
-* `#8216 <https://github.com/numpy/numpy/pull/8216>`__: DOC: Patch doc errors for atleast_nd and frombuffer
-* `#8218 <https://github.com/numpy/numpy/pull/8218>`__: BUG: ediff1d should return subclasses
-* `#8219 <https://github.com/numpy/numpy/pull/8219>`__: DOC: Turn SciPy references into links.
-* `#8222 <https://github.com/numpy/numpy/pull/8222>`__: ENH: Make numpy.mean() do more precise computation
-* `#8227 <https://github.com/numpy/numpy/pull/8227>`__: BUG: Better check for invalid bounds in np.random.uniform.
-* `#8231 <https://github.com/numpy/numpy/pull/8231>`__: ENH: Refactor numpy ** operators for numpy scalar integer powers
-* `#8234 <https://github.com/numpy/numpy/pull/8234>`__: DOC: Clarified when a copy is made in numpy.asarray
-* `#8236 <https://github.com/numpy/numpy/pull/8236>`__: DOC: Fix documentation pull requests.
-* `#8238 <https://github.com/numpy/numpy/pull/8238>`__: MAINT: Update pavement.py
-* `#8239 <https://github.com/numpy/numpy/pull/8239>`__: ENH: Improve announce tool.
-* `#8240 <https://github.com/numpy/numpy/pull/8240>`__: REL: Prepare for 1.12.x branch
-* `#8243 <https://github.com/numpy/numpy/pull/8243>`__: BUG: Update operator `**` tests for new behavior.
-* `#8246 <https://github.com/numpy/numpy/pull/8246>`__: REL: Reset strides for RELAXED_STRIDE_CHECKING for 1.12 releases.
-* `#8265 <https://github.com/numpy/numpy/pull/8265>`__: BUG: np.piecewise not working for scalars
-* `#8272 <https://github.com/numpy/numpy/pull/8272>`__: TST: Path test should resolve symlinks when comparing
-* `#8282 <https://github.com/numpy/numpy/pull/8282>`__: DOC: Update 1.12.0 release notes.
-* `#8286 <https://github.com/numpy/numpy/pull/8286>`__: BUG: Fix pavement.py write_release_task.
-* `#8296 <https://github.com/numpy/numpy/pull/8296>`__: BUG: Fix iteration over reversed subspaces in mapiter_@name@.
-* `#8304 <https://github.com/numpy/numpy/pull/8304>`__: BUG: Fix PyPy crash in PyUFunc_GenericReduction.
-* `#8319 <https://github.com/numpy/numpy/pull/8319>`__: BLD: blacklist powl (longdouble power function) on OS X.
-* `#8320 <https://github.com/numpy/numpy/pull/8320>`__: BUG: do not link to Accelerate if OpenBLAS, MKL or BLIS are found.
-* `#8322 <https://github.com/numpy/numpy/pull/8322>`__: BUG: fixed kind specifications for parameters
-* `#8336 <https://github.com/numpy/numpy/pull/8336>`__: BUG: fix packbits and unpackbits to correctly handle empty arrays
-* `#8338 <https://github.com/numpy/numpy/pull/8338>`__: BUG: fix test_api test that fails intermittently in python 3
-* `#8339 <https://github.com/numpy/numpy/pull/8339>`__: BUG: Fix ndarray.tofile large file corruption in append mode.
-* `#8359 <https://github.com/numpy/numpy/pull/8359>`__: BUG: Fix suppress_warnings (again) for Python 3.6.
-* `#8372 <https://github.com/numpy/numpy/pull/8372>`__: BUG: Fixes for ma.median and nanpercentile.
-* `#8373 <https://github.com/numpy/numpy/pull/8373>`__: BUG: correct letter case
-* `#8379 <https://github.com/numpy/numpy/pull/8379>`__: DOC: Update 1.12.0-notes.rst.
-* `#8390 <https://github.com/numpy/numpy/pull/8390>`__: ENH: retune apply_along_axis nanmedian cutoff in 1.12
-* `#8391 <https://github.com/numpy/numpy/pull/8391>`__: DEP: Fix escaped string characters deprecated in Python 3.6.
-* `#8394 <https://github.com/numpy/numpy/pull/8394>`__: DOC: create 1.11.3 release notes.
-* `#8399 <https://github.com/numpy/numpy/pull/8399>`__: BUG: Fix author search in announce.py
-* `#8402 <https://github.com/numpy/numpy/pull/8402>`__: DOC, MAINT: Update 1.12.0 notes and mailmap.
-* `#8418 <https://github.com/numpy/numpy/pull/8418>`__: BUG: Fix ma.median even elements for 1.12
-* `#8424 <https://github.com/numpy/numpy/pull/8424>`__: DOC: Fix tools and release notes to be more markdown compatible.
-* `#8427 <https://github.com/numpy/numpy/pull/8427>`__: BUG: Add a lock to assert_equal and other testing functions
-* `#8431 <https://github.com/numpy/numpy/pull/8431>`__: BUG: Fix apply_along_axis() for when func1d() returns a non-ndarray.
-* `#8432 <https://github.com/numpy/numpy/pull/8432>`__: BUG: Let linspace accept input that has an array_interface.
-* `#8437 <https://github.com/numpy/numpy/pull/8437>`__: TST: Update 3.6-dev tests to 3.6 after Python final release.
-* `#8439 <https://github.com/numpy/numpy/pull/8439>`__: DOC: Update 1.12.0 release notes.
-* `#8466 <https://github.com/numpy/numpy/pull/8466>`__: MAINT: Update mailmap entries.
-* `#8467 <https://github.com/numpy/numpy/pull/8467>`__: DOC: Back-port the missing part of gh-8464.
-* `#8476 <https://github.com/numpy/numpy/pull/8476>`__: DOC: Update 1.12.0 release notes.
-* `#8477 <https://github.com/numpy/numpy/pull/8477>`__: DOC: Update 1.12.0 release notes.
diff --git a/doc/release/1.12.1-notes.rst b/doc/release/1.12.1-notes.rst
index 21f7ea16e..f67dab108 100644
--- a/doc/release/1.12.1-notes.rst
+++ b/doc/release/1.12.1-notes.rst
@@ -6,39 +6,21 @@ NumPy 1.12.1 supports Python 2.7 and 3.4 - 3.6 and fixes bugs and regressions
found in NumPy 1.12.0. In particular, the regression in f2py constant parsing
is fixed. Wheels for Linux, Windows, and OSX can be found on pypi,
+Bugs Fixed
+==========
-Contributors
-============
-
-A total of 10 people contributed to this release. People with a "+" by their
-names contributed a patch for the first time.
-
-* Charles Harris
-* Eric Wieser
-* Greg Young
-* Joerg Behrmann +
-* John Kirkham
-* Julian Taylor
-* Marten van Kerkwijk
-* Matthew Brett
-* Shota Kawabuchi
-* Jean Utke +
-
-Fixes Backported
-================
-
-* `#8483 <https://github.com/numpy/numpy/pull/8483>`__: BUG: Fix wrong future nat warning and equiv type logic error...
-* `#8489 <https://github.com/numpy/numpy/pull/8489>`__: BUG: Fix wrong masked median for some special cases
-* `#8490 <https://github.com/numpy/numpy/pull/8490>`__: DOC: Place np.average in inline code
-* `#8491 <https://github.com/numpy/numpy/pull/8491>`__: TST: Work around isfinite inconsistency on i386
-* `#8494 <https://github.com/numpy/numpy/pull/8494>`__: BUG: Guard against replacing constants without '_' spec in f2py.
-* `#8524 <https://github.com/numpy/numpy/pull/8524>`__: BUG: Fix mean for float 16 non-array inputs for 1.12
-* `#8571 <https://github.com/numpy/numpy/pull/8571>`__: BUG: Fix calling python api with error set and minor leaks for...
-* `#8602 <https://github.com/numpy/numpy/pull/8602>`__: BUG: Make iscomplexobj compatible with custom dtypes again
-* `#8618 <https://github.com/numpy/numpy/pull/8618>`__: BUG: Fix undefined behaviour induced by bad __array_wrap__
-* `#8648 <https://github.com/numpy/numpy/pull/8648>`__: BUG: Fix MaskedArray.__setitem__
-* `#8659 <https://github.com/numpy/numpy/pull/8659>`__: BUG: PPC64el machines are POWER for Fortran in f2py
-* `#8665 <https://github.com/numpy/numpy/pull/8665>`__: BUG: Look up methods on MaskedArray in `_frommethod`
-* `#8674 <https://github.com/numpy/numpy/pull/8674>`__: BUG: Remove extra digit in binary_repr at limit
-* `#8704 <https://github.com/numpy/numpy/pull/8704>`__: BUG: Fix deepcopy regression for empty arrays.
-* `#8707 <https://github.com/numpy/numpy/pull/8707>`__: BUG: Fix ma.median for empty ndarrays
+* BUG: Fix wrong future nat warning and equiv type logic error...
+* BUG: Fix wrong masked median for some special cases
+* DOC: Place np.average in inline code
+* TST: Work around isfinite inconsistency on i386
+* BUG: Guard against replacing constants without '_' spec in f2py.
+* BUG: Fix mean for float 16 non-array inputs for 1.12
+* BUG: Fix calling python api with error set and minor leaks for...
+* BUG: Make iscomplexobj compatible with custom dtypes again
+* BUG: Fix undefined behaviour induced by bad __array_wrap__
+* BUG: Fix MaskedArray.__setitem__
+* BUG: PPC64el machines are POWER for Fortran in f2py
+* BUG: Look up methods on MaskedArray in `_frommethod`
+* BUG: Remove extra digit in binary_repr at limit
+* BUG: Fix deepcopy regression for empty arrays.
+* BUG: Fix ma.median for empty ndarrays
diff --git a/doc/release/1.13.0-notes.rst b/doc/release/1.13.0-notes.rst
index a712c6949..4554e53ea 100644
--- a/doc/release/1.13.0-notes.rst
+++ b/doc/release/1.13.0-notes.rst
@@ -53,6 +53,9 @@ Deprecations
with ``np.minimum``.
* Calling ``ndarray.conjugate`` on non-numeric dtypes is deprecated (it
should match the behavior of ``np.conjugate``, which throws an error).
+* Calling ``expand_dims`` when the ``axis`` keyword does not satisfy
+ ``-a.ndim - 1 <= axis <= a.ndim``, where ``a`` is the array being reshaped,
+ is deprecated.
Future Changes
@@ -137,6 +140,11 @@ invokes``ndarray.__getslice__`` (e.g. through ``super(...).__getslice__``) will
now issue a DeprecationWarning - ``.__getitem__(slice(start, end))`` should be
used instead.
+Indexing MaskedArrays/Constants with ``...`` (ellipsis) now returns MaskedArray
+-------------------------------------------------------------------------------
+This behavior mirrors that of np.ndarray, and accounts for nested arrays in
+MaskedArrays of object dtype, and ellipsis combined with other forms of
+indexing.
C API changes
=============
@@ -202,7 +210,9 @@ within datetime and timedelta arrays. This is analogous to ``np.isnan``.
------------------------------------------------------
The new function ``np.heaviside(x, h0)`` (a ufunc) computes the Heaviside
function:
+
.. code::
+
{ 0 if x < 0,
heaviside(x, h0) = { h0 if x == 0,
{ 1 if x > 0.
@@ -239,8 +249,8 @@ of ``in1d`` that preserves the shape of the first array.
Temporary elision
-----------------
-On platforms providing the ``backtrace`` function NumPy will now not create
-temporaries in expression when possible.
+On platforms providing the ``backtrace`` function NumPy will try to avoid
+creating temporaries in expression involving basic numeric types.
For example ``d = a + b + c`` is transformed to ``d = a + b; d += c`` which can
improve performance for large arrays as less memory bandwidth is required to
perform the operation.
@@ -536,3 +546,11 @@ The ABCPolyBase class, from which the convenience classes are derived, sets
``__array_ufunc__ = None`` in order to opt out of ufuncs. If a polynomial
convenience class instance is passed as an argument to a ufunc, a ``TypeError``
will now be raised.
+
+Output arguments to ufuncs can be tuples also for ufunc methods
+---------------------------------------------------------------
+For calls to ufuncs, it was already possible, and recommended, to use an
+``out`` argument with a tuple for ufuncs with multiple outputs. This has now
+been extended to output arguments in the ``reduce``, ``accumulate``, and
+``reduceat`` methods. This is mostly for compatibility with ``__array_ufunc__``;
+there are no ufuncs yet that have more than one output.
diff --git a/doc/release/1.13.1-notes.rst b/doc/release/1.13.1-notes.rst
new file mode 100644
index 000000000..807296a85
--- /dev/null
+++ b/doc/release/1.13.1-notes.rst
@@ -0,0 +1,60 @@
+==========================
+NumPy 1.13.1 Release Notes
+==========================
+
+This is a bugfix release for problems found in 1.13.0. The major changes are
+fixes for the new memory overlap detection and temporary elision as well as
+reversion of the removal of the boolean binary ``-`` operator. Users of 1.13.0
+should upgrade.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. Note that the Python 3.6
+wheels available from PIP are built against 3.6.1, hence will not work when
+used with 3.6.0 due to Python bug 29943_. NumPy 1.13.2 will be released shortly
+after Python 3.6.2 is out to fix that problem. If you are using 3.6.0 the
+workaround is to upgrade to 3.6.1 or use an earlier Python version.
+
+.. _29943: https://bugs.python.org/issue29943
+
+
+Pull requests merged
+====================
+A total of 19 pull requests were merged for this release.
+
+* #9240 DOC: BLD: fix lots of Sphinx warnings/errors.
+* #9255 Revert "DEP: Raise TypeError for subtract(bool_, bool_)."
+* #9261 BUG: don't elide into readonly and updateifcopy temporaries for...
+* #9262 BUG: fix missing keyword rename for common block in numpy.f2py
+* #9263 BUG: handle resize of 0d array
+* #9267 DOC: update f2py front page and some doc build metadata.
+* #9299 BUG: Fix Intel compilation on Unix.
+* #9317 BUG: fix wrong ndim used in empty where check
+* #9319 BUG: Make extensions compilable with MinGW on Py2.7
+* #9339 BUG: Prevent crash if ufunc doc string is null
+* #9340 BUG: umath: un-break ufunc where= when no out= is given
+* #9371 DOC: Add isnat/positive ufunc to documentation
+* #9372 BUG: Fix error in fromstring function from numpy.core.records...
+* #9373 BUG: ')' is printed at the end pointer of the buffer in numpy.f2py.
+* #9374 DOC: Create NumPy 1.13.1 release notes.
+* #9376 BUG: Prevent hang traversing ufunc userloop linked list
+* #9377 DOC: Use x1 and x2 in the heaviside docstring.
+* #9378 DOC: Add $PARAMS to the isnat docstring
+* #9379 DOC: Update the 1.13.1 release notes
+
+
+Contributors
+============
+A total of 12 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andras Deak +
+* Bob Eldering +
+* Charles Harris
+* Daniel Hrisca +
+* Eric Wieser
+* Joshua Leahy +
+* Julian Taylor
+* Michael Seifert
+* Pauli Virtanen
+* Ralf Gommers
+* Roland Kaufmann
+* Warren Weckesser
diff --git a/doc/release/1.14.0-notes.rst b/doc/release/1.14.0-notes.rst
new file mode 100644
index 000000000..6951c6272
--- /dev/null
+++ b/doc/release/1.14.0-notes.rst
@@ -0,0 +1,296 @@
+==========================
+NumPy 1.14.0 Release Notes
+==========================
+
+This release supports Python 2.7 and 3.4 - 3.6.
+
+
+Highlights
+==========
+
+* The `np.einsum` function will use BLAS when possible
+
+
+New functions
+=============
+
+* ``parametrize``: decorator added to numpy.testing
+* ``chebinterpolate``: Interpolate function at Chebyshev points.
+
+
+Deprecations
+============
+
+
+Future Changes
+==============
+
+``np.issubdtype`` will stop downcasting dtype-like arguments
+------------------------------------------------------------
+It would be expected that ``issubdtype(np.float32, 'float64')`` and
+``issubdtype(np.float32, np.float64)`` mean the same thing - however, there
+was an undocumented special case that translated the former into
+``issubdtype(np.float32, np.floating)``, giving the surprising result of True.
+
+This translation now gives a warning explaining what translation is occurring.
+In future, the translation will be disabled, and the first example will be made
+equivalent to the second.
+
+``np.linalg.lstsq`` default for ``rcond`` will be changed
+---------------------------------------------------------
+
+The ``rcond`` parameter to ``np.linalg.lstsq`` will change its default to the
+better value of machine precision times the maximum of the input matrix
+dimensions. A FutureWarning is given if the parameter is not passed explicitly.
+* ``a.flat.__array__()`` will return a writeable copy of ``a`` when ``a`` is
+ non-contiguous. Previously it returned an UPDATEIFCOPY array when ``a`` was
+ writeable. Currently it returns a non-writeable copy. See gh-7054 for a
+ discussion of the issue.
+
+
+
+Build System Changes
+====================
+
+
+Compatibility notes
+===================
+
+``a.flat.__array__()`` returns non-writeable arrays when ``a`` is non-contiguous
+--------------------------------------------------------------------------------
+The intent is that the UPDATEIFCOPY array previously returned when ``a`` was
+non-contiguous will be replaced by a writeable copy in the future. This
+temporary measure is aimed to notify folks who expect the underlying array be
+modified in this situation that that will no longer be the case. The most
+likely places for this to be noticed is when expressions of the form
+``np.asarray(a.flat)`` are used, or when ``a.flat`` is passed as the out
+parameter to a ufunc.
+
+``np.tensordot`` now returns zero array when contracting over 0-length dimension
+--------------------------------------------------------------------------------
+Previously ``np.tensordot`` raised a ValueError when contracting over 0-length
+dimension. Now it returns a zero array, which is consistent with the behaviour
+of ``np.dot`` and ``np.einsum``.
+
+``np.ma`` functions producing ``fill_value``s have changed
+----------------------------------------------------------
+Previously, ``np.ma.default_fill_value`` would return a 0d array, but
+``np.ma.minimum_fill_value`` and ``np.ma.maximum_fill_value`` would return a
+tuple of the fields. Instead, all three methods return a structured ``np.void``
+object, which is what you would already find in the ``.fill_value`` attribute.
+
+Additionally, the dtype guessing now matches that of ``np.array`` - so when
+passing a python scalar ``x``, ``maximum_fill_value(x)`` is always the same as
+``maximum_fill_value(np.array(x))``. Previously ``x = long(1)`` on Python 2
+violated this assumption.
+
+``numpy.testing`` reorganized
+-----------------------------
+This is not expected to cause problems, but possibly something has been left
+out. If you experience an unexpected import problem using ``numpy.testing``
+let us know.
+
+``np.asfarray`` no longer accepts non-dtypes through the ``dtype`` argument
+---------------------------------------------------------------------------
+This previously would accept ``dtype=some_array``, with the implied semantics
+of ``dtype=some_array.dtype``. This was undocumented, unique across the numpy
+functions, and if used would likely correspond to a typo.
+
+1D ``np.linalg.norm`` preserves float input types, even for arbitrary orders
+----------------------------------------------------------------------------
+Previously, this would promote to ``float64`` when arbitrary orders were
+passed, despite not doing so under the simple cases::
+
+ >>> f32 = np.float32([1, 2])
+ >>> np.linalg.norm(f32, 2.0).dtype
+ dtype('float32')
+ >>> np.linalg.norm(f32, 2.0001).dtype
+ dtype('float64') # numpy 1.13
+ dtype('float32') # numpy 1.14
+
+This change affects only ``float32`` and ``float16`` arrays.
+
+``__init__.py`` files added to test directories
+-----------------------------------------------
+This is for pytest compatibility in the case of duplicate test file names in
+the different directories. As a result, ``run_module_suite`` no longer works,
+i.e., ``python <path-to-test-file>`` results in an error.
+
+``MaskedArray.squeeze`` never returns ``np.ma.masked``
+------------------------------------------------------
+``np.squeeze`` is documented as returning a view, but the masked variant would
+sometimes return ``masked``, which is not a view. This has been fixed, so that
+the result is always a view on the original masked array.
+This breaks any code that used ``masked_arr.squeeze() is np.ma.masked``, but
+fixes code that writes to the result of ``.squeeze()``.
+
+Renamed first parameter of ``can_cast`` from ``from`` to ``from_``
+------------------------------------------------------------------
+The previous parameter name ``from`` is a reserved keyword in Python, which made
+it difficult to pass the argument by name. This has been fixed by renaming
+the parameter to ``from_``.
+
+
+C API changes
+=============
+
+
+New Features
+============
+
+External ``nose`` plugins are usable by ``numpy.testing.Tester``
+----------------------------------------------------------------
+``numpy.testing.Tester`` is now aware of ``nose`` plugins that are outside the
+``nose`` built-in ones. This allows using, for example, ``nose-timer`` like
+so: ``np.test(extra_argv=['--with-timer', '--timer-top-n', '20'])`` to
+obtain the runtime of the 20 slowest tests. An extra keyword ``timer`` was
+also added to ``Tester.test``, so ``np.test(timer=20)`` will also report the 20
+slowest tests.
+
+``parametrize`` decorator added to ``numpy.testing``
+----------------------------------------------------
+A basic ``parametrize`` decorator is now available in ``numpy.testing``. It is
+intended to allow rewriting yield based tests that have been deprecated in
+pytest so as to facilitate the transition to pytest in the future. The nose
+testing framework has not been supported for several years and looks like
+abandonware.
+
+The new ``parametrize`` decorator does not have the full functionality of the
+one in pytest. It doesn't work for classes, doesn't support nesting, and does
+not substitute variable names. Even so, it should be adequate to rewrite the
+NumPy tests.
+
+``chebinterpolate`` function added to ``numpy.polynomial.chebyshev``
+--------------------------------------------------------------------
+The new ``chebinterpolate`` function interpolates a given function at the
+Chebyshev points of the first kind. A new ``Chebyshev.interpolate`` class
+method adds support for interpolation over arbitrary intervals using the scaled
+and shifted Chebyshev points of the first kind.
+
+
+Improvements
+============
+
+Numerator degrees of freedom in ``random.noncentral_f`` need only be positive.
+------------------------------------------------------------------------------
+Prior to NumPy 1.14.0, the numerator degrees of freedom needed to be > 1, but
+the distribution is valid for values > 0, which is the new requirement.
+
+The GIL is released for all ``np.einsum`` variations
+----------------------------------------------------
+Some specific loop structures which have an accelerated loop version
+did not release the GIL prior to NumPy 1.14.0. This oversight has been
+fixed.
+
+The `np.einsum` function will use BLAS when possible and optimize by default
+----------------------------------------------------------------------------
+The ``np.einsum`` function will now call ``np.tensordot`` when appropriate.
+Because ``np.tensordot`` uses BLAS when possible, that will speed up execution.
+By default, ``np.einsum`` will also attempt optimization as the overhead is
+small relative to the potential improvement in speed.
+
+The ``repr`` of ``np.polynomial`` classes is more explicit
+----------------------------------------------------------
+It now shows the domain and window parameters as keyword arguments to make
+them more clear::
+
+ >>> np.polynomial.Polynomial(range(4))
+ Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+
+f2py now handles arrays of dimension 0
+--------------------------------------
+f2py now allows for the allocation of arrays of dimension 0. This allows for
+more consistent handling of corner cases downstream.
+
+``numpy.distutils`` supports using MSVC and mingw64-gfortran together
+---------------------------------------------------------------------
+
+Numpy distutils now supports using MSVC and Mingw64-gfortran compilers
+together. This enables producing Python extension modules on Windows
+containing Fortran code, while retaining compatibility with the
+binaries distributed by Python.org. Not all use cases are supported,
+but most common ways to wrap Fortran for Python are functional.
+
+Compilation in this mode is usually enabled automatically, and can be
+selected via the ``--fcompiler`` and ``--compiler`` options to
+``setup.py``. Moreover, linking Fortran codes to static OpenBLAS is
+supported; by default a gfortran-compatible static archive
+``openblas.a`` is looked for.
+
+``concatenate`` and ``stack`` gained an ``out`` argument
+--------------------------------------------------------
+A preallocated buffer of the desired dtype can now be used for the output of
+these functions.
+
+``np.linalg.pinv`` now works on stacked matrices
+------------------------------------------------
+Previously it was limited to a single 2d array.
+
+``numpy.save`` aligns data to 64 bytes instead of 16
+----------------------------------------------------
+Saving NumPy arrays in the ``npy`` format with ``numpy.save`` inserts
+padding before the array data to align it at 64 bytes. Previously
+this was only 16 bytes (and sometimes less due to a bug in the code
+for version 2). Now the alignment is 64 bytes, which matches the
+widest SIMD instruction set commonly available, and is also the most
+common cache line size. This makes ``npy`` files easier to use in
+programs which open them with ``mmap``, especially on Linux where an
+``mmap`` offset must be a multiple of the page size.
+
+
+Changes
+=======
+
+0d arrays now print their elements like other arrays
+----------------------------------------------------
+0d arrays now use the array2string formatters to print their elements, like
+other arrays. The ``style`` argument of ``array2string`` is now non-functional.
+
+``np.linalg.matrix_rank`` is more efficient for hermitian matrices
+------------------------------------------------------------------
+The keyword argument ``hermitian`` was added to toggle between standard
+SVD-based matrix rank calculation and the more efficient eigenvalue-based
+method for symmetric/hermitian matrices.
+
+Integer scalars are now unaffected by ``np.set_string_function``
+----------------------------------------------------------------
+Previously the str/repr of integer scalars could be controlled by
+``np.set_string_function``, unlike most other numpy scalars. This is no longer
+the case.
+
+Multiple-field indexing/assignment of structured arrays
+-------------------------------------------------------
+The indexing and assignment of structured arrays with multiple fields has
+changed in a number of ways:
+
+First, indexing a structured array with multiple fields (e.g.,
+``arr[['f1', 'f3']]``) returns a view into the original array instead of a
+copy. The returned view will have extra padding bytes corresponding to
+intervening fields in the original array, unlike the copy in 1.13, which will
+affect code such as ``arr[['f1', 'f3']].view(newdtype)``.
+
+Second, assignment between structured arrays will now occur "by position"
+instead of "by field name". The Nth field of the destination will be set to the
+Nth field of the source regardless of field name, unlike in numpy versions 1.6
+to 1.13 in which fields in the destination array were set to the
+identically-named field in the source array or to 0 if the source did not have
+a field.
+
+Correspondingly, the order of fields in structured dtypes now matters when
+computing dtype equality. For example with the dtypes
+`x = dtype({'names': ['A', 'B'], 'formats': ['i4', 'f4'], 'offsets': [0, 4]})`
+`y = dtype({'names': ['B', 'A'], 'formats': ['f4', 'i4'], 'offsets': [4, 0]})`
+now `x == y` will return `False`, unlike before. This makes dictionary-based
+dtype specifications like `dtype({'a': ('i4', 0), 'b': ('f4', 4)})` dangerous
+in python < 3.6 since dict key-order is not preserved in those versions.
+
+Assignment from a structured array to a boolean array now raises a ValueError,
+unlike in 1.13 where it always set the destination elements to `True`.
+
+Assignment from structured array with more than one field to a non-structured
+array now raises a ValueError. In 1.13 this copied just the first field of the
+source to the destination.
+
+Using field "titles" in multiple-field indexing is now disallowed, as is
+repeating a field name in a multiple-field index.
+
diff --git a/doc/release/1.3.0-notes.rst b/doc/release/1.3.0-notes.rst
index b5e43155b..246ec5869 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/release/1.3.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.3.0 Release Notes
-*************************
+=========================
This minor release includes numerous bug fixes, official python 2.6 support, and
several new features such as generalized ufuncs.
@@ -8,7 +9,7 @@ Highlights
==========
Python 2.6 support
-~~~~~~~~~~~~~~~~~~
+------------------
Python 2.6 is now supported on all previously supported platforms, including
windows.
@@ -16,7 +17,7 @@ windows.
http://www.python.org/dev/peps/pep-0361/
Generalized ufuncs
-~~~~~~~~~~~~~~~~~~
+------------------
There is a general need for looping over not only functions on scalars but also
over functions on vectors (or arrays), as explained on
@@ -60,7 +61,7 @@ the loop dimensions. The output is given by the loop dimensions plus the
output core dimensions.
Experimental Windows 64 bits support
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
Numpy can now be built on windows 64 bits (amd64 only, not IA64), with both MS
compilers and mingw-w64 compilers:
@@ -73,7 +74,7 @@ New features
============
Formatting issues
-~~~~~~~~~~~~~~~~~
+-----------------
Float formatting is now handled by numpy instead of the C runtime: this enables
locale independent formatting, more robust fromstring and related methods.
@@ -82,7 +83,7 @@ IND/NaN, etc...), and more consistent with recent python formatting work (in
2.6 and later).
Nan handling in max/min
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
The maximum/minimum ufuncs now reliably propagate nans. If one of the
arguments is a nan, then nan is returned. This affects np.min/np.max, amin/amax
@@ -90,13 +91,13 @@ and the array methods max/min. New ufuncs fmax and fmin have been added to deal
with non-propagating nans.
Nan handling in sign
-~~~~~~~~~~~~~~~~~~~~
+--------------------
+The ufunc sign now returns nan for the sign of a nan.
New ufuncs
-~~~~~~~~~~
+----------
#. fmax - same as maximum for integer types and non-nan floats. Returns the
non-nan argument if one argument is nan and returns nan if both arguments
@@ -115,7 +116,7 @@ New ufuncs
logarithm of the result.
Masked arrays
-~~~~~~~~~~~~~
+-------------
Several new features and bug fixes, including:
@@ -128,7 +129,7 @@ Several new features and bug fixes, including:
* doc update
gfortran support on windows
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
Gfortran can now be used as a fortran compiler for numpy on windows, even when
the C compiler is Visual Studio (VS 2005 and above; VS 2003 will NOT work).
@@ -137,7 +138,7 @@ does). It is unclear whether it will be possible to use gfortran and visual
studio at all on x64.
Arch option for windows binary
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
+Automatic arch detection can now be bypassed from the command line for the superpack installers:
@@ -150,7 +151,7 @@ Deprecated features
===================
Histogram
-~~~~~~~~~
+---------
The semantics of histogram has been modified to fix long-standing issues
with outliers handling. The main changes concern
@@ -172,14 +173,14 @@ New C API
=========
Multiarray API
-~~~~~~~~~~~~~~
+--------------
The following functions have been added to the multiarray C API:
* PyArray_GetEndianness: to get runtime endianness
Ufunc API
-~~~~~~~~~
+---------
The following functions have been added to the ufunc API:
@@ -188,7 +189,7 @@ The following functions have been added to the ufunc API:
New defines
-~~~~~~~~~~~
+-----------
New public C defines are available for ARCH specific code through numpy/npy_cpu.h:
@@ -212,7 +213,7 @@ Those provide portable alternatives to glibc endian.h macros for platforms
without it.
Portable NAN, INFINITY, etc...
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
+npy_math.h now makes available several portable macros to get NAN, INFINITY:
@@ -228,7 +229,7 @@ Internal changes
================
numpy.core math configuration revamp
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
This should make the porting to new platforms easier, and more robust. In
particular, the configuration stage does not need to execute any code on the
@@ -237,19 +238,19 @@ target platform, which is a first step toward cross-compilation.
http://projects.scipy.org/numpy/browser/trunk/doc/neps/math_config_clean.txt
umath refactor
-~~~~~~~~~~~~~~
+--------------
A lot of code cleanup for umath/ufunc code (charris).
Improvements to build warnings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
Numpy can now build with -W -Wall without warnings
http://projects.scipy.org/numpy/browser/trunk/doc/neps/warnfix.txt
Separate core math library
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
The core math functions (sin, cos, etc... for basic C types) have been put into
a separate library; it acts as a compatibility layer, to support most C99 maths
@@ -262,7 +263,7 @@ prefix (npy_cos vs cos).
The core library will be made available to any extension in 1.4.0.
CPU arch detection
-~~~~~~~~~~~~~~~~~~
+------------------
npy_cpu.h defines numpy specific CPU defines, such as NPY_CPU_X86, etc...
Those are portable across OS and toolchains, and set up when the header is
diff --git a/doc/release/1.4.0-notes.rst b/doc/release/1.4.0-notes.rst
index 9b53c1261..9480a054e 100644
--- a/doc/release/1.4.0-notes.rst
+++ b/doc/release/1.4.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.4.0 Release Notes
-*************************
+=========================
This minor release includes numerous bug fixes, as well as a few new features. It
is backward compatible with 1.3.0 release.
@@ -21,7 +22,7 @@ New features
============
Extended array wrapping mechanism for ufuncs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
An __array_prepare__ method has been added to ndarray to provide subclasses
greater flexibility to interact with ufuncs and ufunc-like functions. ndarray
@@ -34,7 +35,7 @@ before computing the results and populating it. This way, checks can be made
and errors raised before operations which may modify data in place.
Automatic detection of forward incompatibilities
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------
Previously, if an extension was built against a version N of NumPy, and used on
a system with NumPy M < N, the import_array was successful, which could cause
@@ -43,7 +44,7 @@ NumPy 1.4.0, this will cause a failure in import_array, so the error will be
caught early on.
New iterators
-~~~~~~~~~~~~~
+-------------
A new neighborhood iterator has been added to the C API. It can be used to
iterate over the items in a neighborhood of an array, and can handle boundaries
@@ -51,7 +52,7 @@ conditions automatically. Zero and one padding are available, as well as
arbitrary constant value, mirror and circular padding.
New polynomial support
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
New modules chebyshev and polynomial have been added. The new polynomial module
is not compatible with the current polynomial support in numpy, but is much
@@ -70,7 +71,7 @@ they must be explicitly brought in with an "import numpy.polynomial"
statement.
New C API
-~~~~~~~~~
+---------
The following C functions have been added to the C API:
@@ -85,7 +86,7 @@ The following C functions have been added to the C API:
find some examples in the multiarray_test.c.src file in numpy.core.
New ufuncs
-~~~~~~~~~~
+----------
The following ufuncs have been added to the C API:
@@ -95,7 +96,7 @@ The following ufuncs have been added to the C API:
first argument toward the second argument.
New defines
-~~~~~~~~~~~
+-----------
The alpha processor is now defined and available in numpy/npy_cpu.h. The
failed detection of the PARISC processor has been fixed. The defines are:
@@ -104,7 +105,7 @@ failed detection of the PARISC processor has been fixed. The defines are:
#. NPY_CPU_ALPHA: Alpha
Testing
-~~~~~~~
+-------
#. deprecated decorator: this decorator may be used to avoid cluttering
testing output while testing DeprecationWarning is effectively raised by
@@ -120,7 +121,7 @@ Testing
warning of the appropriate class, without altering the warning state.
Reusing npymath
-~~~~~~~~~~~~~~~
+---------------
In 1.3.0, we started putting portable C math routines in npymath library, so
that people can use those to write portable extensions. Unfortunately, it was
@@ -129,7 +130,7 @@ added to numpy.distutils so that 3rd party can reuse this library. See coremath
documentation for more information.
Improved set operations
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
In previous versions of NumPy some set functions (intersect1d,
setxor1d, setdiff1d and setmember1d) could return incorrect results if
@@ -196,21 +197,21 @@ Internal changes
================
Use C99 complex functions when available
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------
The numpy complex types are now guaranteed to be ABI compatible with C99
complex type, if available on the platform. Moreover, the complex ufunc now use
the platform C99 functions instead of our own.
split multiarray and umath source code
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------
The source code of multiarray and umath has been split into separate logic
compilation units. This should make the source code more amenable for
newcomers.
Separate compilation
-~~~~~~~~~~~~~~~~~~~~
+--------------------
By default, every file of multiarray (and umath) is merged into one for
compilation as was the case before, but if NPY_SEPARATE_COMPILATION env
@@ -219,7 +220,7 @@ each file is enabled. This makes the compile/debug cycle much faster when
working on core numpy.
Separate core math library
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
New functions which have been added:
diff --git a/doc/release/1.5.0-notes.rst b/doc/release/1.5.0-notes.rst
index e9e36f0de..a2184ab13 100644
--- a/doc/release/1.5.0-notes.rst
+++ b/doc/release/1.5.0-notes.rst
@@ -1,12 +1,13 @@
+=========================
NumPy 1.5.0 Release Notes
-*************************
+=========================
Highlights
==========
Python 3 compatibility
-~~~~~~~~~~~~~~~~~~~~~~
+----------------------
This is the first NumPy release which is compatible with Python 3. Support for
Python 3 and Python 2 is done from a single code base. Extensive notes on
@@ -20,7 +21,7 @@ at `<http://bitbucket.org/jpellerin/nose3/>`_ however.
Porting of SciPy to Python 3 is expected to be completed soon.
:pep:`3118` compatibility
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
The new buffer protocol described by PEP 3118 is fully supported in this
version of Numpy. On Python versions >= 2.6 Numpy arrays expose the buffer
@@ -32,7 +33,7 @@ New features
============
Warning on casting complex to real
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------
Numpy now emits a `numpy.ComplexWarning` when a complex number is cast
into a real number. For example:
@@ -49,7 +50,7 @@ turned off in the standard way:
>>> warnings.simplefilter("ignore", np.ComplexWarning)
Dot method for ndarrays
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
Ndarrays now have the dot product also as a method, which allows writing
chains of matrix products as
@@ -61,7 +62,7 @@ instead of the longer alternative
>>> np.dot(a, np.dot(b, c))
linalg.slogdet function
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
The slogdet function returns the sign and logarithm of the determinant
of a matrix. Because the determinant may involve the product of many
@@ -69,7 +70,7 @@ small/large values, the result is often more accurate than that obtained
by simple multiplication.
new header
-~~~~~~~~~~
+----------
The new header file ndarraytypes.h contains the symbols from
ndarrayobject.h that do not depend on the PY_ARRAY_UNIQUE_SYMBOL and
@@ -84,7 +85,7 @@ Changes
=======
polynomial.polynomial
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
* The polyint and polyder functions now check that the specified number
+  of integrations or derivations is a non-negative integer. The number 0 is
@@ -100,7 +101,7 @@ polynomial.polynomial
* The polymulx function was added.
polynomial.chebyshev
-~~~~~~~~~~~~~~~~~~~~
+--------------------
* The chebint and chebder functions now check that the specified number
+  of integrations or derivations is a non-negative integer. The number 0 is
@@ -118,13 +119,13 @@ polynomial.chebyshev
histogram
-~~~~~~~~~
+---------
After a two years transition period, the old behavior of the histogram function
has been phased out, and the "new" keyword has been removed.
correlate
-~~~~~~~~~
+---------
The old behavior of correlate was deprecated in 1.4.0, the new behavior (the
usual definition for cross-correlation) is now the default.
diff --git a/doc/release/1.6.0-notes.rst b/doc/release/1.6.0-notes.rst
index e2c71e35c..c5f53a0eb 100644
--- a/doc/release/1.6.0-notes.rst
+++ b/doc/release/1.6.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.6.0 Release Notes
-*************************
+=========================
This release includes several new features as well as numerous bug fixes and
improved documentation. It is backward compatible with the 1.5.0 release, and
@@ -20,7 +21,7 @@ New features
============
New 16-bit floating point type
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
This release adds support for the IEEE 754-2008 binary16 format, available as
the data type ``numpy.half``. Within Python, the type behaves similarly to
@@ -29,7 +30,7 @@ half-float API.
New iterator
-~~~~~~~~~~~~
+------------
A new iterator has been added, replacing the functionality of the
existing iterator and multi-iterator with a single object and API.
@@ -42,7 +43,7 @@ iterator.
Legendre, Laguerre, Hermite, HermiteE polynomials in ``numpy.polynomial``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------------------------------
Extend the number of polynomials available in the polynomial package. In
addition, a new ``window`` attribute has been added to the classes in
@@ -53,7 +54,7 @@ of values without playing unnatural tricks with the domain.
Fortran assumed shape array and size function support in ``numpy.f2py``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------------------------
F2py now supports wrapping Fortran 90 routines that use assumed shape
arrays. Before such routines could be called from Python but the
@@ -67,7 +68,7 @@ that use two argument ``size`` function in dimension specifications.
Other new functions
-~~~~~~~~~~~~~~~~~~~
+-------------------
``numpy.ravel_multi_index`` : Converts a multi-index tuple into
an array of flat indices, applying boundary modes to the indices.
@@ -90,14 +91,14 @@ Changes
=======
``default error handling``
-~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------
+The default error handling has been changed from ``print`` to ``warn`` for
all except for ``underflow``, which remains as ``ignore``.
``numpy.distutils``
-~~~~~~~~~~~~~~~~~~~
+-------------------
Several new compilers are supported for building Numpy: the Portland Group
Fortran compiler on OS X, the PathScale compiler suite and the 64-bit Intel C
@@ -105,7 +106,7 @@ compiler on Linux.
``numpy.testing``
-~~~~~~~~~~~~~~~~~
+-----------------
The testing framework gained ``numpy.testing.assert_allclose``, which provides
a more convenient way to compare floating point arrays than
@@ -113,7 +114,7 @@ a more convenient way to compare floating point arrays than
``C API``
-~~~~~~~~~
+---------
In addition to the APIs for the new iterator and half data type, a number
of other additions have been made to the C API. The type promotion
@@ -137,7 +138,7 @@ Removed features
================
``numpy.fft``
-~~~~~~~~~~~~~
+-------------
The functions `refft`, `refft2`, `refftn`, `irefft`, `irefft2`, `irefftn`,
which were aliases for the same functions without the 'e' in the name, were
@@ -145,21 +146,21 @@ removed.
``numpy.memmap``
-~~~~~~~~~~~~~~~~
+----------------
The `sync()` and `close()` methods of memmap were removed. Use `flush()` and
"del memmap" instead.
``numpy.lib``
-~~~~~~~~~~~~~
+-------------
The deprecated functions ``numpy.unique1d``, ``numpy.setmember1d``,
``numpy.intersect1d_nu`` and ``numpy.lib.ufunclike.log2`` were removed.
``numpy.ma``
-~~~~~~~~~~~~
+------------
Several deprecated items were removed from the ``numpy.ma`` module::
@@ -170,7 +171,7 @@ Several deprecated items were removed from the ``numpy.ma`` module::
``numpy.distutils``
-~~~~~~~~~~~~~~~~~~~
+-------------------
The ``numpy.get_numpy_include`` function was removed, use ``numpy.get_include``
instead.
diff --git a/doc/release/1.6.1-notes.rst b/doc/release/1.6.1-notes.rst
index b5e97b97e..05fcb4ab9 100644
--- a/doc/release/1.6.1-notes.rst
+++ b/doc/release/1.6.1-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.6.1 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.6.x series.
diff --git a/doc/release/1.6.2-notes.rst b/doc/release/1.6.2-notes.rst
index d73d80981..8f0b06f98 100644
--- a/doc/release/1.6.2-notes.rst
+++ b/doc/release/1.6.2-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.6.2 Release Notes
-*************************
+=========================
This is a bugfix release in the 1.6.x series. Due to the delay of the NumPy
1.7.0 release, this release contains far more fixes than a regular NumPy bugfix
@@ -9,7 +10,7 @@ Issues fixed
============
``numpy.core``
-~~~~~~~~~~~~~~
+--------------
* #2063: make unique() return consistent index
* #1138: allow creating arrays from empty buffers or empty slices
@@ -31,7 +32,7 @@ Issues fixed
``numpy.lib``
-~~~~~~~~~~~~~
+-------------
* #2048: break reference cycle in NpzFile
* #1573: savetxt() now handles complex arrays
@@ -44,7 +45,7 @@ Issues fixed
``numpy.distutils``
-~~~~~~~~~~~~~~~~~~~
+-------------------
* #1261: change compile flag on AIX from -O5 to -O3
* #1377: update HP compiler flags
@@ -60,7 +61,7 @@ Issues fixed
``numpy.random``
-~~~~~~~~~~~~~~~~
+----------------
* BUG: Use npy_intp instead of long in mtrand
@@ -68,7 +69,7 @@ Changes
=======
``numpy.f2py``
-~~~~~~~~~~~~~~
+--------------
* ENH: Introduce new options extra_f77_compiler_args and extra_f90_compiler_args
* BLD: Improve reporting of fcompiler value
@@ -76,7 +77,7 @@ Changes
``numpy.poly``
-~~~~~~~~~~~~~~
+--------------
* ENH: Add some tests for polynomial printing
* ENH: Add companion matrix functions
diff --git a/doc/release/1.7.0-notes.rst b/doc/release/1.7.0-notes.rst
index 754e282b0..72aab4d4f 100644
--- a/doc/release/1.7.0-notes.rst
+++ b/doc/release/1.7.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.7.0 Release Notes
-*************************
+=========================
This release includes several new features as well as numerous bug fixes and
refactorings. It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last
@@ -66,7 +67,7 @@ New features
============
Reduction UFuncs Generalize axis= Parameter
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Any ufunc.reduce function call, as well as other reductions like sum, prod,
any, all, max and min support the ability to choose a subset of the axes to
@@ -75,7 +76,7 @@ axis=# to pick a single axis. Now, one can also say axis=(#,#) to pick a
list of axes for reduction.
Reduction UFuncs New keepdims= Parameter
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------
There is a new keepdims= parameter, which if set to True, doesn't throw
away the reduction axes but instead sets them to have size one. When this
@@ -83,7 +84,7 @@ option is set, the reduction result will broadcast correctly to the
original operand which was reduced.
Datetime support
-~~~~~~~~~~~~~~~~
+----------------
.. note:: The datetime API is *experimental* in 1.7.0, and may undergo changes
in future versions of NumPy.
@@ -104,26 +105,26 @@ The notes in `doc/source/reference/arrays.datetime.rst <https://github.com/numpy
consulted for more details.
Custom formatter for printing arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
See the new ``formatter`` parameter of the ``numpy.set_printoptions``
function.
New function numpy.random.choice
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------
A generic sampling function has been added which will generate samples from
a given array-like. The samples can be with or without replacement, and
with uniform or given non-uniform probabilities.
New function isclose
-~~~~~~~~~~~~~~~~~~~~
+--------------------
Returns a boolean array where two arrays are element-wise equal within a
tolerance. Both relative and absolute tolerance can be specified.
Preliminary multi-dimensional support in the polynomial package
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------------------
Axis keywords have been added to the integration and differentiation
functions and a tensor keyword was added to the evaluation functions.
@@ -134,7 +135,7 @@ pseudo-Vandermonde matrices that can be used for fitting.
Ability to pad rank-n arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
A pad module containing functions for padding n-dimensional arrays has been
added. The various private padding functions are exposed as options to a
@@ -148,18 +149,18 @@ Current modes are ``constant``, ``edge``, ``linear_ramp``, ``maximum``,
New argument to searchsorted
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
The function searchsorted now accepts a 'sorter' argument that is a
permutation array that sorts the array to search.
Build system
-~~~~~~~~~~~~
+------------
Added experimental support for the AArch64 architecture.
C API
-~~~~~
+-----
New function ``PyArray_RequireWriteable`` provides a consistent interface
for checking array writeability -- any C code which works with arrays whose
@@ -172,7 +173,7 @@ Changes
=======
General
-~~~~~~~
+-------
The function np.concatenate tries to match the layout of its input arrays.
Previously, the layout did not follow any particular reason, and depended
@@ -213,7 +214,7 @@ and so the collapsing process only continues so long as it encounters other
``b`` is the last entry in that list which is a ``matrix`` object.
Casting Rules
-~~~~~~~~~~~~~
+-------------
Casting rules have undergone some changes in corner cases, due to the
NA-related work. In particular for combinations of scalar+scalar:
@@ -255,7 +256,7 @@ Deprecations
============
General
-~~~~~~~
+-------
Specifying a custom string formatter with a `_format` array attribute is
deprecated. The new `formatter` keyword in ``numpy.set_printoptions`` or
@@ -268,7 +269,7 @@ Versions of numpy < 1.7.0 ignored axis argument value for 1D arrays. We
allow this for now, but in due course we will raise an error.
C-API
-~~~~~
+-----
Direct access to the fields of PyArrayObject* has been deprecated. Direct
access has been recommended against for many releases. Expect similar
diff --git a/doc/release/1.7.1-notes.rst b/doc/release/1.7.1-notes.rst
index 7ff533d3a..04216b0df 100644
--- a/doc/release/1.7.1-notes.rst
+++ b/doc/release/1.7.1-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.7.1 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.7.x series.
It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
diff --git a/doc/release/1.7.2-notes.rst b/doc/release/1.7.2-notes.rst
index 87109cdd3..b0951bd72 100644
--- a/doc/release/1.7.2-notes.rst
+++ b/doc/release/1.7.2-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.7.2 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.7.x series.
It supports Python 2.4 - 2.7 and 3.1 - 3.3 and is the last series that
diff --git a/doc/release/1.8.0-notes.rst b/doc/release/1.8.0-notes.rst
index 5eed57129..80c39f8bc 100644
--- a/doc/release/1.8.0-notes.rst
+++ b/doc/release/1.8.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.8.0 Release Notes
-*************************
+=========================
This release supports Python 2.6 - 2.7 and 3.2 - 3.3.
@@ -79,7 +80,7 @@ the index in all-NaN slices. Previously the functions would raise a ValueError
for array returns and NaN for scalar returns.
NPY_RELAXED_STRIDES_CHECKING
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
There is a new compile time environment variable
``NPY_RELAXED_STRIDES_CHECKING``. If this variable is set to 1, then
numpy will consider more arrays to be C- or F-contiguous -- for
@@ -112,7 +113,7 @@ For more information check the "Internal memory layout of an ndarray"
section in the documentation.
Binary operations with non-arrays as second argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------
Binary operations of the form ``<array-or-subclass> * <non-array-subclass>``
where ``<non-array-subclass>`` declares an ``__array_priority__`` higher than
that of ``<array-or-subclass>`` will now unconditionally return
@@ -124,12 +125,12 @@ attempted. (`bug <https://github.com/numpy/numpy/issues/3375>`_, `pull request
<https://github.com/numpy/numpy/pull/3501>`_)
Function `median` used with `overwrite_input` only partially sorts array
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------------------------
If `median` is used with `overwrite_input` option the input array will now only
be partially sorted instead of fully sorted.
Fix to financial.npv
-~~~~~~~~~~~~~~~~~~~~
+--------------------
The npv function had a bug. Contrary to what the documentation stated, it
summed from indexes ``1`` to ``M`` instead of from ``0`` to ``M - 1``. The
fix changes the returned value. The mirr function called the npv function,
@@ -137,7 +138,7 @@ but worked around the problem, so that was also fixed and the return value
of the mirr function remains unchanged.
Runtime warnings when comparing NaN numbers
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Comparing ``NaN`` floating point numbers now raises the ``invalid`` runtime
warning. If a ``NaN`` is expected the warning can be ignored using np.errstate.
E.g.::
@@ -151,7 +152,7 @@ New Features
Support for linear algebra on stacked arrays
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
The gufunc machinery is now used for np.linalg, allowing operations on
stacked arrays and vectors. For example::
@@ -170,7 +171,7 @@ stacked arrays and vectors. For example::
[ 0., 1.]]])
In place fancy indexing for ufuncs
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------
The function ``at`` has been added to ufunc objects to allow in place
ufuncs with no buffering when fancy indexing is used. For example, the
following will increment the first and second items in the array, and will
@@ -181,7 +182,7 @@ but that does not work as the incremented value of ``arr[2]`` is simply copied
into the third slot in ``arr`` twice, not incremented twice.
New functions `partition` and `argpartition`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------
New functions to partially sort arrays via a selection algorithm.
A ``partition`` by index ``k`` moves the ``k`` smallest element to the front of
@@ -197,30 +198,30 @@ percentiles of samples.
``O(n log(n))``.
New functions `nanmean`, `nanvar` and `nanstd`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
New nan aware statistical functions are added. In these functions the
results are what would be obtained if nan values were omitted from all
computations.
New functions `full` and `full_like`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
New convenience functions to create arrays filled with a specific value;
complementary to the existing `zeros` and `zeros_like` functions.
IO compatibility with large files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
Large NPZ files >2GB can be loaded on 64-bit systems.
Building against OpenBLAS
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
It is now possible to build numpy against OpenBLAS by editing site.cfg.
New constant
-~~~~~~~~~~~~
+------------
Euler's constant is now exposed in numpy as euler_gamma.
New modes for qr
-~~~~~~~~~~~~~~~~
+----------------
New modes 'complete', 'reduced', and 'raw' have been added to the qr
factorization and the old 'full' and 'economic' modes are deprecated.
The 'reduced' mode replaces the old 'full' mode and is the default as was
@@ -236,12 +237,12 @@ deprecated, there isn't much use for it and it isn't any more efficient
than the 'raw' mode.
New `invert` argument to `in1d`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------
The function `in1d` now accepts a `invert` argument which, when `True`,
causes the returned array to be inverted.
Advanced indexing using `np.newaxis`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
It is now possible to use `np.newaxis`/`None` together with index
arrays instead of only in simple indices. This means that
``array[np.newaxis, [0, 1]]`` will now work as expected and select the first
@@ -249,7 +250,7 @@ two rows while prepending a new axis to the array.
C-API
-~~~~~
+-----
New ufuncs can now be registered with builtin input types and a custom
output type. Before this change, NumPy wouldn't be able to find the right
ufunc loop function when the ufunc was called from Python, because the ufunc
@@ -258,7 +259,7 @@ Now the correct ufunc loop is found, as long as the user provides an output
argument with the correct output type.
runtests.py
-~~~~~~~~~~~
+-----------
A simple test runner script ``runtests.py`` was added. It also builds Numpy via
``setup.py build`` and can be used to run tests easily during development.
@@ -267,24 +268,24 @@ Improvements
============
IO performance improvements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------
Performance in reading large files was improved by chunking (see also IO compatibility).
Performance improvements to `pad`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
The `pad` function has a new implementation, greatly improving performance for
all inputs except `mode=<function>` (retained for backwards compatibility).
Scaling with dimensionality is dramatically improved for rank >= 4.
Performance improvements to `isnan`, `isinf`, `isfinite` and `byteswap`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------------------------
`isnan`, `isinf`, `isfinite` and `byteswap` have been improved to take
advantage of compiler builtins to avoid expensive calls to libc.
This improves performance of these operations by about a factor of two on gnu
libc systems.
Performance improvements via SSE2 vectorization
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------
Several functions have been optimized to make use of SSE2 CPU SIMD instructions.
* Float32 and float64:
@@ -307,7 +308,7 @@ capable CPU it must be enabled by passing the appropriate flag to the CFLAGS
build variable (-msse2 with gcc).
Performance improvements to `median`
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------
`median` is now implemented in terms of `partition` instead of `sort` which
reduces its time complexity from O(n log(n)) to O(n).
If used with the `overwrite_input` option the array will now only be partially
@@ -315,7 +316,7 @@ sorted instead of fully sorted.
Overrideable operand flags in ufunc C-API
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------
When creating a ufunc, the default ufunc operand flags can be overridden
via the new op_flags attribute of the ufunc object. For example, to set
the operand flag for the first input to read/write:
@@ -335,7 +336,7 @@ Changes
General
-~~~~~~~
+-------
The function np.take now allows 0-d arrays as indices.
The separate compilation mode is now enabled by default.
@@ -358,7 +359,7 @@ Several changes to np.insert and np.delete:
Padded regions from np.pad are now correctly rounded, not truncated.
C-API Array Additions
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
Four new functions have been added to the array C-API.
* PyArray_Partition
@@ -367,14 +368,14 @@ Four new functions have been added to the array C-API.
* PyDataMem_NEW_ZEROED
C-API Ufunc Additions
-~~~~~~~~~~~~~~~~~~~~~
+---------------------
One new function has been added to the ufunc C-API that allows to register
an inner loop for user types using the descr.
* PyUFunc_RegisterLoopForDescr
C-API Developer Improvements
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------
The ``PyArray_Type`` instance creation function ``tp_new`` now
uses ``tp_basicsize`` to determine how much memory to allocate.
In previous releases only ``sizeof(PyArrayObject)`` bytes of
@@ -387,7 +388,7 @@ Deprecations
The 'full' and 'economic' modes of qr factorization are deprecated.
General
-~~~~~~~
+-------
The use of non-integer for indices and most integer arguments has been
deprecated. Previously float indices and function arguments such as axes or
shapes were truncated to integers without warning. For example
diff --git a/doc/release/1.8.1-notes.rst b/doc/release/1.8.1-notes.rst
index c26a03eff..ea34e75ac 100644
--- a/doc/release/1.8.1-notes.rst
+++ b/doc/release/1.8.1-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.8.1 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.8.x series.
@@ -59,7 +60,7 @@ Changes
=======
NDIter
-~~~~~~
+------
When ``NpyIter_RemoveAxis`` is now called, the iterator range will be reset.
When a multi index is being tracked and an iterator is not buffered, it is
@@ -75,7 +76,7 @@ cases the arrays being iterated are as large as the iterator so that such
a problem cannot occur.
Optional reduced verbosity for np.distutils
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Set ``numpy.distutils.system_info.system_info.verbosity = 0`` and then
calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
print anything on the output. This is mostly for other packages using
@@ -85,7 +86,7 @@ Deprecations
============
C-API
-~~~~~
+-----
The utility function npy_PyFile_Dup and npy_PyFile_DupClose are broken by the
internal buffering python 3 applies to its file objects.
diff --git a/doc/release/1.8.2-notes.rst b/doc/release/1.8.2-notes.rst
index c21f81a27..71e549526 100644
--- a/doc/release/1.8.2-notes.rst
+++ b/doc/release/1.8.2-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.8.2 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.8.x series.
diff --git a/doc/release/1.9.0-notes.rst b/doc/release/1.9.0-notes.rst
index 1ffbf8d4b..7ea29e354 100644
--- a/doc/release/1.9.0-notes.rst
+++ b/doc/release/1.9.0-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.9.0 Release Notes
-*************************
+=========================
This release supports Python 2.6 - 2.7 and 3.2 - 3.4.
@@ -41,13 +42,13 @@ Compatibility notes
===================
The diagonal and diag functions return readonly views.
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------
In NumPy 1.8, the diagonal and diag functions returned readonly copies, in
NumPy 1.9 they return readonly views, and in 1.10 they will return writeable
views.
Special scalar float values don't cause upcast to double anymore
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------------------
In previous numpy versions operations involving floating point scalars
containing special values ``NaN``, ``Inf`` and ``-Inf`` caused the result
type to be at least ``float64``. As the special values can be represented
@@ -62,7 +63,7 @@ now remains ``float32`` instead of being cast to ``float64``.
Operations involving non-special values have not been changed.
Percentile output changes
-~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------
If given more than one percentile to compute numpy.percentile returns an
array instead of a list. A single percentile still returns a scalar. The
array is equivalent to converting the list returned in older versions
@@ -72,12 +73,12 @@ If the ``overwrite_input`` option is used the input is only partially
instead of fully sorted.
ndarray.tofile exception type
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
All ``tofile`` exceptions are now ``IOError``, some were previously
``ValueError``.
Invalid fill value exceptions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
Two changes to numpy.ma.core._check_fill_value:
* When the fill value is a string and the array type is not one of
@@ -87,7 +88,7 @@ Two changes to numpy.ma.core._check_fill_value:
of OverflowError.
Polynomial Classes no longer derived from PolyBase
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
This may cause problems with folks who depended on the polynomial classes
being derived from PolyBase. They are now all derived from the abstract
base class ABCPolyBase. Strictly speaking, there should be a deprecation
@@ -95,7 +96,7 @@ involved, but no external code making use of the old baseclass could be
found.
Using numpy.random.binomial may change the RNG state vs. numpy < 1.9
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------------------
A bug in one of the algorithms to generate a binomial random variate has
been fixed. This change will likely alter the number of random draws
performed, and hence the sequence location will be different after a
@@ -103,7 +104,7 @@ call to distribution.c::rk_binomial_btpe. Any tests which rely on the RNG
being in a known state should be checked and/or updated as a result.
Random seed enforced to be a 32 bit unsigned integer
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------
``np.random.seed`` and ``np.random.RandomState`` now throw a ``ValueError``
if the seed cannot safely be converted to 32 bit unsigned integers.
Applications that now fail can be fixed by masking the higher 32 bit values to
@@ -111,20 +112,20 @@ zero: ``seed = seed & 0xFFFFFFFF``. This is what is done silently in older
versions so the random stream remains the same.
Argmin and argmax out argument
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------
The ``out`` argument to ``np.argmin`` and ``np.argmax`` and their
equivalent C-API functions is now checked to match the desired output shape
exactly. If the check fails a ``ValueError`` instead of ``TypeError`` is
raised.
Einsum
-~~~~~~
+------
Remove unnecessary broadcasting notation restrictions.
``np.einsum('ijk,j->ijk', A, B)`` can also be written as
``np.einsum('ij...,j->ij...', A, B)`` (ellipsis is no longer required on 'j')
Indexing
-~~~~~~~~
+--------
The NumPy indexing has seen a complete rewrite in this version. This makes
most advanced integer indexing operations much faster and should have no
@@ -177,12 +178,12 @@ introduced in advanced indexing operations:
* Indexing with more then one ellipsis (``...``) is deprecated.
Non-integer reduction axis indexes are deprecated
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------
Non-integer axis indexes to reduction ufuncs like `add.reduce` or `sum` are
deprecated.
``promote_types`` and string dtype
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------
``promote_types`` function now returns a valid string length when given an
integer or float dtype as one argument and a string dtype as another
argument. Previously it always returned the input string dtype, even if it
@@ -190,7 +191,7 @@ wasn't long enough to store the max integer/float value converted to a
string.
``can_cast`` and string dtype
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
``can_cast`` function now returns False in "safe" casting mode for
integer/float dtype and string dtype if the string dtype length is not long
enough to store the max integer/float value converted to a string.
@@ -198,37 +199,37 @@ Previously ``can_cast`` in "safe" mode returned True for integer/float
dtype and a string dtype of any length.
astype and string dtype
-~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------
The ``astype`` method now returns an error if the string dtype to cast to
is not long enough in "safe" casting mode to hold the max value of
integer/float array that is being casted. Previously the casting was
allowed even if the result was truncated.
`npyio.recfromcsv` keyword arguments change
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
`npyio.recfromcsv` no longer accepts the undocumented `update` keyword,
which used to override the `dtype` keyword.
The ``doc/swig`` directory moved
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------
The ``doc/swig`` directory has been moved to ``tools/swig``.
The ``npy_3kcompat.h`` header changed
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------
The unused ``simple_capsule_dtor`` function has been removed from
``npy_3kcompat.h``. Note that this header is not meant to be used outside
of numpy; other projects should be using their own copy of this file when
needed.
Negative indices in C-Api ``sq_item`` and ``sq_ass_item`` sequence methods
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------------------------------
When directly accessing the ``sq_item`` or ``sq_ass_item`` PyObject slots
for item getting, negative indices will not be supported anymore.
``PySequence_GetItem`` and ``PySequence_SetItem`` however fix negative
indices so that they can be used there.
NDIter
-~~~~~~
+------
When ``NpyIter_RemoveAxis`` is now called, the iterator range will be reset.
When a multi index is being tracked and an iterator is not buffered, it is
@@ -246,7 +247,7 @@ a problem cannot occur.
This change was already applied to the 1.8.1 release.
``zeros_like`` for string dtypes now returns empty strings
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------------------
To match the `zeros` function `zeros_like` now returns an array initialized
with empty strings instead of an array filled with `'0'`.
@@ -255,60 +256,60 @@ New Features
============
Percentile supports more interpolation options
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------------
``np.percentile`` now has the interpolation keyword argument to specify in
which way points should be interpolated if the percentiles fall between two
values. See the documentation for the available options.
Generalized axis support for median and percentile
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
``np.median`` and ``np.percentile`` now support generalized axis arguments like
ufunc reductions do since 1.7. One can now say axis=(index, index) to pick a
list of axes for the reduction. The ``keepdims`` keyword argument was also
added to allow convenient broadcasting to arrays of the original shape.
Dtype parameter added to ``np.linspace`` and ``np.logspace``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------------
The returned data type from the ``linspace`` and ``logspace`` functions can
now be specified using the dtype parameter.
More general ``np.triu`` and ``np.tril`` broadcasting
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------
For arrays with ``ndim`` exceeding 2, these functions will now apply to the
final two axes instead of raising an exception.
``tobytes`` alias for ``tostring`` method
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------
``ndarray.tobytes`` and ``MaskedArray.tobytes`` have been added as aliases
for ``tostring`` which exports arrays as ``bytes``. This is more consistent
in Python 3 where ``str`` and ``bytes`` are not the same.
Build system
-~~~~~~~~~~~~
+------------
Added experimental support for the ppc64le and OpenRISC architecture.
Compatibility to python ``numbers`` module
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------
All numerical numpy types are now registered with the type hierarchy in
the python ``numbers`` module.
``increasing`` parameter added to ``np.vander``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------
The ordering of the columns of the Vandermonde matrix can be specified with
this new boolean argument.
``unique_counts`` parameter added to ``np.unique``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+--------------------------------------------------
The number of times each unique item comes up in the input can now be
obtained as an optional return value.
Support for median and percentile in nanfunctions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------------
The ``np.nanmedian`` and ``np.nanpercentile`` functions behave like
the median and percentile functions except that NaNs are ignored.
NumpyVersion class added
-~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------
The class may be imported from numpy.lib and can be used for version
comparison when the numpy version goes to 1.10.devel. For example::
@@ -317,7 +318,7 @@ comparison when the numpy version goes to 1.10.devel. For example::
... print('Wow, that is an old NumPy version!')
Allow saving arrays with large number of named columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------------
The numpy storage format 1.0 only allowed the array header to have a total size
of 65535 bytes. This can be exceeded by structured arrays with a large number
of columns. A new format 2.0 has been added which extends the header size to 4
@@ -325,7 +326,7 @@ GiB. `np.save` will automatically save in 2.0 format if the data requires it,
else it will always use the more compatible 1.0 format.
Full broadcasting support for ``np.cross``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------
``np.cross`` now properly broadcasts its two input arrays, even if they
have different number of dimensions. In earlier versions this would result
in either an error being raised, or wrong results computed.
@@ -335,87 +336,87 @@ Improvements
============
Better numerical stability for sum in some cases
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+------------------------------------------------
Pairwise summation is now used in the sum method, but only along the fast
axis and for groups of the values <= 8192 in length. This should also
improve the accuracy of var and std in some common cases.
Percentile implemented in terms of ``np.partition``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------------------------
``np.percentile`` has been implemented in terms of ``np.partition`` which
only partially sorts the data via a selection algorithm. This improves the
time complexity from ``O(nlog(n))`` to ``O(n)``.
Performance improvement for ``np.array``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+----------------------------------------
The performance of converting lists containing arrays to arrays using
``np.array`` has been improved. It is now equivalent in speed to
``np.vstack(list)``.
Performance improvement for ``np.searchsorted``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------
For the built-in numeric types, ``np.searchsorted`` no longer relies on the
data type's ``compare`` function to perform the search, but is now
implemented by type specific functions. Depending on the size of the
inputs, this can result in performance improvements over 2x.
Optional reduced verbosity for np.distutils
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Set ``numpy.distutils.system_info.system_info.verbosity = 0`` and then
calls to ``numpy.distutils.system_info.get_info('blas_opt')`` will not
print anything on the output. This is mostly for other packages using
numpy.distutils.
Covariance check in ``np.random.multivariate_normal``
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------
A ``RuntimeWarning`` warning is raised when the covariance matrix is not
positive-semidefinite.
Polynomial Classes no longer template based
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
The polynomial classes have been refactored to use an abstract base class
rather than a template in order to implement a common interface. This makes
importing the polynomial package faster as the classes do not need to be
compiled on import.
More GIL releases
-~~~~~~~~~~~~~~~~~
+-----------------
Several more functions now release the Global Interpreter Lock allowing more
efficient parallelization using the ``threading`` module. Most notably the GIL is
now released for fancy indexing, ``np.where`` and the ``random`` module now
uses a per-state lock instead of the GIL.
MaskedArray support for more complicated base classes
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------------------------------
Built-in assumptions that the baseclass behaved like a plain array are being
removed. In particalur, ``repr`` and ``str`` should now work more reliably.
C-API
-~~~~~
+-----
Deprecations
============
Non-integer scalars for sequence repetition
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-------------------------------------------
Using non-integer numpy scalars to repeat python sequences is deprecated.
For example ``np.float_(2) * [1]`` will be an error in the future.
``select`` input deprecations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+-----------------------------
The integer and empty input to ``select`` is deprecated. In the future only
boolean arrays will be valid conditions and an empty ``condlist`` will be
considered an input error instead of returning the default.
``rank`` function
-~~~~~~~~~~~~~~~~~
+-----------------
The ``rank`` function has been deprecated to avoid confusion with
``numpy.linalg.matrix_rank``.
Object array equality comparisons
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+---------------------------------
In the future object array comparisons both `==` and `np.equal` will not
make use of identity checks anymore. For example:
@@ -435,7 +436,7 @@ instead of just returning False. Code should be using `arr is None`.
All of these changes will give Deprecation- or FutureWarnings at this time.
C-API
-~~~~~
+-----
The utility function npy_PyFile_Dup and npy_PyFile_DupClose are broken by the
internal buffering python 3 applies to its file objects.
diff --git a/doc/release/1.9.1-notes.rst b/doc/release/1.9.1-notes.rst
index a72e71aae..4558237f4 100644
--- a/doc/release/1.9.1-notes.rst
+++ b/doc/release/1.9.1-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.9.1 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.9.x series.
diff --git a/doc/release/1.9.2-notes.rst b/doc/release/1.9.2-notes.rst
index 857b6fe30..268f3aa64 100644
--- a/doc/release/1.9.2-notes.rst
+++ b/doc/release/1.9.2-notes.rst
@@ -1,5 +1,6 @@
+=========================
NumPy 1.9.2 Release Notes
-*************************
+=========================
This is a bugfix only release in the 1.9.x series.
diff --git a/doc/source/about.rst b/doc/source/about.rst
index 0f585950a..be1ced13e 100644
--- a/doc/source/about.rst
+++ b/doc/source/about.rst
@@ -40,8 +40,7 @@ Our main means of communication are:
- `Old NumPy Trac <http://projects.scipy.org/numpy>`__ (no longer used)
-More information about the development of NumPy can be found at
-http://scipy.org/Developer_Zone
+More information about the development of NumPy can be found at our `Developer Zone <https://scipy.scipy.org/scipylib/dev-zone.html>`__.
If you want to fix issues in this documentation, the easiest way
is to participate in `our ongoing documentation marathon
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2bafc50eb..9ac729961 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -5,8 +5,8 @@ import sys, os, re
# Check Sphinx version
import sphinx
-if sphinx.__version__ < "1.0.1":
- raise RuntimeError("Sphinx 1.0.1 or newer required")
+if sphinx.__version__ < "1.2.1":
+ raise RuntimeError("Sphinx 1.2.1 or newer required")
needs_sphinx = '1.0'
@@ -33,7 +33,7 @@ source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
-copyright = '2008-2009, The Scipy community'
+copyright = '2008-2017, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
@@ -126,6 +126,8 @@ htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
+plot_html_show_formats = False
+plot_html_show_source_link = False
# -----------------------------------------------------------------------------
# LaTeX output
@@ -306,19 +308,19 @@ def linkcode_resolve(domain, info):
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
- except:
+ except Exception:
return None
try:
fn = inspect.getsourcefile(obj)
- except:
+ except Exception:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
- except:
+ except Exception:
lineno = None
if lineno:
diff --git a/doc/source/dev/gitwash/development_setup.rst b/doc/source/dev/gitwash/development_setup.rst
index 5623364a2..1ebd4b486 100644
--- a/doc/source/dev/gitwash/development_setup.rst
+++ b/doc/source/dev/gitwash/development_setup.rst
@@ -62,7 +62,7 @@ Overview
git clone https://github.com/your-user-name/numpy.git
cd numpy
- git remote add upstream git://github.com/numpy/numpy.git
+ git remote add upstream https://github.com/numpy/numpy.git
In detail
=========
@@ -95,21 +95,16 @@ Linking your repository to the upstream repo
::
cd numpy
- git remote add upstream git://github.com/numpy/numpy.git
+ git remote add upstream https://github.com/numpy/numpy.git
``upstream`` here is just the arbitrary name we're using to refer to the
main NumPy_ repository at `NumPy github`_.
-Note that we've used ``git://`` for the URL rather than ``https://``. The
-``git://`` URL is read only. This means we that we can't accidentally
-(or deliberately) write to the upstream repo, and we are only going to
-use it to merge into our own code.
-
Just for your own satisfaction, show yourself that you now have a new
'remote', with ``git remote -v show``, giving you something like::
- upstream git://github.com/numpy/numpy.git (fetch)
- upstream git://github.com/numpy/numpy.git (push)
+ upstream https://github.com/numpy/numpy.git (fetch)
+ upstream https://github.com/numpy/numpy.git (push)
origin https://github.com/your-user-name/numpy.git (fetch)
origin https://github.com/your-user-name/numpy.git (push)
@@ -122,7 +117,7 @@ so it pulls from ``upstream`` by default. This can be done with::
You may also want to have easy access to all pull requests sent to the
NumPy repository::
- git config --add remote.upstream.fetch '+refs/pull//head:refs/remotes/upstream/pr/'
+ git config --add remote.upstream.fetch '+refs/pull/*/head:refs/remotes/upstream/pr/*'
Your config file should now look something like (from
``$ cat .git/config``)::
@@ -138,7 +133,7 @@ Your config file should now look something like (from
url = https://github.com/your-user-name/numpy.git
fetch = +refs/heads/*:refs/remotes/origin/*
[remote "upstream"]
- url = git://github.com/numpy/numpy.git
+ url = https://github.com/numpy/numpy.git
fetch = +refs/heads/*:refs/remotes/upstream/*
fetch = +refs/pull/*/head:refs/remotes/upstream/pr/*
[branch "master"]
diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst
index a0f08b57d..b22852a5a 100644
--- a/doc/source/dev/governance/people.rst
+++ b/doc/source/dev/governance/people.rst
@@ -12,8 +12,6 @@ Steering council
* Ralf Gommers
-* Alex Griffing
-
* Charles Harris
* Nathaniel Smith
@@ -22,12 +20,22 @@ Steering council
* Pauli Virtanen
+* Eric Wieser
+
+* Marten van Kerkwijk
+
+* Stephan Hoyer
+
+* Allan Haldane
+
Emeritus members
----------------
* Travis Oliphant - Project Founder / Emeritus Leader (served: 2005-2012)
+* Alex Griffing (served: 2015-2017)
+
NumFOCUS Subcommittee
---------------------
diff --git a/doc/source/f2py/index.rst b/doc/source/f2py/index.rst
index 0cebbfd16..8b7d1453a 100644
--- a/doc/source/f2py/index.rst
+++ b/doc/source/f2py/index.rst
@@ -1,31 +1,20 @@
-.. -*- rest -*-
+#####################################
+F2PY Users Guide and Reference Manual
+#####################################
-//////////////////////////////////////////////////////////////////////
- F2PY Users Guide and Reference Manual
-//////////////////////////////////////////////////////////////////////
-
-:Author: Pearu Peterson
-:Contact: pearu@cens.ioc.ee
-:Web site: http://cens.ioc.ee/projects/f2py2e/
-:Date: 2005/04/02 10:03:26
-
-================
- Introduction
-================
-
-The purpose of the F2PY_ --*Fortran to Python interface generator*--
-project is to provide a connection between Python and Fortran
-languages. F2PY is a Python_ package (with a command line tool
-``f2py`` and a module ``f2py2e``) that facilitates creating/building
-Python C/API extension modules that make it possible
+The purpose of the ``F2PY`` --*Fortran to Python interface generator*--
+is to provide a connection between Python and Fortran
+languages. F2PY is a part of NumPy_ (``numpy.f2py``) and also available as a
+standalone command line tool ``f2py`` when ``numpy`` is installed that
+facilitates creating/building Python C/API extension modules that make it
+possible
* to call Fortran 77/90/95 external subroutines and Fortran 90/95
module subroutines as well as C functions;
* to access Fortran 77 ``COMMON`` blocks and Fortran 90/95 module data,
including allocatable arrays
-from Python. See F2PY_ web site for more information and installation
-instructions.
+from Python.
.. toctree::
:maxdepth: 2
@@ -37,7 +26,6 @@ instructions.
distutils
advanced
-.. _F2PY: http://cens.ioc.ee/projects/f2py2e/
.. _Python: http://www.python.org/
.. _NumPy: http://www.numpy.org/
.. _SciPy: http://www.numpy.org/
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index 139f23f11..e64d0c17e 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -363,7 +363,8 @@ As a corollary to this change, we no longer prohibit casting between datetimes
with date units and datetimes with timeunits. With timezone naive datetimes,
the rule for casting from dates to times is no longer ambiguous.
-pandas_: http://pandas.pydata.org
+.. _pandas: http://pandas.pydata.org
+
Differences Between 1.6 and 1.7 Datetimes
=========================================
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 35df42daa..90bb56b2d 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -137,7 +137,7 @@ sub-types).
.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
- Returns 0 if *obj* is not a sub-class of bigndarray. Otherwise,
+ Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
returns the total number of elements in the array. Safer version
of :c:func:`PyArray_SIZE` (*obj*).
@@ -257,7 +257,7 @@ From scratch
PyTypeObject* subtype, int nd, npy_intp* dims, int type_num, \
npy_intp* strides, void* data, int itemsize, int flags, PyObject* obj)
- This is similar to :c:func:`PyArray_DescrNew` (...) except you
+ This is similar to :c:func:`PyArray_NewFromDescr` (...) except you
specify the data-type descriptor with *type_num* and *itemsize*,
where *type_num* corresponds to a builtin (or user-defined)
type. If the type always has the same number of bytes, then
@@ -303,7 +303,7 @@ From scratch
.. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val)
Fill the array pointed to by *obj* ---which must be a (subclass
- of) bigndarray---with the contents of *val* (evaluated as a byte).
+ of) ndarray---with the contents of *val* (evaluated as a byte).
This macro calls memset, so obj must be contiguous.
.. c:function:: PyObject* PyArray_Zeros( \
@@ -433,9 +433,9 @@ From other objects
.. c:var:: NPY_ARRAY_ENSUREARRAY
- Make sure the result is a base-class ndarray or bigndarray. By
- default, if *op* is an instance of a subclass of the
- bigndarray, an instance of that same subclass is returned. If
+ Make sure the result is a base-class ndarray. By
+ default, if *op* is an instance of a subclass of
+ ndarray, an instance of that same subclass is returned. If
this flag is set, an ndarray object will be returned instead.
.. c:var:: NPY_ARRAY_FORCECAST
@@ -455,8 +455,7 @@ From other objects
is deleted (presumably after your calculations are complete),
its contents will be copied back into *op* and the *op* array
will be made writeable again. If *op* is not writeable to begin
- with, then an error is raised. If *op* is not already an array,
- then this flag has no effect.
+ with, or if it is not already an array, then an error is raised.
.. c:var:: NPY_ARRAY_BEHAVED
@@ -1483,8 +1482,7 @@ specify desired properties of the new array.
.. c:var:: NPY_ARRAY_ENSUREARRAY
- Make sure the resulting object is an actual ndarray (or bigndarray),
- and not a sub-class.
+ Make sure the resulting object is an actual ndarray, and not a sub-class.
.. c:var:: NPY_ARRAY_NOTSWAPPED
@@ -2888,10 +2886,10 @@ to.
to a C-array of :c:type:`npy_intp`. The Python object could also be a
single number. The *seq* variable is a pointer to a structure with
members ptr and len. On successful return, *seq* ->ptr contains a
- pointer to memory that must be freed to avoid a memory leak. The
- restriction on memory size allows this converter to be
- conveniently used for sequences intended to be interpreted as
- array shapes.
+ pointer to memory that must be freed, by calling :c:func:`PyDimMem_FREE`,
+ to avoid a memory leak. The restriction on memory size allows this
+ converter to be conveniently used for sequences intended to be
+ interpreted as array shapes.
.. c:function:: int PyArray_BufferConverter(PyObject* obj, PyArray_Chunk* buf)
@@ -3064,6 +3062,24 @@ the C-API is needed then some additional steps must be taken.
header file as long as you make sure that NO_IMPORT_ARRAY is
#defined before #including that file.
+ Internally, these #defines work as follows:
+
+ * If neither is defined, the C-API is declared to be
+ :c:type:`static void**`, so it is only visible within the
+ compilation unit that #includes numpy/arrayobject.h.
+ * If :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, but
+ :c:macro:`NO_IMPORT_ARRAY` is not, the C-API is declared to
+ be :c:type:`void**`, so that it will also be visible to other
+ compilation units.
+ * If :c:macro:`NO_IMPORT_ARRAY` is #defined, regardless of
+ whether :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is, the C-API is
+ declared to be :c:type:`extern void**`, so it is expected to
+ be defined in another compilation unit.
+ * Whenever :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` is #defined, it
+ also changes the name of the variable holding the C-API, which
+ defaults to :c:data:`PyArray_API`, to whatever the macro is
+ #defined to.
+
Checking the API Version
^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/doc/source/reference/internals.code-explanations.rst b/doc/source/reference/internals.code-explanations.rst
index af34d716f..94e827429 100644
--- a/doc/source/reference/internals.code-explanations.rst
+++ b/doc/source/reference/internals.code-explanations.rst
@@ -105,7 +105,7 @@ which work very simply.
For the general case, the iteration works by keeping track of a list
of coordinate counters in the iterator object. At each iteration, the
last coordinate counter is increased (starting from 0). If this
-counter is smaller then one less than the size of the array in that
+counter is smaller than one less than the size of the array in that
dimension (a pre-computed and stored value), then the counter is
increased and the dataptr member is increased by the strides in that
dimension and the macro ends. If the end of a dimension is reached,
@@ -369,7 +369,7 @@ return arrays are constructed. If any provided output array doesn't
have the correct type (or is mis-aligned) and is smaller than the
buffer size, then a new output array is constructed with the special
UPDATEIFCOPY flag set so that when it is DECREF'd on completion of the
-function, it's contents will be copied back into the output array.
+function, its contents will be copied back into the output array.
Iterators for the output arguments are then processed.
Finally, the decision is made about how to execute the looping
@@ -475,7 +475,7 @@ function is called with just the ndarray as the first argument.
Methods
-------
-Their are three methods of ufuncs that require calculation similar to
+There are three methods of ufuncs that require calculation similar to
the general-purpose ufuncs. These are reduce, accumulate, and
reduceat. Each of these methods requires a setup command followed by a
loop. There are four loop styles possible for the methods
diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst
index adb51416a..1fee9a74a 100644
--- a/doc/source/reference/maskedarray.generic.rst
+++ b/doc/source/reference/maskedarray.generic.rst
@@ -379,8 +379,8 @@ is masked.
When accessing a slice, the output is a masked array whose
:attr:`~MaskedArray.data` attribute is a view of the original data, and whose
mask is either :attr:`nomask` (if there was no invalid entries in the original
-array) or a copy of the corresponding slice of the original mask. The copy is
-required to avoid propagation of any modification of the mask to the original.
+array) or a view of the corresponding slice of the original mask. The view is
+required to ensure propagation of any modification of the mask to the original.
>>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1])
>>> mx = x[:3]
diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst
index 09c7d9b4e..4715f636e 100644
--- a/doc/source/reference/routines.linalg.rst
+++ b/doc/source/reference/routines.linalg.rst
@@ -18,6 +18,7 @@ Matrix and vector products
matmul
tensordot
einsum
+ einsum_path
linalg.matrix_power
kron
diff --git a/doc/source/reference/routines.logic.rst b/doc/source/reference/routines.logic.rst
index 88edde855..7fa0cd1de 100644
--- a/doc/source/reference/routines.logic.rst
+++ b/doc/source/reference/routines.logic.rst
@@ -19,6 +19,7 @@ Array contents
isfinite
isinf
isnan
+ isnat
isneginf
isposinf
diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst
index a2fb06958..4c2f2800a 100644
--- a/doc/source/reference/routines.math.rst
+++ b/doc/source/reference/routines.math.rst
@@ -108,6 +108,7 @@ Arithmetic operations
add
reciprocal
+ positive
negative
multiply
divide
diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst
index 0db77eb7c..f44ddd46c 100644
--- a/doc/source/reference/routines.polynomials.classes.rst
+++ b/doc/source/reference/routines.polynomials.classes.rst
@@ -52,7 +52,7 @@ the conventional Polynomial class because of its familiarity::
>>> from numpy.polynomial import Polynomial as P
>>> p = P([1,2,3])
>>> p
- Polynomial([ 1., 2., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([ 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
Note that there are three parts to the long version of the printout. The
first is the coefficients, the second is the domain, and the third is the
@@ -77,19 +77,19 @@ we ignore them and run through the basic algebraic and arithmetic operations.
Addition and Subtraction::
>>> p + p
- Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 2., 4., 6.], domain=[-1, 1], window=[-1, 1])
>>> p - p
- Polynomial([ 0.], [-1., 1.], [-1., 1.])
+ Polynomial([ 0.], domain=[-1, 1], window=[-1, 1])
Multiplication::
>>> p * p
- Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.])
+ Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1])
Powers::
>>> p**2
- Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.])
+ Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1])
Division:
@@ -100,20 +100,20 @@ versions the '/' will only work for division by scalars. At some point it
will be deprecated::
>>> p // P([-1, 1])
- Polynomial([ 5., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([ 5., 3.], domain=[-1, 1], window=[-1, 1])
Remainder::
>>> p % P([-1, 1])
- Polynomial([ 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 6.], domain=[-1, 1], window=[-1, 1])
Divmod::
>>> quo, rem = divmod(p, P([-1, 1]))
>>> quo
- Polynomial([ 5., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([ 5., 3.], domain=[-1, 1], window=[-1, 1])
>>> rem
- Polynomial([ 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 6.], domain=[-1, 1], window=[-1, 1])
Evaluation::
@@ -134,7 +134,7 @@ the polynomials are regarded as functions this is composition of
functions::
>>> p(p)
- Polynomial([ 6., 16., 36., 36., 27.], [-1., 1.], [-1., 1.])
+ Polynomial([ 6., 16., 36., 36., 27.], domain=[-1, 1], window=[-1, 1])
Roots::
@@ -148,11 +148,11 @@ tuples, lists, arrays, and scalars are automatically cast in the arithmetic
operations::
>>> p + [1, 2, 3]
- Polynomial([ 2., 4., 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 2., 4., 6.], domain=[-1, 1], window=[-1, 1])
>>> [1, 2, 3] * p
- Polynomial([ 1., 4., 10., 12., 9.], [-1., 1.], [-1., 1.])
+ Polynomial([ 1., 4., 10., 12., 9.], domain=[-1, 1], window=[-1, 1])
>>> p / 2
- Polynomial([ 0.5, 1. , 1.5], [-1., 1.], [-1., 1.])
+ Polynomial([ 0.5, 1. , 1.5], domain=[-1, 1], window=[-1, 1])
Polynomials that differ in domain, window, or class can't be mixed in
arithmetic::
@@ -180,7 +180,7 @@ conversion of Polynomial classes among themselves is done for type, domain,
and window casting::
>>> p(T([0, 1]))
- Chebyshev([ 2.5, 2. , 1.5], [-1., 1.], [-1., 1.])
+ Chebyshev([ 2.5, 2. , 1.5], domain=[-1, 1], window=[-1, 1])
Which gives the polynomial `p` in Chebyshev form. This works because
:math:`T_1(x) = x` and substituting :math:`x` for :math:`x` doesn't change
@@ -195,18 +195,18 @@ Polynomial instances can be integrated and differentiated.::
>>> from numpy.polynomial import Polynomial as P
>>> p = P([2, 6])
>>> p.integ()
- Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([ 0., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> p.integ(2)
- Polynomial([ 0., 0., 1., 1.], [-1., 1.], [-1., 1.])
+ Polynomial([ 0., 0., 1., 1.], domain=[-1, 1], window=[-1, 1])
The first example integrates `p` once, the second example integrates it
twice. By default, the lower bound of the integration and the integration
constant are 0, but both can be specified.::
>>> p.integ(lbnd=-1)
- Polynomial([-1., 2., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([-1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> p.integ(lbnd=-1, k=1)
- Polynomial([ 0., 2., 3.], [-1., 1.], [-1., 1.])
+ Polynomial([ 0., 2., 3.], domain=[-1, 1], window=[-1, 1])
In the first case the lower bound of the integration is set to -1 and the
integration constant is 0. In the second the constant of integration is set
@@ -215,9 +215,9 @@ number of times the polynomial is differentiated::
>>> p = P([1, 2, 3])
>>> p.deriv(1)
- Polynomial([ 2., 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 2., 6.], domain=[-1, 1], window=[-1, 1])
>>> p.deriv(2)
- Polynomial([ 6.], [-1., 1.], [-1., 1.])
+ Polynomial([ 6.], domain=[-1, 1], window=[-1, 1])
Other Polynomial Constructors
@@ -233,9 +233,9 @@ are demonstrated below::
>>> from numpy.polynomial import Chebyshev as T
>>> p = P.fromroots([1, 2, 3])
>>> p
- Polynomial([ -6., 11., -6., 1.], [-1., 1.], [-1., 1.])
+ Polynomial([ -6., 11., -6., 1.], domain=[-1, 1], window=[-1, 1])
>>> p.convert(kind=T)
- Chebyshev([ -9. , 11.75, -3. , 0.25], [-1., 1.], [-1., 1.])
+ Chebyshev([ -9. , 11.75, -3. , 0.25], domain=[-1, 1], window=[-1, 1])
The convert method can also convert domain and window::
@@ -249,9 +249,9 @@ available. The cast method works like the convert method while the basis
method returns the basis polynomial of given degree::
>>> P.basis(3)
- Polynomial([ 0., 0., 0., 1.], [-1., 1.], [-1., 1.])
+ Polynomial([ 0., 0., 0., 1.], domain=[-1, 1], window=[-1, 1])
>>> T.cast(p)
- Chebyshev([ -9. , 11.75, -3. , 0.25], [-1., 1.], [-1., 1.])
+ Chebyshev([ -9. , 11.75, -3. , 0.25], domain=[-1, 1], window=[-1, 1])
Conversions between types can be useful, but it is *not* recommended
for routine use. The loss of numerical precision in passing from a
diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst
index c43aeeed9..ad95bb399 100644
--- a/doc/source/reference/routines.testing.rst
+++ b/doc/source/reference/routines.testing.rst
@@ -41,7 +41,6 @@ Decorators
decorators.slow
decorate_methods
-
Test Running
------------
.. autosummary::
@@ -50,3 +49,4 @@ Test Running
Tester
run_module_suite
rundocs
+ suppress_warnings
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index b3fb4d384..e28496cf6 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -426,12 +426,14 @@ Methods
All ufuncs have four methods. However, these methods only make sense on
ufuncs that take two input arguments and return one output argument.
Attempting to call these methods on other ufuncs will cause a
-:exc:`ValueError`. The reduce-like methods all take an *axis* keyword
-and a *dtype* keyword, and the arrays must all have dimension >= 1.
+:exc:`ValueError`. The reduce-like methods all take an *axis* keyword, a *dtype*
+keyword, and an *out* keyword, and the arrays must all have dimension >= 1.
The *axis* keyword specifies the axis of the array over which the reduction
-will take place and may be negative, but must be an integer. The
-*dtype* keyword allows you to manage a very common problem that arises
-when naively using :ref:`{op}.reduce <ufunc.reduce>`. Sometimes you may
+will take place (with negative values counting backwards). Generally, it is an
+integer, though for :meth:`ufunc.reduce`, it can also be a tuple of `int` to
+reduce over several axes at once, or `None`, to reduce over all axes.
+The *dtype* keyword allows you to manage a very common problem that arises
+when naively using :meth:`ufunc.reduce`. Sometimes you may
have an array of a certain data type and wish to add up all of its
elements, but the result does not fit into the data type of the
array. This commonly happens if you have an array of single-byte
@@ -443,7 +445,10 @@ mostly up to you. There is one exception: if no *dtype* is given for a
reduction on the "add" or "multiply" operations, then if the input type is
an integer (or Boolean) data-type and smaller than the size of the
:class:`int_` data type, it will be internally upcast to the :class:`int_`
-(or :class:`uint`) data-type.
+(or :class:`uint`) data-type. Finally, the *out* keyword allows you to provide
+an output array (for single-output ufuncs, which are currently the only ones
+supported; for future extension, however, a tuple with a single argument
+can be passed in). If *out* is given, the *dtype* argument is ignored.
Ufuncs also have a fifth method that allows in place operations to be
performed using fancy indexing. No buffering is used on the dimensions where
@@ -660,6 +665,7 @@ single operation.
isfinite
isinf
isnan
+ isnat
fabs
signbit
copysign
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 1048ab725..2bdd5a0d0 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -46,12 +46,12 @@ ends with ``'.gz'``, a :class:`gzip` archive is expected; if it ends with
Splitting the lines into columns
================================
-The :keyword:`delimiter` argument
----------------------------------
+The ``delimiter`` argument
+--------------------------
Once the file is defined and open for reading, :func:`~numpy.genfromtxt`
splits each non-empty line into a sequence of strings. Empty or commented
-lines are just skipped. The :keyword:`delimiter` keyword is used to define
+lines are just skipped. The ``delimiter`` keyword is used to define
how the splitting should take place.
Quite often, a single character marks the separation between columns. For
@@ -71,7 +71,7 @@ spaces are considered as a single white space.
Alternatively, we may be dealing with a fixed-width file, where columns are
defined as a given number of characters. In that case, we need to set
-:keyword:`delimiter` to a single integer (if all the columns have the same
+``delimiter`` to a single integer (if all the columns have the same
size) or to a sequence of integers (if columns can have different sizes)::
>>> data = " 1 2 3\n 4 5 67\n890123 4"
@@ -86,13 +86,13 @@ size) or to a sequence of integers (if columns can have different sizes)::
[ 4., 567., 9.]])
-The :keyword:`autostrip` argument
----------------------------------
+The ``autostrip`` argument
+--------------------------
By default, when a line is decomposed into a series of strings, the
individual entries are not stripped of leading nor trailing white spaces.
This behavior can be overwritten by setting the optional argument
-:keyword:`autostrip` to a value of ``True``::
+``autostrip`` to a value of ``True``::
>>> data = "1, abc , 2\n 3, xxx, 4"
>>> # Without autostrip
@@ -107,10 +107,10 @@ This behavior can be overwritten by setting the optional argument
dtype='|U5')
-The :keyword:`comments` argument
---------------------------------
+The ``comments`` argument
+-------------------------
-The optional argument :keyword:`comments` is used to define a character
+The optional argument ``comments`` is used to define a character
string that marks the beginning of a comment. By default,
:func:`~numpy.genfromtxt` assumes ``comments='#'``. The comment marker may
occur anywhere on the line. Any character present after the comment
@@ -143,15 +143,15 @@ marker(s) is simply ignored::
Skipping lines and choosing columns
===================================
-The :keyword:`skip_header` and :keyword:`skip_footer` arguments
+The ``skip_header`` and ``skip_footer`` arguments
---------------------------------------------------------------
The presence of a header in the file can hinder data processing. In that
-case, we need to use the :keyword:`skip_header` optional argument. The
+case, we need to use the ``skip_header`` optional argument. The
values of this argument must be an integer which corresponds to the number
of lines to skip at the beginning of the file, before any other action is
performed. Similarly, we can skip the last ``n`` lines of the file by
-using the :keyword:`skip_footer` attribute and giving it a value of ``n``::
+using the ``skip_footer`` attribute and giving it a value of ``n``::
>>> data = "\n".join(str(i) for i in range(10))
>>> np.genfromtxt(BytesIO(data),)
@@ -164,12 +164,12 @@ By default, ``skip_header=0`` and ``skip_footer=0``, meaning that no lines
are skipped.
-The :keyword:`usecols` argument
--------------------------------
+The ``usecols`` argument
+------------------------
In some cases, we are not interested in all the columns of the data but
only a few of them. We can select which columns to import with the
-:keyword:`usecols` argument. This argument accepts a single integer or a
+``usecols`` argument. This argument accepts a single integer or a
sequence of integers corresponding to the indices of the columns to import.
Remember that by convention, the first column has an index of 0. Negative
integers behave the same as regular Python negative indexes.
@@ -183,7 +183,7 @@ can use ``usecols=(0, -1)``::
[ 4., 6.]])
If the columns have names, we can also select which columns to import by
-giving their name to the :keyword:`usecols` argument, either as a sequence
+giving their name to the ``usecols`` argument, either as a sequence
of strings or a comma-separated string::
>>> data = "1 2 3\n4 5 6"
@@ -203,12 +203,12 @@ Choosing the data type
======================
The main way to control how the sequences of strings we have read from the
-file are converted to other types is to set the :keyword:`dtype` argument.
+file are converted to other types is to set the ``dtype`` argument.
Acceptable values for this argument are:
* a single type, such as ``dtype=float``.
The output will be 2D with the given dtype, unless a name has been
- associated with each column with the use of the :keyword:`names` argument
+ associated with each column with the use of the ``names`` argument
(see below). Note that ``dtype=float`` is the default for
:func:`~numpy.genfromtxt`.
* a sequence of types, such as ``dtype=(int, float, float)``.
@@ -223,7 +223,7 @@ Acceptable values for this argument are:
In all the cases but the first one, the output will be a 1D array with a
structured dtype. This dtype has as many fields as items in the sequence.
-The field names are defined with the :keyword:`names` keyword.
+The field names are defined with the ``names`` keyword.
When ``dtype=None``, the type of each column is determined iteratively from
@@ -242,8 +242,8 @@ significantly slower than setting the dtype explicitly.
Setting the names
=================
-The :keyword:`names` argument
------------------------------
+The ``names`` argument
+----------------------
A natural approach when dealing with tabular data is to allocate a name to
each column. A first possibility is to use an explicit structured dtype,
@@ -254,7 +254,7 @@ as mentioned previously::
array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
-Another simpler possibility is to use the :keyword:`names` keyword with a
+Another simpler possibility is to use the ``names`` keyword with a
sequence of strings or a comma-separated string::
>>> data = BytesIO("1 2 3\n 4 5 6")
@@ -267,7 +267,7 @@ By giving a sequence of names, we are forcing the output to a structured
dtype.
We may sometimes need to define the column names from the data itself. In
-that case, we must use the :keyword:`names` keyword with a value of
+that case, we must use the ``names`` keyword with a value of
``True``. The names will then be read from the first line (after the
``skip_header`` ones), even if the line is commented out::
@@ -276,7 +276,7 @@ that case, we must use the :keyword:`names` keyword with a value of
array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
-The default value of :keyword:`names` is ``None``. If we give any other
+The default value of ``names`` is ``None``. If we give any other
value to the keyword, the new names will overwrite the field names we may
have defined with the dtype::
@@ -288,8 +288,8 @@ have defined with the dtype::
dtype=[('A', '<i8'), ('B', '<f8'), ('C', '<i8')])
-The :keyword:`defaultfmt` argument
-----------------------------------
+The ``defaultfmt`` argument
+---------------------------
If ``names=None`` but a structured dtype is expected, names are defined
with the standard NumPy default of ``"f%i"``, yielding names like ``f0``,
@@ -308,7 +308,7 @@ dtype, the missing names will be defined with this default template::
array([(1, 2.0, 3), (4, 5.0, 6)],
dtype=[('a', '<i8'), ('f0', '<f8'), ('f1', '<i8')])
-We can overwrite this default with the :keyword:`defaultfmt` argument, that
+We can overwrite this default with the ``defaultfmt`` argument, that
takes any format string::
>>> data = BytesIO("1 2 3\n 4 5 6")
@@ -333,16 +333,16 @@ correspond to the name of a standard attribute (like ``size`` or
``shape``), which would confuse the interpreter. :func:`~numpy.genfromtxt`
accepts three optional arguments that provide a finer control on the names:
- :keyword:`deletechars`
+ ``deletechars``
Gives a string combining all the characters that must be deleted from
the name. By default, invalid characters are
``~!@#$%^&*()-=+~\|]}[{';:
/?.>,<``.
- :keyword:`excludelist`
+ ``excludelist``
Gives a list of the names to exclude, such as ``return``, ``file``,
``print``... If one of the input name is part of this list, an
underscore character (``'_'``) will be appended to it.
- :keyword:`case_sensitive`
+ ``case_sensitive``
Whether the names should be case-sensitive (``case_sensitive=True``),
converted to upper case (``case_sensitive=False`` or
``case_sensitive='upper'``) or to lower case
@@ -353,15 +353,15 @@ accepts three optional arguments that provide a finer control on the names:
Tweaking the conversion
=======================
-The :keyword:`converters` argument
-----------------------------------
+The ``converters`` argument
+---------------------------
Usually, defining a dtype is sufficient to define how the sequence of
strings must be converted. However, some additional control may sometimes
be required. For example, we may want to make sure that a date in a format
``YYYY/MM/DD`` is converted to a :class:`datetime` object, or that a string
like ``xx%`` is properly converted to a float between 0 and 1. In such
-cases, we should define conversion functions with the :keyword:`converters`
+cases, we should define conversion functions with the ``converters``
arguments.
The value of this argument is typically a dictionary with column indices or
@@ -427,16 +427,16 @@ float. However, user-defined converters may rapidly become cumbersome to
manage.
The :func:`~nummpy.genfromtxt` function provides two other complementary
-mechanisms: the :keyword:`missing_values` argument is used to recognize
-missing data and a second argument, :keyword:`filling_values`, is used to
+mechanisms: the ``missing_values`` argument is used to recognize
+missing data and a second argument, ``filling_values``, is used to
process these missing data.
-:keyword:`missing_values`
--------------------------
+``missing_values``
+------------------
By default, any empty string is marked as missing. We can also consider
more complex strings, such as ``"N/A"`` or ``"???"`` to represent missing
-or invalid data. The :keyword:`missing_values` argument accepts three kind
+or invalid data. The ``missing_values`` argument accepts three kinds
of values:
a string or a comma-separated string
@@ -451,8 +451,8 @@ of values:
define a default applicable to all columns.
-:keyword:`filling_values`
--------------------------
+``filling_values``
+------------------
We know how to recognize missing data, but we still need to provide a value
for these missing entries. By default, this value is determined from the
@@ -469,8 +469,8 @@ Expected type Default
============= ==============
We can get a finer control on the conversion of missing values with the
-:keyword:`filling_values` optional argument. Like
-:keyword:`missing_values`, this argument accepts different kind of values:
+``filling_values`` optional argument. Like
+``missing_values``, this argument accepts different kinds of values:
a single value
This will be the default for all columns
@@ -497,13 +497,13 @@ and second column, and to -999 if they occur in the last column::
dtype=[('a', '<i8'), ('b', '<i8'), ('c', '<i8')])
-:keyword:`usemask`
-------------------
+``usemask``
+-----------
We may also want to keep track of the occurrence of missing data by
constructing a boolean mask, with ``True`` entries where data was missing
and ``False`` otherwise. To do that, we just have to set the optional
-argument :keyword:`usemask` to ``True`` (the default is ``False``). The
+argument ``usemask`` to ``True`` (the default is ``False``). The
output array will then be a :class:`~numpy.ma.MaskedArray`.
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index fa3f2ccb4..b98f89c2d 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -32,7 +32,7 @@ Building NumPy requires the following software installed:
FORTRAN 77 compiler installed.
Note that NumPy is developed mainly using GNU compilers. Compilers from
- other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Porland,
+ other vendors such as Intel, Absoft, Sun, NAG, Compaq, Vast, Portland,
Lahey, HP, IBM, Microsoft are only supported in the form of community
feedback, and may not work out of the box. GCC 4.x (and later) compilers
are recommended.
@@ -137,7 +137,7 @@ Additional compiler flags can be supplied by setting the ``OPT``,
Building with ATLAS support
---------------------------
-Ubuntu
+Ubuntu
~~~~~~
You can install the necessary package for optimized ATLAS with this command::
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index 1f19c8405..5c321088d 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -390,8 +390,8 @@ an error condition set if it was not successful.
(optional) Specify any optional data needed by the function which will
be passed when the function is called.
- .. index::
- pair: dtype; adding new
+.. index::
+ pair: dtype; adding new
Subtyping the ndarray in C
diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst
index 59e3dc6dc..addc38f45 100644
--- a/doc/source/user/c-info.ufunc-tutorial.rst
+++ b/doc/source/user/c-info.ufunc-tutorial.rst
@@ -1098,7 +1098,7 @@ automatically generates a ufunc from a C function with the correct signature.
.. code-block:: c
static void
- double_add(char *args, npy_intp *dimensions, npy_intp *steps,
+ double_add(char **args, npy_intp *dimensions, npy_intp *steps,
void *extra)
{
npy_intp i;
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 7f48e7031..66641eed3 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -31,7 +31,7 @@ Some Key Differences
these arrays are designed to act more or less like matrix operations
in linear algebra.
- In NumPy the basic type is a multidimensional ``array``. Operations
- on these arrays in all dimensionalities including 2D are elementwise
+ on these arrays in all dimensionalities including 2D are element-wise
operations. However, there is a special ``matrix`` type for doing
linear algebra, which is just a subclass of the ``array`` class.
Operations on matrix-class arrays are linear algebra operations.
@@ -77,9 +77,10 @@ Short answer
linear algebra operations.
- You can have standard vectors or row/column vectors if you like.
-The only disadvantage of using the array type is that you will have to
-use ``dot`` instead of ``*`` to multiply (reduce) two tensors (scalar
-product, matrix vector multiplication etc.).
+Until Python 3.5 the only disadvantage of using the array type was that you
+had to use ``dot`` instead of ``*`` to multiply (reduce) two tensors
+(scalar product, matrix vector multiplication etc.). Since Python 3.5 you
+can use the matrix multiplication ``@`` operator.
Long answer
-----------
@@ -136,7 +137,9 @@ There are pros and cons to using both:
``dot(v,A)`` treats ``v`` as a row vector. This can save you having to
type a lot of transposes.
- ``<:(`` Having to use the ``dot()`` function for matrix-multiply is
- messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``.
+ messy -- ``dot(dot(A,B),C)`` vs. ``A*B*C``. This isn't an issue with
+ Python >= 3.5 because the ``@`` operator allows it to be written as
+ ``A @ B @ C``.
- ``:)`` Element-wise multiplication is easy: ``A*B``.
- ``:)`` ``array`` is the "default" NumPy type, so it gets the most
testing, and is the type most likely to be returned by 3rd party
@@ -145,7 +148,7 @@ There are pros and cons to using both:
- ``:)`` Closer in semantics to tensor algebra, if you are familiar
with that.
- ``:)`` *All* operations (``*``, ``/``, ``+``, ``-`` etc.) are
- elementwise
+ element-wise.
- ``matrix``
@@ -160,11 +163,12 @@ There are pros and cons to using both:
it's a bug), but 3rd party code based on NumPy may not honor type
preservation like NumPy does.
- ``:)`` ``A*B`` is matrix multiplication, so more convenient for
- linear algebra.
+ linear algebra (For Python >= 3.5 plain arrays have the same convenience
+ with the ``@`` operator).
- ``<:(`` Element-wise multiplication requires calling a function,
``multiply(A,B)``.
- ``<:(`` The use of operator overloading is a bit illogical: ``*``
- does not work elementwise but ``/`` does.
+ does not work element-wise but ``/`` does.
The ``array`` is thus much more advisable to use.
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 7295d1aca..4a10faae8 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -25,14 +25,12 @@ The Basics
NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
-tuple of positive integers. In NumPy dimensions are called *axes*. The
-number of axes is *rank*.
+tuple of positive integers. In NumPy dimensions are called *axes*.
-For example, the coordinates of a point in 3D space ``[1, 2, 1]`` is an
-array of rank 1, because it has one axis. That axis has a length of 3.
-In the example pictured below, the array has rank 2 (it is 2-dimensional).
-The first dimension (axis) has a length of 2, the second dimension has a
-length of 3.
+For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
+one axis. That axis has 3 elements in it, so we say it has a length
+of 3. In the example pictured below, the array has 2 axes. The first
+axis has a length of 2, the second axis has a length of 3.
::
@@ -46,14 +44,12 @@ arrays and offers less functionality. The more important attributes of
an ``ndarray`` object are:
ndarray.ndim
- the number of axes (dimensions) of the array. In the Python world,
- the number of dimensions is referred to as *rank*.
+ the number of axes (dimensions) of the array.
ndarray.shape
the dimensions of the array. This is a tuple of integers indicating
the size of the array in each dimension. For a matrix with *n* rows
and *m* columns, ``shape`` will be ``(n,m)``. The length of the
- ``shape`` tuple is therefore the rank, or number of dimensions,
- ``ndim``.
+ ``shape`` tuple is therefore the number of axes, ``ndim``.
ndarray.size
the total number of elements of the array. This is equal to the
product of the elements of ``shape``.
@@ -537,8 +533,8 @@ remaining axes. NumPy also allows you to write this using dots as
``b[i,...]``.
The **dots** (``...``) represent as many colons as needed to produce a
-complete indexing tuple. For example, if ``x`` is a rank 5 array (i.e.,
-it has 5 axes), then
+complete indexing tuple. For example, if ``x`` is an array with 5
+axes, then
- ``x[1,2,...]`` is equivalent to ``x[1,2,:,:,:]``,
- ``x[...,3]`` to ``x[:,:,:,:,3]`` and
@@ -1119,13 +1115,13 @@ value of time-dependent series::
[-0.53657292, 0.42016704, 0.99060736, 0.65028784],
[-0.28790332, -0.96139749, -0.75098725, 0.14987721]])
>>>
- >>> ind = data.argmax(axis=0) # index of the maxima for each series
+ >>> ind = data.argmax(axis=0) # index of the maxima for each series
>>> ind
array([2, 0, 3, 1])
>>>
- >>> time_max = time[ ind] # times corresponding to the maxima
+ >>> time_max = time[ind] # times corresponding to the maxima
>>>
- >>> data_max = data[ind, xrange(data.shape[1])] # => data[ind[0],0], data[ind[1],1]...
+ >>> data_max = data[ind, range(data.shape[1])] # => data[ind[0],0], data[ind[1],1]...
>>>
>>> time_max
array([ 82.5 , 20. , 113.75, 51.25])
@@ -1245,9 +1241,9 @@ selecting the slices we want::
Note that the length of the 1D boolean array must coincide with the
length of the dimension (or axis) you want to slice. In the previous
-example, ``b1`` is a 1-rank array with length 3 (the number of *rows* in
-``a``), and ``b2`` (of length 4) is suitable to index the 2nd rank
-(columns) of ``a``.
+example, ``b1`` has length 3 (the number of *rows* in ``a``), and
+``b2`` (of length 4) is suitable to index the 2nd axis (columns) of
+``a``.
The ix_() function
-------------------
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 0f1bcf766..db99294bc 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -148,9 +148,9 @@ else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
# no-one else in the world is using it (though I hope not)
- from .testing import Tester
- test = testing.nosetester._numpy_tester().test
- bench = testing.nosetester._numpy_tester().bench
+ from .testing import Tester, _numpy_tester
+ test = _numpy_tester().test
+ bench = _numpy_tester().bench
# Allow distributors to run custom init code
from . import _distributor_init
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 64a84da96..2d7b69bc4 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -53,7 +53,7 @@ class VisibleDeprecationWarning(UserWarning):
pass
-class _NoValue:
+class _NoValue(object):
"""Special keyword value.
This class may be used as the default value assigned to a deprecated
diff --git a/numpy/_import_tools.py b/numpy/_import_tools.py
index 18ac78d29..cb8bc477c 100644
--- a/numpy/_import_tools.py
+++ b/numpy/_import_tools.py
@@ -303,8 +303,7 @@ class PackageLoader(object):
lines.append(line)
line = tab
line += ' ' + word
- else:
- lines.append(line)
+ lines.append(line)
return '\n'.join(lines)
def get_pkgdocs(self):
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 449196efb..307a8d837 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -931,7 +931,7 @@ add_newdoc('numpy.core.multiarray', 'zeros',
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
- >>> np.zeros((5,), dtype=np.int)
+ >>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
@@ -1038,7 +1038,7 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
Examples
--------
>>> iterable = (x*x for x in range(5))
- >>> np.fromiter(iterable, np.float)
+ >>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""")
@@ -1158,7 +1158,7 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
add_newdoc('numpy.core.multiarray', 'concatenate',
"""
- concatenate((a1, a2, ...), axis=0)
+ concatenate((a1, a2, ...), axis=0, out=None)
Join a sequence of arrays along an existing axis.
@@ -1169,6 +1169,10 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
Returns
-------
@@ -1338,7 +1342,8 @@ add_newdoc('numpy.core.multiarray', 'arange',
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
- step size is 1. If `step` is specified, `start` must also be given.
+ step size is 1. If `step` is specified as a positional argument,
+ `start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
@@ -1589,7 +1594,7 @@ add_newdoc('numpy.core.multiarray', 'lexsort',
add_newdoc('numpy.core.multiarray', 'can_cast',
"""
- can_cast(from, totype, casting = 'safe')
+ can_cast(from_, to, casting='safe')
Returns True if cast between data types can occur according to the
casting rule. If from is a scalar or array scalar, also returns
@@ -1598,9 +1603,9 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
Parameters
----------
- from : dtype, dtype specifier, scalar, or array
+ from_ : dtype, dtype specifier, scalar, or array
Data type, scalar, or array to cast from.
- totype : dtype or dtype specifier
+ to : dtype or dtype specifier
Data type to cast to.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur.
@@ -1635,9 +1640,9 @@ add_newdoc('numpy.core.multiarray', 'can_cast',
>>> np.can_cast(np.int32, np.int64)
True
- >>> np.can_cast(np.float64, np.complex)
+ >>> np.can_cast(np.float64, complex)
True
- >>> np.can_cast(np.complex, np.float)
+ >>> np.can_cast(complex, float)
False
>>> np.can_cast('i8', 'f8')
@@ -3096,7 +3101,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
- """a.__deepcopy__() -> Deep copy of array.
+ """a.__deepcopy__(memo, /) -> Deep copy of array.
Used if copy.deepcopy is called on an array.
@@ -3292,7 +3297,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
- a.byteswap(inplace)
+ a.byteswap(inplace=False)
Swap the bytes of the array elements
@@ -3315,7 +3320,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> map(hex, A)
['0x1', '0x100', '0x2233']
- >>> A.byteswap(True)
+ >>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> map(hex, A)
['0x100', '0x1', '0x3322']
@@ -3418,7 +3423,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
- as possible. (Note that this function and :func:numpy.copy are very
+ as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
@@ -3764,7 +3769,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
- a.max(axis=None, out=None)
+ a.max(axis=None, out=None, keepdims=False)
Return the maximum along a given axis.
@@ -5141,7 +5146,7 @@ add_newdoc('numpy.core.multiarray', 'bincount',
------
ValueError
If the input is not 1-dimensional, or contains elements with negative
- values, or if `minlength` is non-positive.
+ values, or if `minlength` is negative.
TypeError
If the type of the input is float or complex.
@@ -5163,7 +5168,7 @@ add_newdoc('numpy.core.multiarray', 'bincount',
The input array needs to be of integer dtype, otherwise a
TypeError is raised:
- >>> np.bincount(np.arange(5, dtype=np.float))
+ >>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
@@ -5444,9 +5449,11 @@ add_newdoc('numpy.core', 'ufunc',
----------
*x : array_like
Input arrays.
- out : ndarray or tuple of ndarray, optional
+ out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
- must have a shape that the inputs broadcast to.
+ must have a shape that the inputs broadcast to. A tuple of arrays
+ (possible only as a keyword argument) must have length equal to the
+ number of outputs; use `None` for outputs to be allocated by the ufunc.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
@@ -5667,9 +5674,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided, a
- freshly-allocated array is returned.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
@@ -5712,7 +5724,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
- accumulate(array, axis=0, dtype=None, out=None, keepdims=None)
+ accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
@@ -5741,11 +5753,14 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if such is provided, or the
the data-type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided a
- freshly-allocated array is returned.
- keepdims : bool
- Has no effect. Deprecated, and will be removed in future.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
Returns
-------
@@ -5820,9 +5835,14 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
- out : ndarray, optional
- A location into which the result is stored. If not provided a
- freshly-allocated array is returned.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If not provided or `None`,
+ a freshly-allocated array is returned. For consistency with
+ :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ 1-element tuple.
+
+ .. versionchanged:: 1.13.0
+ Tuples are allowed for keyword argument.
Returns
-------
@@ -6084,7 +6104,7 @@ add_newdoc('numpy.core.multiarray', 'dtype',
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
- >>> np.dtype([('hello',(np.int,3)),('world',np.void,10)])
+ >>> np.dtype([('hello',(int,3)),('world',np.void,10)])
dtype([('hello', '<i4', 3), ('world', '|V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
diff --git a/numpy/compat/tests/__init__.py b/numpy/compat/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/compat/tests/__init__.py
diff --git a/numpy/compat/tests/test_compat.py b/numpy/compat/tests/test_compat.py
index 1ac24401a..b91971d38 100644
--- a/numpy/compat/tests/test_compat.py
+++ b/numpy/compat/tests/test_compat.py
@@ -4,7 +4,7 @@ from os.path import join
from numpy.compat import isfileobj
from numpy.testing import assert_, run_module_suite
-from numpy.testing.utils import tempdir
+from numpy.testing import tempdir
def test_isfileobj():
diff --git a/numpy/conftest.py b/numpy/conftest.py
new file mode 100644
index 000000000..ea4197049
--- /dev/null
+++ b/numpy/conftest.py
@@ -0,0 +1,54 @@
+"""
+Pytest configuration and fixtures for the Numpy test suite.
+"""
+from __future__ import division, absolute_import, print_function
+
+import warnings
+import pytest
+
+from numpy.core.multiarray_tests import get_fpu_mode
+
+
+_old_fpu_mode = None
+_collect_results = {}
+
+
+@pytest.hookimpl()
+def pytest_itemcollected(item):
+ """
+ Check FPU precision mode was not changed during test collection.
+
+ The clumsy way we do it here is mainly necessary because numpy
+ still uses yield tests, which can execute code at test collection
+ time.
+ """
+ global _old_fpu_mode
+
+ mode = get_fpu_mode()
+
+ if _old_fpu_mode is None:
+ _old_fpu_mode = mode
+ elif mode != _old_fpu_mode:
+ _collect_results[item] = (_old_fpu_mode, mode)
+ _old_fpu_mode = mode
+
+
+@pytest.fixture(scope="function", autouse=True)
+def check_fpu_mode(request):
+ """
+ Check FPU precision mode was not changed during the test.
+ """
+ old_mode = get_fpu_mode()
+ yield
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " during the test".format(old_mode, new_mode))
+
+ collect_result = _collect_results.get(request.node)
+ if collect_result is not None:
+ old_mode, new_mode = collect_result
+ raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
+ " when collecting the test".format(old_mode,
+ new_mode))
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index b3a6967e1..5ad27fbe1 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -71,7 +71,7 @@ __all__ += shape_base.__all__
__all__ += einsumfunc.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 10fcbfdfe..004c2762b 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -206,6 +206,8 @@ class dummy_ctype(object):
return self._cls(other)
def __eq__(self, other):
return self._cls == other._cls
+ def __ne__(self, other):
+ return self._cls != other._cls
def _getintp_ctype():
val = _getintp_ctype.cache
@@ -281,20 +283,26 @@ class _ctypes(object):
_as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
-# Given a datatype and an order object
-# return a new names tuple
-# with the order indicated
def _newnames(datatype, order):
+ """
+ Given a datatype and an order object, return a new names tuple, with the
+ order indicated
+ """
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
+ seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
- raise ValueError("unknown field name: %s" % (name,))
+ if name in seen:
+ raise ValueError("duplicate field name: %s" % (name,))
+ else:
+ raise ValueError("unknown field name: %s" % (name,))
+ seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError("unsupported order value: %s" % (order,))
@@ -695,10 +703,11 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
for k, v in kwargs.items()])
args = inputs + kwargs.get('out', ())
types_string = ', '.join(repr(type(arg).__name__) for arg in args)
- return ('operand type(s) do not implement __array_ufunc__'
- '({!r}, {!r}, {}): {}'
+ return ('operand type(s) all returned NotImplemented from '
+ '__array_ufunc__({!r}, {!r}, {}): {}'
.format(ufunc, method, args_string, types_string))
+
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index e54f4602a..46fbc9e5d 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -5,7 +5,8 @@ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
-__all__ = ["array2string", "set_printoptions", "get_printoptions"]
+__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
+ "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
@@ -15,6 +16,13 @@ __docformat__ = 'restructuredtext'
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
+
+# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
+# scalars but for different purposes. scalartypes.c.src has str/reprs for when
+# the scalar is printed on its own, while arrayprint.py has strs for when
+# scalars are printed inside an ndarray. Only the latter strs are currently
+# user-customizable.
+
import sys
import functools
if sys.version_info[0] >= 3:
@@ -28,12 +36,17 @@ else:
except ImportError:
from dummy_thread import get_ident
+import numpy as np
from . import numerictypes as _nt
from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
+from . import multiarray
from .multiarray import (array, format_longfloat, datetime_as_string,
- datetime_data, dtype)
-from .fromnumeric import ravel
-from .numeric import asarray
+ datetime_data, dtype, ndarray)
+from .fromnumeric import ravel, any
+from .numeric import concatenate, asarray, errstate
+from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
+ flexible)
+import warnings
if sys.version_info[0] >= 3:
_MAXINT = sys.maxsize
@@ -218,10 +231,9 @@ def get_printoptions():
return d
def _leading_trailing(a):
- from . import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
- b = _nc.concatenate((a[:_summaryEdgeItems],
+ b = concatenate((a[:_summaryEdgeItems],
a[-_summaryEdgeItems:]))
else:
b = a
@@ -233,7 +245,7 @@ def _leading_trailing(a):
min(len(a), _summaryEdgeItems), 0, -1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
- b = _nc.concatenate(tuple(l))
+ b = concatenate(tuple(l))
return b
def _boolFormatter(x):
@@ -399,7 +411,7 @@ def _recursive_guard(fillvalue='...'):
@_recursive_guard()
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
- style=repr, formatter=None):
+ style=np._NoValue, formatter=None):
"""
Return a string representation of an array.
@@ -425,9 +437,10 @@ def array2string(a, max_line_width=None, precision=None,
The length of the prefix string is used to align the
output correctly.
- style : function, optional
- A function that accepts an ndarray and returns a string. Used only
- when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
+ style : _NoValue, optional
+ Has no effect, do not use.
+
+ .. deprecated:: 1.14.0
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
@@ -494,6 +507,11 @@ def array2string(a, max_line_width=None, precision=None,
"""
+ # Deprecation 05-16-2017 v1.14
+ if style is not np._NoValue:
+ warnings.warn("'style' argument is deprecated and no longer functional",
+ DeprecationWarning, stacklevel=3)
+
if max_line_width is None:
max_line_width = _line_width
@@ -506,16 +524,7 @@ def array2string(a, max_line_width=None, precision=None,
if formatter is None:
formatter = _formatter
- if a.shape == ():
- x = a.item()
- if a.dtype.fields is not None:
- arr = array([x], dtype=a.dtype)
- format_function = _get_format_function(
- arr, precision, suppress_small, formatter)
- lst = format_function(arr[0])
- else:
- lst = style(x)
- elif functools.reduce(product, a.shape) == 0:
+ if a.size == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
@@ -542,7 +551,7 @@ def _formatArray(a, format_function, rank, max_line_len,
"""
if rank == 0:
- raise ValueError("rank shouldn't be zero.")
+ return format_function(a[()]) + '\n'
if summary_insert and 2*edge_items < len(a):
leading_items = edge_items
@@ -615,9 +624,7 @@ class FloatFormat(object):
pass
def fillFormat(self, data):
- from . import numeric as _nc
-
- with _nc.errstate(all='ignore'):
+ with errstate(all='ignore'):
special = isnan(data) | isinf(data)
valid = not_equal(data, 0) & ~special
non_zero = absolute(data.compress(valid))
@@ -652,7 +659,7 @@ class FloatFormat(object):
precision = 0
precision = min(self.precision, precision)
self.max_str_len = len(str(int(max_val))) + precision + 2
- if _nc.any(special):
+ if any(special):
self.max_str_len = max(self.max_str_len,
len(_nan_str),
len(_inf_str)+1)
@@ -666,9 +673,7 @@ class FloatFormat(object):
self.format = format
def __call__(self, x, strip_zeros=True):
- from . import numeric as _nc
-
- with _nc.errstate(invalid='ignore'):
+ with errstate(invalid='ignore'):
if isnan(x):
if self.sign:
return self.special_fmt % ('+' + _nan_str,)
@@ -809,22 +814,21 @@ class DatetimeFormat(object):
class TimedeltaFormat(object):
def __init__(self, data):
- if data.dtype.kind == 'm':
- nat_value = array(['NaT'], dtype=data.dtype)[0]
- int_dtype = dtype(data.dtype.byteorder + 'i8')
- int_view = data.view(int_dtype)
- v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
- if len(v) > 0:
- # Max str length of non-NaT elements
- max_str_len = max(len(str(maximum.reduce(v))),
- len(str(minimum.reduce(v))))
- else:
- max_str_len = 0
- if len(v) < len(data):
- # data contains a NaT
- max_str_len = max(max_str_len, 5)
- self.format = '%' + str(max_str_len) + 'd'
- self._nat = "'NaT'".rjust(max_str_len)
+ nat_value = array(['NaT'], dtype=data.dtype)[0]
+ int_dtype = dtype(data.dtype.byteorder + 'i8')
+ int_view = data.view(int_dtype)
+ v = int_view[not_equal(int_view, nat_value.view(int_dtype))]
+ if len(v) > 0:
+ # Max str length of non-NaT elements
+ max_str_len = max(len(str(maximum.reduce(v))),
+ len(str(minimum.reduce(v))))
+ else:
+ max_str_len = 0
+ if len(v) < len(data):
+ # data contains a NaT
+ max_str_len = max(max_str_len, 5)
+ self.format = '%' + str(max_str_len) + 'd'
+ self._nat = "'NaT'".rjust(max_str_len)
def __call__(self, x):
# TODO: After NAT == NAT deprecation should be simplified:
@@ -854,3 +858,180 @@ class StructureFormat(object):
for field, format_function in zip(x, self.format_functions):
s += format_function(field) + ", "
return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")"
+
+
+_typelessdata = [int_, float_, complex_]
+if issubclass(intc, int):
+ _typelessdata.append(intc)
+if issubclass(longlong, int):
+ _typelessdata.append(longlong)
+
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small
+ is defined by `precision`, if the precision is 8 then
+ numbers smaller than 5e-9 are represented as zero.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
+
+ See Also
+ --------
+ array_str, array2string, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([ 0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([ 0.000001, 0. , 2. , 3. ])'
+
+ """
+ if type(arr) is not ndarray:
+ class_name = type(arr).__name__
+ else:
+ class_name = "array"
+
+ if arr.size > 0 or arr.shape == (0,):
+ lst = array2string(arr, max_line_width, precision, suppress_small,
+ ', ', class_name + "(")
+ else: # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(arr.shape),)
+
+ skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
+
+ if skipdtype:
+ return "%s(%s)" % (class_name, lst)
+ else:
+ typename = arr.dtype.name
+ # Quote typename in the output if it is "complex".
+ if typename and not (typename[0].isalpha() and typename.isalnum()):
+ typename = "'%s'" % typename
+
+ lf = ' '
+ if issubclass(arr.dtype.type, flexible):
+ if arr.dtype.names:
+ typename = "%s" % str(arr.dtype)
+ else:
+ typename = "'%s'" % str(arr.dtype)
+ lf = '\n'+' '*len(class_name + "(")
+ return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
+
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`. The
+ default is, indirectly, 75.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+def set_string_function(f, repr=True):
+ """
+ Set a Python function to be used when pretty printing arrays.
+
+ Parameters
+ ----------
+ f : function or None
+ Function to be used to pretty print arrays. The function should expect
+ a single array argument and return a string of the representation of
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ Examples
+ --------
+ >>> def pprint(arr):
+ ... return 'HA! - What are you going to do now?'
+ ...
+ >>> np.set_string_function(pprint)
+ >>> a = np.arange(10)
+ >>> a
+ HA! - What are you going to do now?
+ >>> print(a)
+ [0 1 2 3 4 5 6 7 8 9]
+
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ `repr` affects either pretty printing or normal string representation.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([ 0, 1, 2, 3])'
+
+ """
+ if f is None:
+ if repr:
+ return multiarray.set_string_function(array_repr, 1)
+ else:
+ return multiarray.set_string_function(array_str, 0)
+ else:
+ return multiarray.set_string_function(f, repr)
+
+set_string_function(array_str, 0)
+set_string_function(array_repr, 1)
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 54140f24a..6e6547129 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -36,4 +36,5 @@
0x0000000a = 9b8bce614655d3eb02acddcb508203cb
# Version 11 (NumPy 1.13) Added PyArray_MapIterArrayCopyIfOverlap
+# Version 11 (NumPy 1.14) No Change
0x0000000b = edb1ba83730c650fd9bc5772a919cda7
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index b618dedf5..42c564a97 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -52,6 +52,7 @@ API_FILES = [join('multiarray', 'alloc.c'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'shape.c'),
+ join('multiarray', 'strfuncs.c'),
join('multiarray', 'usertypes.c'),
join('umath', 'loops.c.src'),
join('umath', 'ufunc_object.c'),
@@ -71,7 +72,7 @@ def _repl(str):
return str.replace('Bool', 'npy_bool')
-class StealRef:
+class StealRef(object):
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -82,7 +83,7 @@ class StealRef:
return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg
-class NonNull:
+class NonNull(object):
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -271,7 +272,7 @@ def find_functions(filename, tag='API'):
state = SCANNING
else:
function_args.append(line)
- except:
+ except Exception:
print(filename, lineno + 1)
raise
fo.close()
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 79d774a89..b4aeaa277 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -220,8 +220,13 @@ def do_generate_api(targets, sources):
multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)
if len(multiarray_api_dict) != len(multiarray_api_index):
- raise AssertionError("Multiarray API size mismatch %d %d" %
- (len(multiarray_api_dict), len(multiarray_api_index)))
+ keys_dict = set(multiarray_api_dict.keys())
+ keys_index = set(multiarray_api_index.keys())
+ raise AssertionError(
+ "Multiarray API size mismatch - "
+ "index has extra keys {}, dict has extra keys {}"
+ .format(keys_index - keys_dict, keys_dict - keys_index)
+ )
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 2241618f7..af058b4be 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -314,9 +314,7 @@ defdict = {
'true_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.true_divide'),
- 'PyUFunc_DivisionTypeResolver',
- TD('bBhH', out='d'),
- TD('iIlLqQ', out='d'),
+ 'PyUFunc_TrueDivisionTypeResolver',
TD(flts+cmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 7beda59f2..6aae57234 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -19,9 +19,11 @@ def get(name):
# common parameter text to all ufuncs
_params_text = textwrap.dedent("""
- out : ndarray or tuple of ndarray, optional
- Alternate array object(s) in which to put the result; if provided, it
- must have a shape that the inputs broadcast to.
+ out : ndarray, None, or tuple of ndarray and None, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that the inputs broadcast to. If not provided or `None`,
+ a freshly-allocated array is returned. A tuple (possible only as a
+ keyword argument) must have length equal to the number of outputs.
where : array_like, optional
Values of True indicate to calculate the ufunc at that position, values
of False indicate to leave the value in the output alone.
@@ -926,25 +928,24 @@ add_newdoc('numpy.core.umath', 'heaviside',
The Heaviside step function is defined as::
- 0 if x < 0
- heaviside(x, h0) = h0 if x == 0
- 1 if x > 0
+ 0 if x1 < 0
+ heaviside(x1, x2) = x2 if x1 == 0
+ 1 if x1 > 0
- where `h0` is often taken to be 0.5, but 0 and 1 are also sometimes used.
+ where `x2` is often taken to be 0.5, but 0 and 1 are also sometimes used.
Parameters
----------
- x : array_like
+ x1 : array_like
Input values.
- $PARAMS
- h0 : array_like
- The value of the function at x = 0.
+ x2 : array_like
+ The value of the function when x1 is 0.
$PARAMS
Returns
-------
out : ndarray
- The output array, element-wise Heaviside step function of `x`.
+ The output array, element-wise Heaviside step function of `x1`.
Notes
-----
@@ -1720,6 +1721,7 @@ add_newdoc('numpy.core.umath', 'isnat',
----------
x : array_like
Input array with datetime or timedelta data type.
+ $PARAMS
Returns
-------
@@ -2885,8 +2887,18 @@ add_newdoc('numpy.core.umath', 'remainder',
Computes the remainder complementary to the `floor_divide` function. It is
equivalent to the Python modulus operator``x1 % x2`` and has the same sign
- as the divisor `x2`. It should not be confused with the Matlab(TM) ``rem``
- function.
+ as the divisor `x2`. The MATLAB function equivalent to ``np.remainder``
+ is ``mod``.
+
+ .. warning::
+
+ This should not be confused with:
+
+ * Python 3.7's `math.remainder` and C's ``remainder``, which
+ computes the IEEE remainder, which are the complement to
+ ``round(x1 / x2)``.
+ * The MATLAB ``rem`` function and or the C ``%`` operator which is the
+ complement to ``int(x1 / x2)``.
Parameters
----------
@@ -2906,7 +2918,7 @@ add_newdoc('numpy.core.umath', 'remainder',
--------
floor_divide : Equivalent of Python ``//`` operator.
divmod : Simultaneous floor division and remainder.
- fmod : Equivalent of the Matlab(TM) ``rem`` function.
+ fmod : Equivalent of the MATLAB ``rem`` function.
divide, floor
Notes
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index e242363a4..37d691027 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -5,7 +5,7 @@ Implementation of optimized einsum.
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
-from numpy.core.numeric import asarray, asanyarray, result_type
+from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot
__all__ = ['einsum', 'einsum_path']
@@ -256,6 +256,114 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
return path
+def _can_dot(inputs, result, idx_removed):
+ """
+ Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
+
+ Parameters
+ ----------
+ inputs : list of str
+ Specifies the subscripts for summation.
+ result : str
+ Resulting summation.
+ idx_removed : set
+ Indices that are removed in the summation
+
+
+ Returns
+ -------
+ type : bool
+ Returns true if BLAS should and can be used, else False
+
+ Notes
+ -----
+ If the operations is BLAS level 1 or 2 and is not already aligned
+ we default back to einsum as the memory movement to copy is more
+ costly than the operation itself.
+
+
+ Examples
+ --------
+
+ # Standard GEMM operation
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
+ True
+
+ # Can use the standard BLAS, but requires odd data movement
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
+ False
+
+ # DDOT where the memory is not aligned
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
+ False
+
+ """
+
+ # All `dot` calls remove indices
+ if len(idx_removed) == 0:
+ return False
+
+ # BLAS can only handle two operands
+ if len(inputs) != 2:
+ return False
+
+ # Build a few temporaries
+ input_left, input_right = inputs
+ set_left = set(input_left)
+ set_right = set(input_right)
+ keep_left = set_left - idx_removed
+ keep_right = set_right - idx_removed
+ rs = len(idx_removed)
+
+ # Indices must overlap between the two operands
+ if not len(set_left & set_right):
+ return False
+
+ # We cannot have duplicate indices ("ijj, jk -> ik")
+ if (len(set_left) != len(input_left)) or (len(set_right) != len(input_right)):
+ return False
+
+ # Cannot handle partial inner ("ij, ji -> i")
+ if len(keep_left & keep_right):
+ return False
+
+ # At this point we are a DOT, GEMV, or GEMM operation
+
+ # Handle inner products
+
+ # DDOT with aligned data
+ if input_left == input_right:
+ return True
+
+ # DDOT without aligned data (better to use einsum)
+ if set_left == set_right:
+ return False
+
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
+
+ # GEMM or GEMV no transpose
+ if input_left[-rs:] == input_right[:rs]:
+ return True
+
+ # GEMM or GEMV transpose both
+ if input_left[:rs] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose right
+ if input_left[-rs:] == input_right[-rs:]:
+ return True
+
+ # GEMM or GEMV transpose left
+ if input_left[:rs] == input_right[:rs]:
+ return True
+
+ # Einsum is faster than GEMV if we have to copy data
+ if not keep_left or not keep_right:
+ return False
+
+ # We are a matrix-matrix product, but we need to copy data
+ return True
+
def _parse_einsum_input(operands):
"""
A reproduction of einsum c side einsum parsing in python.
@@ -542,7 +650,7 @@ def einsum_path(*operands, **kwargs):
" %s" % unknown_kwargs)
# Figure out what the path really is
- path_type = kwargs.pop('optimize', False)
+ path_type = kwargs.pop('optimize', True)
if path_type is True:
path_type = 'greedy'
if path_type is None:
@@ -653,6 +761,8 @@ def einsum_path(*operands, **kwargs):
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
+ do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
+
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
@@ -663,7 +773,7 @@ def einsum_path(*operands, **kwargs):
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
- contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
+ contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
@@ -690,7 +800,7 @@ def einsum_path(*operands, **kwargs):
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
- inds, idx_rm, einsum_str, remaining = contraction
+ inds, idx_rm, einsum_str, remaining, blas = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
@@ -748,7 +858,7 @@ def einsum(*operands, **kwargs):
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
- function. See ``np.einsum_path`` for more details. Default is False.
+ function. See ``np.einsum_path`` for more details. Default is True.
Returns
-------
@@ -969,19 +1079,54 @@ def einsum(*operands, **kwargs):
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
+
+ handle_out = False
+
# Start contraction loop
for num, contraction in enumerate(contraction_list):
- inds, idx_rm, einsum_str, remaining = contraction
+ inds, idx_rm, einsum_str, remaining, blas = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
- # If out was specified
+ # Do we need to deal with the output?
if specified_out and ((num + 1) == len(contraction_list)):
- einsum_kwargs["out"] = out_array
+ handle_out = True
+
+ # Call tensordot
+ if blas:
+
+ # Checks have already been handled
+ input_str, results_index = einsum_str.split('->')
+ input_left, input_right = input_str.split(',')
+
+ tensor_result = input_left + input_right
+ for s in idx_rm:
+ tensor_result = tensor_result.replace(s, "")
+
+ # Find indices to contract over
+ left_pos, right_pos = [], []
+ for s in idx_rm:
+ left_pos.append(input_left.find(s))
+ right_pos.append(input_right.find(s))
+
+ # Contract!
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
+
+ # Build a new view if needed
+ if (tensor_result != results_index) or handle_out:
+ if handle_out:
+ einsum_kwargs["out"] = out_array
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
+
+ # Call einsum
+ else:
+ # If out was specified
+ if handle_out:
+ einsum_kwargs["out"] = out_array
- # Do the contraction
- new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
+ # Do the contraction
+ new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
# Append new items and derefernce what we can
operands.append(new_view)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index a8c2fd2fb..6f7c45859 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -28,12 +28,7 @@ __all__ = [
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
-
-try:
- _gentype = types.GeneratorType
-except AttributeError:
- _gentype = type(None)
-
+_gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
@@ -1120,18 +1115,16 @@ def resize(a, new_shape):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
- if not Na:
- return mu.zeros(new_shape, a.dtype)
total_size = um.multiply.reduce(new_shape)
+ if Na == 0 or total_size == 0:
+ return mu.zeros(new_shape, a.dtype)
+
n_copies = int(total_size / Na)
extra = total_size % Na
- if total_size == 0:
- return a[:0]
-
if extra != 0:
- n_copies = n_copies+1
- extra = Na-extra
+ n_copies = n_copies + 1
+ extra = Na - extra
a = concatenate((a,)*n_copies)
if extra > 0:
@@ -1531,14 +1524,15 @@ def nonzero(a):
[0, 2, 0],
[1, 1, 0]])
>>> np.nonzero(x)
- (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
+ (array([0, 1, 2, 2]), array([0, 1, 0, 1]))
>>> x[np.nonzero(x)]
- array([ 1., 1., 1.])
+ array([1, 2, 1, 1])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
- [2, 2]])
+ [2, 0],
+ [2, 1])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
@@ -2248,7 +2242,7 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
- >>> b = np.arange(5, dtype=np.float)
+ >>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
@@ -2349,7 +2343,7 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
- >>> b = np.arange(5, dtype=np.float)
+ >>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
@@ -2499,7 +2493,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
- >>> np.prod(x).dtype == np.int
+ >>> np.prod(x).dtype == int
True
"""
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index d6757bb74..0415e16ac 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -292,13 +292,13 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
Negative, decreasing, and complex inputs are allowed:
- >>> geomspace(1000, 1, num=4)
+ >>> np.geomspace(1000, 1, num=4)
array([ 1000., 100., 10., 1.])
- >>> geomspace(-1000, -1, num=4)
+ >>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
- >>> geomspace(1j, 1000j, num=4) # Straight line
+ >>> np.geomspace(1j, 1000j, num=4) # Straight line
array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
- >>> geomspace(-1+0j, 1+0j, num=5) # Circle
+ >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
array([-1.00000000+0.j , -0.70710678+0.70710678j,
0.00000000+1.j , 0.70710678+0.70710678j,
1.00000000+0.j ])
@@ -339,7 +339,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
# complex and another is negative and log would produce NaN otherwise
start = start + (stop - stop)
stop = stop + (start - start)
- if _nx.issubdtype(dtype, complex):
+ if _nx.issubdtype(dtype, _nx.complexfloating):
start = start + 0j
stop = stop + 0j
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 5b5e69352..e450a660d 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -68,7 +68,8 @@ class MachArLike(object):
params = _MACHAR_PARAMS[ftype]
float_conv = lambda v: array([v], ftype)
float_to_float = lambda v : _fr1(float_conv(v))
- float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
+ self._float_to_str = lambda v: (params['fmt'] %
+ array(_fr0(v)[0], ftype))
self.title = params['title']
# Parameter types same as for discovered MachAr object.
self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
@@ -79,11 +80,30 @@ class MachArLike(object):
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
- self._str_eps = float_to_str(self.eps)
- self._str_epsneg = float_to_str(self.epsneg)
- self._str_xmin = float_to_str(self.xmin)
- self._str_xmax = float_to_str(self.xmax)
- self._str_resolution = float_to_str(self.resolution)
+
+ # Properties below to delay need for float_to_str, and thus avoid circular
+ # imports during early numpy module loading.
+ # See: https://github.com/numpy/numpy/pull/8983#discussion_r115838683
+
+ @property
+ def _str_eps(self):
+ return self._float_to_str(self.eps)
+
+ @property
+ def _str_epsneg(self):
+ return self._float_to_str(self.epsneg)
+
+ @property
+ def _str_xmin(self):
+ return self._float_to_str(self.xmin)
+
+ @property
+ def _str_xmax(self):
+ return self._float_to_str(self.xmax)
+
+ @property
+ def _str_resolution(self):
+ return self._float_to_str(self.resolution)
# Known parameters for float16
@@ -538,13 +558,3 @@ class iinfo(object):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
self.min, self.max, self.dtype)
-if __name__ == '__main__':
- f = finfo(ntypes.single)
- print('single epsilon:', f.eps)
- print('single tiny:', f.tiny)
- f = finfo(ntypes.float)
- print('float epsilon:', f.eps)
- print('float tiny:', f.tiny)
- f = finfo(ntypes.longfloat)
- print('longfloat epsilon:', f.eps)
- print('longfloat tiny:', f.tiny)
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 60abae4e0..84653ea18 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -15,6 +15,8 @@
* NPY_CPU_ARMEB
* NPY_CPU_SH_LE
* NPY_CPU_SH_BE
+ * NPY_CPU_ARCEL
+ * NPY_CPU_ARCEB
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
@@ -76,6 +78,10 @@
#define NPY_CPU_AARCH64
#elif defined(__mc68000__)
#define NPY_CPU_M68K
+#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_ARCEL
+#elif defined(__arc__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_ARCEB
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index e34b1d97e..1a42121db 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -45,7 +45,8 @@
|| defined(NPY_CPU_AARCH64) \
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL) \
- || defined(NPY_CPU_PPC64LE)
+ || defined(NPY_CPU_PPC64LE) \
+ || defined(NPY_CPU_ARCEL)
#define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
#elif defined(NPY_CPU_PPC) \
|| defined(NPY_CPU_SPARC) \
@@ -56,7 +57,8 @@
|| defined(NPY_CPU_SH_BE) \
|| defined(NPY_CPU_MIPSEB) \
|| defined(NPY_CPU_OR1K) \
- || defined(NPY_CPU_M68K)
+ || defined(NPY_CPU_M68K) \
+ || defined(NPY_CPU_ARCEB)
#define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
#error Unknown CPU: can not set endianness
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index 701f02c6e..04a3738b9 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -34,5 +34,7 @@
#define NPY_1_10_API_VERSION 0x00000008
#define NPY_1_11_API_VERSION 0x00000008
#define NPY_1_12_API_VERSION 0x00000008
+#define NPY_1_13_API_VERSION 0x00000008
+#define NPY_1_14_API_VERSION 0x00000008
#endif
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 6b4a93ce0..aa3a4076c 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -22,9 +22,9 @@ if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
from . import umath
-from .umath import (invert, sin, UFUNC_BUFSIZE_DEFAULT, ERR_IGNORE,
- ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG,
- ERR_DEFAULT, PINF, NAN)
+from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
+ ERR_LOG, ERR_DEFAULT, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._internal import TooHardError, AxisError
@@ -46,28 +46,23 @@ loads = pickle.loads
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
- 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
- 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
- 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
- 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
- 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
- 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
- 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
- 'outer', 'vdot', 'roll',
- 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'array2string',
- 'get_printoptions', 'set_printoptions', 'array_repr', 'array_str',
- 'set_string_function', 'little_endian', 'require', 'fromiter',
- 'array_equal', 'array_equiv', 'indices', 'fromfunction', 'isclose', 'load',
- 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity',
- 'allclose', 'compare_chararrays', 'putmask', 'seterr', 'geterr',
- 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall', 'errstate',
- 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_',
- 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
- 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like', 'matmul',
- 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT',
- 'TooHardError', 'AxisError'
- ]
-
+ 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
+ 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
+ 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
+ 'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
+ 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
+ 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
+ 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
+ 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
+ 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
+ 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
+ 'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
+ 'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
+ 'MAY_SHARE_EXACT', 'TooHardError', 'AxisError' ]
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
@@ -133,7 +128,7 @@ def zeros_like(a, dtype=None, order='K', subok=True):
array([[0, 0, 0],
[0, 0, 0]])
- >>> y = np.arange(3, dtype=np.float)
+ >>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.zeros_like(y)
@@ -176,7 +171,7 @@ def ones(shape, dtype=None, order='C'):
>>> np.ones(5)
array([ 1., 1., 1., 1., 1.])
- >>> np.ones((5,), dtype=np.int)
+ >>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
@@ -243,7 +238,7 @@ def ones_like(a, dtype=None, order='K', subok=True):
array([[1, 1, 1],
[1, 1, 1]])
- >>> y = np.arange(3, dtype=np.float)
+ >>> y = np.arange(3, dtype=float)
>>> y
array([ 0., 1., 2.])
>>> np.ones_like(y)
@@ -344,7 +339,7 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
Examples
--------
- >>> x = np.arange(6, dtype=np.int)
+ >>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
@@ -363,20 +358,6 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
multiarray.copyto(res, fill_value, casting='unsafe')
return res
-
-def extend_all(module):
- adict = {}
- for a in __all__:
- adict[a] = 1
- try:
- mall = getattr(module, '__all__')
- except AttributeError:
- mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
- for a in mall:
- if a not in adict:
- __all__.append(a)
-
-
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
@@ -436,8 +417,7 @@ def count_nonzero(a, axis=None):
if issubdtype(a.dtype, np.number):
return (a != 0).sum(axis=axis, dtype=np.intp)
- if (issubdtype(a.dtype, np.string_) or
- issubdtype(a.dtype, np.unicode_)):
+ if issubdtype(a.dtype, np.character):
nullstr = a.dtype.type('')
return (a != nullstr).sum(axis=axis, dtype=np.intp)
@@ -445,7 +425,7 @@ def count_nonzero(a, axis=None):
counts = np.apply_along_axis(multiarray.count_nonzero, axis[0], a)
if axis.size == 1:
- return counts
+ return counts.astype(np.intp, copy=False)
else:
# for subsequent axis numbers, that number decreases
# by one in this new 'counts' array if it was larger
@@ -838,7 +818,7 @@ def argwhere(a):
``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
The output of ``argwhere`` is not suitable for indexing arrays.
- For this purpose use ``where(a)`` instead.
+ For this purpose use ``nonzero(a)`` instead.
Examples
--------
@@ -1106,7 +1086,10 @@ def outer(a, b, out=None):
See also
--------
- inner, einsum
+ inner
+ einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
+ ufunc.outer : A generalization to N dimensions and other operations.
+ ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
@@ -1278,7 +1261,7 @@ def tensordot(a, b, axes=2):
"""
try:
iter(axes)
- except:
+ except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
@@ -1323,7 +1306,7 @@ def tensordot(a, b, axes=2):
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
- newshape_a = (-1, N2)
+ newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
@@ -1331,7 +1314,7 @@ def tensordot(a, b, axes=2):
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
- newshape_b = (N2, -1)
+ newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
@@ -1433,6 +1416,10 @@ def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
+ This function continues to be supported for backward compatibility, but you
+ should prefer `moveaxis`. The `moveaxis` function was added in NumPy
+ 1.11.
+
Parameters
----------
a : ndarray
@@ -1548,7 +1535,7 @@ def moveaxis(a, source, destination):
Other axes remain in their original order.
- .. versionadded::1.11.0
+ .. versionadded:: 1.11.0
Parameters
----------
@@ -1615,7 +1602,7 @@ def moveaxis(a, source, destination):
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
- return rollaxis(a, axis, 0)
+ return moveaxis(a, axis, 0)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
@@ -1740,8 +1727,8 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
- a = rollaxis(a, axisa, a.ndim)
- b = rollaxis(b, axisb, b.ndim)
+ a = moveaxis(a, axisa, -1)
+ b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
@@ -1812,195 +1799,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
- # This works because we are moving the last axis
- return rollaxis(cp, -1, axisc)
-
-
-# Use numarray's printing function
-from .arrayprint import array2string, get_printoptions, set_printoptions
-
-
-_typelessdata = [int_, float_, complex_]
-if issubclass(intc, int):
- _typelessdata.append(intc)
-
-
-if issubclass(longlong, int):
- _typelessdata.append(longlong)
-
-
-def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
- """
- Return the string representation of an array.
-
- Parameters
- ----------
- arr : ndarray
- Input array.
- max_line_width : int, optional
- The maximum number of columns the string should span. Newline
- characters split the string appropriately after array elements.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent very small numbers as zero, default is False. Very small
- is defined by `precision`, if the precision is 8 then
- numbers smaller than 5e-9 are represented as zero.
-
- Returns
- -------
- string : str
- The string representation of an array.
-
- See Also
- --------
- array_str, array2string, set_printoptions
-
- Examples
- --------
- >>> np.array_repr(np.array([1,2]))
- 'array([1, 2])'
- >>> np.array_repr(np.ma.array([0.]))
- 'MaskedArray([ 0.])'
- >>> np.array_repr(np.array([], np.int32))
- 'array([], dtype=int32)'
-
- >>> x = np.array([1e-6, 4e-7, 2, 3])
- >>> np.array_repr(x, precision=6, suppress_small=True)
- 'array([ 0.000001, 0. , 2. , 3. ])'
-
- """
- if type(arr) is not ndarray:
- class_name = type(arr).__name__
- else:
- class_name = "array"
-
- if arr.size > 0 or arr.shape == (0,):
- lst = array2string(arr, max_line_width, precision, suppress_small,
- ', ', class_name + "(")
- else: # show zero-length shape unless it is (0,)
- lst = "[], shape=%s" % (repr(arr.shape),)
-
- skipdtype = (arr.dtype.type in _typelessdata) and arr.size > 0
-
- if skipdtype:
- return "%s(%s)" % (class_name, lst)
- else:
- typename = arr.dtype.name
- # Quote typename in the output if it is "complex".
- if typename and not (typename[0].isalpha() and typename.isalnum()):
- typename = "'%s'" % typename
-
- lf = ' '
- if issubclass(arr.dtype.type, flexible):
- if arr.dtype.names:
- typename = "%s" % str(arr.dtype)
- else:
- typename = "'%s'" % str(arr.dtype)
- lf = '\n'+' '*len(class_name + "(")
- return "%s(%s,%sdtype=%s)" % (class_name, lst, lf, typename)
-
-
-def array_str(a, max_line_width=None, precision=None, suppress_small=None):
- """
- Return a string representation of the data in an array.
-
- The data in the array is returned as a single string. This function is
- similar to `array_repr`, the difference being that `array_repr` also
- returns information on the kind of array and its data type.
-
- Parameters
- ----------
- a : ndarray
- Input array.
- max_line_width : int, optional
- Inserts newlines if text is longer than `max_line_width`. The
- default is, indirectly, 75.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent numbers "very close" to zero as zero; default is False.
- Very close is defined by precision: if the precision is 8, e.g.,
- numbers smaller (in absolute value) than 5e-9 are represented as
- zero.
-
- See Also
- --------
- array2string, array_repr, set_printoptions
-
- Examples
- --------
- >>> np.array_str(np.arange(3))
- '[0 1 2]'
-
- """
- return array2string(a, max_line_width, precision, suppress_small, ' ', "", str)
-
-
-def set_string_function(f, repr=True):
- """
- Set a Python function to be used when pretty printing arrays.
-
- Parameters
- ----------
- f : function or None
- Function to be used to pretty print arrays. The function should expect
- a single array argument and return a string of the representation of
- the array. If None, the function is reset to the default NumPy function
- to print arrays.
- repr : bool, optional
- If True (default), the function for pretty printing (``__repr__``)
- is set, if False the function that returns the default string
- representation (``__str__``) is set.
-
- See Also
- --------
- set_printoptions, get_printoptions
-
- Examples
- --------
- >>> def pprint(arr):
- ... return 'HA! - What are you going to do now?'
- ...
- >>> np.set_string_function(pprint)
- >>> a = np.arange(10)
- >>> a
- HA! - What are you going to do now?
- >>> print(a)
- [0 1 2 3 4 5 6 7 8 9]
-
- We can reset the function to the default:
-
- >>> np.set_string_function(None)
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
-
- `repr` affects either pretty printing or normal string representation.
- Note that ``__repr__`` is still affected by setting ``__str__``
- because the width of each array element in the returned string becomes
- equal to the length of the result of ``__str__()``.
-
- >>> x = np.arange(4)
- >>> np.set_string_function(lambda x:'random', repr=False)
- >>> x.__str__()
- 'random'
- >>> x.__repr__()
- 'array([ 0, 1, 2, 3])'
-
- """
- if f is None:
- if repr:
- return multiarray.set_string_function(array_repr, 1)
- else:
- return multiarray.set_string_function(array_str, 0)
- else:
- return multiarray.set_string_function(f, repr)
-
-
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
+ return moveaxis(cp, -1, axisc)
little_endian = (sys.byteorder == 'little')
@@ -2154,6 +1953,8 @@ def isscalar(num):
False
>>> np.isscalar(False)
True
+ >>> np.isscalar('numpy')
+ True
"""
if isinstance(num, generic):
@@ -2521,13 +2322,10 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
- result = less_equal(abs(x-y), atol + rtol * abs(y))
- if isscalar(a) and isscalar(b):
- result = bool(result)
- return result
+ return less_equal(abs(x-y), atol + rtol * abs(y))
- x = array(a, copy=False, subok=True, ndmin=1)
- y = array(b, copy=False, subok=True, ndmin=1)
+ x = asanyarray(a)
+ y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
@@ -2554,12 +2352,11 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
+
+ # Needed to treat masked arrays correctly. = True would not work.
cond[both_nan] = both_nan[both_nan]
- if isscalar(a) and isscalar(b):
- return bool(cond)
- else:
- return cond
+ return cond[()] # Flatten 0d arrays to scalars
def array_equal(a1, a2):
@@ -2597,7 +2394,7 @@ def array_equal(a1, a2):
"""
try:
a1, a2 = asarray(a1), asarray(a2)
- except:
+ except Exception:
return False
if a1.shape != a2.shape:
return False
@@ -2641,11 +2438,11 @@ def array_equiv(a1, a2):
"""
try:
a1, a2 = asarray(a1), asarray(a2)
- except:
+ except Exception:
return False
try:
multiarray.broadcast(a1, a2)
- except:
+ except Exception:
return False
return bool(asarray(a1 == a2).all())
@@ -3085,10 +2882,26 @@ nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
+
+def extend_all(module):
+ adict = {}
+ for a in __all__:
+ adict[a] = 1
+ try:
+ mall = getattr(module, '__all__')
+ except AttributeError:
+ mall = [k for k in module.__dict__.keys() if not k.startswith('_')]
+ for a in mall:
+ if a not in adict:
+ __all__.append(a)
+
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
+from . import arrayprint
+from .arrayprint import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
+extend_all(arrayprint)
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 600d5af33..b61f5e7bc 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -85,6 +85,7 @@ from __future__ import division, absolute_import, print_function
import types as _types
import sys
import numbers
+import warnings
from numpy.compat import bytes, long
from numpy.core.multiarray import (
@@ -501,11 +502,11 @@ def maximum_sctype(t):
Examples
--------
- >>> np.maximum_sctype(np.int)
+ >>> np.maximum_sctype(int)
<type 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
<type 'numpy.uint64'>
- >>> np.maximum_sctype(np.complex)
+ >>> np.maximum_sctype(complex)
<type 'numpy.complex192'>
>>> np.maximum_sctype(str)
@@ -528,33 +529,6 @@ def maximum_sctype(t):
else:
return sctypes[base][-1]
-try:
- buffer_type = _types.BufferType
-except AttributeError:
- # Py3K
- buffer_type = memoryview
-
-_python_types = {int: 'int_',
- float: 'float_',
- complex: 'complex_',
- bool: 'bool_',
- bytes: 'bytes_',
- unicode: 'unicode_',
- buffer_type: 'void',
- }
-
-if sys.version_info[0] >= 3:
- def _python_type(t):
- """returns the type corresponding to a certain Python type"""
- if not isinstance(t, type):
- t = type(t)
- return allTypes[_python_types.get(t, 'object_')]
-else:
- def _python_type(t):
- """returns the type corresponding to a certain Python type"""
- if not isinstance(t, _types.TypeType):
- t = type(t)
- return allTypes[_python_types.get(t, 'object_')]
def issctype(rep):
"""
@@ -597,7 +571,7 @@ def issctype(rep):
if res and res != object_:
return True
return False
- except:
+ except Exception:
return False
def obj2sctype(rep, default=None):
@@ -639,22 +613,19 @@ def obj2sctype(rep, default=None):
<type 'list'>
"""
- try:
- if issubclass(rep, generic):
- return rep
- except TypeError:
- pass
- if isinstance(rep, dtype):
- return rep.type
- if isinstance(rep, type):
- return _python_type(rep)
+    # prevent abstract classes being upcast
+ if isinstance(rep, type) and issubclass(rep, generic):
+ return rep
+ # extract dtype from arrays
if isinstance(rep, ndarray):
return rep.dtype.type
+ # fall back on dtype to convert
try:
res = dtype(rep)
- except:
+ except Exception:
return default
- return res.type
+ else:
+ return res.type
def issubclass_(arg1, arg2):
@@ -684,9 +655,9 @@ def issubclass_(arg1, arg2):
Examples
--------
- >>> np.issubclass_(np.int32, np.int)
+ >>> np.issubclass_(np.int32, int)
True
- >>> np.issubclass_(np.int32, np.float)
+ >>> np.issubclass_(np.int32, float)
False
"""
@@ -717,9 +688,9 @@ def issubsctype(arg1, arg2):
--------
>>> np.issubsctype('S8', str)
True
- >>> np.issubsctype(np.array([1]), np.int)
+ >>> np.issubsctype(np.array([1]), int)
True
- >>> np.issubsctype(np.array([1]), np.float)
+ >>> np.issubsctype(np.array([1]), float)
False
"""
@@ -745,20 +716,46 @@ def issubdtype(arg1, arg2):
Examples
--------
- >>> np.issubdtype('S1', str)
+ >>> np.issubdtype('S1', np.string_)
True
>>> np.issubdtype(np.float64, np.float32)
False
"""
- if issubclass_(arg2, generic):
- return issubclass(dtype(arg1).type, arg2)
- mro = dtype(arg2).type.mro()
- if len(mro) > 1:
- val = mro[1]
- else:
- val = mro[0]
- return issubclass(dtype(arg1).type, val)
+ if not issubclass_(arg1, generic):
+ arg1 = dtype(arg1).type
+ if not issubclass_(arg2, generic):
+ arg2_orig = arg2
+ arg2 = dtype(arg2).type
+ if not isinstance(arg2_orig, dtype):
+ # weird deprecated behaviour, that tried to infer np.floating from
+ # float, and similar less obvious things, such as np.generic from
+ # basestring
+ mro = arg2.mro()
+ arg2 = mro[1] if len(mro) > 1 else mro[0]
+
+ def type_repr(x):
+ """ Helper to produce clear error messages """
+ if not isinstance(x, type):
+ return repr(x)
+ elif issubclass(x, generic):
+ return "np.{}".format(x.__name__)
+ else:
+ return x.__name__
+
+ # 1.14, 2017-08-01
+ warnings.warn(
+ "Conversion of the second argument of issubdtype from `{raw}` "
+ "to `{abstract}` is deprecated. In future, it will be treated "
+ "as `{concrete} == np.dtype({raw}).type`.".format(
+ raw=type_repr(arg2_orig),
+ abstract=type_repr(arg2),
+ concrete=type_repr(dtype(arg2_orig).type)
+ ),
+ FutureWarning, stacklevel=2
+ )
+
+ return issubclass(arg1, arg2)
# This dictionary allows look up based on any alias for an array data-type
@@ -821,7 +818,7 @@ def sctype2char(sctype):
Examples
--------
- >>> for sctype in [np.int32, np.float, np.complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l
d
@@ -958,6 +955,7 @@ def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
+ numbers.Number.register(number)
_register_types()
@@ -986,7 +984,7 @@ def find_common_type(array_types, scalar_types):
Examples
--------
- >>> np.find_common_type([], [np.int64, np.float32, np.complex])
+ >>> np.find_common_type([], [np.int64, np.float32, complex])
dtype('complex128')
>>> np.find_common_type([np.int64, np.float32], [])
dtype('float64')
@@ -1002,7 +1000,7 @@ def find_common_type(array_types, scalar_types):
Complex is of a different type, so it up-casts the float in the
`array_types` argument:
- >>> np.find_common_type([np.float32], [np.complex])
+ >>> np.find_common_type([np.float32], [complex])
dtype('complex128')
Type specifier strings are convertible to dtypes and can therefore
diff --git a/numpy/core/records.py b/numpy/core/records.py
index ecc293812..b6ff8bf65 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -80,7 +80,7 @@ def find_duplicate(list):
dup.append(list[i])
return dup
-class format_parser:
+class format_parser(object):
"""
Class to convert formats, names, titles description to a dtype.
@@ -473,7 +473,7 @@ class recarray(ndarray):
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
- except:
+ except Exception:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
@@ -487,7 +487,7 @@ class recarray(ndarray):
# internal attribute.
try:
object.__delattr__(self, attr)
- except:
+ except Exception:
return ret
try:
res = fielddict[attr][:2]
@@ -704,7 +704,7 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
- shape = (len(datastring) - offset) / itemsize
+ shape = (len(datastring) - offset) // itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index e057c5614..f56e705ab 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -187,7 +187,7 @@ def check_complex(config, mathlibs):
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365", stacklevel=2)
return priv, pub
- except:
+ except Exception:
# os.uname not available on all platforms. blanket except ugly but safe
pass
@@ -741,6 +741,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
+ join('src', 'multiarray', 'strfuncs.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
@@ -814,6 +815,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
+ join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
@@ -872,6 +874,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
+ join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'scalarmath.c.src'),
join('src', 'umath', 'ufunc_type_resolution.c'),
join('src', 'umath', 'override.c'),
@@ -931,7 +934,8 @@ def configuration(parent_package='',top_path=None):
sources=[join('src', 'multiarray', 'multiarray_tests.c.src'),
join('src', 'private', 'mem_overlap.c')],
depends=[join('src', 'private', 'mem_overlap.h'),
- join('src', 'private', 'npy_extint128.h')])
+ join('src', 'private', 'npy_extint128.h')],
+ libraries=['npymath'])
#######################################################################
# operand_flag_tests module #
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 1b3984063..094cd1841 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -39,6 +39,7 @@ C_ABI_VERSION = 0x01000009
# 0x0000000a - 1.11.x
# 0x0000000a - 1.12.x
# 0x0000000b - 1.13.x
+# 0x0000000b - 1.14.x
C_API_VERSION = 0x0000000b
class MismatchCAPIWarning(Warning):
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 6405ac634..026ad603a 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -293,7 +293,7 @@ def hstack(tup):
return _nx.concatenate(arrs, 1)
-def stack(arrays, axis=0):
+def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
@@ -309,6 +309,10 @@ def stack(arrays, axis=0):
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what stack would have returned if no
+ out argument were specified.
Returns
-------
@@ -358,7 +362,7 @@ def stack(arrays, axis=0):
sl = (slice(None),) * axis + (_nx.newaxis,)
expanded_arrays = [arr[sl] for arr in arrays]
- return _nx.concatenate(expanded_arrays, axis=axis)
+ return _nx.concatenate(expanded_arrays, axis=axis, out=out)
class _Recurser(object):
@@ -439,9 +443,9 @@ def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
- Blocks in the innermost lists are `concatenate`d along the last
- dimension (-1), then these are `concatenate`d along the second-last
- dimension (-2), and so on until the outermost list is reached
+ Blocks in the innermost lists are concatenated (see `concatenate`) along
+ the last dimension (-1), then these are concatenated along the
+ second-last dimension (-2), and so on until the outermost list is reached.
Blocks can be of any dimension, but will not be broadcasted using the normal
rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 345aed28a..3db1254d4 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -175,7 +175,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta);
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta);
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle);
/*
* Gets a tzoffset in minutes by calling the fromutc() function on
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index e145e3404..f8305d115 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -126,8 +126,11 @@ npy_free_cache(void * p, npy_uintp sz)
NPY_NO_EXPORT void *
npy_alloc_cache_dim(npy_uintp sz)
{
- /* dims + strides */
- if (NPY_UNLIKELY(sz < 2)) {
+ /*
+ * make sure any temporary allocation can be used for array metadata which
+ * uses one memory block for both dimensions and strides
+ */
+ if (sz < 2) {
sz = 2;
}
return _npy_alloc_cache(sz, sizeof(npy_intp), NBUCKETS_DIM, dimcache,
@@ -137,8 +140,8 @@ npy_alloc_cache_dim(npy_uintp sz)
NPY_NO_EXPORT void
npy_free_cache_dim(void * p, npy_uintp sz)
{
- /* dims + strides */
- if (NPY_UNLIKELY(sz < 2)) {
+ /* see npy_alloc_cache_dim */
+ if (sz < 2) {
sz = 2;
}
_npy_free_cache(p, sz, NBUCKETS_DIM, dimcache,
diff --git a/numpy/core/src/multiarray/alloc.h b/numpy/core/src/multiarray/alloc.h
index 39eb99544..2b69efc35 100644
--- a/numpy/core/src/multiarray/alloc.h
+++ b/numpy/core/src/multiarray/alloc.h
@@ -21,4 +21,16 @@ npy_alloc_cache_dim(npy_uintp sz);
NPY_NO_EXPORT void
npy_free_cache_dim(void * p, npy_uintp sd);
+static NPY_INLINE void
+npy_free_cache_dim_obj(PyArray_Dims dims)
+{
+ npy_free_cache_dim(dims.ptr, dims.len);
+}
+
+static NPY_INLINE void
+npy_free_cache_dim_array(PyArrayObject * arr)
+{
+ npy_free_cache_dim(PyArray_DIMS(arr), PyArray_NDIM(arr));
+}
+
#endif
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 28cc7031a..d1bce8c3b 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -293,7 +293,8 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
if (((PyArray_NDIM(dst) == 1 && PyArray_NDIM(src) >= 1 &&
PyArray_STRIDES(dst)[0] *
PyArray_STRIDES(src)[PyArray_NDIM(src) - 1] < 0) ||
- PyArray_NDIM(dst) > 1) && arrays_overlap(src, dst)) {
+ PyArray_NDIM(dst) > 1 || PyArray_HASFIELDS(dst)) &&
+ arrays_overlap(src, dst)) {
PyArrayObject *tmp;
/*
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index df3890201..36d48af9f 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -53,6 +53,7 @@ maintainer email: oliphant.travis@ieee.org
#include "alloc.h"
#include "mem_overlap.h"
#include "numpyos.h"
+#include "strfuncs.h"
#include "binop_override.h"
@@ -433,93 +434,6 @@ array_dealloc(PyArrayObject *self)
Py_TYPE(self)->tp_free((PyObject *)self);
}
-/*
- * Extend string. On failure, returns NULL and leaves *strp alone.
- * XXX we do this in multiple places; time for a string library?
- */
-static char *
-extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
-{
- char *str = *strp;
- Py_ssize_t new_cap;
-
- if (n >= *maxp - 16) {
- new_cap = *maxp * 2;
-
- if (new_cap <= *maxp) { /* overflow */
- return NULL;
- }
- str = PyArray_realloc(*strp, new_cap);
- if (str != NULL) {
- *strp = str;
- *maxp = new_cap;
- }
- }
- return str;
-}
-
-static int
-dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
-{
- PyArray_Descr *descr=PyArray_DESCR(self);
- PyObject *op = NULL, *sp = NULL;
- char *ostring;
- npy_intp i, N, ret = 0;
-
-#define CHECK_MEMORY do { \
- if (extend(string, *n, max_n) == NULL) { \
- ret = -1; \
- goto end; \
- } \
- } while (0)
-
- if (nd == 0) {
- if ((op = descr->f->getitem(data, self)) == NULL) {
- return -1;
- }
- sp = PyObject_Repr(op);
- if (sp == NULL) {
- ret = -1;
- goto end;
- }
- ostring = PyString_AsString(sp);
- N = PyString_Size(sp)*sizeof(char);
- *n += N;
- CHECK_MEMORY;
- memmove(*string + (*n - N), ostring, N);
- }
- else {
- CHECK_MEMORY;
- (*string)[*n] = '[';
- *n += 1;
- for (i = 0; i < dimensions[0]; i++) {
- if (dump_data(string, n, max_n,
- data + (*strides)*i,
- nd - 1, dimensions + 1,
- strides + 1, self) < 0) {
- return -1;
- }
- CHECK_MEMORY;
- if (i < dimensions[0] - 1) {
- (*string)[*n] = ',';
- (*string)[*n+1] = ' ';
- *n += 2;
- }
- }
- CHECK_MEMORY;
- (*string)[*n] = ']';
- *n += 1;
- }
-
-#undef CHECK_MEMORY
-
-end:
- Py_XDECREF(op);
- Py_XDECREF(sp);
- return ret;
-}
-
/*NUMPY_API
* Prints the raw data of the ndarray in a form useful for debugging
* low-level C issues.
@@ -582,72 +496,6 @@ PyArray_DebugPrint(PyArrayObject *obj)
fflush(stdout);
}
-static PyObject *
-array_repr_builtin(PyArrayObject *self, int repr)
-{
- PyObject *ret;
- char *string;
- /* max_n initial value is arbitrary, dump_data will extend it */
- Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
-
- if ((string = PyArray_malloc(max_n)) == NULL) {
- return PyErr_NoMemory();
- }
-
- if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
- PyArray_NDIM(self), PyArray_DIMS(self),
- PyArray_STRIDES(self), self) < 0) {
- PyArray_free(string);
- return NULL;
- }
-
- if (repr) {
- if (PyArray_ISEXTENDED(self)) {
- ret = PyUString_FromFormat("array(%s, '%c%d')",
- string,
- PyArray_DESCR(self)->type,
- PyArray_DESCR(self)->elsize);
- }
- else {
- ret = PyUString_FromFormat("array(%s, '%c')",
- string,
- PyArray_DESCR(self)->type);
- }
- }
- else {
- ret = PyUString_FromStringAndSize(string, n);
- }
-
- PyArray_free(string);
- return ret;
-}
-
-static PyObject *PyArray_StrFunction = NULL;
-static PyObject *PyArray_ReprFunction = NULL;
-
-/*NUMPY_API
- * Set the array print function to be a Python function.
- */
-NPY_NO_EXPORT void
-PyArray_SetStringFunction(PyObject *op, int repr)
-{
- if (repr) {
- /* Dispose of previous callback */
- Py_XDECREF(PyArray_ReprFunction);
- /* Add a reference to new callback */
- Py_XINCREF(op);
- /* Remember new callback */
- PyArray_ReprFunction = op;
- }
- else {
- /* Dispose of previous callback */
- Py_XDECREF(PyArray_StrFunction);
- /* Add a reference to new callback */
- Py_XINCREF(op);
- /* Remember new callback */
- PyArray_StrFunction = op;
- }
-}
/*NUMPY_API
* This function is scheduled to be removed
@@ -660,39 +508,6 @@ PyArray_SetDatetimeParseFunction(PyObject *op)
}
-static PyObject *
-array_repr(PyArrayObject *self)
-{
- PyObject *s, *arglist;
-
- if (PyArray_ReprFunction == NULL) {
- s = array_repr_builtin(self, 1);
- }
- else {
- arglist = Py_BuildValue("(O)", self);
- s = PyEval_CallObject(PyArray_ReprFunction, arglist);
- Py_DECREF(arglist);
- }
- return s;
-}
-
-static PyObject *
-array_str(PyArrayObject *self)
-{
- PyObject *s, *arglist;
-
- if (PyArray_StrFunction == NULL) {
- s = array_repr_builtin(self, 0);
- }
- else {
- arglist = Py_BuildValue("(O)", self);
- s = PyEval_CallObject(PyArray_StrFunction, arglist);
- Py_DECREF(arglist);
- }
- return s;
-}
-
-
/*NUMPY_API
*/
@@ -1345,6 +1160,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
result = PyArray_GenericBinaryFunction(self, other, n_ops.less_equal);
break;
case Py_EQ:
+ RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
/*
* The ufunc does not support void/structured types, so these
* need to be handled specifically. Only a few cases are supported.
@@ -1392,7 +1208,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
return result;
}
- RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
result = PyArray_GenericBinaryFunction(self,
(PyObject *)other,
n_ops.equal);
@@ -1418,6 +1233,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
}
break;
case Py_NE:
+ RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
/*
* The ufunc does not support void/structured types, so these
* need to be handled specifically. Only a few cases are supported.
@@ -1465,7 +1281,6 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
return result;
}
- RICHCMP_GIVE_UP_IF_NEEDED(obj_self, other);
result = PyArray_GenericBinaryFunction(self, (PyObject *)other,
n_ops.not_equal);
if (result == NULL) {
@@ -1703,14 +1518,14 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
}
}
- PyDimMem_FREE(dims.ptr);
- PyDimMem_FREE(strides.ptr);
+ npy_free_cache_dim_obj(dims);
+ npy_free_cache_dim_obj(strides);
return (PyObject *)ret;
fail:
Py_XDECREF(descr);
- PyDimMem_FREE(dims.ptr);
- PyDimMem_FREE(strides.ptr);
+ npy_free_cache_dim_obj(dims);
+ npy_free_cache_dim_obj(strides);
return NULL;
}
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index b11134305..43dd101c5 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -203,7 +203,7 @@ static PyObject *
return @func1@((@type1@)t1);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&t1, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&t1, ip, PyArray_ISBYTESWAPPED(ap), ap);
return @func1@((@type1@)t1);
}
}
@@ -239,7 +239,8 @@ static int
if (ap == NULL || PyArray_ISBEHAVED(ap))
*((@type@ *)ov)=temp;
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
}
@@ -265,7 +266,7 @@ static PyObject *
else {
int size = sizeof(@type@);
- npy_bool swap = !PyArray_ISNOTSWAPPED(ap);
+ npy_bool swap = PyArray_ISBYTESWAPPED(ap);
copy_and_swap(&t1, ip, size, 1, 0, swap);
copy_and_swap(&t2, ip + size, size, 1, 0, swap);
return PyComplex_FromDoubles((double)t1, (double)t2);
@@ -325,11 +326,11 @@ static int
}
memcpy(ov, &temp, PyArray_DESCR(ap)->elsize);
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(ov, 2, sizeof(@ftype@));
}
rsize = sizeof(@ftype@);
- copy_and_swap(ov, &temp, rsize, 2, rsize, !PyArray_ISNOTSWAPPED(ap));
+ copy_and_swap(ov, &temp, rsize, 2, rsize, PyArray_ISBYTESWAPPED(ap));
return 0;
}
@@ -422,7 +423,7 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap)
}
else {
copy_and_swap(ov, &temp, PyArray_DESCR(ap)->elsize, 1, 0,
- !PyArray_ISNOTSWAPPED(ap));
+ PyArray_ISBYTESWAPPED(ap));
}
return 0;
}
@@ -439,7 +440,7 @@ UNICODE_getitem(void *ip, void *vap)
{
PyArrayObject *ap = vap;
Py_ssize_t size = PyArray_ITEMSIZE(ap);
- int swap = !PyArray_ISNOTSWAPPED(ap);
+ int swap = PyArray_ISBYTESWAPPED(ap);
int align = !PyArray_ISALIGNED(ap);
return (PyObject *)PyUnicode_FromUCS4(ip, size, swap, align);
@@ -512,7 +513,7 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
if (PyArray_DESCR(ap)->elsize > datalen) {
memset((char*)ov + datalen, 0, (PyArray_DESCR(ap)->elsize - datalen));
}
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(ov, PyArray_DESCR(ap)->elsize >> 2, 4);
}
Py_DECREF(temp);
@@ -699,7 +700,7 @@ VOID_getitem(void *input, void *vap)
PyArrayObject *ret;
if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
return NULL;
@@ -708,7 +709,7 @@ VOID_getitem(void *input, void *vap)
ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
descr->subarray->base, shape.len, shape.ptr,
NULL, ip, PyArray_FLAGS(ap)&(~NPY_ARRAY_F_CONTIGUOUS), NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
if (!ret) {
return NULL;
}
@@ -773,71 +774,178 @@ VOID_getitem(void *input, void *vap)
NPY_NO_EXPORT int PyArray_CopyObject(PyArrayObject *, PyObject *);
+/* Given a structured PyArrayObject arr, index i and structured datatype descr,
+ * modify the dtype of arr to contain a single field corresponding to the ith
+ * field of descr, recompute the alignment flag, and return the offset of the
+ * field (in offset_p). This is useful in preparation for calling copyswap on
+ * individual fields of a numpy structure, in VOID_setitem. Compare to inner
+ * loops in VOID_getitem and VOID_nonzero.
+ *
+ * WARNING: Clobbers arr's dtype and alignment flag.
+ */
+NPY_NO_EXPORT int
+_setup_field(int i, PyArray_Descr *descr, PyArrayObject *arr,
+ npy_intp *offset_p)
+{
+ PyObject *key;
+ PyObject *tup;
+ PyArray_Descr *new;
+ npy_intp offset;
+
+ key = PyTuple_GET_ITEM(descr->names, i);
+ tup = PyDict_GetItem(descr->fields, key);
+ if (_unpack_field(tup, &new, &offset) < 0) {
+ return -1;
+ }
+
+ ((PyArrayObject_fields *)(arr))->descr = new;
+ if ((new->alignment > 1) && ((offset % new->alignment) != 0)) {
+ PyArray_CLEARFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+ else {
+ PyArray_ENABLEFLAGS(arr, NPY_ARRAY_ALIGNED);
+ }
+
+ *offset_p = offset;
+ return 0;
+}
+
+/* Helper function for VOID_setitem, which uses the copyswap or casting code to
+ * copy structured datatypes between numpy arrays or scalars.
+ */
+static int
+_copy_and_return_void_setitem(PyArray_Descr *dstdescr, char *dstdata,
+ PyArray_Descr *srcdescr, char *srcdata){
+ PyArrayObject_fields dummy_struct;
+ PyArrayObject *dummy = (PyArrayObject *)&dummy_struct;
+ npy_int names_size = PyTuple_GET_SIZE(dstdescr->names);
+ npy_intp offset;
+ npy_int i;
+ int ret;
+
+ /* Fast path if dtypes are equal */
+ if (PyArray_EquivTypes(srcdescr, dstdescr)) {
+ for (i = 0; i < names_size; i++) {
+ /* neither line can ever fail, in principle */
+ if (_setup_field(i, dstdescr, dummy, &offset)) {
+ return -1;
+ }
+ PyArray_DESCR(dummy)->f->copyswap(dstdata + offset,
+ srcdata + offset, 0, dummy);
+ }
+ return 0;
+ }
+
+ /* Slow path */
+ ret = PyArray_CastRawArrays(1, srcdata, dstdata, 0, 0,
+ srcdescr, dstdescr, 0);
+ if (ret != NPY_SUCCEED) {
+ return -1;
+ }
+ return 0;
+}
+
static int
VOID_setitem(PyObject *op, void *input, void *vap)
{
char *ip = input;
PyArrayObject *ap = vap;
PyArray_Descr *descr;
+ int flags;
int itemsize=PyArray_DESCR(ap)->elsize;
int res;
descr = PyArray_DESCR(ap);
- if (descr->names && PyTuple_Check(op)) {
- PyObject *key;
- PyObject *names;
- int i, n;
- PyObject *tup;
- int savedflags;
-
- res = 0;
- /* get the names from the fields dictionary*/
- names = descr->names;
- n = PyTuple_GET_SIZE(names);
- if (PyTuple_GET_SIZE(op) != n) {
- PyErr_SetString(PyExc_ValueError,
- "size of tuple must match number of fields.");
- return -1;
- }
- savedflags = PyArray_FLAGS(ap);
- for (i = 0; i < n; i++) {
- PyArray_Descr *new;
- npy_intp offset;
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(descr->fields, key);
- if (_unpack_field(tup, &new, &offset) < 0) {
- ((PyArrayObject_fields *)ap)->descr = descr;
+ flags = PyArray_FLAGS(ap);
+ if (PyDataType_HASFIELDS(descr)) {
+ PyObject *errmsg;
+ npy_int i;
+ npy_intp offset;
+ int failed = 0;
+
+ /* If op is 0d-ndarray or numpy scalar, directly get dtype & data ptr */
+ if (PyArray_Check(op)) {
+ PyArrayObject *oparr = (PyArrayObject *)op;
+ if (PyArray_SIZE(oparr) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "setting an array element with a sequence.");
return -1;
}
- /*
- * TODO: temporarily modifying the array like this
- * is bad coding style, should be changed.
- */
- ((PyArrayObject_fields *)ap)->descr = new;
- /* remember to update alignment flags */
- if ((new->alignment > 1)
- && ((((npy_intp)(ip+offset)) % new->alignment) != 0)) {
- PyArray_CLEARFLAGS(ap, NPY_ARRAY_ALIGNED);
+ return _copy_and_return_void_setitem(descr, ip,
+ PyArray_DESCR(oparr), PyArray_DATA(oparr));
+ }
+ else if (PyArray_IsScalar(op, Void)) {
+ PyArray_Descr *srcdescr = ((PyVoidScalarObject *)op)->descr;
+ char *srcdata = ((PyVoidScalarObject *)op)->obval;
+ return _copy_and_return_void_setitem(descr, ip, srcdescr, srcdata);
+ }
+ else if (PyTuple_Check(op)) {
+ /* if it's a tuple, copy field-by-field to ap, */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ if (names_size != PyTuple_Size(op)) {
+ errmsg = PyUString_FromFormat(
+ "could not assign tuple of length %zd to structure "
+ "with %" NPY_INTP_FMT " fields.",
+ PyTuple_Size(op), names_size);
+ PyErr_SetObject(PyExc_ValueError, errmsg);
+ Py_DECREF(errmsg);
+ return -1;
}
- else {
- PyArray_ENABLEFLAGS(ap, NPY_ARRAY_ALIGNED);
+
+ for (i = 0; i < names_size; i++) {
+ PyObject *item;
+
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ item = PyTuple_GetItem(op, i);
+ if (item == NULL) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_DESCR(ap)->f->setitem(item, ip + offset, ap) < 0) {
+ failed = 1;
+ break;
+ }
}
- res = new->f->setitem(PyTuple_GET_ITEM(op, i), ip+offset, ap);
- ((PyArrayObject_fields *)ap)->flags = savedflags;
- if (res < 0) {
- break;
+ }
+ else {
+ /* Otherwise must be non-void scalar. Try to assign to each field */
+ npy_intp names_size = PyTuple_GET_SIZE(descr->names);
+
+ for (i = 0; i < names_size; i++) {
+ /* temporarily make ap have only this field */
+ if (_setup_field(i, descr, ap, &offset) == -1) {
+ failed = 1;
+ break;
+ }
+ /* use setitem to set this field */
+ if (PyArray_DESCR(ap)->f->setitem(op, ip + offset, ap) < 0) {
+ failed = 1;
+ break;
+ }
}
}
- ((PyArrayObject_fields *)ap)->descr = descr;
- return res;
- }
- if (descr->subarray) {
+ /* reset clobbered attributes */
+ ((PyArrayObject_fields *)(ap))->descr = descr;
+ ((PyArrayObject_fields *)(ap))->flags = flags;
+
+ if (failed) {
+ return -1;
+ }
+ return 0;
+ }
+ else if (PyDataType_HASSUBARRAY(descr)) {
/* copy into an array of the same basic type */
PyArray_Dims shape = {NULL, -1};
PyArrayObject *ret;
if (!(PyArray_IntpConverter(descr->subarray->shape, &shape))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
return -1;
@@ -846,7 +954,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
descr->subarray->base, shape.len, shape.ptr,
NULL, ip, PyArray_FLAGS(ap), NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
if (!ret) {
return -1;
}
@@ -861,19 +969,17 @@ VOID_setitem(PyObject *op, void *input, void *vap)
return res;
}
- /* Default is to use buffer interface to set item */
+ /*
+ * Fall through case - non-structured void datatype. This is a very
+ * undiscerning case: It interprets any object as a buffer
+ * and reads as many bytes as possible, padding with 0.
+ */
{
const void *buffer;
Py_ssize_t buflen;
- if (PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT)
- || PyDataType_FLAGCHK(descr, NPY_ITEM_IS_POINTER)) {
- PyErr_SetString(PyExc_ValueError,
- "Setting void-array with object members using buffer.");
- return -1;
- }
res = PyObject_AsReadBuffer(op, &buffer, &buflen);
if (res == -1) {
- goto fail;
+ return -1;
}
memcpy(ip, buffer, PyArray_MIN(buflen, itemsize));
if (itemsize > buflen) {
@@ -881,9 +987,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
}
}
return 0;
-
-fail:
- return -1;
}
static PyObject *
@@ -903,7 +1006,7 @@ DATETIME_getitem(void *ip, void *vap)
dt = *((npy_datetime *)ip);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&dt, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&dt, ip, PyArray_ISBYTESWAPPED(ap), ap);
}
return convert_datetime_to_pyobject(dt, meta);
@@ -927,7 +1030,7 @@ TIMEDELTA_getitem(void *ip, void *vap)
td = *((npy_timedelta *)ip);
}
else {
- PyArray_DESCR(ap)->f->copyswap(&td, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&td, ip, PyArray_ISBYTESWAPPED(ap), ap);
}
return convert_timedelta_to_pyobject(td, meta);
@@ -958,8 +1061,8 @@ DATETIME_setitem(PyObject *op, void *ov, void *vap)
*((npy_datetime *)ov)=temp;
}
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp,
- !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
@@ -990,7 +1093,8 @@ TIMEDELTA_setitem(PyObject *op, void *ov, void *vap)
*((npy_timedelta *)ov)=temp;
}
else {
- PyArray_DESCR(ap)->f->copyswap(ov, &temp, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(ov, &temp, PyArray_ISBYTESWAPPED(ap),
+ ap);
}
return 0;
@@ -2374,7 +2478,8 @@ static npy_bool
*/
@type@ tmp;
#if @isfloat@
- PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap),
+ ap);
#else
memcpy(&tmp, ip, sizeof(@type@));
#endif
@@ -2397,7 +2502,8 @@ static npy_bool
}
else {
@type@ tmp;
- PyArray_DESCR(ap)->f->copyswap(&tmp, ip, !PyArray_ISNOTSWAPPED(ap), ap);
+ PyArray_DESCR(ap)->f->copyswap(&tmp, ip, PyArray_ISBYTESWAPPED(ap),
+ ap);
return (npy_bool) ((tmp.real != 0) || (tmp.imag != 0));
}
}
@@ -2459,13 +2565,13 @@ UNICODE_nonzero (npy_ucs4 *ip, PyArrayObject *ap)
npy_bool seen_null = NPY_FALSE;
char *buffer = NULL;
- if ((!PyArray_ISNOTSWAPPED(ap)) || (!PyArray_ISALIGNED(ap))) {
+ if (PyArray_ISBYTESWAPPED(ap) || !PyArray_ISALIGNED(ap)) {
buffer = PyArray_malloc(PyArray_DESCR(ap)->elsize);
if (buffer == NULL) {
return nonz;
}
memcpy(buffer, ip, PyArray_DESCR(ap)->elsize);
- if (!PyArray_ISNOTSWAPPED(ap)) {
+ if (PyArray_ISBYTESWAPPED(ap)) {
byte_swap_vector(buffer, len, 4);
}
ip = (npy_ucs4 *)buffer;
@@ -2744,6 +2850,15 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
* the alignment of pointers, so it doesn't need to be handled
* here.
*/
+
+ int ret;
+ /*
+ * work around gh-3879, we cannot abort an in-progress quicksort
+ * so at least do not raise again
+ */
+ if (PyErr_Occurred()) {
+ return 0;
+ }
if ((*ip1 == NULL) || (*ip2 == NULL)) {
if (ip1 == ip2) {
return 1;
@@ -2754,7 +2869,12 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
return 1;
}
- if (PyObject_RichCompareBool(*ip1, *ip2, Py_LT) == 1) {
+ ret = PyObject_RichCompareBool(*ip1, *ip2, Py_LT);
+ if (ret < 0) {
+ /* error occurred, avoid the next call to PyObject_RichCompareBool */
+ return 0;
+ }
+ if (ret == 1) {
return -1;
}
else if (PyObject_RichCompareBool(*ip1, *ip2, Py_GT) == 1) {
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index 3b0b2f4f6..8432ae5cf 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -456,7 +456,8 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
if (numbytes == 0 || l == 0) {
Py_DECREF(ap1);
Py_DECREF(ap2);
- return PyArray_Return(out_buf);
+ Py_DECREF(out_buf);
+ return PyArray_Return(result);
}
if (ap2shape == _scalar) {
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 87a32d150..36ef1d1c4 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -10,6 +10,8 @@
#include "npy_config.h"
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "lowlevel_strided_loops.h" /* for npy_bswap8 */
+#include "alloc.h"
+#include "common.h"
/*
@@ -579,7 +581,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
lval = PyFloat_AsDouble(left);
- if ((lval == -1) && PyErr_Occurred()) {
+ if (error_converting(lval)) {
goto fail;
}
}
@@ -588,7 +590,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
rval = PyFloat_AsDouble(right);
- if ((rval == -1) && PyErr_Occurred()) {
+ if (error_converting(rval)) {
goto fail;
}
}
@@ -735,11 +737,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
lval.real = PyComplex_RealAsDouble(left);
- if ((lval.real == -1) && PyErr_Occurred()) {
+ if (error_converting(lval.real)) {
goto fail;
}
lval.imag = PyComplex_ImagAsDouble(left);
- if ((lval.imag == -1) && PyErr_Occurred()) {
+ if (error_converting(lval.imag)) {
goto fail;
}
}
@@ -749,11 +751,11 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
}
else {
rval.real = PyComplex_RealAsDouble(right);
- if ((rval.real == -1) && PyErr_Occurred()) {
+ if (error_converting(rval.real)) {
goto fail;
}
rval.imag = PyComplex_ImagAsDouble(right);
- if ((rval.imag == -1) && PyErr_Occurred()) {
+ if (error_converting(rval.imag)) {
goto fail;
}
}
@@ -1091,7 +1093,7 @@ arr_ravel_multi_index(PyObject *self, PyObject *args, PyObject *kwds)
for (i = 0; i < dimensions.len; ++i) {
Py_XDECREF(op[i]);
}
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return PyArray_Return(ret);
@@ -1100,7 +1102,7 @@ fail:
for (i = 0; i < dimensions.len; ++i) {
Py_XDECREF(op[i]);
}
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return NULL;
}
@@ -1352,7 +1354,7 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
Py_DECREF(ret_arr);
Py_XDECREF(indices);
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return ret_tuple;
@@ -1362,7 +1364,7 @@ fail:
Py_XDECREF(ret_arr);
Py_XDECREF(dtype);
Py_XDECREF(indices);
- PyDimMem_FREE(dimensions.ptr);
+ npy_free_cache_dim_obj(dimensions);
NpyIter_Deallocate(iter);
return NULL;
}
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 3689bbada..2bb1cbfc1 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -15,6 +15,7 @@
#include "arraytypes.h"
#include "conversion_utils.h"
+#include "alloc.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
@@ -119,7 +120,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
return NPY_FAIL;
}
if (len > 0) {
- seq->ptr = PyDimMem_NEW(len);
+ seq->ptr = npy_alloc_cache_dim(len);
if (seq->ptr == NULL) {
PyErr_NoMemory();
return NPY_FAIL;
@@ -128,7 +129,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
seq->len = len;
nd = PyArray_IntpFromIndexSequence(obj, (npy_intp *)seq->ptr, len);
if (nd == -1 || nd != len) {
- PyDimMem_FREE(seq->ptr);
+ npy_free_cache_dim_obj(*seq);
seq->ptr = NULL;
return NPY_FAIL;
}
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index 1a87234ce..212da892d 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -13,6 +13,7 @@
#include "npy_pycompat.h"
+#include "common.h"
#include "arrayobject.h"
#include "ctors.h"
#include "mapping.h"
@@ -411,7 +412,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
else if (PyLong_Check(obj) || PyInt_Check(obj)) {
/* Try long long before unsigned long long */
npy_longlong ll_v = PyLong_AsLongLong(obj);
- if (ll_v == -1 && PyErr_Occurred()) {
+ if (error_converting(ll_v)) {
/* Long long failed, try unsigned long long */
npy_ulonglong ull_v;
PyErr_Clear();
@@ -441,7 +442,7 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
/* Python float */
else if (PyFloat_Check(obj)) {
npy_double v = PyFloat_AsDouble(obj);
- if (v == -1 && PyErr_Occurred()) {
+ if (error_converting(v)) {
return -1;
}
value = (char *)value_buffer;
@@ -457,11 +458,11 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
npy_double re, im;
re = PyComplex_RealAsDouble(obj);
- if (re == -1 && PyErr_Occurred()) {
+ if (error_converting(re)) {
return -1;
}
im = PyComplex_ImagAsDouble(obj);
- if (im == -1 && PyErr_Occurred()) {
+ if (error_converting(im)) {
return -1;
}
value = (char *)value_buffer;
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 7eae0beaa..c9b3125ae 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1529,12 +1529,6 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
if (!writeable) {
tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
if (tmp != Py_NotImplemented) {
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
- "array interface object") < 0) {
- Py_DECREF(tmp);
- return -1;
- }
*out_arr = (PyArrayObject *)tmp;
return (*out_arr) == NULL ? -1 : 0;
}
@@ -1860,7 +1854,7 @@ PyArray_CheckFromAny(PyObject *op, PyArray_Descr *descr, int min_depth,
PyObject *obj;
if (requires & NPY_ARRAY_NOTSWAPPED) {
if (!descr && PyArray_Check(op) &&
- !PyArray_ISNBO(PyArray_DESCR((PyArrayObject *)op)->byteorder)) {
+ PyArray_ISBYTESWAPPED((PyArrayObject* )op)) {
descr = PyArray_DescrNew(PyArray_DESCR((PyArrayObject *)op));
}
else if (descr && !PyArray_ISNBO(descr->byteorder)) {
@@ -2896,7 +2890,7 @@ PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
/*
* PyArray_NewFromDescr steals a ref,
- * but we need to look at type later.
+ * but we need to look at type later.
* */
Py_INCREF(type);
@@ -3010,7 +3004,7 @@ PyArray_Arange(double start, double stop, double step, int type_num)
}
/*
- * the formula is len = (intp) ceil((start - stop) / step);
+ * the formula is len = (intp) ceil((stop - start) / step);
*/
static npy_intp
_calc_length(PyObject *start, PyObject *stop, PyObject *step, PyObject **next, int cmplx)
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 3cf9a2bd5..93babe8bd 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -20,6 +20,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "common.h"
#include "numpy/arrayscalars.h"
#include "methods.h"
#include "_datetime.h"
@@ -1718,8 +1719,6 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2)
* a date time unit enum value. The 'metastr' parameter
* is used for error messages, and may be NULL.
*
- * Generic units have no representation as a string in this form.
- *
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
@@ -1761,6 +1760,9 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
return NPY_FR_as;
}
}
+ else if (len == 7 && !strncmp(str, "generic", 7)) {
+ return NPY_FR_GENERIC;
+ }
/* If nothing matched, it's an error */
if (metastr == NULL) {
@@ -1802,7 +1804,8 @@ convert_datetime_metadata_to_tuple(PyArray_DatetimeMetaData *meta)
*/
NPY_NO_EXPORT int
convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
- PyArray_DatetimeMetaData *out_meta)
+ PyArray_DatetimeMetaData *out_meta,
+ npy_bool from_pickle)
{
char *basestr = NULL;
Py_ssize_t len = 0, tuple_size;
@@ -1853,13 +1856,62 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
/* Convert the values to longs */
out_meta->num = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 1));
- if (out_meta->num == -1 && PyErr_Occurred()) {
+ if (error_converting(out_meta->num)) {
return -1;
}
- if (tuple_size == 4) {
+ /*
+ * The event metadata was removed way back in numpy 1.7 (cb4545), but was
+ * not deprecated at the time.
+ */
+
+ /* (unit, num, event) */
+ if (tuple_size == 3) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 3-tuple as (unit, num, event), the event "
+ "is ignored (since 1.7) - use (unit, num) instead") < 0) {
+ return -1;
+ }
+ }
+ /* (unit, num, den, event) */
+ else if (tuple_size == 4) {
+ PyObject *event = PyTuple_GET_ITEM(tuple, 3);
+ if (from_pickle) {
+ /* if (event == 1) */
+ PyObject *one = PyLong_FromLong(1);
+ int equal_one;
+ if (one == NULL) {
+ return -1;
+ }
+ equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ if (equal_one == -1) {
+ return -1;
+ }
+
+ /* if the event data is not 1, it had semantics different to how
+ * datetime types now behave, which are no longer respected.
+ */
+ if (!equal_one) {
+ if (PyErr_WarnEx(PyExc_UserWarning,
+ "Loaded pickle file contains non-default event data "
+ "for a datetime type, which has been ignored since 1.7",
+ 1) < 0) {
+ return -1;
+ }
+ }
+ }
+ else if (event != Py_None) {
+ /* Numpy 1.14, 2017-08-11 */
+ if (DEPRECATE(
+ "When passing a 4-tuple as (unit, num, den, event), the "
+ "event argument is ignored (since 1.7), so should be None"
+ ) < 0) {
+ return -1;
+ }
+ }
den = PyInt_AsLong(PyTuple_GET_ITEM(tuple, 2));
- if (den == -1 && PyErr_Occurred()) {
+ if (error_converting(den)) {
return -1;
}
}
@@ -1895,8 +1947,8 @@ convert_pyobject_to_datetime_metadata(PyObject *obj,
Py_ssize_t len = 0;
if (PyTuple_Check(obj)) {
- return convert_datetime_metadata_tuple_to_datetime_metadata(obj,
- out_meta);
+ return convert_datetime_metadata_tuple_to_datetime_metadata(
+ obj, out_meta, NPY_FALSE);
}
/* Get an ASCII string */
@@ -2126,7 +2178,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->year = PyInt_AsLong(tmp);
- if (out->year == -1 && PyErr_Occurred()) {
+ if (error_converting(out->year)) {
Py_DECREF(tmp);
return -1;
}
@@ -2138,7 +2190,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->month = PyInt_AsLong(tmp);
- if (out->month == -1 && PyErr_Occurred()) {
+ if (error_converting(out->month)) {
Py_DECREF(tmp);
return -1;
}
@@ -2150,7 +2202,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->day = PyInt_AsLong(tmp);
- if (out->day == -1 && PyErr_Occurred()) {
+ if (error_converting(out->day)) {
Py_DECREF(tmp);
return -1;
}
@@ -2184,7 +2236,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->hour = PyInt_AsLong(tmp);
- if (out->hour == -1 && PyErr_Occurred()) {
+ if (error_converting(out->hour)) {
Py_DECREF(tmp);
return -1;
}
@@ -2196,7 +2248,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->min = PyInt_AsLong(tmp);
- if (out->min == -1 && PyErr_Occurred()) {
+ if (error_converting(out->min)) {
Py_DECREF(tmp);
return -1;
}
@@ -2208,7 +2260,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->sec = PyInt_AsLong(tmp);
- if (out->sec == -1 && PyErr_Occurred()) {
+ if (error_converting(out->sec)) {
Py_DECREF(tmp);
return -1;
}
@@ -2220,7 +2272,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
out->us = PyInt_AsLong(tmp);
- if (out->us == -1 && PyErr_Occurred()) {
+ if (error_converting(out->us)) {
Py_DECREF(tmp);
return -1;
}
@@ -2271,7 +2323,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
return -1;
}
seconds_offset = PyInt_AsLong(tmp);
- if (seconds_offset == -1 && PyErr_Occurred()) {
+ if (error_converting(seconds_offset)) {
Py_DECREF(tmp);
return -1;
}
@@ -2456,7 +2508,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
PyArray_DESCR(arr)->f->copyswap(&dt,
PyArray_DATA(arr),
- !PyArray_ISNOTSWAPPED(arr),
+ PyArray_ISBYTESWAPPED(arr),
obj);
/* Copy the value directly if units weren't specified */
@@ -2654,7 +2706,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
PyArray_DESCR(arr)->f->copyswap(&dt,
PyArray_DATA(arr),
- !PyArray_ISNOTSWAPPED(arr),
+ PyArray_ISBYTESWAPPED(arr),
obj);
/* Copy the value directly if units weren't specified */
@@ -2694,7 +2746,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
days = PyLong_AsLongLong(tmp);
- if (days == -1 && PyErr_Occurred()) {
+ if (error_converting(days)) {
Py_DECREF(tmp);
return -1;
}
@@ -2706,7 +2758,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
seconds = PyInt_AsLong(tmp);
- if (seconds == -1 && PyErr_Occurred()) {
+ if (error_converting(seconds)) {
Py_DECREF(tmp);
return -1;
}
@@ -2718,7 +2770,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
useconds = PyInt_AsLong(tmp);
- if (useconds == -1 && PyErr_Occurred()) {
+ if (error_converting(useconds)) {
Py_DECREF(tmp);
return -1;
}
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 7eaf0cd7a..7a26868e8 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -18,6 +18,7 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "common.h"
#include "numpy/arrayscalars.h"
#include "lowlevel_strided_loops.h"
#include "_datetime.h"
@@ -168,7 +169,7 @@ invalid_weekmask_string:
}
val = PyInt_AsLong(f);
- if (val == -1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
Py_DECREF(f);
Py_DECREF(obj);
return 0;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 12735513c..1ae6e34a6 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -16,6 +16,7 @@
#include "_datetime.h"
#include "common.h"
#include "descriptor.h"
+#include "alloc.h"
/*
* offset: A starting offset.
@@ -306,7 +307,7 @@ _convert_from_tuple(PyObject *obj)
int i;
if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > NPY_MAXDIMS)) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
goto fail;
@@ -320,12 +321,12 @@ _convert_from_tuple(PyObject *obj)
&& PyNumber_Check(val))
|| (shape.len == 0
&& PyTuple_Check(val))) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return type;
}
newdescr = PyArray_DescrNewFromType(NPY_VOID);
if (newdescr == NULL) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
@@ -335,14 +336,14 @@ _convert_from_tuple(PyObject *obj)
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
"dimension smaller then zero.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
if (shape.ptr[i] > NPY_MAX_INT) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
"dimension does not fit into a C int.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
}
@@ -351,12 +352,12 @@ _convert_from_tuple(PyObject *obj)
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: dtype size in "
"bytes must fit into a C int.");
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
newdescr->elsize = type->elsize * items;
if (newdescr->elsize == -1) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
@@ -381,7 +382,7 @@ _convert_from_tuple(PyObject *obj)
*/
newdescr->subarray->shape = PyTuple_New(shape.len);
if (newdescr->subarray->shape == NULL) {
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
for (i=0; i < shape.len; i++) {
@@ -391,12 +392,12 @@ _convert_from_tuple(PyObject *obj)
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
Py_DECREF(newdescr->subarray->shape);
newdescr->subarray->shape = NULL;
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
goto fail;
}
}
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
type = newdescr;
}
return type;
@@ -1130,7 +1131,7 @@ _convert_from_dict(PyObject *obj, int align)
goto fail;
}
offset = PyArray_PyIntAsInt(off);
- if (offset == -1 && PyErr_Occurred()) {
+ if (error_converting(offset)) {
Py_DECREF(off);
Py_DECREF(tup);
Py_DECREF(ind);
@@ -1269,7 +1270,7 @@ _convert_from_dict(PyObject *obj, int align)
PyErr_Clear();
} else {
itemsize = (int)PyArray_PyIntAsInt(tmp);
- if (itemsize == -1 && PyErr_Occurred()) {
+ if (error_converting(itemsize)) {
Py_DECREF(new);
return NULL;
}
@@ -2886,7 +2887,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
if (convert_datetime_metadata_tuple_to_datetime_metadata(
PyTuple_GET_ITEM(metadata, 1),
- &temp_dt_data) < 0) {
+ &temp_dt_data,
+ NPY_TRUE) < 0) {
return NULL;
}
@@ -3118,7 +3120,7 @@ static PyMethodDef arraydescr_methods[] = {
*
* Returns 1 if it has a simple layout, 0 otherwise.
*/
-static int
+NPY_NO_EXPORT int
is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype)
{
PyObject *names, *fields, *key, *tup, *title;
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index ff1fc980a..f95041195 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -10,6 +10,10 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
NPY_NO_EXPORT PyArray_Descr *
_arraydescr_fromobj(PyObject *obj);
+
+NPY_NO_EXPORT int
+is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
+
/*
* Creates a string repr of the dtype, excluding the 'dtype()' part
* surrounding the object. This object may be a string, a list, or
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 58739b831..9c27255aa 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -25,9 +25,11 @@
#include "ctors.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "descriptor.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
+#include "alloc.h"
#define NPY_LOWLEVEL_BUFFER_BLOCKSIZE 128
@@ -2342,7 +2344,7 @@ get_subarray_transfer_function(int aligned,
if (PyDataType_HASSUBARRAY(dst_dtype)) {
if (!(PyArray_IntpConverter(dst_dtype->subarray->shape,
&dst_shape))) {
- PyDimMem_FREE(src_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
PyErr_SetString(PyExc_ValueError,
"invalid subarray shape");
return NPY_FAIL;
@@ -2355,8 +2357,8 @@ get_subarray_transfer_function(int aligned,
* Just a straight one-element copy.
*/
if (dst_size == 1 && src_size == 1) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return PyArray_GetDTypeTransferFunction(aligned,
src_stride, dst_stride,
@@ -2367,8 +2369,8 @@ get_subarray_transfer_function(int aligned,
}
/* Copy the src value to all the dst values */
else if (src_size == 1) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return get_one_to_n_transfer_function(aligned,
src_stride, dst_stride,
@@ -2382,8 +2384,8 @@ get_subarray_transfer_function(int aligned,
else if (src_shape.len == dst_shape.len &&
PyArray_CompareLists(src_shape.ptr, dst_shape.ptr,
src_shape.len)) {
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return get_n_to_n_transfer_function(aligned,
src_stride, dst_stride,
@@ -2407,8 +2409,8 @@ get_subarray_transfer_function(int aligned,
out_stransfer, out_transferdata,
out_needs_api);
- PyDimMem_FREE(src_shape.ptr);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
+ npy_free_cache_dim_obj(dst_shape);
return ret;
}
}
@@ -2520,7 +2522,7 @@ _strided_to_strided_field_transfer(char *dst, npy_intp dst_stride,
/*
* Handles fields transfer. To call this, at least one of the dtypes
- * must have fields
+ * must have fields. Does not take care of object<->structure conversion
*/
static int
get_fields_transfer_function(int aligned,
@@ -2531,22 +2533,26 @@ get_fields_transfer_function(int aligned,
NpyAuxData **out_transferdata,
int *out_needs_api)
{
- PyObject *names, *key, *tup, *title;
+ PyObject *key, *tup, *title;
PyArray_Descr *src_fld_dtype, *dst_fld_dtype;
- npy_int i, names_size, field_count, structsize;
+ npy_int i, field_count, structsize;
int src_offset, dst_offset;
_field_transfer_data *data;
_single_field_transfer *fields;
+ int failed = 0;
+
+ /*
+ * There are three cases to take care of: 1. src is non-structured,
+ * 2. dst is non-structured, or 3. both are structured.
+ */
- /* Copy the src value to all the fields of dst */
+ /* 1. src is non-structured. Copy the src value to all the fields of dst */
if (!PyDataType_HASFIELDS(src_dtype)) {
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- field_count = names_size;
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
(field_count + 1) * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2556,8 +2562,8 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
tup = PyDict_GetItem(dst_dtype->fields, key);
if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
&dst_offset, &title)) {
@@ -2583,7 +2589,7 @@ get_fields_transfer_function(int aligned,
}
/*
- * If the references should be removed from src, add
+ * If references should be decrefd in src, add
* another transfer function to do that.
*/
if (move_references && PyDataType_REFCHK(src_dtype)) {
@@ -2611,24 +2617,19 @@ get_fields_transfer_function(int aligned,
return NPY_SUCCEED;
}
- /* Copy the value of the first field to dst */
- else if (!PyDataType_HASFIELDS(dst_dtype)) {
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- /*
- * If DECREF is needed on source fields, may need
- * to process all the fields
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + 1;
- }
- else {
- field_count = 1;
+ /* 2. dst is non-structured. Allow transfer from single-field src to dst */
+ if (!PyDataType_HASFIELDS(dst_dtype)) {
+ if (PyTuple_GET_SIZE(src_dtype->names) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Can't cast from structure to non-structure, except if the "
+ "structure only has a single field.");
+ return NPY_FAIL;
}
+
+ /* Allocate the field-data structure and populate it */
structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
+ 1 * sizeof(_single_field_transfer);
data = (_field_transfer_data *)PyArray_malloc(structsize);
if (data == NULL) {
PyErr_NoMemory();
@@ -2638,286 +2639,102 @@ get_fields_transfer_function(int aligned,
data->base.clone = &_field_transfer_data_clone;
fields = &data->fields;
- key = PyTuple_GET_ITEM(names, 0);
+ key = PyTuple_GET_ITEM(src_dtype->names, 0);
tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- PyArray_free(data);
+ if (!PyArg_ParseTuple(tup, "Oi|O",
+ &src_fld_dtype, &src_offset, &title)) {
return NPY_FAIL;
}
- field_count = 0;
- /*
- * Special case bool type, the existence of fields implies True
- *
- * TODO: Perhaps a better behavior would be to combine all the
- * input fields with an OR? The same would apply to subarrays.
- */
- if (dst_dtype->type_num == NPY_BOOL) {
- if (get_bool_setdstone_transfer_function(dst_stride,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = 0;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = 0;
- field_count++;
-
- /* If the src field has references, may need to clear them */
- if (move_references && PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- NPY_AUXDATA_FREE(fields[0].data);
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
- /* Transfer the first field to the output */
- else {
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_dtype,
- move_references,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- /*
- * If the references should be removed from src, add
- * more transfer functions to decrement the references
- * for all the other fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- for (i = 1; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize = src_fld_dtype->elsize;
- field_count++;
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_dtype,
+ move_references,
+ &fields[0].stransfer,
+ &fields[0].data,
+ out_needs_api) != NPY_SUCCEED) {
+ PyArray_free(data);
+ return NPY_FAIL;
}
+ fields[0].src_offset = src_offset;
+ fields[0].dst_offset = 0;
+ fields[0].src_itemsize = src_fld_dtype->elsize;
- data->field_count = field_count;
+ data->field_count = 1;
*out_stransfer = &_strided_to_strided_field_transfer;
*out_transferdata = (NpyAuxData *)data;
return NPY_SUCCEED;
}
- /* Match up the fields to copy */
- else {
- /* Keeps track of the names we already used */
- PyObject *used_names_dict = NULL;
- int cmpval;
-
- const char *msg =
- "Assignment between structured arrays with different field names "
- "will change in numpy 1.14.\n\n"
- "Previously fields in the dst would be set to the value of the "
- "identically-named field in the src. In numpy 1.14 fields will "
- "instead be assigned 'by position': The Nth field of the dst "
- "will be set to the Nth field of the src array.\n\n"
- "See the release notes for details";
- /*
- * 2016-09-19, 1.12
- * Warn if the field names of the dst and src are not
- * identical, since then behavior will change in 1.13.
- */
- cmpval = PyObject_RichCompareBool(src_dtype->names,
- dst_dtype->names, Py_EQ);
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- if (cmpval != 1) {
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
- return NPY_FAIL;
- }
- }
- names = dst_dtype->names;
- names_size = PyTuple_GET_SIZE(dst_dtype->names);
+ /* 3. Otherwise both src and dst are structured arrays */
+ field_count = PyTuple_GET_SIZE(dst_dtype->names);
- /*
- * If DECREF is needed on source fields, will need
- * to also go through its fields.
- */
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- field_count = names_size + PyTuple_GET_SIZE(src_dtype->names);
- used_names_dict = PyDict_New();
- if (used_names_dict == NULL) {
- return NPY_FAIL;
- }
- }
- else {
- field_count = names_size;
- }
- structsize = sizeof(_field_transfer_data) +
- field_count * sizeof(_single_field_transfer);
- /* Allocate the data and populate it */
- data = (_field_transfer_data *)PyArray_malloc(structsize);
- if (data == NULL) {
- PyErr_NoMemory();
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- data->base.free = &_field_transfer_data_free;
- data->base.clone = &_field_transfer_data_clone;
- fields = &data->fields;
+ /* Match up the fields to copy (field-by-field transfer) */
+ if (PyTuple_GET_SIZE(src_dtype->names) != field_count) {
+ PyErr_SetString(PyExc_ValueError, "structures must have the same size");
+ return NPY_FAIL;
+ }
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- tup = PyDict_GetItem(dst_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
- &dst_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (tup != NULL) {
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyArray_GetDTypeTransferFunction(0,
- src_stride, dst_stride,
- src_fld_dtype, dst_fld_dtype,
- move_references,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = src_offset;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = src_fld_dtype->elsize;
+ /* Allocate the field-data structure and populate it */
+ structsize = sizeof(_field_transfer_data) +
+ field_count * sizeof(_single_field_transfer);
+ data = (_field_transfer_data *)PyArray_malloc(structsize);
+ if (data == NULL) {
+ PyErr_NoMemory();
+ return NPY_FAIL;
+ }
+ data->base.free = &_field_transfer_data_free;
+ data->base.clone = &_field_transfer_data_clone;
+ fields = &data->fields;
- if (used_names_dict != NULL) {
- PyDict_SetItem(used_names_dict, key, Py_True);
- }
- }
- else {
- if (get_setdstzero_transfer_function(0,
- dst_stride,
- dst_fld_dtype,
- &fields[i].stransfer,
- &fields[i].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = i-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- fields[i].src_offset = 0;
- fields[i].dst_offset = dst_offset;
- fields[i].src_itemsize = 0;
- }
+ /* set up the transfer function for each field */
+ for (i = 0; i < field_count; ++i) {
+ key = PyTuple_GET_ITEM(dst_dtype->names, i);
+ tup = PyDict_GetItem(dst_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &dst_fld_dtype,
+ &dst_offset, &title)) {
+ failed = 1;
+ break;
+ }
+ key = PyTuple_GET_ITEM(src_dtype->names, i);
+ tup = PyDict_GetItem(src_dtype->fields, key);
+ if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
+ &src_offset, &title)) {
+ failed = 1;
+ break;
}
- if (move_references && PyDataType_REFCHK(src_dtype)) {
- /* Use field_count to track additional functions added */
- field_count = names_size;
-
- names = src_dtype->names;
- names_size = PyTuple_GET_SIZE(src_dtype->names);
- for (i = 0; i < names_size; ++i) {
- key = PyTuple_GET_ITEM(names, i);
- if (PyDict_GetItem(used_names_dict, key) == NULL) {
- tup = PyDict_GetItem(src_dtype->fields, key);
- if (!PyArg_ParseTuple(tup, "Oi|O", &src_fld_dtype,
- &src_offset, &title)) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- Py_XDECREF(used_names_dict);
- return NPY_FAIL;
- }
- if (PyDataType_REFCHK(src_fld_dtype)) {
- if (get_decsrcref_transfer_function(0,
- src_stride,
- src_fld_dtype,
- &fields[field_count].stransfer,
- &fields[field_count].data,
- out_needs_api) != NPY_SUCCEED) {
- for (i = field_count-1; i >= 0; --i) {
- NPY_AUXDATA_FREE(fields[i].data);
- }
- PyArray_free(data);
- return NPY_FAIL;
- }
- fields[field_count].src_offset = src_offset;
- fields[field_count].dst_offset = 0;
- fields[field_count].src_itemsize =
- src_fld_dtype->elsize;
- field_count++;
- }
- }
- }
+ if (PyArray_GetDTypeTransferFunction(0,
+ src_stride, dst_stride,
+ src_fld_dtype, dst_fld_dtype,
+ move_references,
+ &fields[i].stransfer,
+ &fields[i].data,
+ out_needs_api) != NPY_SUCCEED) {
+ failed = 1;
+ break;
}
+ fields[i].src_offset = src_offset;
+ fields[i].dst_offset = dst_offset;
+ fields[i].src_itemsize = src_fld_dtype->elsize;
+ }
- Py_XDECREF(used_names_dict);
+ if (failed) {
+ for (i = i-1; i >= 0; --i) {
+ NPY_AUXDATA_FREE(fields[i].data);
+ }
+ PyArray_free(data);
+ return NPY_FAIL;
+ }
- data->field_count = field_count;
+ data->field_count = field_count;
- *out_stransfer = &_strided_to_strided_field_transfer;
- *out_transferdata = (NpyAuxData *)data;
+ *out_stransfer = &_strided_to_strided_field_transfer;
+ *out_transferdata = (NpyAuxData *)data;
- return NPY_SUCCEED;
- }
+ return NPY_SUCCEED;
}
static int
@@ -3371,7 +3188,7 @@ get_setdstzero_transfer_function(int aligned,
return NPY_FAIL;
}
dst_size = PyArray_MultiplyList(dst_shape.ptr, dst_shape.len);
- PyDimMem_FREE(dst_shape.ptr);
+ npy_free_cache_dim_obj(dst_shape);
/* Get a function for contiguous dst of the subarray type */
if (get_setdstzero_transfer_function(aligned,
@@ -3484,7 +3301,7 @@ get_decsrcref_transfer_function(int aligned,
return NPY_FAIL;
}
src_size = PyArray_MultiplyList(src_shape.ptr, src_shape.len);
- PyDimMem_FREE(src_shape.ptr);
+ npy_free_cache_dim_obj(src_shape);
/* Get a function for contiguous src of the subarray type */
if (get_decsrcref_transfer_function(aligned,
@@ -3648,8 +3465,10 @@ PyArray_GetDTypeTransferFunction(int aligned,
* If there are no references and the data types are equivalent,
* return a simple copy
*/
- if (!PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
- PyArray_EquivTypes(src_dtype, dst_dtype)) {
+ if (PyArray_EquivTypes(src_dtype, dst_dtype) &&
+ !PyDataType_REFCHK(src_dtype) && !PyDataType_REFCHK(dst_dtype) &&
+ ( !PyDataType_HASFIELDS(dst_dtype) ||
+ is_dtype_struct_simple_unaligned_layout(dst_dtype)) ) {
/*
* We can't pass through the aligned flag because it's not
* appropriate. Consider a size-8 string, it will say it's
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index ee9ee1abd..943b8aecf 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -2333,6 +2333,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
npy_intp coord, shape[2], strides[2][2];
char *ptrs[2][2], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2363,6 +2364,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]);
for (coord = shape[1]; coord > 0; --coord) {
sop(1, ptrs[0], strides[0], shape[0]);
@@ -2371,6 +2373,7 @@ unbuffered_loop_nop1_ndim2(NpyIter *iter)
ptr = ptrs[1][1] + strides[1][1];
ptrs[0][1] = ptrs[1][1] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2381,6 +2384,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
npy_intp coords[2], shape[3], strides[3][2];
char *ptrs[3][2], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2414,6 +2418,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]);
for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) {
for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) {
sop(1, ptrs[0], strides[0], shape[0]);
@@ -2428,6 +2433,7 @@ unbuffered_loop_nop1_ndim3(NpyIter *iter)
ptr = ptrs[2][1] + strides[2][1];
ptrs[0][1] = ptrs[1][1] = ptrs[2][1] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2438,6 +2444,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
npy_intp coord, shape[2], strides[2][3];
char *ptrs[2][3], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2468,6 +2475,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[1] * shape[0]);
for (coord = shape[1]; coord > 0; --coord) {
sop(2, ptrs[0], strides[0], shape[0]);
@@ -2478,6 +2486,7 @@ unbuffered_loop_nop2_ndim2(NpyIter *iter)
ptr = ptrs[1][2] + strides[1][2];
ptrs[0][2] = ptrs[1][2] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
@@ -2488,6 +2497,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
npy_intp coords[2], shape[3], strides[3][3];
char *ptrs[3][3], *ptr;
sum_of_products_fn sop;
+ NPY_BEGIN_THREADS_DEF;
#if NPY_EINSUM_DBG_TRACING
NpyIter_DebugPrint(iter);
@@ -2521,6 +2531,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
* Since the iterator wasn't tracking coordinates, the
* loop provided by the iterator is in Fortran-order.
*/
+ NPY_BEGIN_THREADS_THRESHOLDED(shape[2] * shape[1] * shape[0]);
for (coords[1] = shape[2]; coords[1] > 0; --coords[1]) {
for (coords[0] = shape[1]; coords[0] > 0; --coords[0]) {
sop(2, ptrs[0], strides[0], shape[0]);
@@ -2539,6 +2550,7 @@ unbuffered_loop_nop2_ndim3(NpyIter *iter)
ptr = ptrs[2][2] + strides[2][2];
ptrs[0][2] = ptrs[1][2] = ptrs[2][2] = ptr;
}
+ NPY_END_THREADS;
return 0;
}
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 3ed1666ae..77d9b8c66 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -18,6 +18,7 @@
#include "getset.h"
#include "arrayobject.h"
#include "mem_overlap.h"
+#include "alloc.h"
/******************* array attribute get and set routines ******************/
@@ -65,12 +66,12 @@ array_shape_set(PyArrayObject *self, PyObject *val)
}
/* Free old dimensions and strides */
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
nd = PyArray_NDIM(ret);
((PyArrayObject_fields *)self)->nd = nd;
if (nd > 0) {
/* create new dimensions and strides */
- ((PyArrayObject_fields *)self)->dimensions = PyDimMem_NEW(3*nd);
+ ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(3*nd);
if (PyArray_DIMS(self) == NULL) {
Py_DECREF(ret);
PyErr_SetString(PyExc_MemoryError,"");
@@ -158,11 +159,11 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
NPY_ARRAY_ALIGNED);
- PyDimMem_FREE(newstrides.ptr);
+ npy_free_cache_dim_obj(newstrides);
return 0;
fail:
- PyDimMem_FREE(newstrides.ptr);
+ npy_free_cache_dim_obj(newstrides);
return -1;
}
@@ -436,12 +437,6 @@ static int
array_descr_set(PyArrayObject *self, PyObject *arg)
{
PyArray_Descr *newtype = NULL;
- npy_intp newdim;
- int i;
- char *msg = "new type not compatible with array.";
- PyObject *safe;
- static PyObject *checkfunc = NULL;
-
if (arg == NULL) {
PyErr_SetString(PyExc_AttributeError,
@@ -458,16 +453,18 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
/* check that we are not reinterpreting memory containing Objects. */
if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) {
+ static PyObject *checkfunc = NULL;
+ PyObject *safe;
+
npy_cache_import("numpy.core._internal", "_view_is_safe", &checkfunc);
if (checkfunc == NULL) {
- return -1;
+ goto fail;
}
safe = PyObject_CallFunction(checkfunc, "OO",
PyArray_DESCR(self), newtype);
if (safe == NULL) {
- Py_DECREF(newtype);
- return -1;
+ goto fail;
}
Py_DECREF(safe);
}
@@ -491,58 +488,76 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
}
- if ((newtype->elsize != PyArray_DESCR(self)->elsize) &&
- (PyArray_NDIM(self) == 0 ||
- !PyArray_ISONESEGMENT(self) ||
- PyDataType_HASSUBARRAY(newtype))) {
- goto fail;
- }
+ /* Changing the size of the dtype results in a shape change */
+ if (newtype->elsize != PyArray_DESCR(self)->elsize) {
+ int axis;
+ npy_intp newdim;
- /* Deprecate not C contiguous and a dimension changes */
- if (newtype->elsize != PyArray_DESCR(self)->elsize &&
- !PyArray_IS_C_CONTIGUOUS(self)) {
- /* 11/27/2015 1.11.0 */
- if (DEPRECATE("Changing the shape of non-C contiguous array by\n"
- "descriptor assignment is deprecated. To maintain\n"
- "the Fortran contiguity of a multidimensional Fortran\n"
- "array, use 'a.T.view(...).T' instead") < 0) {
- return -1;
+ /* forbidden cases */
+ if (PyArray_NDIM(self) == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "Changing the dtype of a 0d array is only supported "
+ "if the itemsize is unchanged");
+ goto fail;
}
- }
-
- if (PyArray_IS_C_CONTIGUOUS(self)) {
- i = PyArray_NDIM(self) - 1;
- }
- else {
- i = 0;
- }
- if (newtype->elsize < PyArray_DESCR(self)->elsize) {
- /*
- * if it is compatible increase the size of the
- * dimension at end (or at the front for NPY_ARRAY_F_CONTIGUOUS)
- */
- if (PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
+ else if (PyDataType_HASSUBARRAY(newtype)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Changing the dtype to a subarray type is only supported "
+ "if the total itemsize is unchanged");
goto fail;
}
- newdim = PyArray_DESCR(self)->elsize / newtype->elsize;
- PyArray_DIMS(self)[i] *= newdim;
- PyArray_STRIDES(self)[i] = newtype->elsize;
- }
- else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
- /*
- * Determine if last (or first if NPY_ARRAY_F_CONTIGUOUS) dimension
- * is compatible
- */
- newdim = PyArray_DIMS(self)[i] * PyArray_DESCR(self)->elsize;
- if ((newdim % newtype->elsize) != 0) {
+
+ /* determine which axis to resize */
+ if (PyArray_IS_C_CONTIGUOUS(self)) {
+ axis = PyArray_NDIM(self) - 1;
+ }
+ else if (PyArray_IS_F_CONTIGUOUS(self)) {
+ /* 2015-11-27 1.11.0, gh-6747 */
+ if (DEPRECATE(
+ "Changing the shape of an F-contiguous array by "
+ "descriptor assignment is deprecated. To maintain the "
+ "Fortran contiguity of a multidimensional Fortran "
+ "array, use 'a.T.view(...).T' instead") < 0) {
+ goto fail;
+ }
+ axis = 0;
+ }
+ else {
+ /* Don't mention the deprecated F-contiguous support */
+ PyErr_SetString(PyExc_ValueError,
+ "To change to a dtype of a different size, the array must "
+ "be C-contiguous");
goto fail;
}
- PyArray_DIMS(self)[i] = newdim / newtype->elsize;
- PyArray_STRIDES(self)[i] = newtype->elsize;
+
+ if (newtype->elsize < PyArray_DESCR(self)->elsize) {
+ /* if it is compatible, increase the size of the relevant axis */
+ if (PyArray_DESCR(self)->elsize % newtype->elsize != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "When changing to a smaller dtype, its size must be a "
+ "divisor of the size of original dtype");
+ goto fail;
+ }
+ newdim = PyArray_DESCR(self)->elsize / newtype->elsize;
+ PyArray_DIMS(self)[axis] *= newdim;
+ PyArray_STRIDES(self)[axis] = newtype->elsize;
+ }
+ else if (newtype->elsize > PyArray_DESCR(self)->elsize) {
+ /* if it is compatible, decrease the size of the relevant axis */
+ newdim = PyArray_DIMS(self)[axis] * PyArray_DESCR(self)->elsize;
+ if ((newdim % newtype->elsize) != 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "When changing to a larger dtype, its size must be a "
+ "divisor of the total size in bytes of the last axis "
+ "of the array.");
+ goto fail;
+ }
+ PyArray_DIMS(self)[axis] = newdim / newtype->elsize;
+ PyArray_STRIDES(self)[axis] = newtype->elsize;
+ }
}
- /* fall through -- adjust type*/
- Py_DECREF(PyArray_DESCR(self));
+ /* Viewing as a subarray increases the number of dimensions */
if (PyDataType_HASSUBARRAY(newtype)) {
/*
* create new array object from data and update
@@ -560,7 +575,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
if (temp == NULL) {
return -1;
}
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
((PyArrayObject_fields *)self)->dimensions = PyArray_DIMS(temp);
((PyArrayObject_fields *)self)->nd = PyArray_NDIM(temp);
((PyArrayObject_fields *)self)->strides = PyArray_STRIDES(temp);
@@ -572,12 +587,12 @@ array_descr_set(PyArrayObject *self, PyObject *arg)
Py_DECREF(temp);
}
+ Py_DECREF(PyArray_DESCR(self));
((PyArrayObject_fields *)self)->descr = newtype;
PyArray_UpdateFlags(self, NPY_ARRAY_UPDATE_ALL);
return 0;
fail:
- PyErr_SetString(PyExc_ValueError, msg);
Py_DECREF(newtype);
return -1;
}
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index c88cdfdcb..21bcd6cad 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -23,6 +23,7 @@
#include "npy_sort.h"
#include "npy_partition.h"
#include "npy_binsearch.h"
+#include "alloc.h"
/*NUMPY_API
* Take
@@ -765,7 +766,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
Py_XDECREF(mps[i]);
}
Py_DECREF(ap);
- PyDataMem_FREE(mps);
+ npy_free_cache(mps, n * sizeof(mps[0]));
if (out != NULL && out != obj) {
Py_INCREF(out);
Py_DECREF(obj);
@@ -779,7 +780,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
Py_XDECREF(mps[i]);
}
Py_XDECREF(ap);
- PyDataMem_FREE(mps);
+ npy_free_cache(mps, n * sizeof(mps[0]));
PyArray_XDECREF_ERR(obj);
return NULL;
}
@@ -827,7 +828,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
if (needcopy) {
- buffer = PyDataMem_NEW(N * elsize);
+ buffer = npy_alloc_cache(N * elsize);
if (buffer == NULL) {
ret = -1;
goto fail;
@@ -869,12 +870,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
if (part == NULL) {
ret = sort(bufptr, N, op);
-#if defined(NPY_PY3K)
- /* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -885,12 +883,9 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
npy_intp i;
for (i = 0; i < nkth; ++i) {
ret = part(bufptr, N, kth[i], pivots, &npiv, op);
-#if defined(NPY_PY3K)
- /* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -914,7 +909,7 @@ _new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
}
fail:
- PyDataMem_FREE(buffer);
+ npy_free_cache(buffer, N * elsize);
NPY_END_THREADS_DESCR(PyArray_DESCR(op));
if (ret < 0 && !PyErr_Occurred()) {
/* Out of memory during sorting or buffer creation */
@@ -978,7 +973,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(op));
if (needcopy) {
- valbuffer = PyDataMem_NEW(N * elsize);
+ valbuffer = npy_alloc_cache(N * elsize);
if (valbuffer == NULL) {
ret = -1;
goto fail;
@@ -986,7 +981,7 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
}
if (needidxbuffer) {
- idxbuffer = (npy_intp *)PyDataMem_NEW(N * sizeof(npy_intp));
+ idxbuffer = (npy_intp *)npy_alloc_cache(N * sizeof(npy_intp));
if (idxbuffer == NULL) {
ret = -1;
goto fail;
@@ -1076,8 +1071,8 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
}
fail:
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(idxbuffer);
+ npy_free_cache(valbuffer, N * elsize);
+ npy_free_cache(idxbuffer, N * sizeof(npy_intp));
NPY_END_THREADS_DESCR(PyArray_DESCR(op));
if (ret < 0) {
if (!PyErr_Occurred()) {
@@ -1493,13 +1488,13 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
char *valbuffer, *indbuffer;
int *swaps;
- valbuffer = PyDataMem_NEW(N*maxelsize);
+ valbuffer = npy_alloc_cache(N * maxelsize);
if (valbuffer == NULL) {
goto fail;
}
- indbuffer = PyDataMem_NEW(N*sizeof(npy_intp));
+ indbuffer = npy_alloc_cache(N * sizeof(npy_intp));
if (indbuffer == NULL) {
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
goto fail;
}
swaps = malloc(n*sizeof(int));
@@ -1531,8 +1526,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
#else
if (rcode < 0) {
#endif
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(valbuffer, N * maxelsize);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
free(swaps);
goto fail;
}
@@ -1542,8 +1537,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
sizeof(npy_intp), N, sizeof(npy_intp));
PyArray_ITER_NEXT(rit);
}
- PyDataMem_FREE(valbuffer);
- PyDataMem_FREE(indbuffer);
+ npy_free_cache(valbuffer, N * maxelsize);
+ npy_free_cache(indbuffer, N * sizeof(npy_intp));
free(swaps);
}
else {
@@ -2330,7 +2325,7 @@ finish:
return NULL;
}
- for (i = 0; i < ndim; ++i) {
+ for (i = 0; i < PyArray_NDIM(ret); ++i) {
if (PyArray_DIMS(ret)[i] == 0) {
is_empty = 1;
break;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 01910a657..b8cf4edf6 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -926,7 +926,7 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
goto skip;
}
start = PyArray_PyIntAsIntp(ind);
- if (start==-1 && PyErr_Occurred()) {
+ if (error_converting(start)) {
PyErr_Clear();
}
else {
@@ -1055,7 +1055,28 @@ static PyMappingMethods iter_as_mapping = {
};
-
+/* Two options:
+ * 1) underlying array is contiguous
+ * -- return 1-d wrapper around it
+ * 2) underlying array is not contiguous
+ * -- make new 1-d contiguous array with updateifcopy flag set
+ * to copy back to the old array
+ *
+ * If underlying array is readonly, then we make the output array readonly
+ * and updateifcopy does not apply.
+ *
+ * Changed 2017-07-21, 1.14.0.
+ *
+ * In order to start the process of removing UPDATEIFCOPY, see gh-7054, the
+ * behavior is changed to always return a non-writeable copy when the base
+ * array is non-contiguous. Doing that will hopefully smoke out those few
+ * folks who assign to the result with the expectation that the base array
+ * will be changed. At a later date non-contiguous arrays will always return
+ * writeable copies.
+ *
+ * Note that the type and argument expected for the __array__ method is
+ * ignored.
+ */
static PyArrayObject *
iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
{
@@ -1063,27 +1084,14 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
PyArrayObject *ret;
npy_intp size;
- /* Any argument ignored */
-
- /* Two options:
- * 1) underlying array is contiguous
- * -- return 1-d wrapper around it
- * 2) underlying array is not contiguous
- * -- make new 1-d contiguous array with updateifcopy flag set
- * to copy back to the old array
- *
- * If underlying array is readonly, then we make the output array readonly
- * and updateifcopy does not apply.
- */
size = PyArray_SIZE(it->ao);
Py_INCREF(PyArray_DESCR(it->ao));
+
if (PyArray_ISCONTIGUOUS(it->ao)) {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, PyArray_DATA(it->ao),
- PyArray_FLAGS(it->ao),
- (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, PyArray_DATA(it->ao), PyArray_FLAGS(it->ao),
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1094,11 +1102,10 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
}
}
else {
- ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type,
- PyArray_DESCR(it->ao),
- 1, &size,
- NULL, NULL,
- 0, (PyObject *)it->ao);
+ ret = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type, PyArray_DESCR(it->ao), 1, &size,
+ NULL, NULL, 0,
+ (PyObject *)it->ao);
if (ret == NULL) {
return NULL;
}
@@ -1106,16 +1113,7 @@ iter_array(PyArrayIterObject *it, PyObject *NPY_UNUSED(op))
Py_DECREF(ret);
return NULL;
}
- if (PyArray_ISWRITEABLE(it->ao)) {
- Py_INCREF(it->ao);
- if (PyArray_SetUpdateIfCopyBase(ret, it->ao) < 0) {
- Py_DECREF(ret);
- return NULL;
- }
- }
- else {
- PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
- }
+ PyArray_CLEARFLAGS(ret, NPY_ARRAY_WRITEABLE);
}
return ret;
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 6c300a2bf..1a92365c8 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -139,6 +139,196 @@ PyArray_MapIterSwapAxes(PyArrayMapIterObject *mit, PyArrayObject **ret, int getm
*ret = (PyArrayObject *)new;
}
+static NPY_INLINE void
+multi_DECREF(PyObject **objects, npy_intp n)
+{
+ npy_intp i;
+ for (i = 0; i < n; i++) {
+ Py_DECREF(objects[i]);
+ }
+}
+
+/**
+ * Unpack a tuple into an array of new references. Returns the number of objects
+ * unpacked.
+ *
+ * Useful if a tuple is being iterated over multiple times, or for a code path
+ * that doesn't always want the overhead of allocating a tuple.
+ */
+static NPY_INLINE npy_intp
+unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n)
+{
+ npy_intp n, i;
+ n = PyTuple_GET_SIZE(index);
+ if (n > result_n) {
+ PyErr_SetString(PyExc_IndexError,
+ "too many indices for array");
+ return -1;
+ }
+ for (i = 0; i < n; i++) {
+ result[i] = PyTuple_GET_ITEM(index, i);
+ Py_INCREF(result[i]);
+ }
+ return n;
+}
+
+/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
+static NPY_INLINE npy_intp
+unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n)
+{
+ Py_INCREF(index);
+ result[0] = index;
+ return 1;
+}
+
+/**
+ * Turn an index argument into a c-array of `PyObject *`s, one for each index.
+ *
+ * When a scalar is passed, this is written directly to the buffer. When a
+ * tuple is passed, the tuple elements are unpacked into the buffer.
+ *
+ * When some other sequence is passed, this implements the following section
+ * from the advanced indexing docs to decide whether to unpack or just write
+ * one element:
+ *
+ * > In order to remain backward compatible with a common usage in Numeric,
+ * > basic slicing is also initiated if the selection object is any non-ndarray
+ * > sequence (such as a list) containing slice objects, the Ellipsis object,
+ * > or the newaxis object, but not for integer arrays or other embedded
+ * > sequences.
+ *
+ * It might be worth deprecating this behaviour (gh-4434), in which case the
+ * entire function should become a simple check of PyTuple_Check.
+ *
+ * @param index The index object, which may or may not be a tuple. This is
+ * a borrowed reference.
+ * @param result An empty buffer of PyObject* to write each index component
+ * to. The references written are new.
+ * @param result_n The length of the result buffer
+ *
+ * @returns The number of items in `result`, or -1 if an error occurred.
+ * The entries in `result` at and beyond this index should be
+ * assumed to contain garbage, even if they were initialized
+ * to NULL, so are not safe to Py_XDECREF. Use multi_DECREF to
+ * dispose of them.
+ */
+NPY_NO_EXPORT npy_intp
+unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
+{
+ npy_intp n, i;
+ npy_bool commit_to_unpack;
+
+ /* Fast route for passing a tuple */
+ if (PyTuple_CheckExact(index)) {
+ return unpack_tuple((PyTupleObject *)index, result, result_n);
+ }
+
+ /* Obvious single-entry cases */
+ if (0 /* to aid macros below */
+#if !defined(NPY_PY3K)
+ || PyInt_CheckExact(index)
+#else
+ || PyLong_CheckExact(index)
+#endif
+ || index == Py_None
+ || PySlice_Check(index)
+ || PyArray_Check(index)
+ || !PySequence_Check(index)) {
+
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /*
+ * Passing a tuple subclass - coerce to the base type. This incurs an
+ * allocation, but doesn't need to be a fast path anyway
+ */
+ if (PyTuple_Check(index)) {
+ PyTupleObject *tup = (PyTupleObject *) PySequence_Tuple(index);
+ if (tup == NULL) {
+ return -1;
+ }
+ n = unpack_tuple(tup, result, result_n);
+ Py_DECREF(tup);
+ return n;
+ }
+
+ /*
+ * At this point, we're left with a non-tuple, non-array, sequence:
+ * typically, a list. We use some somewhat-arbitrary heuristics from here
+ * onwards to decide whether to treat that list as a single index, or a
+ * list of indices.
+ */
+
+ /* if len fails, treat like a scalar */
+ n = PySequence_Size(index);
+ if (n < 0) {
+ PyErr_Clear();
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /*
+ * Backwards compatibility only takes effect for short sequences - otherwise
+ * we treat it like any other scalar.
+ *
+ * Sequences < NPY_MAXDIMS with any slice objects
+ * or newaxis, Ellipsis or other arrays or sequences
+ * embedded, are considered equivalent to an indexing
+ * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
+ */
+ if (n >= NPY_MAXDIMS) {
+ return unpack_scalar(index, result, result_n);
+ }
+
+ /* In case we change result_n elsewhere */
+ assert(n <= result_n);
+
+ /*
+ * Some other type of short sequence - assume we should unpack it like a
+ * tuple, and then decide whether that was actually necessary.
+ */
+ commit_to_unpack = 0;
+ for (i = 0; i < n; i++) {
+ PyObject *tmp_obj = result[i] = PySequence_GetItem(index, i);
+
+ if (commit_to_unpack) {
+ /* propagate errors */
+ if (tmp_obj == NULL) {
+ multi_DECREF(result, i);
+ return -1;
+ }
+ }
+ else {
+ /*
+ * if getitem fails (unusual) before we've committed, then stop
+ * unpacking
+ */
+ if (tmp_obj == NULL) {
+ PyErr_Clear();
+ break;
+ }
+
+ /* decide if we should treat this sequence like a tuple */
+ if (PyArray_Check(tmp_obj)
+ || PySequence_Check(tmp_obj)
+ || PySlice_Check(tmp_obj)
+ || tmp_obj == Py_Ellipsis
+ || tmp_obj == Py_None) {
+ commit_to_unpack = 1;
+ }
+ }
+ }
+
+ /* unpacking was the right thing to do, and we already did it */
+ if (commit_to_unpack) {
+ return n;
+ }
+ /* got to the end, never found an indication that we should have unpacked */
+ else {
+ /* we partially filled result, so empty it first */
+ multi_DECREF(result, i);
+ return unpack_scalar(index, result, result_n);
+ }
+}
/**
* Prepare an npy_index_object from the python slicing object.
@@ -174,7 +364,6 @@ prepare_index(PyArrayObject *self, PyObject *index,
int i;
npy_intp n;
- npy_bool make_tuple = 0;
PyObject *obj = NULL;
PyArrayObject *arr;
@@ -182,81 +371,16 @@ prepare_index(PyArrayObject *self, PyObject *index,
int ellipsis_pos = -1;
/*
- * The index might be a multi-dimensional index, but not yet a tuple
- * this makes it a tuple in that case.
- *
- * TODO: Refactor into its own function.
+ * The choice of only unpacking `2*NPY_MAXDIMS` items is historic.
+ * The longest "reasonable" index that produces a result of <= 32 dimensions
+ * is `(0,)*np.MAXDIMS + (None,)*np.MAXDIMS`. Longer indices can exist, but
+ * are uncommon.
*/
- if (!PyTuple_CheckExact(index)
- /* Next three are just to avoid slow checks */
-#if !defined(NPY_PY3K)
- && (!PyInt_CheckExact(index))
-#else
- && (!PyLong_CheckExact(index))
-#endif
- && (index != Py_None)
- && (!PySlice_Check(index))
- && (!PyArray_Check(index))
- && (PySequence_Check(index))) {
- /*
- * Sequences < NPY_MAXDIMS with any slice objects
- * or newaxis, Ellipsis or other arrays or sequences
- * embedded, are considered equivalent to an indexing
- * tuple. (`a[[[1,2], [3,4]]] == a[[1,2], [3,4]]`)
- */
+ PyObject *raw_indices[NPY_MAXDIMS*2];
- if (PyTuple_Check(index)) {
- /* If it is already a tuple, make it an exact tuple anyway */
- n = 0;
- make_tuple = 1;
- }
- else {
- n = PySequence_Size(index);
- }
- if (n < 0 || n >= NPY_MAXDIMS) {
- n = 0;
- }
- for (i = 0; i < n; i++) {
- PyObject *tmp_obj = PySequence_GetItem(index, i);
- /* if getitem fails (unusual) treat this as a single index */
- if (tmp_obj == NULL) {
- PyErr_Clear();
- make_tuple = 0;
- break;
- }
- if (PyArray_Check(tmp_obj) || PySequence_Check(tmp_obj)
- || PySlice_Check(tmp_obj) || tmp_obj == Py_Ellipsis
- || tmp_obj == Py_None) {
- make_tuple = 1;
- Py_DECREF(tmp_obj);
- break;
- }
- Py_DECREF(tmp_obj);
- }
-
- if (make_tuple) {
- /* We want to interpret it as a tuple, so make it one */
- index = PySequence_Tuple(index);
- if (index == NULL) {
- return -1;
- }
- }
- }
-
- /* If the index is not a tuple, handle it the same as (index,) */
- if (!PyTuple_CheckExact(index)) {
- obj = index;
- index_ndim = 1;
- }
- else {
- n = PyTuple_GET_SIZE(index);
- if (n > NPY_MAXDIMS * 2) {
- PyErr_SetString(PyExc_IndexError,
- "too many indices for array");
- goto fail;
- }
- index_ndim = (int)n;
- obj = NULL;
+ index_ndim = unpack_indices(index, raw_indices, NPY_MAXDIMS*2);
+ if (index_ndim == -1) {
+ return -1;
}
/*
@@ -275,14 +399,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
goto failed_building_indices;
}
- /* Check for single index. obj is already set then. */
- if ((curr_idx != 0) || (obj == NULL)) {
- obj = PyTuple_GET_ITEM(index, get_idx++);
- }
- else {
- /* only one loop */
- get_idx += 1;
- }
+ obj = raw_indices[get_idx++];
/**** Try the cascade of possible indices ****/
@@ -355,7 +472,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
#endif
npy_intp ind = PyArray_PyIntAsIntp(obj);
- if ((ind == -1) && PyErr_Occurred()) {
+ if (error_converting(ind)) {
PyErr_Clear();
}
else {
@@ -526,7 +643,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
npy_intp ind = PyArray_PyIntAsIntp((PyObject *)arr);
Py_DECREF(arr);
- if ((ind == -1) && PyErr_Occurred()) {
+ if (error_converting(ind)) {
goto failed_building_indices;
}
else {
@@ -686,9 +803,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
*ndim = new_ndim + fancy_ndim;
*out_fancy_ndim = fancy_ndim;
- if (make_tuple) {
- Py_DECREF(index);
- }
+ multi_DECREF(raw_indices, index_ndim);
return index_type;
@@ -696,10 +811,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
for (i=0; i < curr_idx; i++) {
Py_XDECREF(indices[i].object);
}
- fail:
- if (make_tuple) {
- Py_DECREF(index);
- }
+ multi_DECREF(raw_indices, index_ndim);
return -1;
}
@@ -1334,10 +1446,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyObject *fields, *names;
PyArray_Descr *view_dtype;
- /* variables needed to make a copy, to remove in the future */
- static PyObject *copyfunc = NULL;
- PyObject *viewcopy;
-
seqlen = PySequence_Size(ind);
/* quit if have a 0-d array (seqlen==-1) or a 0-len array */
@@ -1390,6 +1498,35 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
Py_DECREF(names);
return 0;
}
+ // disallow use of titles as index
+ if (PyTuple_Size(tup) == 3) {
+ PyObject *title = PyTuple_GET_ITEM(tup, 2);
+ int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
+ if (titlecmp == 1) {
+ // if title == name, we were given a title, not a field name
+ PyErr_SetString(PyExc_KeyError,
+ "cannot use field titles in multi-field index");
+ }
+ if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
+ Py_DECREF(title);
+ Py_DECREF(name);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
+ Py_DECREF(title);
+ }
+ // disallow duplicate field indices
+ if (PyDict_Contains(fields, name)) {
+ PyObject *errmsg = PyUString_FromString(
+ "duplicate field of name ");
+ PyUString_ConcatAndDel(&errmsg, name);
+ PyErr_SetObject(PyExc_KeyError, errmsg);
+ Py_DECREF(errmsg);
+ Py_DECREF(fields);
+ Py_DECREF(names);
+ return 0;
+ }
if (PyDict_SetItem(fields, name, tup) < 0) {
Py_DECREF(name);
Py_DECREF(fields);
@@ -1433,29 +1570,6 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return 0;
}
- /*
- * Return copy for now (future plan to return the view above). All the
- * following code in this block can then be replaced by "return 0;"
- */
- npy_cache_import("numpy.core._internal", "_copy_fields", &copyfunc);
- if (copyfunc == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
-
- PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
- if (viewcopy == NULL) {
- Py_DECREF(*view);
- *view = NULL;
- return 0;
- }
- Py_DECREF(*view);
- *view = (PyArrayObject*)viewcopy;
-
- /* warn when writing to the copy */
- PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
return 0;
}
return -1;
@@ -1489,11 +1603,6 @@ array_subscript(PyArrayObject *self, PyObject *op)
if (view == NULL) {
return NULL;
}
-
- /* warn if writing to a copy. copies will have no base */
- if (PyArray_BASE(view) == NULL) {
- PyArray_ENABLEFLAGS(view, NPY_ARRAY_WARN_ON_WRITE);
- }
return (PyObject*)view;
}
}
@@ -1780,17 +1889,6 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
PyArrayObject *view;
int ret = _get_field_view(self, ind, &view);
if (ret == 0){
-
-#if defined(NPY_PY3K)
- if (!PyUnicode_Check(ind)) {
-#else
- if (!PyString_Check(ind) && !PyUnicode_Check(ind)) {
-#endif
- PyErr_SetString(PyExc_ValueError,
- "multi-field assignment is not supported");
- return -1;
- }
-
if (view == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 898887042..efa97dd65 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -21,6 +21,7 @@
#include "shape.h"
#include "methods.h"
+#include "alloc.h"
/* NpyArg_ParseKeywords
@@ -201,11 +202,11 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
}
ret = PyArray_Newshape(self, &newshape, order);
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
return ret;
fail:
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
return NULL;
}
@@ -517,12 +518,13 @@ PyArray_Byteswap(PyArrayObject *self, npy_bool inplace)
static PyObject *
-array_byteswap(PyArrayObject *self, PyObject *args)
+array_byteswap(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
npy_bool inplace = NPY_FALSE;
+ static char *kwlist[] = {"inplace", NULL};
- if (!PyArg_ParseTuple(args, "|O&:byteswap",
- PyArray_BoolConverter, &inplace)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist,
+ PyArray_BoolConverter, &inplace)) {
return NULL;
}
return PyArray_Byteswap(self, inplace);
@@ -637,7 +639,7 @@ array_toscalar(PyArrayObject *self, PyObject *args)
npy_intp value, size = PyArray_SIZE(self);
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
@@ -657,7 +659,7 @@ array_toscalar(PyArrayObject *self, PyObject *args)
for (idim = 0; idim < ndim; ++idim) {
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
multi_index[idim] = value;
@@ -714,7 +716,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
npy_intp value, size = PyArray_SIZE(self);
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, 0));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
@@ -734,7 +736,7 @@ array_setscalar(PyArrayObject *self, PyObject *args)
for (idim = 0; idim < ndim; ++idim) {
value = PyArray_PyIntAsIntp(PyTuple_GET_ITEM(args, idim));
- if (value == -1 && PyErr_Occurred()) {
+ if (error_converting(value)) {
return NULL;
}
multi_index[idim] = value;
@@ -971,20 +973,18 @@ array_getarray(PyArrayObject *self, PyObject *args)
/* convert to PyArray_Type */
if (!PyArray_CheckExact(self)) {
PyArrayObject *new;
- PyTypeObject *subtype = &PyArray_Type;
-
- if (!PyType_IsSubtype(Py_TYPE(self), &PyArray_Type)) {
- subtype = &PyArray_Type;
- }
Py_INCREF(PyArray_DESCR(self));
- new = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- PyArray_DESCR(self),
- PyArray_NDIM(self),
- PyArray_DIMS(self),
- PyArray_STRIDES(self),
- PyArray_DATA(self),
- PyArray_FLAGS(self), NULL);
+ new = (PyArrayObject *)PyArray_NewFromDescr(
+ &PyArray_Type,
+ PyArray_DESCR(self),
+ PyArray_NDIM(self),
+ PyArray_DIMS(self),
+ PyArray_STRIDES(self),
+ PyArray_DATA(self),
+ PyArray_FLAGS(self),
+ NULL
+ );
if (new == NULL) {
return NULL;
}
@@ -1070,7 +1070,7 @@ array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
/* Separate from array_copy to make __copy__ preserve Fortran contiguity. */
static PyObject *
-array_copy_keeporder(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_copy_keeporder(PyArrayObject *self, PyObject *args)
{
if (!PyArg_ParseTuple(args, ":__copy__")) {
return NULL;
@@ -1111,7 +1111,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER);
- PyDimMem_FREE(newshape.ptr);
+ npy_free_cache_dim_obj(newshape);
if (ret == NULL) {
return NULL;
}
@@ -1779,7 +1779,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_CLEARFLAGS(self, NPY_ARRAY_UPDATEIFCOPY);
if (PyArray_DIMS(self) != NULL) {
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
fa->dimensions = NULL;
}
@@ -1788,7 +1788,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->nd = nd;
if (nd > 0) {
- fa->dimensions = PyDimMem_NEW(3*nd);
+ fa->dimensions = npy_alloc_cache_dim(3*nd);
if (fa->dimensions == NULL) {
return PyErr_NoMemory();
}
@@ -1802,7 +1802,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
- int swap=!PyArray_ISNOTSWAPPED(self);
+ int swap = PyArray_ISBYTESWAPPED(self);
fa->data = datastr;
#ifndef NPY_PY3K
/* Check that the string is not interned */
@@ -1816,7 +1816,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->data = PyDataMem_NEW(num);
if (PyArray_DATA(self) == NULL) {
fa->nd = 0;
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
Py_DECREF(rawdata);
return PyErr_NoMemory();
}
@@ -1860,7 +1860,7 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (PyArray_DATA(self) == NULL) {
fa->nd = 0;
fa->data = PyDataMem_NEW(PyArray_DESCR(self)->elsize);
- PyDimMem_FREE(PyArray_DIMS(self));
+ npy_free_cache_dim_array(self);
return PyErr_NoMemory();
}
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) {
@@ -2002,7 +2002,7 @@ array_transpose(PyArrayObject *self, PyObject *args)
return NULL;
}
ret = PyArray_Transpose(self, &permute);
- PyDimMem_FREE(permute.ptr);
+ npy_free_cache_dim_obj(permute);
}
return ret;
@@ -2569,7 +2569,7 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"byteswap",
(PyCFunction)array_byteswap,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"choose",
(PyCFunction)array_choose,
METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index de05cc280..a20cf6257 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -3,8 +3,10 @@
#include <Python.h>
#define _NPY_NO_DEPRECATIONS /* for NPY_CHAR */
#include "numpy/arrayobject.h"
+#include "numpy/npy_math.h"
#include "mem_overlap.h"
#include "npy_extint128.h"
+#include "common.h"
/* test PyArray_IsPythonScalar, before including private py3 compat header */
static PyObject *
@@ -1000,11 +1002,11 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
for (j = 0; j < nterms; ++j) {
terms[j].a = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(A, j));
- if (terms[j].a == -1 && PyErr_Occurred()) {
+ if (error_converting(terms[j].a)) {
goto fail;
}
terms[j].ub = (npy_int64)PyInt_AsSsize_t(PyTuple_GET_ITEM(U, j));
- if (terms[j].ub == -1 && PyErr_Occurred()) {
+ if (error_converting(terms[j].ub)) {
goto fail;
}
}
@@ -1559,6 +1561,125 @@ extint_ceildiv_128_64(PyObject *NPY_UNUSED(self), PyObject *args) {
}
+static char get_fpu_mode_doc[] = (
+ "get_fpu_mode()\n"
+ "\n"
+ "Get the current FPU control word, in a platform-dependent format.\n"
+ "Returns None if not implemented on current platform.");
+
+static PyObject *
+get_fpu_mode(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ if (!PyArg_ParseTuple(args, "")) {
+ return NULL;
+ }
+
+#if defined(_MSC_VER)
+ {
+ unsigned int result = 0;
+ result = _controlfp(0, 0);
+ return PyLong_FromLongLong(result);
+ }
+#elif defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+ {
+ unsigned short cw = 0;
+ __asm__("fstcw %w0" : "=m" (cw));
+ return PyLong_FromLongLong(cw);
+ }
+#else
+ Py_RETURN_NONE;
+#endif
+}
+
+/*
+ * npymath wrappers
+ */
+
+/**begin repeat
+ * #name = cabs, carg#
+ */
+
+/**begin repeat1
+ * #itype = npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #ITYPE = NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE#
+ * #otype = npy_float, npy_double, npy_longdouble#
+ * #OTYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE#
+ * #suffix= f, , l#
+ */
+
+static PyObject *
+call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL;
+
+ if (!PyArg_ParseTuple(args, "O", &z_py)) {
+ return NULL;
+ }
+
+ z_arr = PyArray_FROMANY(z_py, @ITYPE@, 0, 0, NPY_ARRAY_CARRAY_RO);
+ if (z_arr == NULL) {
+ return NULL;
+ }
+
+ w_arr = PyArray_SimpleNew(0, NULL, @OTYPE@);
+ if (w_arr == NULL) {
+ Py_DECREF(z_arr);
+ return NULL;
+ }
+
+ *(@otype@*)PyArray_DATA((PyArrayObject *)w_arr) =
+ npy_@name@@suffix@(*(@itype@*)PyArray_DATA((PyArrayObject *)z_arr));
+
+ Py_DECREF(z_arr);
+ return w_arr;
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+/**begin repeat
+ * #name = log10, cosh, sinh, tan, tanh#
+ */
+
+/**begin repeat1
+ * #type = npy_float, npy_double, npy_longdouble#
+ * #TYPE = NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE#
+ * #suffix= f, , l#
+ */
+
+static PyObject *
+call_npy_@name@@suffix@(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ PyObject *z_py = NULL, *z_arr = NULL, *w_arr = NULL;
+
+ if (!PyArg_ParseTuple(args, "O", &z_py)) {
+ return NULL;
+ }
+
+ z_arr = PyArray_FROMANY(z_py, @TYPE@, 0, 0, NPY_ARRAY_CARRAY_RO);
+ if (z_arr == NULL) {
+ return NULL;
+ }
+
+ w_arr = PyArray_SimpleNew(0, NULL, @TYPE@);
+ if (w_arr == NULL) {
+ Py_DECREF(z_arr);
+ return NULL;
+ }
+
+ *(@type@*)PyArray_DATA((PyArrayObject *)w_arr) =
+ npy_@name@@suffix@(*(@type@*)PyArray_DATA((PyArrayObject *)z_arr));
+
+ Py_DECREF(z_arr);
+ return w_arr;
+}
+
+/**end repeat1**/
+
+/**end repeat**/
+
+
static PyMethodDef Multiarray_TestsMethods[] = {
{"IsPythonScalar",
IsPythonScalar,
@@ -1649,6 +1770,37 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"extint_ceildiv_128_64",
extint_ceildiv_128_64,
METH_VARARGS, NULL},
+ {"get_fpu_mode",
+ get_fpu_mode,
+ METH_VARARGS, get_fpu_mode_doc},
+/**begin repeat
+ * #name = cabs, carg#
+ */
+
+/**begin repeat1
+ * #suffix = f, , l#
+ */
+ {"npy_@name@@suffix@",
+ call_npy_@name@@suffix@,
+ METH_VARARGS, NULL},
+/**end repeat1**/
+
+/**end repeat**/
+
+/**begin repeat
+ * #name = log10, cosh, sinh, tan, tanh#
+ */
+
+/**begin repeat1
+ * #suffix= f, , l#
+ */
+ {"npy_@name@@suffix@",
+ call_npy_@name@@suffix@,
+ METH_VARARGS, NULL},
+/**end repeat1**/
+
+/**end repeat**/
+
{NULL, NULL, 0, NULL} /* Sentinel */
};
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 81a1bc543..66a076dc6 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -315,20 +315,39 @@ PyArray_Free(PyObject *op, void *ptr)
return 0;
}
+/*
+ * Get the ndarray subclass with the highest priority
+ */
+NPY_NO_EXPORT PyTypeObject *
+PyArray_GetSubType(int narrays, PyArrayObject **arrays) {
+ PyTypeObject *subtype = &PyArray_Type;
+ double priority = NPY_PRIORITY;
+ int i;
+
+ /* Get the priority subtype for the array */
+ for (i = 0; i < narrays; ++i) {
+ if (Py_TYPE(arrays[i]) != subtype) {
+ double pr = PyArray_GetPriority((PyObject *)(arrays[i]), 0.0);
+ if (pr > priority) {
+ priority = pr;
+ subtype = Py_TYPE(arrays[i]);
+ }
+ }
+ }
+
+ return subtype;
+}
+
/*
* Concatenates a list of ndarrays.
*/
NPY_NO_EXPORT PyArrayObject *
-PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
+PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
+ PyArrayObject* ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays, idim, ndim;
- npy_intp shape[NPY_MAXDIMS], s, strides[NPY_MAXDIMS];
- int strideperm[NPY_MAXDIMS];
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
+ npy_intp shape[NPY_MAXDIMS];
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -383,47 +402,57 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != ndim) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array has wrong dimensionality");
+ return NULL;
+ }
+ if (!PyArray_CompareLists(shape, PyArray_SHAPE(ret), ndim)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong shape");
+ return NULL;
}
+ Py_INCREF(ret);
}
+ else {
+ npy_intp s, strides[NPY_MAXDIMS];
+ int strideperm[NPY_MAXDIMS];
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- /*
- * Figure out the permutation to apply to the strides to match
- * the memory layout of the input arrays, using ambiguity
- * resolution rules matching that of the NpyIter.
- */
- PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
- s = dtype->elsize;
- for (idim = ndim-1; idim >= 0; --idim) {
- int iperm = strideperm[idim];
- strides[iperm] = s;
- s *= shape[iperm];
- }
-
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- ndim,
- shape,
- strides,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
+
+ /*
+ * Figure out the permutation to apply to the strides to match
+ * the memory layout of the input arrays, using ambiguity
+ * resolution rules matching that of the NpyIter.
+ */
+ PyArray_CreateMultiSortedStridePerm(narrays, arrays, ndim, strideperm);
+ s = dtype->elsize;
+ for (idim = ndim-1; idim >= 0; --idim) {
+ int iperm = strideperm[idim];
+ strides[iperm] = s;
+ s *= shape[iperm];
+ }
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ ndim,
+ shape,
+ strides,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -462,15 +491,10 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis)
*/
NPY_NO_EXPORT PyArrayObject *
PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
- NPY_ORDER order)
+ NPY_ORDER order, PyArrayObject *ret)
{
- PyTypeObject *subtype = &PyArray_Type;
- double priority = NPY_PRIORITY;
int iarrays;
- npy_intp stride;
npy_intp shape = 0;
- PyArray_Descr *dtype = NULL;
- PyArrayObject *ret = NULL;
PyArrayObject_fields *sliding_view = NULL;
if (narrays <= 0) {
@@ -494,36 +518,45 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
}
}
- /* Get the priority subtype for the array */
- for (iarrays = 0; iarrays < narrays; ++iarrays) {
- if (Py_TYPE(arrays[iarrays]) != subtype) {
- double pr = PyArray_GetPriority((PyObject *)(arrays[iarrays]), 0.0);
- if (pr > priority) {
- priority = pr;
- subtype = Py_TYPE(arrays[iarrays]);
- }
+ if (ret != NULL) {
+ if (PyArray_NDIM(ret) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array must be 1D");
+ return NULL;
+ }
+ if (shape != PyArray_SIZE(ret)) {
+ PyErr_SetString(PyExc_ValueError,
+ "Output array is the wrong size");
+ return NULL;
}
+ Py_INCREF(ret);
}
+ else {
+ npy_intp stride;
- /* Get the resulting dtype from combining all the arrays */
- dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
- if (dtype == NULL) {
- return NULL;
- }
+ /* Get the priority subtype for the array */
+ PyTypeObject *subtype = PyArray_GetSubType(narrays, arrays);
- stride = dtype->elsize;
+ /* Get the resulting dtype from combining all the arrays */
+ PyArray_Descr *dtype = PyArray_ResultType(narrays, arrays, 0, NULL);
+ if (dtype == NULL) {
+ return NULL;
+ }
- /* Allocate the array for the result. This steals the 'dtype' reference. */
- ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
- dtype,
- 1,
- &shape,
- &stride,
- NULL,
- 0,
- NULL);
- if (ret == NULL) {
- return NULL;
+ stride = dtype->elsize;
+
+ /* Allocate the array for the result. This steals the 'dtype' reference. */
+ ret = (PyArrayObject *)PyArray_NewFromDescr(subtype,
+ dtype,
+ 1,
+ &shape,
+ &stride,
+ NULL,
+ 0,
+ NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
}
/*
@@ -558,22 +591,11 @@ PyArray_ConcatenateFlattenedArrays(int narrays, PyArrayObject **arrays,
return ret;
}
-
-/*NUMPY_API
- * Concatenate
- *
- * Concatenate an arbitrary Python sequence into an array.
- * op is a python object supporting the sequence interface.
- * Its elements will be concatenated together to form a single
- * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
- * each sequence object will be flattened before concatenation
-*/
NPY_NO_EXPORT PyObject *
-PyArray_Concatenate(PyObject *op, int axis)
+PyArray_ConcatenateInto(PyObject *op, int axis, PyArrayObject *ret)
{
int iarrays, narrays;
PyArrayObject **arrays;
- PyArrayObject *ret;
if (!PySequence_Check(op)) {
PyErr_SetString(PyExc_TypeError,
@@ -606,10 +628,10 @@ PyArray_Concatenate(PyObject *op, int axis)
}
if (axis >= NPY_MAXDIMS) {
- ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER);
+ ret = PyArray_ConcatenateFlattenedArrays(narrays, arrays, NPY_CORDER, ret);
}
else {
- ret = PyArray_ConcatenateArrays(narrays, arrays, axis);
+ ret = PyArray_ConcatenateArrays(narrays, arrays, axis, ret);
}
for (iarrays = 0; iarrays < narrays; ++iarrays) {
@@ -629,6 +651,21 @@ fail:
return NULL;
}
+/*NUMPY_API
+ * Concatenate
+ *
+ * Concatenate an arbitrary Python sequence into an array.
+ * op is a python object supporting the sequence interface.
+ * Its elements will be concatenated together to form a single
+ * multidimensional array. If axis is NPY_MAXDIMS or bigger, then
+ * each sequence object will be flattened before concatenation
+*/
+NPY_NO_EXPORT PyObject *
+PyArray_Concatenate(PyObject *op, int axis)
+{
+ return PyArray_ConcatenateInto(op, axis, NULL);
+}
+
static int
_signbit_set(PyArrayObject *arr)
{
@@ -1418,29 +1455,34 @@ array_putmask(PyObject *NPY_UNUSED(module), PyObject *args, PyObject *kwds)
/*
* Compare the field dictionaries for two types.
*
- * Return 1 if the contents are the same, 0 if not.
+ * Return 1 if the field types and field names of the two descrs are equal and
+ * in the same order, 0 if not.
*/
static int
-_equivalent_fields(PyObject *field1, PyObject *field2) {
+_equivalent_fields(PyArray_Descr *type1, PyArray_Descr *type2) {
- int same, val;
+ int val;
- if (field1 == field2) {
+ if (type1->fields == type2->fields && type1->names == type2->names) {
return 1;
}
- if (field1 == NULL || field2 == NULL) {
+ if (type1->fields == NULL || type2->fields == NULL) {
return 0;
}
- val = PyObject_RichCompareBool(field1, field2, Py_EQ);
+ val = PyObject_RichCompareBool(type1->fields, type2->fields, Py_EQ);
if (val != 1 || PyErr_Occurred()) {
- same = 0;
+ PyErr_Clear();
+ return 0;
}
- else {
- same = 1;
+
+ val = PyObject_RichCompareBool(type1->names, type2->names, Py_EQ);
+ if (val != 1 || PyErr_Occurred()) {
+ PyErr_Clear();
+ return 0;
}
- PyErr_Clear();
- return same;
+
+ return 1;
}
/*
@@ -1499,10 +1541,8 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2)
return ((type_num1 == type_num2)
&& _equivalent_subarrays(type1->subarray, type2->subarray));
}
- if (type_num1 == NPY_VOID
- || type_num2 == NPY_VOID) {
- return ((type_num1 == type_num2)
- && _equivalent_fields(type1->fields, type2->fields));
+ if (type_num1 == NPY_VOID || type_num2 == NPY_VOID) {
+ return ((type_num1 == type_num2) && _equivalent_fields(type1, type2));
}
if (type_num1 == NPY_DATETIME
|| type_num1 == NPY_TIMEDELTA
@@ -1662,7 +1702,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
ndmin_obj = PyDict_GetItem(kws, npy_ma_str_ndmin);
if (ndmin_obj) {
ndmin = PyLong_AsLong(ndmin_obj);
- if (ndmin == -1 && PyErr_Occurred()) {
+ if (error_converting(ndmin)) {
goto clean_type;
}
else if (ndmin > NPY_MAXDIMS) {
@@ -1853,12 +1893,12 @@ array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
ret = (PyArrayObject *)PyArray_Empty(shape.len, shape.ptr,
typecode, is_f_order);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
fail:
Py_XDECREF(typecode);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -2007,12 +2047,12 @@ array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
ret = (PyArrayObject *)PyArray_Zeros(shape.len, shape.ptr,
typecode, (int) is_f_order);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
fail:
Py_XDECREF(typecode);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return (PyObject *)ret;
}
@@ -2156,14 +2196,24 @@ static PyObject *
array_concatenate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *a0;
+ PyObject *out = NULL;
int axis = 0;
- static char *kwlist[] = {"seq", "axis", NULL};
+ static char *kwlist[] = {"seq", "axis", "out", NULL};
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&:concatenate", kwlist,
- &a0, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O&O:concatenate", kwlist,
+ &a0, PyArray_AxisConverter, &axis, &out)) {
return NULL;
}
- return PyArray_Concatenate(a0, axis);
+ if (out != NULL) {
+ if (out == Py_None) {
+ out = NULL;
+ }
+ else if (!PyArray_Check(out)) {
+ PyErr_SetString(PyExc_TypeError, "'out' must be an array");
+ return NULL;
+ }
+ }
+ return PyArray_ConcatenateInto(a0, axis, (PyArrayObject *)out);
}
static PyObject *
@@ -2947,7 +2997,7 @@ array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
ret = PyArray_NewFromDescr(subtype, dtype,
(int)shape.len, shape.ptr, NULL, NULL, 0, NULL);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
evil_global_disable_warn_O4O8_flag = 0;
@@ -2957,7 +3007,7 @@ fail:
evil_global_disable_warn_O4O8_flag = 0;
Py_XDECREF(dtype);
- PyDimMem_FREE(shape.ptr);
+ npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -3224,7 +3274,7 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), PyObject *args,
npy_bool ret;
PyObject *retobj = NULL;
NPY_CASTING casting = NPY_SAFE_CASTING;
- static char *kwlist[] = {"from", "to", "casting", NULL};
+ static char *kwlist[] = {"from_", "to", "casting", NULL};
if(!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:can_cast", kwlist,
&from_obj,
@@ -4607,15 +4657,13 @@ PyMODINIT_FUNC initmultiarray(void) {
if (PyType_Ready(&NpyBusDayCalendar_Type) < 0) {
return RETVAL;
}
-/* FIXME
- * There is no error handling here
- */
+
c_api = NpyCapsule_FromVoidPtr((void *)PyArray_API, NULL);
- PyDict_SetItemString(d, "_ARRAY_API", c_api);
- Py_DECREF(c_api);
- if (PyErr_Occurred()) {
+ if (c_api == NULL) {
goto err;
}
+ PyDict_SetItemString(d, "_ARRAY_API", c_api);
+ Py_DECREF(c_api);
/*
* PyExc_Exception should catch all the standard errors that are
@@ -4633,10 +4681,10 @@ PyMODINIT_FUNC initmultiarray(void) {
PyDict_SetItemString(d, "__version__", s);
Py_DECREF(s);
-/* FIXME
- * There is no error handling here
- */
s = NpyCapsule_FromVoidPtr((void *)_datetime_strings, NULL);
+ if (s == NULL) {
+ goto err;
+ }
PyDict_SetItemString(d, "DATETIMEUNITS", s);
Py_DECREF(s);
@@ -4666,23 +4714,15 @@ PyMODINIT_FUNC initmultiarray(void) {
ADDCONST(MAY_SHARE_EXACT);
#undef ADDCONST
- Py_INCREF(&PyArray_Type);
PyDict_SetItemString(d, "ndarray", (PyObject *)&PyArray_Type);
- Py_INCREF(&PyArrayIter_Type);
PyDict_SetItemString(d, "flatiter", (PyObject *)&PyArrayIter_Type);
- Py_INCREF(&PyArrayMultiIter_Type);
PyDict_SetItemString(d, "nditer", (PyObject *)&NpyIter_Type);
- Py_INCREF(&NpyIter_Type);
PyDict_SetItemString(d, "broadcast",
(PyObject *)&PyArrayMultiIter_Type);
- Py_INCREF(&PyArrayDescr_Type);
PyDict_SetItemString(d, "dtype", (PyObject *)&PyArrayDescr_Type);
-
- Py_INCREF(&PyArrayFlags_Type);
PyDict_SetItemString(d, "flagsobj", (PyObject *)&PyArrayFlags_Type);
/* Business day calendar object */
- Py_INCREF(&NpyBusDayCalendar_Type);
PyDict_SetItemString(d, "busdaycalendar",
(PyObject *)&NpyBusDayCalendar_Type);
set_flaginfo(d);
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 9661ed12b..1af396821 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -15,6 +15,8 @@
#include <numpy/arrayobject.h>
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "alloc.h"
+#include "common.h"
typedef struct NewNpyArrayIterObject_tag NewNpyArrayIterObject;
@@ -758,7 +760,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
&op_axes_in,
PyArray_IntpConverter, &itershape,
&buffersize)) {
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
return -1;
}
@@ -804,7 +806,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
}
}
else if (itershape.ptr != NULL) {
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
itershape.ptr = NULL;
}
@@ -832,7 +834,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
self->finished = 0;
}
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
/* Release the references we got to the ops and dtypes */
for (iop = 0; iop < nop; ++iop) {
@@ -843,7 +845,7 @@ npyiter_init(NewNpyArrayIterObject *self, PyObject *args, PyObject *kwds)
return 0;
fail:
- PyDimMem_FREE(itershape.ptr);
+ npy_free_cache_dim_obj(itershape);
for (iop = 0; iop < nop; ++iop) {
Py_XDECREF(op[iop]);
Py_XDECREF(op_request_dtypes[iop]);
@@ -1618,7 +1620,7 @@ npyiter_multi_index_set(NewNpyArrayIterObject *self, PyObject *value)
for (idim = 0; idim < ndim; ++idim) {
PyObject *v = PySequence_GetItem(value, idim);
multi_index[idim] = PyInt_AsLong(v);
- if (multi_index[idim]==-1 && PyErr_Occurred()) {
+ if (error_converting(multi_index[idim])) {
Py_XDECREF(v);
return -1;
}
@@ -1678,7 +1680,7 @@ static int npyiter_index_set(NewNpyArrayIterObject *self, PyObject *value)
if (NpyIter_HasIndex(self->iter)) {
npy_intp ind;
ind = PyInt_AsLong(value);
- if (ind==-1 && PyErr_Occurred()) {
+ if (error_converting(ind)) {
return -1;
}
if (NpyIter_GotoIndex(self->iter, ind) != NPY_SUCCEED) {
@@ -1728,7 +1730,7 @@ static int npyiter_iterindex_set(NewNpyArrayIterObject *self, PyObject *value)
}
iterindex = PyInt_AsLong(value);
- if (iterindex==-1 && PyErr_Occurred()) {
+ if (error_converting(iterindex)) {
return -1;
}
if (NpyIter_GotoIterIndex(self->iter, iterindex) != NPY_SUCCEED) {
@@ -2256,7 +2258,7 @@ npyiter_subscript(NewNpyArrayIterObject *self, PyObject *op)
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
- if (i == -1 && PyErr_Occurred()) {
+ if (error_converting(i)) {
return NULL;
}
return npyiter_seq_item(self, i);
@@ -2305,7 +2307,7 @@ npyiter_ass_subscript(NewNpyArrayIterObject *self, PyObject *op,
if (PyInt_Check(op) || PyLong_Check(op) ||
(PyIndex_Check(op) && !PySequence_Check(op))) {
npy_intp i = PyArray_PyIntAsIntp(op);
- if (i == -1 && PyErr_Occurred()) {
+ if (error_converting(i)) {
return -1;
}
return npyiter_seq_ass_item(self, i, value);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 1f5523b90..8d1e1a24c 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -91,6 +91,7 @@ PyArray_SetNumericOps(PyObject *dict)
SET(sqrt);
SET(cbrt);
SET(negative);
+ SET(positive);
SET(absolute);
SET(invert);
SET(left_shift);
@@ -143,6 +144,7 @@ PyArray_GetNumericOps(void)
GET(_ones_like);
GET(sqrt);
GET(negative);
+ GET(positive);
GET(absolute);
GET(invert);
GET(left_shift);
@@ -443,7 +445,7 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
return NPY_NOSCALAR;
}
val = PyInt_AsSsize_t(value);
- if (val == -1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
PyErr_Clear();
return NPY_NOSCALAR;
}
@@ -453,9 +455,14 @@ is_scalar_with_conversion(PyObject *o2, double* out_exponent)
return NPY_NOSCALAR;
}
-/* optimize float array or complex array to a scalar power */
-static PyObject *
-fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
+/*
+ * optimize float array or complex array to a scalar power
+ * returns 0 on success, -1 if no optimization is possible
+ * the result is in value (can be NULL if an error occurred)
+ */
+static int
+fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace,
+ PyObject **value)
{
double exponent;
NPY_SCALARKIND kind; /* NPY_NOSCALAR is not scalar */
@@ -464,17 +471,7 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
PyObject *fastop = NULL;
if (PyArray_ISFLOAT(a1) || PyArray_ISCOMPLEX(a1)) {
if (exponent == 1.0) {
- /* we have to do this one special, as the
- "copy" method of array objects isn't set
- up early enough to be added
- by PyArray_SetNumericOps.
- */
- if (inplace) {
- Py_INCREF(a1);
- return (PyObject *)a1;
- } else {
- return PyArray_Copy(a1);
- }
+ fastop = n_ops.positive;
}
else if (exponent == -1.0) {
fastop = n_ops.reciprocal;
@@ -489,15 +486,16 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
fastop = n_ops.square;
}
else {
- return NULL;
+ return -1;
}
if (inplace || can_elide_temp_unary(a1)) {
- return PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
}
else {
- return PyArray_GenericUnaryFunction(a1, fastop);
+ *value = PyArray_GenericUnaryFunction(a1, fastop);
}
+ return 0;
}
/* Because this is called with all arrays, we need to
* change the output if the kind of the scalar is different
@@ -507,36 +505,35 @@ fast_scalar_power(PyArrayObject *a1, PyObject *o2, int inplace)
else if (exponent == 2.0) {
fastop = n_ops.square;
if (inplace) {
- return PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
}
else {
/* We only special-case the FLOAT_SCALAR and integer types */
if (kind == NPY_FLOAT_SCALAR && PyArray_ISINTEGER(a1)) {
- PyObject *res;
PyArray_Descr *dtype = PyArray_DescrFromType(NPY_DOUBLE);
a1 = (PyArrayObject *)PyArray_CastToType(a1, dtype,
PyArray_ISFORTRAN(a1));
- if (a1 == NULL) {
- return NULL;
+ if (a1 != NULL) {
+ /* cast always creates a new array */
+ *value = PyArray_GenericInplaceUnaryFunction(a1, fastop);
+ Py_DECREF(a1);
}
- /* cast always creates a new array */
- res = PyArray_GenericInplaceUnaryFunction(a1, fastop);
- Py_DECREF(a1);
- return res;
}
else {
- return PyArray_GenericUnaryFunction(a1, fastop);
+ *value = PyArray_GenericUnaryFunction(a1, fastop);
}
}
+ return 0;
}
}
- return NULL;
+ /* no fast operation found */
+ return -1;
}
static PyObject *
array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
{
- PyObject *value;
+ PyObject *value = NULL;
if (modulo != Py_None) {
/* modular exponentiation is not implemented (gh-8804) */
@@ -545,8 +542,7 @@ array_power(PyArrayObject *a1, PyObject *o2, PyObject *modulo)
}
BINOP_GIVE_UP_IF_NEEDED(a1, o2, nb_power, array_power);
- value = fast_scalar_power(a1, o2, 0);
- if (!value) {
+ if (fast_scalar_power(a1, o2, 0, &value) != 0) {
value = PyArray_GenericBinaryFunction(a1, o2, n_ops.power);
}
return value;
@@ -565,7 +561,7 @@ array_negative(PyArrayObject *m1)
static PyObject *
array_absolute(PyArrayObject *m1)
{
- if (can_elide_temp_unary(m1)) {
+ if (can_elide_temp_unary(m1) && !PyArray_ISCOMPLEX(m1)) {
return PyArray_GenericInplaceUnaryFunction(m1, n_ops.absolute);
}
return PyArray_GenericUnaryFunction(m1, n_ops.absolute);
@@ -686,12 +682,11 @@ static PyObject *
array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo))
{
/* modulo is ignored! */
- PyObject *value;
+ PyObject *value = NULL;
INPLACE_GIVE_UP_IF_NEEDED(
a1, o2, nb_inplace_power, array_inplace_power);
- value = fast_scalar_power(a1, o2, 1);
- if (!value) {
+ if (fast_scalar_power(a1, o2, 1, &value) != 0) {
value = PyArray_GenericInplaceBinaryFunction(a1, o2, n_ops.power);
}
return value;
diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h
index 113fc2475..99a2a722b 100644
--- a/numpy/core/src/multiarray/number.h
+++ b/numpy/core/src/multiarray/number.h
@@ -15,6 +15,7 @@ typedef struct {
PyObject *sqrt;
PyObject *cbrt;
PyObject *negative;
+ PyObject *positive;
PyObject *absolute;
PyObject *invert;
PyObject *left_shift;
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 85824f2ce..0cb6b072d 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -415,7 +415,7 @@ PyArray_ScalarFromObject(PyObject *object)
else if (PyLong_Check(object)) {
npy_longlong val;
val = PyLong_AsLongLong(object);
- if (val==-1 && PyErr_Occurred()) {
+ if (error_converting(val)) {
PyErr_Clear();
return NULL;
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index f6bd5f5a7..7a6ed6a86 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -24,6 +24,7 @@
#include "scalartypes.h"
#include "_datetime.h"
#include "datetime_strings.h"
+#include "alloc.h"
#include <stdlib.h>
@@ -194,9 +195,21 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds,
}
}
+static PyObject *
+gentype_add(PyObject *m1, PyObject* m2)
+{
+ /* special case str.__radd__, which should not call array_add */
+ if (PyString_Check(m1) || PyUnicode_Check(m1)) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_add, gentype_add);
+ return PyArray_Type.tp_as_number->nb_add(m1, m2);
+}
+
/**begin repeat
*
- * #name = add, subtract, remainder, divmod, lshift, rshift,
+ * #name = subtract, remainder, divmod, lshift, rshift,
* and, xor, or, floor_divide, true_divide#
*/
static PyObject *
@@ -243,7 +256,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
(Py_TYPE(m1)->tp_as_number->nb_multiply == NULL))) {
/* Try to convert m2 to an int and try sequence repeat */
repeat = PyArray_PyIntAsIntp(m2);
- if (repeat == -1 && PyErr_Occurred()) {
+ if (error_converting(repeat)) {
return NULL;
}
/* Note that npy_intp is compatible to Py_Ssize_t */
@@ -256,7 +269,7 @@ gentype_multiply(PyObject *m1, PyObject *m2)
(Py_TYPE(m2)->tp_as_number->nb_multiply == NULL))) {
/* Try to convert m1 to an int and try sequence repeat */
repeat = PyArray_PyIntAsIntp(m1);
- if (repeat == -1 && PyErr_Occurred()) {
+ if (error_converting(repeat)) {
return NULL;
}
return PySequence_Repeat(m2, repeat);
@@ -338,7 +351,6 @@ gentype_str(PyObject *self)
return ret;
}
-
static PyObject *
gentype_repr(PyObject *self)
{
@@ -353,6 +365,20 @@ gentype_repr(PyObject *self)
return ret;
}
+static PyObject *
+genint_type_str(PyObject *self)
+{
+ PyObject *item, *item_str;
+ item = gentype_generic_method(self, NULL, NULL, "item");
+ if (item == NULL) {
+ return NULL;
+ }
+
+ item_str = PyObject_Str(item);
+ Py_DECREF(item);
+ return item_str;
+}
+
/*
* The __format__ method for PEP 3101.
*/
@@ -1343,10 +1369,9 @@ gentype_imag_get(PyObject *self)
int elsize;
typecode = PyArray_DescrFromScalar(self);
elsize = typecode->elsize;
- temp = PyDataMem_NEW(elsize);
- memset(temp, '\0', elsize);
+ temp = npy_alloc_cache_zero(elsize);
ret = PyArray_Scalar(temp, typecode, NULL);
- PyDataMem_FREE(temp);
+ npy_free_cache(temp, elsize);
}
Py_XDECREF(typecode);
@@ -1516,9 +1541,9 @@ gentype_wraparray(PyObject *NPY_UNUSED(scalar), PyObject *args)
*/
/**begin repeat
*
- * #name = tolist, item, tostring, tobytes, astype, copy, __deepcopy__,
- * searchsorted, view, swapaxes, conj, conjugate, nonzero, flatten,
- * ravel, fill, transpose, newbyteorder#
+ * #name = tolist, item, __deepcopy__, __copy__,
+ * swapaxes, conj, conjugate, nonzero,
+ * fill, transpose, newbyteorder#
*/
static PyObject *
gentype_@name@(PyObject *self, PyObject *args)
@@ -1548,11 +1573,13 @@ static Py_ssize_t
gentype_getreadbuf(PyObject *, Py_ssize_t, void **);
static PyObject *
-gentype_byteswap(PyObject *self, PyObject *args)
+gentype_byteswap(PyObject *self, PyObject *args, PyObject *kwds)
{
npy_bool inplace = NPY_FALSE;
+ static char *kwlist[] = {"inplace", NULL};
- if (!PyArg_ParseTuple(args, "|O&:byteswap", PyArray_BoolConverter, &inplace)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O&:byteswap", kwlist,
+ PyArray_BoolConverter, &inplace)) {
return NULL;
}
if (inplace) {
@@ -1593,8 +1620,9 @@ gentype_byteswap(PyObject *self, PyObject *args)
*
* #name = take, getfield, put, repeat, tofile, mean, trace, diagonal, clip,
* std, var, sum, cumsum, prod, cumprod, compress, sort, argsort,
- * round, argmax, argmin, max, min, ptp, any, all, resize, reshape,
- * choose#
+ * round, argmax, argmin, max, min, ptp, any, all, astype, resize,
+ * reshape, choose, tostring, tobytes, copy, searchsorted, view,
+ * flatten, ravel#
*/
static PyObject *
gentype_@name@(PyObject *self, PyObject *args, PyObject *kwds)
@@ -1628,7 +1656,7 @@ voidtype_setfield(PyVoidScalarObject *self, PyObject *args, PyObject *kwds)
* However, as a special case, void-scalar assignment broadcasts
* differently from ndarrays when assigning to an object field: Assignment
* to an ndarray object field broadcasts, but assignment to a void-scalar
- * object-field should not, in order to allow nested ndarrays.
+ * object-field should not, in order to allow nested ndarrays.
* These lines should then behave identically:
*
* b = np.zeros(1, dtype=[('x', 'O')])
@@ -1858,19 +1886,19 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS, NULL},
{"tobytes",
(PyCFunction)gentype_tobytes,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"tofile",
(PyCFunction)gentype_tofile,
METH_VARARGS | METH_KEYWORDS, NULL},
{"tostring",
(PyCFunction)gentype_tostring,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"byteswap",
(PyCFunction)gentype_byteswap,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"astype",
(PyCFunction)gentype_astype,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"getfield",
(PyCFunction)gentype_getfield,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1879,7 +1907,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"copy",
(PyCFunction)gentype_copy,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"resize",
(PyCFunction)gentype_resize,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1897,7 +1925,7 @@ static PyMethodDef gentype_methods[] = {
/* for the copy module */
{"__copy__",
- (PyCFunction)gentype_copy,
+ (PyCFunction)gentype___copy__,
METH_VARARGS, NULL},
{"__deepcopy__",
(PyCFunction)gentype___deepcopy__,
@@ -1945,7 +1973,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"searchsorted",
(PyCFunction)gentype_searchsorted,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"argmax",
(PyCFunction)gentype_argmax,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -1960,7 +1988,7 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS, NULL},
{"view",
(PyCFunction)gentype_view,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"swapaxes",
(PyCFunction)gentype_swapaxes,
METH_VARARGS, NULL},
@@ -2023,10 +2051,10 @@ static PyMethodDef gentype_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"flatten",
(PyCFunction)gentype_flatten,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"ravel",
(PyCFunction)gentype_ravel,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"round",
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -2151,35 +2179,31 @@ static PyObject *
voidtype_subscript(PyVoidScalarObject *self, PyObject *ind)
{
npy_intp n;
- PyObject *ret, *args;
+ PyObject *ret, *res;
- if (!(PyDataType_HASFIELDS(self->descr))) {
- PyErr_SetString(PyExc_IndexError,
- "can't index void scalar without fields");
- return NULL;
+ /* structured voids will accept an integer index */
+ if (PyDataType_HASFIELDS(self->descr)) {
+ n = PyArray_PyIntAsIntp(ind);
+ if (!error_converting(n)) {
+ return voidtype_item(self, (Py_ssize_t)n);
+ }
+ PyErr_Clear();
}
-#if defined(NPY_PY3K)
- if (PyUString_Check(ind)) {
-#else
- if (PyBytes_Check(ind) || PyUnicode_Check(ind)) {
-#endif
- args = Py_BuildValue("(O)", ind);
- ret = gentype_generic_method((PyObject *)self, args, NULL, "__getitem__");
- Py_DECREF(args);
- return ret;
- }
+ res = PyArray_FromScalar((PyObject*)self, NULL);
- /* try to convert it to a number */
- n = PyArray_PyIntAsIntp(ind);
- if (error_converting(n)) {
- goto fail;
+ /* ellipsis should return 0d array */
+ if(ind == Py_Ellipsis){
+ return res;
}
- return voidtype_item(self, (Py_ssize_t)n);
-fail:
- PyErr_SetString(PyExc_IndexError, "invalid index");
- return NULL;
+ /*
+ * other cases (field names, empty tuple) will return either
+ * scalar or non-0d array. Compute this using ndarray subscript.
+ */
+ ret = array_subscript((PyArrayObject *)res, ind);
+ Py_DECREF(res);
+ return PyArray_Return((PyArrayObject*)ret);
}
static int
@@ -2473,7 +2497,7 @@ static void
void_dealloc(PyVoidScalarObject *v)
{
if (v->flags & NPY_ARRAY_OWNDATA) {
- PyDataMem_FREE(v->obval);
+ npy_free_cache(v->obval, Py_SIZE(v));
}
Py_XDECREF(v->descr);
Py_XDECREF(v->base);
@@ -2889,9 +2913,7 @@ static PyObject *
void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
{
PyObject *obj, *arr;
- npy_ulonglong memu = 1;
PyObject *new = NULL;
- char *destptr;
if (!PyArg_ParseTuple(args, "O:void", &obj)) {
return NULL;
@@ -2913,7 +2935,8 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
}
if (new && PyLong_Check(new)) {
PyObject *ret;
- memu = PyLong_AsUnsignedLongLong(new);
+ char *destptr;
+ npy_ulonglong memu = PyLong_AsUnsignedLongLong(new);
Py_DECREF(new);
if (PyErr_Occurred() || (memu > NPY_MAX_INT)) {
PyErr_Clear();
@@ -2922,13 +2945,13 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
(int) NPY_MAX_INT);
return NULL;
}
- destptr = PyDataMem_NEW((int) memu);
+ destptr = npy_alloc_cache_zero(memu);
if (destptr == NULL) {
return PyErr_NoMemory();
}
ret = type->tp_alloc(type, 0);
if (ret == NULL) {
- PyDataMem_FREE(destptr);
+ npy_free_cache(destptr, memu);
return PyErr_NoMemory();
}
((PyVoidScalarObject *)ret)->obval = destptr;
@@ -2939,7 +2962,6 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
((PyVoidScalarObject *)ret)->flags = NPY_ARRAY_BEHAVED |
NPY_ARRAY_OWNDATA;
((PyVoidScalarObject *)ret)->base = NULL;
- memset(destptr, '\0', (size_t) memu);
return ret;
}
@@ -4183,6 +4205,19 @@ initialize_numeric_types(void)
/**end repeat**/
+
+ /**begin repeat
+ * #Type = Bool, Byte, UByte, Short, UShort, Int, UInt, Long,
+ * ULong, LongLong, ULongLong#
+ */
+
+ /* both str/repr use genint_type_str to avoid trailing "L" of longs */
+ Py@Type@ArrType_Type.tp_str = genint_type_str;
+ Py@Type@ArrType_Type.tp_repr = genint_type_str;
+
+ /**end repeat**/
+
+
PyHalfArrType_Type.tp_print = halftype_print;
PyFloatArrType_Type.tp_print = floattype_print;
PyDoubleArrType_Type.tp_print = doubletype_print;
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index b32b67146..07ab9b626 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -19,6 +19,7 @@
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "common.h" /* for convert_shape_to_string */
+#include "alloc.h"
static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr);
@@ -145,26 +146,33 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
}
}
- if (PyArray_NDIM(self) != new_nd) {
- /* Different number of dimensions. */
- ((PyArrayObject_fields *)self)->nd = new_nd;
- /* Need new dimensions and strides arrays */
- dimptr = PyDimMem_RENEW(PyArray_DIMS(self), 3*new_nd);
- if (dimptr == NULL) {
- PyErr_SetString(PyExc_MemoryError,
- "cannot allocate memory for array");
- return NULL;
+ if (new_nd > 0) {
+ if (PyArray_NDIM(self) != new_nd) {
+ /* Different number of dimensions. */
+ ((PyArrayObject_fields *)self)->nd = new_nd;
+ /* Need new dimensions and strides arrays */
+ dimptr = PyDimMem_RENEW(PyArray_DIMS(self), 3*new_nd);
+ if (dimptr == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "cannot allocate memory for array");
+ return NULL;
+ }
+ ((PyArrayObject_fields *)self)->dimensions = dimptr;
+ ((PyArrayObject_fields *)self)->strides = dimptr + new_nd;
}
- ((PyArrayObject_fields *)self)->dimensions = dimptr;
- ((PyArrayObject_fields *)self)->strides = dimptr + new_nd;
+ /* make new_strides variable */
+ _array_fill_strides(new_strides, new_dimensions, new_nd,
+ PyArray_DESCR(self)->elsize, PyArray_FLAGS(self),
+ &(((PyArrayObject_fields *)self)->flags));
+ memmove(PyArray_DIMS(self), new_dimensions, new_nd*sizeof(npy_intp));
+ memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp));
+ }
+ else {
+ PyDimMem_FREE(((PyArrayObject_fields *)self)->dimensions);
+ ((PyArrayObject_fields *)self)->nd = 0;
+ ((PyArrayObject_fields *)self)->dimensions = NULL;
+ ((PyArrayObject_fields *)self)->strides = NULL;
}
-
- /* make new_strides variable */
- _array_fill_strides(
- new_strides, new_dimensions, new_nd, PyArray_DESCR(self)->elsize,
- PyArray_FLAGS(self), &(((PyArrayObject_fields *)self)->flags));
- memmove(PyArray_DIMS(self), new_dimensions, new_nd*sizeof(npy_intp));
- memmove(PyArray_STRIDES(self), new_strides, new_nd*sizeof(npy_intp));
Py_RETURN_NONE;
}
@@ -309,7 +317,7 @@ PyArray_Reshape(PyArrayObject *self, PyObject *shape)
return NULL;
}
ret = PyArray_Newshape(self, &newdims, NPY_CORDER);
- PyDimMem_FREE(newdims.ptr);
+ npy_free_cache_dim_obj(newdims);
return ret;
}
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
new file mode 100644
index 000000000..5a0d20335
--- /dev/null
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -0,0 +1,200 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include <Python.h>
+#include <numpy/arrayobject.h>
+
+#include "npy_pycompat.h"
+
+#include "strfuncs.h"
+
+static PyObject *PyArray_StrFunction = NULL;
+static PyObject *PyArray_ReprFunction = NULL;
+
+/*NUMPY_API
+ * Set the array print function to be a Python function.
+ */
+NPY_NO_EXPORT void
+PyArray_SetStringFunction(PyObject *op, int repr)
+{
+ if (repr) {
+ /* Dispose of previous callback */
+ Py_XDECREF(PyArray_ReprFunction);
+ /* Add a reference to new callback */
+ Py_XINCREF(op);
+ /* Remember new callback */
+ PyArray_ReprFunction = op;
+ }
+ else {
+ /* Dispose of previous callback */
+ Py_XDECREF(PyArray_StrFunction);
+ /* Add a reference to new callback */
+ Py_XINCREF(op);
+ /* Remember new callback */
+ PyArray_StrFunction = op;
+ }
+}
+
+
+/*
+ * Extend string. On failure, returns NULL and leaves *strp alone.
+ * XXX we do this in multiple places; time for a string library?
+ */
+static char *
+extend(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
+{
+ char *str = *strp;
+ Py_ssize_t new_cap;
+
+ if (n >= *maxp - 16) {
+ new_cap = *maxp * 2;
+
+ if (new_cap <= *maxp) { /* overflow */
+ return NULL;
+ }
+ str = PyArray_realloc(*strp, new_cap);
+ if (str != NULL) {
+ *strp = str;
+ *maxp = new_cap;
+ }
+ }
+ return str;
+}
+
+
+static int
+dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
+ npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
+{
+ PyArray_Descr *descr=PyArray_DESCR(self);
+ PyObject *op = NULL, *sp = NULL;
+ char *ostring;
+ npy_intp i, N, ret = 0;
+
+#define CHECK_MEMORY do { \
+ if (extend(string, *n, max_n) == NULL) { \
+ ret = -1; \
+ goto end; \
+ } \
+ } while (0)
+
+ if (nd == 0) {
+ if ((op = descr->f->getitem(data, self)) == NULL) {
+ return -1;
+ }
+ sp = PyObject_Repr(op);
+ if (sp == NULL) {
+ ret = -1;
+ goto end;
+ }
+ ostring = PyString_AsString(sp);
+ N = PyString_Size(sp)*sizeof(char);
+ *n += N;
+ CHECK_MEMORY;
+ memmove(*string + (*n - N), ostring, N);
+ }
+ else {
+ CHECK_MEMORY;
+ (*string)[*n] = '[';
+ *n += 1;
+ for (i = 0; i < dimensions[0]; i++) {
+ if (dump_data(string, n, max_n,
+ data + (*strides)*i,
+ nd - 1, dimensions + 1,
+ strides + 1, self) < 0) {
+ return -1;
+ }
+ CHECK_MEMORY;
+ if (i < dimensions[0] - 1) {
+ (*string)[*n] = ',';
+ (*string)[*n+1] = ' ';
+ *n += 2;
+ }
+ }
+ CHECK_MEMORY;
+ (*string)[*n] = ']';
+ *n += 1;
+ }
+
+#undef CHECK_MEMORY
+
+end:
+ Py_XDECREF(op);
+ Py_XDECREF(sp);
+ return ret;
+}
+
+
+static PyObject *
+array_repr_builtin(PyArrayObject *self, int repr)
+{
+ PyObject *ret;
+ char *string;
+ /* max_n initial value is arbitrary, dump_data will extend it */
+ Py_ssize_t n = 0, max_n = PyArray_NBYTES(self) * 4 + 7;
+
+ if ((string = PyArray_malloc(max_n)) == NULL) {
+ return PyErr_NoMemory();
+ }
+
+ if (dump_data(&string, &n, &max_n, PyArray_DATA(self),
+ PyArray_NDIM(self), PyArray_DIMS(self),
+ PyArray_STRIDES(self), self) < 0) {
+ PyArray_free(string);
+ return NULL;
+ }
+
+ if (repr) {
+ if (PyArray_ISEXTENDED(self)) {
+ ret = PyUString_FromFormat("array(%s, '%c%d')",
+ string,
+ PyArray_DESCR(self)->type,
+ PyArray_DESCR(self)->elsize);
+ }
+ else {
+ ret = PyUString_FromFormat("array(%s, '%c')",
+ string,
+ PyArray_DESCR(self)->type);
+ }
+ }
+ else {
+ ret = PyUString_FromStringAndSize(string, n);
+ }
+
+ PyArray_free(string);
+ return ret;
+}
+
+
+NPY_NO_EXPORT PyObject *
+array_repr(PyArrayObject *self)
+{
+ PyObject *s, *arglist;
+
+ if (PyArray_ReprFunction == NULL) {
+ s = array_repr_builtin(self, 1);
+ }
+ else {
+ arglist = Py_BuildValue("(O)", self);
+ s = PyEval_CallObject(PyArray_ReprFunction, arglist);
+ Py_DECREF(arglist);
+ }
+ return s;
+}
+
+
+NPY_NO_EXPORT PyObject *
+array_str(PyArrayObject *self)
+{
+ PyObject *s, *arglist;
+
+ if (PyArray_StrFunction == NULL) {
+ s = array_repr_builtin(self, 0);
+ }
+ else {
+ arglist = Py_BuildValue("(O)", self);
+ s = PyEval_CallObject(PyArray_StrFunction, arglist);
+ Py_DECREF(arglist);
+ }
+ return s;
+}
diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h
new file mode 100644
index 000000000..8e80897c2
--- /dev/null
+++ b/numpy/core/src/multiarray/strfuncs.h
@@ -0,0 +1,13 @@
+#ifndef _NPY_ARRAY_STRFUNCS_H_
+#define _NPY_ARRAY_STRFUNCS_H_
+
+NPY_NO_EXPORT void
+PyArray_SetStringFunction(PyObject *op, int repr);
+
+NPY_NO_EXPORT PyObject *
+array_repr(PyArrayObject *self);
+
+NPY_NO_EXPORT PyObject *
+array_str(PyArrayObject *self);
+
+#endif
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index fae6763e4..3822f5d0d 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -283,8 +283,10 @@ can_elide_temp(PyArrayObject * alhs, PyObject * orhs, int * cannot)
* array of a basic type, own its data and size larger than threshold
*/
if (Py_REFCNT(alhs) != 1 || !PyArray_CheckExact(alhs) ||
- PyArray_DESCR(alhs)->type_num >= NPY_OBJECT ||
+ !PyArray_ISNUMBER(alhs) ||
!(PyArray_FLAGS(alhs) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_ISWRITEABLE(alhs) ||
+ PyArray_CHKFLAGS(alhs, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(alhs) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
@@ -359,8 +361,10 @@ can_elide_temp_unary(PyArrayObject * m1)
{
int cannot;
if (Py_REFCNT(m1) != 1 || !PyArray_CheckExact(m1) ||
- PyArray_DESCR(m1)->type_num == NPY_VOID ||
+ !PyArray_ISNUMBER(m1) ||
!(PyArray_FLAGS(m1) & NPY_ARRAY_OWNDATA) ||
+ !PyArray_ISWRITEABLE(m1) ||
+ PyArray_CHKFLAGS(m1, NPY_ARRAY_UPDATEIFCOPY) ||
PyArray_NBYTES(m1) < NPY_MIN_ELIDE_BYTES) {
return 0;
}
diff --git a/numpy/core/src/private/mem_overlap.c b/numpy/core/src/private/mem_overlap.c
index b2b80b4e6..2145791e1 100644
--- a/numpy/core/src/private/mem_overlap.c
+++ b/numpy/core/src/private/mem_overlap.c
@@ -181,9 +181,6 @@
All rights reserved.
Licensed under 3-clause BSD license, see LICENSE.txt.
*/
-#include <stdlib.h>
-#include <stdio.h>
-#include <assert.h>
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
@@ -191,6 +188,10 @@
#include "mem_overlap.h"
#include "npy_extint128.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
#define MAX(a, b) (((a) >= (b)) ? (a) : (b))
#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index b8e18e961..107b3cb5b 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -62,6 +62,19 @@
#endif
+/* MSVC _hypot messes with fp precision mode on 32-bit, see gh-9567 */
+#if defined(_MSC_VER) && (_MSC_VER >= 1900) && !defined(_WIN64)
+
+#undef HAVE_CABS
+#undef HAVE_CABSF
+#undef HAVE_CABSL
+
+#undef HAVE_HYPOT
+#undef HAVE_HYPOTF
+#undef HAVE_HYPOTL
+
+#endif
+
/* Intel C for Windows uses POW for 64 bits longdouble*/
#if defined(_MSC_VER) && defined(__INTEL_COMPILER)
diff --git a/numpy/core/src/private/ufunc_override.c b/numpy/core/src/private/ufunc_override.c
index 401228236..e405155cf 100644
--- a/numpy/core/src/private/ufunc_override.c
+++ b/numpy/core/src/private/ufunc_override.c
@@ -56,9 +56,9 @@ get_non_default_array_ufunc(PyObject *obj)
/*
* Check whether a set of input and output args have a non-default
* `__array_ufunc__` method. Return the number of overrides, setting
- * corresponding objects in PyObject array with_override (if not NULL)
- * using borrowed references, and the corresponding __array_ufunc__ methods
- * in methods, using new references
+ * corresponding objects in PyObject array with_override and the corresponding
+ * __array_ufunc__ methods in methods (both only if not NULL, and both using
+ * new references).
*
* returns -1 on failure.
*/
@@ -134,6 +134,7 @@ PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
goto fail;
}
if (with_override != NULL) {
+ Py_INCREF(obj);
with_override[num_override_args] = obj;
}
if (methods != NULL) {
diff --git a/numpy/core/src/umath/extobj.c b/numpy/core/src/umath/extobj.c
new file mode 100644
index 000000000..344981622
--- /dev/null
+++ b/numpy/core/src/umath/extobj.c
@@ -0,0 +1,318 @@
+#define _UMATHMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include <Python.h>
+
+#include "npy_config.h"
+
+#define PY_ARRAY_UNIQUE_SYMBOL _npy_umathmodule_ARRAY_API
+#define NO_IMPORT_ARRAY
+
+#include "npy_pycompat.h"
+
+#include "extobj.h"
+#include "numpy/ufuncobject.h"
+
+#include "ufunc_object.h" /* for npy_um_str_pyvals_name */
+#include "common.h"
+
+#if USE_USE_DEFAULTS==1
+static int PyUFunc_NUM_NODEFAULTS = 0;
+
+/*
+ * This is a strategy to buy a little speed up and avoid the dictionary
+ * look-up in the default case. It should work in the presence of
+ * threads. If it is deemed too complicated or it doesn't actually work
+ * it could be taken out.
+ */
+NPY_NO_EXPORT int
+ufunc_update_use_defaults(void)
+{
+ PyObject *errobj = NULL;
+ int errmask, bufsize;
+ int res;
+
+ PyUFunc_NUM_NODEFAULTS += 1;
+ res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj);
+ PyUFunc_NUM_NODEFAULTS -= 1;
+ if (res < 0) {
+ Py_XDECREF(errobj);
+ return -1;
+ }
+ if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE)
+ || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) {
+ PyUFunc_NUM_NODEFAULTS += 1;
+ }
+ else if (PyUFunc_NUM_NODEFAULTS > 0) {
+ PyUFunc_NUM_NODEFAULTS -= 1;
+ }
+ Py_XDECREF(errobj);
+ return 0;
+}
+#endif
+
+/*
+ * fpstatus is the ufunc_formatted hardware status
+ * errmask is the handling mask specified by the user.
+ * errobj is a Python object with (string, callable object or None)
+ * or NULL
+ */
+
+/*
+ * 2. for each of the flags
+ * determine whether to ignore, warn, raise error, or call Python function.
+ * If ignore, do nothing
+ * If warn, print a warning and continue
+ * If raise return an error
+ * If call, call a user-defined function with string
+ */
+
+NPY_NO_EXPORT int
+_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first)
+{
+ PyObject *pyfunc, *ret, *args;
+ char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0));
+ char msg[100];
+
+ NPY_ALLOW_C_API_DEF
+
+ /* don't need C API for a simple print */
+ if (method == UFUNC_ERR_PRINT) {
+ if (*first) {
+ fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name);
+ *first = 0;
+ }
+ return 0;
+ }
+
+ NPY_ALLOW_C_API;
+ switch(method) {
+ case UFUNC_ERR_WARN:
+ PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name);
+ if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) {
+ goto fail;
+ }
+ break;
+ case UFUNC_ERR_RAISE:
+ PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s",
+ errtype, name);
+ goto fail;
+ case UFUNC_ERR_CALL:
+ pyfunc = PyTuple_GET_ITEM(errobj, 1);
+ if (pyfunc == Py_None) {
+ PyErr_Format(PyExc_NameError,
+ "python callback specified for %s (in " \
+ " %s) but no function found.",
+ errtype, name);
+ goto fail;
+ }
+ args = Py_BuildValue("NN", PyUString_FromString(errtype),
+ PyInt_FromLong((long) retstatus));
+ if (args == NULL) {
+ goto fail;
+ }
+ ret = PyObject_CallObject(pyfunc, args);
+ Py_DECREF(args);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ break;
+ case UFUNC_ERR_LOG:
+ if (first) {
+ *first = 0;
+ pyfunc = PyTuple_GET_ITEM(errobj, 1);
+ if (pyfunc == Py_None) {
+ PyErr_Format(PyExc_NameError,
+ "log specified for %s (in %s) but no " \
+ "object with write method found.",
+ errtype, name);
+ goto fail;
+ }
+ PyOS_snprintf(msg, sizeof(msg),
+ "Warning: %s encountered in %s\n", errtype, name);
+ ret = PyObject_CallMethod(pyfunc, "write", "s", msg);
+ if (ret == NULL) {
+ goto fail;
+ }
+ Py_DECREF(ret);
+ }
+ break;
+ }
+ NPY_DISABLE_C_API;
+ return 0;
+
+fail:
+ NPY_DISABLE_C_API;
+ return -1;
+}
+
+
+
+NPY_NO_EXPORT PyObject *
+get_global_ext_obj(void)
+{
+ PyObject *thedict;
+ PyObject *ref = NULL;
+
+#if USE_USE_DEFAULTS==1
+ if (PyUFunc_NUM_NODEFAULTS != 0) {
+#endif
+ thedict = PyThreadState_GetDict();
+ if (thedict == NULL) {
+ thedict = PyEval_GetBuiltins();
+ }
+ ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
+#if USE_USE_DEFAULTS==1
+ }
+#endif
+
+ return ref;
+}
+
+
+/*
+ * Extracts some values from the global pyvals tuple.
+ * all destinations may be NULL, in which case they are not retrieved
+ * ref - should hold the global tuple
+ * name - is the name of the ufunc (ufuncobj->name)
+ *
+ * bufsize - receives the buffer size to use
+ * errmask - receives the bitmask for error handling
+ * errobj - receives the python object to call with the error,
+ * if an error handling method is 'call'
+ */
+NPY_NO_EXPORT int
+_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
+ int *errmask, PyObject **errobj)
+{
+ PyObject *retval;
+
+ /* default errobj case, skips dictionary lookup */
+ if (ref == NULL) {
+ if (errmask) {
+ *errmask = UFUNC_ERR_DEFAULT;
+ }
+ if (errobj) {
+ *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None);
+ }
+ if (bufsize) {
+ *bufsize = NPY_BUFSIZE;
+ }
+ return 0;
+ }
+
+ if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) {
+ PyErr_Format(PyExc_TypeError,
+ "%s must be a length 3 list.", UFUNC_PYVALS_NAME);
+ return -1;
+ }
+
+ if (bufsize != NULL) {
+ *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
+ if (error_converting(*bufsize)) {
+ return -1;
+ }
+ if ((*bufsize < NPY_MIN_BUFSIZE) ||
+ (*bufsize > NPY_MAX_BUFSIZE) ||
+ (*bufsize % 16 != 0)) {
+ PyErr_Format(PyExc_ValueError,
+ "buffer size (%d) is not in range "
+ "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16",
+ *bufsize, (npy_intp) NPY_MIN_BUFSIZE,
+ (npy_intp) NPY_MAX_BUFSIZE);
+ return -1;
+ }
+ }
+
+ if (errmask != NULL) {
+ *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
+ if (*errmask < 0) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ PyErr_Format(PyExc_ValueError,
+ "invalid error mask (%d)",
+ *errmask);
+ return -1;
+ }
+ }
+
+ if (errobj != NULL) {
+ *errobj = NULL;
+ retval = PyList_GET_ITEM(ref, 2);
+ if (retval != Py_None && !PyCallable_Check(retval)) {
+ PyObject *temp;
+ temp = PyObject_GetAttrString(retval, "write");
+ if (temp == NULL || !PyCallable_Check(temp)) {
+ PyErr_SetString(PyExc_TypeError,
+ "python object must be callable or have " \
+ "a callable write method");
+ Py_XDECREF(temp);
+ return -1;
+ }
+ Py_DECREF(temp);
+ }
+
+ *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval);
+ if (*errobj == NULL) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+/*
+ * check the floating point status
+ * - errmask: mask of status to check
+ * - extobj: ufunc pyvals object
+ * may be null, in which case the thread global one is fetched
+ * - ufunc_name: name of ufunc
+ */
+NPY_NO_EXPORT int
+_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
+ int fperr;
+ PyObject *errobj = NULL;
+ int ret;
+ int first = 1;
+
+ if (!errmask) {
+ return 0;
+ }
+ fperr = PyUFunc_getfperr();
+ if (!fperr) {
+ return 0;
+ }
+
+ /* Get error object globals */
+ if (extobj == NULL) {
+ extobj = get_global_ext_obj();
+ }
+ if (_extract_pyvals(extobj, ufunc_name,
+ NULL, NULL, &errobj) < 0) {
+ Py_XDECREF(errobj);
+ return -1;
+ }
+
+ ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first);
+ Py_XDECREF(errobj);
+
+ return ret;
+}
+
+
+NPY_NO_EXPORT int
+_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
+ int *buffersize, int *errormask)
+{
+ /* Get the buffersize and errormask */
+ if (extobj == NULL) {
+ extobj = get_global_ext_obj();
+ }
+ if (_extract_pyvals(extobj, ufunc_name,
+ buffersize, errormask, NULL) < 0) {
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/numpy/core/src/umath/extobj.h b/numpy/core/src/umath/extobj.h
new file mode 100644
index 000000000..1a569dfbd
--- /dev/null
+++ b/numpy/core/src/umath/extobj.h
@@ -0,0 +1,32 @@
+#ifndef _NPY_PRIVATE__EXTOBJ_H_
+#define _NPY_PRIVATE__EXTOBJ_H_
+
+#include <numpy/ndarraytypes.h> /* for NPY_NO_EXPORT */
+
+NPY_NO_EXPORT int
+_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first);
+
+NPY_NO_EXPORT PyObject *
+get_global_ext_obj(void);
+
+NPY_NO_EXPORT int
+_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
+ int *errmask, PyObject **errobj);
+
+NPY_NO_EXPORT int
+_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name);
+
+NPY_NO_EXPORT int
+_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
+ int *buffersize, int *errormask);
+
+/********************/
+#define USE_USE_DEFAULTS 1
+/********************/
+
+#if USE_USE_DEFAULTS==1
+NPY_NO_EXPORT int
+ufunc_update_use_defaults(void);
+#endif
+
+#endif
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 40ebc119a..670c39ea2 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -980,16 +980,6 @@ NPY_NO_EXPORT void
/**end repeat1**/
NPY_NO_EXPORT void
-@TYPE@_true_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
-{
- BINARY_LOOP {
- const double in1 = (double)(*(@type@ *)ip1);
- const double in2 = (double)(*(@type@ *)ip2);
- *((double *)op1) = in1/in2;
- }
-}
-
-NPY_NO_EXPORT void
@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
BINARY_LOOP {
@@ -1297,6 +1287,7 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
+ npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
@@ -1304,42 +1295,47 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = res;
if ((in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) && res) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
- "will always be False.") < 0) {
- NPY_DISABLE_C_API;
- return;
- }
- NPY_DISABLE_C_API;
+ give_future_warning = 1;
}
}
+ if (give_future_warning) {
+ NPY_ALLOW_C_API_DEF
+ NPY_ALLOW_C_API;
+ /* 2016-01-18, 1.11 */
+ if (DEPRECATE_FUTUREWARNING(
+ "In the future, 'NAT @OP@ x' and 'x @OP@ NAT' "
+ "will always be False.") < 0) {
+ /* nothing to do, we return anyway */
+ }
+ NPY_DISABLE_C_API;
+ }
}
/**end repeat1**/
NPY_NO_EXPORT void
@TYPE@_not_equal(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
+ npy_bool give_future_warning = 0;
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
*((npy_bool *)op1) = in1 != in2;
if (in1 == NPY_DATETIME_NAT && in2 == NPY_DATETIME_NAT) {
- NPY_ALLOW_C_API_DEF
- NPY_ALLOW_C_API;
- /* 2016-01-18, 1.11 */
- if (DEPRECATE_FUTUREWARNING(
- "In the future, NAT != NAT will be True "
- "rather than False.") < 0) {
- NPY_DISABLE_C_API;
- return;
- }
- NPY_DISABLE_C_API;
+ give_future_warning = 1;
}
}
+ if (give_future_warning) {
+ NPY_ALLOW_C_API_DEF
+ NPY_ALLOW_C_API;
+ /* 2016-01-18, 1.11 */
+ if (DEPRECATE_FUTUREWARNING(
+ "In the future, NAT != NAT will be True "
+ "rather than False.") < 0) {
+ /* nothing to do, we return anyway */
+ }
+ NPY_DISABLE_C_API;
+ }
}
@@ -1862,6 +1858,7 @@ NPY_NO_EXPORT void
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
+ npy_clear_floatstatus();
}
/**end repeat1**/
@@ -2200,6 +2197,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
+ npy_clear_floatstatus();
}
/**end repeat**/
@@ -2749,6 +2747,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
+ npy_clear_floatstatus();
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 4243c6522..a978b03ee 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -120,9 +120,6 @@ NPY_NO_EXPORT void
/**end repeat2**/
NPY_NO_EXPORT void
-@S@@TYPE@_true_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
-
-NPY_NO_EXPORT void
@S@@TYPE@_power(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 6b441cbbb..7e787b8fe 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -144,14 +144,16 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 3) {
- obj = PyTuple_GetSlice(args, 3, 4);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 3) {
- Py_DECREF(obj);
+ if (i == 3) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 3, 4);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 3) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -188,14 +190,16 @@ normalize_accumulate_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 3) {
- obj = PyTuple_GetSlice(args, 3, 4);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 3) {
- Py_DECREF(obj);
+ if (i == 3) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 3, 4);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 3) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -234,14 +238,16 @@ normalize_reduceat_args(PyUFuncObject *ufunc, PyObject *args,
return -1;
}
obj = PyTuple_GET_ITEM(args, i);
- if (obj != Py_None) {
- if (i == 4) {
- obj = PyTuple_GetSlice(args, 4, 5);
- }
- PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
- if (i == 4) {
- Py_DECREF(obj);
+ if (i == 4) {
+ /* remove out=None */
+ if (obj == Py_None) {
+ continue;
}
+ obj = PyTuple_GetSlice(args, 4, 5);
+ }
+ PyDict_SetItemString(*normal_kwds, kwlist[i], obj);
+ if (i == 4) {
+ Py_DECREF(obj);
}
}
return 0;
@@ -360,11 +366,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
if (out != NULL) {
int nout = ufunc->nout;
- if (PyTuple_Check(out)) {
+ if (PyTuple_CheckExact(out)) {
int all_none = 1;
if (PyTuple_GET_SIZE(out) != nout) {
- PyErr_Format(PyExc_TypeError,
+ PyErr_Format(PyExc_ValueError,
"The 'out' tuple must have exactly "
"%d entries: one per ufunc output", nout);
goto fail;
@@ -466,34 +472,15 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
status = -1;
}
if (status != 0) {
- Py_XDECREF(normal_args);
- goto fail;
- }
-
- len = PyTuple_GET_SIZE(normal_args);
- override_args = PyTuple_New(len + 3);
- if (override_args == NULL) {
goto fail;
}
- /* PyTuple_SET_ITEM steals reference */
- Py_INCREF(Py_None);
- PyTuple_SET_ITEM(override_args, 0, Py_None);
- Py_INCREF(ufunc);
- PyTuple_SET_ITEM(override_args, 1, (PyObject *)ufunc);
method_name = PyUString_FromString(method);
if (method_name == NULL) {
goto fail;
}
- Py_INCREF(method_name);
- PyTuple_SET_ITEM(override_args, 2, method_name);
- for (i = 0; i < len; i++) {
- PyObject *item = PyTuple_GET_ITEM(normal_args, i);
- Py_INCREF(item);
- PyTuple_SET_ITEM(override_args, i + 3, item);
- }
- Py_DECREF(normal_args);
+ len = PyTuple_GET_SIZE(normal_args);
/* Call __array_ufunc__ functions in correct order */
while (1) {
@@ -527,12 +514,33 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/* override_obj had no subtypes to the right. */
if (override_obj) {
- /* We won't call this one again */
- with_override[i] = NULL;
override_array_ufunc = array_ufunc_methods[i];
+ /* We won't call this one again (references decref'd below) */
+ with_override[i] = NULL;
+ array_ufunc_methods[i] = NULL;
break;
}
}
+ /*
+ * Set override arguments for each call since the tuple must
+ * not be mutated after use in PyPy
+ * We increase all references since SET_ITEM steals
+ * them and they will be DECREF'd when the tuple is deleted.
+ */
+ override_args = PyTuple_New(len + 3);
+ if (override_args == NULL) {
+ goto fail;
+ }
+ Py_INCREF(ufunc);
+ PyTuple_SET_ITEM(override_args, 1, (PyObject *)ufunc);
+ Py_INCREF(method_name);
+ PyTuple_SET_ITEM(override_args, 2, method_name);
+ for (i = 0; i < len; i++) {
+ PyObject *item = PyTuple_GET_ITEM(normal_args, i);
+
+ Py_INCREF(item);
+ PyTuple_SET_ITEM(override_args, i + 3, item);
+ }
/* Check if there is a method left to call */
if (!override_obj) {
@@ -543,7 +551,11 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
npy_cache_import("numpy.core._internal",
"array_ufunc_errmsg_formatter",
&errmsg_formatter);
+
if (errmsg_formatter != NULL) {
+ /* All tuple items must be set before use */
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM(override_args, 0, Py_None);
errmsg = PyObject_Call(errmsg_formatter, override_args,
normal_kwds);
if (errmsg != NULL) {
@@ -551,17 +563,20 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
Py_DECREF(errmsg);
}
}
+ Py_DECREF(override_args);
goto fail;
}
- /* Set the self argument, since we have an unbound method */
- Py_INCREF(override_obj);
- PyTuple_SetItem(override_args, 0, override_obj);
-
+ /*
+ * Set the self argument of our unbound method.
+ * This also steals the reference, so no need to DECREF after.
+ */
+ PyTuple_SET_ITEM(override_args, 0, override_obj);
/* Call the method */
*result = PyObject_Call(
override_array_ufunc, override_args, normal_kwds);
-
+ Py_DECREF(override_array_ufunc);
+ Py_DECREF(override_args);
if (*result == NULL) {
/* Exception occurred */
goto fail;
@@ -576,19 +591,18 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
break;
}
}
-
+ status = 0;
/* Override found, return it. */
- Py_XDECREF(method_name);
- Py_XDECREF(normal_kwds);
- Py_DECREF(override_args);
- return 0;
-
+ goto cleanup;
fail:
+ status = -1;
+cleanup:
for (i = 0; i < num_override_args; i++) {
+ Py_XDECREF(with_override[i]);
Py_XDECREF(array_ufunc_methods[i]);
}
+ Py_XDECREF(normal_args);
Py_XDECREF(method_name);
Py_XDECREF(normal_kwds);
- Py_XDECREF(override_args);
- return 1;
+ return status;
}
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 47598bed9..390b28c31 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -21,8 +21,10 @@
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "numpy/ufuncobject.h"
#include "lowlevel_strided_loops.h"
#include "reduction.h"
+#include "extobj.h" /* for _check_ufunc_fperr */
/*
* Allocates a result array for a reduction operation, with
@@ -437,6 +439,7 @@ PyArray_InitializeReduceResult(
* data : Data which is passed to assign_identity and the inner loop.
* buffersize : Buffer size for the iterator. For the default, pass in 0.
* funcname : The name of the reduction function, for error messages.
+ * errormask : forwarded from _get_bufsize_errmask
*
* TODO FIXME: if you squint, this is essentially an second independent
* implementation of generalized ufuncs with signature (i)->(), plus a few
@@ -458,7 +461,8 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
int subok,
PyArray_AssignReduceIdentityFunc *assign_identity,
PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname)
+ void *data, npy_intp buffersize, const char *funcname,
+ int errormask)
{
PyArrayObject *result = NULL, *op_view = NULL;
npy_intp skip_first_count = 0;
@@ -555,6 +559,9 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
+ /* Start with the floating-point exception flags cleared */
+ PyUFunc_clearfperr();
+
if (NpyIter_GetIterSize(iter) != 0) {
NpyIter_IterNextFunc *iternext;
char **dataptr;
@@ -586,6 +593,12 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
}
+
+ /* Check whether any errors occurred during the loop */
+ if (PyErr_Occurred() ||
+ _check_ufunc_fperr(errormask, NULL, "reduce") < 0) {
+ goto fail;
+ }
NpyIter_Deallocate(iter);
Py_DECREF(op_view);
diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h
index 43cd071e0..7a55c5df5 100644
--- a/numpy/core/src/umath/reduction.h
+++ b/numpy/core/src/umath/reduction.h
@@ -137,6 +137,7 @@ typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
* data : Data which is passed to assign_identity and the inner loop.
* buffersize : Buffer size for the iterator. For the default, pass in 0.
* funcname : The name of the reduction function, for error messages.
+ * errormask : forwarded from _get_bufsize_errmask
*/
NPY_NO_EXPORT PyArrayObject *
PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
@@ -149,6 +150,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
int subok,
PyArray_AssignReduceIdentityFunc *assign_identity,
PyArray_ReduceLoopFunc *loop,
- void *data, npy_intp buffersize, const char *funcname);
+ void *data, npy_intp buffersize, const char *funcname,
+ int errormask);
#endif
diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src
index 01ded5bbd..26c3d3799 100644
--- a/numpy/core/src/umath/test_rational.c.src
+++ b/numpy/core/src/umath/test_rational.c.src
@@ -9,6 +9,9 @@
#include <numpy/npy_3kcompat.h>
#include <math.h>
+#include "common.h" /* for error_converting */
+
+
/* Relevant arithmetic exceptions */
/* Uncomment the following line to work around a bug in numpy */
@@ -425,7 +428,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
PyObject* y;
int eq;
n[i] = PyInt_AsLong(x[i]);
- if (n[i]==-1 && PyErr_Occurred()) {
+ if (error_converting(n[i])) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
PyErr_Format(PyExc_TypeError,
"expected integer %s, got %s",
@@ -473,7 +476,7 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
PyObject* y_; \
int eq_; \
long n_ = PyInt_AsLong(object); \
- if (n_==-1 && PyErr_Occurred()) { \
+ if (error_converting(n_)) { \
if (PyErr_ExceptionMatches(PyExc_TypeError)) { \
PyErr_Clear(); \
Py_INCREF(Py_NotImplemented); \
@@ -750,7 +753,7 @@ npyrational_setitem(PyObject* item, void* data, void* arr) {
long n = PyInt_AsLong(item);
PyObject* y;
int eq;
- if (n==-1 && PyErr_Occurred()) {
+ if (error_converting(n)) {
return -1;
}
y = PyInt_FromLong(n);
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index e1219039c..16693b366 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -46,6 +46,8 @@
#include "ufunc_object.h"
#include "override.h"
#include "npy_import.h"
+#include "extobj.h"
+#include "common.h"
/********** PRINTF DEBUG TRACING **************/
#define NPY_UF_DBG_TRACING 0
@@ -63,21 +65,12 @@
#endif
/**********************************************/
-
-/********************/
-#define USE_USE_DEFAULTS 1
-/********************/
-
/* ---------------------------------------------------------------- */
static int
_does_loop_use_arrays(void *data);
static int
-_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
- int *errmask, PyObject **errobj);
-
-static int
assign_reduce_identity_zero(PyArrayObject *result, void *data);
static int
@@ -87,103 +80,6 @@ static int
assign_reduce_identity_one(PyArrayObject *result, void *data);
-/*
- * fpstatus is the ufunc_formatted hardware status
- * errmask is the handling mask specified by the user.
- * errobj is a Python object with (string, callable object or None)
- * or NULL
- */
-
-/*
- * 2. for each of the flags
- * determine whether to ignore, warn, raise error, or call Python function.
- * If ignore, do nothing
- * If warn, print a warning and continue
- * If raise return an error
- * If call, call a user-defined function with string
- */
-
-static int
-_error_handler(int method, PyObject *errobj, char *errtype, int retstatus, int *first)
-{
- PyObject *pyfunc, *ret, *args;
- char *name = PyBytes_AS_STRING(PyTuple_GET_ITEM(errobj,0));
- char msg[100];
-
- NPY_ALLOW_C_API_DEF
-
- /* don't need C API for a simple print */
- if (method == UFUNC_ERR_PRINT) {
- if (*first) {
- fprintf(stderr, "Warning: %s encountered in %s\n", errtype, name);
- *first = 0;
- }
- return 0;
- }
-
- NPY_ALLOW_C_API;
- switch(method) {
- case UFUNC_ERR_WARN:
- PyOS_snprintf(msg, sizeof(msg), "%s encountered in %s", errtype, name);
- if (PyErr_Warn(PyExc_RuntimeWarning, msg) < 0) {
- goto fail;
- }
- break;
- case UFUNC_ERR_RAISE:
- PyErr_Format(PyExc_FloatingPointError, "%s encountered in %s",
- errtype, name);
- goto fail;
- case UFUNC_ERR_CALL:
- pyfunc = PyTuple_GET_ITEM(errobj, 1);
- if (pyfunc == Py_None) {
- PyErr_Format(PyExc_NameError,
- "python callback specified for %s (in " \
- " %s) but no function found.",
- errtype, name);
- goto fail;
- }
- args = Py_BuildValue("NN", PyUString_FromString(errtype),
- PyInt_FromLong((long) retstatus));
- if (args == NULL) {
- goto fail;
- }
- ret = PyObject_CallObject(pyfunc, args);
- Py_DECREF(args);
- if (ret == NULL) {
- goto fail;
- }
- Py_DECREF(ret);
- break;
- case UFUNC_ERR_LOG:
- if (first) {
- *first = 0;
- pyfunc = PyTuple_GET_ITEM(errobj, 1);
- if (pyfunc == Py_None) {
- PyErr_Format(PyExc_NameError,
- "log specified for %s (in %s) but no " \
- "object with write method found.",
- errtype, name);
- goto fail;
- }
- PyOS_snprintf(msg, sizeof(msg),
- "Warning: %s encountered in %s\n", errtype, name);
- ret = PyObject_CallMethod(pyfunc, "write", "s", msg);
- if (ret == NULL) {
- goto fail;
- }
- Py_DECREF(ret);
- }
- break;
- }
- NPY_DISABLE_C_API;
- return 0;
-
-fail:
- NPY_DISABLE_C_API;
- return -1;
-}
-
-
/*UFUNC_API*/
NPY_NO_EXPORT int
PyUFunc_getfperr(void)
@@ -239,49 +135,6 @@ PyUFunc_clearfperr()
npy_clear_floatstatus();
}
-
-#if USE_USE_DEFAULTS==1
-static int PyUFunc_NUM_NODEFAULTS = 0;
-#endif
-
-static PyObject *
-get_global_ext_obj(void)
-{
- PyObject *thedict;
- PyObject *ref = NULL;
-
-#if USE_USE_DEFAULTS==1
- if (PyUFunc_NUM_NODEFAULTS != 0) {
-#endif
- thedict = PyThreadState_GetDict();
- if (thedict == NULL) {
- thedict = PyEval_GetBuiltins();
- }
- ref = PyDict_GetItem(thedict, npy_um_str_pyvals_name);
-#if USE_USE_DEFAULTS==1
- }
-#endif
-
- return ref;
-}
-
-
-static int
-_get_bufsize_errmask(PyObject * extobj, const char *ufunc_name,
- int *buffersize, int *errormask)
-{
- /* Get the buffersize and errormask */
- if (extobj == NULL) {
- extobj = get_global_ext_obj();
- }
- if (_extract_pyvals(extobj, ufunc_name,
- buffersize, errormask, NULL) < 0) {
- return -1;
- }
-
- return 0;
-}
-
/*
* This function analyzes the input arguments
* and determines an appropriate __array_prepare__ function to call
@@ -426,97 +279,6 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
return;
}
-/*
- * Extracts some values from the global pyvals tuple.
- * all destinations may be NULL, in which case they are not retrieved
- * ref - should hold the global tuple
- * name - is the name of the ufunc (ufuncobj->name)
- *
- * bufsize - receives the buffer size to use
- * errmask - receives the bitmask for error handling
- * errobj - receives the python object to call with the error,
- * if an error handling method is 'call'
- */
-static int
-_extract_pyvals(PyObject *ref, const char *name, int *bufsize,
- int *errmask, PyObject **errobj)
-{
- PyObject *retval;
-
- /* default errobj case, skips dictionary lookup */
- if (ref == NULL) {
- if (errmask) {
- *errmask = UFUNC_ERR_DEFAULT;
- }
- if (errobj) {
- *errobj = Py_BuildValue("NO", PyBytes_FromString(name), Py_None);
- }
- if (bufsize) {
- *bufsize = NPY_BUFSIZE;
- }
- return 0;
- }
-
- if (!PyList_Check(ref) || (PyList_GET_SIZE(ref)!=3)) {
- PyErr_Format(PyExc_TypeError,
- "%s must be a length 3 list.", UFUNC_PYVALS_NAME);
- return -1;
- }
-
- if (bufsize != NULL) {
- *bufsize = PyInt_AsLong(PyList_GET_ITEM(ref, 0));
- if ((*bufsize == -1) && PyErr_Occurred()) {
- return -1;
- }
- if ((*bufsize < NPY_MIN_BUFSIZE) ||
- (*bufsize > NPY_MAX_BUFSIZE) ||
- (*bufsize % 16 != 0)) {
- PyErr_Format(PyExc_ValueError,
- "buffer size (%d) is not in range "
- "(%"NPY_INTP_FMT" - %"NPY_INTP_FMT") or not a multiple of 16",
- *bufsize, (npy_intp) NPY_MIN_BUFSIZE,
- (npy_intp) NPY_MAX_BUFSIZE);
- return -1;
- }
- }
-
- if (errmask != NULL) {
- *errmask = PyInt_AsLong(PyList_GET_ITEM(ref, 1));
- if (*errmask < 0) {
- if (PyErr_Occurred()) {
- return -1;
- }
- PyErr_Format(PyExc_ValueError,
- "invalid error mask (%d)",
- *errmask);
- return -1;
- }
- }
-
- if (errobj != NULL) {
- *errobj = NULL;
- retval = PyList_GET_ITEM(ref, 2);
- if (retval != Py_None && !PyCallable_Check(retval)) {
- PyObject *temp;
- temp = PyObject_GetAttrString(retval, "write");
- if (temp == NULL || !PyCallable_Check(temp)) {
- PyErr_SetString(PyExc_TypeError,
- "python object must be callable or have " \
- "a callable write method");
- Py_XDECREF(temp);
- return -1;
- }
- Py_DECREF(temp);
- }
-
- *errobj = Py_BuildValue("NO", PyBytes_FromString(name), retval);
- if (*errobj == NULL) {
- return -1;
- }
- }
- return 0;
-}
-
/*UFUNC_API
*
@@ -761,8 +523,8 @@ _set_out_array(PyObject *obj, PyArrayObject **store)
* Produce a name for the ufunc, if one is not already set
* This is used in the PyUFunc_handlefperr machinery, and in error messages
*/
-static const char*
-_get_ufunc_name(PyUFuncObject *ufunc) {
+NPY_NO_EXPORT const char*
+ufunc_get_name_cstr(PyUFuncObject *ufunc) {
return ufunc->name ? ufunc->name : "<unnamed ufunc>";
}
@@ -789,7 +551,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
int nout = ufunc->nout;
PyObject *obj, *context;
PyObject *str_key_obj = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
int type_num;
int any_flexible = 0, any_object = 0, any_flexible_userloops = 0;
@@ -1084,7 +846,7 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
"positional and keyword argument");
goto fail;
}
- if (PyTuple_Check(value)) {
+ if (PyTuple_CheckExact(value)) {
if (PyTuple_GET_SIZE(value) != nout) {
PyErr_SetString(PyExc_ValueError,
"The 'out' tuple must have exactly "
@@ -1752,6 +1514,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
npy_intp *strides;
npy_intp *countptr;
+ PyArrayObject **op_it;
npy_uint32 iter_flags;
if (wheremask != NULL) {
@@ -1783,12 +1546,13 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
for (i = nin; i < nop; ++i) {
/*
* We don't write to all elements, and the iterator may make
- * UPDATEIFCOPY temporary copies. The output arrays must be considered
- * READWRITE by the iterator, so that the elements we don't write to are
- * copied to the possible temporary array.
+ * UPDATEIFCOPY temporary copies. The output arrays (unless they are
+ * allocated by the iterator itself) must be considered READWRITE by the
+ * iterator, so that the elements we don't write to are copied to the
+ * possible temporary array.
*/
op_flags[i] = default_op_out_flags |
- NPY_ITER_READWRITE |
+ (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY) |
NPY_ITER_ALIGNED |
NPY_ITER_ALLOCATE |
NPY_ITER_NO_BROADCAST |
@@ -1828,11 +1592,24 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
needs_api = NpyIter_IterationNeedsAPI(iter);
/* Call the __array_prepare__ functions where necessary */
+ op_it = NpyIter_GetOperandArray(iter);
for (i = nin; i < nop; ++i) {
- PyArrayObject *op_tmp;
+ PyArrayObject *op_tmp, *orig_op_tmp;
- /* prepare_ufunc_output may decref & replace pointer */
- op_tmp = op[i];
+ /*
+ * The array can be allocated by the iterator -- it is placed in op[i]
+ * and returned to the caller, and this needs an extra incref.
+ */
+ if (op[i] == NULL) {
+ op_tmp = op_it[i];
+ Py_INCREF(op_tmp);
+ }
+ else {
+ op_tmp = op[i];
+ }
+
+ /* prepare_ufunc_output may decref & replace the pointer */
+ orig_op_tmp = op_tmp;
Py_INCREF(op_tmp);
if (prepare_ufunc_output(ufunc, &op_tmp,
@@ -1842,7 +1619,7 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
}
/* Validate that the prepare_ufunc_output didn't mess with pointers */
- if (PyArray_BYTES(op_tmp) != PyArray_BYTES(op[i])) {
+ if (PyArray_BYTES(op_tmp) != PyArray_BYTES(orig_op_tmp)) {
PyErr_SetString(PyExc_ValueError,
"The __array_prepare__ functions modified the data "
"pointer addresses in an invalid fashion");
@@ -1853,8 +1630,8 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
/*
* Put the updated operand back and undo the DECREF above. If
- * COPY_IF_OVERLAP made a temporary copy, the output will be copied in
- * by UPDATEIFCOPY even if op[i] was changed.
+ * COPY_IF_OVERLAP made a temporary copy, the output will be copied
+ * by UPDATEIFCOPY even if op[i] was changed by prepare_ufunc_output.
*/
op[i] = op_tmp;
Py_DECREF(op_tmp);
@@ -1954,44 +1731,6 @@ make_arr_prep_args(npy_intp nin, PyObject *args, PyObject *kwds)
}
/*
- * check the floating point status
- * - errmask: mask of status to check
- * - extobj: ufunc pyvals object
- * may be null, in which case the thread global one is fetched
- * - ufunc_name: name of ufunc
- */
-static int
-_check_ufunc_fperr(int errmask, PyObject *extobj, const char *ufunc_name) {
- int fperr;
- PyObject *errobj = NULL;
- int ret;
- int first = 1;
-
- if (!errmask) {
- return 0;
- }
- fperr = PyUFunc_getfperr();
- if (!fperr) {
- return 0;
- }
-
- /* Get error object globals */
- if (extobj == NULL) {
- extobj = get_global_ext_obj();
- }
- if (_extract_pyvals(extobj, ufunc_name,
- NULL, NULL, &errobj) < 0) {
- Py_XDECREF(errobj);
- return -1;
- }
-
- ret = PyUFunc_handlefperr(errmask, errobj, fperr, &first);
- Py_XDECREF(errobj);
-
- return ret;
-}
-
-/*
* Validate the core dimensions of all the operands, and collect all of
* the labelled core dimensions into 'core_dim_sizes'.
*
@@ -2035,7 +1774,7 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
"%s: %s operand %d does not have enough "
"dimensions (has %d, gufunc core with "
"signature %s requires %d)",
- _get_ufunc_name(ufunc), i < nin ? "Input" : "Output",
+ ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
i < nin ? i : i - nin, PyArray_NDIM(op[i]),
ufunc->core_signature, num_dims);
return -1;
@@ -2059,7 +1798,7 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
"core dimension %d, with gufunc "
"signature %s (size %zd is different "
"from %zd)",
- _get_ufunc_name(ufunc), i < nin ? "Input" : "Output",
+ ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
i < nin ? i : i - nin, idim,
ufunc->core_signature, op_dim_size,
core_dim_sizes[core_dim_index]);
@@ -2102,13 +1841,12 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
PyErr_Format(PyExc_ValueError,
"%s: Output operand %d has core dimension %d "
"unspecified, with gufunc signature %s",
- _get_ufunc_name(ufunc), out_op, i, ufunc->core_signature);
+ ufunc_get_name_cstr(ufunc), out_op, i, ufunc->core_signature);
return -1;
}
return 0;
}
-
static int
PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyObject *args, PyObject *kwds,
@@ -2171,7 +1909,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
nout = ufunc->nout;
nop = nin + nout;
- ufunc_name = _get_ufunc_name(ufunc);
+ ufunc_name = ufunc_get_name_cstr(ufunc);
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
@@ -2633,7 +2371,7 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
nout = ufunc->nout;
nop = nin + nout;
- ufunc_name = _get_ufunc_name(ufunc);
+ ufunc_name = ufunc_get_name_cstr(ufunc);
NPY_UF_DBG_PRINT1("\nEvaluating ufunc %s\n", ufunc_name);
@@ -2873,7 +2611,7 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
int i, retcode;
PyArrayObject *op[3] = {arr, arr, NULL};
PyArray_Descr *dtypes[3] = {NULL, NULL, NULL};
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
PyObject *type_tup = NULL;
*out_dtype = NULL;
@@ -3062,7 +2800,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
PyArray_Descr *dtype;
PyArrayObject *result;
PyArray_AssignReduceIdentityFunc *assign_identity = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from a TLS global */
int buffersize = 0, errormask = 0;
@@ -3145,7 +2883,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
keepdims, 0,
assign_identity,
reduce_loop,
- ufunc, buffersize, ufunc_name);
+ ufunc, buffersize, ufunc_name, errormask);
Py_DECREF(dtype);
return result;
@@ -3170,7 +2908,7 @@ PyUFunc_Accumulate(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
/* These parameters come from extobj= or from a TLS global */
int buffersize = 0, errormask = 0;
@@ -3537,7 +3275,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
- const char *ufunc_name = _get_ufunc_name(ufunc);
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
char *opname = "reduceat";
/* These parameters come from extobj= or from a TLS global */
@@ -3899,7 +3637,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
static char *reduce_kwlist[] = {
"array", "axis", "dtype", "out", "keepdims", NULL};
static char *accumulate_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", NULL};
+ "array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
"array", "indices", "axis", "dtype", "out", NULL};
@@ -3927,7 +3665,20 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
_reduce_type[operation]);
return NULL;
}
-
+ /* if there is a tuple of 1 for `out` in kwds, unpack it */
+ if (kwds != NULL) {
+ PyObject *out_obj = PyDict_GetItem(kwds, npy_um_str_out);
+ if (out_obj != NULL && PyTuple_CheckExact(out_obj)) {
+ if (PyTuple_GET_SIZE(out_obj) != 1) {
+ PyErr_SetString(PyExc_ValueError,
+ "The 'out' tuple must have exactly one entry");
+ return NULL;
+ }
+ out_obj = PyTuple_GET_ITEM(out_obj, 0);
+ PyDict_SetItem(kwds, npy_um_str_out, out_obj);
+ }
+ }
+
if (operation == UFUNC_REDUCEAT) {
PyArray_Descr *indtype;
indtype = PyArray_DescrFromType(NPY_INTP);
@@ -3948,26 +3699,15 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
}
else if (operation == UFUNC_ACCUMULATE) {
- PyObject *bad_keepdimarg = NULL;
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&O:accumulate",
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&:accumulate",
accumulate_kwlist,
&op,
&axes_in,
PyArray_DescrConverter2, &otype,
- PyArray_OutputConverter, &out,
- &bad_keepdimarg)) {
+ PyArray_OutputConverter, &out)) {
Py_XDECREF(otype);
return NULL;
}
- /* Until removed outright by https://github.com/numpy/numpy/pull/8187 */
- if (bad_keepdimarg != NULL) {
- if (DEPRECATE_FUTUREWARNING(
- "keepdims argument has no effect on accumulate, and will be "
- "removed in future") < 0) {
- Py_XDECREF(otype);
- return NULL;
- }
- }
}
else {
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&i:reduce",
@@ -4031,7 +3771,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
for (i = 0; i < naxes; ++i) {
PyObject *tmp = PyTuple_GET_ITEM(axes_in, i);
int axis = PyArray_PyIntAsInt(tmp);
- if (axis == -1 && PyErr_Occurred()) {
+ if (error_converting(axis)) {
Py_XDECREF(otype);
Py_DECREF(mp);
return NULL;
@@ -4048,7 +3788,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
else {
int axis = PyArray_PyIntAsInt(axes_in);
/* TODO: PyNumber_Index would be good to use here */
- if (axis == -1 && PyErr_Occurred()) {
+ if (error_converting(axis)) {
Py_XDECREF(otype);
Py_DECREF(mp);
return NULL;
@@ -4507,39 +4247,6 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
return res;
}
-#if USE_USE_DEFAULTS==1
-/*
- * This is a strategy to buy a little speed up and avoid the dictionary
- * look-up in the default case. It should work in the presence of
- * threads. If it is deemed too complicated or it doesn't actually work
- * it could be taken out.
- */
-static int
-ufunc_update_use_defaults(void)
-{
- PyObject *errobj = NULL;
- int errmask, bufsize;
- int res;
-
- PyUFunc_NUM_NODEFAULTS += 1;
- res = PyUFunc_GetPyValues("test", &bufsize, &errmask, &errobj);
- PyUFunc_NUM_NODEFAULTS -= 1;
- if (res < 0) {
- Py_XDECREF(errobj);
- return -1;
- }
- if ((errmask != UFUNC_ERR_DEFAULT) || (bufsize != NPY_BUFSIZE)
- || (PyTuple_GET_ITEM(errobj, 1) != Py_None)) {
- PyUFunc_NUM_NODEFAULTS += 1;
- }
- else if (PyUFunc_NUM_NODEFAULTS > 0) {
- PyUFunc_NUM_NODEFAULTS -= 1;
- }
- Py_XDECREF(errobj);
- return 0;
-}
-#endif
-
NPY_NO_EXPORT PyObject *
ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
@@ -5596,8 +5303,10 @@ ufunc_get_doc(PyUFuncObject *ufunc)
if (doc == NULL) {
return NULL;
}
- PyUString_ConcatAndDel(&doc,
- PyUString_FromFormat("\n\n%s", ufunc->doc));
+ if (ufunc->doc != NULL) {
+ PyUString_ConcatAndDel(&doc,
+ PyUString_FromFormat("\n\n%s", ufunc->doc));
+ }
return doc;
}
diff --git a/numpy/core/src/umath/ufunc_object.h b/numpy/core/src/umath/ufunc_object.h
index 5613f38b4..d6fd3837a 100644
--- a/numpy/core/src/umath/ufunc_object.h
+++ b/numpy/core/src/umath/ufunc_object.h
@@ -7,6 +7,9 @@ ufunc_geterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
NPY_NO_EXPORT PyObject *
ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args);
+NPY_NO_EXPORT const char*
+ufunc_get_name_cstr(PyUFuncObject *ufunc);
+
/* interned strings (on umath import) */
NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_out;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_um_str_subok;
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 0fd3c45c5..e77b48fc4 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -21,6 +21,7 @@
#include "numpy/ufuncobject.h"
#include "ufunc_type_resolution.h"
+#include "ufunc_object.h"
#include "common.h"
static const char *
@@ -56,9 +57,7 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
PyArray_Descr **dtypes)
{
int i, nin = ufunc->nin, nop = nin + ufunc->nout;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
for (i = 0; i < nop; ++i) {
if (i < nin) {
@@ -184,9 +183,7 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1, type_num2;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 2 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -290,9 +287,7 @@ PyUFunc_SimpleUnaryOperationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 1 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -430,9 +425,7 @@ PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int i, type_num1, type_num2;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
if (ufunc->nin != 2 || ufunc->nout != 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
@@ -614,9 +607,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -804,9 +795,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -986,9 +975,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1130,9 +1117,7 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name;
-
- ufunc_name = ufunc->name ? ufunc->name : "<unnamed ufunc>";
+ const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1234,19 +1219,63 @@ type_reso_error: {
}
}
+
+/*
+ * True division should return float64 results when both inputs are integer
+ * types. The PyUFunc_DefaultTypeResolver promotes 8 bit integers to float16
+ * and 16 bit integers to float32, so that is overridden here by specifying a
+ * 'dd->d' signature. Returns -1 on failure.
+*/
+NPY_NO_EXPORT int
+PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ static PyObject *default_type_tup = NULL;
+
+ /* Set default type for integer inputs to NPY_DOUBLE */
+ if (default_type_tup == NULL) {
+ PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE);
+
+ if (tmp == NULL) {
+ return -1;
+ }
+ default_type_tup = PyTuple_Pack(3, tmp, tmp, tmp);
+ if (default_type_tup == NULL) {
+ Py_DECREF(tmp);
+ return -1;
+ }
+ Py_DECREF(tmp);
+ }
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ if (type_tup == NULL &&
+ (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
+ (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ default_type_tup, out_dtypes);
+ }
+ return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+}
/*
- * Function to check and report floor division warning when python2.x is
- * invoked with -3 switch
+ * Function to check and report floor division warning when python2.x is
+ * invoked with -3 switch
* See PEP238 and #7949 for numpy
- * This function will not be hit for py3 or when __future__ imports division.
+ * This function will not be hit for py3 or when __future__ imports division.
* See generate_umath.py for reason
*/
NPY_NO_EXPORT int
PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
{
/* Depreciation checks needed only on python 2 */
#if !defined(NPY_PY3K)
@@ -1255,17 +1284,15 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
- /* If both types are integer, warn the user, same as python does */
+ /* If both types are integer, warn the user, same as python does */
if (Py_DivisionWarningFlag &&
- (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
- (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2)))
- {
+ (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
+ (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
PyErr_Warn(PyExc_DeprecationWarning, "numpy: classic int division");
- }
-#endif
-
- return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
- type_tup, out_dtypes);
+ }
+#endif
+ return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
}
@@ -1305,8 +1332,9 @@ find_userloop(PyUFuncObject *ufunc,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
for (j = 0; j < nargs; ++j) {
@@ -1320,8 +1348,6 @@ find_userloop(PyUFuncObject *ufunc,
*out_innerloopdata = funcdata->data;
return 1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1343,7 +1369,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
PyObject *errmsg;
int i, j;
- ufunc_name = ufunc->name ? ufunc->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(ufunc);
/*
* If there are user-loops search them first.
@@ -1727,8 +1753,9 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
switch (ufunc_loop_matches(self, op,
input_casting, output_casting,
@@ -1744,8 +1771,6 @@ linear_search_userloop_type_resolver(PyUFuncObject *self,
set_ufunc_loop_data_types(self, op, out_dtype, types, funcdata->arg_dtypes);
return 1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1792,8 +1817,10 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
if (obj == NULL) {
continue;
}
- funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
- while (funcdata != NULL) {
+
+ for (funcdata = (PyUFunc_Loop1d *)NpyCapsule_AsVoidPtr(obj);
+ funcdata != NULL;
+ funcdata = funcdata->next) {
int *types = funcdata->arg_types;
int matched = 1;
@@ -1832,14 +1859,12 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
"matching the type-tuple, "
"but the inputs and/or outputs could not be "
"cast according to the casting rule",
- self->name ? self->name : "(unknown)");
+ ufunc_get_name_cstr(self));
return -1;
/* Error */
case -1:
return -1;
}
-
- funcdata = funcdata->next;
}
}
}
@@ -1938,7 +1963,7 @@ linear_search_type_resolver(PyUFuncObject *self,
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
- ufunc_name = self->name ? self->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(self);
use_min_scalar = should_use_min_scalar(op, nin);
@@ -2047,7 +2072,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
- ufunc_name = self->name ? self->name : "(unknown)";
+ ufunc_name = ufunc_get_name_cstr(self);
use_min_scalar = should_use_min_scalar(op, nin);
@@ -2059,7 +2084,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
PyErr_Format(PyExc_ValueError,
"a type-tuple must be specified "
"of length 1 or %d for ufunc '%s'", (int)nop,
- self->name ? self->name : "(unknown)");
+ ufunc_get_name_cstr(self));
return -1;
}
@@ -2112,7 +2137,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
"requires 1 typecode, or "
"%d typecode(s) before " \
"and %d after the -> sign",
- self->name ? self->name : "(unknown)",
+ ufunc_get_name_cstr(self),
self->nin, self->nout);
Py_XDECREF(str_obj);
return -1;
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index eaf5e91ce..fa9f1dbfa 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -42,7 +42,7 @@ PyUFunc_AbsoluteTypeResolver(PyUFuncObject *ufunc,
PyArrayObject **operands,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
-
+
NPY_NO_EXPORT int
PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
@@ -79,6 +79,13 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
+PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+NPY_NO_EXPORT int
PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
diff --git a/numpy/core/src/umath/umath_tests.c.src b/numpy/core/src/umath/umath_tests.c.src
index 6cd181897..8d9009a1a 100644
--- a/numpy/core/src/umath/umath_tests.c.src
+++ b/numpy/core/src/umath/umath_tests.c.src
@@ -305,6 +305,12 @@ addUfuncs(PyObject *dictionary) {
0, euclidean_pdist_signature);
PyDict_SetItemString(dictionary, "euclidean_pdist", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(inner1d_functions, inner1d_data,
+ inner1d_signatures, 2, 2, 1, PyUFunc_None, "inner1d_no_doc",
+ NULL,
+ 0, inner1d_signature);
+ PyDict_SetItemString(dictionary, "inner1d_no_doc", f);
+ Py_DECREF(f);
}
diff --git a/numpy/core/tests/__init__.py b/numpy/core/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/core/tests/__init__.py
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
index 2430866fd..77cf40620 100644
--- a/numpy/core/tests/test_abc.py
+++ b/numpy/core/tests/test_abc.py
@@ -1,43 +1,56 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
import numbers
+
+import numpy as np
from numpy.core.numerictypes import sctypes
-class ABC(TestCase):
+class TestABC(object):
+ def test_abstract(self):
+ assert_(issubclass(np.number, numbers.Number))
+
+ assert_(issubclass(np.inexact, numbers.Complex))
+ assert_(issubclass(np.complexfloating, numbers.Complex))
+ assert_(issubclass(np.floating, numbers.Real))
+
+ assert_(issubclass(np.integer, numbers.Integral))
+ assert_(issubclass(np.signedinteger, numbers.Integral))
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
+
def test_floats(self):
for t in sctypes['float']:
- assert_(isinstance(t(), numbers.Real),
+ assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
- assert_(not isinstance(t(), numbers.Rational),
+ assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
- assert_(isinstance(t(), numbers.Complex),
+ assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
- assert_(not isinstance(t(), numbers.Real),
+ assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
- assert_(isinstance(t(), numbers.Integral),
+ assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index e7ac0cdfd..7d4acd35d 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal
)
class TestArrayRepr(object):
@@ -61,7 +61,7 @@ class TestArrayRepr(object):
'array([list([1, 2]), list([3])], dtype=object)')
-class TestComplexArray(TestCase):
+class TestComplexArray(object):
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
@@ -108,19 +108,13 @@ class TestComplexArray(TestCase):
for res, val in zip(actual, wanted):
assert_(res == val)
-class TestArray2String(TestCase):
+class TestArray2String(object):
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
assert_(np.array2string(a) == '[0 1 2]')
assert_(np.array2string(a, max_line_width=4) == '[0 1\n 2]')
- def test_style_keyword(self):
- """This should only apply to 0-D arrays. See #1218."""
- stylestr = np.array2string(np.array(1.5),
- style=lambda x: "Value in 0-D array: " + str(x))
- assert_(stylestr == 'Value in 0-D array: 1.5')
-
def test_format_function(self):
"""Test custom format function for each element in array."""
def _format_function(x):
@@ -189,13 +183,13 @@ class TestArray2String(TestCase):
assert_equal(np.array2string(array_scalar), "( 1., 2.12345679, 3.)")
-class TestPrintOptions:
+class TestPrintOptions(object):
"""Test getting and setting global print options."""
- def setUp(self):
+ def setup(self):
self.oldopts = np.get_printoptions()
- def tearDown(self):
+ def teardown(self):
np.set_printoptions(**self.oldopts)
def test_basic(self):
@@ -242,6 +236,14 @@ class TestPrintOptions:
np.set_printoptions(formatter={'float_kind':None})
assert_equal(repr(x), "array([ 0., 1., 2.])")
+ def test_0d_arrays(self):
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
+ "array('2005-02-25', dtype='datetime64[D]')")
+
+ x = np.array(1)
+ np.set_printoptions(formatter={'all':lambda x: "test"})
+ assert_equal(repr(x), "array(test)")
+
def test_unicode_object_array():
import sys
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 48afa728d..92a1325bc 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -6,7 +6,7 @@ import numpy
import numpy as np
import datetime
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_warns, dec, suppress_warnings
)
@@ -18,7 +18,7 @@ except ImportError:
_has_pytz = False
-class TestDateTime(TestCase):
+class TestDateTime(object):
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
@@ -1131,7 +1131,19 @@ class TestDateTime(TestCase):
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
- self.assertEqual(len(sup.log), 0)
+ assert_equal(len(sup.log), 0)
+
+ def test_datetime_futurewarning_once_nat(self):
+ # Test that the futurewarning is only given once per inner loop
+ arr1 = np.array(['NaT', 'NaT', '2000-01-01'] * 2, dtype='M8[s]')
+ arr2 = np.array(['NaT', '2000-01-01', 'NaT'] * 2, dtype='M8[s]')
+ # All except less, because for less it can't be wrong (NaT is min)
+ for op in [np.equal, np.less, np.less_equal,
+ np.greater, np.greater_equal]:
+ with suppress_warnings() as sup:
+ rec = sup.record(FutureWarning, ".*NAT")
+ op(arr1, arr2)
+ assert_(len(rec) == 1, "failed for {}".format(op))
def test_datetime_minmax(self):
# The metadata of the result should become the GCD
@@ -1227,10 +1239,10 @@ class TestDateTime(TestCase):
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
- self.assertRaises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
- self.assertRaises(ValueError, lambda: np.dtype('M8[as/10]'))
+ assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
# Allow space instead of 'T' between date and time
@@ -1947,7 +1959,7 @@ class TestDateTime(TestCase):
assert_raises(ValueError, np.isnat, np.zeros(10, t))
-class TestDateTimeData(TestCase):
+class TestDateTimeData(object):
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 11d7c3b90..436643899 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -5,13 +5,14 @@ import sys
import numpy as np
from numpy.core.multiarray import _vec_string
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
+ suppress_warnings,
)
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
-class TestBasic(TestCase):
+class TestBasic(object):
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
@@ -23,7 +24,7 @@ class TestBasic(TestCase):
def test_from_object_array_unicode(self):
A = np.array([['abc', u'Sigma \u03a3'],
['long ', '0123456789']], dtype='O')
- self.assertRaises(ValueError, np.char.array, (A,))
+ assert_raises(ValueError, np.char.array, (A,))
B = np.char.array(A, **kw_unicode_true)
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
assert_array_equal(B, [['abc', u'Sigma \u03a3'],
@@ -62,7 +63,7 @@ class TestBasic(TestCase):
def fail():
np.char.array(A, **kw_unicode_false)
- self.assertRaises(UnicodeEncodeError, fail)
+ assert_raises(UnicodeEncodeError, fail)
def test_unicode_upconvert(self):
A = np.char.array(['abc'])
@@ -82,59 +83,59 @@ class TestBasic(TestCase):
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
-class TestVecString(TestCase):
+class TestVecString(object):
def test_non_existent_method(self):
def fail():
_vec_string('a', np.string_, 'bogus')
- self.assertRaises(AttributeError, fail)
+ assert_raises(AttributeError, fail)
def test_non_string_array(self):
def fail():
_vec_string(1, np.string_, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_args_tuple(self):
def fail():
_vec_string(['a'], np.string_, 'strip', 1)
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_type_descr(self):
def fail():
_vec_string(['a'], 'BOGUS', 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_function_args(self):
def fail():
_vec_string(['a'], np.string_, 'strip', (1,))
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_invalid_result_type(self):
def fail():
_vec_string(['a'], np.integer, 'strip')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_broadcast_error(self):
def fail():
_vec_string([['abc', 'def']], np.integer, 'find', (['a', 'd', 'j'],))
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
-class TestWhitespace(TestCase):
- def setUp(self):
+class TestWhitespace(object):
+ def setup(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
self.B = np.array([['abc', '123'],
@@ -148,16 +149,16 @@ class TestWhitespace(TestCase):
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
-class TestChar(TestCase):
- def setUp(self):
+class TestChar(object):
+ def setup(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
def test_it(self):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
-class TestComparisons(TestCase):
- def setUp(self):
+class TestComparisons(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '123 '],
@@ -184,21 +185,21 @@ class TestComparisons(TestCase):
class TestComparisonsMixed1(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.B = np.array([['efg', '123 '],
['051', 'tuv']], np.unicode_).view(np.chararray)
class TestComparisonsMixed2(TestComparisons):
"""Ticket #1276"""
- def setUp(self):
- TestComparisons.setUp(self)
+ def setup(self):
+ TestComparisons.setup(self)
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
-class TestInformation(TestCase):
- def setUp(self):
+class TestInformation(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
@@ -230,7 +231,7 @@ class TestInformation(TestCase):
def fail():
self.A.endswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
def test_find(self):
assert_(issubclass(self.A.find('a').dtype.type, np.integer))
@@ -244,7 +245,7 @@ class TestInformation(TestCase):
def fail():
self.A.index('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.index('abcba', 'b') == 1)
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
@@ -288,7 +289,7 @@ class TestInformation(TestCase):
def fail():
self.A.rindex('a')
- self.assertRaises(ValueError, fail)
+ assert_raises(ValueError, fail)
assert_(np.char.rindex('abcba', 'b') == 3)
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
@@ -300,11 +301,11 @@ class TestInformation(TestCase):
def fail():
self.A.startswith('3', 'fdjk')
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
-class TestMethods(TestCase):
- def setUp(self):
+class TestMethods(object):
+ def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
['123 \t 345 \0 ', 'UPPER']],
@@ -346,8 +347,11 @@ class TestMethods(TestCase):
A = np.char.array([b'\\u03a3'])
assert_(A.decode('unicode-escape')[0] == '\u03a3')
else:
- A = np.char.array(['736563726574206d657373616765'])
- assert_(A.decode('hex_codec')[0] == 'secret message')
+ with suppress_warnings() as sup:
+ if sys.py3kwarning:
+ sup.filter(DeprecationWarning, "'hex_codec'")
+ A = np.char.array(['736563726574206d657373616765'])
+ assert_(A.decode('hex_codec')[0] == 'secret message')
def test_encode(self):
B = self.B.encode('unicode_escape')
@@ -579,7 +583,7 @@ class TestMethods(TestCase):
def fail():
self.A.isnumeric()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
assert_array_equal(self.B.isnumeric(), [
[False, False], [True, False], [False, False]])
@@ -589,14 +593,14 @@ class TestMethods(TestCase):
def fail():
self.A.isdecimal()
- self.assertRaises(TypeError, fail)
+ assert_raises(TypeError, fail)
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
assert_array_equal(self.B.isdecimal(), [
[False, False], [True, False], [False, False]])
-class TestOperations(TestCase):
- def setUp(self):
+class TestOperations(object):
+ def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
self.B = np.array([['efg', '456'],
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 0ce7465fb..e3e8c32f9 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -28,7 +28,7 @@ class _DeprecationTestCase(object):
message = ''
warning_cls = DeprecationWarning
- def setUp(self):
+ def setup(self):
self.warn_ctx = warnings.catch_warnings(record=True)
self.log = self.warn_ctx.__enter__()
@@ -42,7 +42,7 @@ class _DeprecationTestCase(object):
warnings.filterwarnings("always", message=self.message,
category=self.warning_cls)
- def tearDown(self):
+ def teardown(self):
self.warn_ctx.__exit__()
def assert_deprecated(self, function, num=1, ignore_others=False,
@@ -259,7 +259,7 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
"""
def test_fortran_contiguous(self):
- self.assert_deprecated(np.ones((2,2)).T.view, args=(np.complex,))
+ self.assert_deprecated(np.ones((2,2)).T.view, args=(complex,))
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
@@ -376,20 +376,10 @@ class TestNumericStyleTypecodes(_DeprecationTestCase):
args=(dt,))
-class TestAccumulateKeepDims(_DeprecationTestCase):
- """
- Deprecate the keepdims argument to np.ufunc.accumulate, which was never used or documented
- """
- def test_keepdims(self):
- with warnings.catch_warnings():
- warnings.filterwarnings('always', '', FutureWarning)
- assert_warns(FutureWarning, np.add.accumulate, [1], keepdims=True)
-
-
class TestTestDeprecated(object):
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
- test_case_instance.setUp()
+ test_case_instance.setup()
assert_raises(AssertionError,
test_case_instance.assert_deprecated,
lambda: None)
@@ -398,7 +388,7 @@ class TestTestDeprecated(object):
warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
test_case_instance.assert_deprecated(foo)
- test_case_instance.tearDown()
+ test_case_instance.teardown()
class TestClassicIntDivision(_DeprecationTestCase):
@@ -444,5 +434,21 @@ class TestNPY_CHAR(_DeprecationTestCase):
assert_(npy_char_deprecation() == 'S1')
+class TestDatetimeEvent(_DeprecationTestCase):
+ # 2017-08-11, 1.14.0
+ def test_3_tuple(self):
+ for cls in (np.datetime64, np.timedelta64):
+ # two valid uses - (unit, num) and (unit, num, den, None)
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
+
+ # trying to use the event argument, removed in 1.7.0, is deprecated
+ # it used to be a uint8
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 452cbd4bd..9cefb2ad1 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,11 +1,12 @@
from __future__ import division, absolute_import, print_function
+import pickle
import sys
import numpy as np
from numpy.core.test_rational import rational
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
dec
)
@@ -19,10 +20,10 @@ def assert_dtype_not_equal(a, b):
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
-class TestBuiltin(TestCase):
+class TestBuiltin(object):
def test_run(self):
"""Only test hash runs at all."""
- for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
+ for t in [int, float, complex, np.int32, str, object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
@@ -30,12 +31,12 @@ class TestBuiltin(TestCase):
def test_dtype(self):
# Make sure equivalent byte order char hash the same (e.g. < and = on
# little endian)
- for t in [np.int, np.float]:
+ for t in [int, float]:
dt = np.dtype(t)
dt2 = dt.newbyteorder("<")
dt3 = dt.newbyteorder(">")
if dt == dt2:
- self.assertTrue(dt.byteorder != dt2.byteorder, "bogus test")
+ assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
@@ -50,8 +51,8 @@ class TestBuiltin(TestCase):
else:
left = uintp
right = np.dtype(np.ulonglong)
- self.assertTrue(left == right)
- self.assertTrue(hash(left) == hash(right))
+ assert_(left == right)
+ assert_(hash(left) == hash(right))
def test_invalid_types(self):
# Make sure invalid type strings raise an error
@@ -103,17 +104,26 @@ class TestBuiltin(TestCase):
'formats':['i1', 'f4'],
'offsets':[0, 2]}, align=True)
-class TestRecord(TestCase):
+ def test_field_order_equality(self):
+ x = np.dtype({'names': ['A', 'B'],
+ 'formats': ['i4', 'f4'],
+ 'offsets': [0, 4]})
+ y = np.dtype({'names': ['B', 'A'],
+ 'formats': ['f4', 'i4'],
+ 'offsets': [4, 0]})
+ assert_equal(x == y, False)
+
+class TestRecord(object):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
assert_dtype_equal(a, b)
def test_different_names(self):
# In theory, they may hash the same (collision) ?
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('ye', int)])
assert_dtype_not_equal(a, b)
def test_different_titles(self):
@@ -128,9 +138,9 @@ class TestRecord(TestCase):
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
- a = np.dtype([('yo', np.int)])
- b = np.dtype([('yo', np.int)])
- c = np.dtype([('ye', np.int)])
+ a = np.dtype([('yo', int)])
+ b = np.dtype([('yo', int)])
+ c = np.dtype([('ye', int)])
assert_dtype_equal(a, b)
assert_dtype_not_equal(a, c)
a.names = ['ye']
@@ -145,10 +155,10 @@ class TestRecord(TestCase):
"""Test if an appropriate exception is raised when passing bad values to
the dtype constructor.
"""
- self.assertRaises(TypeError, np.dtype,
- dict(names=set(['A', 'B']), formats=['f8', 'i4']))
- self.assertRaises(TypeError, np.dtype,
- dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
+ assert_raises(TypeError, np.dtype,
+ dict(names=set(['A', 'B']), formats=['f8', 'i4']))
+ assert_raises(TypeError, np.dtype,
+ dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
@@ -210,11 +220,12 @@ class TestRecord(TestCase):
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
'offsets':[4, 0, 2]}, align=True)
assert_equal(dt.itemsize, 8)
+ # field name should not matter: assignment is by position
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
- 'formats':['<u2', '<u4', '<u2'],
- 'offsets':[2, 4, 0]}, align=True)
+ 'formats':['<u4', '<u2', '<u2'],
+ 'offsets':[4, 0, 2]}, align=True)
vals = [(0, 1, 2), (3, -1, 4)]
- vals2 = [(2, 0, 1), (4, 3, -1)]
+ vals2 = [(0, 1, 2), (3, -1, 4)]
a = np.array(vals, dt)
b = np.array(vals2, dt2)
assert_equal(a.astype(dt2), b)
@@ -275,9 +286,9 @@ class TestRecord(TestCase):
def test_nonint_offsets(self):
# gh-8059
def make_dtype(off):
- return np.dtype({'names': ['A'], 'formats': ['i4'],
+ return np.dtype({'names': ['A'], 'formats': ['i4'],
'offsets': [off]})
-
+
assert_raises(TypeError, make_dtype, 'ASD')
assert_raises(OverflowError, make_dtype, 2**70)
assert_raises(TypeError, make_dtype, 2.3)
@@ -288,10 +299,10 @@ class TestRecord(TestCase):
np.zeros(1, dtype=dt)[0].item()
-class TestSubarray(TestCase):
+class TestSubarray(object):
def test_single_subarray(self):
- a = np.dtype((np.int, (2)))
- b = np.dtype((np.int, (2,)))
+ a = np.dtype((int, (2)))
+ b = np.dtype((int, (2,)))
assert_dtype_equal(a, b)
assert_equal(type(a.subdtype[1]), tuple)
@@ -299,29 +310,29 @@ class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 3)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 3)))
assert_dtype_equal(a, b)
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (3, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (3, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (2, 3)))
- b = np.dtype((np.int, (2, 2)))
+ a = np.dtype((int, (2, 3)))
+ b = np.dtype((int, (2, 2)))
assert_dtype_not_equal(a, b)
- a = np.dtype((np.int, (1, 2, 3)))
- b = np.dtype((np.int, (1, 2)))
+ a = np.dtype((int, (1, 2, 3)))
+ b = np.dtype((int, (1, 2)))
assert_dtype_not_equal(a, b)
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
- assert_dtype_equal(np.dtype((np.int, 2)), np.dtype((np.int, (2,))))
+ assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
assert_dtype_equal(np.dtype(d), np.dtype(d))
@@ -414,47 +425,47 @@ class TestSubarray(TestCase):
assert_equal(t1.alignment, t2.alignment)
-class TestMonsterType(TestCase):
+class TestMonsterType(object):
"""Test deeply nested subtypes."""
def test1(self):
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
- a = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
- b = np.dtype([('yo', np.int), ('ye', simple1),
- ('yi', np.dtype((np.int, (3, 2))))])
+ a = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
+ b = np.dtype([('yo', int), ('ye', simple1),
+ ('yi', np.dtype((int, (3, 2))))])
assert_dtype_equal(a, b)
- c = np.dtype([('yo', np.int), ('ye', simple1),
+ c = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
- d = np.dtype([('yo', np.int), ('ye', simple1),
+ d = np.dtype([('yo', int), ('ye', simple1),
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
-class TestMetadata(TestCase):
+class TestMetadata(object):
def test_no_metadata(self):
d = np.dtype(int)
- self.assertEqual(d.metadata, None)
+ assert_(d.metadata is None)
def test_metadata_takes_dict(self):
d = np.dtype(int, metadata={'datum': 1})
- self.assertEqual(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
def test_metadata_rejects_nondict(self):
- self.assertRaises(TypeError, np.dtype, int, metadata='datum')
- self.assertRaises(TypeError, np.dtype, int, metadata=1)
- self.assertRaises(TypeError, np.dtype, int, metadata=None)
+ assert_raises(TypeError, np.dtype, int, metadata='datum')
+ assert_raises(TypeError, np.dtype, int, metadata=1)
+ assert_raises(TypeError, np.dtype, int, metadata=None)
def test_nested_metadata(self):
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
- self.assertEqual(d['a'].metadata, {'datum': 1})
+ assert_(d['a'].metadata == {'datum': 1})
- def base_metadata_copied(self):
+ def test_base_metadata_copied(self):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
- assert_equal(d.metadata, {'datum': 1})
+ assert_(d.metadata == {'datum': 1})
-class TestString(TestCase):
+class TestString(object):
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
@@ -581,7 +592,7 @@ class TestString(TestCase):
# Pull request #4722
np.array(["", ""]).astype(object)
-class TestDtypeAttributeDeletion(TestCase):
+class TestDtypeAttributeDeletion(object):
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -599,7 +610,7 @@ class TestDtypeAttributeDeletion(TestCase):
assert_raises(AttributeError, delattr, dt, s)
-class TestDtypeAttributes(TestCase):
+class TestDtypeAttributes(object):
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
@@ -624,6 +635,59 @@ class TestDtypeAttributes(TestCase):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+class TestPickling(object):
+
+ def check_pickling(self, dtype):
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
+ pickled = pickle.loads(pickle.dumps(dtype, proto))
+ assert_equal(pickled, dtype)
+ assert_equal(pickled.descr, dtype.descr)
+ if dtype.metadata is not None:
+ assert_equal(pickled.metadata, dtype.metadata)
+ # Check the reconstructed dtype is functional
+ x = np.zeros(3, dtype=dtype)
+ y = np.zeros(3, dtype=pickled)
+ assert_equal(x, y)
+ assert_equal(x[0], y[0])
+
+ def test_builtin(self):
+ for t in [int, float, complex, np.int32, str, object,
+ np.unicode, bool]:
+ self.check_pickling(np.dtype(t))
+
+ def test_structured(self):
+ dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
+ self.check_pickling(dt)
+ dt = np.dtype('i4, i1', align=True)
+ self.check_pickling(dt)
+ dt = np.dtype('i4, i1', align=False)
+ self.check_pickling(dt)
+ dt = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ self.check_pickling(dt)
+ dt = np.dtype({'names': ['r', 'b'],
+ 'formats': ['u1', 'u1'],
+ 'titles': ['Red pixel', 'Blue pixel']})
+ self.check_pickling(dt)
+
+ def test_datetime(self):
+ for base in ['m8', 'M8']:
+ for unit in ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as']:
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
+ self.check_pickling(dt)
+ if unit:
+ dt = np.dtype('%s[7%s]' % (base, unit))
+ self.check_pickling(dt)
+
+ def test_metadata(self):
+ dt = np.dtype(int, metadata={'datum': 1})
+ self.check_pickling(dt)
+
+
def test_rational_dtype():
# test for bug gh-5719
a = np.array([1111], dtype=rational).astype
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 8466d924e..7cc9f67ef 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_raises, suppress_warnings
)
@@ -14,7 +14,7 @@ for size, char in zip(sizes, chars):
global_size_dict[char] = size
-class TestEinSum(TestCase):
+class TestEinSum(object):
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
@@ -568,48 +568,37 @@ class TestEinSum(TestCase):
A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
B = np.arange(3)
- ref = np.einsum('ijk,j->ijk', A, B)
- assert_equal(np.einsum('ij...,j...->ij...', A, B), ref)
- assert_equal(np.einsum('ij...,...j->ij...', A, B), ref)
- assert_equal(np.einsum('ij...,j->ij...', A, B), ref) # used to raise error
-
- assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=True), ref) # used to raise error
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
A = np.arange(12).reshape((4, 3))
B = np.arange(6).reshape((3, 2))
- ref = np.einsum('ik,kj->ij', A, B)
- assert_equal(np.einsum('ik...,k...->i...', A, B), ref)
- assert_equal(np.einsum('ik...,...kj->i...j', A, B), ref)
- assert_equal(np.einsum('...k,kj', A, B), ref) # used to raise error
- assert_equal(np.einsum('ik,k...->i...', A, B), ref) # used to raise error
-
- assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=True), ref)
- assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=True), ref)
- assert_equal(np.einsum('...k,kj', A, B, optimize=True), ref) # used to raise error
- assert_equal(np.einsum('ik,k...->i...', A, B, optimize=True), ref) # used to raise error
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
dims = [2, 3, 4, 5]
a = np.arange(np.prod(dims)).reshape(dims)
v = np.arange(dims[2])
- ref = np.einsum('ijkl,k->ijl', a, v)
- assert_equal(np.einsum('ijkl,k', a, v), ref)
- assert_equal(np.einsum('...kl,k', a, v), ref) # used to raise error
- assert_equal(np.einsum('...kl,k...', a, v), ref)
- # no real diff from 1st
-
- assert_equal(np.einsum('ijkl,k', a, v, optimize=True), ref)
- assert_equal(np.einsum('...kl,k', a, v, optimize=True), ref) # used to raise error
- assert_equal(np.einsum('...kl,k...', a, v, optimize=True), ref)
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
J, K, M = 160, 160, 120
A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
- ref = np.einsum('...lmn,...lmno->...o', A, B)
- assert_equal(np.einsum('...lmn,lmno->...o', A, B), ref) # used to raise error
- assert_equal(np.einsum('...lmn,lmno->...o', A, B,
- optimize=True), ref) # used to raise error
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
+ for opt in [True, False]:
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
+ optimize=opt), ref) # used to raise error
def test_einsum_fixedstridebug(self):
# Issue #4485 obscure einsum bug
@@ -777,7 +766,7 @@ class TestEinSum(TestCase):
self.optimize_compare('aef,fbc,dca->bde')
-class TestEinSumPath(TestCase):
+class TestEinSumPath(object):
def build_operands(self, string):
# Builds views based off initial operands
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 7fc749a7e..ae06af7fd 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -3,10 +3,10 @@ from __future__ import division, absolute_import, print_function
import platform
import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite, dec
+from numpy.testing import assert_, run_module_suite, dec
-class TestErrstate(TestCase):
+class TestErrstate(object):
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 755ee2c04..d87585dcf 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -59,7 +59,7 @@ def exc_iter(*args):
try:
yield iterate()
- except:
+ except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 94c55bdd1..bffe5237a 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import (logspace, linspace, geomspace, dtype, array, sctypes,
arange, isnan, ndarray, sqrt, nextafter)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_allclose, suppress_warnings
)
@@ -40,7 +40,7 @@ class PhysicalQuantity2(ndarray):
__array_priority__ = 10
-class TestLogspace(TestCase):
+class TestLogspace(object):
def test_basic(self):
y = logspace(0, 6)
@@ -76,7 +76,7 @@ class TestLogspace(TestCase):
assert_equal(ls, logspace(1.0, 7.0, 1))
-class TestGeomspace(TestCase):
+class TestGeomspace(object):
def test_basic(self):
y = geomspace(1, 1e6)
@@ -191,7 +191,7 @@ class TestGeomspace(TestCase):
assert_raises(ValueError, geomspace, 0, 0)
-class TestLinspace(TestCase):
+class TestLinspace(object):
def test_basic(self):
y = linspace(0, 10)
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index 4adb80f7f..455f5257c 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -7,44 +7,44 @@ import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_
+ run_module_suite, assert_equal, assert_, assert_raises
)
from numpy.core.getlimits import (_discovered_machar, _float16_ma, _float32_ma,
_float64_ma, _float128_ma, _float80_ma)
##################################################
-class TestPythonFloat(TestCase):
+class TestPythonFloat(object):
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
-class TestHalf(TestCase):
+class TestHalf(object):
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
-class TestSingle(TestCase):
+class TestSingle(object):
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
-class TestDouble(TestCase):
+class TestDouble(object):
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
-class TestLongdouble(TestCase):
- def test_singleton(self,level=2):
+class TestLongdouble(object):
+ def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
-class TestFinfo(TestCase):
+class TestFinfo(object):
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
@@ -55,9 +55,9 @@ class TestFinfo(TestCase):
'nmant', 'precision', 'resolution', 'tiny'):
assert_equal(getattr(finfo(dt1), attr),
getattr(finfo(dt2), attr), attr)
- self.assertRaises(ValueError, finfo, 'i4')
+ assert_raises(ValueError, finfo, 'i4')
-class TestIinfo(TestCase):
+class TestIinfo(object):
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
@@ -67,14 +67,14 @@ class TestIinfo(TestCase):
for attr in ('bits', 'min', 'max'):
assert_equal(getattr(iinfo(dt1), attr),
getattr(iinfo(dt2), attr), attr)
- self.assertRaises(ValueError, iinfo, 'f4')
+ assert_raises(ValueError, iinfo, 'f4')
def test_unsigned_max(self):
types = np.sctypes['uint']
for T in types:
assert_equal(iinfo(T).max, T(-1))
-class TestRepr(TestCase):
+class TestRepr(object):
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 7a4d36333..813cf9572 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -4,8 +4,7 @@ import platform
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal, \
- dec
+from numpy.testing import run_module_suite, assert_, assert_equal, dec
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
@@ -18,8 +17,8 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_(False,
"Did not raise floating point %s error" % strmatch)
-class TestHalf(TestCase):
- def setUp(self):
+class TestHalf(object):
+ def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
self.all_f16.dtype = float16
@@ -66,7 +65,7 @@ class TestHalf(TestCase):
# Check the range for which all integers can be represented
i_int = np.arange(-2048, 2049)
i_f16 = np.array(i_int, dtype=float16)
- j = np.array(i_f16, dtype=np.int)
+ j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
def test_nans_infs(self):
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index e6b6be361..50919ffec 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_raises
+from numpy.testing import run_module_suite, assert_raises
-class TestIndexErrors(TestCase):
+class TestIndexErrors(object):
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 55eeb694a..4c3bac529 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -9,7 +9,7 @@ import numpy as np
from numpy.core.multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_warns, HAS_REFCOUNT
)
@@ -28,7 +28,7 @@ except ImportError:
_HAS_CTYPE = False
-class TestIndexing(TestCase):
+class TestIndexing(object):
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -106,6 +106,12 @@ class TestIndexing(TestCase):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
+ def test_void_scalar_empty_tuple(self):
+ s = np.zeros((), dtype='V4')
+ assert_equal(s[()].dtype, s.dtype)
+ assert_equal(s[()], s)
+ assert_equal(type(s[...]), np.ndarray)
+
def test_same_kind_index_casting(self):
# Indexes should be cast with same-kind and not safe, even if that
# is somewhat unsafe. So test various different code paths.
@@ -511,7 +517,7 @@ class TestIndexing(TestCase):
arr[slices] = 10
assert_array_equal(arr, 10.)
-class TestFieldIndexing(TestCase):
+class TestFieldIndexing(object):
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -520,7 +526,7 @@ class TestFieldIndexing(TestCase):
assert_(isinstance(a[['a']], np.ndarray))
-class TestBroadcastedAssignments(TestCase):
+class TestBroadcastedAssignments(object):
def assign(self, a, ind, val):
a[ind] = val
return a
@@ -571,7 +577,7 @@ class TestBroadcastedAssignments(TestCase):
assert_((a[::-1] == v).all())
-class TestSubclasses(TestCase):
+class TestSubclasses(object):
def test_basic(self):
class SubClass(np.ndarray):
pass
@@ -616,7 +622,7 @@ class TestSubclasses(TestCase):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
-class TestFancyIndexingCast(TestCase):
+class TestFancyIndexingCast(object):
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -638,7 +644,7 @@ class TestFancyIndexingCast(TestCase):
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
-class TestFancyIndexingEquivalence(TestCase):
+class TestFancyIndexingEquivalence(object):
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -686,7 +692,7 @@ class TestFancyIndexingEquivalence(TestCase):
assert_array_equal(a, b[0])
-class TestMultiIndexingAutomated(TestCase):
+class TestMultiIndexingAutomated(object):
"""
These tests use code to mimic the C-Code indexing for selection.
@@ -708,7 +714,7 @@ class TestMultiIndexingAutomated(TestCase):
"""
- def setUp(self):
+ def setup(self):
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
self.b = np.empty((3, 0, 5, 6))
self.complex_indices = ['skip', Ellipsis,
@@ -847,7 +853,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
flat_indx = np.ravel_multi_index(np.nonzero(indx),
arr.shape[ax:ax+indx.ndim], mode='raise')
- except:
+ except Exception:
error_unless_broadcast_to_empty = True
# fill with 0s instead, and raise error later
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
@@ -946,7 +952,7 @@ class TestMultiIndexingAutomated(TestCase):
try:
mi = np.ravel_multi_index(indx[1:], orig_slice,
mode='raise')
- except:
+ except Exception:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
@@ -1103,7 +1109,7 @@ class TestMultiIndexingAutomated(TestCase):
for index in self.complex_indices:
self._check_single_index(a, index)
-class TestFloatNonIntegerArgument(TestCase):
+class TestFloatNonIntegerArgument(object):
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
@@ -1158,7 +1164,7 @@ class TestFloatNonIntegerArgument(TestCase):
assert_raises(TypeError, np.min, d, (.2, 1.2))
-class TestBooleanIndexing(TestCase):
+class TestBooleanIndexing(object):
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
@@ -1178,7 +1184,7 @@ class TestBooleanIndexing(TestCase):
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
-class TestArrayToIndexDeprecation(TestCase):
+class TestArrayToIndexDeprecation(object):
"""Creating an an index from array not 0-D is an error.
"""
@@ -1191,7 +1197,7 @@ class TestArrayToIndexDeprecation(TestCase):
assert_raises(TypeError, np.take, a, [0], a)
-class TestNonIntegerArrayLike(TestCase):
+class TestNonIntegerArrayLike(object):
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
@@ -1208,7 +1214,7 @@ class TestNonIntegerArrayLike(TestCase):
a.__getitem__([])
-class TestMultipleEllipsisError(TestCase):
+class TestMultipleEllipsisError(object):
"""An index can only have a single ellipsis.
"""
@@ -1219,7 +1225,7 @@ class TestMultipleEllipsisError(TestCase):
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-class TestCApiAccess(TestCase):
+class TestCApiAccess(object):
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index 1eb09f1e0..a0a458ca5 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -4,12 +4,12 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises,
+ run_module_suite, assert_, assert_raises,
assert_array_equal, HAS_REFCOUNT
)
-class TestTake(TestCase):
+class TestTake(object):
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
@@ -24,7 +24,7 @@ class TestTake(TestCase):
# Currently all types but object, use the same function generation.
# So it should not be necessary to test all. However test also a non
# refcounted struct on top of object.
- types = np.int, np.object, np.dtype([('', 'i', 2)])
+ types = int, object, np.dtype([('', 'i', 2)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index eda52c90a..625d40c1b 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -5,9 +5,9 @@ import locale
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_equal, dec, assert_raises,
- assert_array_equal, TestCase, temppath,
+ assert_array_equal, temppath,
)
-from test_print import in_foreign_locale
+from .test_print import in_foreign_locale
LD_INFO = np.finfo(np.longdouble)
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
@@ -110,7 +110,7 @@ def test_fromstring_missing():
np.array([1]))
-class FileBased(TestCase):
+class TestFileBased(object):
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index 765b38ae0..7acb02eef 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -1,11 +1,16 @@
+"""
+Test machar. Given recent changes to hardcode type data, we might want to get
+rid of both MachAr and this test at some point.
+
+"""
from __future__ import division, absolute_import, print_function
from numpy.core.machar import MachAr
import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import run_module_suite
-class TestMachAr(TestCase):
+class TestMachAr(object):
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
@@ -13,6 +18,7 @@ class TestMachAr(TestCase):
hiprec = ntypes.float96
MachAr(lambda v:array([v], hiprec))
except AttributeError:
+ # Fixme, this needs to raise a 'skip' exception.
"Skipping test: no ntypes.float96 available on this platform."
def test_underlow(self):
@@ -22,7 +28,8 @@ class TestMachAr(TestCase):
try:
self._run_machar_highprec()
except FloatingPointError as e:
- self.fail("Caught %s exception, should not have been raised." % e)
+ msg = "Caught %s exception, should not have been raised." % e
+ raise AssertionError(msg)
if __name__ == "__main__":
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index c0c352230..1cd09ab21 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -12,12 +12,12 @@ from numpy.compat import Path
from numpy import arange, allclose, asarray
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
dec, suppress_warnings
)
-class TestMemmap(TestCase):
- def setUp(self):
+class TestMemmap(object):
+ def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.tempdir = mkdtemp()
self.shape = (3, 4)
@@ -25,7 +25,7 @@ class TestMemmap(TestCase):
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
- def tearDown(self):
+ def teardown(self):
self.tmpfp.close()
shutil.rmtree(self.tempdir)
@@ -41,7 +41,7 @@ class TestMemmap(TestCase):
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
- self.assertEqual(newfp.flags.writeable, False)
+ assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self):
tmpname = mktemp('', 'mmap', dir=self.tempdir)
@@ -60,8 +60,8 @@ class TestMemmap(TestCase):
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
- self.assertEqual(offset, fp.offset)
- self.assertEqual(mode, fp.mode)
+ assert_equal(offset, fp.offset)
+ assert_equal(mode, fp.mode)
del fp
def test_filename(self):
@@ -70,9 +70,9 @@ class TestMemmap(TestCase):
shape=self.shape)
abspath = os.path.abspath(tmpname)
fp[:] = self.data[:]
- self.assertEqual(abspath, fp.filename)
+ assert_equal(abspath, fp.filename)
b = fp[:1]
- self.assertEqual(abspath, b.filename)
+ assert_equal(abspath, b.filename)
del b
del fp
@@ -83,16 +83,16 @@ class TestMemmap(TestCase):
shape=self.shape)
abspath = os.path.realpath(os.path.abspath(tmpname))
fp[:] = self.data[:]
- self.assertEqual(abspath, str(fp.filename.resolve()))
+ assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
- self.assertEqual(abspath, str(b.filename.resolve()))
+ assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
- self.assertEqual(fp.filename, self.tmpfp.name)
+ assert_equal(fp.filename, self.tmpfp.name)
@dec.knownfailureif(sys.platform == 'gnu0', "This test is known to fail on hurd")
def test_flush(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 835d03528..bbdf4dbfa 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -22,21 +22,21 @@ from decimal import Decimal
import numpy as np
from numpy.compat import strchar, unicode
-from test_print import in_foreign_locale
+from .test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises, assert_warns,
- assert_equal, assert_almost_equal, assert_array_equal,
+ run_module_suite, assert_, assert_raises, assert_warns,
+ assert_equal, assert_almost_equal, assert_array_equal, assert_raises_regex,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath, suppress_warnings
)
# Need to test an object that does not fully implement math interface
-from datetime import timedelta
+from datetime import timedelta, datetime
if sys.version_info[:2] > (3, 2):
@@ -74,15 +74,15 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
return data
-class TestFlags(TestCase):
- def setUp(self):
+class TestFlags(object):
+ def setup(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
- self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
- self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0] = 3', mydict)
+ assert_raises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
@@ -110,7 +110,7 @@ class TestFlags(TestCase):
assert_(a.flags.aligned)
-class TestHash(TestCase):
+class TestHash(object):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
@@ -132,8 +132,8 @@ class TestHash(TestCase):
err_msg="%r: 2**%d - 1" % (ut, i))
-class TestAttributes(TestCase):
- def setUp(self):
+class TestAttributes(object):
+ def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
@@ -164,7 +164,7 @@ class TestAttributes(TestCase):
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
- self.assertTrue(self.three.dtype.str[0] in '<>')
+ assert_(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
@@ -194,12 +194,12 @@ class TestAttributes(TestCase):
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(ValueError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
- self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
+ assert_raises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
@@ -216,9 +216,9 @@ class TestAttributes(TestCase):
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
- self.assertRaises(ValueError, make_array, 4, 4, -2)
- self.assertRaises(ValueError, make_array, 4, 2, -1)
- self.assertRaises(RuntimeError, make_array, 8, 3, 1)
+ assert_raises(ValueError, make_array, 4, 4, -2)
+ assert_raises(ValueError, make_array, 4, 2, -1)
+ assert_raises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
@@ -226,12 +226,12 @@ class TestAttributes(TestCase):
def set_strides(arr, strides):
arr.strides = strides
- self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
+ assert_raises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
- self.assertRaises(ValueError, set_strides, x[::-1], -1)
+ assert_raises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
@@ -265,7 +265,7 @@ class TestAttributes(TestCase):
assert_array_equal(x['b'], [-2, -2])
-class TestArrayConstruction(TestCase):
+class TestArrayConstruction(object):
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
@@ -297,7 +297,7 @@ class TestArrayConstruction(TestCase):
assert_equal(r[0], [d, d + 1])
assert_equal(r[1], d + 2)
- tgt = np.ones((2, 3), dtype=np.bool)
+ tgt = np.ones((2, 3), dtype=bool)
tgt[0, 2] = False
tgt[1, 0:2] = False
r = np.array([[True, True, False], [False, False, True]])
@@ -343,7 +343,7 @@ class TestArrayConstruction(TestCase):
assert_(np.asfortranarray(d).flags.f_contiguous)
-class TestAssignment(TestCase):
+class TestAssignment(object):
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
@@ -449,7 +449,7 @@ class TestAssignment(TestCase):
assert_equal(arr[0], tinya)
-class TestDtypedescr(TestCase):
+class TestDtypedescr(object):
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
@@ -457,48 +457,48 @@ class TestDtypedescr(TestCase):
assert_equal(d2, np.dtype(np.float64))
def test_byteorders(self):
- self.assertNotEqual(np.dtype('<i4'), np.dtype('>i4'))
- self.assertNotEqual(np.dtype([('a', '<i4')]), np.dtype([('a', '>i4')]))
+ assert_(np.dtype('<i4') != np.dtype('>i4'))
+ assert_(np.dtype([('a', '<i4')]) != np.dtype([('a', '>i4')]))
-class TestZeroRank(TestCase):
- def setUp(self):
+class TestZeroRank(object):
+ def setup(self):
self.d = np.array(0), np.array('x', object)
def test_ellipsis_subscript(self):
a, b = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(b[...], 'x')
- self.assertTrue(a[...].base is a) # `a[...] is a` in numpy <1.9.
- self.assertTrue(b[...].base is b) # `b[...] is b` in numpy <1.9.
+ assert_equal(a[...], 0)
+ assert_equal(b[...], 'x')
+ assert_(a[...].base is a) # `a[...] is a` in numpy <1.9.
+ assert_(b[...].base is b) # `b[...] is b` in numpy <1.9.
def test_empty_subscript(self):
a, b = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(b[()], 'x')
- self.assertTrue(type(a[()]) is a.dtype.type)
- self.assertTrue(type(b[()]) is str)
+ assert_equal(a[()], 0)
+ assert_equal(b[()], 'x')
+ assert_(type(a[()]) is a.dtype.type)
+ assert_(type(b[()]) is str)
def test_invalid_subscript(self):
a, b = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[0], b)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], b)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[0], b)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], b)
def test_ellipsis_subscript_assignment(self):
a, b = self.d
a[...] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[...] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_empty_subscript_assignment(self):
a, b = self.d
a[()] = 42
- self.assertEqual(a, 42)
+ assert_equal(a, 42)
b[()] = ''
- self.assertEqual(b.item(), '')
+ assert_equal(b.item(), '')
def test_invalid_subscript_assignment(self):
a, b = self.d
@@ -506,20 +506,20 @@ class TestZeroRank(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(IndexError, assign, a, 0, 42)
- self.assertRaises(IndexError, assign, b, 0, '')
- self.assertRaises(ValueError, assign, a, (), '')
+ assert_raises(IndexError, assign, a, 0, 42)
+ assert_raises(IndexError, assign, b, 0, '')
+ assert_raises(ValueError, assign, a, (), '')
def test_newaxis(self):
a, b = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a, b = self.d
@@ -527,40 +527,40 @@ class TestZeroRank(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_constructor(self):
x = np.ndarray(())
x[()] = 5
- self.assertEqual(x[()], 5)
+ assert_equal(x[()], 5)
y = np.ndarray((), buffer=x)
y[()] = 6
- self.assertEqual(x[()], 6)
+ assert_equal(x[()], 6)
def test_output(self):
x = np.array(2)
- self.assertRaises(ValueError, np.add, x, [1], x)
+ assert_raises(ValueError, np.add, x, [1], x)
-class TestScalarIndexing(TestCase):
- def setUp(self):
+class TestScalarIndexing(object):
+ def setup(self):
self.d = np.array([0, 1])[0]
def test_ellipsis_subscript(self):
a = self.d
- self.assertEqual(a[...], 0)
- self.assertEqual(a[...].shape, ())
+ assert_equal(a[...], 0)
+ assert_equal(a[...].shape, ())
def test_empty_subscript(self):
a = self.d
- self.assertEqual(a[()], 0)
- self.assertEqual(a[()].shape, ())
+ assert_equal(a[()], 0)
+ assert_equal(a[()].shape, ())
def test_invalid_subscript(self):
a = self.d
- self.assertRaises(IndexError, lambda x: x[0], a)
- self.assertRaises(IndexError, lambda x: x[np.array([], int)], a)
+ assert_raises(IndexError, lambda x: x[0], a)
+ assert_raises(IndexError, lambda x: x[np.array([], int)], a)
def test_invalid_subscript_assignment(self):
a = self.d
@@ -568,18 +568,18 @@ class TestScalarIndexing(TestCase):
def assign(x, i, v):
x[i] = v
- self.assertRaises(TypeError, assign, a, 0, 42)
+ assert_raises(TypeError, assign, a, 0, 42)
def test_newaxis(self):
a = self.d
- self.assertEqual(a[np.newaxis].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ...].shape, (1,))
- self.assertEqual(a[..., np.newaxis].shape, (1,))
- self.assertEqual(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
- self.assertEqual(a[..., np.newaxis, np.newaxis].shape, (1, 1))
- self.assertEqual(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
- self.assertEqual(a[(np.newaxis,)*10].shape, (1,)*10)
+ assert_equal(a[np.newaxis].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ...].shape, (1,))
+ assert_equal(a[..., np.newaxis].shape, (1,))
+ assert_equal(a[np.newaxis, ..., np.newaxis].shape, (1, 1))
+ assert_equal(a[..., np.newaxis, np.newaxis].shape, (1, 1))
+ assert_equal(a[np.newaxis, np.newaxis, ...].shape, (1, 1))
+ assert_equal(a[(np.newaxis,)*10].shape, (1,)*10)
def test_invalid_newaxis(self):
a = self.d
@@ -587,8 +587,8 @@ class TestScalarIndexing(TestCase):
def subscript(x, i):
x[i]
- self.assertRaises(IndexError, subscript, a, (np.newaxis, 0))
- self.assertRaises(IndexError, subscript, a, (np.newaxis,)*50)
+ assert_raises(IndexError, subscript, a, (np.newaxis, 0))
+ assert_raises(IndexError, subscript, a, (np.newaxis,)*50)
def test_overlapping_assignment(self):
# With positive strides
@@ -639,13 +639,13 @@ class TestScalarIndexing(TestCase):
assert_equal(a, [0, 1, 0, 1, 2])
-class TestCreation(TestCase):
+class TestCreation(object):
def test_from_attribute(self):
class x(object):
def __array__(self, dtype=None):
pass
- self.assertRaises(ValueError, np.array, x())
+ assert_raises(ValueError, np.array, x())
def test_from_string(self):
types = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -759,20 +759,20 @@ class TestCreation(TestCase):
str(d)
def test_sequence_non_homogenous(self):
- assert_equal(np.array([4, 2**80]).dtype, np.object)
- assert_equal(np.array([4, 2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80, 4]).dtype, np.object)
- assert_equal(np.array([2**80] * 3).dtype, np.object)
- assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, np.complex)
- assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, np.complex)
- assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, np.complex)
+ assert_equal(np.array([4, 2**80]).dtype, object)
+ assert_equal(np.array([4, 2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80, 4]).dtype, object)
+ assert_equal(np.array([2**80] * 3).dtype, object)
+ assert_equal(np.array([[1, 1],[1j, 1j]]).dtype, complex)
+ assert_equal(np.array([[1j, 1j],[1, 1]]).dtype, complex)
+ assert_equal(np.array([[1, 1, 1],[1, 1j, 1.], [1, 1, 1]]).dtype, complex)
@dec.skipif(sys.version_info[0] >= 3)
def test_sequence_long(self):
assert_equal(np.array([long(4), long(4)]).dtype, np.long)
- assert_equal(np.array([long(4), 2**80]).dtype, np.object)
- assert_equal(np.array([long(4), 2**80, long(4)]).dtype, np.object)
- assert_equal(np.array([2**80, long(4)]).dtype, np.object)
+ assert_equal(np.array([long(4), 2**80]).dtype, object)
+ assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
+ assert_equal(np.array([2**80, long(4)]).dtype, object)
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -856,7 +856,7 @@ class TestCreation(TestCase):
shape=(max_bytes//itemsize + 1,), dtype=dtype)
-class TestStructured(TestCase):
+class TestStructured(object):
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
@@ -876,7 +876,7 @@ class TestStructured(TestCase):
# multi-dimensional field types work properly
a = np.rec.fromrecords(
[([1, 2, 3], 'a', [[1, 2], [3, 4]]), ([3, 3, 3], 'b', [[0, 0], [0, 0]])],
- dtype=[('a', ('f4', 3)), ('b', np.object), ('c', ('i4', (2, 2)))])
+ dtype=[('a', ('f4', 3)), ('b', object), ('c', ('i4', (2, 2)))])
b = a.copy()
assert_equal(a == b, [True, True])
assert_equal(a != b, [False, False])
@@ -952,16 +952,13 @@ class TestStructured(TestCase):
# Check that equality comparison works on structured arrays if
# they are 'equiv'-castable
a = np.array([(5, 42), (10, 1)], dtype=[('a', '>i4'), ('b', '<f8')])
- b = np.array([(42, 5), (1, 10)], dtype=[('b', '>f8'), ('a', '<i4')])
+ b = np.array([(5, 42), (10, 1)], dtype=[('a', '<i4'), ('b', '>f8')])
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
assert_equal(a == b, [True, True])
- # Check that 'equiv' casting can reorder fields and change byte
- # order
- # New in 1.12: This behavior changes in 1.13, test for dep warning
+ # Check that 'equiv' casting can change byte order
assert_(np.can_cast(a.dtype, b.dtype, casting='equiv'))
- with assert_warns(FutureWarning):
- c = a.astype(b.dtype, casting='equiv')
+ c = a.astype(b.dtype, casting='equiv')
assert_equal(a == c, [True, True])
# Check that 'safe' casting can change byte order and up-cast
@@ -1096,20 +1093,68 @@ class TestStructured(TestCase):
b = a[0]
assert_(b.base is a)
-
-class TestBool(TestCase):
+ def test_assignment(self):
+ def testassign(arr, v):
+ c = arr.copy()
+ c[0] = v # assign using setitem
+ c[1:] = v # assign using "dtype_transfer" code paths
+ return c
+
+ dt = np.dtype([('foo', 'i8'), ('bar', 'i8')])
+ arr = np.ones(2, dt)
+ v1 = np.array([(2,3)], dtype=[('foo', 'i8'), ('bar', 'i8')])
+ v2 = np.array([(2,3)], dtype=[('bar', 'i8'), ('foo', 'i8')])
+ v3 = np.array([(2,3)], dtype=[('bar', 'i8'), ('baz', 'i8')])
+ v4 = np.array([(2,)], dtype=[('bar', 'i8')])
+ v5 = np.array([(2,3)], dtype=[('foo', 'f8'), ('bar', 'f8')])
+ w = arr.view({'names': ['bar'], 'formats': ['i8'], 'offsets': [8]})
+
+ ans = np.array([(2,3),(2,3)], dtype=dt)
+ assert_equal(testassign(arr, v1), ans)
+ assert_equal(testassign(arr, v2), ans)
+ assert_equal(testassign(arr, v3), ans)
+ assert_raises(ValueError, lambda: testassign(arr, v4))
+ assert_equal(testassign(arr, v5), ans)
+ w[:] = 4
+ assert_equal(arr, np.array([(1,4),(1,4)], dtype=dt))
+
+ # test field-reordering, assignment by position, and self-assignment
+ a = np.array([(1,2,3)],
+ dtype=[('foo', 'i8'), ('bar', 'i8'), ('baz', 'f4')])
+ a[['foo', 'bar']] = a[['bar', 'foo']]
+ assert_equal(a[0].item(), (2,1,3))
+
+ # test that this works even for 'simple_unaligned' structs
+ # (ie, that PyArray_EquivTypes cares about field order too)
+ a = np.array([(1,2)], dtype=[('a', 'i4'), ('b', 'i4')])
+ a[['a', 'b']] = a[['b', 'a']]
+ assert_equal(a[0].item(), (2,1))
+
+ def test_structuredscalar_indexing(self):
+ # test gh-7262
+ x = np.empty(shape=1, dtype="(2)3S,(2)3U")
+ assert_equal(x[["f0","f1"]][0], x[0][["f0","f1"]])
+ assert_equal(x[0], x[0][()])
+
+ def test_multiindex_titles(self):
+ a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
+ assert_raises(KeyError, lambda : a[['a','c']])
+ assert_raises(KeyError, lambda : a[['b','b']])
+ a[['b','c']] # no exception
+
+class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
- self.assertTrue(a0 is b0)
+ assert_(a0 is b0)
a1 = np.bool_(1)
b1 = np.bool_(True)
- self.assertTrue(a1 is b1)
- self.assertTrue(np.array([True])[0] is a1)
- self.assertTrue(np.array(True)[()] is a1)
+ assert_(a1 is b1)
+ assert_(np.array([True])[0] is a1)
+ assert_(np.array(True)[()] is a1)
def test_sum(self):
- d = np.ones(101, dtype=np.bool)
+ d = np.ones(101, dtype=bool)
assert_equal(d.sum(), d.size)
assert_equal(d[::2].sum(), d[::2].size)
assert_equal(d[::-2].sum(), d[::-2].size)
@@ -1123,16 +1168,16 @@ class TestBool(TestCase):
powers = [2 ** i for i in range(length)]
for i in range(2**power):
l = [(i & x) != 0 for x in powers]
- a = np.array(l, dtype=np.bool)
+ a = np.array(l, dtype=bool)
c = builtins.sum(l)
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av = a.view(np.uint8)
av *= 3
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av *= 4
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
av[av != 0] = 0xFF
- self.assertEqual(np.count_nonzero(a), c)
+ assert_equal(np.count_nonzero(a), c)
def test_count_nonzero(self):
# check all 12 bit combinations in a length 17 array
@@ -1148,15 +1193,15 @@ class TestBool(TestCase):
def test_count_nonzero_unaligned(self):
# prevent mistakes as e.g. gh-4060
for o in range(7):
- a = np.zeros((18,), dtype=np.bool)[o+1:]
+ a = np.zeros((18,), dtype=bool)[o+1:]
a[:o] = True
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
- a = np.ones((18,), dtype=np.bool)[o+1:]
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
+ a = np.ones((18,), dtype=bool)[o+1:]
a[:o] = False
- self.assertEqual(np.count_nonzero(a), builtins.sum(a.tolist()))
+ assert_equal(np.count_nonzero(a), builtins.sum(a.tolist()))
-class TestMethods(TestCase):
+class TestMethods(object):
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -1201,8 +1246,8 @@ class TestMethods(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, a.prod)
- self.assertRaises(ArithmeticError, a2.prod, axis=1)
+ assert_raises(ArithmeticError, a.prod)
+ assert_raises(ArithmeticError, a2.prod, axis=1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -1283,9 +1328,9 @@ class TestMethods(TestCase):
def test_transpose(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(a.transpose(), [[1, 3], [2, 4]])
- self.assertRaises(ValueError, lambda: a.transpose(0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 0))
- self.assertRaises(ValueError, lambda: a.transpose(0, 1, 2))
+ assert_raises(ValueError, lambda: a.transpose(0))
+ assert_raises(ValueError, lambda: a.transpose(0, 0))
+ assert_raises(ValueError, lambda: a.transpose(0, 1, 2))
def test_sort(self):
# test ordering for floats and complex containing nans. It is only
@@ -1381,7 +1426,7 @@ class TestMethods(TestCase):
assert_equal(c, a, msg)
# test object array sorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
for kind in ['q', 'h', 'm']:
@@ -1476,6 +1521,21 @@ class TestMethods(TestCase):
arr = np.empty(1000, dt)
arr[::-1].sort()
+ def test_sort_raises(self):
+ #gh-9404
+ arr = np.array([0, datetime.now(), 1], dtype=object)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+ #gh-3879
+ class Raiser(object):
+ def raises_anything(*args, **kwargs):
+ raise TypeError("SOMETHING ERRORED")
+ __eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
+ arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
+ np.random.shuffle(arr)
+ for kind in ['q', 'm', 'h']:
+ assert_raises(TypeError, arr.sort, kind=kind)
+
def test_sort_degraded(self):
# test degraded dataset would take minutes to run with normal qsort
d = np.arange(1000000)
@@ -1538,6 +1598,9 @@ class TestMethods(TestCase):
assert_equal(r.word, np.array(['my', 'first', 'name']))
assert_equal(r.number, np.array([3.1, 4.5, 6.2]))
+ assert_raises_regex(ValueError, 'duplicate',
+ lambda: r.sort(order=['id', 'id']))
+
if sys.byteorder == 'little':
strtype = '>i2'
else:
@@ -1609,7 +1672,7 @@ class TestMethods(TestCase):
assert_equal(b.copy().argsort(kind=kind), rr, msg)
# test object array argsorts.
- a = np.empty((101,), dtype=np.object)
+ a = np.empty((101,), dtype=object)
a[:] = list(range(101))
b = a[::-1]
r = np.arange(101)
@@ -1676,7 +1739,7 @@ class TestMethods(TestCase):
a = np.zeros(100)
assert_equal(a.argsort(kind='m'), r)
# complex
- a = np.zeros(100, dtype=np.complex)
+ a = np.zeros(100, dtype=complex)
assert_equal(a.argsort(kind='m'), r)
# string
a = np.array(['aaaaaaaaa' for i in range(100)])
@@ -2026,8 +2089,8 @@ class TestMethods(TestCase):
# sorted
d = np.arange(49)
- self.assertEqual(np.partition(d, 5, kind=k)[5], 5)
- self.assertEqual(np.partition(d, 15, kind=k)[15], 15)
+ assert_equal(np.partition(d, 5, kind=k)[5], 5)
+ assert_equal(np.partition(d, 15, kind=k)[15], 15)
assert_array_equal(d[np.argpartition(d, 5, kind=k)],
np.partition(d, 5, kind=k))
assert_array_equal(d[np.argpartition(d, 15, kind=k)],
@@ -2035,8 +2098,8 @@ class TestMethods(TestCase):
# rsorted
d = np.arange(47)[::-1]
- self.assertEqual(np.partition(d, 6, kind=k)[6], 6)
- self.assertEqual(np.partition(d, 16, kind=k)[16], 16)
+ assert_equal(np.partition(d, 6, kind=k)[6], 6)
+ assert_equal(np.partition(d, 16, kind=k)[16], 16)
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -2076,7 +2139,7 @@ class TestMethods(TestCase):
tgt = np.sort(np.arange(47) % 7)
np.random.shuffle(d)
for i in range(d.size):
- self.assertEqual(np.partition(d, i, kind=k)[i], tgt[i])
+ assert_equal(np.partition(d, i, kind=k)[i], tgt[i])
assert_array_equal(d[np.argpartition(d, 6, kind=k)],
np.partition(d, 6, kind=k))
assert_array_equal(d[np.argpartition(d, 16, kind=k)],
@@ -2128,7 +2191,7 @@ class TestMethods(TestCase):
for s in (9, 16)]
for dt, s in td:
aae = assert_array_equal
- at = self.assertTrue
+ at = assert_
d = np.arange(s, dtype=dt)
np.random.shuffle(d)
@@ -2137,7 +2200,7 @@ class TestMethods(TestCase):
d0 = np.transpose(d1)
for i in range(d.size):
p = np.partition(d, i, kind=k)
- self.assertEqual(p[i], i)
+ assert_equal(p[i], i)
# all before are smaller
assert_array_less(p[:i], p[i])
# all after are larger
@@ -2479,6 +2542,17 @@ class TestMethods(TestCase):
if HAS_REFCOUNT:
assert_(sys.getrefcount(a) < 50)
+ def test_size_zero_memleak(self):
+ # Regression test for issue 9615
+ # Exercises a special-case code path for dot products of length
+ # zero in cblasfuncs (making it is specific to floating dtypes).
+ a = np.array([], dtype=np.float64)
+ x = np.array(2.0)
+ for _ in range(100):
+ np.dot(a, a, out=x)
+ if HAS_REFCOUNT:
+ assert_(sys.getrefcount(x) < 50)
+
def test_trace(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.trace(), 15)
@@ -2504,7 +2578,7 @@ class TestMethods(TestCase):
b = np.arange(8).reshape((2, 2, 2)).view(MyArray)
t = b.trace()
- assert isinstance(t, MyArray)
+ assert_(isinstance(t, MyArray))
def test_put(self):
icodes = np.typecodes['AllInteger']
@@ -2795,76 +2869,6 @@ class TestBinop(object):
assert_equal(a, 5)
assert_equal(b, 3)
- def test_extension_incref_elide(self):
- # test extension (e.g. cython) calling PyNumber_* slots without
- # increasing the reference counts
- #
- # def incref_elide(a):
- # d = input.copy() # refcount 1
- # return d, d + d # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide
- d = np.ones(100000)
- orig, res = incref_elide(d)
- d + d
- # the return original should not be changed to an inplace operation
- assert_array_equal(orig, d)
- assert_array_equal(res, d + d)
-
- def test_extension_incref_elide_stack(self):
- # scanning if the refcount == 1 object is on the python stack to check
- # that we are called directly from python is flawed as object may still
- # be above the stack pointer and we have no access to the top of it
- #
- # def incref_elide_l(d):
- # return l[4] + l[4] # PyNumber_Add without increasing refcount
- from numpy.core.multiarray_tests import incref_elide_l
- # padding with 1 makes sure the object on the stack is not overwriten
- l = [1, 1, 1, 1, np.ones(100000)]
- res = incref_elide_l(l)
- # the return original should not be changed to an inplace operation
- assert_array_equal(l[4], np.ones(100000))
- assert_array_equal(res, l[4] + l[4])
-
- def test_temporary_with_cast(self):
- # check that we don't elide into a temporary which would need casting
- d = np.ones(200000, dtype=np.int64)
- assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
-
- r = ((d + d) / 2)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = np.true_divide((d + d), 2)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = ((d + d) / 2.)
- assert_equal(r.dtype, np.dtype('f8'))
-
- r = ((d + d) // 2)
- assert_equal(r.dtype, np.dtype(np.int64))
-
- # commutative elision into the astype result
- f = np.ones(100000, dtype=np.float32)
- assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
-
- # no elision into f + f
- d = f.astype(np.float64)
- assert_equal(((f + f) + d).dtype, np.dtype('f8'))
-
- def test_elide_broadcast(self):
- # test no elision on broadcast to higher dimension
- # only triggers elision code path in debug mode as triggering it in
- # normal mode needs 256kb large matching dimension, so a lot of memory
- d = np.ones((2000, 1), dtype=int)
- b = np.ones((2000), dtype=np.bool)
- r = (1 - d) + b
- assert_equal(r, 1)
- assert_equal(r.shape, (2000, 2000))
-
- def test_elide_scalar(self):
- # check inplace op does not create ndarray from scalars
- a = np.bool_()
- assert_(type(~(a & a)) is np.bool_)
-
# ndarray.__rop__ always calls ufunc
# ndarray.__iop__ always calls ufunc
# ndarray.__op__, __rop__:
@@ -3107,7 +3111,7 @@ class TestBinop(object):
warnings.filterwarnings('always', '', DeprecationWarning)
assert_equal(np.modf(dummy, out=a), (0,))
assert_(w[0].category is DeprecationWarning)
- assert_raises(TypeError, np.modf, dummy, out=(a,))
+ assert_raises(ValueError, np.modf, dummy, out=(a,))
# 2 inputs, 1 output
assert_equal(np.add(a, dummy), 0)
@@ -3139,8 +3143,131 @@ class TestBinop(object):
assert_equal(A[0], 30)
assert_(isinstance(A, OutClass))
+ def test_pow_override_with_errors(self):
+ # regression test for gh-9112
+ class PowerOnly(np.ndarray):
+ def __array_ufunc__(self, ufunc, method, *inputs, **kw):
+ if ufunc is not np.power:
+ raise NotImplementedError
+ return "POWER!"
+ # explicit cast to float, to ensure the fast power path is taken.
+ a = np.array(5., dtype=np.float64).view(PowerOnly)
+ assert_equal(a ** 2.5, "POWER!")
+ with assert_raises(NotImplementedError):
+ a ** 0.5
+ with assert_raises(NotImplementedError):
+ a ** 0
+ with assert_raises(NotImplementedError):
+ a ** 1
+ with assert_raises(NotImplementedError):
+ a ** -1
+ with assert_raises(NotImplementedError):
+ a ** 2
+
+
+class TestTemporaryElide(object):
+ # elision is only triggered on relatively large arrays
+
+ def test_extension_incref_elide(self):
+ # test extension (e.g. cython) calling PyNumber_* slots without
+ # increasing the reference counts
+ #
+ # def incref_elide(a):
+ # d = input.copy() # refcount 1
+ # return d, d + d # PyNumber_Add without increasing refcount
+ from numpy.core.multiarray_tests import incref_elide
+ d = np.ones(100000)
+ orig, res = incref_elide(d)
+ d + d
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(orig, d)
+ assert_array_equal(res, d + d)
+
+ def test_extension_incref_elide_stack(self):
+ # scanning if the refcount == 1 object is on the python stack to check
+ # that we are called directly from python is flawed as object may still
+ # be above the stack pointer and we have no access to the top of it
+ #
+ # def incref_elide_l(d):
+ # return l[4] + l[4] # PyNumber_Add without increasing refcount
+ from numpy.core.multiarray_tests import incref_elide_l
+ # padding with 1 makes sure the object on the stack is not overwriten
+ l = [1, 1, 1, 1, np.ones(100000)]
+ res = incref_elide_l(l)
+ # the return original should not be changed to an inplace operation
+ assert_array_equal(l[4], np.ones(100000))
+ assert_array_equal(res, l[4] + l[4])
+
+ def test_temporary_with_cast(self):
+ # check that we don't elide into a temporary which would need casting
+ d = np.ones(200000, dtype=np.int64)
+ assert_equal(((d + d) + 2**222).dtype, np.dtype('O'))
+
+ r = ((d + d) / 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = np.true_divide((d + d), 2)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) / 2.)
+ assert_equal(r.dtype, np.dtype('f8'))
+
+ r = ((d + d) // 2)
+ assert_equal(r.dtype, np.dtype(np.int64))
+
+ # commutative elision into the astype result
+ f = np.ones(100000, dtype=np.float32)
+ assert_equal(((f + f) + f.astype(np.float64)).dtype, np.dtype('f8'))
+
+ # no elision into lower type
+ d = f.astype(np.float64)
+ assert_equal(((f + f) + d).dtype, d.dtype)
+ l = np.ones(100000, dtype=np.longdouble)
+ assert_equal(((d + d) + l).dtype, l.dtype)
-class TestCAPI(TestCase):
+ # test unary abs with different output dtype
+ for dt in (np.complex64, np.complex128, np.clongdouble):
+ c = np.ones(100000, dtype=dt)
+ r = abs(c * 2.0)
+ assert_equal(r.dtype, np.dtype('f%d' % (c.itemsize // 2)))
+
+ def test_elide_broadcast(self):
+ # test no elision on broadcast to higher dimension
+ # only triggers elision code path in debug mode as triggering it in
+ # normal mode needs 256kb large matching dimension, so a lot of memory
+ d = np.ones((2000, 1), dtype=int)
+ b = np.ones((2000), dtype=bool)
+ r = (1 - d) + b
+ assert_equal(r, 1)
+ assert_equal(r.shape, (2000, 2000))
+
+ def test_elide_scalar(self):
+ # check inplace op does not create ndarray from scalars
+ a = np.bool_()
+ assert_(type(~(a & a)) is np.bool_)
+
+ def test_elide_scalar_readonly(self):
+ # The imaginary part of a real array is readonly. This needs to go
+ # through fast_scalar_power which is only called for powers of
+ # +1, -1, 0, 0.5, and 2, so use 2. Also need valid refcount for
+ # elision which can be gotten for the imaginary part of a real
+ # array. Should not error.
+ a = np.empty(100000, dtype=np.float64)
+ a.imag ** 2
+
+ def test_elide_readonly(self):
+ # don't try to elide readonly temporaries
+ r = np.asarray(np.broadcast_to(np.zeros(1), 100000).flat) * 0.0
+ assert_equal(r, 0)
+
+ def test_elide_updateifcopy(self):
+ a = np.ones(2**20)[::2]
+ b = a.flat.__array__() + 1
+ del b
+ assert_equal(a, 1)
+
+
+class TestCAPI(object):
def test_IsPythonScalar(self):
from numpy.core.multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
@@ -3150,16 +3277,16 @@ class TestCAPI(TestCase):
assert_(IsPythonScalar("a"))
-class TestSubscripting(TestCase):
+class TestSubscripting(object):
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
- self.assertTrue(isinstance(x[0], np.int_))
+ assert_(isinstance(x[0], np.int_))
if sys.version_info[0] < 3:
- self.assertTrue(isinstance(x[0], int))
- self.assertTrue(type(x[0, ...]) is np.ndarray)
+ assert_(isinstance(x[0], int))
+ assert_(type(x[0, ...]) is np.ndarray)
-class TestPickling(TestCase):
+class TestPickling(object):
def test_roundtrip(self):
import pickle
carray = np.array([[2, 9], [7, 0], [3, 8]])
@@ -3225,7 +3352,7 @@ class TestPickling(TestCase):
assert_equal(a, p)
-class TestFancyIndexing(TestCase):
+class TestFancyIndexing(object):
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
@@ -3279,7 +3406,7 @@ class TestFancyIndexing(TestCase):
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
-class TestStringCompare(TestCase):
+class TestStringCompare(object):
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
@@ -3311,7 +3438,7 @@ class TestStringCompare(TestCase):
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
-class TestArgmax(TestCase):
+class TestArgmax(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3387,8 +3514,13 @@ class TestArgmax(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ max_val = np.max(arr)
+
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmax(arr)], np.max(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmax(arr)], max_val, err_msg="%r" % arr)
def test_output_shape(self):
# see also gh-616
@@ -3440,7 +3572,7 @@ class TestArgmax(TestCase):
assert_equal(a.argmax(), 1)
-class TestArgmin(TestCase):
+class TestArgmin(object):
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -3516,8 +3648,13 @@ class TestArgmin(TestCase):
def test_combinations(self):
for arr, pos in self.nan_arr:
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ min_val = np.min(arr)
+
assert_equal(np.argmin(arr), pos, err_msg="%r" % arr)
- assert_equal(arr[np.argmin(arr)], np.min(arr), err_msg="%r" % arr)
+ assert_equal(arr[np.argmin(arr)], min_val, err_msg="%r" % arr)
def test_minimum_signed_integers(self):
@@ -3583,7 +3720,7 @@ class TestArgmin(TestCase):
assert_equal(a.argmin(), 1)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
@@ -3613,14 +3750,14 @@ class TestMinMax(TestCase):
assert_equal(np.amax(a), a[0])
-class TestNewaxis(TestCase):
+class TestNewaxis(object):
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
-class TestClip(TestCase):
+class TestClip(object):
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
@@ -3694,7 +3831,7 @@ class TestClip(TestCase):
assert_array_equal(result, expected)
-class TestCompress(TestCase):
+class TestCompress(object):
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -3812,7 +3949,7 @@ class TestTake(object):
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
-class TestLexsort(TestCase):
+class TestLexsort(object):
def test_basic(self):
a = [1, 2, 1, 3, 1, 5]
b = [0, 4, 5, 6, 2, 3]
@@ -3859,19 +3996,19 @@ class TestLexsort(TestCase):
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
-class TestIO(TestCase):
+class TestIO(object):
"""Test tofile, fromfile, tobytes, and fromstring"""
- def setUp(self):
+ def setup(self):
shape = (2, 4, 3)
rand = np.random.random
- self.x = rand(shape) + rand(shape).astype(np.complex)*1j
+ self.x = rand(shape) + rand(shape).astype(complex)*1j
self.x[0,:, 1] = [np.nan, np.inf, -np.inf, np.nan]
self.dtype = self.x.dtype
self.tempdir = tempfile.mkdtemp()
self.filename = tempfile.mktemp(dir=self.tempdir)
- def tearDown(self):
+ def teardown(self):
shutil.rmtree(self.tempdir)
def test_nofile(self):
@@ -3960,7 +4097,7 @@ class TestIO(TestCase):
with io.open(self.filename, 'rb', buffering=0) as f:
f.seek = fail
f.tell = fail
- self.assertRaises(IOError, np.fromfile, f, dtype=self.dtype)
+ assert_raises(IOError, np.fromfile, f, dtype=self.dtype)
def test_io_open_unbuffered_fromfile(self):
# gh-6632
@@ -4181,7 +4318,7 @@ class TestFromBuffer(object):
def test_ip_basic(self):
for byteorder in ['<', '>']:
- for dtype in [float, int, np.complex]:
+ for dtype in [float, int, complex]:
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7))*5).astype(dt)
buf = x.tobytes()
@@ -4191,8 +4328,8 @@ class TestFromBuffer(object):
yield self.tst_basic, b'', np.array([]), {}
-class TestFlat(TestCase):
- def setUp(self):
+class TestFlat(object):
+ def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
a0.shape = (4, 5)
@@ -4228,17 +4365,19 @@ class TestFlat(TestCase):
assert_(c.flags.writeable is False)
assert_(d.flags.writeable is False)
+ # for 1.14 all are set to non-writeable on the way to replacing the
+ # UPDATEIFCOPY array returned for non-contiguous arrays.
assert_(e.flags.writeable is True)
- assert_(f.flags.writeable is True)
+ assert_(f.flags.writeable is False)
assert_(c.flags.updateifcopy is False)
assert_(d.flags.updateifcopy is False)
assert_(e.flags.updateifcopy is False)
- assert_(f.flags.updateifcopy is True)
- assert_(f.base is self.b0)
+ # UPDATEIFCOPY is removed.
+ assert_(f.flags.updateifcopy is False)
-class TestResize(TestCase):
+class TestResize(object):
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
@@ -4252,7 +4391,7 @@ class TestResize(TestCase):
def test_check_reference(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y = x
- self.assertRaises(ValueError, x.resize, (5, 1))
+ assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
def test_int_shape(self):
@@ -4270,11 +4409,23 @@ class TestResize(TestCase):
x.resize()
assert_array_equal(x, np.eye(3))
+ def test_0d_shape(self):
+ # do it multiple times to test it does not break alloc cache gh-9216
+ for i in range(10):
+ x = np.empty((1,))
+ x.resize(())
+ assert_equal(x.shape, ())
+ assert_equal(x.size, 1)
+ x = np.empty(())
+ x.resize((1,))
+ assert_equal(x.shape, (1,))
+ assert_equal(x.size, 1)
+
def test_invalid_arguments(self):
- self.assertRaises(TypeError, np.eye(3).resize, 'hi')
- self.assertRaises(ValueError, np.eye(3).resize, -1)
- self.assertRaises(TypeError, np.eye(3).resize, order=1)
- self.assertRaises(TypeError, np.eye(3).resize, refcheck='hi')
+ assert_raises(TypeError, np.eye(3).resize, 'hi')
+ assert_raises(ValueError, np.eye(3).resize, -1)
+ assert_raises(TypeError, np.eye(3).resize, order=1)
+ assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
def test_freeform_shape(self):
x = np.eye(3)
@@ -4305,7 +4456,7 @@ class TestResize(TestCase):
assert_array_equal(a['k'][:-5], 1)
-class TestRecord(TestCase):
+class TestRecord(object):
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
@@ -4401,23 +4552,11 @@ class TestRecord(TestCase):
# multiple subfields
fn2 = func('f2')
b[fn2] = 3
- with suppress_warnings() as sup:
- sup.filter(FutureWarning,
- "Assignment between structured arrays.*")
- sup.filter(FutureWarning,
- "Numpy has detected that you .*")
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
- (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
- (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
- (2, (1,)))
+
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
+
# non-ascii unicode field indexing is well behaved
if not is_py3:
raise SkipTest('non ascii unicode field indexing skipped; '
@@ -4426,54 +4565,6 @@ class TestRecord(TestCase):
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- # For <=1.12 a is not modified, but it will be in 1.13
- assert_equal(a, b)
-
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
-
- # make sure views of a multi-field index warn too
- c = np.zeros(3, dtype='i8,i8,i8')
- assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
- [FutureWarning])
-
- # make sure assignment using a different dtype warns
- a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
- b = np.zeros(2, dtype=[('b', 'i4'), ('a', 'i4')])
- assert_equal(collect_warnings(a.__setitem__, (), b), [FutureWarning])
-
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
@@ -4481,14 +4572,14 @@ class TestRecord(TestCase):
b.flags.writeable = False
c = np.array([(1, 2), (3, 4)], dtype='i1,i2')
c.flags.writeable = False
- self.assertTrue(hash(a[0]) == hash(a[1]))
- self.assertTrue(hash(a[0]) == hash(b[0]))
- self.assertTrue(hash(a[0]) != hash(b[1]))
- self.assertTrue(hash(c[0]) == hash(a[0]) and c[0] == a[0])
+ assert_(hash(a[0]) == hash(a[1]))
+ assert_(hash(a[0]) == hash(b[0]))
+ assert_(hash(a[0]) != hash(b[1]))
+ assert_(hash(c[0]) == hash(a[0]) and c[0] == a[0])
def test_record_no_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
- self.assertRaises(TypeError, hash, a[0])
+ assert_raises(TypeError, hash, a[0])
def test_empty_structure_creation(self):
# make sure these do not raise errors (gh-5631)
@@ -4497,7 +4588,7 @@ class TestRecord(TestCase):
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
-class TestView(TestCase):
+class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
@@ -4522,11 +4613,11 @@ def _std(a, **args):
return a.std(**args)
-class TestStats(TestCase):
+class TestStats(object):
funcs = [_mean, _var, _std]
- def setUp(self):
+ def setup(self):
np.random.seed(range(3))
self.rmat = np.random.random((4, 5))
self.cmat = self.rmat + 1j * self.rmat
@@ -4692,7 +4783,7 @@ class TestStats(TestCase):
def test_mean_float16(self):
# This fail if the sum inside mean is done in float16 instead
# of float32.
- assert _mean(np.ones(100000, dtype='float16')) == 1
+ assert_(_mean(np.ones(100000, dtype='float16')) == 1)
def test_var_values(self):
for mat in [self.rmat, self.cmat, self.omat]:
@@ -4729,7 +4820,7 @@ class TestStats(TestCase):
res = dat.var(1)
assert_(res.info == dat.info)
-class TestVdot(TestCase):
+class TestVdot(object):
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
@@ -4751,7 +4842,7 @@ class TestVdot(TestCase):
assert_equal(np.vdot(b, b), 3)
# test boolean
- b = np.eye(3, dtype=np.bool)
+ b = np.eye(3, dtype=bool)
res = np.vdot(b, b)
assert_(np.isscalar(res))
assert_equal(np.vdot(b, b), True)
@@ -4789,8 +4880,8 @@ class TestVdot(TestCase):
np.vdot(a.flatten(), b.flatten()))
-class TestDot(TestCase):
- def setUp(self):
+class TestDot(object):
+ def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
self.b1 = np.random.rand(2, 1)
@@ -5067,7 +5158,7 @@ class TestDot(TestCase):
assert_dot_close(A_f_12, X_f_2, desired)
-class MatmulCommon():
+class MatmulCommon(object):
"""Common tests for '@' operator and numpy.matmul.
Do not derive from TestCase to avoid nose running it.
@@ -5262,23 +5353,23 @@ class MatmulCommon():
assert_equal(res, tgt12_21)
-class TestMatmul(MatmulCommon, TestCase):
+class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
- a = np.ones((2, 2), dtype=np.float)
- b = np.ones((2, 2), dtype=np.float)
- tgt = np.full((2,2), 2, dtype=np.float)
+ a = np.ones((2, 2), dtype=float)
+ b = np.ones((2, 2), dtype=float)
+ tgt = np.full((2,2), 2, dtype=float)
# test as positional argument
msg = "out positional argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
- out = np.zeros((2, 2), dtype=np.float)
+ out = np.zeros((2, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
@@ -5301,13 +5392,13 @@ class TestMatmul(MatmulCommon, TestCase):
# test out non-contiguous
# msg = "out argument with non-contiguous layout"
- # c = np.zeros((2, 2, 2), dtype=np.float)
+ # c = np.zeros((2, 2, 2), dtype=float)
# self.matmul(a, b, out=c[..., 0])
# assert_array_equal(c, tgt, err_msg=msg)
if sys.version_info[:2] >= (3, 5):
- class TestMatmulOperator(MatmulCommon, TestCase):
+ class TestMatmulOperator(MatmulCommon):
import operator
matmul = operator.matmul
@@ -5342,7 +5433,7 @@ if sys.version_info[:2] >= (3, 5):
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
-class TestInner(TestCase):
+class TestInner(object):
def test_inner_type_mismatch(self):
c = 1.
@@ -5435,7 +5526,7 @@ class TestInner(TestCase):
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestSummarization(TestCase):
+class TestSummarization(object):
def test_1d(self):
A = np.arange(1001)
strA = '[ 0 1 2 ..., 998 999 1000]'
@@ -5455,26 +5546,26 @@ class TestSummarization(TestCase):
assert_(repr(A) == reprA)
-class TestAlen(TestCase):
+class TestAlen(object):
def test_basic(self):
m = np.array([1, 2, 3])
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = np.array([[1, 2, 3], [4, 5, 7]])
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
m = [1, 2, 3]
- self.assertEqual(np.alen(m), 3)
+ assert_equal(np.alen(m), 3)
m = [[1, 2, 3], [4, 5, 7]]
- self.assertEqual(np.alen(m), 2)
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- self.assertEqual(np.alen(5), 1)
+ assert_equal(np.alen(5), 1)
-class TestChoose(TestCase):
- def setUp(self):
+class TestChoose(object):
+ def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
self.x2 = 2*np.ones((2, 3), dtype=int)
@@ -5494,8 +5585,8 @@ class TestChoose(TestCase):
assert_equal(A, [[2, 2, 3], [2, 2, 3]])
-class TestRepeat(TestCase):
- def setUp(self):
+class TestRepeat(object):
+ def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
@@ -5535,7 +5626,7 @@ class TestRepeat(TestCase):
NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
-class TestNeighborhoodIter(TestCase):
+class TestNeighborhoodIter(object):
# Simple, 2d tests
def _test_simple2d(self, dt):
# Test zero and one padding for simple data type
@@ -5565,7 +5656,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_simple2d(self):
- self._test_simple2d(np.float)
+ self._test_simple2d(float)
def test_simple2d_object(self):
self._test_simple2d(Decimal)
@@ -5581,7 +5672,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_mirror2d(self):
- self._test_mirror2d(np.float)
+ self._test_mirror2d(float)
def test_mirror2d_object(self):
self._test_mirror2d(Decimal)
@@ -5603,7 +5694,7 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_simple_float(self):
- self._test_simple(np.float)
+ self._test_simple(float)
def test_simple_object(self):
self._test_simple(Decimal)
@@ -5614,11 +5705,11 @@ class TestNeighborhoodIter(TestCase):
r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
[2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
- self.assertTrue([i.dtype == dt for i in l])
+ assert_([i.dtype == dt for i in l])
assert_array_equal(l, r)
def test_mirror(self):
- self._test_mirror(np.float)
+ self._test_mirror(float)
def test_mirror_object(self):
self._test_mirror(Decimal)
@@ -5632,13 +5723,13 @@ class TestNeighborhoodIter(TestCase):
assert_array_equal(l, r)
def test_circular(self):
- self._test_circular(np.float)
+ self._test_circular(float)
def test_circular_object(self):
self._test_circular(Decimal)
# Test stacking neighborhood iterators
-class TestStackedNeighborhoodIter(TestCase):
+class TestStackedNeighborhoodIter(object):
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
@@ -6319,7 +6410,7 @@ def test_flat_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
@@ -6328,7 +6419,7 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook(TestCase):
+class TestMemEventHook(object):
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
@@ -6340,7 +6431,7 @@ class TestMemEventHook(TestCase):
gc.collect()
test_pydatamem_seteventhook_end()
-class TestMapIter(TestCase):
+class TestMapIter(object):
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/multiarray_tests.c.src
@@ -6362,7 +6453,7 @@ class TestMapIter(TestCase):
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
-class TestAsCArray(TestCase):
+class TestAsCArray(object):
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = test_as_c_array(array, 3)
@@ -6379,7 +6470,7 @@ class TestAsCArray(TestCase):
assert_equal(array[1, 2, 3], from_c)
-class TestConversion(TestCase):
+class TestConversion(object):
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
@@ -6444,12 +6535,12 @@ class TestConversion(TestCase):
assert_raises(Error, bool, self_containing) # previously stack overflow
-class TestWhere(TestCase):
+class TestWhere(object):
def test_basic(self):
- dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
+ dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
for dt in dts:
- c = np.ones(53, dtype=np.bool)
+ c = np.ones(53, dtype=bool)
assert_equal(np.where( c, dt(0), dt(1)), dt(0))
assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
assert_equal(np.where(True, dt(0), dt(1)), dt(0))
@@ -6541,7 +6632,7 @@ class TestWhere(TestCase):
assert_equal(np.where(c, a, b), r)
# non bool mask
- c = c.astype(np.int)
+ c = c.astype(int)
c[c != 0] = 34242324
assert_equal(np.where(c, a, b), r)
# invert
@@ -6597,10 +6688,21 @@ class TestWhere(TestCase):
assert_array_equal(ibad,
np.atleast_2d(np.array([[],[]], dtype=np.intp)))
+ def test_largedim(self):
+ # invalid read regression gh-9304
+ shape = [10, 2, 3, 4, 5, 6]
+ np.random.seed(2)
+ array = np.random.rand(*shape)
+
+ for i in range(10):
+ benchmark = array.nonzero()
+ result = array.nonzero()
+ assert_array_equal(benchmark, result)
+
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(TestCase):
+ class TestSizeOf(object):
def test_empty_array(self):
x = np.array([])
@@ -6646,7 +6748,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestHashing(TestCase):
+class TestHashing(object):
def test_arrays_not_hashable(self):
x = np.ones(3)
@@ -6654,10 +6756,10 @@ class TestHashing(TestCase):
def test_collections_hashable(self):
x = np.array([])
- self.assertFalse(isinstance(x, collections.Hashable))
+ assert_(not isinstance(x, collections.Hashable))
-class TestArrayPriority(TestCase):
+class TestArrayPriority(object):
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
@@ -6743,54 +6845,54 @@ class TestArrayPriority(TestCase):
assert_(isinstance(f(b, a), self.Other), msg)
-class TestBytestringArrayNonzero(TestCase):
+class TestBytestringArrayNonzero(object):
def test_empty_bstring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.str))
+ assert_(not np.array([''], dtype=str))
def test_whitespace_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_bstring_array_is_falsey(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_bstring_array_is_truthy(self):
- a = np.array(['spam'], dtype=np.str)
+ a = np.array(['spam'], dtype=str)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
-class TestUnicodeArrayNonzero(TestCase):
+class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
- self.assertFalse(np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode))
def test_whitespace_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = '\0\0\0\0'
- self.assertFalse(a)
+ assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
a = np.array(['eggs'], dtype=np.unicode)
a[0] = ' \0 \0'
- self.assertTrue(a)
+ assert_(a)
-class TestCTypes(TestCase):
+class TestCTypes(object):
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
- self.assertEqual(ctypes, test_arr.ctypes._ctypes)
+ assert_equal(ctypes, test_arr.ctypes._ctypes)
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
def test_ctypes_is_not_available(self):
@@ -6799,8 +6901,8 @@ class TestCTypes(TestCase):
try:
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
- self.assertIsInstance(
- test_arr.ctypes._ctypes, _internal._missing_ctypes)
+ assert_(isinstance(test_arr.ctypes._ctypes,
+ _internal._missing_ctypes))
assert_equal(tuple(test_arr.ctypes.shape), (2, 3))
finally:
_internal.ctypes = ctypes
@@ -6811,5 +6913,73 @@ def test_orderconverter_with_nonASCII_unicode_ordering():
a = np.arange(5)
assert_raises(ValueError, a.flatten, order=u'\xe2')
+
+def test_equal_override():
+ # gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
+ # did not respect overrides with __array_priority__ or __array_ufunc__.
+ # The PR fixed this for __array_priority__ and __array_ufunc__ = None.
+ class MyAlwaysEqual(object):
+ def __eq__(self, other):
+ return "eq"
+
+ def __ne__(self, other):
+ return "ne"
+
+ class MyAlwaysEqualOld(MyAlwaysEqual):
+ __array_priority__ = 10000
+
+ class MyAlwaysEqualNew(MyAlwaysEqual):
+ __array_ufunc__ = None
+
+ array = np.array([(0, 1), (2, 3)], dtype='i4,i4')
+ for my_always_equal_cls in MyAlwaysEqualOld, MyAlwaysEqualNew:
+ my_always_equal = my_always_equal_cls()
+ assert_equal(my_always_equal == array, 'eq')
+ assert_equal(array == my_always_equal, 'eq')
+ assert_equal(my_always_equal != array, 'ne')
+ assert_equal(array != my_always_equal, 'ne')
+
+
+def test_npymath_complex():
+ # Smoketest npymath functions
+ from numpy.core.multiarray_tests import (
+ npy_cabs, npy_carg)
+
+ funcs = {npy_cabs: np.absolute,
+ npy_carg: np.angle}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.complex64, np.complex128, np.clongdouble)
+
+ for fun, npfun in funcs.items():
+ for x, y in itertools.product(vals, vals):
+ for t in types:
+ z = t(complex(x, y))
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
+def test_npymath_real():
+ # Smoketest npymath functions
+ from numpy.core.multiarray_tests import (
+ npy_log10, npy_cosh, npy_sinh, npy_tan, npy_tanh)
+
+ funcs = {npy_log10: np.log10,
+ npy_cosh: np.cosh,
+ npy_sinh: np.sinh,
+ npy_tan: np.tan,
+ npy_tanh: np.tanh}
+ vals = (1, np.inf, -np.inf, np.nan)
+ types = (np.float32, np.float64, np.longdouble)
+
+ with np.errstate(all='ignore'):
+ for fun, npfun in funcs.items():
+ for x, t in itertools.product(vals, types):
+ z = t(x)
+ got = fun(z)
+ expected = npfun(z)
+ assert_allclose(got, expected)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 77521317e..59e11f22e 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1816,100 +1816,45 @@ def test_iter_buffered_cast_structured_type():
if HAS_REFCOUNT:
assert_equal(sys.getrefcount(a[0]), rc)
- # struct type -> simple (takes the first value)
- sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
- a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ # single-field struct type -> simple
+ sdt = [('a', 'f4')]
+ a = np.array([(5.5,), (8,)], dtype=sdt)
i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')
assert_equal([x_[()] for x_ in i], [5, 8])
+ # make sure multi-field struct type -> simple doesn't work
+ sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
+ a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
+ assert_raises(ValueError, lambda: (
+ nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes='i4')))
+
# struct type -> struct type (field-wise copy)
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('d', 'u2'), ('a', 'O'), ('b', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
- casting='unsafe',
- op_dtypes=sdt2)
+ i = nditer(a, ['buffered', 'refs_ok'], ['readonly'],
+ casting='unsafe',
+ op_dtypes=sdt2)
assert_equal(i[0].dtype, np.dtype(sdt2))
assert_equal([np.array(x_) for x_ in i],
- [np.array((3, 1, 2), dtype=sdt2),
- np.array((6, 4, 5), dtype=sdt2)])
+ [np.array((1, 2, 3), dtype=sdt2),
+ np.array((4, 5, 6), dtype=sdt2)])
- # struct type -> struct type (field gets discarded)
+ # make sure struct type -> struct type with different
+ # number of fields fails
sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
sdt2 = [('b', 'O'), ('a', 'f8')]
a = np.array([(1, 2, 3), (4, 5, 6)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, None), (8, 5, None)], dtype=sdt1))
-
- # struct type -> struct type (structured field gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'i4')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, 0)), (8, 5, (0, 0))], dtype=sdt1))
-
- # struct type -> struct type (structured field w/ ref gets discarded)
- sdt1 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- sdt2 = [('b', 'O'), ('a', 'f8')]
- a = np.array([(1, 2, (0, 9)), (4, 5, (20, 21))], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1), dtype=sdt2),
- np.array((5, 4), dtype=sdt2)])
- assert_equal(a, np.array([(5, 2, (0, None)), (8, 5, (0, None))], dtype=sdt1))
-
- # struct type -> struct type back (structured field w/ ref gets discarded)
- sdt1 = [('b', 'O'), ('a', 'f8')]
- sdt2 = [('a', 'f4'), ('b', 'i8'), ('d', [('a', 'i2'), ('b', 'O')])]
- a = np.array([(1, 2), (4, 5)], dtype=sdt1)
- # New in 1.12: This behavior changes in 1.13, test for dep warning
- with assert_warns(FutureWarning):
- i = nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
- casting='unsafe',
- op_dtypes=sdt2)
- assert_equal(i[0].dtype, np.dtype(sdt2))
- vals = []
- for x in i:
- vals.append(np.array(x))
- assert_equal(x['d'], np.array((0, None), dtype=[('a', 'i2'), ('b', 'O')]))
- x['a'] = x['b']+3
- assert_equal(vals, [np.array((2, 1, (0, None)), dtype=sdt2),
- np.array((5, 4, (0, None)), dtype=sdt2)])
- assert_equal(a, np.array([(1, 4), (4, 7)], dtype=sdt1))
+
+ assert_raises(ValueError, lambda : (
+ nditer(a, ['buffered', 'refs_ok'], ['readwrite'],
+ casting='unsafe',
+ op_dtypes=sdt2)))
+
def test_iter_buffered_cast_subarray():
# Tests buffering of subarrays
@@ -2145,7 +2090,7 @@ def test_iter_buffered_reduce_reuse():
op_flags = [('readonly',), ('readwrite', 'allocate')]
op_axes_list = [[(0, 1, 2), (0, 1, -1)], [(0, 1, 2), (0, -1, -1)]]
# wrong dtype to force buffering
- op_dtypes = [np.float, a.dtype]
+ op_dtypes = [float, a.dtype]
def get_params():
for xs in range(-3**2, 3**2 + 1):
@@ -2641,7 +2586,7 @@ def test_iter_element_deletion():
del it[1:2]
except TypeError:
pass
- except:
+ except Exception:
raise AssertionError
def test_iter_allocated_array_dtypes():
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 0f87ffdf2..e8c637179 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -10,13 +10,13 @@ import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_raises_regex, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, dec, HAS_REFCOUNT, suppress_warnings
)
-class TestResize(TestCase):
+class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
@@ -34,6 +34,12 @@ class TestResize(TestCase):
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
+ Ar = np.resize(A, (0, 2))
+ assert_equal(Ar.shape, (0, 2))
+
+ Ar = np.resize(A, (2, 0))
+ assert_equal(Ar.shape, (2, 0))
+
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32, 1)])
@@ -42,7 +48,7 @@ class TestResize(TestCase):
assert_equal(A.dtype, Ar.dtype)
-class TestNonarrayArgs(TestCase):
+class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
@@ -202,45 +208,45 @@ class TestNonarrayArgs(TestCase):
assert_(w[0].category is RuntimeWarning)
-class TestBoolScalar(TestCase):
+class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
- self.assertTrue((t and s) is s)
- self.assertTrue((f and s) is f)
+ assert_((t and s) is s)
+ assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
- self.assertTrue((t | t) is t)
- self.assertTrue((f | t) is t)
- self.assertTrue((t | f) is t)
- self.assertTrue((f | f) is f)
+ assert_((t | t) is t)
+ assert_((f | t) is t)
+ assert_((t | f) is t)
+ assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
- self.assertTrue((t & t) is t)
- self.assertTrue((f & t) is f)
- self.assertTrue((t & f) is f)
- self.assertTrue((f & f) is f)
+ assert_((t & t) is t)
+ assert_((f & t) is f)
+ assert_((t & f) is f)
+ assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
- self.assertTrue((t ^ t) is f)
- self.assertTrue((f ^ t) is t)
- self.assertTrue((t ^ f) is t)
- self.assertTrue((f ^ f) is f)
+ assert_((t ^ t) is f)
+ assert_((f ^ t) is t)
+ assert_((t ^ f) is t)
+ assert_((f ^ f) is f)
-class TestBoolArray(TestCase):
- def setUp(self):
+class TestBoolArray(object):
+ def setup(self):
# offset for simd tests
- self.t = np.array([True] * 41, dtype=np.bool)[1::]
- self.f = np.array([False] * 41, dtype=np.bool)[1::]
- self.o = np.array([False] * 42, dtype=np.bool)[2::]
+ self.t = np.array([True] * 41, dtype=bool)[1::]
+ self.f = np.array([False] * 41, dtype=bool)[1::]
+ self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
@@ -249,31 +255,31 @@ class TestBoolArray(TestCase):
self.im[-2] = False
def test_all_any(self):
- self.assertTrue(self.t.all())
- self.assertTrue(self.t.any())
- self.assertFalse(self.f.all())
- self.assertFalse(self.f.any())
- self.assertTrue(self.nm.any())
- self.assertTrue(self.im.any())
- self.assertFalse(self.nm.all())
- self.assertFalse(self.im.all())
+ assert_(self.t.all())
+ assert_(self.t.any())
+ assert_(not self.f.all())
+ assert_(not self.f.any())
+ assert_(self.nm.any())
+ assert_(self.im.any())
+ assert_(not self.nm.all())
+ assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
- d = np.array([False] * 256, dtype=np.bool)[7::]
+ d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
- self.assertTrue(np.any(d))
- e = np.array([True] * 256, dtype=np.bool)[7::]
+ assert_(np.any(d))
+ e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
- self.assertFalse(np.all(e))
+ assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
- d = np.array([False] * 100043, dtype=np.bool)
+ d = np.array([False] * 100043, dtype=bool)
d[i] = True
- self.assertTrue(np.any(d), msg="%r" % i)
- e = np.array([True] * 100043, dtype=np.bool)
+ assert_(np.any(d), msg="%r" % i)
+ e = np.array([True] * 100043, dtype=bool)
e[i] = False
- self.assertFalse(np.all(e), msg="%r" % i)
+ assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
@@ -322,12 +328,12 @@ class TestBoolArray(TestCase):
assert_array_equal(self.im ^ False, self.im)
-class TestBoolCmp(TestCase):
- def setUp(self):
+class TestBoolCmp(object):
+ def setup(self):
self.f = np.ones(256, dtype=np.float32)
- self.ef = np.ones(self.f.size, dtype=np.bool)
+ self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
- self.ed = np.ones(self.d.size, dtype=np.bool)
+ self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
@@ -422,28 +428,28 @@ class TestBoolCmp(TestCase):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
-class TestSeterr(TestCase):
+class TestSeterr(object):
def test_default(self):
err = np.geterr()
- self.assertEqual(err, dict(
- divide='warn',
- invalid='warn',
- over='warn',
- under='ignore',
- ))
+ assert_equal(err,
+ dict(divide='warn',
+ invalid='warn',
+ over='warn',
+ under='ignore')
+ )
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
- self.assertTrue(err == old)
+ assert_(err == old)
new = np.seterr()
- self.assertTrue(new['divide'] == 'print')
+ assert_(new['divide'] == 'print')
np.seterr(over='raise')
- self.assertTrue(np.geterr()['over'] == 'raise')
- self.assertTrue(new['divide'] == 'print')
+ assert_(np.geterr()['over'] == 'raise')
+ assert_(new['divide'] == 'print')
np.seterr(**old)
- self.assertTrue(np.geterr() == old)
+ assert_(np.geterr() == old)
@dec.skipif(platform.machine() == "armv5tel", "See gh-413.")
def test_divide_err(self):
@@ -466,7 +472,7 @@ class TestSeterr(TestCase):
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
- self.assertEqual(len(w), 1)
+ assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
@@ -477,12 +483,12 @@ class TestSeterr(TestCase):
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
- self.assertEqual(self.called, 1)
+ assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
- self.assertEqual(self.called, 2)
+ assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
@@ -506,7 +512,7 @@ class TestSeterr(TestCase):
np.seterrobj(olderrobj)
-class TestFloatExceptions(TestCase):
+class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
@@ -590,20 +596,20 @@ class TestFloatExceptions(TestCase):
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
- self.assertEqual(len(w), 1)
- self.assertTrue("divide by zero" in str(w[0].message))
+ assert_equal(len(w), 1)
+ assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
- self.assertEqual(len(w), 2)
- self.assertTrue("overflow" in str(w[-1].message))
+ assert_equal(len(w), 2)
+ assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
- self.assertEqual(len(w), 3)
- self.assertTrue("invalid value" in str(w[-1].message))
+ assert_equal(len(w), 3)
+ assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
- self.assertEqual(len(w), 4)
- self.assertTrue("underflow" in str(w[-1].message))
+ assert_equal(len(w), 4)
+ assert_("underflow" in str(w[-1].message))
-class TestTypes(TestCase):
+class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
@@ -794,8 +800,8 @@ class TestTypes(TestCase):
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
- assert_(np.can_cast(np.float64, np.complex))
- assert_(not np.can_cast(np.complex, np.float))
+ assert_(np.can_cast(np.float64, complex))
+ assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
@@ -866,13 +872,16 @@ class TestTypes(TestCase):
assert_raises(TypeError, np.can_cast, 'i4', None)
assert_raises(TypeError, np.can_cast, None, 'i4')
+ # Also test keyword arguments
+ assert_(np.can_cast(from_=np.int32, to=np.int64))
+
# Custom exception class to test exception propagation in fromiter
class NIterError(Exception):
pass
-class TestFromiter(TestCase):
+class TestFromiter(object):
def makegen(self):
for x in range(24):
yield x**2
@@ -881,25 +890,25 @@ class TestFromiter(TestCase):
ai32 = np.fromiter(self.makegen(), np.int32)
ai64 = np.fromiter(self.makegen(), np.int64)
af = np.fromiter(self.makegen(), float)
- self.assertTrue(ai32.dtype == np.dtype(np.int32))
- self.assertTrue(ai64.dtype == np.dtype(np.int64))
- self.assertTrue(af.dtype == np.dtype(float))
+ assert_(ai32.dtype == np.dtype(np.int32))
+ assert_(ai64.dtype == np.dtype(np.int64))
+ assert_(af.dtype == np.dtype(float))
def test_lengths(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(len(a) == len(expected))
- self.assertTrue(len(a20) == 20)
- self.assertRaises(ValueError, np.fromiter,
+ assert_(len(a) == len(expected))
+ assert_(len(a20) == 20)
+ assert_raises(ValueError, np.fromiter,
self.makegen(), int, len(expected) + 10)
def test_values(self):
expected = np.array(list(self.makegen()))
a = np.fromiter(self.makegen(), int)
a20 = np.fromiter(self.makegen(), int, 20)
- self.assertTrue(np.alltrue(a == expected, axis=0))
- self.assertTrue(np.alltrue(a20 == expected[:20], axis=0))
+ assert_(np.alltrue(a == expected, axis=0))
+ assert_(np.alltrue(a20 == expected[:20], axis=0))
def load_data(self, n, eindex):
# Utility method for the issue 2592 tests.
@@ -912,18 +921,18 @@ class TestFromiter(TestCase):
def test_2592(self):
# Test iteration exceptions are correctly raised.
count, eindex = 10, 5
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
def test_2592_edge(self):
# Test iter. exceptions, edge case (exception at end of iterator).
count = 10
eindex = count-1
- self.assertRaises(NIterError, np.fromiter,
+ assert_raises(NIterError, np.fromiter,
self.load_data(count, eindex), dtype=int, count=count)
-class TestNonzero(TestCase):
+class TestNonzero(object):
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
@@ -975,11 +984,11 @@ class TestNonzero(TestCase):
def test_sparse(self):
# test special sparse condition boolean code path
for i in range(20):
- c = np.zeros(200, dtype=np.bool)
+ c = np.zeros(200, dtype=bool)
c[i::20] = True
assert_equal(np.nonzero(c)[0], np.arange(i, 200 + i, 20))
- c = np.zeros(400, dtype=np.bool)
+ c = np.zeros(400, dtype=bool)
c[10 + i:20 + i] = True
c[20 + i*2] = True
assert_equal(np.nonzero(c)[0],
@@ -1020,6 +1029,10 @@ class TestNonzero(TestCase):
# either integer or tuple arguments for axis
msg = "Mismatch for dtype: %s"
+ def assert_equal_w_dt(a, b, err_msg):
+ assert_equal(a.dtype, b.dtype, err_msg=err_msg)
+ assert_equal(a, b, err_msg=err_msg)
+
for dt in np.typecodes['All']:
err_msg = msg % (np.dtype(dt).name,)
@@ -1039,13 +1052,13 @@ class TestNonzero(TestCase):
m[1, 0] = '1970-01-12'
m = m.astype(dt)
- expected = np.array([2, 0, 0])
- assert_equal(np.count_nonzero(m, axis=0),
- expected, err_msg=err_msg)
+ expected = np.array([2, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
- expected = np.array([1, 1, 0])
- assert_equal(np.count_nonzero(m, axis=1),
- expected, err_msg=err_msg)
+ expected = np.array([1, 1, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
expected = np.array(2)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
@@ -1060,13 +1073,13 @@ class TestNonzero(TestCase):
# setup is slightly different for this dtype
m = np.array([np.void(1)] * 6).reshape((2, 3))
- expected = np.array([0, 0, 0])
- assert_equal(np.count_nonzero(m, axis=0),
- expected, err_msg=err_msg)
+ expected = np.array([0, 0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=0),
+ expected, err_msg=err_msg)
- expected = np.array([0, 0])
- assert_equal(np.count_nonzero(m, axis=1),
- expected, err_msg=err_msg)
+ expected = np.array([0, 0], dtype=np.intp)
+ assert_equal_w_dt(np.count_nonzero(m, axis=1),
+ expected, err_msg=err_msg)
expected = np.array(0)
assert_equal(np.count_nonzero(m, axis=(0, 1)),
@@ -1089,7 +1102,7 @@ class TestNonzero(TestCase):
rng = np.random.RandomState(1234)
m = rng.randint(-100, 100, size=size)
- n = m.astype(np.object)
+ n = m.astype(object)
for length in range(len(axis)):
for combo in combinations(axis, length):
@@ -1108,7 +1121,7 @@ class TestNonzero(TestCase):
assert_equal(m.nonzero(), tgt)
-class TestIndex(TestCase):
+class TestIndex(object):
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
@@ -1125,7 +1138,7 @@ class TestIndex(TestCase):
assert_equal(c.dtype, np.dtype('int32'))
-class TestBinaryRepr(TestCase):
+class TestBinaryRepr(object):
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
@@ -1162,7 +1175,7 @@ class TestBinaryRepr(TestCase):
assert_equal(np.binary_repr(num, width=width), exp)
-class TestBaseRepr(TestCase):
+class TestBaseRepr(object):
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
@@ -1178,13 +1191,13 @@ class TestBaseRepr(TestCase):
assert_equal(np.base_repr(-12, 4), '-30')
def test_base_range(self):
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 1)
- with self.assertRaises(ValueError):
+ with assert_raises(ValueError):
np.base_repr(1, 37)
-class TestArrayComparisons(TestCase):
+class TestArrayComparisons(object):
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1264,8 +1277,8 @@ def assert_array_strict_equal(x, y):
assert_(x.dtype.isnative == y.dtype.isnative)
-class TestClip(TestCase):
- def setUp(self):
+class TestClip(object):
+ def setup(self):
self.nr = 5
self.nc = 3
@@ -1380,7 +1393,7 @@ class TestClip(TestCase):
# Address Issue gh-5354 for clipping complex arrays
# Test native complex input without explicit min/max
# ie, either min=None or max=None
- a = np.ones(10, dtype=np.complex)
+ a = np.ones(10, dtype=complex)
m = a.min()
M = a.max()
am = self.fastclip(a, m, None)
@@ -1691,7 +1704,7 @@ class TestClip(TestCase):
a2 = np.clip(a, m, M, out=a)
self.clip(a, m, M, ac)
assert_array_strict_equal(a2, ac)
- self.assertTrue(a2 is a)
+ assert_(a2 is a)
def test_clip_nan(self):
d = np.arange(7.)
@@ -1706,10 +1719,10 @@ class TestAllclose(object):
rtol = 1e-5
atol = 1e-8
- def setUp(self):
+ def setup(self):
self.olderr = np.seterr(invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.olderr)
def tst_allclose(self, x, y):
@@ -1920,13 +1933,13 @@ class TestIsclose(object):
def test_non_finite_scalar(self):
# GH7014, when two scalars are compared the output should also be a
# scalar
- assert_(np.isclose(np.inf, -np.inf) is False)
- assert_(np.isclose(0, np.inf) is False)
- assert_(type(np.isclose(0, np.inf)) is bool)
+ assert_(np.isclose(np.inf, -np.inf) is np.False_)
+ assert_(np.isclose(0, np.inf) is np.False_)
+ assert_(type(np.isclose(0, np.inf)) is np.bool_)
-class TestStdVar(TestCase):
- def setUp(self):
+class TestStdVar(object):
+ def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
@@ -1964,7 +1977,7 @@ class TestStdVar(TestCase):
assert_array_equal(r, out)
-class TestStdVarComplex(TestCase):
+class TestStdVarComplex(object):
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
@@ -1976,10 +1989,10 @@ class TestStdVarComplex(TestCase):
assert_equal(np.std(1j), 0)
-class TestCreationFuncs(TestCase):
+class TestCreationFuncs(object):
# Test ones, zeros, empty and full.
- def setUp(self):
+ def setup(self):
dtypes = {np.dtype(tp) for tp in itertools.chain(*np.sctypes.values())}
# void, bytes, str
variable_sized = {tp for tp in dtypes if tp.str.endswith('0')}
@@ -2047,10 +2060,10 @@ class TestCreationFuncs(TestCase):
assert_(sys.getrefcount(dim) == beg)
-class TestLikeFuncs(TestCase):
+class TestLikeFuncs(object):
'''Test ones_like, zeros_like, empty_like and full_like'''
- def setUp(self):
+ def setup(self):
self.data = [
# Array scalars
(np.array(3.), None),
@@ -2165,7 +2178,7 @@ class TestLikeFuncs(TestCase):
self.check_like_function(np.full_like, np.inf, True)
-class TestCorrelate(TestCase):
+class TestCorrelate(object):
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
@@ -2179,7 +2192,7 @@ class TestCorrelate(TestCase):
-102., -54., -19.], dtype=dt)
def test_float(self):
- self._setup(np.float)
+ self._setup(float)
z = np.correlate(self.x, self.y, 'full')
assert_array_almost_equal(z, self.z1)
z = np.correlate(self.x, self.y[:-1], 'full')
@@ -2208,15 +2221,15 @@ class TestCorrelate(TestCase):
assert_array_equal(k, np.ones(3))
def test_complex(self):
- x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
- y = np.array([-1, -2j, 3+1j], dtype=np.complex)
- r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=np.complex)
+ x = np.array([1, 2, 3, 4+1j], dtype=complex)
+ y = np.array([-1, -2j, 3+1j], dtype=complex)
+ r_z = np.array([3-1j, 6, 8+1j, 11+5j, -5+8j, -4-1j], dtype=complex)
r_z = r_z[::-1].conjugate()
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
-class TestConvolve(TestCase):
+class TestConvolve(object):
def test_object(self):
d = [1.] * 100
k = [1.] * 3
@@ -2258,7 +2271,7 @@ class TestStringFunction(object):
assert_equal(str(a), "[1]")
-class TestRoll(TestCase):
+class TestRoll(object):
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
@@ -2316,7 +2329,7 @@ class TestRoll(TestCase):
assert_equal(np.roll(x, 1), np.array([]))
-class TestRollaxis(TestCase):
+class TestRollaxis(object):
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
@@ -2378,7 +2391,7 @@ class TestRollaxis(TestCase):
assert_(not res.flags['OWNDATA'])
-class TestMoveaxis(TestCase):
+class TestMoveaxis(object):
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
@@ -2452,7 +2465,7 @@ class TestMoveaxis(TestCase):
assert_(isinstance(result, np.ndarray))
-class TestCross(TestCase):
+class TestCross(object):
def test_2x2(self):
u = [1, 2]
v = [3, 4]
@@ -2615,7 +2628,7 @@ class TestRequire(object):
yield self.set_and_check_flag, flag, None, a
-class TestBroadcast(TestCase):
+class TestBroadcast(object):
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
@@ -2652,7 +2665,7 @@ class TestBroadcast(TestCase):
assert_equal(mit.numiter, j)
-class TestKeepdims(TestCase):
+class TestKeepdims(object):
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
@@ -2664,5 +2677,16 @@ class TestKeepdims(TestCase):
assert_raises(TypeError, np.sum, x, keepdims=True)
+class TestTensordot(object):
+
+ def test_zero_dimension(self):
+ # Test resolution to issue #5663
+ a = np.ndarray((3,0))
+ b = np.ndarray((0,4))
+ td = np.tensordot(a, b, (1, 0))
+ assert_array_equal(td, np.dot(a, b))
+ assert_array_equal(td, np.einsum('ij,jk', a, b))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 293031c03..8831cd1bb 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -1,10 +1,11 @@
from __future__ import division, absolute_import, print_function
import sys
+import itertools
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal, assert_raises
)
# This is the structure of the table used for plain objects:
@@ -102,99 +103,99 @@ def normalize_descr(descr):
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
"""Check creation of 0-dimensional objects"""
h = np.zeros((), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype.fields['x'][0].name[:4] == 'void')
- self.assertTrue(h.dtype.fields['x'][0].char == 'V')
- self.assertTrue(h.dtype.fields['x'][0].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype.fields['x'][0].name[:4] == 'void')
+ assert_(h.dtype.fields['x'][0].char == 'V')
+ assert_(h.dtype.fields['x'][0].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((), dtype='u1'))
def test_zerosSD(self):
"""Check creation of single-dimensional objects"""
h = np.zeros((2,), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['y'].name[:4] == 'void')
- self.assertTrue(h.dtype['y'].char == 'V')
- self.assertTrue(h.dtype['y'].type == np.void)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['y'].name[:4] == 'void')
+ assert_(h.dtype['y'].char == 'V')
+ assert_(h.dtype['y'].type == np.void)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
def test_zerosMD(self):
"""Check creation of multi-dimensional objects"""
h = np.zeros((2, 3), dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
- self.assertTrue(h.dtype['z'].name == 'uint8')
- self.assertTrue(h.dtype['z'].char == 'B')
- self.assertTrue(h.dtype['z'].type == np.uint8)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(h.dtype['z'].name == 'uint8')
+ assert_(h.dtype['z'].char == 'B')
+ assert_(h.dtype['z'].type == np.uint8)
# A small check that data is ok
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
-class test_create_zeros_plain(create_zeros, TestCase):
+class TestCreateZerosPlain(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
_descr = Pdescr
-class test_create_zeros_nested(create_zeros, TestCase):
+class TestCreateZerosNested(CreateZeros):
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
_descr = Ndescr
-class create_values(object):
+class CreateValues(object):
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
"""Check creation from tuples"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (2,))
+ assert_(h.shape == (2,))
else:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
def test_list_of_tuple(self):
"""Check creation from list of tuples"""
h = np.array([self._buffer], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 2))
+ assert_(h.shape == (1, 2))
else:
- self.assertTrue(h.shape == (1,))
+ assert_(h.shape == (1,))
def test_list_of_list_of_tuple(self):
"""Check creation from list of list of tuples"""
h = np.array([[self._buffer]], dtype=self._descr)
- self.assertTrue(normalize_descr(self._descr) == h.dtype.descr)
+ assert_(normalize_descr(self._descr) == h.dtype.descr)
if self.multiple_rows:
- self.assertTrue(h.shape == (1, 1, 2))
+ assert_(h.shape == (1, 1, 2))
else:
- self.assertTrue(h.shape == (1, 1))
+ assert_(h.shape == (1, 1))
-class test_create_values_plain_single(create_values, TestCase):
+class TestCreateValuesPlainSingle(CreateValues):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_create_values_plain_multiple(create_values, TestCase):
+class TestCreateValuesPlainMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class test_create_values_nested_single(create_values, TestCase):
+class TestCreateValuesNestedSingle(CreateValues):
"""Check the creation of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = 0
_buffer = NbufferT[0]
-class test_create_values_nested_multiple(create_values, TestCase):
+class TestCreateValuesNestedMultiple(CreateValues):
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = 1
@@ -205,18 +206,18 @@ class test_create_values_nested_multiple(create_values, TestCase):
# Reading tests
############################################################
-class read_values_plain(object):
+class ReadValuesPlain(object):
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][1],
@@ -225,31 +226,31 @@ class read_values_plain(object):
self._buffer[1][2]], dtype='u1'))
-class test_read_values_plain_single(read_values_plain, TestCase):
+class TestReadValuesPlainSingle(ReadValuesPlain):
"""Check the creation of heterogeneous arrays (plain, single row)"""
_descr = Pdescr
multiple_rows = 0
_buffer = PbufferT[0]
-class test_read_values_plain_multiple(read_values_plain, TestCase):
+class TestReadValuesPlainMultiple(ReadValuesPlain):
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
_descr = Pdescr
multiple_rows = 1
_buffer = PbufferT
-class read_values_nested(object):
+class ReadValuesNested(object):
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
"""Check reading the top fields of a nested array"""
h = np.array(self._buffer, dtype=self._descr)
if not self.multiple_rows:
- self.assertTrue(h.shape == ())
+ assert_(h.shape == ())
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
else:
- self.assertTrue(len(h) == 2)
+ assert_(len(h) == 2)
assert_equal(h['x'], np.array([self._buffer[0][0],
self._buffer[1][0]], dtype='i4'))
assert_equal(h['y'], np.array([self._buffer[0][4],
@@ -308,41 +309,41 @@ class read_values_nested(object):
def test_nested1_descriptor(self):
"""Check access nested descriptors of a nested array (1st level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['value'].name == 'complex128')
- self.assertTrue(h.dtype['Info']['y2'].name == 'float64')
+ assert_(h.dtype['Info']['value'].name == 'complex128')
+ assert_(h.dtype['Info']['y2'].name == 'float64')
if sys.version_info[0] >= 3:
- self.assertTrue(h.dtype['info']['Name'].name == 'str256')
+ assert_(h.dtype['info']['Name'].name == 'str256')
else:
- self.assertTrue(h.dtype['info']['Name'].name == 'unicode256')
- self.assertTrue(h.dtype['info']['Value'].name == 'complex128')
+ assert_(h.dtype['info']['Name'].name == 'unicode256')
+ assert_(h.dtype['info']['Value'].name == 'complex128')
def test_nested2_descriptor(self):
"""Check access nested descriptors of a nested array (2nd level)"""
h = np.array(self._buffer, dtype=self._descr)
- self.assertTrue(h.dtype['Info']['Info2']['value'].name == 'void256')
- self.assertTrue(h.dtype['Info']['Info2']['z3'].name == 'void64')
+ assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
+ assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
-class test_read_values_nested_single(read_values_nested, TestCase):
+class TestReadValuesNestedSingle(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, single row)"""
_descr = Ndescr
multiple_rows = False
_buffer = NbufferT[0]
-class test_read_values_nested_multiple(read_values_nested, TestCase):
+class TestReadValuesNestedMultiple(ReadValuesNested):
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
_descr = Ndescr
multiple_rows = True
_buffer = NbufferT
-class TestEmptyField(TestCase):
+class TestEmptyField(object):
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
@@ -363,19 +364,50 @@ class TestCommonType(TestCase):
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
-class TestMultipleFields(TestCase):
- def setUp(self):
+class TestMultipleFields(object):
+ def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
def _bad_call(self):
return self.ary['f0', 'f1']
def test_no_tuple(self):
- self.assertRaises(IndexError, self._bad_call)
+ assert_raises(IndexError, self._bad_call)
def test_return(self):
res = self.ary[['f0', 'f2']].tolist()
assert_(res == [(1, 3), (5, 7)])
+
+class TestIsSubDType(object):
+ # scalar types can be promoted into dtypes
+ wrappers = [np.dtype, lambda x: x]
+
+ def test_both_abstract(self):
+ assert_(np.issubdtype(np.floating, np.inexact))
+ assert_(not np.issubdtype(np.inexact, np.floating))
+
+ def test_same(self):
+ for cls in (np.float32, np.int32):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(np.issubdtype(w1(cls), w2(cls)))
+
+ def test_subclass(self):
+ # note we cannot promote floating to a dtype, as it would turn into a
+ # concrete type
+ for w in self.wrappers:
+ assert_(np.issubdtype(w(np.float32), np.floating))
+ assert_(np.issubdtype(w(np.float64), np.floating))
+
+ def test_subclass_backwards(self):
+ for w in self.wrappers:
+ assert_(not np.issubdtype(np.floating, w(np.float32)))
+ assert_(not np.issubdtype(np.floating, w(np.float64)))
+
+ def test_sibling_class(self):
+ for w1, w2 in itertools.product(self.wrappers, repeat=2):
+ assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
+ assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index b1ce12f56..305258d6f 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -35,7 +35,7 @@ def test_float_types():
""" Check formatting.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
@@ -51,7 +51,7 @@ def test_nan_inf_float():
""" Check formatting of nan & inf.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
@@ -79,7 +79,7 @@ def test_complex_types():
"""Check formatting of complex types.
This is only for the str function, and only for simple types.
- The precision of np.float and np.longdouble aren't the same as the
+ The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 6f1ed37d1..27d35fa65 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -8,12 +8,12 @@ from os import path
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_raises, assert_warns
)
-class TestFromrecords(TestCase):
+class TestFromrecords(object):
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
@@ -29,7 +29,7 @@ class TestFromrecords(TestCase):
def test_fromrecords_0len(self):
""" Verify fromrecords works with a 0-length input """
- dtype = [('a', np.float), ('b', np.float)]
+ dtype = [('a', float), ('b', float)]
r = np.rec.fromrecords([], dtype=dtype)
assert_equal(r.shape, (0,))
@@ -153,11 +153,6 @@ class TestFromrecords(TestCase):
assert_equal(r['c'].dtype.type, np.record)
assert_equal(type(r['c']), np.recarray)
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- assert_equal(r[['a', 'b']].dtype.type, np.record)
- assert_equal(type(r[['a', 'b']]), np.recarray)
-
#and that it preserves subclasses (gh-6949)
class C(np.recarray):
pass
@@ -235,13 +230,13 @@ class TestFromrecords(TestCase):
def test_fromrecords_with_explicit_dtype(self):
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
- dtype=[('a', int), ('b', np.object)])
+ dtype=[('a', int), ('b', object)])
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
#
- ndtype = np.dtype([('a', int), ('b', np.object)])
+ ndtype = np.dtype([('a', int), ('b', object)])
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
assert_equal(a.a, [1, 2])
assert_equal(a[0].a, 1)
@@ -298,8 +293,8 @@ class TestFromrecords(TestCase):
assert_equal(rec['f1'], [b'', b'', b''])
-class TestRecord(TestCase):
- def setUp(self):
+class TestRecord(object):
+ def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
("col2", "<i4"),
@@ -323,7 +318,7 @@ class TestRecord(TestCase):
def assign_invalid_column(x):
x[0].col5 = 1
- self.assertRaises(AttributeError, assign_invalid_column, a)
+ assert_raises(AttributeError, assign_invalid_column, a)
def test_nonwriteable_setfield(self):
# gh-8171
@@ -334,15 +329,6 @@ class TestRecord(TestCase):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
- def test_out_of_order_fields(self):
- """Ticket #1431."""
- # this test will be invalid in 1.13
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- x = self.data[['col1', 'col2']]
- y = self.data[['col2', 'col1']]
- assert_equal(x[0][0], y[0][1])
-
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
@@ -371,8 +357,7 @@ class TestRecord(TestCase):
# https://github.com/numpy/numpy/issues/3256
ra = np.recarray((2,), dtype=[('x', object), ('y', float), ('z', int)])
- with assert_warns(FutureWarning):
- ra[['x','y']] # TypeError?
+ ra[['x','y']] # TypeError?
def test_record_scalar_setitem(self):
# https://github.com/numpy/numpy/issues/3561
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index fb9ea5252..84469d03b 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -13,26 +13,25 @@ from itertools import chain
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, IS_PYPY,
+ run_module_suite, assert_, assert_equal, IS_PYPY,
assert_almost_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns, dec, suppress_warnings
+ assert_raises, assert_warns, dec, suppress_warnings,
+ _assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.testing.utils import _assert_valid_refcount, HAS_REFCOUNT
from numpy.compat import asbytes, asunicode, long
-rlevel = 1
-class TestRegression(TestCase):
- def test_invalid_round(self, level=rlevel):
+class TestRegression(object):
+ def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
assert_array_equal(np.array([v]), np.array(v))
- def test_mem_empty(self, level=rlevel):
+ def test_mem_empty(self):
# Ticket #7
np.empty((1,), dtype=[('x', np.int64)])
- def test_pickle_transposed(self, level=rlevel):
+ def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
f = BytesIO()
@@ -42,44 +41,44 @@ class TestRegression(TestCase):
f.close()
assert_array_equal(a, b)
- def test_typeNA(self, level=rlevel):
+ def test_typeNA(self):
# Ticket #31
assert_equal(np.typeNA[np.int64], 'Int64')
assert_equal(np.typeNA[np.uint64], 'UInt64')
- def test_dtype_names(self, level=rlevel):
+ def test_dtype_names(self):
# Ticket #35
# Should succeed
np.dtype([(('name', 'label'), np.int32, 3)])
- def test_reduce(self, level=rlevel):
+ def test_reduce(self):
# Ticket #40
assert_almost_equal(np.add.reduce([1., .5], dtype=None), 1.5)
- def test_zeros_order(self, level=rlevel):
+ def test_zeros_order(self):
# Ticket #43
np.zeros([3], int, 'C')
np.zeros([3], order='C')
np.zeros([3], int, order='C')
- def test_asarray_with_order(self, level=rlevel):
+ def test_asarray_with_order(self):
# Check that nothing is done when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(a is np.asarray(a, order='F'))
- def test_ravel_with_order(self, level=rlevel):
+ def test_ravel_with_order(self):
# Check that ravel works when order='F' and array C/F-contiguous
a = np.ones(2)
assert_(not a.ravel('F').flags.owndata)
- def test_sort_bigendian(self, level=rlevel):
+ def test_sort_bigendian(self):
# Ticket #47
a = np.linspace(0, 10, 11)
c = a.astype(np.dtype('<f8'))
c.sort()
assert_array_almost_equal(c, a)
- def test_negative_nd_indexing(self, level=rlevel):
+ def test_negative_nd_indexing(self):
# Ticket #49
c = np.arange(125).reshape((5, 5, 5))
origidx = np.array([-1, 0, 1])
@@ -87,7 +86,7 @@ class TestRegression(TestCase):
c[idx]
assert_array_equal(idx, origidx)
- def test_char_dump(self, level=rlevel):
+ def test_char_dump(self):
# Ticket #50
f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
@@ -96,7 +95,7 @@ class TestRegression(TestCase):
ca = np.load(f)
f.close()
- def test_noncontiguous_fill(self, level=rlevel):
+ def test_noncontiguous_fill(self):
# Ticket #58.
a = np.zeros((5, 3))
b = a[:, :2,]
@@ -104,60 +103,60 @@ class TestRegression(TestCase):
def rs():
b.shape = (10,)
- self.assertRaises(AttributeError, rs)
+ assert_raises(AttributeError, rs)
- def test_bool(self, level=rlevel):
+ def test_bool(self):
# Ticket #60
np.bool_(1) # Should succeed
- def test_indexing1(self, level=rlevel):
+ def test_indexing1(self):
# Ticket #64
descr = [('x', [('y', [('z', 'c16', (2,)),]),]),]
buffer = ((([6j, 4j],),),)
h = np.array(buffer, dtype=descr)
h['x']['y']['z']
- def test_indexing2(self, level=rlevel):
+ def test_indexing2(self):
# Ticket #65
descr = [('x', 'i4', (2,))]
buffer = ([3, 2],)
h = np.array(buffer, dtype=descr)
h['x']
- def test_round(self, level=rlevel):
+ def test_round(self):
# Ticket #67
x = np.array([1+2j])
assert_almost_equal(x**(-1), [1/(1+2j)])
- def test_scalar_compare(self, level=rlevel):
+ def test_scalar_compare(self):
# Trac Ticket #72
# https://github.com/numpy/numpy/issues/565
a = np.array(['test', 'auto'])
assert_array_equal(a == 'auto', np.array([False, True]))
- self.assertTrue(a[1] == 'auto')
- self.assertTrue(a[0] != 'auto')
+ assert_(a[1] == 'auto')
+ assert_(a[0] != 'auto')
b = np.linspace(0, 10, 11)
# This should return true for now, but will eventually raise an error:
with suppress_warnings() as sup:
sup.filter(FutureWarning)
- self.assertTrue(b != 'auto')
- self.assertTrue(b[0] != 'auto')
+ assert_(b != 'auto')
+ assert_(b[0] != 'auto')
- def test_unicode_swapping(self, level=rlevel):
+ def test_unicode_swapping(self):
# Ticket #79
ulen = 1
ucs_value = u'\U0010FFFF'
ua = np.array([[[ucs_value*ulen]*2]*3]*4, dtype='U%s' % ulen)
ua.newbyteorder() # Should succeed.
- def test_object_array_fill(self, level=rlevel):
+ def test_object_array_fill(self):
# Ticket #86
x = np.zeros(1, 'O')
x.fill([])
- def test_mem_dtype_align(self, level=rlevel):
+ def test_mem_dtype_align(self):
# Ticket #93
- self.assertRaises(TypeError, np.dtype,
+ assert_raises(TypeError, np.dtype,
{'names':['a'], 'formats':['foo']}, align=1)
@dec.knownfailureif((sys.version_info[0] >= 3) or
@@ -165,16 +164,16 @@ class TestRegression(TestCase):
platform.architecture()[0] == "64bit"),
"numpy.intp('0xff', 16) not supported on Py3, "
"as it does not inherit from Python int")
- def test_intp(self, level=rlevel):
+ def test_intp(self):
# Ticket #99
i_width = np.int_(0).nbytes*2 - 1
np.intp('0x' + 'f'*i_width, 16)
- self.assertRaises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
- self.assertRaises(ValueError, np.intp, '0x1', 32)
+ assert_raises(OverflowError, np.intp, '0x' + 'f'*(i_width+1), 16)
+ assert_raises(ValueError, np.intp, '0x1', 32)
assert_equal(255, np.intp('0xFF', 16))
assert_equal(1024, np.intp(1024))
- def test_endian_bool_indexing(self, level=rlevel):
+ def test_endian_bool_indexing(self):
# Ticket #105
a = np.arange(10., dtype='>f8')
b = np.arange(10., dtype='<f8')
@@ -187,7 +186,7 @@ class TestRegression(TestCase):
assert_(np.all(a[ya] > 0.5))
assert_(np.all(b[yb] > 0.5))
- def test_endian_where(self, level=rlevel):
+ def test_endian_where(self):
# GitHub issue #369
net = np.zeros(3, dtype='>f4')
net[1] = 0.00458849
@@ -197,7 +196,7 @@ class TestRegression(TestCase):
correct = np.array([ 0.60520202, 0.00458849, 0.60520202])
assert_array_almost_equal(test, correct)
- def test_endian_recarray(self, level=rlevel):
+ def test_endian_recarray(self):
# Ticket #2185
dt = np.dtype([
('head', '>u4'),
@@ -213,7 +212,7 @@ class TestRegression(TestCase):
buf[0]['data'][0] = d
assert_(buf[0]['head'] == 1)
- def test_mem_dot(self, level=rlevel):
+ def test_mem_dot(self):
# Ticket #106
x = np.random.randn(0, 1)
y = np.random.randn(10, 1)
@@ -227,7 +226,7 @@ class TestRegression(TestCase):
np.core.multiarray.dot(x, np.transpose(y), out=z)
assert_equal(_z, np.ones(10))
- def test_arange_endian(self, level=rlevel):
+ def test_arange_endian(self):
# Ticket #111
ref = np.arange(10)
x = np.arange(10, dtype='<f8')
@@ -235,31 +234,31 @@ class TestRegression(TestCase):
x = np.arange(10, dtype='>f8')
assert_array_equal(ref, x)
- def test_argmax(self, level=rlevel):
+ def test_argmax(self):
# Ticket #119
a = np.random.normal(0, 1, (4, 5, 6, 7, 8))
for i in range(a.ndim):
a.argmax(i) # Should succeed
- def test_mem_divmod(self, level=rlevel):
+ def test_mem_divmod(self):
# Ticket #126
for i in range(10):
divmod(np.array([i])[0], 10)
- def test_hstack_invalid_dims(self, level=rlevel):
+ def test_hstack_invalid_dims(self):
# Ticket #128
x = np.arange(9).reshape((3, 3))
y = np.array([0, 0, 0])
- self.assertRaises(ValueError, np.hstack, (x, y))
+ assert_raises(ValueError, np.hstack, (x, y))
- def test_squeeze_type(self, level=rlevel):
+ def test_squeeze_type(self):
# Ticket #133
a = np.array([3])
b = np.array(3)
assert_(type(a.squeeze()) is np.ndarray)
assert_(type(b.squeeze()) is np.ndarray)
- def test_add_identity(self, level=rlevel):
+ def test_add_identity(self):
# Ticket #143
assert_equal(0, np.add.identity)
@@ -268,11 +267,11 @@ class TestRegression(TestCase):
a = np.float_(23.) + 2**135
assert_equal(a, 23. + 2**135)
- def test_binary_repr_0(self, level=rlevel):
+ def test_binary_repr_0(self):
# Ticket #151
assert_equal('0', np.binary_repr(0))
- def test_rec_iterate(self, level=rlevel):
+ def test_rec_iterate(self):
# Ticket #160
descr = np.dtype([('i', int), ('f', float), ('s', '|S3')])
x = np.rec.array([(1, 1.1, '1.0'),
@@ -280,19 +279,19 @@ class TestRegression(TestCase):
x[0].tolist()
[i for i in x[0]]
- def test_unicode_string_comparison(self, level=rlevel):
+ def test_unicode_string_comparison(self):
# Ticket #190
a = np.array('hello', np.unicode_)
b = np.array('world')
a == b
- def test_tobytes_FORTRANORDER_discontiguous(self, level=rlevel):
+ def test_tobytes_FORTRANORDER_discontiguous(self):
# Fix in r2836
# Create non-contiguous Fortran ordered array
x = np.array(np.random.rand(3, 3), order='F')[:, :2]
assert_array_almost_equal(x.ravel(), np.fromstring(x.tobytes()))
- def test_flat_assignment(self, level=rlevel):
+ def test_flat_assignment(self):
# Correct behaviour of ticket #194
x = np.empty((3, 1))
x.flat = np.arange(3)
@@ -300,7 +299,7 @@ class TestRegression(TestCase):
x.flat = np.arange(3, dtype=float)
assert_array_almost_equal(x, [[0], [1], [2]])
- def test_broadcast_flat_assignment(self, level=rlevel):
+ def test_broadcast_flat_assignment(self):
# Ticket #194
x = np.empty((3, 1))
@@ -310,8 +309,8 @@ class TestRegression(TestCase):
def bfb():
x[:] = np.arange(3, dtype=float)
- self.assertRaises(ValueError, bfa)
- self.assertRaises(ValueError, bfb)
+ assert_raises(ValueError, bfa)
+ assert_raises(ValueError, bfb)
def test_nonarray_assignment(self):
# See also Issue gh-2870, test for non-array assignment
@@ -328,7 +327,7 @@ class TestRegression(TestCase):
assert_raises(ValueError, assign, a, r, np.nan)
a[r] = np.array(np.nan)
- def test_unpickle_dtype_with_object(self, level=rlevel):
+ def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
f = BytesIO()
@@ -338,15 +337,15 @@ class TestRegression(TestCase):
f.close()
assert_equal(dt, dt_)
- def test_mem_array_creation_invalid_specification(self, level=rlevel):
+ def test_mem_array_creation_invalid_specification(self):
# Ticket #196
dt = np.dtype([('x', int), ('y', np.object_)])
# Wrong way
- self.assertRaises(ValueError, np.array, [1, 'object'], dt)
+ assert_raises(ValueError, np.array, [1, 'object'], dt)
# Correct way
np.array([(1, 'object')], dt)
- def test_recarray_single_element(self, level=rlevel):
+ def test_recarray_single_element(self):
# Ticket #202
a = np.array([1, 2, 3], dtype=np.int32)
b = a.copy()
@@ -354,23 +353,23 @@ class TestRegression(TestCase):
assert_array_equal(a, b)
assert_equal(a, r[0][0])
- def test_zero_sized_array_indexing(self, level=rlevel):
+ def test_zero_sized_array_indexing(self):
# Ticket #205
tmp = np.array([])
def index_tmp():
tmp[np.array(10)]
- self.assertRaises(IndexError, index_tmp)
+ assert_raises(IndexError, index_tmp)
- def test_chararray_rstrip(self, level=rlevel):
+ def test_chararray_rstrip(self):
# Ticket #222
x = np.chararray((1,), 5)
x[0] = b'a '
x = x.rstrip()
assert_equal(x[0], b'a')
- def test_object_array_shape(self, level=rlevel):
+ def test_object_array_shape(self):
# Ticket #239
assert_equal(np.array([[1, 2], 3, 4], dtype=object).shape, (3,))
assert_equal(np.array([[1, 2], [3, 4]], dtype=object).shape, (2, 2))
@@ -379,20 +378,20 @@ class TestRegression(TestCase):
assert_equal(np.array([[], [], []], dtype=object).shape, (3, 0))
assert_equal(np.array([[3, 4], [5, 6], None], dtype=object).shape, (3,))
- def test_mem_around(self, level=rlevel):
+ def test_mem_around(self):
# Ticket #243
x = np.zeros((1,))
y = [0]
decimal = 6
np.around(abs(x-y), decimal) <= 10.0**(-decimal)
- def test_character_array_strip(self, level=rlevel):
+ def test_character_array_strip(self):
# Ticket #246
x = np.char.array(("x", "x ", "x "))
for c in x:
assert_equal(c, "x")
- def test_lexsort(self, level=rlevel):
+ def test_lexsort(self):
# Lexsort memory error
v = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
assert_equal(np.lexsort(v), 0)
@@ -444,36 +443,36 @@ class TestRegression(TestCase):
for name in result.dtype.names:
assert_(isinstance(name, str))
- def test_pickle_dtype(self, level=rlevel):
+ def test_pickle_dtype(self):
# Ticket #251
- pickle.dumps(np.float)
+ pickle.dumps(float)
- def test_swap_real(self, level=rlevel):
+ def test_swap_real(self):
# Ticket #265
assert_equal(np.arange(4, dtype='>c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='<c8').imag.max(), 0.0)
assert_equal(np.arange(4, dtype='>c8').real.max(), 3.0)
assert_equal(np.arange(4, dtype='<c8').real.max(), 3.0)
- def test_object_array_from_list(self, level=rlevel):
+ def test_object_array_from_list(self):
# Ticket #270
- self.assertEqual(np.array([1, 'A', None]).shape, (3,))
+ assert_(np.array([1, 'A', None]).shape == (3,))
- def test_multiple_assign(self, level=rlevel):
+ def test_multiple_assign(self):
# Ticket #273
a = np.zeros((3, 1), int)
a[[1, 2]] = 1
- def test_empty_array_type(self, level=rlevel):
+ def test_empty_array_type(self):
assert_equal(np.array([]).dtype, np.zeros(0).dtype)
- def test_void_copyswap(self, level=rlevel):
+ def test_void_copyswap(self):
dt = np.dtype([('one', '<i4'), ('two', '<i4')])
x = np.array((1, 2), dtype=dt)
x = x.byteswap()
assert_(x['one'] > 1 and x['two'] > 2)
- def test_method_args(self, level=rlevel):
+ def test_method_args(self):
# Make sure methods and functions have same default axis
# keyword and arguments
funcs1 = ['argmax', 'argmin', 'sum', ('product', 'prod'),
@@ -515,17 +514,17 @@ class TestRegression(TestCase):
res2 = getattr(np, func)(arr1, arr2)
assert_(abs(res1-res2).max() < 1e-8, func)
- def test_mem_lexsort_strings(self, level=rlevel):
+ def test_mem_lexsort_strings(self):
# Ticket #298
lst = ['abc', 'cde', 'fgh']
np.lexsort((lst,))
- def test_fancy_index(self, level=rlevel):
+ def test_fancy_index(self):
# Ticket #302
x = np.array([1, 2])[np.array([0])]
assert_equal(x.shape, (1,))
- def test_recarray_copy(self, level=rlevel):
+ def test_recarray_copy(self):
# Ticket #312
dt = [('x', np.int16), ('y', np.float64)]
ra = np.array([(1, 2.3)], dtype=dt)
@@ -533,64 +532,64 @@ class TestRegression(TestCase):
rb['x'] = 2.
assert_(ra['x'] != rb['x'])
- def test_rec_fromarray(self, level=rlevel):
+ def test_rec_fromarray(self):
# Ticket #322
x1 = np.array([[1, 2], [3, 4], [5, 6]])
x2 = np.array(['a', 'dd', 'xyz'])
x3 = np.array([1.1, 2, 3])
np.rec.fromarrays([x1, x2, x3], formats="(2,)i4,a3,f8")
- def test_object_array_assign(self, level=rlevel):
+ def test_object_array_assign(self):
x = np.empty((2, 2), object)
x.flat[2] = (1, 2, 3)
assert_equal(x.flat[2], (1, 2, 3))
- def test_ndmin_float64(self, level=rlevel):
+ def test_ndmin_float64(self):
# Ticket #324
x = np.array([1, 2, 3], dtype=np.float64)
assert_equal(np.array(x, dtype=np.float32, ndmin=2).ndim, 2)
assert_equal(np.array(x, dtype=np.float64, ndmin=2).ndim, 2)
- def test_ndmin_order(self, level=rlevel):
+ def test_ndmin_order(self):
# Issue #465 and related checks
assert_(np.array([1, 2], order='C', ndmin=3).flags.c_contiguous)
assert_(np.array([1, 2], order='F', ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='F'), ndmin=3).flags.f_contiguous)
assert_(np.array(np.ones((2, 2), order='C'), ndmin=3).flags.c_contiguous)
- def test_mem_axis_minimization(self, level=rlevel):
+ def test_mem_axis_minimization(self):
# Ticket #327
data = np.arange(5)
data = np.add.outer(data, data)
- def test_mem_float_imag(self, level=rlevel):
+ def test_mem_float_imag(self):
# Ticket #330
np.float64(1.0).imag
- def test_dtype_tuple(self, level=rlevel):
+ def test_dtype_tuple(self):
# Ticket #334
assert_(np.dtype('i4') == np.dtype(('i4', ())))
- def test_dtype_posttuple(self, level=rlevel):
+ def test_dtype_posttuple(self):
# Ticket #335
np.dtype([('col1', '()i4')])
- def test_numeric_carray_compare(self, level=rlevel):
+ def test_numeric_carray_compare(self):
# Ticket #341
assert_equal(np.array(['X'], 'c'), b'X')
- def test_string_array_size(self, level=rlevel):
+ def test_string_array_size(self):
# Ticket #342
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.array, [['X'], ['X', 'X', 'X']], '|S1')
- def test_dtype_repr(self, level=rlevel):
+ def test_dtype_repr(self):
# Ticket #344
dt1 = np.dtype(('uint32', 2))
dt2 = np.dtype(('uint32', (2,)))
assert_equal(dt1.__repr__(), dt2.__repr__())
- def test_reshape_order(self, level=rlevel):
+ def test_reshape_order(self):
# Make sure reshape order works.
a = np.arange(6).reshape(2, 3, order='F')
assert_equal(a, [[0, 2, 4], [1, 3, 5]])
@@ -598,13 +597,13 @@ class TestRegression(TestCase):
b = a[:, 1]
assert_equal(b.reshape(2, 2, order='F'), [[2, 6], [4, 8]])
- def test_reshape_zero_strides(self, level=rlevel):
+ def test_reshape_zero_strides(self):
# Issue #380, test reshaping of zero strided arrays
a = np.ones(1)
a = np.lib.stride_tricks.as_strided(a, shape=(5,), strides=(0,))
assert_(a.reshape(5, 1).strides[0] == 0)
- def test_reshape_zero_size(self, level=rlevel):
+ def test_reshape_zero_size(self):
# GitHub Issue #2700, setting shape failed for 0-sized arrays
a = np.ones((0, 2))
a.shape = (-1, 2)
@@ -621,22 +620,22 @@ class TestRegression(TestCase):
assert_equal(a.reshape(3, 2, 1, 1, order='F').strides, strides_f)
assert_equal(np.array(0, dtype=np.int32).reshape(1, 1).strides, (4, 4))
- def test_repeat_discont(self, level=rlevel):
+ def test_repeat_discont(self):
# Ticket #352
a = np.arange(12).reshape(4, 3)[:, 2]
assert_equal(a.repeat(3), [2, 2, 2, 5, 5, 5, 8, 8, 8, 11, 11, 11])
- def test_array_index(self, level=rlevel):
+ def test_array_index(self):
# Make sure optimization is not called in this case.
a = np.array([1, 2, 3])
a2 = np.array([[1, 2, 3]])
assert_equal(a[np.where(a == 3)], a2[np.where(a2 == 3)])
- def test_object_argmax(self, level=rlevel):
+ def test_object_argmax(self):
a = np.array([1, 2, 3], dtype=object)
assert_(a.argmax() == 2)
- def test_recarray_fields(self, level=rlevel):
+ def test_recarray_fields(self):
# Ticket #372
dt0 = np.dtype([('f0', 'i4'), ('f1', 'i4')])
dt1 = np.dtype([('f0', 'i8'), ('f1', 'i8')])
@@ -647,22 +646,22 @@ class TestRegression(TestCase):
np.rec.fromarrays([(1, 2), (3, 4)])]:
assert_(a.dtype in [dt0, dt1])
- def test_random_shuffle(self, level=rlevel):
+ def test_random_shuffle(self):
# Ticket #374
a = np.arange(5).reshape((5, 1))
b = a.copy()
np.random.shuffle(b)
assert_equal(np.sort(b, axis=0), a)
- def test_refcount_vdot(self, level=rlevel):
+ def test_refcount_vdot(self):
# Changeset #3443
_assert_valid_refcount(np.vdot)
- def test_startswith(self, level=rlevel):
+ def test_startswith(self):
ca = np.char.array(['Hi', 'There'])
assert_equal(ca.startswith('H'), [True, False])
- def test_noncommutative_reduce_accumulate(self, level=rlevel):
+ def test_noncommutative_reduce_accumulate(self):
# Ticket #413
tosubtract = np.arange(5)
todivide = np.array([2.0, 0.5, 0.25])
@@ -673,28 +672,28 @@ class TestRegression(TestCase):
assert_array_equal(np.divide.accumulate(todivide),
np.array([2., 4., 16.]))
- def test_convolve_empty(self, level=rlevel):
+ def test_convolve_empty(self):
# Convolve should raise an error for empty input array.
- self.assertRaises(ValueError, np.convolve, [], [1])
- self.assertRaises(ValueError, np.convolve, [1], [])
+ assert_raises(ValueError, np.convolve, [], [1])
+ assert_raises(ValueError, np.convolve, [1], [])
- def test_multidim_byteswap(self, level=rlevel):
+ def test_multidim_byteswap(self):
# Ticket #449
r = np.array([(1, (0, 1, 2))], dtype="i2,3i2")
assert_array_equal(r.byteswap(),
np.array([(256, (0, 256, 512))], r.dtype))
- def test_string_NULL(self, level=rlevel):
+ def test_string_NULL(self):
# Changeset 3557
assert_equal(np.array("a\x00\x0b\x0c\x00").item(),
'a\x00\x0b\x0c')
- def test_junk_in_string_fields_of_recarray(self, level=rlevel):
+ def test_junk_in_string_fields_of_recarray(self):
# Ticket #483
r = np.array([[b'abc']], dtype=[('var1', '|S20')])
assert_(asbytes(r['var1'][0][0]) == b'abc')
- def test_take_output(self, level=rlevel):
+ def test_take_output(self):
# Ensure that 'take' honours output parameter.
x = np.arange(12).reshape((3, 4))
a = np.take(x, [0, 2], axis=1)
@@ -715,13 +714,13 @@ class TestRegression(TestCase):
if HAS_REFCOUNT:
assert_(ref_d == sys.getrefcount(d))
- def test_array_str_64bit(self, level=rlevel):
+ def test_array_str_64bit(self):
# Ticket #501
s = np.array([1, np.nan], dtype=np.float64)
with np.errstate(all='raise'):
np.array_str(s) # Should succeed
- def test_frompyfunc_endian(self, level=rlevel):
+ def test_frompyfunc_endian(self):
# Ticket #503
from math import radians
uradians = np.frompyfunc(radians, 1, 1)
@@ -730,33 +729,33 @@ class TestRegression(TestCase):
assert_almost_equal(uradians(big_endian).astype(float),
uradians(little_endian).astype(float))
- def test_mem_string_arr(self, level=rlevel):
+ def test_mem_string_arr(self):
# Ticket #514
s = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
t = []
np.hstack((t, s))
- def test_arr_transpose(self, level=rlevel):
+ def test_arr_transpose(self):
# Ticket #516
x = np.random.rand(*(2,)*16)
x.transpose(list(range(16))) # Should succeed
- def test_string_mergesort(self, level=rlevel):
+ def test_string_mergesort(self):
# Ticket #540
x = np.array(['a']*32)
assert_array_equal(x.argsort(kind='m'), np.arange(32))
- def test_argmax_byteorder(self, level=rlevel):
+ def test_argmax_byteorder(self):
# Ticket #546
a = np.arange(3, dtype='>f')
assert_(a[a.argmax()] == a.max())
- def test_rand_seed(self, level=rlevel):
+ def test_rand_seed(self):
# Ticket #555
for l in np.arange(4):
np.random.seed(l)
- def test_mem_deallocation_leak(self, level=rlevel):
+ def test_mem_deallocation_leak(self):
# Ticket #562
a = np.zeros(5, dtype=float)
b = np.array(a, dtype=float)
@@ -764,9 +763,9 @@ class TestRegression(TestCase):
def test_mem_on_invalid_dtype(self):
"Ticket #583"
- self.assertRaises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
+ assert_raises(ValueError, np.fromiter, [['12', ''], ['13', '']], str)
- def test_dot_negative_stride(self, level=rlevel):
+ def test_dot_negative_stride(self):
# Ticket #588
x = np.array([[1, 5, 25, 125., 625]])
y = np.array([[20.], [160.], [640.], [1280.], [1024.]])
@@ -774,7 +773,7 @@ class TestRegression(TestCase):
y2 = y[::-1]
assert_equal(np.dot(x, z), np.dot(x, y2))
- def test_object_casting(self, level=rlevel):
+ def test_object_casting(self):
# This used to trigger the object-type version of
# the bitwise_or operation, because float64 -> object
# casting succeeds
@@ -783,16 +782,16 @@ class TestRegression(TestCase):
y = np.zeros([484, 286])
x |= y
- self.assertRaises(TypeError, rs)
+ assert_raises(TypeError, rs)
- def test_unicode_scalar(self, level=rlevel):
+ def test_unicode_scalar(self):
# Ticket #600
x = np.array(["DROND", "DROND1"], dtype="U6")
el = x[1]
new = pickle.loads(pickle.dumps(el))
assert_equal(new, el)
- def test_arange_non_native_dtype(self, level=rlevel):
+ def test_arange_non_native_dtype(self):
# Ticket #616
for T in ('>f4', '<f4'):
dt = np.dtype(T)
@@ -800,73 +799,73 @@ class TestRegression(TestCase):
assert_equal(np.arange(0.5, dtype=dt).dtype, dt)
assert_equal(np.arange(5, dtype=dt).dtype, dt)
- def test_bool_flat_indexing_invalid_nr_elements(self, level=rlevel):
+ def test_bool_flat_indexing_invalid_nr_elements(self):
s = np.ones(10, dtype=float)
x = np.array((15,), dtype=float)
def ia(x, s, v):
x[(s > 0)] = v
- self.assertRaises(IndexError, ia, x, s, np.zeros(9, dtype=float))
- self.assertRaises(IndexError, ia, x, s, np.zeros(11, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(9, dtype=float))
+ assert_raises(IndexError, ia, x, s, np.zeros(11, dtype=float))
# Old special case (different code path):
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
- self.assertRaises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(9, dtype=float))
+ assert_raises(ValueError, ia, x.flat, s, np.zeros(11, dtype=float))
- def test_mem_scalar_indexing(self, level=rlevel):
+ def test_mem_scalar_indexing(self):
# Ticket #603
x = np.array([0], dtype=float)
index = np.array(0, dtype=np.int32)
x[index]
- def test_binary_repr_0_width(self, level=rlevel):
+ def test_binary_repr_0_width(self):
assert_equal(np.binary_repr(0, width=3), '000')
- def test_fromstring(self, level=rlevel):
+ def test_fromstring(self):
assert_equal(np.fromstring("12:09:09", dtype=int, sep=":"),
[12, 9, 9])
- def test_searchsorted_variable_length(self, level=rlevel):
+ def test_searchsorted_variable_length(self):
x = np.array(['a', 'aa', 'b'])
y = np.array(['d', 'e'])
assert_equal(x.searchsorted(y), [3, 3])
- def test_string_argsort_with_zeros(self, level=rlevel):
+ def test_string_argsort_with_zeros(self):
# Check argsort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
assert_array_equal(x.argsort(kind='m'), np.array([1, 0]))
assert_array_equal(x.argsort(kind='q'), np.array([1, 0]))
- def test_string_sort_with_zeros(self, level=rlevel):
+ def test_string_sort_with_zeros(self):
# Check sort for strings containing zeros.
x = np.fromstring("\x00\x02\x00\x01", dtype="|S2")
y = np.fromstring("\x00\x01\x00\x02", dtype="|S2")
assert_array_equal(np.sort(x, kind="q"), y)
- def test_copy_detection_zero_dim(self, level=rlevel):
+ def test_copy_detection_zero_dim(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
- def test_flat_byteorder(self, level=rlevel):
+ def test_flat_byteorder(self):
# Ticket #657
x = np.arange(10)
assert_array_equal(x.astype('>i4'), x.astype('<i4').flat[:])
assert_array_equal(x.astype('>i4').flat[:], x.astype('<i4'))
- def test_uint64_from_negative(self, level=rlevel):
+ def test_uint64_from_negative(self):
assert_equal(np.uint64(-2), np.uint64(18446744073709551614))
- def test_sign_bit(self, level=rlevel):
+ def test_sign_bit(self):
x = np.array([0, -0.0, 0])
assert_equal(str(np.abs(x)), '[ 0. 0. 0.]')
- def test_flat_index_byteswap(self, level=rlevel):
+ def test_flat_index_byteswap(self):
for dt in (np.dtype('<i4'), np.dtype('>i4')):
x = np.array([-1, 0, 1], dtype=dt)
assert_equal(x.flat[0].dtype, x[0].dtype)
- def test_copy_detection_corner_case(self, level=rlevel):
+ def test_copy_detection_corner_case(self):
# Ticket #658
np.indices((0, 3, 4)).T.reshape(-1, 3)
@@ -874,13 +873,13 @@ class TestRegression(TestCase):
# With NPY_RELAXED_STRIDES_CHECKING the test becomes superfluous,
# 0-sized reshape itself is tested elsewhere.
@dec.skipif(np.ones(1).strides[0] == np.iinfo(np.intp).max)
- def test_copy_detection_corner_case2(self, level=rlevel):
+ def test_copy_detection_corner_case2(self):
# Ticket #771: strides are not set correctly when reshaping 0-sized
# arrays
b = np.indices((0, 3, 4)).T.reshape(-1, 3)
assert_equal(b.strides, (3 * b.itemsize, b.itemsize))
- def test_object_array_refcounting(self, level=rlevel):
+ def test_object_array_refcounting(self):
# Ticket #633
if not hasattr(sys, 'getrefcount'):
return
@@ -983,7 +982,7 @@ class TestRegression(TestCase):
del tmp # Avoid pyflakes unused variable warning
- def test_mem_custom_float_to_array(self, level=rlevel):
+ def test_mem_custom_float_to_array(self):
# Ticket 702
class MyFloat(object):
def __float__(self):
@@ -992,7 +991,7 @@ class TestRegression(TestCase):
tmp = np.atleast_1d([MyFloat()])
tmp.astype(float) # Should succeed
- def test_object_array_refcount_self_assign(self, level=rlevel):
+ def test_object_array_refcount_self_assign(self):
# Ticket #711
class VictimObject(object):
deleted = False
@@ -1009,23 +1008,23 @@ class TestRegression(TestCase):
arr[:] = arr # trying to induce a segfault by doing it again...
assert_(not arr[0].deleted)
- def test_mem_fromiter_invalid_dtype_string(self, level=rlevel):
+ def test_mem_fromiter_invalid_dtype_string(self):
x = [1, 2, 3]
- self.assertRaises(ValueError,
+ assert_raises(ValueError,
np.fromiter, [xi for xi in x], dtype='S')
- def test_reduce_big_object_array(self, level=rlevel):
+ def test_reduce_big_object_array(self):
# Ticket #713
oldsize = np.setbufsize(10*16)
a = np.array([None]*161, object)
assert_(not np.any(a))
np.setbufsize(oldsize)
- def test_mem_0d_array_index(self, level=rlevel):
+ def test_mem_0d_array_index(self):
# Ticket #714
np.zeros(10)[np.array(0)]
- def test_floats_from_string(self, level=rlevel):
+ def test_floats_from_string(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
fdouble = np.double('1.234')
@@ -1034,7 +1033,7 @@ class TestRegression(TestCase):
assert_almost_equal(fdouble, 1.234)
assert_almost_equal(flongdouble, 1.234)
- def test_nonnative_endian_fill(self, level=rlevel):
+ def test_nonnative_endian_fill(self):
# Non-native endian arrays were incorrectly filled with scalars
# before r5034.
if sys.byteorder == 'little':
@@ -1045,7 +1044,7 @@ class TestRegression(TestCase):
x.fill(1)
assert_equal(x, np.array([1], dtype=dtype))
- def test_dot_alignment_sse2(self, level=rlevel):
+ def test_dot_alignment_sse2(self):
# Test for ticket #551, changeset r5140
x = np.zeros((30, 40))
y = pickle.loads(pickle.dumps(x))
@@ -1054,7 +1053,7 @@ class TestRegression(TestCase):
# This shouldn't cause a segmentation fault:
np.dot(z, y)
- def test_astype_copy(self, level=rlevel):
+ def test_astype_copy(self):
# Ticket #788, changeset r5155
# The test data file was generated by scipy.io.savemat.
# The dtype is float64, but the isbuiltin attribute is 0.
@@ -1072,7 +1071,7 @@ class TestRegression(TestCase):
assert_((xp.__array_interface__['data'][0] !=
xpd.__array_interface__['data'][0]))
- def test_compress_small_type(self, level=rlevel):
+ def test_compress_small_type(self):
# Ticket #789, changeset 5217.
# compress with out argument segfaulted if cannot cast safely
import numpy as np
@@ -1086,7 +1085,7 @@ class TestRegression(TestCase):
except TypeError:
pass
- def test_attributes(self, level=rlevel):
+ def test_attributes(self):
# Ticket #791
class TestArray(np.ndarray):
def __new__(cls, data, info):
@@ -1158,7 +1157,7 @@ class TestRegression(TestCase):
assert_(type(dat.nonzero()[0]) is np.ndarray)
assert_(type(dat.nonzero()[1]) is np.ndarray)
- def test_recarray_tolist(self, level=rlevel):
+ def test_recarray_tolist(self):
# Ticket #793, changeset r5215
# Comparisons fail for NaN, so we can't use random memory
# for the test.
@@ -1173,12 +1172,12 @@ class TestRegression(TestCase):
a = np.arange(5)
assert_raises(ValueError, a.item)
- def test_char_array_creation(self, level=rlevel):
+ def test_char_array_creation(self):
a = np.array('123', dtype='c')
b = np.array([b'1', b'2', b'3'])
assert_equal(a, b)
- def test_unaligned_unicode_access(self, level=rlevel):
+ def test_unaligned_unicode_access(self):
# Ticket #825
for i in range(1, 9):
msg = 'unicode offset: %d chars' % i
@@ -1189,7 +1188,7 @@ class TestRegression(TestCase):
else:
assert_equal(str(x), "[('a', u'b')]", err_msg=msg)
- def test_sign_for_complex_nan(self, level=rlevel):
+ def test_sign_for_complex_nan(self):
# Ticket 794.
with np.errstate(invalid='ignore'):
C = np.array([-np.inf, -2+1j, 0, 2-1j, np.inf, np.nan])
@@ -1197,7 +1196,7 @@ class TestRegression(TestCase):
want = np.array([-1+0j, -1+0j, 0+0j, 1+0j, 1+0j, np.nan])
assert_equal(have, want)
- def test_for_equal_names(self, level=rlevel):
+ def test_for_equal_names(self):
# Ticket #674
dt = np.dtype([('foo', float), ('bar', float)])
a = np.zeros(10, dt)
@@ -1207,7 +1206,7 @@ class TestRegression(TestCase):
assert_(a.dtype.names[0] == "notfoo")
assert_(a.dtype.names[1] == "bar")
- def test_for_object_scalar_creation(self, level=rlevel):
+ def test_for_object_scalar_creation(self):
# Ticket #816
a = np.object_()
b = np.object_(3)
@@ -1224,18 +1223,18 @@ class TestRegression(TestCase):
def test_array_resize_method_system_error(self):
# Ticket #840 - order should be an invalid keyword.
x = np.array([[0, 1], [2, 3]])
- self.assertRaises(TypeError, x.resize, (2, 2), order='C')
+ assert_raises(TypeError, x.resize, (2, 2), order='C')
- def test_for_zero_length_in_choose(self, level=rlevel):
+ def test_for_zero_length_in_choose(self):
"Ticket #882"
a = np.array(1)
- self.assertRaises(ValueError, lambda x: x.choose([]), a)
+ assert_raises(ValueError, lambda x: x.choose([]), a)
def test_array_ndmin_overflow(self):
"Ticket #947."
- self.assertRaises(ValueError, lambda: np.array([1], ndmin=33))
+ assert_raises(ValueError, lambda: np.array([1], ndmin=33))
- def test_void_scalar_with_titles(self, level=rlevel):
+ def test_void_scalar_with_titles(self):
# No ticket
data = [('john', 4), ('mary', 5)]
dtype1 = [(('source:yy', 'name'), 'O'), (('source:xx', 'id'), int)]
@@ -1308,7 +1307,7 @@ class TestRegression(TestCase):
good = 'Maximum allowed size exceeded'
try:
np.arange(sz)
- self.assertTrue(np.size == sz)
+ assert_(np.size == sz)
except ValueError as e:
if not str(e) == good:
self.fail("Got msg '%s', expected '%s'" % (e, good))
@@ -1360,7 +1359,7 @@ class TestRegression(TestCase):
a = np.ones(100, dtype=np.int8)
b = np.ones(100, dtype=np.int32)
i = np.lexsort((a[::-1], b))
- assert_equal(i, np.arange(100, dtype=np.int))
+ assert_equal(i, np.arange(100, dtype=int))
def test_object_array_to_fixed_string(self):
# Ticket #1235.
@@ -1380,7 +1379,7 @@ class TestRegression(TestCase):
a = np.array([[u'abc', u'\u03a3'],
[u'asdf', u'erw']],
dtype='U')
- self.assertRaises(UnicodeEncodeError, np.array, a, 'S4')
+ assert_raises(UnicodeEncodeError, np.array, a, 'S4')
def test_mixed_string_unicode_array_creation(self):
a = np.array(['1234', u'123'])
@@ -1462,7 +1461,7 @@ class TestRegression(TestCase):
def test_duplicate_title_and_name(self):
# Ticket #1254
dtspec = [(('a', 'a'), 'i'), ('b', 'i')]
- self.assertRaises(ValueError, np.dtype, dtspec)
+ assert_raises(ValueError, np.dtype, dtspec)
def test_signed_integer_division_overflow(self):
# Ticket #1317.
@@ -1471,7 +1470,7 @@ class TestRegression(TestCase):
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, np.int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
test_type(t)
def test_buffer_hashlib(self):
@@ -1491,7 +1490,7 @@ class TestRegression(TestCase):
# Check if log1p is behaving on 32 bit intel systems.
assert_(np.isfinite(np.log1p(np.exp2(-53))))
- def test_fromiter_comparison(self, level=rlevel):
+ def test_fromiter_comparison(self):
a = np.fromiter(list(range(10)), dtype='b')
b = np.fromiter(list(range(10)), dtype='B')
assert_(np.alltrue(a == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])))
@@ -1563,9 +1562,9 @@ class TestRegression(TestCase):
@dec.skipif(not HAS_REFCOUNT, "python has no sys.getrefcount")
def test_take_refcount(self):
# ticket #939
- a = np.arange(16, dtype=np.float)
+ a = np.arange(16, dtype=float)
a.shape = (4, 4)
- lut = np.ones((5 + 3, 4), np.float)
+ lut = np.ones((5 + 3, 4), float)
rgba = np.empty(shape=a.shape + (4,), dtype=lut.dtype)
c1 = sys.getrefcount(rgba)
try:
@@ -1807,7 +1806,7 @@ class TestRegression(TestCase):
a['f2'] = 1
except ValueError:
pass
- except:
+ except Exception:
raise AssertionError
def test_ticket_1608(self):
@@ -2068,8 +2067,8 @@ class TestRegression(TestCase):
assert_equal(arr, arr_cp)
assert_equal(arr.shape, arr_cp.shape)
assert_equal(int(arr), int(arr_cp))
- self.assertTrue(arr is not arr_cp)
- self.assertTrue(isinstance(arr_cp, type(arr)))
+ assert_(arr is not arr_cp)
+ assert_(isinstance(arr_cp, type(arr)))
def test_deepcopy_F_order_object_array(self):
# Ticket #6456.
@@ -2079,13 +2078,13 @@ class TestRegression(TestCase):
arr_cp = copy.deepcopy(arr)
assert_equal(arr, arr_cp)
- self.assertTrue(arr is not arr_cp)
+ assert_(arr is not arr_cp)
# Ensure that we have actually copied the item.
- self.assertTrue(arr[0, 1] is not arr_cp[1, 1])
+ assert_(arr[0, 1] is not arr_cp[1, 1])
# Ensure we are allowed to have references to the same object.
- self.assertTrue(arr[0, 1] is arr[1, 1])
+ assert_(arr[0, 1] is arr[1, 1])
# Check the references hold for the copied objects.
- self.assertTrue(arr_cp[0, 1] is arr_cp[1, 1])
+ assert_(arr_cp[0, 1] is arr_cp[1, 1])
def test_deepcopy_empty_object_array(self):
# Ticket #8536.
@@ -2173,7 +2172,7 @@ class TestRegression(TestCase):
# gh-6250
recordtype = np.dtype([('a', np.float64),
('b', np.int32),
- ('d', (np.str, 5))])
+ ('d', (str, 5))])
# Simple case
a = np.zeros(2, dtype=recordtype)
@@ -2248,5 +2247,19 @@ class TestRegression(TestCase):
else:
assert_(t.__hash__ != None)
+ def test_scalar_copy(self):
+ scalar_types = set(np.sctypeDict.values())
+ values = {
+ np.void: b"a",
+ np.bytes_: b"a",
+ np.unicode_: "a",
+ np.datetime64: "2017-08-25",
+ }
+ for sctype in scalar_types:
+ item = sctype(values.get(sctype, 1))
+ item2 = copy.copy(item)
+ assert_equal(item, item2)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index e8cf7fde0..c5cd266eb 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -5,7 +5,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_
class A(object):
@@ -23,7 +23,7 @@ class B0(np.float64, A):
class C0(B0):
pass
-class TestInherit(TestCase):
+class TestInherit(object):
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
@@ -38,5 +38,41 @@ class TestInherit(TestCase):
y = C0(2.0)
assert_(str(y) == '2.0')
+
+class TestCharacter(object):
+ def test_char_radd(self):
+ # GH issue 9620, reached gentype_add and raise TypeError
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ s = b'def'
+ u = u'def'
+ assert_(np_s.__radd__(np_s) is NotImplemented)
+ assert_(np_s.__radd__(np_u) is NotImplemented)
+ assert_(np_s.__radd__(s) is NotImplemented)
+ assert_(np_s.__radd__(u) is NotImplemented)
+ assert_(np_u.__radd__(np_s) is NotImplemented)
+ assert_(np_u.__radd__(np_u) is NotImplemented)
+ assert_(np_u.__radd__(s) is NotImplemented)
+ assert_(np_u.__radd__(u) is NotImplemented)
+ assert_(s + np_s == b'defabc')
+ assert_(u + np_u == u'defabc')
+
+
+ class Mystr(str, np.generic):
+ # would segfault
+ pass
+
+ ret = s + Mystr('abc')
+ assert_(type(ret) is type(s))
+
+ def test_char_repeat(self):
+ np_s = np.string_('abc')
+ np_u = np.unicode_('abc')
+ np_i = np.int(5)
+ res_np = np_s * np_i
+ res_s = b'abc' * 5
+ assert_(res_np == res_s)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index c76db98f8..cff9f7985 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -6,11 +6,10 @@ import itertools
import operator
import numpy as np
-from numpy.testing.utils import _gen_alignment_data
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_almost_equal, assert_allclose, assert_array_equal, IS_PYPY,
- suppress_warnings
+ suppress_warnings, dec, _gen_alignment_data,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -23,13 +22,13 @@ floating_types = np.floating.__subclasses__()
# This compares scalarmath against ufuncs.
-class TestTypes(TestCase):
- def test_types(self, level=1):
+class TestTypes(object):
+ def test_types(self):
for atype in types:
a = atype(1)
assert_(a == 1, "error with %r: got %r" % (atype, a))
- def test_type_add(self, level=1):
+ def test_type_add(self):
# list of types
for k, atype in enumerate(types):
a_scalar = atype(3)
@@ -49,7 +48,7 @@ class TestTypes(TestCase):
"error with types (%d/'%c' + %d/'%c')" %
(k, np.dtype(atype).char, l, np.dtype(btype).char))
- def test_type_create(self, level=1):
+ def test_type_create(self):
for k, atype in enumerate(types):
a = np.array([1, 2, 3], atype)
b = atype([1, 2, 3])
@@ -62,7 +61,7 @@ class TestTypes(TestCase):
np.add(1, 1)
-class TestBaseMath(TestCase):
+class TestBaseMath(object):
def test_blocked(self):
# test alignments offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
@@ -108,7 +107,7 @@ class TestBaseMath(TestCase):
np.add(d, np.ones_like(d))
-class TestPower(TestCase):
+class TestPower(object):
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
@@ -127,7 +126,7 @@ class TestPower(TestCase):
def test_integers_to_negative_integer_power(self):
# Note that the combination of uint64 with a signed integer
- # has common type np.float. The other combinations should all
+ # has common type np.float64. The other combinations should all
# raise a ValueError for integer ** negative integer.
exp = [np.array(-1, dt)[()] for dt in 'bhilq']
@@ -200,7 +199,7 @@ def _signs(dt):
return (+1, -1)
-class TestModulus(TestCase):
+class TestModulus(object):
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -292,7 +291,7 @@ class TestModulus(TestCase):
assert_(np.isnan(rem), 'dt: %s' % dt)
-class TestComplexDivision(TestCase):
+class TestComplexDivision(object):
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
@@ -364,7 +363,7 @@ class TestComplexDivision(TestCase):
assert_equal(result.imag, ex[1])
-class TestConversion(TestCase):
+class TestConversion(object):
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
@@ -401,9 +400,22 @@ class TestConversion(TestCase):
def test_longdouble_int(self):
# gh-627
x = np.longdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, int, x)
+ assert_equal(len(sup.log), 1)
+
+ @dec.knownfailureif(not IS_PYPY)
+ def test_clongdouble___int__(self):
+ x = np.longdouble(np.inf)
assert_raises(OverflowError, x.__int__)
- x = np.clongdouble(np.inf)
- assert_raises(OverflowError, x.__int__)
+ with suppress_warnings() as sup:
+ sup.record(np.ComplexWarning)
+ x = np.clongdouble(np.inf)
+ assert_raises(OverflowError, x.__int__)
+ self.assertEqual(len(sup.log), 1)
def test_numpy_scalar_relational_operators(self):
# All integer
@@ -468,7 +480,7 @@ class TestConversion(TestCase):
assert_(np.equal(np.datetime64('NaT'), None))
-#class TestRepr(TestCase):
+#class TestRepr(object):
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
@@ -512,7 +524,7 @@ class TestRepr(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(TestCase):
+ class TestSizeOf(object):
def test_equal_nbytes(self):
for type in types:
@@ -524,7 +536,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestMultiply(TestCase):
+class TestMultiply(object):
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
@@ -562,7 +574,7 @@ class TestMultiply(TestCase):
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
-class TestNegative(TestCase):
+class TestNegative(object):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
@@ -576,7 +588,7 @@ class TestNegative(TestCase):
assert_equal(operator.neg(a) + a, 0)
-class TestSubtract(TestCase):
+class TestSubtract(object):
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
@@ -590,7 +602,7 @@ class TestSubtract(TestCase):
assert_equal(operator.sub(a, a), 0)
-class TestAbs(TestCase):
+class TestAbs(object):
def _test_abs_func(self, absfunc):
for tp in floating_types:
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 8d0f27182..7e17e0425 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -5,10 +5,10 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
-class TestRealScalars(TestCase):
+class TestRealScalars(object):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index c1680d181..5c1e569b7 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -4,13 +4,13 @@ import warnings
import numpy as np
from numpy.core import (array, arange, atleast_1d, atleast_2d, atleast_3d,
block, vstack, hstack, newaxis, concatenate, stack)
-from numpy.testing import (TestCase, assert_, assert_raises,
+from numpy.testing import (assert_, assert_raises,
assert_array_equal, assert_equal, run_module_suite,
assert_raises_regex, assert_almost_equal)
from numpy.compat import long
-class TestAtleast1d(TestCase):
+class TestAtleast1d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -51,7 +51,7 @@ class TestAtleast1d(TestCase):
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
-class TestAtleast2d(TestCase):
+class TestAtleast2d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -90,7 +90,7 @@ class TestAtleast2d(TestCase):
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
-class TestAtleast3d(TestCase):
+class TestAtleast3d(object):
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -122,7 +122,7 @@ class TestAtleast3d(TestCase):
assert_array_equal(res, desired)
-class TestHstack(TestCase):
+class TestHstack(object):
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
@@ -151,7 +151,7 @@ class TestHstack(TestCase):
assert_array_equal(res, desired)
-class TestVstack(TestCase):
+class TestVstack(object):
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
@@ -187,7 +187,7 @@ class TestVstack(TestCase):
assert_array_equal(res, desired)
-class TestConcatenate(TestCase):
+class TestConcatenate(object):
def test_exceptions(self):
# test axis must be in bounds
for ndim in [1, 2, 3]:
@@ -208,8 +208,8 @@ class TestConcatenate(TestCase):
np.concatenate((a, b), axis=axis[0]) # OK
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
- a = np.rollaxis(a, -1)
- b = np.rollaxis(b, -1)
+ a = np.moveaxis(a, -1, 0)
+ b = np.moveaxis(b, -1, 0)
axis.append(axis.pop(0))
# No arrays to concatenate raises ValueError
@@ -230,6 +230,12 @@ class TestConcatenate(TestCase):
'0', '1', '2', 'x'])
assert_array_equal(r, d)
+ out = np.zeros(a.size + len(b))
+ r = np.concatenate((a, b), axis=None)
+ rout = np.concatenate((a, b), axis=None, out=out)
+ assert_(out is rout)
+ assert_equal(r, rout)
+
def test_large_concatenate_axis_None(self):
# When no axis is given, concatenate uses flattened versions.
# This also had a bug with many arrays (see gh-5979).
@@ -278,6 +284,34 @@ class TestConcatenate(TestCase):
assert_array_equal(concatenate((a0, a1, a2), -1), res)
assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
+ out = res.copy()
+ rout = concatenate((a0, a1, a2), 2, out=out)
+ assert_(out is rout)
+ assert_equal(res, rout)
+
+ def test_bad_out_shape(self):
+ a = array([1, 2])
+ b = array([3, 4])
+
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
+ assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
+ concatenate((a, b), out=np.empty(4))
+
+ def test_out_dtype(self):
+ out = np.empty(4, np.float32)
+ res = concatenate((array([1, 2]), array([3, 4])), out=out)
+ assert_(out is res)
+
+ out = np.empty(4, np.complex64)
+ res = concatenate((array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+ assert_(out is res)
+
+ # invalid cast
+ out = np.empty(4, np.int32)
+ assert_raises(TypeError, concatenate,
+ (array([0.1, 0.2]), array([0.3, 0.4])), out=out)
+
def test_stack():
# non-iterable input
@@ -333,7 +367,7 @@ def test_stack():
stack, [m, m])
-class TestBlock(TestCase):
+class TestBlock(object):
def test_block_simple_row_wise(self):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 3d6251253..57e0ec272 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1,20 +1,23 @@
from __future__ import division, absolute_import, print_function
+import warnings
+import itertools
+
import numpy as np
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.core.test_rational import rational, test_add, test_add_rationals
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_no_warnings
+ assert_no_warnings, assert_allclose,
)
-class TestUfuncKwargs(TestCase):
+class TestUfuncKwargs(object):
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
- assert_raises(TypeError, np.add, 1, 2, dtypex=np.int)
+ assert_raises(TypeError, np.add, 1, 2, dtypex=int)
assert_raises(TypeError, np.add, 1, 2, extobjx=[4096])
assert_raises(TypeError, np.add, 1, 2, outx=None)
assert_raises(TypeError, np.add, 1, 2, sigx='ii->i')
@@ -28,12 +31,12 @@ class TestUfuncKwargs(TestCase):
def test_sig_dtype(self):
assert_raises(RuntimeError, np.add, 1, 2, sig='ii->i',
- dtype=np.int)
+ dtype=int)
assert_raises(RuntimeError, np.add, 1, 2, signature='ii->i',
- dtype=np.int)
+ dtype=int)
-class TestUfunc(TestCase):
+class TestUfunc(object):
def test_pickle(self):
import pickle
assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
@@ -171,22 +174,22 @@ class TestUfunc(TestCase):
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
- x = np.ones(10, dtype=np.object)[0::2]
+ x = np.ones(10, dtype=object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
- x = np.zeros(10, dtype=np.object)[0::2]
+ x = np.zeros(10, dtype=object)[0::2]
for i in range(len(x)):
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
@@ -353,14 +356,78 @@ class TestUfunc(TestCase):
assert_equal(b, [0, 0, 1])
def test_true_divide(self):
- # True_divide has a non uniform signature, see #3484.
- # This also tests type_tuple_type_resolver.
- a = np.full(5, 12.5)
- b = np.full(5, 10.0)
- tgt = np.full(5, 1.25)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float64), tgt)
- assert_almost_equal(np.true_divide(a, b, dtype=np.float32), tgt)
- assert_raises(TypeError, np.true_divide, a, b, dtype=np.int)
+ a = np.array(10)
+ b = np.array(20)
+ tgt = np.array(0.5)
+
+ for tc in 'bhilqBHILQefdgFDG':
+ dt = np.dtype(tc)
+ aa = a.astype(dt)
+ bb = b.astype(dt)
+
+ # Check result value and dtype.
+ for x, y in itertools.product([aa, -aa], [bb, -bb]):
+
+ # Check with no output type specified
+ if tc in 'FDG':
+ tgt = complex(x)/complex(y)
+ else:
+ tgt = float(x)/float(y)
+
+ res = np.true_divide(x, y)
+ rtol = max(np.finfo(res).resolution, 1e-15)
+ assert_allclose(res, tgt, rtol=rtol)
+
+ if tc in 'bhilqBHILQ':
+ assert_(res.dtype.name == 'float64')
+ else:
+ assert_(res.dtype.name == dt.name )
+
+ # Check with output type specified. This also checks for the
+ # incorrect casts in issue gh-3484 because the unary '-' does
+ # not change types, even for unsigned types, Hence casts in the
+ # ufunc from signed to unsigned and vice versa will lead to
+ # errors in the values.
+ for tcout in 'bhilqBHILQ':
+ dtout = np.dtype(tcout)
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+
+ for tcout in 'efdg':
+ dtout = np.dtype(tcout)
+ if tc in 'FDG':
+ # Casting complex to float is not allowed
+ assert_raises(TypeError, np.true_divide, x, y, dtype=dtout)
+ else:
+ tgt = float(x)/float(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ # Some test values result in invalid for float16.
+ with np.errstate(invalid='ignore'):
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res) and tcout == 'e':
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ for tcout in 'FDG':
+ dtout = np.dtype(tcout)
+ tgt = complex(x)/complex(y)
+ rtol = max(np.finfo(dtout).resolution, 1e-15)
+ atol = max(np.finfo(dtout).tiny, 3e-308)
+ res = np.true_divide(x, y, dtype=dtout)
+ if not np.isfinite(res):
+ continue
+ assert_allclose(res, tgt, rtol=rtol, atol=atol)
+ assert_(res.dtype.name == dtout.name)
+
+ # Check booleans
+ a = np.ones((), dtype=np.bool_)
+ res = np.true_divide(a, a)
+ assert_(res == 1.0)
+ assert_(res.dtype.name == 'float64')
+ res = np.true_divide(~a, a)
+ assert_(res == 0.0)
+ assert_(res.dtype.name == 'float64')
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
@@ -370,13 +437,22 @@ class TestUfunc(TestCase):
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
- for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
+ for dt in (int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
- assert_almost_equal(np.sum(d), tgt)
- assert_almost_equal(np.sum(d[::-1]), tgt)
+
+ # warning if sum overflows, which it does in float16
+ overflow = not np.isfinite(tgt)
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ assert_almost_equal(np.sum(d), tgt)
+ assert_equal(len(w), 1 * overflow)
+
+ assert_almost_equal(np.sum(d[::-1]), tgt)
+ assert_equal(len(w), 2 * overflow)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
@@ -603,7 +679,7 @@ class TestUfunc(TestCase):
assert_equal(ref, True, err_msg="reference check")
def test_euclidean_pdist(self):
- a = np.arange(12, dtype=np.float).reshape(4, 3)
+ a = np.arange(12, dtype=float).reshape(4, 3)
out = np.empty((a.shape[0] * (a.shape[0] - 1) // 2,), dtype=a.dtype)
umt.euclidean_pdist(a, out)
b = np.sqrt(np.sum((a[:, None] - a)**2, axis=-1))
@@ -784,6 +860,17 @@ class TestUfunc(TestCase):
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
+ def test_where_param_alloc(self):
+ # With casting and allocated output
+ a = np.array([1], dtype=np.int64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
+ # No casting and allocated output
+ a = np.array([1], dtype=np.float64)
+ m = np.array([True], dtype=bool)
+ assert_equal(np.sqrt(a, where=m), [1])
+
def check_identityless_reduction(self, a):
# np.minimum.reduce is a identityless reduction
@@ -1000,6 +1087,11 @@ class TestUfunc(TestCase):
dtype=rational)
assert_equal(result, expected)
+ def test_custom_ufunc_forced_sig(self):
+ # gh-9351 - looking for a non-first userloop would previously hang
+ assert_raises(TypeError,
+ np.multiply, rational(1), 1, signature=(rational, int, None))
+
def test_custom_array_like(self):
class MyThing(object):
@@ -1162,9 +1254,9 @@ class TestUfunc(TestCase):
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
- values = np.array(['a', 1], dtype=np.object)
- self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
- assert_array_equal(values, np.array(['a', 1], dtype=np.object))
+ values = np.array(['a', 1], dtype=object)
+ assert_raises(TypeError, np.add.at, values, [0, 1], 1)
+ assert_array_equal(values, np.array(['a', 1], dtype=object))
# Test multiple output ufuncs raise error, gh-5665
assert_raises(ValueError, np.modf.at, np.arange(10), [1])
@@ -1283,6 +1375,10 @@ class TestUfunc(TestCase):
assert_equal(y_base[1,:], y_base_copy[1,:])
assert_equal(y_base[3,:], y_base_copy[3,:])
+ def test_no_doc_string(self):
+ # gh-9337
+ assert_('\n' not in umt.inner1d_no_doc.__doc__)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 13f29504a..5787a5183 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -6,15 +6,14 @@ import warnings
import fnmatch
import itertools
-from numpy.testing.utils import _gen_alignment_data
import numpy.core.umath as ncu
from numpy.core import umath_tests as ncu_tests
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises,
+ run_module_suite, assert_, assert_equal, assert_raises,
assert_raises_regex, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, dec, assert_allclose, assert_no_warnings,
- suppress_warnings
+ suppress_warnings, _gen_alignment_data,
)
@@ -32,7 +31,7 @@ class _FilterInvalids(object):
np.seterr(**self.olderr)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
@@ -43,7 +42,7 @@ class TestConstants(TestCase):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
-class TestOut(TestCase):
+class TestOut(object):
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
@@ -176,7 +175,7 @@ class TestOut(TestCase):
assert_(w[0].category is DeprecationWarning)
-class TestComparisons(TestCase):
+class TestComparisons(object):
def test_ignore_object_identity_in_equal(self):
# Check error raised when comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
@@ -214,7 +213,7 @@ class TestComparisons(TestCase):
assert_equal(np.not_equal(a, a), [True])
-class TestDivision(TestCase):
+class TestDivision(object):
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
@@ -275,7 +274,7 @@ def _signs(dt):
return (+1, -1)
-class TestRemainder(TestCase):
+class TestRemainder(object):
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -366,7 +365,7 @@ class TestRemainder(TestCase):
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
-class TestCbrt(TestCase):
+class TestCbrt(object):
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
@@ -379,7 +378,7 @@ class TestCbrt(TestCase):
assert_equal(np.cbrt(-np.inf), -np.inf)
-class TestPower(TestCase):
+class TestPower(object):
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
@@ -518,7 +517,7 @@ class TestPower(TestCase):
assert_raises(ValueError, np.power, one, minusone)
-class TestFloat_power(TestCase):
+class TestFloat_power(object):
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
@@ -529,7 +528,7 @@ class TestFloat_power(TestCase):
assert_(res.dtype.name == np.dtype(dtout).name, msg)
-class TestLog2(TestCase):
+class TestLog2(object):
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -560,7 +559,7 @@ class TestLog2(TestCase):
assert_(w[2].category is RuntimeWarning)
-class TestExp2(TestCase):
+class TestExp2(object):
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -612,7 +611,7 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
-class TestLog(TestCase):
+class TestLog(object):
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -623,7 +622,7 @@ class TestLog(TestCase):
assert_almost_equal(np.log(xf), yf)
-class TestExp(TestCase):
+class TestExp(object):
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -675,7 +674,7 @@ class TestLogAddExp(_FilterInvalids):
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
-class TestLog1p(TestCase):
+class TestLog1p(object):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
@@ -689,7 +688,7 @@ class TestLog1p(TestCase):
assert_equal(ncu.log1p(-np.inf), np.nan)
-class TestExpm1(TestCase):
+class TestExpm1(object):
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
@@ -702,7 +701,7 @@ class TestExpm1(TestCase):
assert_equal(ncu.expm1(-np.inf), -1.)
-class TestHypot(TestCase, object):
+class TestHypot(object):
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
@@ -726,7 +725,7 @@ def assert_hypot_isinf(x, y):
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
-class TestHypotSpecialValues(TestCase):
+class TestHypotSpecialValues(object):
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
@@ -763,7 +762,7 @@ def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
-class TestArctan2SpecialValues(TestCase):
+class TestArctan2SpecialValues(object):
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
@@ -832,7 +831,7 @@ class TestArctan2SpecialValues(TestCase):
assert_arctan2_isnan(np.nan, np.nan)
-class TestLdexp(TestCase):
+class TestLdexp(object):
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
@@ -898,22 +897,22 @@ class TestMaximum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.maximum(x, y) == 1.0)
assert_(np.maximum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.maximum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.maximum(arg1, arg2), arg2)
@@ -956,22 +955,22 @@ class TestMinimum(_FilterInvalids):
# fail if cmp is used instead of rich compare.
# Failure cannot be guaranteed.
for i in range(1):
- x = np.array(float('nan'), np.object)
+ x = np.array(float('nan'), object)
y = 1.0
- z = np.array(float('nan'), np.object)
+ z = np.array(float('nan'), object)
assert_(np.minimum(x, y) == 1.0)
assert_(np.minimum(z, y) == 1.0)
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([nan, nan, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([nan, nan, nan], dtype=complex)
assert_equal(np.minimum(arg1, arg2), out)
def test_object_array(self):
- arg1 = np.arange(5, dtype=np.object)
+ arg1 = np.arange(5, dtype=object)
arg2 = arg1 + 1
assert_equal(np.minimum(arg1, arg2), arg1)
@@ -1012,9 +1011,9 @@ class TestFmax(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmax(arg1, arg2), out)
@@ -1054,13 +1053,13 @@ class TestFmin(_FilterInvalids):
def test_complex_nans(self):
nan = np.nan
for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
- arg1 = np.array([0, cnan, cnan], dtype=np.complex)
- arg2 = np.array([cnan, 0, cnan], dtype=np.complex)
- out = np.array([0, 0, nan], dtype=np.complex)
+ arg1 = np.array([0, cnan, cnan], dtype=complex)
+ arg2 = np.array([cnan, 0, cnan], dtype=complex)
+ out = np.array([0, 0, nan], dtype=complex)
assert_equal(np.fmin(arg1, arg2), out)
-class TestBool(TestCase):
+class TestBool(object):
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
@@ -1123,7 +1122,7 @@ class TestBool(TestCase):
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
-class TestBitwiseUFuncs(TestCase):
+class TestBitwiseUFuncs(object):
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
@@ -1208,10 +1207,10 @@ class TestBitwiseUFuncs(TestCase):
assert_(type(f.reduce(btype)) is bool, msg)
-class TestInt(TestCase):
+class TestInt(object):
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
- o = np.ones(10 * 2, dtype=np.bool)
+ o = np.ones(10 * 2, dtype=bool)
tgt = o.copy()
tgt[::2] = False
os = o[::2]
@@ -1219,24 +1218,24 @@ class TestInt(TestCase):
assert_array_equal(o, tgt)
-class TestFloatingPoint(TestCase):
+class TestFloatingPoint(object):
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-class TestDegrees(TestCase):
+class TestDegrees(object):
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-class TestRadians(TestCase):
+class TestRadians(object):
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-class TestHeavside(TestCase):
+class TestHeavside(object):
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
@@ -1258,7 +1257,7 @@ class TestHeavside(TestCase):
assert_equal(h, expected1.astype(np.float32))
-class TestSign(TestCase):
+class TestSign(object):
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
@@ -1275,7 +1274,7 @@ class TestSign(TestCase):
# In reference to github issue #6229
foo = np.array([-.1, 0, .1])
- a = np.sign(foo.astype(np.object))
+ a = np.sign(foo.astype(object))
b = np.sign(foo)
assert_array_equal(a, b)
@@ -1284,11 +1283,11 @@ class TestSign(TestCase):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
- a = np.sign(foo.astype(np.object))
+ a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
-class TestMinMax(TestCase):
+class TestMinMax(object):
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
@@ -1299,8 +1298,11 @@ class TestMinMax(TestCase):
inp[:] = np.arange(inp.size, dtype=dt)
inp[i] = np.nan
emsg = lambda: '%r\n%s' % (inp, msg)
- assert_(np.isnan(inp.max()), msg=emsg)
- assert_(np.isnan(inp.min()), msg=emsg)
+ with suppress_warnings() as sup:
+ sup.filter(RuntimeWarning,
+ "invalid value encountered in reduce")
+ assert_(np.isnan(inp.max()), msg=emsg)
+ assert_(np.isnan(inp.min()), msg=emsg)
inp[i] = 1e10
assert_equal(inp.max(), 1e10, err_msg=msg)
@@ -1315,7 +1317,7 @@ class TestMinMax(TestCase):
assert_equal(d.min(), d[0])
-class TestAbsoluteNegative(TestCase):
+class TestAbsoluteNegative(object):
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
@@ -1324,7 +1326,7 @@ class TestAbsoluteNegative(TestCase):
tgt = [ncu.absolute(i) for i in inp]
np.absolute(inp, out=out)
assert_equal(out, tgt, err_msg=msg)
- self.assertTrue((out >= 0).all())
+ assert_((out >= 0).all())
tgt = [-1*(i) for i in inp]
np.negative(inp, out=out)
@@ -1357,7 +1359,7 @@ class TestAbsoluteNegative(TestCase):
np.abs(np.ones_like(d), out=d)
-class TestPositive(TestCase):
+class TestPositive(object):
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
@@ -1376,7 +1378,7 @@ class TestPositive(TestCase):
np.positive(np.array(['bar'], dtype=object))
-class TestSpecialMethods(TestCase):
+class TestSpecialMethods(object):
def test_wrap(self):
class with_wrap(object):
@@ -1393,11 +1395,11 @@ class TestSpecialMethods(TestCase):
x = ncu.minimum(a, a)
assert_equal(x.arr, np.zeros(1))
func, args, i = x.context
- self.assertTrue(func is ncu.minimum)
- self.assertEqual(len(args), 2)
+ assert_(func is ncu.minimum)
+ assert_equal(len(args), 2)
assert_equal(args[0], a)
assert_equal(args[1], a)
- self.assertEqual(i, 0)
+ assert_equal(i, 0)
def test_wrap_with_iterable(self):
# test fix for bug #1026:
@@ -1413,7 +1415,7 @@ class TestSpecialMethods(TestCase):
a = with_wrap()
x = ncu.multiply(a, (1, 2, 3))
- self.assertTrue(isinstance(x, with_wrap))
+ assert_(isinstance(x, with_wrap))
assert_array_equal(x, np.array((1, 2, 3)))
def test_priority_with_scalar(self):
@@ -1427,7 +1429,7 @@ class TestSpecialMethods(TestCase):
a = A()
x = np.float64(1)*a
- self.assertTrue(isinstance(x, A))
+ assert_(isinstance(x, A))
assert_array_equal(x, np.array(1))
def test_old_wrap(self):
@@ -1468,25 +1470,25 @@ class TestSpecialMethods(TestCase):
b = B()
c = C()
f = ncu.minimum
- self.assertTrue(type(f(x, x)) is np.ndarray)
- self.assertTrue(type(f(x, a)) is A)
- self.assertTrue(type(f(x, b)) is B)
- self.assertTrue(type(f(x, c)) is C)
- self.assertTrue(type(f(a, x)) is A)
- self.assertTrue(type(f(b, x)) is B)
- self.assertTrue(type(f(c, x)) is C)
-
- self.assertTrue(type(f(a, a)) is A)
- self.assertTrue(type(f(a, b)) is B)
- self.assertTrue(type(f(b, a)) is B)
- self.assertTrue(type(f(b, b)) is B)
- self.assertTrue(type(f(b, c)) is C)
- self.assertTrue(type(f(c, b)) is C)
- self.assertTrue(type(f(c, c)) is C)
-
- self.assertTrue(type(ncu.exp(a) is A))
- self.assertTrue(type(ncu.exp(b) is B))
- self.assertTrue(type(ncu.exp(c) is C))
+ assert_(type(f(x, x)) is np.ndarray)
+ assert_(type(f(x, a)) is A)
+ assert_(type(f(x, b)) is B)
+ assert_(type(f(x, c)) is C)
+ assert_(type(f(a, x)) is A)
+ assert_(type(f(b, x)) is B)
+ assert_(type(f(c, x)) is C)
+
+ assert_(type(f(a, a)) is A)
+ assert_(type(f(a, b)) is B)
+ assert_(type(f(b, a)) is B)
+ assert_(type(f(b, b)) is B)
+ assert_(type(f(b, c)) is C)
+ assert_(type(f(c, b)) is C)
+ assert_(type(f(c, c)) is C)
+
+ assert_(type(ncu.exp(a) is A))
+ assert_(type(ncu.exp(b) is B))
+ assert_(type(ncu.exp(c) is C))
def test_failing_wrap(self):
@@ -1498,7 +1500,7 @@ class TestSpecialMethods(TestCase):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
@@ -1569,7 +1571,7 @@ class TestSpecialMethods(TestCase):
raise RuntimeError
a = A()
- self.assertRaises(RuntimeError, ncu.maximum, a, a)
+ assert_raises(RuntimeError, ncu.maximum, a, a)
def test_array_with_context(self):
@@ -1591,10 +1593,10 @@ class TestSpecialMethods(TestCase):
a = A()
ncu.maximum(np.zeros(1), a)
- self.assertTrue(a.func is ncu.maximum)
+ assert_(a.func is ncu.maximum)
assert_equal(a.args[0], 0)
- self.assertTrue(a.args[1] is a)
- self.assertTrue(a.i == 1)
+ assert_(a.args[1] is a)
+ assert_(a.i == 1)
assert_equal(ncu.maximum(a, B()), 0)
assert_equal(ncu.maximum(a, C()), 0)
@@ -1755,15 +1757,18 @@ class TestSpecialMethods(TestCase):
'keepdims': 'keep0',
'axis': 'axis0'})
- # reduce, output equal to None removed.
- res = np.multiply.reduce(a, out=None)
- assert_equal(res[4], {})
- res = np.multiply.reduce(a, out=(None,))
- assert_equal(res[4], {})
+ # reduce, output equal to None removed, but not other explicit ones,
+ # even if they are at their default value.
+ res = np.multiply.reduce(a, 0, None, None, False)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
+ res = np.multiply.reduce(a, out=None, axis=0, keepdims=True)
+ assert_equal(res[4], {'axis': 0, 'keepdims': True})
+ res = np.multiply.reduce(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# reduce, wrong args
- assert_raises(TypeError, np.multiply.reduce, a, out=())
- assert_raises(TypeError, np.multiply.reduce, a, out=('out0', 'out1'))
+ assert_raises(ValueError, np.multiply.reduce, a, out=())
+ assert_raises(ValueError, np.multiply.reduce, a, out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, 'axis0', axis='axis0')
# accumulate, pos args
@@ -1788,14 +1793,16 @@ class TestSpecialMethods(TestCase):
'axis': 'axis0'})
# accumulate, output equal to None removed.
- res = np.multiply.accumulate(a, out=None)
- assert_equal(res[4], {})
- res = np.multiply.accumulate(a, out=(None,))
- assert_equal(res[4], {})
+ res = np.multiply.accumulate(a, 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.accumulate(a, out=None, axis=0, dtype='dtype1')
+ assert_equal(res[4], {'axis': 0, 'dtype': 'dtype1'})
+ res = np.multiply.accumulate(a, None, out=(None,), dtype=None)
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# accumulate, wrong args
- assert_raises(TypeError, np.multiply.accumulate, a, out=())
- assert_raises(TypeError, np.multiply.accumulate, a,
+ assert_raises(ValueError, np.multiply.accumulate, a, out=())
+ assert_raises(ValueError, np.multiply.accumulate, a,
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.accumulate, a,
'axis0', axis='axis0')
@@ -1822,14 +1829,16 @@ class TestSpecialMethods(TestCase):
'axis': 'axis0'})
# reduceat, output equal to None removed.
- res = np.multiply.reduceat(a, [4, 2], out=None)
- assert_equal(res[4], {})
- res = np.multiply.reduceat(a, [4, 2], out=(None,))
- assert_equal(res[4], {})
+ res = np.multiply.reduceat(a, [4, 2], 0, None, None)
+ assert_equal(res[4], {'axis': 0, 'dtype': None})
+ res = np.multiply.reduceat(a, [4, 2], axis=None, out=None, dtype='dt')
+ assert_equal(res[4], {'axis': None, 'dtype': 'dt'})
+ res = np.multiply.reduceat(a, [4, 2], None, None, out=(None,))
+ assert_equal(res[4], {'axis': None, 'dtype': None})
# reduceat, wrong args
- assert_raises(TypeError, np.multiply.reduce, a, [4, 2], out=())
- assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2], out=())
+ assert_raises(ValueError, np.multiply.reduce, a, [4, 2],
out=('out0', 'out1'))
assert_raises(TypeError, np.multiply.reduce, a, [4, 2],
'axis0', axis='axis0')
@@ -1907,12 +1916,12 @@ class TestSpecialMethods(TestCase):
# wrong number of arguments in the tuple is an error too.
assert_raises(TypeError, np.multiply, a, b, 'one', out='two')
assert_raises(TypeError, np.multiply, a, b, 'one', 'two')
- assert_raises(TypeError, np.multiply, a, b, out=('one', 'two'))
- assert_raises(TypeError, np.multiply, a, out=())
+ assert_raises(ValueError, np.multiply, a, b, out=('one', 'two'))
+ assert_raises(ValueError, np.multiply, a, out=())
assert_raises(TypeError, np.modf, a, 'one', out=('two', 'three'))
assert_raises(TypeError, np.modf, a, 'one', 'two', 'three')
- assert_raises(TypeError, np.modf, a, out=('one', 'two', 'three'))
- assert_raises(TypeError, np.modf, a, out=('one',))
+ assert_raises(ValueError, np.modf, a, out=('one', 'two', 'three'))
+ assert_raises(ValueError, np.modf, a, out=('one',))
def test_ufunc_override_exception(self):
@@ -1931,14 +1940,14 @@ class TestSpecialMethods(TestCase):
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
- msg = ("operand type(s) do not implement __array_ufunc__("
- "<ufunc 'negative'>, '__call__', <*>): 'A'")
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'negative'>, '__call__', <*>): 'A'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.negative(A())
- msg = ("operand type(s) do not implement __array_ufunc__("
- "<ufunc 'add'>, '__call__', <*>, <object *>, out=(1,)): "
- "'A', 'object', 'int'")
+ msg = ("operand type(s) all returned NotImplemented from "
+ "__array_ufunc__(<ufunc 'add'>, '__call__', <*>, <object *>, "
+ "out=(1,)): 'A', 'object', 'int'")
with assert_raises_regex(TypeError, fnmatch.translate(msg)):
np.add(A(), object(), out=1)
@@ -1999,11 +2008,12 @@ class TestSpecialMethods(TestCase):
assert_raises(TypeError, inner1d, a, out='two')
assert_raises(TypeError, inner1d, a, a, 'one', out='two')
assert_raises(TypeError, inner1d, a, a, 'one', 'two')
- assert_raises(TypeError, inner1d, a, a, out=('one', 'two'))
- assert_raises(TypeError, inner1d, a, a, out=())
+ assert_raises(ValueError, inner1d, a, a, out=('one', 'two'))
+ assert_raises(ValueError, inner1d, a, a, out=())
def test_ufunc_override_with_super(self):
-
+ # NOTE: this class is given as an example in doc/subclassing.py;
+ # if you make any changes here, do update it there too.
class A(np.ndarray):
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
args = []
@@ -2041,6 +2051,8 @@ class TestSpecialMethods(TestCase):
return NotImplemented
if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
return
if ufunc.nout == 1:
@@ -2107,9 +2119,73 @@ class TestSpecialMethods(TestCase):
assert_(a.__array_ufunc__(np.add, '__call__', a, b) is NotImplemented)
assert_(b.__array_ufunc__(np.add, '__call__', a, b) == "A!")
assert_(np.add(a, b) == "A!")
-
-
-class TestChoose(TestCase):
+ # regression check for gh-9102 -- tests ufunc.reduce implicitly.
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ a = d.view(A)
+ c = a.any()
+ check = d.any()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ c = a.max()
+ check = d.max()
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.array(0).view(A)
+ c = a.max(out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = a.max(axis=0)
+ b = np.zeros_like(check).view(A)
+ c = a.max(axis=0, out=b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # simple explicit tests of reduce, accumulate, reduceat
+ check = np.add.reduce(d, axis=1)
+ c = np.add.reduce(a, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduce(a, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ check = np.add.accumulate(d, axis=0)
+ c = np.add.accumulate(a, axis=0)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.accumulate(a, 0, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ indices = [0, 2, 1]
+ check = np.add.reduceat(d, indices, axis=1)
+ c = np.add.reduceat(a, indices, axis=1)
+ assert_equal(c, check)
+ assert_(c.info, {'inputs': [0]})
+ b = np.zeros_like(c)
+ c = np.add.reduceat(a, indices, 1, None, b)
+ assert_equal(c, check)
+ assert_(c is b)
+ assert_(c.info, {'inputs': [0], 'outputs': [0]})
+ # and a few tests for at
+ d = np.array([[1, 2, 3], [1, 2, 3]])
+ check = d.copy()
+ a = d.copy().view(A)
+ np.add.at(check, ([0, 1], [0, 2]), 1.)
+ np.add.at(a, ([0, 1], [0, 2]), 1.)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0]})
+ b = np.array(1.).view(A)
+ a = d.copy().view(A)
+ np.add.at(a, ([0, 1], [0, 2]), b)
+ assert_equal(a, check)
+ assert_(a.info, {'inputs': [0, 2]})
+
+
+class TestChoose(object):
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
@@ -2134,7 +2210,7 @@ class TestComplexFunctions(object):
else:
x = .5
fr = f(x)
- fz = f(np.complex(x))
+ fz = f(complex(x))
assert_almost_equal(fz.real, fr, err_msg='real part %s' % f)
assert_almost_equal(fz.imag, 0., err_msg='imag part %s' % f)
@@ -2203,7 +2279,7 @@ class TestComplexFunctions(object):
points = [-1-1j, -1+1j, +1-1j, +1+1j]
name_map = {'arcsin': 'asin', 'arccos': 'acos', 'arctan': 'atan',
'arcsinh': 'asinh', 'arccosh': 'acosh', 'arctanh': 'atanh'}
- atol = 4*np.finfo(np.complex).eps
+ atol = 4*np.finfo(complex).eps
for func in self.funcs:
fname = func.__name__.split('.')[-1]
cname = name_map.get(fname, fname)
@@ -2310,12 +2386,12 @@ class TestComplexFunctions(object):
self.check_loss_of_precision(np.longcomplex)
-class TestAttributes(TestCase):
+class TestAttributes(object):
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
- self.assertTrue(add.ntypes >= 18) # don't fail if types added
- self.assertTrue('ii->i' in add.types)
+ assert_(add.ntypes >= 18) # don't fail if types added
+ assert_('ii->i' in add.types)
assert_equal(add.nin, 2)
assert_equal(add.nout, 1)
assert_equal(add.identity, 0)
@@ -2329,7 +2405,7 @@ class TestAttributes(TestCase):
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-class TestSubclass(TestCase):
+class TestSubclass(object):
def test_subclass_op(self):
@@ -2343,7 +2419,7 @@ class TestSubclass(TestCase):
assert_equal(a+a, a)
def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
- dtype=np.complex):
+ dtype=complex):
"""
Check for a branch cut in a function.
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 536ad398a..fb3b6577c 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -6,7 +6,7 @@ import platform
import numpy as np
import numpy.core.umath as ncu
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
+ run_module_suite, assert_raises, assert_equal, assert_array_equal,
assert_almost_equal, dec
)
@@ -38,7 +38,7 @@ class TestCexp(object):
yield check, f, 1, 0, np.exp(1), 0, False
yield check, f, 0, 1, np.cos(1), np.sin(1), False
- ref = np.exp(1) * np.complex(np.cos(1), np.sin(1))
+ ref = np.exp(1) * complex(np.cos(1), np.sin(1))
yield check, f, 1, 1, ref.real, ref.imag, False
@platform_skip
@@ -73,7 +73,7 @@ class TestCexp(object):
def _check_ninf_inf(dummy):
msgform = "cexp(-inf, inf) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.inf)))
+ z = f(np.array(complex(-np.inf, np.inf)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
@@ -83,7 +83,7 @@ class TestCexp(object):
def _check_inf_inf(dummy):
msgform = "cexp(inf, inf) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.inf)))
+ z = f(np.array(complex(np.inf, np.inf)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
@@ -93,7 +93,7 @@ class TestCexp(object):
def _check_ninf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-0, +-0)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(-np.inf, np.nan)))
+ z = f(np.array(complex(-np.inf, np.nan)))
if z.real != 0 or z.imag != 0:
raise AssertionError(msgform % (z.real, z.imag))
@@ -103,7 +103,7 @@ class TestCexp(object):
def _check_inf_nan(dummy):
msgform = "cexp(-inf, nan) is (%f, %f), expected (+-inf, nan)"
with np.errstate(invalid='ignore'):
- z = f(np.array(np.complex(np.inf, np.nan)))
+ z = f(np.array(complex(np.inf, np.nan)))
if not np.isinf(z.real) or not np.isnan(z.imag):
raise AssertionError(msgform % (z.real, z.imag))
@@ -129,7 +129,7 @@ class TestCexp(object):
yield check, f, np.nan, 0, np.nan, 0
-class TestClog(TestCase):
+class TestClog(object):
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
@@ -150,9 +150,9 @@ class TestClog(TestCase):
# clog(-0 + i0) returns -inf + i pi and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([np.NZERO], dtype=np.complex)
- y = np.complex(-np.inf, np.pi)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.NZERO], dtype=complex)
+ y = complex(-np.inf, np.pi)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -162,9 +162,9 @@ class TestClog(TestCase):
# clog(+0 + i0) returns -inf + i0 and raises the 'divide-by-zero'
# floating-point exception.
with np.errstate(divide='raise'):
- x = np.array([0], dtype=np.complex)
- y = np.complex(-np.inf, 0)
- self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([0], dtype=complex)
+ y = complex(-np.inf, 0)
+ assert_raises(FloatingPointError, np.log, x)
with np.errstate(divide='ignore'):
assert_almost_equal(np.log(x), y)
@@ -172,13 +172,13 @@ class TestClog(TestCase):
yl.append(y)
# clog(x + i inf returns +inf + i pi /2, for finite x.
- x = np.array([complex(1, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.5 * np.pi)
+ x = np.array([complex(1, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.5 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-1, np.inf)], dtype=np.complex)
+ x = np.array([complex(-1, np.inf)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
@@ -186,9 +186,9 @@ class TestClog(TestCase):
# clog(x + iNaN) returns NaN + iNaN and optionally raises the
# 'invalid' floating- point exception, for finite x.
with np.errstate(invalid='raise'):
- x = np.array([complex(1., np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([complex(1., np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -196,8 +196,8 @@ class TestClog(TestCase):
yl.append(y)
with np.errstate(invalid='raise'):
- x = np.array([np.inf + 1j * np.nan], dtype=np.complex)
- #self.assertRaises(FloatingPointError, np.log, x)
+ x = np.array([np.inf + 1j * np.nan], dtype=complex)
+ #assert_raises(FloatingPointError, np.log, x)
with np.errstate(invalid='ignore'):
assert_almost_equal(np.log(x), y)
@@ -205,70 +205,70 @@ class TestClog(TestCase):
yl.append(y)
# clog(- inf + iy) returns +inf + ipi , for finite positive-signed y.
- x = np.array([-np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, np.pi)
+ x = np.array([-np.inf + 1j], dtype=complex)
+ y = complex(np.inf, np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + iy) returns +inf + i0, for finite positive-signed y.
- x = np.array([np.inf + 1j], dtype=np.complex)
- y = np.complex(np.inf, 0)
+ x = np.array([np.inf + 1j], dtype=complex)
+ y = complex(np.inf, 0)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(- inf + i inf) returns +inf + i3pi /4.
- x = np.array([complex(-np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.75 * np.pi)
+ x = np.array([complex(-np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.75 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+ inf + i inf) returns +inf + ipi /4.
- x = np.array([complex(np.inf, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, 0.25 * np.pi)
+ x = np.array([complex(np.inf, np.inf)], dtype=complex)
+ y = complex(np.inf, 0.25 * np.pi)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(+/- inf + iNaN) returns +inf + iNaN.
- x = np.array([complex(np.inf, np.nan)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.inf, np.nan)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
- x = np.array([complex(-np.inf, np.nan)], dtype=np.complex)
+ x = np.array([complex(-np.inf, np.nan)], dtype=complex)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iy) returns NaN + iNaN and optionally raises the
# 'invalid' floating-point exception, for finite y.
- x = np.array([complex(np.nan, 1)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, 1)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + i inf) returns +inf + iNaN.
- x = np.array([complex(np.nan, np.inf)], dtype=np.complex)
- y = np.complex(np.inf, np.nan)
+ x = np.array([complex(np.nan, np.inf)], dtype=complex)
+ y = complex(np.inf, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(NaN + iNaN) returns NaN + iNaN.
- x = np.array([complex(np.nan, np.nan)], dtype=np.complex)
- y = np.complex(np.nan, np.nan)
+ x = np.array([complex(np.nan, np.nan)], dtype=complex)
+ y = complex(np.nan, np.nan)
assert_almost_equal(np.log(x), y)
xl.append(x)
yl.append(y)
# clog(conj(z)) = conj(clog(z)).
- xa = np.array(xl, dtype=np.complex)
- ya = np.array(yl, dtype=np.complex)
+ xa = np.array(xl, dtype=complex)
+ ya = np.array(yl, dtype=complex)
with np.errstate(divide='ignore'):
for i in range(len(xa)):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
@@ -286,7 +286,7 @@ class TestCsqrt(object):
yield check_complex_value, np.sqrt, -1, 0, 0, 1
def test_simple_conjugate(self):
- ref = np.conj(np.sqrt(np.complex(1, 1)))
+ ref = np.conj(np.sqrt(complex(1, 1)))
def f(z):
return np.sqrt(np.conj(z))
@@ -330,7 +330,7 @@ class TestCsqrt(object):
# csqrt(-inf + nani) is nan +- infi (both +i infi are valid)
def _check_ninf_nan(dummy):
msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)"
- z = np.sqrt(np.array(np.complex(-np.inf, np.nan)))
+ z = np.sqrt(np.array(complex(-np.inf, np.nan)))
#Fixme: ugly workaround for isinf bug.
with np.errstate(invalid='ignore'):
if not (np.isnan(z.real) and np.isinf(z.imag)):
@@ -350,7 +350,7 @@ class TestCsqrt(object):
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
-class TestCpow(TestCase):
+class TestCpow(object):
def setUp(self):
self.olderr = np.seterr(invalid='ignore')
@@ -406,16 +406,16 @@ class TestCabs(object):
def test_fabs(self):
# Test that np.abs(x +- 0j) == np.abs(x) (as mandated by C99 for cabs)
- x = np.array([1+0j], dtype=np.complex)
+ x = np.array([1+0j], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(1, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(1, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.inf, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.inf, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
- x = np.array([complex(np.nan, np.NZERO)], dtype=np.complex)
+ x = np.array([complex(np.nan, np.NZERO)], dtype=complex)
assert_array_equal(np.abs(x), np.real(x))
def test_cabs_inf_nan(self):
@@ -445,9 +445,9 @@ class TestCabs(object):
return np.abs(np.conj(a))
def g(a, b):
- return np.abs(np.complex(a, b))
+ return np.abs(complex(a, b))
- xa = np.array(x, dtype=np.complex)
+ xa = np.array(x, dtype=complex)
for i in range(len(xa)):
ref = g(x[i], y[i])
yield check_real_value, f, x[i], y[i], ref
@@ -527,7 +527,7 @@ def check_real_value(f, x1, y1, x, exact=True):
def check_complex_value(f, x1, y1, x2, y2, exact=True):
z1 = np.array([complex(x1, y1)])
- z2 = np.complex(x2, y2)
+ z2 = complex(x2, y2)
with np.errstate(invalid='ignore'):
if exact:
assert_equal(f(z1), z2)
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index ae2beb2a6..8c502ca44 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.compat import unicode
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal)
+ run_module_suite, assert_, assert_equal, assert_array_equal)
# Guess the UCS length for this python interpreter
if sys.version_info[:2] >= (3, 3):
@@ -68,24 +68,24 @@ def test_string_cast():
# Creation tests
############################################################
-class create_zeros(object):
+class CreateZeros(object):
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == u'')
+ assert_(ua_scalar == u'')
# Encode to ascii and double check
- self.assertTrue(ua_scalar.encode('ascii') == b'')
+ assert_(ua_scalar.encode('ascii') == b'')
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
else:
- self.assertTrue(buffer_length(ua_scalar) == 0)
+ assert_(buffer_length(ua_scalar) == 0)
def test_zeros0D(self):
# Check creation of 0-dimensional objects
@@ -105,47 +105,47 @@ class create_zeros(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_zeros_1(create_zeros, TestCase):
+class TestCreateZeros_1(CreateZeros):
"""Check the creation of zero-valued arrays (size 1)"""
ulen = 1
-class test_create_zeros_2(create_zeros, TestCase):
+class TestCreateZeros_2(CreateZeros):
"""Check the creation of zero-valued arrays (size 2)"""
ulen = 2
-class test_create_zeros_1009(create_zeros, TestCase):
+class TestCreateZeros_1009(CreateZeros):
"""Check the creation of zero-valued arrays (size 1009)"""
ulen = 1009
-class create_values(object):
+class CreateValues(object):
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check creation of 0-dimensional objects with values
@@ -165,37 +165,37 @@ class create_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_create_values_1_ucs2(create_values, TestCase):
+class TestCreateValues_1_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_create_values_1_ucs4(create_values, TestCase):
+class TestCreateValues_1_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_create_values_2_ucs2(create_values, TestCase):
+class TestCreateValues_2_UCS2(CreateValues):
"""Check the creation of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_create_values_2_ucs4(create_values, TestCase):
+class TestCreateValues_2_UCS4(CreateValues):
"""Check the creation of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_create_values_1009_ucs2(create_values, TestCase):
+class TestCreateValues_1009_UCS2(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_create_values_1009_ucs4(create_values, TestCase):
+class TestCreateValues_1009_UCS4(CreateValues):
"""Check the creation of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -205,32 +205,32 @@ class test_create_values_1009_ucs4(create_values, TestCase):
# Assignment tests
############################################################
-class assign_values(object):
+class AssignValues(object):
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
# Check the length of the unicode base type
- self.assertTrue(int(ua.dtype.str[2:]) == self.ulen)
+ assert_(int(ua.dtype.str[2:]) == self.ulen)
# Check the length of the data buffer
- self.assertTrue(buffer_length(ua) == nbytes)
+ assert_(buffer_length(ua) == nbytes)
# Small check that data in array element is ok
- self.assertTrue(ua_scalar == self.ucs_value*self.ulen)
+ assert_(ua_scalar == self.ucs_value*self.ulen)
# Encode to UTF-8 and double check
- self.assertTrue(ua_scalar.encode('utf-8') ==
+ assert_(ua_scalar.encode('utf-8') ==
(self.ucs_value*self.ulen).encode('utf-8'))
# Check buffer lengths for scalars
if ucs4:
- self.assertTrue(buffer_length(ua_scalar) == 4*self.ulen)
+ assert_(buffer_length(ua_scalar) == 4*self.ulen)
else:
if self.ucs_value == ucs4_value:
# In UCS2, the \U0010FFFF will be represented using a
# surrogate *pair*
- self.assertTrue(buffer_length(ua_scalar) == 2*2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*2*self.ulen)
else:
# In UCS2, the \uFFFF will be represented using a
# regular 2-byte word
- self.assertTrue(buffer_length(ua_scalar) == 2*self.ulen)
+ assert_(buffer_length(ua_scalar) == 2*self.ulen)
def test_values0D(self):
# Check assignment of 0-dimensional objects with values
@@ -255,37 +255,37 @@ class assign_values(object):
self.content_check(ua, ua[-1, -1, -1], 4*self.ulen*2*3*4)
-class test_assign_values_1_ucs2(assign_values, TestCase):
+class TestAssignValues_1_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_assign_values_1_ucs4(assign_values, TestCase):
+class TestAssignValues_1_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_assign_values_2_ucs2(assign_values, TestCase):
+class TestAssignValues_2_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_assign_values_2_ucs4(assign_values, TestCase):
+class TestAssignValues_2_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_assign_values_1009_ucs2(assign_values, TestCase):
+class TestAssignValues_1009_UCS2(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_assign_values_1009_ucs4(assign_values, TestCase):
+class TestAssignValues_1009_UCS4(AssignValues):
"""Check the assignment of valued arrays (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
@@ -295,7 +295,7 @@ class test_assign_values_1009_ucs4(assign_values, TestCase):
# Byteorder tests
############################################################
-class byteorder_values:
+class ByteorderValues(object):
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
@@ -305,7 +305,7 @@ class byteorder_values:
# This changes the interpretation of the data region (but not the
# actual data), therefore the returned scalars are not
# the same (they are byte-swapped versions of each other).
- self.assertTrue(ua[()] != ua2[()])
+ assert_(ua[()] != ua2[()])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -314,8 +314,8 @@ class byteorder_values:
# Check byteorder of single-dimensional objects
ua = np.array([self.ucs_value*self.ulen]*2, dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue((ua != ua2).all())
- self.assertTrue(ua[-1] != ua2[-1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1] != ua2[-1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -325,8 +325,8 @@ class byteorder_values:
ua = np.array([[[self.ucs_value*self.ulen]*2]*3]*4,
dtype='U%s' % self.ulen)
ua2 = ua.newbyteorder()
- self.assertTrue((ua != ua2).all())
- self.assertTrue(ua[-1, -1, -1] != ua2[-1, -1, -1])
+ assert_((ua != ua2).all())
+ assert_(ua[-1, -1, -1] != ua2[-1, -1, -1])
ua3 = ua2.newbyteorder()
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -338,8 +338,8 @@ class byteorder_values:
test2 = np.repeat(test1, 2)[::2]
for ua in (test1, test2):
ua2 = ua.astype(dtype=ua.dtype.newbyteorder())
- self.assertTrue((ua == ua2).all())
- self.assertTrue(ua[-1] == ua2[-1])
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
@@ -353,45 +353,45 @@ class byteorder_values:
# Cast to a longer type with zero padding
longer_type = np.dtype('U%s' % (self.ulen+1)).newbyteorder()
ua2 = ua.astype(dtype=longer_type)
- self.assertTrue((ua == ua2).all())
- self.assertTrue(ua[-1] == ua2[-1])
+ assert_((ua == ua2).all())
+ assert_(ua[-1] == ua2[-1])
# Cast back again with truncating:
ua3 = ua2.astype(dtype=ua.dtype)
# Arrays must be equal after the round-trip
assert_equal(ua, ua3)
-class test_byteorder_1_ucs2(byteorder_values, TestCase):
+class TestByteorder_1_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS2 values)"""
ulen = 1
ucs_value = ucs2_value
-class test_byteorder_1_ucs4(byteorder_values, TestCase):
+class TestByteorder_1_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1, UCS4 values)"""
ulen = 1
ucs_value = ucs4_value
-class test_byteorder_2_ucs2(byteorder_values, TestCase):
+class TestByteorder_2_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS2 values)"""
ulen = 2
ucs_value = ucs2_value
-class test_byteorder_2_ucs4(byteorder_values, TestCase):
+class TestByteorder_2_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 2, UCS4 values)"""
ulen = 2
ucs_value = ucs4_value
-class test_byteorder_1009_ucs2(byteorder_values, TestCase):
+class TestByteorder_1009_UCS2(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS2 values)"""
ulen = 1009
ucs_value = ucs2_value
-class test_byteorder_1009_ucs4(byteorder_values, TestCase):
+class TestByteorder_1009_UCS4(ByteorderValues):
"""Check the byteorder in unicode (size 1009, UCS4 values)"""
ulen = 1009
ucs_value = ucs4_value
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 73328224e..77aace249 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -283,7 +283,7 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
if num is None:
try:
flags = [x.strip().upper() for x in flags]
- except:
+ except Exception:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
try:
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index e7557b3e6..bbc3923bd 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -80,6 +80,7 @@ def _needs_build(obj, cc_args, extra_postargs, pp_opts):
return False
+
def replace_method(klass, method_name, func):
if sys.version_info[0] < 3:
m = types.MethodType(func, None, klass)
@@ -88,6 +89,25 @@ def replace_method(klass, method_name, func):
m = lambda self, *args, **kw: func(self, *args, **kw)
setattr(klass, method_name, m)
+
+######################################################################
+## Method that subclasses may redefine. But don't call this method,
+## it i private to CCompiler class and may return unexpected
+## results if used elsewhere. So, you have been warned..
+
+def CCompiler_find_executables(self):
+ """
+ Does nothing here, but is called by the get_version method and can be
+ overridden by subclasses. In particular it is redefined in the `FCompiler`
+ class where more documentation can be found.
+
+ """
+ pass
+
+
+replace_method(CCompiler, 'find_executables', CCompiler_find_executables)
+
+
# Using customized CCompiler.spawn.
def CCompiler_spawn(self, cmd, display=None):
"""
@@ -417,7 +437,7 @@ def CCompiler_show_customization(self):
log.info("compiler '%s' is set to %s" % (attrname, attr))
try:
self.get_version()
- except:
+ except Exception:
pass
if log._global_log.threshold<2:
print('*'*80)
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 1c868cf6c..910493a77 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -7,7 +7,7 @@ from glob import glob
import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
- DistutilsFileError
+ DistutilsFileError
from numpy.distutils import log
from distutils.dep_util import newer_group
@@ -19,9 +19,10 @@ from numpy.distutils.misc_util import filter_sources, has_f_sources,\
_l = old_build_clib.user_options
for _i in range(len(_l)):
if _l[_i][0] in ['build-clib', 'build-temp']:
- _l[_i] = (_l[_i][0]+'=',)+_l[_i][1:]
+ _l[_i] = (_l[_i][0] + '=',) + _l[_i][1:]
#
+
class build_clib(old_build_clib):
description = "build C/C++/F libraries used by Python extensions"
@@ -32,7 +33,7 @@ class build_clib(old_build_clib):
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
- ]
+ ]
boolean_options = old_build_clib.boolean_options + ['inplace']
@@ -75,7 +76,8 @@ class build_clib(old_build_clib):
for (lib_name, build_info) in self.libraries:
l = build_info.get('language', None)
- if l and l not in languages: languages.append(l)
+ if l and l not in languages:
+ languages.append(l)
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
@@ -94,11 +96,11 @@ class build_clib(old_build_clib):
if self.have_f_sources():
from numpy.distutils.fcompiler import new_fcompiler
self._f_compiler = new_fcompiler(compiler=self.fcompiler,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force,
- requiref90='f90' in languages,
- c_compiler=self.compiler)
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force,
+ requiref90='f90' in languages,
+ c_compiler=self.compiler)
if self._f_compiler is not None:
self._f_compiler.customize(self.distribution)
@@ -114,10 +116,10 @@ class build_clib(old_build_clib):
self.build_libraries(self.libraries)
if self.inplace:
- for l in self.distribution.installed_libraries:
+ for l in self.distribution.installed_libraries:
libname = self.compiler.library_filename(l.name)
source = os.path.join(self.build_clib, libname)
- target = os.path.join(l.target_dir, libname)
+ target = os.path.join(l.target_dir, libname)
self.mkpath(l.target_dir)
shutil.copy(source, target)
@@ -140,21 +142,25 @@ class build_clib(old_build_clib):
sources = build_info.get('sources')
if sources is None or not is_sequence(sources):
raise DistutilsSetupError(("in 'libraries' option (library '%s'), " +
- "'sources' must be present and must be " +
- "a list of source filenames") % lib_name)
+ "'sources' must be present and must be " +
+ "a list of source filenames") % lib_name)
sources = list(sources)
c_sources, cxx_sources, f_sources, fmodule_sources \
- = filter_sources(sources)
+ = filter_sources(sources)
requiref90 = not not fmodule_sources or \
- build_info.get('language', 'c')=='f90'
+ build_info.get('language', 'c') == 'f90'
# save source type information so that build_ext can use it.
source_languages = []
- if c_sources: source_languages.append('c')
- if cxx_sources: source_languages.append('c++')
- if requiref90: source_languages.append('f90')
- elif f_sources: source_languages.append('f77')
+ if c_sources:
+ source_languages.append('c')
+ if cxx_sources:
+ source_languages.append('c++')
+ if requiref90:
+ source_languages.append('f90')
+ elif f_sources:
+ source_languages.append('f77')
build_info['source_languages'] = source_languages
lib_file = compiler.library_filename(lib_name,
@@ -168,8 +174,8 @@ class build_clib(old_build_clib):
config_fc = build_info.get('config_fc', {})
if fcompiler is not None and config_fc:
- log.info('using additional config_fc from setup script '\
- 'for fortran compiler: %s' \
+ log.info('using additional config_fc from setup script '
+ 'for fortran compiler: %s'
% (config_fc,))
from numpy.distutils.fcompiler import new_fcompiler
fcompiler = new_fcompiler(compiler=fcompiler.compiler_type,
@@ -186,12 +192,14 @@ class build_clib(old_build_clib):
# check availability of Fortran compilers
if (f_sources or fmodule_sources) and fcompiler is None:
- raise DistutilsError("library %s has Fortran sources"\
- " but no Fortran compiler found" % (lib_name))
+ raise DistutilsError("library %s has Fortran sources"
+ " but no Fortran compiler found" % (lib_name))
if fcompiler is not None:
- fcompiler.extra_f77_compile_args = build_info.get('extra_f77_compile_args') or []
- fcompiler.extra_f90_compile_args = build_info.get('extra_f90_compile_args') or []
+ fcompiler.extra_f77_compile_args = build_info.get(
+ 'extra_f77_compile_args') or []
+ fcompiler.extra_f90_compile_args = build_info.get(
+ 'extra_f90_compile_args') or []
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
@@ -203,9 +211,10 @@ class build_clib(old_build_clib):
# where compiled F90 module files are:
module_dirs = build_info.get('module_dirs') or []
module_build_dir = os.path.dirname(lib_file)
- if requiref90: self.mkpath(module_build_dir)
+ if requiref90:
+ self.mkpath(module_build_dir)
- if compiler.compiler_type=='msvc':
+ if compiler.compiler_type == 'msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
@@ -239,7 +248,7 @@ class build_clib(old_build_clib):
if requiref90:
if fcompiler.module_dir_switch is None:
existing_modules = glob('*.mod')
- extra_postargs += fcompiler.module_options(\
+ extra_postargs += fcompiler.module_options(
module_dirs, module_build_dir)
if fmodule_sources:
@@ -257,14 +266,14 @@ class build_clib(old_build_clib):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
- if os.path.abspath(f)==os.path.abspath(t):
+ if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
try:
self.move_file(f, module_build_dir)
except DistutilsFileError:
- log.warn('failed to move %r to %r' \
+ log.warn('failed to move %r to %r'
% (f, module_build_dir))
if f_sources:
@@ -278,13 +287,32 @@ class build_clib(old_build_clib):
else:
f_objects = []
- objects.extend(f_objects)
-
- # assume that default linker is suitable for
- # linking Fortran object files
- compiler.create_static_lib(objects, lib_name,
- output_dir=self.build_clib,
- debug=self.debug)
+ if f_objects and not fcompiler.can_ccompiler_link(compiler):
+ # Default linker cannot link Fortran object files, and results
+ # need to be wrapped later. Instead of creating a real static
+ # library, just keep track of the object files.
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.fobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in f_objects))
+
+ listfn = os.path.join(self.build_clib,
+ lib_name + '.cobjects')
+ with open(listfn, 'w') as f:
+ f.write("\n".join(os.path.abspath(obj) for obj in objects))
+
+ # create empty "library" file for dependency tracking
+ lib_fname = os.path.join(self.build_clib,
+ lib_name + compiler.static_lib_extension)
+ with open(lib_fname, 'wb') as f:
+ pass
+ else:
+ # assume that default linker is suitable for
+ # linking Fortran object files
+ objects.extend(f_objects)
+ compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
# fix library dependencies
clib_libraries = build_info.get('libraries', [])
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 0fa52a281..d935a3303 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -5,27 +5,25 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import shutil
from glob import glob
from distutils.dep_util import newer_group
from distutils.command.build_ext import build_ext as old_build_ext
from distutils.errors import DistutilsFileError, DistutilsSetupError,\
- DistutilsError
+ DistutilsError
from distutils.file_util import copy_file
from numpy.distutils import log
from numpy.distutils.exec_command import exec_command
-from numpy.distutils.system_info import combine_paths
+from numpy.distutils.system_info import combine_paths, system_info
from numpy.distutils.misc_util import filter_sources, has_f_sources, \
- has_cxx_sources, get_ext_source_files, \
- get_numpy_include_dirs, is_sequence, get_build_architecture, \
- msvc_version
+ has_cxx_sources, get_ext_source_files, \
+ get_numpy_include_dirs, is_sequence, get_build_architecture, \
+ msvc_version
from numpy.distutils.command.config_compiler import show_fortran_compilers
-try:
- set
-except NameError:
- from sets import Set as set
+
class build_ext (old_build_ext):
@@ -36,12 +34,12 @@ class build_ext (old_build_ext):
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
- ]
+ ]
help_options = old_build_ext.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
- ]
+ ]
def initialize_options(self):
old_build_ext.initialize_options(self)
@@ -84,11 +82,13 @@ class build_ext (old_build_ext):
if self.distribution.has_c_libraries():
if self.inplace:
if self.distribution.have_run.get('build_clib'):
- log.warn('build_clib already run, it is too late to ' \
- 'ensure in-place build of build_clib')
- build_clib = self.distribution.get_command_obj('build_clib')
+ log.warn('build_clib already run, it is too late to '
+ 'ensure in-place build of build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
else:
- build_clib = self.distribution.get_command_obj('build_clib')
+ build_clib = self.distribution.get_command_obj(
+ 'build_clib')
build_clib.inplace = 1
build_clib.ensure_finalized()
build_clib.run()
@@ -119,13 +119,18 @@ class build_ext (old_build_ext):
self.compiler.customize_cmd(self)
self.compiler.show_customization()
+ # Setup directory for storing generated extra DLL files on Windows
+ self.extra_dll_dir = os.path.join(self.build_temp, 'extra-dll')
+ if not os.path.isdir(self.extra_dll_dir):
+ os.makedirs(self.extra_dll_dir)
+
# Create mapping of libraries built by build_clib:
clibs = {}
if build_clib is not None:
for libname, build_info in build_clib.libraries or []:
if libname in clibs and clibs[libname] != build_info:
- log.warn('library %r defined more than once,'\
- ' overwriting build_info\n%s... \nwith\n%s...' \
+ log.warn('library %r defined more than once,'
+ ' overwriting build_info\n%s... \nwith\n%s...'
% (libname, repr(clibs[libname])[:300], repr(build_info)[:300]))
clibs[libname] = build_info
# .. and distribution libraries:
@@ -181,7 +186,7 @@ class build_ext (old_build_ext):
elif 'f77' in ext_languages:
ext_language = 'f77'
else:
- ext_language = 'c' # default
+ ext_language = 'c' # default
if l and l != ext_language and ext.language:
log.warn('resetting extension %r language from %r to %r.' %
(ext.name, l, ext_language))
@@ -196,9 +201,9 @@ class build_ext (old_build_ext):
# Initialize C++ compiler:
if need_cxx_compiler:
self._cxx_compiler = new_compiler(compiler=compiler_type,
- verbose=self.verbose,
- dry_run=self.dry_run,
- force=self.force)
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
compiler = self._cxx_compiler
compiler.customize(self.distribution, need_cxx=need_cxx_compiler)
compiler.customize_cmd(self)
@@ -238,7 +243,7 @@ class build_ext (old_build_ext):
dry_run=self.dry_run,
force=self.force,
requiref90=True,
- c_compiler = self.compiler)
+ c_compiler=self.compiler)
fcompiler = self._f90_compiler
if fcompiler:
ctype = fcompiler.compiler_type
@@ -256,6 +261,16 @@ class build_ext (old_build_ext):
# Build extensions
self.build_extensions()
+ # Copy over any extra DLL files
+ runtime_lib_dir = os.path.join(
+ self.build_lib, self.distribution.get_name(), 'extra-dll')
+ for fn in os.listdir(self.extra_dll_dir):
+ if not fn.lower().endswith('.dll'):
+ continue
+ if not os.path.isdir(runtime_lib_dir):
+ os.makedirs(runtime_lib_dir)
+ runtime_lib = os.path.join(self.extra_dll_dir, fn)
+ copy_file(runtime_lib, runtime_lib_dir)
def swig_sources(self, sources):
# Do nothing. Swig sources have beed handled in build_src command.
@@ -299,11 +314,9 @@ class build_ext (old_build_ext):
macros.append((undef,))
c_sources, cxx_sources, f_sources, fmodule_sources = \
- filter_sources(ext.sources)
+ filter_sources(ext.sources)
-
-
- if self.compiler.compiler_type=='msvc':
+ if self.compiler.compiler_type == 'msvc':
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
@@ -313,32 +326,34 @@ class build_ext (old_build_ext):
cxx_sources = []
# Set Fortran/C++ compilers for compilation and linking.
- if ext.language=='f90':
+ if ext.language == 'f90':
fcompiler = self._f90_compiler
- elif ext.language=='f77':
+ elif ext.language == 'f77':
fcompiler = self._f77_compiler
- else: # in case ext.language is c++, for instance
+ else: # in case ext.language is c++, for instance
fcompiler = self._f90_compiler or self._f77_compiler
if fcompiler is not None:
- fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(ext, 'extra_f77_compile_args') else []
- fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(ext, 'extra_f90_compile_args') else []
+ fcompiler.extra_f77_compile_args = (ext.extra_f77_compile_args or []) if hasattr(
+ ext, 'extra_f77_compile_args') else []
+ fcompiler.extra_f90_compile_args = (ext.extra_f90_compile_args or []) if hasattr(
+ ext, 'extra_f90_compile_args') else []
cxx_compiler = self._cxx_compiler
# check for the availability of required compilers
if cxx_sources and cxx_compiler is None:
- raise DistutilsError("extension %r has C++ sources" \
- "but no C++ compiler found" % (ext.name))
+ raise DistutilsError("extension %r has C++ sources"
+ "but no C++ compiler found" % (ext.name))
if (f_sources or fmodule_sources) and fcompiler is None:
- raise DistutilsError("extension %r has Fortran sources " \
- "but no Fortran compiler found" % (ext.name))
+ raise DistutilsError("extension %r has Fortran sources "
+ "but no Fortran compiler found" % (ext.name))
if ext.language in ['f77', 'f90'] and fcompiler is None:
- self.warn("extension %r has Fortran libraries " \
- "but no Fortran linker found, using default linker" % (ext.name))
- if ext.language=='c++' and cxx_compiler is None:
- self.warn("extension %r has C++ libraries " \
- "but no C++ linker found, using default linker" % (ext.name))
+ self.warn("extension %r has Fortran libraries "
+ "but no Fortran linker found, using default linker" % (ext.name))
+ if ext.language == 'c++' and cxx_compiler is None:
+ self.warn("extension %r has C++ libraries "
+ "but no C++ linker found, using default linker" % (ext.name))
- kws = {'depends':ext.depends}
+ kws = {'depends': ext.depends}
output_dir = self.build_temp
include_dirs = ext.include_dirs + get_numpy_include_dirs()
@@ -391,7 +406,7 @@ class build_ext (old_build_ext):
if f in existing_modules:
continue
t = os.path.join(module_build_dir, f)
- if os.path.abspath(f)==os.path.abspath(t):
+ if os.path.abspath(f) == os.path.abspath(t):
continue
if os.path.isfile(t):
os.remove(t)
@@ -410,7 +425,12 @@ class build_ext (old_build_ext):
extra_postargs=extra_postargs,
depends=ext.depends)
- objects = c_objects + f_objects
+ if f_objects and not fcompiler.can_ccompiler_link(self.compiler):
+ unlinkable_fobjects = f_objects
+ objects = c_objects
+ else:
+ unlinkable_fobjects = []
+ objects = c_objects + f_objects
if ext.extra_objects:
objects.extend(ext.extra_objects)
@@ -423,13 +443,20 @@ class build_ext (old_build_ext):
if self.compiler.compiler_type in ('msvc', 'intelw', 'intelemw'):
# expand libraries with fcompiler libraries as we are
# not using fcompiler linker
- self._libs_with_msvc_and_fortran(fcompiler, libraries, library_dirs)
+ self._libs_with_msvc_and_fortran(
+ fcompiler, libraries, library_dirs)
elif ext.language in ['f77', 'f90'] and fcompiler is not None:
linker = fcompiler.link_shared_object
- if ext.language=='c++' and cxx_compiler is not None:
+ if ext.language == 'c++' and cxx_compiler is not None:
linker = cxx_compiler.link_shared_object
+ if fcompiler is not None:
+ objects, libraries = self._process_unlinkable_fobjects(
+ objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects)
+
linker(objects, ext_filename,
libraries=libraries,
library_dirs=library_dirs,
@@ -444,23 +471,59 @@ class build_ext (old_build_ext):
build_src = self.get_finalized_command("build_src").build_src
build_clib = self.get_finalized_command("build_clib").build_clib
objects = self.compiler.compile([os.path.join(build_src,
- "gfortran_vs2003_hack.c")],
- output_dir=self.build_temp)
- self.compiler.create_static_lib(objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+ "gfortran_vs2003_hack.c")],
+ output_dir=self.build_temp)
+ self.compiler.create_static_lib(
+ objects, "_gfortran_workaround", output_dir=build_clib, debug=self.debug)
+
+ def _process_unlinkable_fobjects(self, objects, libraries,
+ fcompiler, library_dirs,
+ unlinkable_fobjects):
+ libraries = list(libraries)
+ objects = list(objects)
+ unlinkable_fobjects = list(unlinkable_fobjects)
+
+ # Expand possible fake static libraries to objects
+ for lib in list(libraries):
+ for libdir in library_dirs:
+ fake_lib = os.path.join(libdir, lib + '.fobjects')
+ if os.path.isfile(fake_lib):
+ # Replace fake static library
+ libraries.remove(lib)
+ with open(fake_lib, 'r') as f:
+ unlinkable_fobjects.extend(f.read().splitlines())
+
+ # Expand C objects
+ c_lib = os.path.join(libdir, lib + '.cobjects')
+ with open(c_lib, 'r') as f:
+ objects.extend(f.read().splitlines())
+
+ # Wrap unlinkable objects to a linkable one
+ if unlinkable_fobjects:
+ fobjects = [os.path.relpath(obj) for obj in unlinkable_fobjects]
+ wrapped = fcompiler.wrap_unlinkable_objects(
+ fobjects, output_dir=self.build_temp,
+ extra_dll_dir=self.extra_dll_dir)
+ objects.extend(wrapped)
+
+ return objects, libraries
def _libs_with_msvc_and_fortran(self, fcompiler, c_libraries,
c_library_dirs):
- if fcompiler is None: return
+ if fcompiler is None:
+ return
for libname in c_libraries:
- if libname.startswith('msvc'): continue
+ if libname.startswith('msvc'):
+ continue
fileexists = False
for libdir in c_library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
- if fileexists: continue
+ if fileexists:
+ continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in c_library_dirs:
@@ -474,7 +537,8 @@ class build_ext (old_build_ext):
c_library_dirs.append(self.build_temp)
fileexists = True
break
- if fileexists: continue
+ if fileexists:
+ continue
log.warn('could not find library %r in directories %s'
% (libname, c_library_dirs))
@@ -502,14 +566,14 @@ class build_ext (old_build_ext):
if self.build_temp not in c_library_dirs:
c_library_dirs.append(self.build_temp)
- def get_source_files (self):
+ def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
for ext in self.extensions:
filenames.extend(get_ext_source_files(ext))
return filenames
- def get_outputs (self):
+ def get_outputs(self):
self.check_extensions_list(self.extensions)
outputs = []
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index e43fb631b..a7368a7ae 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -441,7 +441,7 @@ int main (void)
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
- except:
+ except Exception:
output = grabber.data
grabber.restore()
raise
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 652826376..580299347 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -35,7 +35,7 @@ def getoutput(cmd, successful_status=(0,), stacklevel=1):
except EnvironmentError:
e = get_exception()
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
- return False, output
+ return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
return True, output
return False, output
@@ -75,7 +75,7 @@ class CPUInfoBase(object):
def _try_call(self, func):
try:
return func()
- except:
+ except Exception:
pass
def __getattr__(self, name):
@@ -336,7 +336,7 @@ class IRIXCPUInfo(CPUInfoBase):
def get_ip(self):
try: return self.info.get('MACHINE')
- except: pass
+ except Exception: pass
def __machine(self, n):
return self.info.get('MACHINE').lower() == 'ip%s' % (n)
def _is_IP19(self): return self.__machine(19)
@@ -523,7 +523,7 @@ class Win32CPUInfo(CPUInfoBase):
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
- except:
+ except Exception:
print(sys.exc_info()[1], '(ignoring)')
self.__class__.info = info
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 8e11019cf..1d558319d 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -22,10 +22,6 @@ import os
import sys
import re
import types
-try:
- set
-except NameError:
- from sets import Set as set
from numpy.compat import open_latin1
@@ -434,6 +430,7 @@ class FCompiler(CCompiler):
raise CompilerNotFound()
return version
+
############################################################
## Public methods:
@@ -701,6 +698,38 @@ class FCompiler(CCompiler):
else:
return hook_name()
+ def can_ccompiler_link(self, ccompiler):
+ """
+ Check if the given C compiler can link objects produced by
+ this compiler.
+ """
+ return True
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+
+ Parameters
+ ----------
+ objects : list
+ List of object files to include.
+ output_dir : str
+ Output directory to place generated object files.
+ extra_dll_dir : str
+ Output directory to place extra DLL files that need to be
+ included on Windows.
+
+ Returns
+ -------
+ converted_objects : list of str
+ List of converted object files.
+ Note that the number of output files is not necessarily
+ the same as inputs.
+
+ """
+ raise NotImplementedError()
+
## class FCompiler
_default_compilers = (
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 4649fd743..10c60dc6f 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -6,37 +6,43 @@ import sys
import warnings
import platform
import tempfile
+import hashlib
+import base64
from subprocess import Popen, PIPE, STDOUT
-
+from copy import copy
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import exec_command
-from numpy.distutils.misc_util import msvc_runtime_library
from numpy.distutils.compat import get_exception
+from numpy.distutils.system_info import system_info
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
TARGET_R = re.compile(r"Target: ([a-zA-Z0-9_\-]*)")
# XXX: handle cross compilation
+
+
def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
if is_win64():
#_EXTRAFLAGS = ["-fno-leading-underscore"]
_EXTRAFLAGS = []
else:
_EXTRAFLAGS = []
+
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
- compiler_aliases = ('g77',)
+ compiler_aliases = ('g77', )
description = 'GNU Fortran 77 compiler'
def gnu_version_match(self, version_string):
"""Handle the different versions of GNU fortran compilers"""
# Strip warning(s) that may be emitted by gfortran
while version_string.startswith('gfortran: warning'):
- version_string = version_string[version_string.find('\n')+1:]
+ version_string = version_string[version_string.find('\n') + 1:]
# Gfortran versions from after 2010 will output a simple string
# (usually "x.y", "x.y.z" or "x.y.z-q") for ``-dumpversion``; older
@@ -60,7 +66,8 @@ class GnuFCompiler(FCompiler):
m = re.search(r'GNU Fortran\s+95.*?([0-9-.]+)', version_string)
if m:
return ('gfortran', m.group(1))
- m = re.search(r'GNU Fortran.*?\-?([0-9-.]+)', version_string)
+ m = re.search(
+ r'GNU Fortran.*?\-?([0-9-.]+\.[0-9-.]+)', version_string)
if m:
v = m.group(1)
if v.startswith('0') or v.startswith('2') or v.startswith('3'):
@@ -91,7 +98,7 @@ class GnuFCompiler(FCompiler):
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-g", "-Wall"]
- }
+ }
module_dir_switch = None
module_include_switch = None
@@ -129,8 +136,8 @@ class GnuFCompiler(FCompiler):
try:
get_makefile_filename = sc.get_makefile_filename
except AttributeError:
- pass # i.e. PyPy
- else:
+ pass # i.e. PyPy
+ else:
filename = get_makefile_filename()
sc.parse_makefile(filename, g)
target = g.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
@@ -153,9 +160,8 @@ class GnuFCompiler(FCompiler):
return opt
def get_libgcc_dir(self):
- status, output = exec_command(self.compiler_f77 +
- ['-print-libgcc-file-name'],
- use_tee=0)
+ status, output = exec_command(
+ self.compiler_f77 + ['-print-libgcc-file-name'], use_tee=0)
if not status:
return os.path.dirname(output)
return None
@@ -170,7 +176,7 @@ class GnuFCompiler(FCompiler):
d = os.path.normpath(d)
path = os.path.join(d, "lib%s.a" % self.g2c)
if not os.path.exists(path):
- root = os.path.join(d, *((os.pardir,)*4))
+ root = os.path.join(d, *((os.pardir, ) * 4))
d2 = os.path.abspath(os.path.join(root, 'lib'))
path = os.path.join(d2, "lib%s.a" % self.g2c)
if os.path.exists(path):
@@ -193,13 +199,8 @@ class GnuFCompiler(FCompiler):
opt.append(g2c)
c_compiler = self.c_compiler
if sys.platform == 'win32' and c_compiler and \
- c_compiler.compiler_type == 'msvc':
- # the following code is not needed (read: breaks) when using MinGW
- # in case want to link F77 compiled code with MSVC
+ c_compiler.compiler_type == 'msvc':
opt.append('gcc')
- runtime_lib = msvc_runtime_library()
- if runtime_lib:
- opt.append(runtime_lib)
if sys.platform == 'darwin':
opt.append('cc_dynamic')
return opt
@@ -241,7 +242,7 @@ class GnuFCompiler(FCompiler):
class Gnu95FCompiler(GnuFCompiler):
compiler_type = 'gnu95'
- compiler_aliases = ('gfortran',)
+ compiler_aliases = ('gfortran', )
description = 'GNU Fortran 95 compiler'
def version_match(self, version_string):
@@ -256,8 +257,10 @@ class Gnu95FCompiler(GnuFCompiler):
# use -mno-cygwin flag for gfortran when Python is not
# Cygwin-Python
if sys.platform == 'win32':
- for key in ['version_cmd', 'compiler_f77', 'compiler_f90',
- 'compiler_fix', 'linker_so', 'linker_exe']:
+ for key in [
+ 'version_cmd', 'compiler_f77', 'compiler_f90',
+ 'compiler_fix', 'linker_so', 'linker_exe'
+ ]:
self.executables[key].append('-mno-cygwin')
return v
@@ -274,7 +277,7 @@ class Gnu95FCompiler(GnuFCompiler):
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
'linker_exe' : [None, "-Wall"]
- }
+ }
module_dir_switch = '-J'
module_include_switch = '-I'
@@ -319,7 +322,7 @@ class Gnu95FCompiler(GnuFCompiler):
target = self.get_target()
if target:
d = os.path.normpath(self.get_libgcc_dir())
- root = os.path.join(d, *((os.pardir,)*4))
+ root = os.path.join(d, *((os.pardir, ) * 4))
path = os.path.join(root, "lib")
mingwdir = os.path.normpath(path)
if os.path.exists(os.path.join(mingwdir, "libmingwex.a")):
@@ -335,32 +338,148 @@ class Gnu95FCompiler(GnuFCompiler):
if c_compiler and c_compiler.compiler_type == "msvc":
if "gcc" in opt:
i = opt.index("gcc")
- opt.insert(i+1, "mingwex")
- opt.insert(i+1, "mingw32")
- # XXX: fix this mess, does not work for mingw
- if is_win64():
- c_compiler = self.c_compiler
- if c_compiler and c_compiler.compiler_type == "msvc":
- return []
- else:
- pass
+ opt.insert(i + 1, "mingwex")
+ opt.insert(i + 1, "mingw32")
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ return []
+ else:
+ pass
return opt
def get_target(self):
- status, output = exec_command(self.compiler_f77 +
- ['-v'],
- use_tee=0)
+ status, output = exec_command(self.compiler_f77 + ['-v'], use_tee=0)
if not status:
m = TARGET_R.search(output)
if m:
return m.group(1)
return ""
- def get_flags_opt(self):
+ def _hash_files(self, filenames):
+ h = hashlib.sha1()
+ for fn in filenames:
+ with open(fn, 'rb') as f:
+ while True:
+ block = f.read(131072)
+ if not block:
+ break
+ h.update(block)
+ text = base64.b32encode(h.digest())
+ if sys.version_info[0] >= 3:
+ text = text.decode('ascii')
+ return text.rstrip('=')
+
+ def _link_wrapper_lib(self, objects, output_dir, extra_dll_dir,
+ chained_dlls, is_archive):
+ """Create a wrapper shared library for the given objects
+
+ Return an MSVC-compatible lib
+ """
+
+ c_compiler = self.c_compiler
+ if c_compiler.compiler_type != "msvc":
+ raise ValueError("This method only supports MSVC")
+
+ object_hash = self._hash_files(list(objects) + list(chained_dlls))
+
+ if is_win64():
+ tag = 'win_amd64'
+ else:
+ tag = 'win32'
+
+ basename = 'lib' + os.path.splitext(
+ os.path.basename(objects[0]))[0][:8]
+ root_name = basename + '.' + object_hash + '.gfortran-' + tag
+ dll_name = root_name + '.dll'
+ def_name = root_name + '.def'
+ lib_name = root_name + '.lib'
+ dll_path = os.path.join(extra_dll_dir, dll_name)
+ def_path = os.path.join(output_dir, def_name)
+ lib_path = os.path.join(output_dir, lib_name)
+
+ if os.path.isfile(lib_path):
+ # Nothing to do
+ return lib_path, dll_path
+
+ if is_archive:
+ objects = (["-Wl,--whole-archive"] + list(objects) +
+ ["-Wl,--no-whole-archive"])
+ self.link_shared_object(
+ objects,
+ dll_name,
+ output_dir=extra_dll_dir,
+ extra_postargs=list(chained_dlls) + [
+ '-Wl,--allow-multiple-definition',
+ '-Wl,--output-def,' + def_path,
+ '-Wl,--export-all-symbols',
+ '-Wl,--enable-auto-import',
+ '-static',
+ '-mlong-double-64',
+ ])
+
+ # No PowerPC!
if is_win64():
- return ['-O0']
+ specifier = '/MACHINE:X64'
+ else:
+ specifier = '/MACHINE:X86'
+
+ # MSVC specific code
+ lib_args = ['/def:' + def_path, '/OUT:' + lib_path, specifier]
+ if not c_compiler.initialized:
+ c_compiler.initialize()
+ c_compiler.spawn([c_compiler.lib] + lib_args)
+
+ return lib_path, dll_path
+
+ def can_ccompiler_link(self, compiler):
+ # MSVC cannot link objects compiled by GNU fortran
+ return compiler.compiler_type not in ("msvc", )
+
+ def wrap_unlinkable_objects(self, objects, output_dir, extra_dll_dir):
+ """
+ Convert a set of object files that are not compatible with the default
+ linker, to a file that is compatible.
+ """
+ if self.c_compiler.compiler_type == "msvc":
+ # Compile a DLL and return the lib for the DLL as
+ # the object. Also keep track of previous DLLs that
+ # we have compiled so that we can link against them.
+
+ # If there are .a archives, assume they are self-contained
+ # static libraries, and build separate DLLs for each
+ archives = []
+ plain_objects = []
+ for obj in objects:
+ if obj.lower().endswith('.a'):
+ archives.append(obj)
+ else:
+ plain_objects.append(obj)
+
+ chained_libs = []
+ chained_dlls = []
+ for archive in archives[::-1]:
+ lib, dll = self._link_wrapper_lib(
+ [archive],
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=True)
+ chained_libs.insert(0, lib)
+ chained_dlls.insert(0, dll)
+
+ if not plain_objects:
+ return chained_libs
+
+ lib, dll = self._link_wrapper_lib(
+ plain_objects,
+ output_dir,
+ extra_dll_dir,
+ chained_dlls=chained_dlls,
+ is_archive=False)
+ return [lib] + chained_libs
else:
- return GnuFCompiler.get_flags_opt(self)
+ raise ValueError("Unsupported C compiler")
+
def _can_target(cmd, arch):
"""Return true if the architecture supports the -arch flag"""
@@ -382,6 +501,7 @@ def _can_target(cmd, arch):
os.remove(filename)
return False
+
if __name__ == '__main__':
from distutils import log
log.set_verbosity(2)
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index eb6150201..e3b922edc 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -57,8 +57,8 @@ class IntelFCompiler(BaseIntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
- return ['-xhost -fp-model strict -O1 -{}'.format(mpopt)]
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
+ return ['-fp-model strict -O1 -{}'.format(mpopt)]
def get_flags_arch(self):
return []
@@ -123,7 +123,7 @@ class IntelEM64TFCompiler(IntelFCompiler):
def get_flags_opt(self): # Scipy test failures with -O2
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
return ['-fp-model strict -O1 -{}'.format(mpopt)]
def get_flags_arch(self):
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index 3b7756b59..3386775ee 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -19,7 +19,7 @@ class IntelCCompiler(UnixCCompiler):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
@@ -59,7 +59,7 @@ class IntelEM64TCCompiler(UnixCCompiler):
UnixCCompiler.__init__(self, verbose, dry_run, force)
v = self.get_version()
- mpopt = 'openmp' if v and int(v.split('.')[0]) < 15 else 'qopenmp'
+ mpopt = 'openmp' if v and v < '15' else 'qopenmp'
self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
'-fomit-frame-pointer -{}').format(mpopt)
compiler = self.cc_exe
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 870df0693..90b4def04 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -251,18 +251,21 @@ def find_python_dll():
# We can't do much here:
# - find it in the virtualenv (sys.prefix)
# - find it in python main dir (sys.base_prefix, if in a virtualenv)
+ # - sys.real_prefix is main dir for virtualenvs in Python 2.7
# - in system32,
# - ortherwise (Sxs), I don't know how to get it.
stems = [sys.prefix]
- if sys.base_prefix != sys.prefix:
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
sub_dirs = ['', 'lib', 'bin']
# generate possible combinations of directory trees and sub-directories
lib_dirs = []
for stem in stems:
for folder in sub_dirs:
- lib_dirs = os.path.join(stem, folder)
+ lib_dirs.append(os.path.join(stem, folder))
# add system directory as well
if 'SYSTEMROOT' in os.environ:
@@ -426,8 +429,10 @@ def _check_for_import_lib():
# directory trees that may contain the library
stems = [sys.prefix]
- if sys.base_prefix != sys.prefix:
+ if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
+ elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
+ stems.append(sys.real_prefix)
# possible subdirectories within those trees where it is placed
sub_dirs = ['libs', 'lib']
@@ -481,8 +486,15 @@ def _build_import_library_x86():
lib_file = os.path.join(sys.prefix, 'libs', lib_name)
if not os.path.isfile(lib_file):
# didn't find library file in virtualenv, try base distribution, too,
- # and use that instead if found there
- base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ # and use that instead if found there. for Python 2.7 venvs, the base
+ # directory is in attribute real_prefix instead of base_prefix.
+ if hasattr(sys, 'base_prefix'):
+ base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
+ elif hasattr(sys, 'real_prefix'):
+ base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
+ else:
+ base_lib = '' # os.path.isfile('') == False
+
if os.path.isfile(base_lib):
lib_file = base_lib
else:
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 21aaece70..102af874f 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -33,11 +33,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-try:
- set
-except NameError:
- from sets import Set as set
-
from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
@@ -461,7 +456,7 @@ def is_sequence(seq):
return False
try:
len(seq)
- except:
+ except Exception:
return False
return True
@@ -1064,24 +1059,25 @@ class Configuration(object):
Notes
-----
- Rules for installation paths:
- foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
- (gun, foo/bar) -> parent/gun
- foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
- (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
- (gun/*, foo/*) -> parent/gun/a, parent/gun/b
- /foo/bar -> (bar, /foo/bar) -> parent/bar
- (gun, /foo/bar) -> parent/gun
- (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+ Rules for installation paths::
+
+ foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
+ (gun, foo/bar) -> parent/gun
+ foo/* -> (foo/a, foo/a), (foo/b, foo/b) -> parent/foo/a, parent/foo/b
+ (gun, foo/*) -> (gun, foo/a), (gun, foo/b) -> gun
+ (gun/*, foo/*) -> parent/gun/a, parent/gun/b
+ /foo/bar -> (bar, /foo/bar) -> parent/bar
+ (gun, /foo/bar) -> parent/gun
+ (fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
Examples
--------
For example suppose the source directory contains fun/foo.dat and
- fun/bar/car.dat::
+ fun/bar/car.dat:
- >>> self.add_data_dir('fun') #doctest: +SKIP
- >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
- >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
+ >>> self.add_data_dir('fun') #doctest: +SKIP
+ >>> self.add_data_dir(('sun', 'fun')) #doctest: +SKIP
+ >>> self.add_data_dir(('gun', '/full/path/to/fun'))#doctest: +SKIP
Will install data-files to the locations::
@@ -1097,6 +1093,7 @@ class Configuration(object):
gun/
foo.dat
car.dat
+
"""
if is_sequence(data_path):
d, data_path = data_path
@@ -1836,7 +1833,7 @@ class Configuration(object):
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
- except:
+ except Exception:
pass
os.chdir(cwd)
if m:
@@ -1873,7 +1870,7 @@ class Configuration(object):
close_fds=True)
sout = p.stdout
m = re.match(r'(?P<revision>\d+)', sout.read())
- except:
+ except Exception:
pass
os.chdir(cwd)
if m:
@@ -2068,7 +2065,6 @@ class Configuration(object):
"""
self.py_modules.append((self.name, name, generate_config_py))
-
def get_info(self,*names):
"""Get resources information.
@@ -2282,9 +2278,18 @@ def generate_config_py(target):
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
f = open(target, 'w')
- f.write('# This file is generated by %s\n' % (os.path.abspath(sys.argv[0])))
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
f.write('# It contains system_info results at the time of building this package.\n')
f.write('__all__ = ["get_info","show"]\n\n')
+
+ # For gfortran+msvc combination, extra shared libraries may exist
+ f.write("""
+import os
+extra_dll_dir = os.path.join(os.path.dirname(__file__), 'extra-dll')
+if os.path.isdir(extra_dll_dir):
+ os.environ["PATH"] += os.pathsep + extra_dll_dir
+""")
+
for k, i in system_info.saved_results.items():
f.write('%s=%r\n' % (k, i))
f.write(r'''
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
index 8d0c92ed3..e9cc334a5 100644
--- a/numpy/distutils/msvc9compiler.py
+++ b/numpy/distutils/msvc9compiler.py
@@ -11,15 +11,15 @@ def _merge(old, new):
Here `old` is the environment string before the base class initialize
function is called and `new` is the string after the call. The new string
- will be a fixed string if it is not obtained from the current enviroment,
- or the same as the old string if obtained from the same enviroment. The aim
+ will be a fixed string if it is not obtained from the current environment,
+ or the same as the old string if obtained from the same environment. The aim
here is not to append the new string if it is already contained in the old
string so as to limit the growth of the environment string.
Parameters
----------
old : string
- Previous enviroment string.
+ Previous environment string.
new : string
New environment string.
@@ -29,10 +29,10 @@ def _merge(old, new):
Updated environment string.
"""
- if new in old:
- return old
if not old:
return new
+ if new in old:
+ return old
# Neither new nor old is empty. Give old priority.
return ';'.join([old, new])
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 0fba86589..683b15daa 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -126,6 +126,7 @@ import os
import re
import copy
import warnings
+import atexit
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
@@ -684,9 +685,14 @@ class system_info(object):
return self.get_libs(key, '')
def library_extensions(self):
- static_exts = ['.a']
+ c = distutils.ccompiler.new_compiler()
+ c.customize('')
+ static_exts = []
+ if c.compiler_type != 'msvc':
+ # MSVC doesn't understand binutils
+ static_exts.append('.a')
if sys.platform == 'win32':
- static_exts.append('.lib') # .lib is used by MSVC
+ static_exts.append('.lib') # .lib is used by MSVC and others
if self.search_static_first:
exts = static_exts + [so_ext]
else:
@@ -1739,12 +1745,29 @@ class openblas_info(blas_info):
return True
def calc_info(self):
+ c = distutils.ccompiler.new_compiler()
+ c.customize('')
+
lib_dirs = self.get_lib_dirs()
openblas_libs = self.get_libs('libraries', self._lib_names)
if openblas_libs == self._lib_names: # backward compat with 1.8.0
openblas_libs = self.get_libs('openblas_libs', self._lib_names)
+
info = self.check_libs(lib_dirs, openblas_libs, [])
+
+ if c.compiler_type == "msvc" and info is None:
+ from numpy.distutils.fcompiler import new_fcompiler
+ f = new_fcompiler(c_compiler=c)
+ if f and f.compiler_type == 'gnu95':
+ # Try gfortran-compatible library files
+ info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
+ # Skip lapack check, we'd need build_ext to do it
+ assume_lapack = True
+ elif info:
+ assume_lapack = False
+ info['language'] = 'c'
+
if info is None:
return
@@ -1752,13 +1775,42 @@ class openblas_info(blas_info):
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
- if not self.check_embedded_lapack(info):
+ if not (assume_lapack or self.check_embedded_lapack(info)):
return
- info['language'] = 'c'
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
+ def check_msvc_gfortran_libs(self, library_dirs, libraries):
+ # First, find the full path to each library directory
+ library_paths = []
+ for library in libraries:
+ for library_dir in library_dirs:
+ # MinGW static ext will be .a
+ fullpath = os.path.join(library_dir, library + '.a')
+ if os.path.isfile(fullpath):
+ library_paths.append(fullpath)
+ break
+ else:
+ return None
+
+ # Generate numpy.distutils virtual static library file
+ tmpdir = os.path.join(os.getcwd(), 'build', 'openblas')
+ if not os.path.isdir(tmpdir):
+ os.makedirs(tmpdir)
+
+ info = {'library_dirs': [tmpdir],
+ 'libraries': ['openblas'],
+ 'language': 'f77'}
+
+ fake_lib_file = os.path.join(tmpdir, 'openblas.fobjects')
+ fake_clib_file = os.path.join(tmpdir, 'openblas.cobjects')
+ with open(fake_lib_file, 'w') as f:
+ f.write("\n".join(library_paths))
+ with open(fake_clib_file, 'w') as f:
+ pass
+
+ return info
class openblas_lapack_info(openblas_info):
section = 'openblas'
@@ -1770,6 +1822,7 @@ class openblas_lapack_info(openblas_info):
res = False
c = distutils.ccompiler.new_compiler()
c.customize('')
+
tmpdir = tempfile.mkdtemp()
s = """void zungqr();
int main(int argc, const char *argv[])
@@ -1782,8 +1835,10 @@ class openblas_lapack_info(openblas_info):
# Add the additional "extra" arguments
try:
extra_args = info['extra_link_args']
- except:
+ except Exception:
extra_args = []
+ if sys.version_info < (3, 5) and sys.version_info > (3, 0) and c.compiler_type == "msvc":
+ extra_args.append("/MANIFEST")
try:
with open(src, 'wt') as f:
f.write(s)
diff --git a/numpy/distutils/tests/__init__.py b/numpy/distutils/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/distutils/tests/__init__.py
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index eccc47124..5e7b3f3e8 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -6,7 +6,7 @@ from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
-from numpy.testing import TestCase, run_module_suite, tempdir
+from numpy.testing import run_module_suite, tempdir, assert_
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
@@ -94,94 +94,94 @@ def test_exec_command_stderr():
exec_command.exec_command("cd '.'")
-class TestExecCommand(TestCase):
- def setUp(self):
+class TestExecCommand(object):
+ def setup(self):
self.pyexe = get_pythonexe()
def check_nt(self, **kws):
- s, o = exec_command.exec_command('echo path=%path%')
- self.assertEqual(s, 0)
- self.assertNotEqual(o, '')
+ s, o = exec_command.exec_command('cmd /C echo path=%path%')
+ assert_(s == 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(sys.platform)"' % self.pyexe)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'win32')
+ assert_(s == 0)
+ assert_(o == 'win32')
def check_posix(self, **kws):
s, o = exec_command.exec_command("echo Hello", **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hello')
+ assert_(s == 0)
+ assert_(o == 'Hello')
s, o = exec_command.exec_command('echo $AAA', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
s, o = exec_command.exec_command('echo "$AAA"', AAA='Tere', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Tere')
+ assert_(s == 0)
+ assert_(o == 'Tere')
s, o = exec_command.exec_command('echo "$AAA"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
if 'BBB' not in os.environ:
os.environ['BBB'] = 'Hi'
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hi')
+ assert_(s == 0)
+ assert_(o == 'Hi')
s, o = exec_command.exec_command('echo "$BBB"', BBB='Hey', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hey')
+ assert_(s == 0)
+ assert_(o == 'Hey')
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hi')
+ assert_(s == 0)
+ assert_(o == 'Hi')
del os.environ['BBB']
s, o = exec_command.exec_command('echo "$BBB"', **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '')
+ assert_(s == 0)
+ assert_(o == '')
s, o = exec_command.exec_command('this_is_not_a_command', **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command('echo path=$PATH', **kws)
- self.assertEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s == 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys,os;sys.stderr.write(os.name)"' %
self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'posix')
+ assert_(s == 0)
+ assert_(o == 'posix')
def check_basic(self, *kws):
s, o = exec_command.exec_command(
'"%s" -c "raise \'Ignore me.\'"' % self.pyexe, **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.stderr.write(\'0\');'
'sys.stderr.write(\'1\');sys.stderr.write(\'2\')"' %
self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, '012')
+ assert_(s == 0)
+ assert_(o == '012')
s, o = exec_command.exec_command(
'"%s" -c "import sys;sys.exit(15)"' % self.pyexe, **kws)
- self.assertEqual(s, 15)
- self.assertEqual(o, '')
+ assert_(s == 15)
+ assert_(o == '')
s, o = exec_command.exec_command(
'"%s" -c "print(\'Heipa\'")' % self.pyexe, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Heipa')
+ assert_(s == 0)
+ assert_(o == 'Heipa')
def check_execute_in(self, **kws):
with tempdir() as tmpdir:
@@ -194,13 +194,13 @@ class TestExecCommand(TestCase):
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); f.close()"' %
(self.pyexe, fn), **kws)
- self.assertNotEqual(s, 0)
- self.assertNotEqual(o, '')
+ assert_(s != 0)
+ assert_(o != '')
s, o = exec_command.exec_command(
'"%s" -c "f = open(\'%s\', \'r\'); print(f.read()); '
'f.close()"' % (self.pyexe, fn), execute_in=tmpdir, **kws)
- self.assertEqual(s, 0)
- self.assertEqual(o, 'Hello')
+ assert_(s == 0)
+ assert_(o == 'Hello')
def test_basic(self):
with redirect_stdout(StringIO()):
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index 7ca99db22..659520513 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -1,6 +1,6 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import TestCase, assert_, run_module_suite
+from numpy.testing import assert_, run_module_suite
import numpy.distutils.fcompiler
@@ -26,10 +26,11 @@ gfortran_version_strings = [
'4.9.1'),
("gfortran: warning: couldn't understand kern.osversion '14.1.0\n"
"gfortran: warning: yet another warning\n4.9.1",
- '4.9.1')
+ '4.9.1'),
+ ('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
-class TestG77Versions(TestCase):
+class TestG77Versions(object):
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
@@ -42,7 +43,7 @@ class TestG77Versions(TestCase):
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-class TestGFortranVersions(TestCase):
+class TestGFortranVersions(object):
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py
index 8e371b92b..b13a01788 100644
--- a/numpy/distutils/tests/test_fcompiler_intel.py
+++ b/numpy/distutils/tests/test_fcompiler_intel.py
@@ -1,7 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy.distutils.fcompiler
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_
intel_32bit_version_strings = [
@@ -16,7 +16,7 @@ intel_64bit_version_strings = [
"running on Intel(R) 64, Version 11.1", '11.1')
]
-class TestIntelFCompilerVersions(TestCase):
+class TestIntelFCompilerVersions(object):
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
@@ -24,7 +24,7 @@ class TestIntelFCompilerVersions(TestCase):
assert_(v == version)
-class TestIntelEM64TFCompilerVersions(TestCase):
+class TestIntelEM64TFCompilerVersions(object):
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index f7fcbe224..dd4dbc842 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -6,12 +6,12 @@ from numpy.distutils.misc_util import (
appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info
)
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal
+ run_module_suite, assert_, assert_equal
)
ajoin = lambda *paths: join(*((sep,)+paths))
-class TestAppendpath(TestCase):
+class TestAppendpath(object):
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
@@ -35,7 +35,7 @@ class TestAppendpath(TestCase):
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
-class TestMinrelpath(TestCase):
+class TestMinrelpath(object):
def test_1(self):
n = lambda path: path.replace('/', sep)
@@ -49,7 +49,7 @@ class TestMinrelpath(TestCase):
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
-class TestGpaths(TestCase):
+class TestGpaths(object):
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
@@ -58,7 +58,7 @@ class TestGpaths(TestCase):
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
-class TestSharedExtension(TestCase):
+class TestSharedExtension(object):
def test_get_shared_lib_extension(self):
import sys
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index bdef47167..29891b63b 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
-from numpy.testing import TestCase, run_module_suite, temppath
+from numpy.testing import run_module_suite, temppath, assert_
simple = """\
[meta]
@@ -36,7 +36,7 @@ libs = -L${libdir}
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
-class TestLibraryInfo(TestCase):
+class TestLibraryInfo(object):
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
@@ -44,10 +44,10 @@ class TestLibraryInfo(TestCase):
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_d['cflags'])
- self.assertTrue(out.libs() == simple_d['libflags'])
- self.assertTrue(out.name == simple_d['name'])
- self.assertTrue(out.version == simple_d['version'])
+ assert_(out.cflags() == simple_d['cflags'])
+ assert_(out.libs() == simple_d['libflags'])
+ assert_(out.name == simple_d['name'])
+ assert_(out.version == simple_d['version'])
def test_simple_variable(self):
with temppath('foo.ini') as path:
@@ -56,34 +56,34 @@ class TestLibraryInfo(TestCase):
pkg = os.path.splitext(path)[0]
out = read_config(pkg)
- self.assertTrue(out.cflags() == simple_variable_d['cflags'])
- self.assertTrue(out.libs() == simple_variable_d['libflags'])
- self.assertTrue(out.name == simple_variable_d['name'])
- self.assertTrue(out.version == simple_variable_d['version'])
+ assert_(out.cflags() == simple_variable_d['cflags'])
+ assert_(out.libs() == simple_variable_d['libflags'])
+ assert_(out.name == simple_variable_d['name'])
+ assert_(out.version == simple_variable_d['version'])
out.vars['prefix'] = '/Users/david'
- self.assertTrue(out.cflags() == '-I/Users/david/include')
+ assert_(out.cflags() == '-I/Users/david/include')
-class TestParseFlags(TestCase):
+class TestParseFlags(object):
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
+ assert_(d['include_dirs'] == ['/usr/include'])
d = parse_flags("-I/usr/include -DFOO")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
- self.assertTrue(d['macros'] == ['FOO'])
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
d = parse_flags("-I /usr/include -DFOO")
- self.assertTrue(d['include_dirs'] == ['/usr/include'])
- self.assertTrue(d['macros'] == ['FOO'])
+ assert_(d['include_dirs'] == ['/usr/include'])
+ assert_(d['macros'] == ['FOO'])
def test_simple_lflags(self):
d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
- self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
- self.assertTrue(d['libraries'] == ['foo', 'bar'])
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
- self.assertTrue(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
- self.assertTrue(d['libraries'] == ['foo', 'bar'])
+ assert_(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ assert_(d['libraries'] == ['foo', 'bar'])
if __name__ == '__main__':
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 73b841692..026179d37 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -7,8 +7,9 @@ from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.distutils import ccompiler
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
-from numpy.testing.decorators import skipif
+from numpy.testing import (
+ run_module_suite, assert_, assert_equal, dec
+ )
from numpy.distutils.system_info import system_info, ConfigParser
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
@@ -20,9 +21,9 @@ def get_class(name, notfound_action=1):
1 - display warning message
2 - raise error
"""
- cl = {'temp1': TestTemp1,
- 'temp2': TestTemp2
- }.get(name.lower(), test_system_info)
+ cl = {'temp1': Temp1Info,
+ 'temp2': Temp2Info
+ }.get(name.lower(), _system_info)
return cl()
simple_site = """
@@ -83,7 +84,7 @@ def have_compiler():
HAVE_COMPILER = have_compiler()
-class test_system_info(system_info):
+class _system_info(system_info):
def __init__(self,
default_lib_dirs=default_lib_dirs,
@@ -110,17 +111,19 @@ class test_system_info(system_info):
return info
-class TestTemp1(test_system_info):
+class Temp1Info(_system_info):
+ """For testing purposes"""
section = 'temp1'
-class TestTemp2(test_system_info):
+class Temp2Info(_system_info):
+ """For testing purposes"""
section = 'temp2'
-class TestSystemInfoReading(TestCase):
+class TestSystemInfoReading(object):
- def setUp(self):
+ def setup(self):
""" Create the libraries """
# Create 2 sources and 2 libraries
self._dir1 = mkdtemp()
@@ -162,15 +165,15 @@ class TestSystemInfoReading(TestCase):
# Do each removal separately
try:
shutil.rmtree(self._dir1)
- except:
+ except Exception:
pass
try:
shutil.rmtree(self._dir2)
- except:
+ except Exception:
pass
try:
os.remove(self._sitecfg)
- except:
+ except Exception:
pass
def test_all(self):
@@ -199,7 +202,7 @@ class TestSystemInfoReading(TestCase):
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
- @skipif(not HAVE_COMPILER)
+ @dec.skipif(not HAVE_COMPILER)
def test_compile1(self):
# Compile source and link the first source
c = ccompiler.new_compiler()
@@ -215,8 +218,8 @@ class TestSystemInfoReading(TestCase):
finally:
os.chdir(previousDir)
- @skipif(not HAVE_COMPILER)
- @skipif('msvc' in repr(ccompiler.new_compiler()))
+ @dec.skipif(not HAVE_COMPILER)
+ @dec.skipif('msvc' in repr(ccompiler.new_compiler()))
def test_compile2(self):
# Compile source and link the second source
tsi = self.c_temp2
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index dac236644..4d3ab046e 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -9,36 +9,36 @@ Array types and conversions between types
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
-========== ==========================================================
-Data type Description
-========== ==========================================================
-bool_ Boolean (True or False) stored as a byte
-int_ Default integer type (same as C ``long``; normally either
- ``int64`` or ``int32``)
-intc Identical to C ``int`` (normally ``int32`` or ``int64``)
-intp Integer used for indexing (same as C ``ssize_t``; normally
- either ``int32`` or ``int64``)
-int8 Byte (-128 to 127)
-int16 Integer (-32768 to 32767)
-int32 Integer (-2147483648 to 2147483647)
-int64 Integer (-9223372036854775808 to 9223372036854775807)
-uint8 Unsigned integer (0 to 255)
-uint16 Unsigned integer (0 to 65535)
-uint32 Unsigned integer (0 to 4294967295)
-uint64 Unsigned integer (0 to 18446744073709551615)
-float_ Shorthand for ``float64``.
-float16 Half precision float: sign bit, 5 bits exponent,
- 10 bits mantissa
-float32 Single precision float: sign bit, 8 bits exponent,
- 23 bits mantissa
-float64 Double precision float: sign bit, 11 bits exponent,
- 52 bits mantissa
-complex_ Shorthand for ``complex128``.
-complex64 Complex number, represented by two 32-bit floats (real
- and imaginary components)
-complex128 Complex number, represented by two 64-bit floats (real
- and imaginary components)
-========== ==========================================================
+============ ==========================================================
+Data type Description
+============ ==========================================================
+``bool_`` Boolean (True or False) stored as a byte
+``int_`` Default integer type (same as C ``long``; normally either
+ ``int64`` or ``int32``)
+intc Identical to C ``int`` (normally ``int32`` or ``int64``)
+intp Integer used for indexing (same as C ``ssize_t``; normally
+ either ``int32`` or ``int64``)
+int8 Byte (-128 to 127)
+int16 Integer (-32768 to 32767)
+int32 Integer (-2147483648 to 2147483647)
+int64 Integer (-9223372036854775808 to 9223372036854775807)
+uint8 Unsigned integer (0 to 255)
+uint16 Unsigned integer (0 to 65535)
+uint32 Unsigned integer (0 to 4294967295)
+uint64 Unsigned integer (0 to 18446744073709551615)
+``float_`` Shorthand for ``float64``.
+float16 Half precision float: sign bit, 5 bits exponent,
+ 10 bits mantissa
+float32 Single precision float: sign bit, 8 bits exponent,
+ 23 bits mantissa
+float64 Double precision float: sign bit, 11 bits exponent,
+ 52 bits mantissa
+``complex_`` Shorthand for ``complex128``.
+complex64 Complex number, represented by two 32-bit floats (real
+ and imaginary components)
+complex128 Complex number, represented by two 64-bit floats (real
+ and imaginary components)
+============ ==========================================================
Additionally to ``intc`` the platform dependent C integer types ``short``,
``long``, ``longlong`` and their unsigned versions are defined.
@@ -114,10 +114,10 @@ properties of the type, such as whether it is an integer::
>>> d
dtype('int32')
- >>> np.issubdtype(d, int)
+ >>> np.issubdtype(d, np.integer)
True
- >>> np.issubdtype(d, float)
+ >>> np.issubdtype(d, np.floating)
False
@@ -155,11 +155,11 @@ with 80-bit precision, and while most C compilers provide this as their
``long double`` identical to ``double`` (64 bits). NumPy makes the
compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
-numpy provides with``np.finfo(np.longdouble)``.
+numpy provides with ``np.finfo(np.longdouble)``.
NumPy does not provide a dtype with more precision than C
-``long double``s; in particular, the 128-bit IEEE quad precision
-data type (FORTRAN's ``REAL*16``) is not available.
+``long double``\\s; in particular, the 128-bit IEEE quad precision
+data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
padded with zero bits, either to 96 or 128 bits. Which is more efficient
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
index 8480858d4..babe6a4d7 100644
--- a/numpy/doc/creation.py
+++ b/numpy/doc/creation.py
@@ -58,7 +58,7 @@ examples will be given here: ::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.arange(2, 10, dtype=np.float)
+ >>> np.arange(2, 10, dtype=float)
array([ 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 97b7b3362..794c393f6 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -48,7 +48,7 @@ Glossary
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
- Fast element-wise operations, called `ufuncs`_, operate on arrays.
+ Fast element-wise operations, called :term:`ufuncs`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
@@ -82,7 +82,7 @@ Glossary
array([[4, 5],
[5, 6]])
- See `doc.broadcasting`_ for more information.
+ See `numpy.doc.broadcasting` for more information.
C order
See `row-major`
@@ -155,7 +155,8 @@ Glossary
See `column-major`
flattened
- Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details.
+ Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
+ for details.
immutable
An object that cannot be modified after execution is called
@@ -284,9 +285,9 @@ Glossary
See *array*.
record array
- An `ndarray`_ with `structured data type`_ which has been subclassed as
- np.recarray and whose dtype is of type np.record, making the
- fields of its data type to be accessible by attribute.
+ An :term:`ndarray` with :term:`structured data type`_ which has been
+ subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
+ making the fields of its data type to be accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
@@ -348,10 +349,10 @@ Glossary
>>> x[:, 1]
array([2, 4])
-
+
structured data type
A data type composed of other datatypes
-
+
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 39b2c73ed..b286a904d 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -422,7 +422,7 @@ object: ::
[37, 40, 43],
[46, 49, 52]])
-For this reason it is possible to use the output from the np.where()
+For this reason it is possible to use the output from the np.nonzero()
function directly as an index since it always returns a tuple of index
arrays.
diff --git a/numpy/doc/misc.py b/numpy/doc/misc.py
index 37ebca572..5d6708a0d 100644
--- a/numpy/doc/misc.py
+++ b/numpy/doc/misc.py
@@ -14,7 +14,8 @@ original value was)
Note: cannot use equality to test NaNs. E.g.: ::
>>> myarr = np.array([1., 0., np.nan, 3.])
- >>> np.where(myarr == np.nan)
+ >>> np.nonzero(myarr == np.nan)
+ (array([], dtype=int64),)
>>> np.nan == np.nan # is always False! Use special numpy functions instead.
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 36d8ff97d..c34278868 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -489,6 +489,8 @@ following.
return NotImplemented
if method == 'at':
+ if isinstance(inputs[0], A):
+ inputs[0].info = info
return
if ufunc.nout == 1:
@@ -541,7 +543,7 @@ will be called, but now it sees an ``ndarray`` as the other argument. Likely,
it will know how to handle this, and return a new instance of the ``B`` class
to us. Our example class is not set up to handle this, but it might well be
the best approach if, e.g., one were to re-implement ``MaskedArray`` using
- ``__array_ufunc__``.
+``__array_ufunc__``.
As a final note: if the ``super`` route is suited to a given class, an
advantage of using it is that it helps in constructing class hierarchies.
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index b9b86ba0e..250c4322b 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -69,6 +69,6 @@ def compile(source,
f.close()
return status
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index d27b95947..404bdbd2d 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -552,7 +552,7 @@ class F2PYError(Exception):
pass
-class throw_error:
+class throw_error(object):
def __init__(self, mess):
self.mess = mess
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 5b2e6a9b9..64829d30c 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -328,7 +328,7 @@ def getarrdims(a, var, verbose=0):
ret['size'] = '*'.join(dim)
try:
ret['size'] = repr(eval(ret['size']))
- except:
+ except Exception:
pass
ret['dims'] = ','.join(dim)
ret['rank'] = repr(len(dim))
@@ -485,7 +485,7 @@ def getinit(a, var):
else:
v = eval(v, {}, {})
ret['init.r'], ret['init.i'] = str(v.real), str(v.imag)
- except:
+ except Exception:
raise ValueError(
'getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a))
if isarray(var):
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 1632a0d47..6aeeec823 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -99,8 +99,8 @@ cppmacros['CFUNCSMESS'] = """\
#ifdef DEBUGCFUNCS
#define CFUNCSMESS(mess) fprintf(stderr,\"debug-capi:\"mess);
#define CFUNCSMESSPY(mess,obj) CFUNCSMESS(mess) \\
-\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
-\tfprintf(stderr,\"\\n\");
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
#else
#define CFUNCSMESS(mess)
#define CFUNCSMESSPY(mess,obj)
@@ -219,18 +219,18 @@ cppmacros['SWAPUNSAFE'] = """\
"""
cppmacros['SWAP'] = """\
#define SWAP(a,b,t) {\\
-\tt *c;\\
-\tc = a;\\
-\ta = b;\\
-\tb = c;}
+ t *c;\\
+ c = a;\\
+ a = b;\\
+ b = c;}
"""
# cppmacros['ISCONTIGUOUS']='#define ISCONTIGUOUS(m) (PyArray_FLAGS(m) &
# NPY_ARRAY_C_CONTIGUOUS)'
cppmacros['PRINTPYOBJERR'] = """\
#define PRINTPYOBJERR(obj)\\
-\tfprintf(stderr,\"#modulename#.error is related to \");\\
-\tPyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
-\tfprintf(stderr,\"\\n\");
+ fprintf(stderr,\"#modulename#.error is related to \");\\
+ PyObject_Print((PyObject *)obj,stderr,Py_PRINT_RAW);\\
+ fprintf(stderr,\"\\n\");
"""
cppmacros['MINMAX'] = """\
#ifndef max
@@ -401,59 +401,59 @@ cppmacros['TRYCOMPLEXPYARRAYTEMPLATE'] = """\
"""
# cppmacros['NUMFROMARROBJ']="""\
# define NUMFROMARROBJ(typenum,ctype) \\
-# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
-# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
-# \tif (arr) {\\
-# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
-# \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
-# \t\t\tgoto capi_fail;\\
-# \t\t} else {\\
-# \t\t\t(PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
-# \t\t}\\
-# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
-# \t\treturn 1;\\
-# \t}
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])(PyArray_DATA(arr),1,(char*)v,1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
# """
# XXX: Note that CNUMFROMARROBJ is identical with NUMFROMARROBJ
# cppmacros['CNUMFROMARROBJ']="""\
# define CNUMFROMARROBJ(typenum,ctype) \\
-# \tif (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
-# \telse arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
-# \tif (arr) {\\
-# \t\tif (PyArray_TYPE(arr)==NPY_OBJECT) {\\
-# \t\t\tif (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
-# \t\t\tgoto capi_fail;\\
-# \t\t} else {\\
-# \t\t\t(PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
-# \t\t}\\
-# \t\tif ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
-# \t\treturn 1;\\
-# \t}
+# if (PyArray_Check(obj)) arr = (PyArrayObject *)obj;\\
+# else arr = (PyArrayObject *)PyArray_ContiguousFromObject(obj,typenum,0,0);\\
+# if (arr) {\\
+# if (PyArray_TYPE(arr)==NPY_OBJECT) {\\
+# if (!ctype ## _from_pyobj(v,(PyArray_DESCR(arr)->getitem)(PyArray_DATA(arr)),\"\"))\\
+# goto capi_fail;\\
+# } else {\\
+# (PyArray_DESCR(arr)->cast[typenum])((void *)(PyArray_DATA(arr)),1,(void *)(v),1,1);\\
+# }\\
+# if ((PyObject *)arr != obj) { Py_DECREF(arr); }\\
+# return 1;\\
+# }
# """
needs['GETSTRFROMPYTUPLE'] = ['STRINGCOPYN', 'PRINTPYOBJERR']
cppmacros['GETSTRFROMPYTUPLE'] = """\
#define GETSTRFROMPYTUPLE(tuple,index,str,len) {\\
-\t\tPyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
-\t\tif (rv_cb_str == NULL)\\
-\t\t\tgoto capi_fail;\\
-\t\tif (PyString_Check(rv_cb_str)) {\\
-\t\t\tstr[len-1]='\\0';\\
-\t\t\tSTRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
-\t\t} else {\\
-\t\t\tPRINTPYOBJERR(rv_cb_str);\\
-\t\t\tPyErr_SetString(#modulename#_error,\"string object expected\");\\
-\t\t\tgoto capi_fail;\\
-\t\t}\\
-\t}
+ PyObject *rv_cb_str = PyTuple_GetItem((tuple),(index));\\
+ if (rv_cb_str == NULL)\\
+ goto capi_fail;\\
+ if (PyString_Check(rv_cb_str)) {\\
+ str[len-1]='\\0';\\
+ STRINGCOPYN((str),PyString_AS_STRING((PyStringObject*)rv_cb_str),(len));\\
+ } else {\\
+ PRINTPYOBJERR(rv_cb_str);\\
+ PyErr_SetString(#modulename#_error,\"string object expected\");\\
+ goto capi_fail;\\
+ }\\
+ }
"""
cppmacros['GETSCALARFROMPYTUPLE'] = """\
#define GETSCALARFROMPYTUPLE(tuple,index,var,ctype,mess) {\\
-\t\tif ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
-\t\tif (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
-\t\t\tgoto capi_fail;\\
-\t}
+ if ((capi_tmp = PyTuple_GetItem((tuple),(index)))==NULL) goto capi_fail;\\
+ if (!(ctype ## _from_pyobj((var),capi_tmp,mess)))\\
+ goto capi_fail;\\
+ }
"""
cppmacros['FAILNULL'] = """\\
@@ -471,12 +471,12 @@ cppmacros['MEMCOPY'] = """\
"""
cppmacros['STRINGMALLOC'] = """\
#define STRINGMALLOC(str,len)\\
-\tif ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
-\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
-\t\tgoto capi_fail;\\
-\t} else {\\
-\t\t(str)[len] = '\\0';\\
-\t}
+ if ((str = (string)malloc(sizeof(char)*(len+1))) == NULL) {\\
+ PyErr_SetString(PyExc_MemoryError, \"out of memory\");\\
+ goto capi_fail;\\
+ } else {\\
+ (str)[len] = '\\0';\\
+ }
"""
cppmacros['STRINGFREE'] = """\
#define STRINGFREE(str) do {if (!(str == NULL)) free(str);} while (0)
@@ -504,39 +504,39 @@ cppmacros['STRINGCOPY'] = """\
"""
cppmacros['CHECKGENERIC'] = """\
#define CHECKGENERIC(check,tcheck,name) \\
-\tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKARRAY'] = """\
#define CHECKARRAY(check,tcheck,name) \\
-\tif (!(check)) {\\
-\t\tPyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ PyErr_SetString(#modulename#_error,\"(\"tcheck\") failed for \"name);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKSTRING'] = """\
#define CHECKSTRING(check,tcheck,name,show,var)\\
-\tif (!(check)) {\\
-\t\tchar errstring[256];\\
-\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
-\t\tPyErr_SetString(#modulename#_error, errstring);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, slen(var), var);\\
+ PyErr_SetString(#modulename#_error, errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
cppmacros['CHECKSCALAR'] = """\
#define CHECKSCALAR(check,tcheck,name,show,var)\\
-\tif (!(check)) {\\
-\t\tchar errstring[256];\\
-\t\tsprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
-\t\tPyErr_SetString(#modulename#_error,errstring);\\
-\t\t/*goto capi_fail;*/\\
-\t} else """
+ if (!(check)) {\\
+ char errstring[256];\\
+ sprintf(errstring, \"%s: \"show, \"(\"tcheck\") failed for \"name, var);\\
+ PyErr_SetString(#modulename#_error,errstring);\\
+ /*goto capi_fail;*/\\
+ } else """
# cppmacros['CHECKDIMS']="""\
# define CHECKDIMS(dims,rank) \\
-# \tfor (int i=0;i<(rank);i++)\\
-# \t\tif (dims[i]<0) {\\
-# \t\t\tfprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
-# \t\t\tgoto capi_fail;\\
-# \t\t}
+# for (int i=0;i<(rank);i++)\\
+# if (dims[i]<0) {\\
+# fprintf(stderr,\"Unspecified array argument requires a complete dimension specification.\\n\");\\
+# goto capi_fail;\\
+# }
# """
cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
@@ -549,17 +549,17 @@ cppmacros['OLDPYNUM'] = """\
cfuncs['calcarrindex'] = """\
static int calcarrindex(int *i,PyArrayObject *arr) {
-\tint k,ii = i[0];
-\tfor (k=1; k < PyArray_NDIM(arr); k++)
-\t\tii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
-\treturn ii;
+ int k,ii = i[0];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,k) - 1)+i[k]); /* assuming contiguous arr */
+ return ii;
}"""
cfuncs['calcarrindextr'] = """\
static int calcarrindextr(int *i,PyArrayObject *arr) {
-\tint k,ii = i[PyArray_NDIM(arr)-1];
-\tfor (k=1; k < PyArray_NDIM(arr); k++)
-\t\tii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
-\treturn ii;
+ int k,ii = i[PyArray_NDIM(arr)-1];
+ for (k=1; k < PyArray_NDIM(arr); k++)
+ ii += (ii*(PyArray_DIM(arr,PyArray_NDIM(arr)-k-1) - 1)+i[PyArray_NDIM(arr)-k-1]); /* assuming contiguous arr */
+ return ii;
}"""
cfuncs['forcomb'] = """\
static struct { int nd;npy_intp *d;int *i,*i_tr,tr; } forcombcache;
@@ -604,543 +604,543 @@ static int *nextforcomb(void) {
needs['try_pyarr_from_string'] = ['STRINGCOPYN', 'PRINTPYOBJERR', 'string']
cfuncs['try_pyarr_from_string'] = """\
static int try_pyarr_from_string(PyObject *obj,const string str) {
-\tPyArrayObject *arr = NULL;
-\tif (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
-\t\t{ STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
-\treturn 1;
+ PyArrayObject *arr = NULL;
+ if (PyArray_Check(obj) && (!((arr = (PyArrayObject *)obj) == NULL)))
+ { STRINGCOPYN(PyArray_DATA(arr),str,PyArray_NBYTES(arr)); }
+ return 1;
capi_fail:
-\tPRINTPYOBJERR(obj);
-\tPyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
-\treturn 0;
+ PRINTPYOBJERR(obj);
+ PyErr_SetString(#modulename#_error,\"try_pyarr_from_string failed\");
+ return 0;
}
"""
needs['string_from_pyobj'] = ['string', 'STRINGMALLOC', 'STRINGCOPYN']
cfuncs['string_from_pyobj'] = """\
static int string_from_pyobj(string *str,int *len,const string inistr,PyObject *obj,const char *errmess) {
-\tPyArrayObject *arr = NULL;
-\tPyObject *tmp = NULL;
+ PyArrayObject *arr = NULL;
+ PyObject *tmp = NULL;
#ifdef DEBUGCFUNCS
fprintf(stderr,\"string_from_pyobj(str='%s',len=%d,inistr='%s',obj=%p)\\n\",(char*)str,*len,(char *)inistr,obj);
#endif
-\tif (obj == Py_None) {
-\t\tif (*len == -1)
-\t\t\t*len = strlen(inistr); /* Will this cause problems? */
-\t\tSTRINGMALLOC(*str,*len);
-\t\tSTRINGCOPYN(*str,inistr,*len+1);
-\t\treturn 1;
-\t}
-\tif (PyArray_Check(obj)) {
-\t\tif ((arr = (PyArrayObject *)obj) == NULL)
-\t\t\tgoto capi_fail;
-\t\tif (!ISCONTIGUOUS(arr)) {
-\t\t\tPyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
-\t\t\tgoto capi_fail;
-\t\t}
-\t\tif (*len == -1)
-\t\t\t*len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
-\t\tSTRINGMALLOC(*str,*len);
-\t\tSTRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
-\t\treturn 1;
-\t}
-\tif (PyString_Check(obj)) {
-\t\ttmp = obj;
-\t\tPy_INCREF(tmp);
-\t}
+ if (obj == Py_None) {
+ if (*len == -1)
+ *len = strlen(inistr); /* Will this cause problems? */
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,inistr,*len+1);
+ return 1;
+ }
+ if (PyArray_Check(obj)) {
+ if ((arr = (PyArrayObject *)obj) == NULL)
+ goto capi_fail;
+ if (!ISCONTIGUOUS(arr)) {
+ PyErr_SetString(PyExc_ValueError,\"array object is non-contiguous.\");
+ goto capi_fail;
+ }
+ if (*len == -1)
+ *len = (PyArray_ITEMSIZE(arr))*PyArray_SIZE(arr);
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,PyArray_DATA(arr),*len+1);
+ return 1;
+ }
+ if (PyString_Check(obj)) {
+ tmp = obj;
+ Py_INCREF(tmp);
+ }
#if PY_VERSION_HEX >= 0x03000000
-\telse if (PyUnicode_Check(obj)) {
-\t\ttmp = PyUnicode_AsASCIIString(obj);
-\t}
-\telse {
-\t\tPyObject *tmp2;
-\t\ttmp2 = PyObject_Str(obj);
-\t\tif (tmp2) {
-\t\t\ttmp = PyUnicode_AsASCIIString(tmp2);
-\t\t\tPy_DECREF(tmp2);
-\t\t}
-\t\telse {
-\t\t\ttmp = NULL;
-\t\t}
-\t}
+ else if (PyUnicode_Check(obj)) {
+ tmp = PyUnicode_AsASCIIString(obj);
+ }
+ else {
+ PyObject *tmp2;
+ tmp2 = PyObject_Str(obj);
+ if (tmp2) {
+ tmp = PyUnicode_AsASCIIString(tmp2);
+ Py_DECREF(tmp2);
+ }
+ else {
+ tmp = NULL;
+ }
+ }
#else
-\telse {
-\t\ttmp = PyObject_Str(obj);
-\t}
+ else {
+ tmp = PyObject_Str(obj);
+ }
#endif
-\tif (tmp == NULL) goto capi_fail;
-\tif (*len == -1)
-\t\t*len = PyString_GET_SIZE(tmp);
-\tSTRINGMALLOC(*str,*len);
-\tSTRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
-\tPy_DECREF(tmp);
-\treturn 1;
+ if (tmp == NULL) goto capi_fail;
+ if (*len == -1)
+ *len = PyString_GET_SIZE(tmp);
+ STRINGMALLOC(*str,*len);
+ STRINGCOPYN(*str,PyString_AS_STRING(tmp),*len+1);
+ Py_DECREF(tmp);
+ return 1;
capi_fail:
-\tPy_XDECREF(tmp);
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ Py_XDECREF(tmp);
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['char_from_pyobj'] = ['int_from_pyobj']
cfuncs['char_from_pyobj'] = """\
static int char_from_pyobj(char* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (char)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (char)i;
+ return 1;
+ }
+ return 0;
}
"""
needs['signed_char_from_pyobj'] = ['int_from_pyobj', 'signed_char']
cfuncs['signed_char_from_pyobj'] = """\
static int signed_char_from_pyobj(signed_char* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (signed_char)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (signed_char)i;
+ return 1;
+ }
+ return 0;
}
"""
needs['short_from_pyobj'] = ['int_from_pyobj']
cfuncs['short_from_pyobj'] = """\
static int short_from_pyobj(short* v,PyObject *obj,const char *errmess) {
-\tint i=0;
-\tif (int_from_pyobj(&i,obj,errmess)) {
-\t\t*v = (short)i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ int i=0;
+ if (int_from_pyobj(&i,obj,errmess)) {
+ *v = (short)i;
+ return 1;
+ }
+ return 0;
}
"""
cfuncs['int_from_pyobj'] = """\
static int int_from_pyobj(int* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyInt_Check(obj)) {
-\t\t*v = (int)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Int(obj);
-\tif (tmp) {
-\t\t*v = PyInt_AS_LONG(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyInt_Check(obj)) {
+ *v = (int)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Int(obj);
+ if (tmp) {
+ *v = PyInt_AS_LONG(tmp);
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (int_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
cfuncs['long_from_pyobj'] = """\
static int long_from_pyobj(long* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyInt_Check(obj)) {
-\t\t*v = PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Int(obj);
-\tif (tmp) {
-\t\t*v = PyInt_AS_LONG(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyInt_Check(obj)) {
+ *v = PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Int(obj);
+ if (tmp) {
+ *v = PyInt_AS_LONG(tmp);
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['long_long_from_pyobj'] = ['long_long']
cfuncs['long_long_from_pyobj'] = """\
static int long_long_from_pyobj(long_long* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyLong_Check(obj)) {
-\t\t*v = PyLong_AsLongLong(obj);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PyInt_Check(obj)) {
-\t\t*v = (long_long)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Long(obj);
-\tif (tmp) {
-\t\t*v = PyLong_AsLongLong(tmp);
-\t\tPy_DECREF(tmp);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ PyObject* tmp = NULL;
+ if (PyLong_Check(obj)) {
+ *v = PyLong_AsLongLong(obj);
+ return (!PyErr_Occurred());
+ }
+ if (PyInt_Check(obj)) {
+ *v = (long_long)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ tmp = PyNumber_Long(obj);
+ if (tmp) {
+ *v = PyLong_AsLongLong(tmp);
+ Py_DECREF(tmp);
+ return (!PyErr_Occurred());
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (long_long_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['long_double_from_pyobj'] = ['double_from_pyobj', 'long_double']
cfuncs['long_double_from_pyobj'] = """\
static int long_double_from_pyobj(long_double* v,PyObject *obj,const char *errmess) {
-\tdouble d=0;
-\tif (PyArray_CheckScalar(obj)){
-\t\tif PyArray_IsScalar(obj, LongDouble) {
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t\treturn 1;
-\t\t}
-\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) {
-\t\t\t(*v) = *((npy_longdouble *)PyArray_DATA(obj));
-\t\t\treturn 1;
-\t\t}
-\t}
-\tif (double_from_pyobj(&d,obj,errmess)) {
-\t\t*v = (long_double)d;
-\t\treturn 1;
-\t}
-\treturn 0;
+ double d=0;
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, LongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_LONGDOUBLE) {
+ (*v) = *((npy_longdouble *)PyArray_DATA(obj));
+ return 1;
+ }
+ }
+ if (double_from_pyobj(&d,obj,errmess)) {
+ *v = (long_double)d;
+ return 1;
+ }
+ return 0;
}
"""
cfuncs['double_from_pyobj'] = """\
static int double_from_pyobj(double* v,PyObject *obj,const char *errmess) {
-\tPyObject* tmp = NULL;
-\tif (PyFloat_Check(obj)) {
+ PyObject* tmp = NULL;
+ if (PyFloat_Check(obj)) {
#ifdef __sgi
-\t\t*v = PyFloat_AsDouble(obj);
+ *v = PyFloat_AsDouble(obj);
#else
-\t\t*v = PyFloat_AS_DOUBLE(obj);
+ *v = PyFloat_AS_DOUBLE(obj);
#endif
-\t\treturn 1;
-\t}
-\ttmp = PyNumber_Float(obj);
-\tif (tmp) {
+ return 1;
+ }
+ tmp = PyNumber_Float(obj);
+ if (tmp) {
#ifdef __sgi
-\t\t*v = PyFloat_AsDouble(tmp);
+ *v = PyFloat_AsDouble(tmp);
#else
-\t\t*v = PyFloat_AS_DOUBLE(tmp);
+ *v = PyFloat_AS_DOUBLE(tmp);
#endif
-\t\tPy_DECREF(tmp);
-\t\treturn 1;
-\t}
-\tif (PyComplex_Check(obj))
-\t\ttmp = PyObject_GetAttrString(obj,\"real\");
-\telse if (PyString_Check(obj) || PyUnicode_Check(obj))
-\t\t/*pass*/;
-\telse if (PySequence_Check(obj))
-\t\ttmp = PySequence_GetItem(obj,0);
-\tif (tmp) {
-\t\tPyErr_Clear();
-\t\tif (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
-\t\tPy_DECREF(tmp);
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL) err = #modulename#_error;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ Py_DECREF(tmp);
+ return 1;
+ }
+ if (PyComplex_Check(obj))
+ tmp = PyObject_GetAttrString(obj,\"real\");
+ else if (PyString_Check(obj) || PyUnicode_Check(obj))
+ /*pass*/;
+ else if (PySequence_Check(obj))
+ tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ PyErr_Clear();
+ if (double_from_pyobj(v,tmp,errmess)) {Py_DECREF(tmp); return 1;}
+ Py_DECREF(tmp);
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL) err = #modulename#_error;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['float_from_pyobj'] = ['double_from_pyobj']
cfuncs['float_from_pyobj'] = """\
static int float_from_pyobj(float* v,PyObject *obj,const char *errmess) {
-\tdouble d=0.0;
-\tif (double_from_pyobj(&d,obj,errmess)) {
-\t\t*v = (float)d;
-\t\treturn 1;
-\t}
-\treturn 0;
+ double d=0.0;
+ if (double_from_pyobj(&d,obj,errmess)) {
+ *v = (float)d;
+ return 1;
+ }
+ return 0;
}
"""
needs['complex_long_double_from_pyobj'] = ['complex_long_double', 'long_double',
'complex_double_from_pyobj']
cfuncs['complex_long_double_from_pyobj'] = """\
static int complex_long_double_from_pyobj(complex_long_double* v,PyObject *obj,const char *errmess) {
-\tcomplex_double cd={0.0,0.0};
-\tif (PyArray_CheckScalar(obj)){
-\t\tif PyArray_IsScalar(obj, CLongDouble) {
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t\treturn 1;
-\t\t}
-\t\telse if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
-\t\t\t(*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
-\t\t\t(*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
-\t\t\treturn 1;
-\t\t}
-\t}
-\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
-\t\t(*v).r = (long_double)cd.r;
-\t\t(*v).i = (long_double)cd.i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ complex_double cd={0.0,0.0};
+ if (PyArray_CheckScalar(obj)){
+ if PyArray_IsScalar(obj, CLongDouble) {
+ PyArray_ScalarAsCtype(obj, v);
+ return 1;
+ }
+ else if (PyArray_Check(obj) && PyArray_TYPE(obj)==NPY_CLONGDOUBLE) {
+ (*v).r = ((npy_clongdouble *)PyArray_DATA(obj))->real;
+ (*v).i = ((npy_clongdouble *)PyArray_DATA(obj))->imag;
+ return 1;
+ }
+ }
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (long_double)cd.r;
+ (*v).i = (long_double)cd.i;
+ return 1;
+ }
+ return 0;
}
"""
needs['complex_double_from_pyobj'] = ['complex_double']
cfuncs['complex_double_from_pyobj'] = """\
static int complex_double_from_pyobj(complex_double* v,PyObject *obj,const char *errmess) {
-\tPy_complex c;
-\tif (PyComplex_Check(obj)) {
-\t\tc=PyComplex_AsCComplex(obj);
-\t\t(*v).r=c.real, (*v).i=c.imag;
-\t\treturn 1;
-\t}
-\tif (PyArray_IsScalar(obj, ComplexFloating)) {
-\t\tif (PyArray_IsScalar(obj, CFloat)) {
-\t\t\tnpy_cfloat new;
-\t\t\tPyArray_ScalarAsCtype(obj, &new);
-\t\t\t(*v).r = (double)new.real;
-\t\t\t(*v).i = (double)new.imag;
-\t\t}
-\t\telse if (PyArray_IsScalar(obj, CLongDouble)) {
-\t\t\tnpy_clongdouble new;
-\t\t\tPyArray_ScalarAsCtype(obj, &new);
-\t\t\t(*v).r = (double)new.real;
-\t\t\t(*v).i = (double)new.imag;
-\t\t}
-\t\telse { /* if (PyArray_IsScalar(obj, CDouble)) */
-\t\t\tPyArray_ScalarAsCtype(obj, v);
-\t\t}
-\t\treturn 1;
-\t}
-\tif (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
-\t\tPyObject *arr;
-\t\tif (PyArray_Check(obj)) {
-\t\t\tarr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
-\t\t}
-\t\telse {
-\t\t\tarr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
-\t\t}
-\t\tif (arr==NULL) return 0;
-\t\t(*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
-\t\t(*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
-\t\treturn 1;
-\t}
-\t/* Python does not provide PyNumber_Complex function :-( */
-\t(*v).i=0.0;
-\tif (PyFloat_Check(obj)) {
+ Py_complex c;
+ if (PyComplex_Check(obj)) {
+ c=PyComplex_AsCComplex(obj);
+ (*v).r=c.real, (*v).i=c.imag;
+ return 1;
+ }
+ if (PyArray_IsScalar(obj, ComplexFloating)) {
+ if (PyArray_IsScalar(obj, CFloat)) {
+ npy_cfloat new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else if (PyArray_IsScalar(obj, CLongDouble)) {
+ npy_clongdouble new;
+ PyArray_ScalarAsCtype(obj, &new);
+ (*v).r = (double)new.real;
+ (*v).i = (double)new.imag;
+ }
+ else { /* if (PyArray_IsScalar(obj, CDouble)) */
+ PyArray_ScalarAsCtype(obj, v);
+ }
+ return 1;
+ }
+ if (PyArray_CheckScalar(obj)) { /* 0-dim array or still array scalar */
+ PyObject *arr;
+ if (PyArray_Check(obj)) {
+ arr = PyArray_Cast((PyArrayObject *)obj, NPY_CDOUBLE);
+ }
+ else {
+ arr = PyArray_FromScalar(obj, PyArray_DescrFromType(NPY_CDOUBLE));
+ }
+ if (arr==NULL) return 0;
+ (*v).r = ((npy_cdouble *)PyArray_DATA(arr))->real;
+ (*v).i = ((npy_cdouble *)PyArray_DATA(arr))->imag;
+ return 1;
+ }
+ /* Python does not provide PyNumber_Complex function :-( */
+ (*v).i=0.0;
+ if (PyFloat_Check(obj)) {
#ifdef __sgi
-\t\t(*v).r = PyFloat_AsDouble(obj);
+ (*v).r = PyFloat_AsDouble(obj);
#else
-\t\t(*v).r = PyFloat_AS_DOUBLE(obj);
+ (*v).r = PyFloat_AS_DOUBLE(obj);
#endif
-\t\treturn 1;
-\t}
-\tif (PyInt_Check(obj)) {
-\t\t(*v).r = (double)PyInt_AS_LONG(obj);
-\t\treturn 1;
-\t}
-\tif (PyLong_Check(obj)) {
-\t\t(*v).r = PyLong_AsDouble(obj);
-\t\treturn (!PyErr_Occurred());
-\t}
-\tif (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
-\t\tPyObject *tmp = PySequence_GetItem(obj,0);
-\t\tif (tmp) {
-\t\t\tif (complex_double_from_pyobj(v,tmp,errmess)) {
-\t\t\t\tPy_DECREF(tmp);
-\t\t\t\treturn 1;
-\t\t\t}
-\t\t\tPy_DECREF(tmp);
-\t\t}
-\t}
-\t{
-\t\tPyObject* err = PyErr_Occurred();
-\t\tif (err==NULL)
-\t\t\terr = PyExc_TypeError;
-\t\tPyErr_SetString(err,errmess);
-\t}
-\treturn 0;
+ return 1;
+ }
+ if (PyInt_Check(obj)) {
+ (*v).r = (double)PyInt_AS_LONG(obj);
+ return 1;
+ }
+ if (PyLong_Check(obj)) {
+ (*v).r = PyLong_AsDouble(obj);
+ return (!PyErr_Occurred());
+ }
+ if (PySequence_Check(obj) && !(PyString_Check(obj) || PyUnicode_Check(obj))) {
+ PyObject *tmp = PySequence_GetItem(obj,0);
+ if (tmp) {
+ if (complex_double_from_pyobj(v,tmp,errmess)) {
+ Py_DECREF(tmp);
+ return 1;
+ }
+ Py_DECREF(tmp);
+ }
+ }
+ {
+ PyObject* err = PyErr_Occurred();
+ if (err==NULL)
+ err = PyExc_TypeError;
+ PyErr_SetString(err,errmess);
+ }
+ return 0;
}
"""
needs['complex_float_from_pyobj'] = [
'complex_float', 'complex_double_from_pyobj']
cfuncs['complex_float_from_pyobj'] = """\
static int complex_float_from_pyobj(complex_float* v,PyObject *obj,const char *errmess) {
-\tcomplex_double cd={0.0,0.0};
-\tif (complex_double_from_pyobj(&cd,obj,errmess)) {
-\t\t(*v).r = (float)cd.r;
-\t\t(*v).i = (float)cd.i;
-\t\treturn 1;
-\t}
-\treturn 0;
+ complex_double cd={0.0,0.0};
+ if (complex_double_from_pyobj(&cd,obj,errmess)) {
+ (*v).r = (float)cd.r;
+ (*v).i = (float)cd.i;
+ return 1;
+ }
+ return 0;
}
"""
needs['try_pyarr_from_char'] = ['pyobj_from_char1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n\tTRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
+ 'try_pyarr_from_char'] = 'static int try_pyarr_from_char(PyObject* obj,char* v) {\n TRYPYARRAYTEMPLATE(char,\'c\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'unsigned_char']
cfuncs[
- 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n\tTRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
+ 'try_pyarr_from_unsigned_char'] = 'static int try_pyarr_from_unsigned_char(PyObject* obj,unsigned_char* v) {\n TRYPYARRAYTEMPLATE(unsigned_char,\'b\');\n}\n'
needs['try_pyarr_from_signed_char'] = ['TRYPYARRAYTEMPLATE', 'signed_char']
cfuncs[
- 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n\tTRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
+ 'try_pyarr_from_signed_char'] = 'static int try_pyarr_from_signed_char(PyObject* obj,signed_char* v) {\n TRYPYARRAYTEMPLATE(signed_char,\'1\');\n}\n'
needs['try_pyarr_from_short'] = ['pyobj_from_short1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n\tTRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
+ 'try_pyarr_from_short'] = 'static int try_pyarr_from_short(PyObject* obj,short* v) {\n TRYPYARRAYTEMPLATE(short,\'s\');\n}\n'
needs['try_pyarr_from_int'] = ['pyobj_from_int1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n\tTRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
+ 'try_pyarr_from_int'] = 'static int try_pyarr_from_int(PyObject* obj,int* v) {\n TRYPYARRAYTEMPLATE(int,\'i\');\n}\n'
needs['try_pyarr_from_long'] = ['pyobj_from_long1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n\tTRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
+ 'try_pyarr_from_long'] = 'static int try_pyarr_from_long(PyObject* obj,long* v) {\n TRYPYARRAYTEMPLATE(long,\'l\');\n}\n'
needs['try_pyarr_from_long_long'] = [
'pyobj_from_long_long1', 'TRYPYARRAYTEMPLATE', 'long_long']
cfuncs[
- 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n\tTRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
+ 'try_pyarr_from_long_long'] = 'static int try_pyarr_from_long_long(PyObject* obj,long_long* v) {\n TRYPYARRAYTEMPLATE(long_long,\'L\');\n}\n'
needs['try_pyarr_from_float'] = ['pyobj_from_float1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n\tTRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
+ 'try_pyarr_from_float'] = 'static int try_pyarr_from_float(PyObject* obj,float* v) {\n TRYPYARRAYTEMPLATE(float,\'f\');\n}\n'
needs['try_pyarr_from_double'] = ['pyobj_from_double1', 'TRYPYARRAYTEMPLATE']
cfuncs[
- 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n\tTRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
+ 'try_pyarr_from_double'] = 'static int try_pyarr_from_double(PyObject* obj,double* v) {\n TRYPYARRAYTEMPLATE(double,\'d\');\n}\n'
needs['try_pyarr_from_complex_float'] = [
'pyobj_from_complex_float1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_float']
cfuncs[
- 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
+ 'try_pyarr_from_complex_float'] = 'static int try_pyarr_from_complex_float(PyObject* obj,complex_float* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(float,\'F\');\n}\n'
needs['try_pyarr_from_complex_double'] = [
'pyobj_from_complex_double1', 'TRYCOMPLEXPYARRAYTEMPLATE', 'complex_double']
cfuncs[
- 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n\tTRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
+ 'try_pyarr_from_complex_double'] = 'static int try_pyarr_from_complex_double(PyObject* obj,complex_double* v) {\n TRYCOMPLEXPYARRAYTEMPLATE(double,\'D\');\n}\n'
needs['create_cb_arglist'] = ['CFUNCSMESS', 'PRINTPYOBJERR', 'MINMAX']
cfuncs['create_cb_arglist'] = """\
static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofargs,const int nofoptargs,int *nofargs,PyTupleObject **args,const char *errmess) {
-\tPyObject *tmp = NULL;
-\tPyObject *tmp_fun = NULL;
-\tint tot,opt,ext,siz,i,di=0;
-\tCFUNCSMESS(\"create_cb_arglist\\n\");
-\ttot=opt=ext=siz=0;
-\t/* Get the total number of arguments */
-\tif (PyFunction_Check(fun))
-\t\ttmp_fun = fun;
-\telse {
-\t\tdi = 1;
-\t\tif (PyObject_HasAttrString(fun,\"im_func\")) {
-\t\t\ttmp_fun = PyObject_GetAttrString(fun,\"im_func\");
-\t\t}
-\t\telse if (PyObject_HasAttrString(fun,\"__call__\")) {
-\t\t\ttmp = PyObject_GetAttrString(fun,\"__call__\");
-\t\t\tif (PyObject_HasAttrString(tmp,\"im_func\"))
-\t\t\t\ttmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
-\t\t\telse {
-\t\t\t\ttmp_fun = fun; /* built-in function */
-\t\t\t\ttot = maxnofargs;
-\t\t\t\tif (xa != NULL)
-\t\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
-\t\t\t}
-\t\t\tPy_XDECREF(tmp);
-\t\t}
-\t\telse if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
-\t\t\ttot = maxnofargs;
-\t\t\tif (xa != NULL)
-\t\t\t\ttot += PyTuple_Size((PyObject *)xa);
-\t\t\ttmp_fun = fun;
-\t\t}
-\t\telse if (F2PyCapsule_Check(fun)) {
-\t\t\ttot = maxnofargs;
-\t\t\tif (xa != NULL)
-\t\t\t\text = PyTuple_Size((PyObject *)xa);
-\t\t\tif(ext>0) {
-\t\t\t\tfprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
-\t\t\t\tgoto capi_fail;
-\t\t\t}
-\t\t\ttmp_fun = fun;
-\t\t}
-\t}
+ PyObject *tmp = NULL;
+ PyObject *tmp_fun = NULL;
+ int tot,opt,ext,siz,i,di=0;
+ CFUNCSMESS(\"create_cb_arglist\\n\");
+ tot=opt=ext=siz=0;
+ /* Get the total number of arguments */
+ if (PyFunction_Check(fun))
+ tmp_fun = fun;
+ else {
+ di = 1;
+ if (PyObject_HasAttrString(fun,\"im_func\")) {
+ tmp_fun = PyObject_GetAttrString(fun,\"im_func\");
+ }
+ else if (PyObject_HasAttrString(fun,\"__call__\")) {
+ tmp = PyObject_GetAttrString(fun,\"__call__\");
+ if (PyObject_HasAttrString(tmp,\"im_func\"))
+ tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
+ else {
+ tmp_fun = fun; /* built-in function */
+ tot = maxnofargs;
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ }
+ Py_XDECREF(tmp);
+ }
+ else if (PyFortran_Check(fun) || PyFortran_Check1(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ tot += PyTuple_Size((PyObject *)xa);
+ tmp_fun = fun;
+ }
+ else if (F2PyCapsule_Check(fun)) {
+ tot = maxnofargs;
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ if(ext>0) {
+ fprintf(stderr,\"extra arguments tuple cannot be used with CObject call-back\\n\");
+ goto capi_fail;
+ }
+ tmp_fun = fun;
+ }
+ }
if (tmp_fun==NULL) {
fprintf(stderr,\"Call-back argument must be function|instance|instance.__call__|f2py-function but got %s.\\n\",(fun==NULL?\"NULL\":Py_TYPE(fun)->tp_name));
goto capi_fail;
}
#if PY_VERSION_HEX >= 0x03000000
-\tif (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
-\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
#else
-\tif (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
-\t\tif (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
#endif
-\t\t\ttot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
-\t\tPy_XDECREF(tmp);
-\t}
-\t/* Get the number of optional arguments */
+ tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
+ Py_XDECREF(tmp);
+ }
+ /* Get the number of optional arguments */
#if PY_VERSION_HEX >= 0x03000000
-\tif (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
-\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
+ if (PyObject_HasAttrString(tmp_fun,\"__defaults__\")) {
+ if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"__defaults__\")))
#else
-\tif (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
-\t\tif (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
+ if (PyObject_HasAttrString(tmp_fun,\"func_defaults\")) {
+ if (PyTuple_Check(tmp = PyObject_GetAttrString(tmp_fun,\"func_defaults\")))
#endif
-\t\t\topt = PyTuple_Size(tmp);
-\t\tPy_XDECREF(tmp);
-\t}
-\t/* Get the number of extra arguments */
-\tif (xa != NULL)
-\t\text = PyTuple_Size((PyObject *)xa);
-\t/* Calculate the size of call-backs argument list */
-\tsiz = MIN(maxnofargs+ext,tot);
-\t*nofargs = MAX(0,siz-ext);
+ opt = PyTuple_Size(tmp);
+ Py_XDECREF(tmp);
+ }
+ /* Get the number of extra arguments */
+ if (xa != NULL)
+ ext = PyTuple_Size((PyObject *)xa);
+ /* Calculate the size of call-backs argument list */
+ siz = MIN(maxnofargs+ext,tot);
+ *nofargs = MAX(0,siz-ext);
#ifdef DEBUGCFUNCS
-\tfprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
+ fprintf(stderr,\"debug-capi:create_cb_arglist:maxnofargs(-nofoptargs),tot,opt,ext,siz,nofargs=%d(-%d),%d,%d,%d,%d,%d\\n\",maxnofargs,nofoptargs,tot,opt,ext,siz,*nofargs);
#endif
-\tif (siz<tot-opt) {
-\t\tfprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
-\t\tgoto capi_fail;
-\t}
-\t/* Initialize argument list */
-\t*args = (PyTupleObject *)PyTuple_New(siz);
-\tfor (i=0;i<*nofargs;i++) {
-\t\tPy_INCREF(Py_None);
-\t\tPyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
-\t}
-\tif (xa != NULL)
-\t\tfor (i=(*nofargs);i<siz;i++) {
-\t\t\ttmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
-\t\t\tPy_INCREF(tmp);
-\t\t\tPyTuple_SET_ITEM(*args,i,tmp);
-\t\t}
-\tCFUNCSMESS(\"create_cb_arglist-end\\n\");
-\treturn 1;
+ if (siz<tot-opt) {
+ fprintf(stderr,\"create_cb_arglist: Failed to build argument list (siz) with enough arguments (tot-opt) required by user-supplied function (siz,tot,opt=%d,%d,%d).\\n\",siz,tot,opt);
+ goto capi_fail;
+ }
+ /* Initialize argument list */
+ *args = (PyTupleObject *)PyTuple_New(siz);
+ for (i=0;i<*nofargs;i++) {
+ Py_INCREF(Py_None);
+ PyTuple_SET_ITEM((PyObject *)(*args),i,Py_None);
+ }
+ if (xa != NULL)
+ for (i=(*nofargs);i<siz;i++) {
+ tmp = PyTuple_GetItem((PyObject *)xa,i-(*nofargs));
+ Py_INCREF(tmp);
+ PyTuple_SET_ITEM(*args,i,tmp);
+ }
+ CFUNCSMESS(\"create_cb_arglist-end\\n\");
+ return 1;
capi_fail:
-\tif ((PyErr_Occurred())==NULL)
-\t\tPyErr_SetString(#modulename#_error,errmess);
-\treturn 0;
+ if ((PyErr_Occurred())==NULL)
+ PyErr_SetString(#modulename#_error,errmess);
+ return 0;
}
"""
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 24f9434c4..677f4bae3 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -308,22 +308,21 @@ def is_free_format(file):
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
- f = open(file, 'r')
- line = f.readline()
- n = 15 # the number of non-comment lines to scan for hints
- if _has_f_header(line):
- n = 0
- elif _has_f90_header(line):
- n = 0
- result = 1
- while n > 0 and line:
- if line[0] != '!' and line.strip():
- n -= 1
- if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
- result = 1
- break
+ with open(file, 'r') as f:
line = f.readline()
- f.close()
+ n = 15 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n > 0 and line:
+ if line[0] != '!' and line.strip():
+ n -= 1
+ if (line[0] != '\t' and _free_f90_start(line[:5])) or line[-2:-1] == '&':
+ result = 1
+ break
+ line = f.readline()
return result
@@ -1036,13 +1035,13 @@ def analyzeline(m, case, line):
try:
del groupcache[groupcounter]['vars'][name][
groupcache[groupcounter]['vars'][name]['attrspec'].index('external')]
- except:
+ except Exception:
pass
if block in ['function', 'subroutine']: # set global attributes
try:
groupcache[groupcounter]['vars'][name] = appenddecl(
groupcache[groupcounter]['vars'][name], groupcache[groupcounter - 2]['vars'][''])
- except:
+ except Exception:
pass
if case == 'callfun': # return type
if result and result in groupcache[groupcounter]['vars']:
@@ -1052,7 +1051,7 @@ def analyzeline(m, case, line):
# if groupcounter>1: # name is interfaced
try:
groupcache[groupcounter - 2]['interfaced'].append(name)
- except:
+ except Exception:
pass
if block == 'function':
t = typespattern[0].match(m.group('before') + ' ' + name)
@@ -1174,7 +1173,7 @@ def analyzeline(m, case, line):
for e in markoutercomma(ll).split('@,@'):
try:
k, initexpr = [x.strip() for x in e.split('=')]
- except:
+ except Exception:
outmess(
'analyzeline: could not extract name,expr in parameter statement "%s" of "%s"\n' % (e, ll))
continue
@@ -1251,7 +1250,7 @@ def analyzeline(m, case, line):
if '-' in r:
try:
begc, endc = [x.strip() for x in r.split('-')]
- except:
+ except Exception:
outmess(
'analyzeline: expected "<char>-<char>" instead of "%s" in range list of implicit statement\n' % r)
continue
@@ -1790,7 +1789,7 @@ def setmesstext(block):
try:
filepositiontext = 'In: %s:%s\n' % (block['from'], block['name'])
- except:
+ except Exception:
pass
@@ -2013,7 +2012,7 @@ def analyzecommon(block):
if m.group('dims'):
dims = [x.strip()
for x in markoutercomma(m.group('dims')).split('@,@')]
- n = m.group('name').strip()
+ n = rmbadname1(m.group('name').strip())
if n in block['vars']:
if 'attrspec' in block['vars'][n]:
block['vars'][n]['attrspec'].append(
@@ -2108,7 +2107,7 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset
try:
c = int(myeval(e, {}, {}))
return 0, c, None
- except:
+ except Exception:
pass
if getlincoef_re_1.match(e):
return 1, 0, e
@@ -2150,7 +2149,7 @@ def getlincoef(e, xset): # e = a*x+b ; x in xset
c2 = myeval(ee, {}, {})
if (a * 0.5 + b == c and a * 1.5 + b == c2):
return a, b, x
- except:
+ except Exception:
pass
break
return None, None, None
@@ -2162,11 +2161,11 @@ def getarrlen(dl, args, star='*'):
edl = []
try:
edl.append(myeval(dl[0], {}, {}))
- except:
+ except Exception:
edl.append(dl[0])
try:
edl.append(myeval(dl[1], {}, {}))
- except:
+ except Exception:
edl.append(dl[1])
if isinstance(edl[0], int):
p1 = 1 - edl[0]
@@ -2186,7 +2185,7 @@ def getarrlen(dl, args, star='*'):
d = '%s-(%s)+1' % (dl[1], dl[0])
try:
return repr(myeval(d, {}, {})), None, None
- except:
+ except Exception:
pass
d1, d2 = getlincoef(dl[0], args), getlincoef(dl[1], args)
if None not in [d1[0], d2[0]]:
@@ -2579,7 +2578,7 @@ def analyzevars(block):
l = vars[n]['charselector']['len']
try:
l = str(eval(l, {}, params))
- except:
+ except Exception:
pass
vars[n]['charselector']['len'] = l
@@ -2588,7 +2587,7 @@ def analyzevars(block):
l = vars[n]['kindselector']['kind']
try:
l = str(eval(l, {}, params))
- except:
+ except Exception:
pass
vars[n]['kindselector']['kind'] = l
@@ -2819,7 +2818,7 @@ def analyzevars(block):
try:
kindselect['kind'] = eval(
kindselect['kind'], {}, params)
- except:
+ except Exception:
pass
vars[n]['kindselector'] = kindselect
if charselect:
@@ -3230,7 +3229,7 @@ def vars2fortran(block, vars, args, tab='', as_interface=False):
try:
v = eval(v)
v = '(%s,%s)' % (v.real, v.imag)
- except:
+ except Exception:
pass
vardef = '%s :: %s=%s' % (vardef, a, v)
else:
@@ -3335,8 +3334,7 @@ if __name__ == "__main__":
if pyffilename:
outmess('Writing fortran code to file %s\n' % repr(pyffilename), 0)
pyf = crack2fortran(postlist)
- f = open(pyffilename, 'w')
- f.write(pyf)
- f.close()
+ with open(pyffilename, 'w') as f:
+ f.write(pyf)
if showblocklist:
show(postlist)
diff --git a/numpy/f2py/f2py_testing.py b/numpy/f2py/f2py_testing.py
index c7041fe25..f5d5fa63d 100644
--- a/numpy/f2py/f2py_testing.py
+++ b/numpy/f2py/f2py_testing.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import re
-from numpy.testing.utils import jiffies, memusage
+from numpy.testing import jiffies, memusage
def cmdline():
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 8c8b4ae5d..96b08ea18 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -130,8 +130,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def)
return -1;
}
- p[size] = ')';
- p++;
+ *p++ = ')';
size--;
if (def.data == NULL) {
@@ -591,21 +590,21 @@ static void f2py_report_on_array_copy_fromany(void) {
* $Id: fortranobject.c,v 1.52 2005/07/11 07:44:20 pearu Exp $
*/
+static int check_and_fix_dimensions(const PyArrayObject* arr,
+ const int rank,
+ npy_intp *dims);
+
static int
-count_nonpos(const int rank,
- const npy_intp *dims) {
+count_negative_dimensions(const int rank,
+ const npy_intp *dims) {
int i=0,r=0;
while (i<rank) {
- if (dims[i] <= 0) ++r;
+ if (dims[i] < 0) ++r;
++i;
}
return r;
}
-static int check_and_fix_dimensions(const PyArrayObject* arr,
- const int rank,
- npy_intp *dims);
-
#ifdef DEBUG_COPY_ND_ARRAY
void dump_dims(int rank, npy_intp* dims) {
int i;
@@ -679,7 +678,7 @@ PyArrayObject* array_from_pyobj(const int type_num,
|| ((intent & F2PY_OPTIONAL) && (obj==Py_None))
) {
/* intent(cache), optional, intent(hide) */
- if (count_nonpos(rank,dims)) {
+ if (count_negative_dimensions(rank,dims) > 0) {
int i;
strcpy(mess, "failed to create intent(cache|hide)|optional array"
"-- must have defined dimensions but got (");
@@ -720,8 +719,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
/* intent(cache) */
if (PyArray_ISONESEGMENT(arr)
&& PyArray_ITEMSIZE(arr)>=elsize) {
- if (check_and_fix_dimensions(arr,rank,dims)) {
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
}
if (intent & F2PY_INTENT_OUT)
Py_INCREF(arr);
@@ -742,8 +741,8 @@ PyArrayObject* array_from_pyobj(const int type_num,
/* here we have always intent(in) or intent(inout) or intent(inplace) */
- if (check_and_fix_dimensions(arr,rank,dims)) {
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
}
/*
printf("intent alignement=%d\n", F2PY_GET_ALIGNMENT(intent));
@@ -843,8 +842,9 @@ PyArrayObject* array_from_pyobj(const int type_num,
| NPY_ARRAY_FORCECAST, NULL);
if (arr==NULL)
return NULL;
- if (check_and_fix_dimensions(arr,rank,dims))
- return NULL; /*XXX: set exception */
+ if (check_and_fix_dimensions(arr, rank, dims)) {
+ return NULL;
+ }
return arr;
}
@@ -855,11 +855,16 @@ PyArrayObject* array_from_pyobj(const int type_num,
/*****************************************/
static
-int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *dims) {
+int check_and_fix_dimensions(const PyArrayObject* arr, const int rank, npy_intp *dims)
+{
/*
- This function fills in blanks (that are -1\'s) in dims list using
+ This function fills in blanks (that are -1's) in dims list using
the dimensions from arr. It also checks that non-blank dims will
match with the corresponding values in arr dimensions.
+
+ Returns 0 if the function is successful.
+
+ If an error condition is detected, an exception is set and 1 is returned.
*/
const npy_intp arr_size = (PyArray_NDIM(arr))?PyArray_Size((PyObject *)arr):1;
#ifdef DEBUG_COPY_ND_ARRAY
@@ -877,9 +882,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
d = PyArray_DIM(arr,i);
if (dims[i] >= 0) {
if (d>1 && dims[i]!=d) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT "\n",
- i,dims[i], d);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -890,9 +896,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
}
for(i=PyArray_NDIM(arr);i<rank;++i)
if (dims[i]>1) {
- fprintf(stderr,"%d-th dimension must be %" NPY_INTP_FMT
- " but got 0 (not defined).\n",
- i,dims[i]);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be %" NPY_INTP_FMT
+ " but got 0 (not defined).\n",
+ i, dims[i]);
return 1;
} else if (free_axe<0)
free_axe = i;
@@ -903,9 +910,11 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
new_size *= dims[free_axe];
}
if (new_size != arr_size) {
- fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT
- ", got array with arr_size=%" NPY_INTP_FMT " (maybe too many free"
- " indices)\n", new_size,arr_size);
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT
+ " (maybe too many free indices)\n",
+ new_size, arr_size);
return 1;
}
} else if (rank==PyArray_NDIM(arr)) {
@@ -916,9 +925,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
d = PyArray_DIM(arr,i);
if (dims[i]>=0) {
if (d > 1 && d!=dims[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT "\n",
- i,dims[i],d);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT "\n",
+ i, dims[i], d);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -926,8 +936,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
new_size *= dims[i];
}
if (new_size != arr_size) {
- fprintf(stderr,"unexpected array size: new_size=%" NPY_INTP_FMT
- ", got array with arr_size=%" NPY_INTP_FMT "\n", new_size,arr_size);
+ PyErr_Format(PyExc_ValueError,
+ "unexpected array size: new_size=%" NPY_INTP_FMT
+ ", got array with arr_size=%" NPY_INTP_FMT "\n",
+ new_size, arr_size);
return 1;
}
} else { /* [[1,2]] -> [[1],[2]] */
@@ -939,8 +951,10 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
if (PyArray_DIM(arr,i)>1) ++effrank;
if (dims[rank-1]>=0)
if (effrank>rank) {
- fprintf(stderr,"too many axes: %d (effrank=%d), expected rank=%d\n",
- PyArray_NDIM(arr),effrank,rank);
+ PyErr_Format(PyExc_ValueError,
+ "too many axes: %d (effrank=%d), "
+ "expected rank=%d\n",
+ PyArray_NDIM(arr), effrank, rank);
return 1;
}
@@ -950,9 +964,11 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
else d = PyArray_DIM(arr,j++);
if (dims[i]>=0) {
if (d>1 && d!=dims[i]) {
- fprintf(stderr,"%d-th dimension must be fixed to %" NPY_INTP_FMT
- " but got %" NPY_INTP_FMT " (real index=%d)\n",
- i,dims[i],d,j-1);
+ PyErr_Format(PyExc_ValueError,
+ "%d-th dimension must be fixed to %"
+ NPY_INTP_FMT " but got %" NPY_INTP_FMT
+ " (real index=%d)\n",
+ i, dims[i], d, j-1);
return 1;
}
if (!dims[i]) dims[i] = 1;
@@ -968,13 +984,28 @@ int check_and_fix_dimensions(const PyArrayObject* arr,const int rank,npy_intp *d
}
for (i=0,size=1;i<rank;++i) size *= dims[i];
if (size != arr_size) {
- fprintf(stderr,"unexpected array size: size=%" NPY_INTP_FMT ", arr_size=%" NPY_INTP_FMT
- ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
- size,arr_size,rank,effrank,PyArray_NDIM(arr));
- for (i=0;i<rank;++i) fprintf(stderr," %" NPY_INTP_FMT,dims[i]);
- fprintf(stderr," ], arr.dims=[");
- for (i=0;i<PyArray_NDIM(arr);++i) fprintf(stderr," %" NPY_INTP_FMT,PyArray_DIM(arr,i));
- fprintf(stderr," ]\n");
+ char msg[200];
+ int len;
+ snprintf(msg, sizeof(msg),
+ "unexpected array size: size=%" NPY_INTP_FMT
+ ", arr_size=%" NPY_INTP_FMT
+ ", rank=%d, effrank=%d, arr.nd=%d, dims=[",
+ size, arr_size, rank, effrank, PyArray_NDIM(arr));
+ for (i = 0; i < rank; ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len,
+ " %" NPY_INTP_FMT, dims[i]);
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ], arr.dims=[");
+ for (i = 0; i < PyArray_NDIM(arr); ++i) {
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len,
+ " %" NPY_INTP_FMT, PyArray_DIM(arr, i));
+ }
+ len = strlen(msg);
+ snprintf(msg + len, sizeof(msg) - len, " ]\n");
+ PyErr_SetString(PyExc_ValueError, msg);
return 1;
}
}
diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/f2py/tests/__init__.py
diff --git a/numpy/f2py/tests/src/common/block.f b/numpy/f2py/tests/src/common/block.f
new file mode 100644
index 000000000..7ea7968fe
--- /dev/null
+++ b/numpy/f2py/tests/src/common/block.f
@@ -0,0 +1,11 @@
+ SUBROUTINE INITCB
+ DOUBLE PRECISION LONG
+ CHARACTER STRING
+ INTEGER OK
+
+ COMMON /BLOCK/ LONG, STRING, OK
+ LONG = 1.0
+ STRING = '2'
+ OK = 3
+ RETURN
+ END
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 48bb7c0f4..663fead6a 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -12,12 +12,12 @@ from numpy.testing import (
run_module_suite, assert_, assert_equal, SkipTest
)
from numpy.core.multiarray import typeinfo
-import util
+from . import util
wrap = None
-def setup():
+def setup_module():
"""
Build the required testing extension module
@@ -294,7 +294,7 @@ class Array(object):
return obj_attr[0] == self.arr_attr[0]
-class test_intent(unittest.TestCase):
+class TestIntent(object):
def test_in_out(self):
assert_equal(str(intent.in_.out), 'intent(in,out)')
@@ -305,7 +305,7 @@ class test_intent(unittest.TestCase):
assert_(not intent.in_.is_intent('c'))
-class _test_shared_memory:
+class _test_shared_memory(object):
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
@@ -578,14 +578,12 @@ class _test_shared_memory:
for t in _type_names:
exec('''\
-class test_%s_gen(unittest.TestCase,
- _test_shared_memory
- ):
- def setUp(self):
+class TestGen_%s(_test_shared_memory):
+ def setup(self):
self.type = Type(%r)
array = lambda self,dims,intent,obj: Array(Type(%r),dims,intent,obj)
''' % (t, t, t))
if __name__ == "__main__":
- setup()
+ setup_module()
run_module_suite()
diff --git a/numpy/f2py/tests/test_assumed_shape.py b/numpy/f2py/tests/test_assumed_shape.py
index 725e7f0c1..371aab755 100644
--- a/numpy/f2py/tests/test_assumed_shape.py
+++ b/numpy/f2py/tests/test_assumed_shape.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.testing import run_module_suite, assert_, dec
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
new file mode 100644
index 000000000..c3f9dc856
--- /dev/null
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -0,0 +1,23 @@
+from __future__ import division, absolute_import, print_function
+
+import textwrap
+from . import util
+
+from numpy.testing import run_module_suite, assert_equal
+
+class TestBlockDocString(util.F2PyTest):
+ code = """
+ SUBROUTINE FOO()
+ INTEGER BAR(2, 3)
+
+ COMMON /BLOCK/ BAR
+ RETURN
+ END
+ """
+
+ def test_block_docstring(self):
+ expected = "'i'-array(2,3)\n"
+ assert_equal(self.module.block.__doc__, expected)
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 6824a2042..ea29043ed 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -5,7 +5,7 @@ import textwrap
from numpy import array
from numpy.testing import run_module_suite, assert_, assert_equal, dec
-import util
+from . import util
class TestF77Callback(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_common.py b/numpy/f2py/tests/test_common.py
new file mode 100644
index 000000000..aaa35b678
--- /dev/null
+++ b/numpy/f2py/tests/test_common.py
@@ -0,0 +1,26 @@
+from __future__ import division, absolute_import, print_function
+
+import os
+
+from numpy.testing import run_module_suite, assert_array_equal, dec
+import numpy as np
+from . import util
+
+
+def _path(*a):
+ return os.path.join(*((os.path.dirname(__file__),) + a))
+
+class TestCommonBlock(util.F2PyTest):
+ sources = [_path('src', 'common', 'block.f')]
+
+ def test_common_block(self):
+ self.module.initcb()
+ assert_array_equal(self.module.block.long_bn,
+ np.array(1.0, dtype=np.float64))
+ assert_array_equal(self.module.block.string_bn,
+ np.array('2', dtype='|S1'))
+ assert_array_equal(self.module.block.ok,
+ np.array(3, dtype=np.int32))
+
+if __name__ == "__main__":
+ run_module_suite()
diff --git a/numpy/f2py/tests/test_kind.py b/numpy/f2py/tests/test_kind.py
index 2552234a1..7cfe2e977 100644
--- a/numpy/f2py/tests/test_kind.py
+++ b/numpy/f2py/tests/test_kind.py
@@ -7,7 +7,7 @@ from numpy.f2py.crackfortran import (
_selected_int_kind_func as selected_int_kind,
_selected_real_kind_func as selected_real_kind
)
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 9055083bf..c145a4b23 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -4,7 +4,7 @@ import os
import textwrap
from numpy.testing import run_module_suite, assert_, assert_equal, dec
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index b6891756d..285b693a1 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -6,7 +6,7 @@ import math
import numpy as np
from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index 43a8de350..c34a5781c 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -6,7 +6,7 @@ import math
import numpy as np
from numpy.testing import run_module_suite, dec, assert_raises, assert_equal
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 7704e7d28..217b2c9dd 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.testing import run_module_suite, assert_, dec
-import util
+from . import util
class TestReturnCharacter(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_complex.py b/numpy/f2py/tests/test_return_complex.py
index 88ef83e94..73ced8ed8 100644
--- a/numpy/f2py/tests/test_return_complex.py
+++ b/numpy/f2py/tests/test_return_complex.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnComplex(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_integer.py b/numpy/f2py/tests/test_return_integer.py
index 00033d698..df8fc7c97 100644
--- a/numpy/f2py/tests/test_return_integer.py
+++ b/numpy/f2py/tests/test_return_integer.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnInteger(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_logical.py b/numpy/f2py/tests/test_return_logical.py
index f88a25d7a..221dc3cbd 100644
--- a/numpy/f2py/tests/test_return_logical.py
+++ b/numpy/f2py/tests/test_return_logical.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnLogical(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_return_real.py b/numpy/f2py/tests/test_return_real.py
index 57aa9badf..a81549083 100644
--- a/numpy/f2py/tests/test_return_real.py
+++ b/numpy/f2py/tests/test_return_real.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
-import util
+from . import util
class TestReturnReal(util.F2PyTest):
diff --git a/numpy/f2py/tests/test_size.py b/numpy/f2py/tests/test_size.py
index aeb70486a..1fcad05a5 100644
--- a/numpy/f2py/tests/test_size.py
+++ b/numpy/f2py/tests/test_size.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import os
from numpy.testing import run_module_suite, assert_equal, dec
-import util
+from . import util
def _path(*a):
@@ -15,6 +15,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_all(self):
+ r = self.module.foo([[]])
+ assert_equal(r, [0], repr(r))
+
r = self.module.foo([[1, 2]])
assert_equal(r, [3], repr(r))
@@ -26,6 +29,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_transpose(self):
+ r = self.module.trans([[]])
+ assert_equal(r.T, [[]], repr(r))
+
r = self.module.trans([[1, 2]])
assert_equal(r, [[1], [2]], repr(r))
@@ -34,6 +40,9 @@ class TestSizeSumExample(util.F2PyTest):
@dec.slow
def test_flatten(self):
+ r = self.module.flatten([[]])
+ assert_equal(r, [], repr(r))
+
r = self.module.flatten([[1, 2]])
assert_equal(r, [1, 2], repr(r))
diff --git a/numpy/f2py/tests/test_string.py b/numpy/f2py/tests/test_string.py
index 10022ebb1..065861c0b 100644
--- a/numpy/f2py/tests/test_string.py
+++ b/numpy/f2py/tests/test_string.py
@@ -4,7 +4,7 @@ import os
from numpy.testing import run_module_suite, assert_array_equal, dec
import numpy as np
-import util
+from . import util
def _path(*a):
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index fe608d898..55716a2eb 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -319,7 +319,7 @@ class F2PyTest(object):
module = None
module_name = None
- def setUp(self):
+ def setup(self):
if self.module is not None:
return
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index a1f9e90e0..72d61a728 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -6,6 +6,6 @@ from .info import __doc__
from .fftpack import *
from .helper import *
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/fft/tests/__init__.py b/numpy/fft/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/fft/tests/__init__.py
diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_fftpack.py
index a2cbc0f63..7ac0488e4 100644
--- a/numpy/fft/tests/test_fftpack.py
+++ b/numpy/fft/tests/test_fftpack.py
@@ -2,8 +2,10 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.random import random
-from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
-from numpy.testing import assert_array_equal
+from numpy.testing import (
+ run_module_suite, assert_array_almost_equal, assert_array_equal,
+ assert_raises,
+ )
import threading
import sys
if sys.version_info[0] >= 3:
@@ -19,13 +21,13 @@ def fft1(x):
return np.sum(x*np.exp(phase), axis=1)
-class TestFFTShift(TestCase):
+class TestFFTShift(object):
def test_fft_n(self):
- self.assertRaises(ValueError, np.fft.fft, [1, 2, 3], 0)
+ assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
-class TestFFT1D(TestCase):
+class TestFFT1D(object):
def test_fft(self):
x = random(30) + 1j*random(30)
@@ -145,7 +147,7 @@ class TestFFT1D(TestCase):
assert_array_almost_equal(x_norm,
np.linalg.norm(tmp))
-class TestFFTThreadSafe(TestCase):
+class TestFFTThreadSafe(object):
threads = 16
input_shape = (800, 200)
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index ff56ff63c..f02edf7cc 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -6,13 +6,15 @@ Copied from fftpack.helper by Pearu Peterson, October 2005
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal
+from numpy.testing import (
+ run_module_suite, assert_array_almost_equal, assert_equal,
+ )
from numpy import fft
from numpy import pi
from numpy.fft.helper import _FFTCache
-class TestFFTShift(TestCase):
+class TestFFTShift(object):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -40,7 +42,7 @@ class TestFFTShift(TestCase):
fft.ifftshift(shifted, axes=(0,)))
-class TestFFTFreq(TestCase):
+class TestFFTFreq(object):
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -51,7 +53,7 @@ class TestFFTFreq(TestCase):
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
-class TestRFFTFreq(TestCase):
+class TestRFFTFreq(object):
def test_definition(self):
x = [0, 1, 2, 3, 4]
@@ -62,7 +64,7 @@ class TestRFFTFreq(TestCase):
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
-class TestIRFFTN(TestCase):
+class TestIRFFTN(object):
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
@@ -74,7 +76,7 @@ class TestIRFFTN(TestCase):
fft.irfftn(a, axes=axes)
-class TestFFTCache(TestCase):
+class TestFFTCache(object):
def test_basic_behaviour(self):
c = _FFTCache(max_size_in_mb=1, max_item_count=4)
@@ -90,7 +92,7 @@ class TestFFTCache(TestCase):
np.zeros(2, dtype=np.float32))
# Nothing should be left.
- self.assertEqual(len(c._dict), 0)
+ assert_equal(len(c._dict), 0)
# Now put everything in twice so it can be retrieved once and each will
# still have one item left.
@@ -101,7 +103,7 @@ class TestFFTCache(TestCase):
np.ones(2, dtype=np.float32))
assert_array_almost_equal(c.pop_twiddle_factors(2),
np.zeros(2, dtype=np.float32))
- self.assertEqual(len(c._dict), 2)
+ assert_equal(len(c._dict), 2)
def test_automatic_pruning(self):
# That's around 2600 single precision samples.
@@ -109,27 +111,27 @@ class TestFFTCache(TestCase):
c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))
c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2])
+ assert_equal(list(c._dict.keys()), [1, 2])
# This is larger than the limit but should still be kept.
c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2, 3])
+ assert_equal(list(c._dict.keys()), [1, 2, 3])
# Add one more.
c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))
# The other three should no longer exist.
- self.assertEqual(list(c._dict.keys()), [4])
+ assert_equal(list(c._dict.keys()), [4])
# Now test the max item count pruning.
c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
c.put_twiddle_factors(2, np.empty(2))
c.put_twiddle_factors(1, np.empty(2))
# Can still be accessed.
- self.assertEqual(list(c._dict.keys()), [2, 1])
+ assert_equal(list(c._dict.keys()), [2, 1])
c.put_twiddle_factors(3, np.empty(2))
# 1 and 3 can still be accessed - c[2] has been touched least recently
# and is thus evicted.
- self.assertEqual(list(c._dict.keys()), [1, 3])
+ assert_equal(list(c._dict.keys()), [1, 3])
# One last test. We will add a single large item that is slightly
# bigger then the cache size. Some small items can still be added.
@@ -138,18 +140,18 @@ class TestFFTCache(TestCase):
c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))
c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))
c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [1, 2, 3, 4])
+ assert_equal(list(c._dict.keys()), [1, 2, 3, 4])
# One more big item. This time it is 6 smaller ones but they are
# counted as one big item.
for _ in range(6):
c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))
# '1' no longer in the cache. Rest still in the cache.
- self.assertEqual(list(c._dict.keys()), [2, 3, 4, 5])
+ assert_equal(list(c._dict.keys()), [2, 3, 4, 5])
# Another big item - should now be the only item in the cache.
c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))
- self.assertEqual(list(c._dict.keys()), [6])
+ assert_equal(list(c._dict.keys()), [6])
if __name__ == "__main__":
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index 847a3e896..d85a179dd 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -44,6 +44,6 @@ __all__ += npyio.__all__
__all__ += financial.__all__
__all__ += nanfunctions.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index 304bba3d3..1874c2e97 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -527,7 +527,7 @@ class StringConverter(object):
_mapper.append((nx.int64, int, -1))
_mapper.extend([(nx.floating, float, nx.nan),
- (complex, _bytes_to_complex, nx.nan + 0j),
+ (nx.complexfloating, _bytes_to_complex, nx.nan + 0j),
(nx.longdouble, nx.longdouble, nx.nan),
(nx.string_, bytes, b'???')])
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 2dad99c34..b8966e543 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1208,7 +1208,7 @@ def pad(array, pad_width, mode, **kwargs):
length to the vector argument with padded values replaced. It has the
following signature::
- padding_func(vector, iaxis_pad_width, iaxis, **kwargs)
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
where
@@ -1222,7 +1222,7 @@ def pad(array, pad_width, mode, **kwargs):
the end of vector.
iaxis : int
The axis currently being calculated.
- kwargs : misc
+ kwargs : dict
Any keyword arguments the function requires.
Examples
@@ -1272,21 +1272,27 @@ def pad(array, pad_width, mode, **kwargs):
>>> np.lib.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
- >>> def padwithtens(vector, pad_width, iaxis, kwargs):
- ... vector[:pad_width[0]] = 10
- ... vector[-pad_width[1]:] = 10
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
+ ... pad_value = kwargs.get('padder', 10)
+ ... vector[:pad_width[0]] = pad_value
+ ... vector[-pad_width[1]:] = pad_value
... return vector
-
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
-
- >>> np.lib.pad(a, 2, padwithtens)
+ >>> np.lib.pad(a, 2, pad_with)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
+ >>> np.lib.pad(a, 2, pad_with, padder=100)
+ array([[100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 0, 1, 2, 100, 100],
+ [100, 100, 3, 4, 5, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100],
+ [100, 100, 100, 100, 100, 100, 100]])
"""
if not np.asarray(pad_width).dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
@@ -1407,6 +1413,14 @@ def pad(array, pad_width, mode, **kwargs):
elif mode == 'reflect':
for axis, (pad_before, pad_after) in enumerate(pad_width):
+ if narray.shape[axis] == 0:
+ # Axes with non-zero padding cannot be empty.
+ if pad_before > 0 or pad_after > 0:
+ raise ValueError("There aren't any elements to reflect"
+ " in axis {} of `array`".format(axis))
+ # Skip zero padding on empty axes.
+ continue
+
# Recursive padding along any axis where `pad_amt` is too large
# for indexing tricks. We can only safely pad the original axis
# length, to keep the period of the reflections consistent.
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index d29e555b8..ededb9dd0 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -263,9 +263,9 @@ def _unique1d(ar, return_index=False, return_inverse=False,
else:
ret = (ar,)
if return_index:
- ret += (np.empty(0, np.bool),)
+ ret += (np.empty(0, np.intp),)
if return_inverse:
- ret += (np.empty(0, np.bool),)
+ ret += (np.empty(0, np.intp),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
@@ -375,11 +375,8 @@ def setxor1d(ar1, ar2, assume_unique=False):
return aux
aux.sort()
-# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
-# flag2 = ediff1d( flag ) == 0
- flag2 = flag[1:] == flag[:-1]
- return aux[flag2]
+ return aux[flag[1:] & flag[:-1]]
def in1d(ar1, ar2, assume_unique=False, invert=False):
@@ -454,11 +451,11 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
- mask = np.ones(len(ar1), dtype=np.bool)
+ mask = np.ones(len(ar1), dtype=bool)
for a in ar2:
mask &= (ar1 != a)
else:
- mask = np.zeros(len(ar1), dtype=np.bool)
+ mask = np.zeros(len(ar1), dtype=bool)
for a in ar2:
mask |= (ar1 == a)
return mask
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 14dec01d5..84af2afc8 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -100,9 +100,9 @@ the header data HEADER_LEN.
The next HEADER_LEN bytes form the header data describing the array's
format. It is an ASCII string which contains a Python literal expression
of a dictionary. It is terminated by a newline (``\\n``) and padded with
-spaces (``\\x20``) to make the total length of
-``magic string + 4 + HEADER_LEN`` be evenly divisible by 16 for alignment
-purposes.
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
The dictionary contains three keys:
@@ -163,6 +163,7 @@ else:
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
@@ -304,27 +305,33 @@ def _write_array_header(fp, d, version=None):
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
- # Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a
- # 16-byte boundary. Hopefully, some system, possibly memory-mapping,
- # can take advantage of our premature optimization.
- current_header_len = MAGIC_LEN + 2 + len(header) + 1 # 1 for the newline
- topad = 16 - (current_header_len % 16)
- header = header + ' '*topad + '\n'
header = asbytes(_filter_header(header))
- hlen = len(header)
- if hlen < 256*256 and version in (None, (1, 0)):
+ hlen = len(header) + 1 # 1 for newline
+ padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
+ padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
+
+ # Which version(s) we write depends on the total header size; v1 has a max of 65535
+ if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
version = (1, 0)
- header_prefix = magic(1, 0) + struct.pack('<H', hlen)
- elif hlen < 2**32 and version in (None, (2, 0)):
+ header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
+ topad = padlen_v1
+ elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
version = (2, 0)
- header_prefix = magic(2, 0) + struct.pack('<I', hlen)
+ header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
+ topad = padlen_v2
else:
msg = "Header length %s too big for version=%s"
msg %= (hlen, version)
raise ValueError(msg)
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ header = header + b' '*topad + b'\n'
+
fp.write(header_prefix)
fp.write(header)
return version
@@ -468,18 +475,18 @@ def _read_array_header(fp, version):
# header.
import struct
if version == (1, 0):
- hlength_str = _read_bytes(fp, 2, "array header length")
- header_length = struct.unpack('<H', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<H'
elif version == (2, 0):
- hlength_str = _read_bytes(fp, 4, "array header length")
- header_length = struct.unpack('<I', hlength_str)[0]
- header = _read_bytes(fp, header_length, "array header")
+ hlength_type = '<I'
else:
raise ValueError("Invalid version %r" % version)
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
+ header = _read_bytes(fp, header_length, "array header")
+
# The header is a pretty-printed string representation of a literal
- # Python dictionary with trailing newlines padded to a 16-byte
+ # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
# boundary. The keys are strings.
# "shape" : tuple of int
# "fortran_order" : bool
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 32c999dfc..905e60512 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import collections
-import operator
import re
import sys
import warnings
@@ -16,7 +15,7 @@ from numpy.core.numeric import (
)
from numpy.core.umath import (
pi, multiply, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin,
- mod, exp, log10
+ mod, exp, log10, not_equal, subtract
)
from numpy.core.fromnumeric import (
ravel, nonzero, sort, partition, mean, any, sum
@@ -57,8 +56,6 @@ def rot90(m, k=1, axes=(0,1)):
Rotation direction is from the first towards the second axis.
- .. versionadded:: 1.12.0
-
Parameters
----------
m : array_like
@@ -69,6 +66,8 @@ def rot90(m, k=1, axes=(0,1)):
The array is rotated in the plane defined by the axes.
Axes must be different.
+ .. versionadded:: 1.12.0
+
Returns
-------
y : ndarray
@@ -627,7 +626,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
- >>> np.sum(hist*np.diff(bin_edges))
+ >>> np.sum(hist * np.diff(bin_edges))
1.0
.. versionadded:: 1.11.0
@@ -718,7 +717,7 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
# At this point, if the weights are not integer, floating point, or
# complex, we have to use the slow algorithm.
if weights is not None and not (np.can_cast(weights.dtype, np.double) or
- np.can_cast(weights.dtype, np.complex)):
+ np.can_cast(weights.dtype, complex)):
bins = linspace(mn, mx, bins + 1, endpoint=True)
if not iterable(bins):
@@ -974,7 +973,7 @@ def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
on_edge = (around(sample[:, i], decimal) ==
around(edges[i][-1], decimal))
# Shift these points one bin to the left.
- Ncount[i][where(on_edge & not_smaller_than_edge)[0]] -= 1
+ Ncount[i][nonzero(on_edge & not_smaller_than_edge)[0]] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
@@ -1321,16 +1320,8 @@ def piecewise(x, condlist, funclist, *args, **kw):
x = x[None]
zerod = True
if n == n2 - 1: # compute the "otherwise" condition.
- totlist = np.logical_or.reduce(condlist, axis=0)
- # Only able to stack vertically if the array is 1d or less
- if x.ndim <= 1:
- condlist = np.vstack([condlist, ~totlist])
- else:
- condlist = [asarray(c, dtype=bool) for c in condlist]
- totlist = condlist[0]
- for k in range(1, n):
- totlist |= condlist[k]
- condlist.append(~totlist)
+ condelse = ~np.any(condlist, axis=0, keepdims=True)
+ condlist = np.concatenate([condlist, condelse], axis=0)
n += 1
y = zeros(x.shape, x.dtype)
@@ -1550,7 +1541,7 @@ def gradient(f, *varargs, **kwargs):
Examples
--------
- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
+ >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
>>> np.gradient(f)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
@@ -1566,7 +1557,7 @@ def gradient(f, *varargs, **kwargs):
Or a non uniform one:
- >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
+ >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
>>> np.gradient(f, x)
array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
@@ -1574,7 +1565,7 @@ def gradient(f, *varargs, **kwargs):
axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
@@ -1584,7 +1575,7 @@ def gradient(f, *varargs, **kwargs):
>>> dx = 2.
>>> y = [1., 1.5, 3.5]
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), dx, y)
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
[array([[ 1. , 1. , -0.5],
[ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
[ 2. , 1.7, 0.5]])]
@@ -1601,7 +1592,7 @@ def gradient(f, *varargs, **kwargs):
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
- >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0)
array([[ 2., 2., -1.],
[ 2., 2., -1.]])
@@ -1728,33 +1719,27 @@ def gradient(f, *varargs, **kwargs):
slice3 = [slice(None)]*N
slice4 = [slice(None)]*N
- otype = f.dtype.char
- if otype not in ['f', 'd', 'F', 'D', 'm', 'M']:
- otype = 'd'
-
- # Difference of datetime64 elements results in timedelta64
- if otype == 'M':
- # Need to use the full dtype name because it contains unit information
- otype = f.dtype.name.replace('datetime', 'timedelta')
- elif otype == 'm':
- # Needs to keep the specific units, can't be a general unit
- otype = f.dtype
-
- # Convert datetime64 data into ints. Make dummy variable `y`
- # that is a view of ints if the data is datetime64, otherwise
- # just set y equal to the array `f`.
- if f.dtype.char in ["M", "m"]:
- y = f.view('int64')
+ otype = f.dtype
+ if otype.type is np.datetime64:
+ # the timedelta dtype with the same unit information
+ otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
+ # view as timedelta to allow addition
+ f = f.view(otype)
+ elif otype.type is np.timedelta64:
+ pass
+ elif np.issubdtype(otype, np.inexact):
+ pass
else:
- y = f
+ # all other types convert to floating point
+ otype = np.double
for i, axis in enumerate(axes):
- if y.shape[axis] < edge_order + 1:
+ if f.shape[axis] < edge_order + 1:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least (edge_order + 1) elements are required.")
# result allocation
- out = np.empty_like(y, dtype=otype)
+ out = np.empty_like(f, dtype=otype)
uniform_spacing = np.ndim(dx[i]) == 0
@@ -1785,15 +1770,15 @@ def gradient(f, *varargs, **kwargs):
slice2[axis] = 1
slice3[axis] = 0
dx_0 = dx[i] if uniform_spacing else dx[i][0]
- # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
- out[slice1] = (y[slice2] - y[slice3]) / dx_0
+ # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
+ out[slice1] = (f[slice2] - f[slice3]) / dx_0
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
dx_n = dx[i] if uniform_spacing else dx[i][-1]
- # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
- out[slice1] = (y[slice2] - y[slice3]) / dx_n
+ # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
+ out[slice1] = (f[slice2] - f[slice3]) / dx_n
# Numerical differentiation: 2nd order edges
else:
@@ -1811,8 +1796,8 @@ def gradient(f, *varargs, **kwargs):
a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
b = (dx1 + dx2) / (dx1 * dx2)
c = - dx1 / (dx2 * (dx1 + dx2))
- # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
- out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+ # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
+ out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
slice1[axis] = -1
slice2[axis] = -3
@@ -1829,7 +1814,7 @@ def gradient(f, *varargs, **kwargs):
b = - (dx2 + dx1) / (dx1 * dx2)
c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
# 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
- out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+ out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
outvals.append(out)
@@ -1847,7 +1832,7 @@ def gradient(f, *varargs, **kwargs):
def diff(a, n=1, axis=-1):
"""
- Calculate the n-th discrete difference along given axis.
+ Calculate the n-th discrete difference along the given axis.
The first difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher differences are calculated by using `diff`
@@ -1858,16 +1843,21 @@ def diff(a, n=1, axis=-1):
a : array_like
Input array
n : int, optional
- The number of times values are differenced.
+ The number of times values are differenced. If zero, the input
+ is returned as-is.
axis : int, optional
- The axis along which the difference is taken, default is the last axis.
+ The axis along which the difference is taken, default is the
+ last axis.
Returns
-------
diff : ndarray
The n-th differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`. The
- type of the output is the same as that of the input.
+ type of the output is the same as the type of the difference
+ between any two elements of `a`. This is the same as the type of
+ `a` in most cases. A notable exception is `datetime64`, which
+ results in a `timedelta64` output array.
See Also
--------
@@ -1875,13 +1865,13 @@ def diff(a, n=1, axis=-1):
Notes
-----
- For boolean arrays, the preservation of type means that the result
- will contain `False` when consecutive elements are the same and
- `True` when they differ.
+ Type is preserved for boolean arrays, so the result will contain
+ `False` when consecutive elements are the same and `True` when they
+ differ.
- For unsigned integer arrays, the results will also be unsigned. This should
- not be surprising, as the result is consistent with calculating the
- difference directly:
+ For unsigned integer arrays, the results will also be unsigned. This
+ should not be surprising, as the result is consistent with
+ calculating the difference directly:
>>> u8_arr = np.array([1, 0], dtype=np.uint8)
>>> np.diff(u8_arr)
@@ -1889,8 +1879,8 @@ def diff(a, n=1, axis=-1):
>>> u8_arr[1,...] - u8_arr[0,...]
array(255, np.uint8)
- If this is not desirable, then the array should be cast to a larger integer
- type first:
+ If this is not desirable, then the array should be cast to a larger
+ integer type first:
>>> i16_arr = u8_arr.astype(np.int16)
>>> np.diff(i16_arr)
@@ -1911,24 +1901,33 @@ def diff(a, n=1, axis=-1):
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
+ >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ >>> np.diff(x)
+ array([1, 1], dtype='timedelta64[D]')
+
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
+
a = asanyarray(a)
nd = a.ndim
- slice1 = [slice(None)]*nd
- slice2 = [slice(None)]*nd
+ axis = normalize_axis_index(axis, nd)
+
+ slice1 = [slice(None)] * nd
+ slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
- if n > 1:
- return diff(a[slice1]-a[slice2], n-1, axis=axis)
- else:
- return a[slice1]-a[slice2]
+
+ op = not_equal if a.dtype == np.bool_ else subtract
+ for _ in range(n):
+ a = op(a[slice1], a[slice2])
+
+ return a
def interp(x, xp, fp, left=None, right=None, period=None):
@@ -2074,6 +2073,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
else:
return interp_func(x, xp, fp, left, right).item()
+
def angle(z, deg=0):
"""
Return the angle of the complex argument.
@@ -2096,8 +2096,6 @@ def angle(z, deg=0):
arctan2
absolute
-
-
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
@@ -2607,7 +2605,7 @@ class vectorize(object):
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
- >>> vfunc = np.vectorize(myfunc, otypes=[np.float])
+ >>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
@@ -2987,7 +2985,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
- >>> X = np.vstack((x,y))
+ >>> X = np.stack((x, y), axis=0)
>>> print(np.cov(X))
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
@@ -3025,7 +3023,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
y = array(y, copy=False, ndmin=2, dtype=dtype)
if not rowvar and y.shape[0] != 1:
y = y.T
- X = np.vstack((X, y))
+ X = np.concatenate((X, y), axis=0)
if ddof is None:
if bias == 0:
@@ -3036,7 +3034,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
# Get the product of frequencies and weights
w = None
if fweights is not None:
- fweights = np.asarray(fweights, dtype=np.float)
+ fweights = np.asarray(fweights, dtype=float)
if not np.all(fweights == np.around(fweights)):
raise TypeError(
"fweights must be integer")
@@ -3051,7 +3049,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
"fweights cannot be negative")
w = fweights
if aweights is not None:
- aweights = np.asarray(aweights, dtype=np.float)
+ aweights = np.asarray(aweights, dtype=float)
if aweights.ndim > 1:
raise RuntimeError(
"cannot handle multidimensional aweights")
@@ -4010,8 +4008,9 @@ def _ureduce(a, func, **kwargs):
# merge reduced axis
a = a.reshape(a.shape[:nkeep] + (-1,))
kwargs['axis'] = -1
+ keepdim = tuple(keepdim)
else:
- keepdim = [1] * a.ndim
+ keepdim = (1,) * a.ndim
r = func(a, **kwargs)
return r, keepdim
@@ -4273,10 +4272,7 @@ def percentile(a, q, axis=None, out=None,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims:
- if q.ndim == 0:
- return r.reshape(k)
- else:
- return r.reshape([len(q)] + k)
+ return r.reshape(q.shape + k)
else:
return r
@@ -4345,7 +4341,7 @@ def _percentile(a, q, axis=None, out=None,
ap.partition(indices, axis=axis)
# ensure axis with qth is first
- ap = np.rollaxis(ap, axis, 0)
+ ap = np.moveaxis(ap, axis, 0)
axis = 0
# Check if the array contains any nan's
@@ -4378,9 +4374,9 @@ def _percentile(a, q, axis=None, out=None,
ap.partition(concatenate((indices_below, indices_above)), axis=axis)
# ensure axis with qth is first
- ap = np.rollaxis(ap, axis, 0)
- weights_below = np.rollaxis(weights_below, axis, 0)
- weights_above = np.rollaxis(weights_above, axis, 0)
+ ap = np.moveaxis(ap, axis, 0)
+ weights_below = np.moveaxis(weights_below, axis, 0)
+ weights_above = np.moveaxis(weights_above, axis, 0)
axis = 0
# Check if the array contains any nan's
@@ -4392,8 +4388,8 @@ def _percentile(a, q, axis=None, out=None,
x2 = take(ap, indices_above, axis=axis) * weights_above
# ensure axis with qth is first
- x1 = np.rollaxis(x1, axis, 0)
- x2 = np.rollaxis(x2, axis, 0)
+ x1 = np.moveaxis(x1, axis, 0)
+ x2 = np.moveaxis(x2, axis, 0)
if zerod:
x1 = x1.squeeze(0)
@@ -4546,7 +4542,7 @@ def add_newdoc(place, obj, doc):
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new, val[0]), val[1].strip())
- except:
+ except Exception:
pass
@@ -5049,7 +5045,7 @@ def insert(arr, obj, values, axis=None):
# broadcasting is very different here, since a[:,0,:] = ... behaves
# very different from a[:,[0],:] = ...! This changes values so that
# it works likes the second case. (here a[:,0:1,:])
- values = np.rollaxis(values, 0, (axis % values.ndim) + 1)
+ values = np.moveaxis(values, 0, axis)
numnew = values.shape[axis]
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arrorder)
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 003774ce2..650b37f25 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -299,7 +299,7 @@ class AxisConcatenator(object):
if len(vec) == 3:
trans1d = int(vec[2])
continue
- except:
+ except Exception:
raise ValueError("unknown special directive")
try:
axis = int(item)
@@ -842,7 +842,7 @@ def diag_indices(n, ndim=2):
And use it to set the diagonal of an array of zeros to 1:
- >>> a = np.zeros((2, 2, 2), dtype=np.int)
+ >>> a = np.zeros((2, 2, 2), dtype=int)
>>> a[d3] = 1
>>> a
array([[[1, 0],
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 1e342b932..ffedcd68a 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -106,6 +106,46 @@ def _copyto(a, val, mask):
return a
+def _remove_nan_1d(arr1d, overwrite_input=False):
+ """
+ Equivalent to arr1d[~arr1d.isnan()], but in a different order
+
+ Presumably faster as it incurs fewer copies
+
+ Parameters
+ ----------
+ arr1d : ndarray
+ Array to remove nans from
+ overwrite_input : bool
+ True if `arr1d` can be modified in place
+
+ Returns
+ -------
+ res : ndarray
+ Array with nan elements removed
+ overwrite_input : bool
+ True if `res` can be modified in place, given the constraint on the
+ input
+ """
+
+ c = np.isnan(arr1d)
+ s = np.nonzero(c)[0]
+ if s.size == arr1d.size:
+ warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+ return arr1d[:0], True
+ elif s.size == 0:
+ return arr1d, overwrite_input
+ else:
+ if not overwrite_input:
+ arr1d = arr1d.copy()
+ # select non-nans at end of array
+ enonan = arr1d[-s.size:][~c[-s.size:]]
+ # fill nans in beginning of array with non-nans of end
+ arr1d[s[:enonan.size]] = enonan
+
+ return arr1d[:-s.size], True
+
+
def _divide_by_count(a, b, out=None):
"""
Compute a/b ignoring invalid results. If `a` is an array the division
@@ -554,7 +594,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Parameters
----------
a : array_like
- Array containing numbers whose sum is desired. If `a` is not an
+ Array containing numbers whose product is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the product is computed. The default is to compute
@@ -836,24 +876,12 @@ def _nanmedian1d(arr1d, overwrite_input=False):
Private function for rank 1 arrays. Compute the median ignoring NaNs.
See nanmedian for parameter usage
"""
- c = np.isnan(arr1d)
- s = np.where(c)[0]
- if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
+ overwrite_input=overwrite_input)
+ if arr1d.size == 0:
return np.nan
- elif s.size == 0:
- return np.median(arr1d, overwrite_input=overwrite_input)
- else:
- if overwrite_input:
- x = arr1d
- else:
- x = arr1d.copy()
- # select non-nans at end of array
- enonan = arr1d[-s.size:][~c[-s.size:]]
- # fill nans in beginning of array with non-nans of end
- x[s[:enonan.size]] = enonan
- # slice nans away
- return np.median(x[:-s.size], overwrite_input=True)
+
+ return np.median(arr1d, overwrite_input=overwrite_input)
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
@@ -1088,7 +1116,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
>>> a[0][1] = np.nan
>>> a
array([[ 10., nan, 4.],
- [ 3., 2., 1.]])
+ [ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
@@ -1123,10 +1151,7 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
overwrite_input=overwrite_input,
interpolation=interpolation)
if keepdims and keepdims is not np._NoValue:
- if q.ndim == 0:
- return r.reshape(k)
- else:
- return r.reshape([len(q)] + k)
+ return r.reshape(q.shape + k)
else:
return r
@@ -1149,7 +1174,7 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
# Move that axis to the beginning to match percentile's
# convention.
if q.ndim != 0:
- result = np.rollaxis(result, axis)
+ result = np.moveaxis(result, axis, 0)
if out is not None:
out[...] = result
@@ -1158,34 +1183,16 @@ def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
def _nanpercentile1d(arr1d, q, overwrite_input=False, interpolation='linear'):
"""
- Private function for rank 1 arrays. Compute percentile ignoring
- NaNs.
-
+ Private function for rank 1 arrays. Compute percentile ignoring NaNs.
See nanpercentile for parameter usage
"""
- c = np.isnan(arr1d)
- s = np.where(c)[0]
- if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
- if q.ndim == 0:
- return np.nan
- else:
- return np.nan * np.ones((len(q),))
- elif s.size == 0:
- return np.percentile(arr1d, q, overwrite_input=overwrite_input,
- interpolation=interpolation)
- else:
- if overwrite_input:
- x = arr1d
- else:
- x = arr1d.copy()
- # select non-nans at end of array
- enonan = arr1d[-s.size:][~c[-s.size:]]
- # fill nans in beginning of array with non-nans of end
- x[s[:enonan.size]] = enonan
- # slice nans away
- return np.percentile(x[:-s.size], q, overwrite_input=True,
- interpolation=interpolation)
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
+ overwrite_input=overwrite_input)
+ if arr1d.size == 0:
+ return np.full(q.shape, np.nan)[()] # convert to scalar
+
+ return np.percentile(arr1d, q, overwrite_input=overwrite_input,
+ interpolation=interpolation)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index dc1c951e7..7598b2c6b 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -424,7 +424,7 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
- except:
+ except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
@@ -443,6 +443,8 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the file name if it does not already
have one.
+ arr : array_like
+ Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
@@ -456,8 +458,6 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
- arr : array_like
- Array data to be saved.
See Also
--------
@@ -737,7 +737,7 @@ def _getconv(dtype):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
- elif issubclass(typ, np.complex):
+ elif issubclass(typ, complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return asbytes
@@ -1014,7 +1014,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if len(vals) == 0:
continue
if usecols:
- vals = [vals[i] for i in usecols]
+ vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
@@ -1071,7 +1071,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
- X : array_like
+ X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
@@ -1201,7 +1201,10 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
X = np.asarray(X)
# Handle 1-dimensional arrays
- if X.ndim == 1:
+ if X.ndim == 0 or X.ndim > 2:
+ raise ValueError(
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+ elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
@@ -1902,16 +1905,16 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
- (ddtype, mdtype) = (list(base)[0], np.bool)
+ (ddtype, mdtype) = (list(base)[0], bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
- mdtype = [(defaultfmt % i, np.bool)
+ mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
- mdtype = list(zip(names, [np.bool] * len(column_types)))
+ mdtype = list(zip(names, [bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
@@ -1937,7 +1940,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
- masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
@@ -1968,9 +1971,9 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
output = np.array(data, dtype)
if usemask:
if dtype.names:
- mdtype = [(_, np.bool) for _ in dtype.names]
+ mdtype = [(_, bool) for _ in dtype.names]
else:
- mdtype = np.bool
+ mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index b9542e848..e9ba38f46 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -70,6 +70,37 @@ def recursive_fill_fields(input, output):
return output
+def get_fieldspec(dtype):
+ """
+ Produce a list of name/dtype pairs corresponding to the dtype fields
+
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
+ string. As a result, this handles subarray dtypes
+
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
+ this (deliberately) discards field offsets.
+
+ Examples
+ --------
+ >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
+ >>> dt.descr
+ [(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
+ >>> get_fieldspec(dt)
+ [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
+
+ """
+ if dtype.names is None:
+ # .descr returns a nameless field, so we should too
+ return [('', dtype)]
+ else:
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
+ # keep any titles, if present
+ return [
+ (name if len(f) == 2 else (f[2], name), f[0])
+ for name, f in fields
+ ]
+
+
def get_names(adtype):
"""
Returns the field names of the input datatype as a tuple.
@@ -146,7 +177,7 @@ def flatten_descr(ndtype):
"""
names = ndtype.names
if names is None:
- return ndtype.descr
+ return (('', ndtype),)
else:
descr = []
for field in names:
@@ -158,6 +189,22 @@ def flatten_descr(ndtype):
return tuple(descr)
+def zip_dtype(seqarrays, flatten=False):
+ newdtype = []
+ if flatten:
+ for a in seqarrays:
+ newdtype.extend(flatten_descr(a.dtype))
+ else:
+ for a in seqarrays:
+ current = a.dtype
+ if current.names and len(current.names) <= 1:
+ # special case - dtypes of 0 or 1 field are flattened
+ newdtype.extend(get_fieldspec(current))
+ else:
+ newdtype.append(('', current))
+ return np.dtype(newdtype)
+
+
def zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -169,19 +216,7 @@ def zip_descr(seqarrays, flatten=False):
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
- newdtype = []
- if flatten:
- for a in seqarrays:
- newdtype.extend(flatten_descr(a.dtype))
- else:
- for a in seqarrays:
- current = a.dtype
- names = current.names or ()
- if len(names) > 1:
- newdtype.append(('', current.descr))
- else:
- newdtype.extend(current.descr)
- return np.dtype(newdtype).descr
+ return zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
@@ -376,13 +411,12 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
# Do we have a single ndarray as input ?
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
- if (not flatten) or \
- (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
+ # Make sure we have named fields
+ if not seqdtype.names:
+ seqdtype = np.dtype([('', seqdtype)])
+ if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
- # Make sure we have named fields
- if not seqdtype.names:
- seqdtype = [('', seqdtype)]
# Find what type of array we must return
if usemask:
if asrecarray:
@@ -403,7 +437,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
- newdtype = zip_descr(seqarrays, flatten=flatten)
+ newdtype = zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
@@ -655,8 +689,9 @@ def append_fields(base, names, data, dtypes=None,
else:
data = data.pop()
#
- output = ma.masked_all(max(len(base), len(data)),
- dtype=base.dtype.descr + data.dtype.descr)
+ output = ma.masked_all(
+ max(len(base), len(data)),
+ dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
@@ -746,25 +781,21 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
- newdescr = dtype_l.descr
- names = [_[0] for _ in newdescr]
+ newdescr = get_fieldspec(dtype_l)
+ names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
- for descr in dtype_n.descr:
- name = descr[0] or ''
- if name not in names:
- newdescr.append(descr)
- names.append(name)
+ for fname, fdtype in get_fieldspec(dtype_n):
+ if fname not in names:
+ newdescr.append((fname, fdtype))
+ names.append(fname)
else:
- nameidx = names.index(name)
- current_descr = newdescr[nameidx]
+ nameidx = names.index(fname)
+ _, cdtype = newdescr[nameidx]
if autoconvert:
- if np.dtype(descr[1]) > np.dtype(current_descr[-1]):
- current_descr = list(current_descr)
- current_descr[-1] = descr[1]
- newdescr[nameidx] = tuple(current_descr)
- elif descr[1] != current_descr[-1]:
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
+ elif fdtype != cdtype:
raise TypeError("Incompatible type '%s' <> '%s'" %
- (dict(newdescr)[name], descr[1]))
+ (cdtype, fdtype))
# Only one field: use concatenate
if len(newdescr) == 1:
output = ma.concatenate(seqarrays)
@@ -920,10 +951,10 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
(r1names, r2names) = (r1.dtype.names, r2.dtype.names)
# Check the names for collision
- if (set.intersection(set(r1names), set(r2names)).difference(key) and
- not (r1postfix or r2postfix)):
+ collisions = (set(r1names) & set(r2names)) - set(key)
+ if collisions and not (r1postfix or r2postfix):
msg = "r1 and r2 contain common names, r1postfix and r2postfix "
- msg += "can't be empty"
+ msg += "can't both be empty"
raise ValueError(msg)
# Make temporary arrays of just the keys
@@ -960,32 +991,38 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
#
# Build the new description of the output array .......
# Start with the key fields
- ndtype = [list(_) for _ in r1k.dtype.descr]
- # Add the other fields
- ndtype.extend(list(_) for _ in r1.dtype.descr if _[0] not in key)
- # Find the new list of names (it may be different from r1names)
- names = list(_[0] for _ in ndtype)
- for desc in r2.dtype.descr:
- desc = list(desc)
- name = desc[0]
+ ndtype = get_fieldspec(r1k.dtype)
+
+ # Add the fields from r1
+ for fname, fdtype in get_fieldspec(r1.dtype):
+ if fname not in key:
+ ndtype.append((fname, fdtype))
+
+ # Add the fields from r2
+ for fname, fdtype in get_fieldspec(r2.dtype):
# Have we seen the current name already ?
- if name in names:
- nameidx = ndtype.index(desc)
- current = ndtype[nameidx]
- # The current field is part of the key: take the largest dtype
- if name in key:
- current[-1] = max(desc[1], current[-1])
- # The current field is not part of the key: add the suffixes
- else:
- current[0] += r1postfix
- desc[0] += r2postfix
- ndtype.insert(nameidx + 1, desc)
- #... we haven't: just add the description to the current list
+ # we need to rebuild this list every time
+ names = list(name for name, dtype in ndtype)
+ try:
+ nameidx = names.index(fname)
+ except ValueError:
+ #... we haven't: just add the description to the current list
+ ndtype.append((fname, fdtype))
else:
- names.extend(desc[0])
- ndtype.append(desc)
- # Revert the elements to tuples
- ndtype = [tuple(_) for _ in ndtype]
+ # collision
+ _, cdtype = ndtype[nameidx]
+ if fname in key:
+ # The current field is part of the key: take the largest dtype
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
+ else:
+ # The current field is not part of the key: add the suffixes,
+ # and place the new field adjacent to the old one
+ ndtype[nameidx:nameidx + 1] = [
+ (fname + r1postfix, cdtype),
+ (fname + r2postfix, fdtype)
+ ]
+ # Rebuild a dtype from the new fields
+ ndtype = np.dtype(ndtype)
# Find the largest nb of common fields :
# r1cmn and r2cmn should be equal, but...
cmn = max(r1cmn, r2cmn)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 830943e72..53578e0e4 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -85,11 +85,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
array([[[1, 0, 0],
[0, 2, 0],
[0, 0, 3]],
-
[[4, 0, 0],
[0, 5, 0],
[0, 0, 6]],
-
[[7, 0, 0],
[0, 8, 0],
[0, 0, 9]]])
@@ -240,14 +238,20 @@ def expand_dims(a, axis):
"""
Expand the shape of an array.
- Insert a new axis, corresponding to a given position in the array shape.
+ Insert a new axis that will appear at the `axis` position in the expanded
+ array shape.
+
+ .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
+ ``axis > a.ndim`` raised errors or put the new axis where documented.
+ Those axis values are now deprecated and will raise an AxisError in the
+ future.
Parameters
----------
a : array_like
Input array.
axis : int
- Position (amongst axes) where new axis is to be inserted.
+ Position in the expanded axes where the new axis is placed.
Returns
-------
@@ -291,7 +295,16 @@ def expand_dims(a, axis):
"""
a = asarray(a)
shape = a.shape
- axis = normalize_axis_index(axis, a.ndim + 1)
+ if axis > a.ndim or axis < -a.ndim - 1:
+ # 2017-05-17, 1.13.0
+ warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
+ "deprecated and will raise an AxisError in the future.",
+ DeprecationWarning, stacklevel=2)
+ # When the deprecation period expires, delete this if block,
+ if axis < 0:
+ axis = axis + a.ndim + 1
+ # and uncomment the following line.
+ # axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
row_stack = vstack
@@ -317,7 +330,7 @@ def column_stack(tup):
See Also
--------
- hstack, vstack, concatenate
+ stack, hstack, vstack, concatenate
Examples
--------
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 545623c38..6c240db7f 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -100,10 +100,9 @@ def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
interface['strides'] = tuple(strides)
array = np.asarray(DummyArray(interface, base=x))
-
- if array.dtype.fields is None and x.dtype.fields is not None:
- # This should only happen if x.dtype is [('', 'Vx')]
- array.dtype = x.dtype
+ # The route via `__interface__` does not preserve structured
+ # dtypes. Since dtype should remain unchanged, we set it explicitly.
+ array.dtype = x.dtype
view = _maybe_view_as_subclass(x, array)
diff --git a/numpy/lib/tests/__init__.py b/numpy/lib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/lib/tests/__init__.py
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index f2ad0344a..a9cb157f3 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -6,7 +6,7 @@ from tempfile import mkdtemp, mkstemp, NamedTemporaryFile
from shutil import rmtree
from numpy.testing import (
- run_module_suite, TestCase, assert_, SkipTest
+ run_module_suite, assert_, assert_equal, assert_raises, SkipTest,
)
import numpy.lib._datasource as datasource
@@ -55,7 +55,7 @@ malicious_files = ['/etc/shadow', '../../shadow',
magic_line = b'three is the magic number'
-# Utility functions used by many TestCases
+# Utility functions used by many tests
def valid_textfile(filedir):
# Generate and return a valid temporary file.
fd, path = mkstemp(suffix='.txt', prefix='dstmp_', dir=filedir, text=True)
@@ -95,12 +95,12 @@ def invalid_httpfile():
return http_fakefile
-class TestDataSourceOpen(TestCase):
- def setUp(self):
+class TestDataSourceOpen(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -111,7 +111,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidHTTP(self):
url = invalid_httpurl()
- self.assertRaises(IOError, self.ds.open, url)
+ assert_raises(IOError, self.ds.open, url)
try:
self.ds.open(url)
except IOError as e:
@@ -119,7 +119,7 @@ class TestDataSourceOpen(TestCase):
assert_(e.errno is None)
def test_InvalidHTTPCacheURLError(self):
- self.assertRaises(URLError, self.ds._cache, invalid_httpurl())
+ assert_raises(URLError, self.ds._cache, invalid_httpurl())
def test_ValidFile(self):
local_file = valid_textfile(self.tmpdir)
@@ -129,7 +129,7 @@ class TestDataSourceOpen(TestCase):
def test_InvalidFile(self):
invalid_file = invalid_textfile(self.tmpdir)
- self.assertRaises(IOError, self.ds.open, invalid_file)
+ assert_raises(IOError, self.ds.open, invalid_file)
def test_ValidGzipFile(self):
try:
@@ -145,7 +145,7 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
def test_ValidBz2File(self):
try:
@@ -161,15 +161,15 @@ class TestDataSourceOpen(TestCase):
fp = self.ds.open(filepath)
result = fp.readline()
fp.close()
- self.assertEqual(magic_line, result)
+ assert_equal(magic_line, result)
-class TestDataSourceExists(TestCase):
- def setUp(self):
+class TestDataSourceExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -177,7 +177,7 @@ class TestDataSourceExists(TestCase):
assert_(self.ds.exists(valid_httpurl()))
def test_InvalidHTTP(self):
- self.assertEqual(self.ds.exists(invalid_httpurl()), False)
+ assert_equal(self.ds.exists(invalid_httpurl()), False)
def test_ValidFile(self):
# Test valid file in destpath
@@ -191,15 +191,15 @@ class TestDataSourceExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.ds.exists(tmpfile), False)
+ assert_equal(self.ds.exists(tmpfile), False)
-class TestDataSourceAbspath(TestCase):
- def setUp(self):
+class TestDataSourceAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.ds
@@ -207,30 +207,30 @@ class TestDataSourceAbspath(TestCase):
scheme, netloc, upath, pms, qry, frg = urlparse(valid_httpurl())
local_path = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertEqual(local_path, self.ds.abspath(valid_httpurl()))
+ assert_equal(local_path, self.ds.abspath(valid_httpurl()))
def test_ValidFile(self):
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertEqual(tmpfile, self.ds.abspath(tmpfilename))
+ assert_equal(tmpfile, self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertEqual(tmpfile, self.ds.abspath(tmpfile))
+ assert_equal(tmpfile, self.ds.abspath(tmpfile))
def test_InvalidHTTP(self):
scheme, netloc, upath, pms, qry, frg = urlparse(invalid_httpurl())
invalidhttp = os.path.join(self.tmpdir, netloc,
upath.strip(os.sep).strip('/'))
- self.assertNotEqual(invalidhttp, self.ds.abspath(valid_httpurl()))
+ assert_(invalidhttp != self.ds.abspath(valid_httpurl()))
def test_InvalidFile(self):
invalidfile = valid_textfile(self.tmpdir)
tmpfile = valid_textfile(self.tmpdir)
tmpfilename = os.path.split(tmpfile)[-1]
# Test with filename only
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfilename))
+ assert_(invalidfile != self.ds.abspath(tmpfilename))
# Test filename with complete path
- self.assertNotEqual(invalidfile, self.ds.abspath(tmpfile))
+ assert_(invalidfile != self.ds.abspath(tmpfile))
def test_sandboxing(self):
tmpfile = valid_textfile(self.tmpdir)
@@ -259,12 +259,12 @@ class TestDataSourceAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryAbspath(TestCase):
- def setUp(self):
+class TestRepositoryAbspath(object):
+ def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -273,7 +273,7 @@ class TestRepositoryAbspath(TestCase):
local_path = os.path.join(self.repos._destpath, netloc,
upath.strip(os.sep).strip('/'))
filepath = self.repos.abspath(valid_httpfile())
- self.assertEqual(local_path, filepath)
+ assert_equal(local_path, filepath)
def test_sandboxing(self):
tmp_path = lambda x: os.path.abspath(self.repos.abspath(x))
@@ -292,12 +292,12 @@ class TestRepositoryAbspath(TestCase):
os.sep = orig_os_sep
-class TestRepositoryExists(TestCase):
- def setUp(self):
+class TestRepositoryExists(object):
+ def setup(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
del self.repos
@@ -308,7 +308,7 @@ class TestRepositoryExists(TestCase):
def test_InvalidFile(self):
tmpfile = invalid_textfile(self.tmpdir)
- self.assertEqual(self.repos.exists(tmpfile), False)
+ assert_equal(self.repos.exists(tmpfile), False)
def test_RemoveHTTPFile(self):
assert_(self.repos.exists(valid_httpurl()))
@@ -325,11 +325,11 @@ class TestRepositoryExists(TestCase):
assert_(self.repos.exists(tmpfile))
-class TestOpenFunc(TestCase):
- def setUp(self):
+class TestOpenFunc(object):
+ def setup(self):
self.tmpdir = mkdtemp()
- def tearDown(self):
+ def teardown(self):
rmtree(self.tmpdir)
def test_DataSourceOpen(self):
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 6c0b2c6db..03192896c 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -6,8 +6,7 @@ from datetime import date
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_allclose,
- assert_raises
+ run_module_suite, assert_, assert_equal, assert_allclose, assert_raises,
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
@@ -15,7 +14,7 @@ from numpy.lib._iotools import (
)
-class TestLineSplitter(TestCase):
+class TestLineSplitter(object):
"Tests the LineSplitter class."
def test_no_delimiter(self):
@@ -79,7 +78,7 @@ class TestLineSplitter(TestCase):
# -----------------------------------------------------------------------------
-class TestNameValidator(TestCase):
+class TestNameValidator(object):
def test_case_sensitivity(self):
"Test case sensitivity"
@@ -140,7 +139,7 @@ def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
-class TestStringConverter(TestCase):
+class TestStringConverter(object):
"Test StringConverter"
def test_creation(self):
@@ -254,11 +253,11 @@ class TestStringConverter(TestCase):
assert_(converter(val) == 9223372043271415339)
-class TestMiscFunctions(TestCase):
+class TestMiscFunctions(object):
def test_has_nested_dtype(self):
"Test has_nested_dtype"
- ndtype = np.dtype(np.float)
+ ndtype = np.dtype(float)
assert_equal(has_nested_fields(ndtype), False)
ndtype = np.dtype([('A', '|S3'), ('B', float)])
assert_equal(has_nested_fields(ndtype), False)
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 056aa4582..fce4c451d 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -4,12 +4,11 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
- TestCase)
+from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,)
from numpy.lib import pad
-class TestConditionalShortcuts(TestCase):
+class TestConditionalShortcuts(object):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
@@ -52,7 +51,7 @@ class TestConditionalShortcuts(TestCase):
pad(test, pad_amt, mode=mode, stat_length=30))
-class TestStatistic(TestCase):
+class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
@@ -346,7 +345,7 @@ class TestStatistic(TestCase):
assert_array_equal(a, b)
-class TestConstant(TestCase):
+class TestConstant(object):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
@@ -491,7 +490,7 @@ class TestConstant(TestCase):
assert_allclose(test, expected)
-class TestLinearRamp(TestCase):
+class TestLinearRamp(object):
def test_check_simple(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
@@ -531,7 +530,7 @@ class TestLinearRamp(TestCase):
assert_allclose(test, expected)
-class TestReflect(TestCase):
+class TestReflect(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect')
@@ -640,8 +639,13 @@ class TestReflect(TestCase):
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
+ def test_check_padding_an_empty_array(self):
+ a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect')
+ b = np.zeros((0, 5))
+ assert_array_equal(a, b)
+
-class TestSymmetric(TestCase):
+class TestSymmetric(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric')
@@ -775,7 +779,7 @@ class TestSymmetric(TestCase):
assert_array_equal(a, b)
-class TestWrap(TestCase):
+class TestWrap(object):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'wrap')
@@ -871,7 +875,7 @@ class TestWrap(TestCase):
assert_array_equal(a, b)
-class TestStatLen(TestCase):
+class TestStatLen(object):
def test_check_simple(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
@@ -894,7 +898,7 @@ class TestStatLen(TestCase):
assert_array_equal(a, b)
-class TestEdge(TestCase):
+class TestEdge(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -933,7 +937,7 @@ class TestEdge(TestCase):
assert_array_equal(padded, expected)
-class TestZeroPadWidth(TestCase):
+class TestZeroPadWidth(object):
def test_zero_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -941,7 +945,7 @@ class TestZeroPadWidth(TestCase):
assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
-class TestLegacyVectorFunction(TestCase):
+class TestLegacyVectorFunction(object):
def test_legacy_vector_functionality(self):
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
@@ -963,7 +967,7 @@ class TestLegacyVectorFunction(TestCase):
assert_array_equal(a, b)
-class TestNdarrayPadWidth(TestCase):
+class TestNdarrayPadWidth(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -984,7 +988,7 @@ class TestNdarrayPadWidth(TestCase):
assert_array_equal(a, b)
-class TestUnicodeInput(TestCase):
+class TestUnicodeInput(object):
def test_unicode_mode(self):
constant_mode = u'constant'
a = np.pad([1], 2, mode=constant_mode)
@@ -992,7 +996,7 @@ class TestUnicodeInput(TestCase):
assert_array_equal(a, b)
-class ValueError1(TestCase):
+class TestValueError1(object):
def test_check_simple(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1014,8 +1018,14 @@ class ValueError1(TestCase):
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
+ def test_check_empty_array(self):
+ assert_raises(ValueError, pad, [], 4, mode='reflect')
+ assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect')
+ assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)),
+ mode='reflect')
+
-class ValueError2(TestCase):
+class TestValueError2(object):
def test_check_negative_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
@@ -1024,7 +1034,7 @@ class ValueError2(TestCase):
**kwargs)
-class ValueError3(TestCase):
+class TestValueError3(object):
def test_check_kwarg_not_allowed(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, 4, mode='mean',
@@ -1052,7 +1062,7 @@ class ValueError3(TestCase):
mode='constant')
-class TypeError1(TestCase):
+class TestTypeError1(object):
def test_float(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index fa664ff24..b8ced41e8 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -5,14 +5,14 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_array_equal, assert_equal, assert_raises
+ run_module_suite, assert_array_equal, assert_equal, assert_raises,
)
from numpy.lib.arraysetops import (
ediff1d, intersect1d, setxor1d, union1d, setdiff1d, unique, in1d, isin
)
-class TestSetOps(TestCase):
+class TestSetOps(object):
def test_intersect1d(self):
# unique inputs
@@ -89,28 +89,28 @@ class TestSetOps(TestCase):
x = isin(a, b)
y = isin_slow(a, b)
assert_array_equal(x, y)
-
+
#multidimensional arrays in both arguments
a = np.arange(24).reshape([2, 3, 4])
b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])
assert_isin_equal(a, b)
-
+
#array-likes as both arguments
c = [(9, 8), (7, 6)]
d = (9, 7)
assert_isin_equal(c, d)
-
+
#zero-d array:
f = np.array(3)
assert_isin_equal(f, b)
assert_isin_equal(a, f)
assert_isin_equal(f, f)
-
+
#scalar:
assert_isin_equal(5, b)
assert_isin_equal(a, 6)
assert_isin_equal(5, 6)
-
+
#empty array-like:
x = []
assert_isin_equal(x, b)
@@ -252,7 +252,7 @@ class TestSetOps(TestCase):
assert_array_equal(c1, c2)
-class TestUnique(TestCase):
+class TestUnique(object):
def test_unique_1d(self):
@@ -355,6 +355,16 @@ class TestUnique(TestCase):
a2, a2_inv = np.unique(a, return_inverse=True)
assert_array_equal(a2_inv, np.zeros(5))
+ # test for ticket #9137
+ a = []
+ a1_idx = np.unique(a, return_index=True)[1]
+ a2_inv = np.unique(a, return_inverse=True)[1]
+ a3_idx, a3_inv = np.unique(a, return_index=True, return_inverse=True)[1:]
+ assert_equal(a1_idx.dtype, np.intp)
+ assert_equal(a2_inv.dtype, np.intp)
+ assert_equal(a3_idx.dtype, np.intp)
+ assert_equal(a3_inv.dtype, np.intp)
+
def test_unique_axis_errors(self):
assert_raises(TypeError, self._run_axis_tests, object)
assert_raises(TypeError, self._run_axis_tests,
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index cc8ba55e5..4db364ad5 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -2,12 +2,12 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_almost_equal,
- assert_allclose, assert_equal
+ run_module_suite, assert_, assert_almost_equal, assert_allclose,
+ assert_equal
)
-class TestFinancial(TestCase):
+class TestFinancial(object):
def test_rate(self):
assert_almost_equal(np.rate(10, 0, -3500, 10000),
0.1107, 4)
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 93727ef0c..2d2b4cea2 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -615,6 +615,11 @@ def test_version_2_0():
format.write_array(f, d)
assert_(w[0].category is UserWarning)
+ # check alignment of data portion
+ f.seek(0)
+ header = f.readline()
+ assert_(len(header) % format.ARRAY_ALIGN == 0)
+
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
@@ -758,6 +763,7 @@ def test_read_array_header_1_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_1_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -770,6 +776,7 @@ def test_read_array_header_2_0():
s.seek(format.MAGIC_LEN)
shape, fortran, dtype = format.read_array_header_2_0(s)
+ assert_(s.tell() % format.ARRAY_ALIGN == 0)
assert_((shape, fortran, dtype) == ((3, 6), False, float))
@@ -811,7 +818,7 @@ def test_large_file_support():
# avoid actually writing 5GB
import subprocess as sp
sp.check_call(["truncate", "-s", "5368709120", tf_name])
- except:
+ except Exception:
raise SkipTest("Could not create 5GB large file")
# write a small array to the end
with open(tf_name, "wb") as f:
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index d7d00758e..c64081088 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -6,13 +6,13 @@ import sys
import decimal
import numpy as np
+from numpy import ma
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises,
- assert_allclose, assert_array_max_ulp, assert_warns,
- assert_raises_regex, dec, suppress_warnings
+ assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex,
+ dec, suppress_warnings, HAS_REFCOUNT,
)
-from numpy.testing.utils import HAS_REFCOUNT
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
@@ -32,9 +32,9 @@ def get_mat(n):
return data
-class TestRot90(TestCase):
+class TestRot90(object):
def test_basic(self):
- self.assertRaises(ValueError, rot90, np.ones(4))
+ assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(0,2))
assert_raises(ValueError, rot90, np.ones((2,2)), axes=(1,1))
@@ -100,12 +100,12 @@ class TestRot90(TestCase):
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
-class TestFlip(TestCase):
+class TestFlip(object):
def test_axes(self):
- self.assertRaises(ValueError, np.flip, np.ones(4), axis=1)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=2)
- self.assertRaises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
+ assert_raises(ValueError, np.flip, np.ones(4), axis=1)
+ assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=2)
+ assert_raises(ValueError, np.flip, np.ones((4, 4)), axis=-3)
def test_basic_lr(self):
a = get_mat(4)
@@ -173,7 +173,7 @@ class TestFlip(TestCase):
np.flipud(a.swapaxes(0, i)).swapaxes(i, 0))
-class TestAny(TestCase):
+class TestAny(object):
def test_basic(self):
y1 = [0, 0, 1, 0]
@@ -190,7 +190,7 @@ class TestAny(TestCase):
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
-class TestAll(TestCase):
+class TestAll(object):
def test_basic(self):
y1 = [0, 1, 1, 0]
@@ -208,7 +208,7 @@ class TestAll(TestCase):
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
-class TestCopy(TestCase):
+class TestCopy(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
@@ -236,7 +236,7 @@ class TestCopy(TestCase):
assert_(a_fort_copy.flags.f_contiguous)
-class TestAverage(TestCase):
+class TestAverage(object):
def test_basic(self):
y1 = np.array([1, 2, 3])
@@ -346,9 +346,9 @@ class TestAverage(TestCase):
a = np.array([decimal.Decimal(x) for x in range(10)])
w = np.array([decimal.Decimal(1) for _ in range(10)])
w /= w.sum()
- assert_almost_equal(a.mean(0), average(a, weights=w))
+ assert_almost_equal(a.mean(0), average(a, weights=w))
-class TestSelect(TestCase):
+class TestSelect(object):
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
@@ -420,7 +420,7 @@ class TestSelect(TestCase):
select(conditions, choices)
-class TestInsert(TestCase):
+class TestInsert(object):
def test_basic(self):
a = [1, 2, 3]
@@ -521,7 +521,7 @@ class TestInsert(TestCase):
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
-class TestAmax(TestCase):
+class TestAmax(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -533,7 +533,7 @@ class TestAmax(TestCase):
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
-class TestAmin(TestCase):
+class TestAmin(object):
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -545,7 +545,7 @@ class TestAmin(TestCase):
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
-class TestPtp(TestCase):
+class TestPtp(object):
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
@@ -557,7 +557,7 @@ class TestPtp(TestCase):
assert_equal(b.ptp(axis=-1), [6.0, 6.0, 6.0])
-class TestCumsum(TestCase):
+class TestCumsum(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -580,7 +580,7 @@ class TestCumsum(TestCase):
assert_array_equal(np.cumsum(a2, axis=1), tgt)
-class TestProd(TestCase):
+class TestProd(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -590,8 +590,8 @@ class TestProd(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.prod, a)
- self.assertRaises(ArithmeticError, np.prod, a2, 1)
+ assert_raises(ArithmeticError, np.prod, a)
+ assert_raises(ArithmeticError, np.prod, a2, 1)
else:
assert_equal(a.prod(axis=0), 26400)
assert_array_equal(a2.prod(axis=0),
@@ -600,7 +600,7 @@ class TestProd(TestCase):
np.array([24, 1890, 600], ctype))
-class TestCumprod(TestCase):
+class TestCumprod(object):
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -610,9 +610,9 @@ class TestCumprod(TestCase):
a = np.array(ba, ctype)
a2 = np.array(ba2, ctype)
if ctype in ['1', 'b']:
- self.assertRaises(ArithmeticError, np.cumprod, a)
- self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
- self.assertRaises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a)
+ assert_raises(ArithmeticError, np.cumprod, a2, 1)
+ assert_raises(ArithmeticError, np.cumprod, a)
else:
assert_array_equal(np.cumprod(a, axis=-1),
np.array([1, 2, 20, 220,
@@ -627,7 +627,7 @@ class TestCumprod(TestCase):
[10, 30, 120, 600]], ctype))
-class TestDiff(TestCase):
+class TestDiff(object):
def test_basic(self):
x = [1, 4, 6, 7, 12]
@@ -638,6 +638,29 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, n=2), out2)
assert_array_equal(diff(x, n=3), out3)
+ x = [1.1, 2.2, 3.0, -0.2, -0.1]
+ out = np.array([1.1, 0.8, -3.2, 0.1])
+ assert_almost_equal(diff(x), out)
+
+ x = [True, True, False, False]
+ out = np.array([False, True, False])
+ out2 = np.array([True, True])
+ assert_array_equal(diff(x), out)
+ assert_array_equal(diff(x, n=2), out2)
+
+ def test_axis(self):
+ x = np.zeros((10, 20, 30))
+ x[:, 1::2, :] = 1
+ exp = np.ones((10, 19, 30))
+ exp[:, 1::2, :] = -1
+ assert_array_equal(diff(x), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=-1), np.zeros((10, 20, 29)))
+ assert_array_equal(diff(x, axis=0), np.zeros((9, 20, 30)))
+ assert_array_equal(diff(x, axis=1), exp)
+ assert_array_equal(diff(x, axis=-2), exp)
+ assert_raises(np.AxisError, diff, x, axis=3)
+ assert_raises(np.AxisError, diff, x, axis=-4)
+
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
@@ -649,10 +672,49 @@ class TestDiff(TestCase):
assert_array_equal(diff(x, axis=0), out3)
assert_array_equal(diff(x, n=2, axis=0), out4)
+ def test_n(self):
+ x = list(range(3))
+ assert_raises(ValueError, diff, x, n=-1)
+ output = [diff(x, n=n) for n in range(1, 5)]
+ expected = [[1, 1], [0], [], []]
+ assert_(diff(x, n=0) is x)
+ for n, (expected, out) in enumerate(zip(expected, output), start=1):
+ assert_(type(out) is np.ndarray)
+ assert_array_equal(out, expected)
+ assert_equal(out.dtype, np.int_)
+ assert_equal(len(out), max(0, len(x) - n))
+
+ def test_times(self):
+ x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)
+ expected = [
+ np.array([1, 1], dtype='timedelta64[D]'),
+ np.array([0], dtype='timedelta64[D]'),
+ ]
+ expected.extend([np.array([], dtype='timedelta64[D]')] * 3)
+ for n, exp in enumerate(expected, start=1):
+ out = diff(x, n=n)
+ assert_array_equal(out, exp)
+ assert_equal(out.dtype, exp.dtype)
-class TestDelete(TestCase):
+ def test_subclass(self):
+ x = ma.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]],
+ mask=[[False, False], [True, False],
+ [False, True], [True, True], [False, False]])
+ out = diff(x)
+ assert_array_equal(out.data, [[1], [1], [1], [1], [1]])
+ assert_array_equal(out.mask, [[False], [True],
+ [True], [True], [False]])
+ assert_(type(out) is type(x))
- def setUp(self):
+ out3 = diff(x, n=3)
+ assert_array_equal(out3.data, [[], [], [], [], []])
+ assert_array_equal(out3.mask, [[], [], [], [], []])
+ assert_(type(out3) is type(x))
+
+
+class TestDelete(object):
+
+ def setup(self):
self.a = np.arange(5)
self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2)
@@ -725,7 +787,7 @@ class TestDelete(TestCase):
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
-class TestGradient(TestCase):
+class TestGradient(object):
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -735,7 +797,7 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_array_equal(gradient(v), dx)
- def test_args(self):
+ def test_args(self):
dx = np.cumsum(np.ones(5))
dx_uneven = [1., 2., 5., 9., 11.]
f_2d = np.arange(25).reshape(5, 5)
@@ -825,15 +887,15 @@ class TestGradient(TestCase):
def test_spacing(self):
f = np.array([0, 2., 3., 4., 5., 5.])
- f = np.tile(f, (6,1)) + f.reshape(-1, 1)
+ f = np.tile(f, (6,1)) + f.reshape(-1, 1)
x_uneven = np.array([0., 0.5, 1., 3., 5., 7.])
x_even = np.arange(6.)
-
+
fdx_even_ord1 = np.tile([2., 1.5, 1., 1., 0.5, 0.], (6,1))
fdx_even_ord2 = np.tile([2.5, 1.5, 1., 1., 0.5, -0.5], (6,1))
fdx_uneven_ord1 = np.tile([4., 3., 1.7, 0.5, 0.25, 0.], (6,1))
fdx_uneven_ord2 = np.tile([5., 3., 1.7, 0.5, 0.25, -0.25], (6,1))
-
+
# evenly spaced
for edge_order, exp_res in [(1, fdx_even_ord1), (2, fdx_even_ord2)]:
res1 = gradient(f, 1., axis=(0,1), edge_order=edge_order)
@@ -843,19 +905,19 @@ class TestGradient(TestCase):
axis=None, edge_order=edge_order)
assert_array_equal(res1, res2)
assert_array_equal(res2, res3)
- assert_almost_equal(res1[0], exp_res.T)
- assert_almost_equal(res1[1], exp_res)
-
+ assert_almost_equal(res1[0], exp_res.T)
+ assert_almost_equal(res1[1], exp_res)
+
res1 = gradient(f, 1., axis=0, edge_order=edge_order)
res2 = gradient(f, x_even, axis=0, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_almost_equal(res2, exp_res.T)
-
+
res1 = gradient(f, 1., axis=1, edge_order=edge_order)
res2 = gradient(f, x_even, axis=1, edge_order=edge_order)
assert_(res1.shape == res2.shape)
assert_array_equal(res2, exp_res)
-
+
# unevenly spaced
for edge_order, exp_res in [(1, fdx_uneven_ord1), (2, fdx_uneven_ord2)]:
res1 = gradient(f, x_uneven, x_uneven,
@@ -865,13 +927,13 @@ class TestGradient(TestCase):
assert_array_equal(res1, res2)
assert_almost_equal(res1[0], exp_res.T)
assert_almost_equal(res1[1], exp_res)
-
+
res1 = gradient(f, x_uneven, axis=0, edge_order=edge_order)
assert_almost_equal(res1, exp_res.T)
-
+
res1 = gradient(f, x_uneven, axis=1, edge_order=edge_order)
assert_almost_equal(res1, exp_res)
-
+
# mixed
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=1)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=1)
@@ -879,14 +941,14 @@ class TestGradient(TestCase):
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord1.T)
assert_almost_equal(res1[1], fdx_uneven_ord1)
-
+
res1 = gradient(f, x_even, x_uneven, axis=(0,1), edge_order=2)
res2 = gradient(f, x_uneven, x_even, axis=(1,0), edge_order=2)
assert_array_equal(res1[0], res2[1])
assert_array_equal(res1[1], res2[0])
assert_almost_equal(res1[0], fdx_even_ord2.T)
assert_almost_equal(res1[1], fdx_uneven_ord2)
-
+
def test_specific_axes(self):
# Testing that gradient can work on a given axis only
v = [[1, 1], [3, 4]]
@@ -912,7 +974,7 @@ class TestGradient(TestCase):
assert_raises(np.AxisError, gradient, x, axis=3)
assert_raises(np.AxisError, gradient, x, axis=-3)
# assert_raises(TypeError, gradient, x, axis=[1,])
-
+
def test_timedelta64(self):
# Make sure gradient() can handle special types like timedelta64
x = np.array(
@@ -924,20 +986,26 @@ class TestGradient(TestCase):
assert_array_equal(gradient(x), dx)
assert_(dx.dtype == np.dtype('timedelta64[D]'))
+ def test_inexact_dtypes(self):
+ for dt in [np.float16, np.float32, np.float64]:
+ # dtypes should not be promoted in a different way to what diff does
+ x = np.array([1, 2, 3], dtype=dt)
+ assert_equal(gradient(x).dtype, np.diff(x).dtype)
+
def test_values(self):
# needs at least 2 points for edge_order ==1
gradient(np.arange(2), edge_order=1)
# needs at least 3 points for edge_order ==1
gradient(np.arange(3), edge_order=2)
-
+
assert_raises(ValueError, gradient, np.arange(0), edge_order=1)
assert_raises(ValueError, gradient, np.arange(0), edge_order=2)
assert_raises(ValueError, gradient, np.arange(1), edge_order=1)
assert_raises(ValueError, gradient, np.arange(1), edge_order=2)
- assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
+ assert_raises(ValueError, gradient, np.arange(2), edge_order=2)
-class TestAngle(TestCase):
+class TestAngle(object):
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
@@ -953,7 +1021,7 @@ class TestAngle(TestCase):
assert_array_almost_equal(z, zo, 11)
-class TestTrimZeros(TestCase):
+class TestTrimZeros(object):
"""
Only testing for integer splits.
@@ -976,7 +1044,7 @@ class TestTrimZeros(TestCase):
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
-class TestExtins(TestCase):
+class TestExtins(object):
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -1015,7 +1083,7 @@ class TestExtins(TestCase):
assert_array_equal(a, ac)
-class TestVectorize(TestCase):
+class TestVectorize(object):
def test_simple(self):
def addsubtract(a, b):
@@ -1074,7 +1142,7 @@ class TestVectorize(TestCase):
import random
try:
vectorize(random.randrange) # Should succeed
- except:
+ except Exception:
raise AssertionError()
def test_keywords2_ticket_2100(self):
@@ -1347,7 +1415,7 @@ class TestVectorize(TestCase):
f(x)
-class TestDigitize(TestCase):
+class TestDigitize(object):
def test_forward(self):
x = np.arange(-6, 5)
@@ -1420,7 +1488,7 @@ class TestDigitize(TestCase):
assert_(not isinstance(digitize(b, a, True), A))
-class TestUnwrap(TestCase):
+class TestUnwrap(object):
def test_simple(self):
# check that unwrap removes jumps greather that 2*pi
@@ -1429,7 +1497,7 @@ class TestUnwrap(TestCase):
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
-class TestFilterwindows(TestCase):
+class TestFilterwindows(object):
def test_hanning(self):
# check symmetry
@@ -1460,7 +1528,7 @@ class TestFilterwindows(TestCase):
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
-class TestTrapz(TestCase):
+class TestTrapz(object):
def test_simple(self):
x = np.arange(-10, 10, .1)
@@ -1532,7 +1600,7 @@ class TestTrapz(TestCase):
assert_almost_equal(mr, r)
-class TestSinc(TestCase):
+class TestSinc(object):
def test_simple(self):
assert_(sinc(0) == 1)
@@ -1549,12 +1617,12 @@ class TestSinc(TestCase):
assert_array_equal(y1, y3)
-class TestHistogram(TestCase):
+class TestHistogram(object):
- def setUp(self):
+ def setup(self):
pass
- def tearDown(self):
+ def teardown(self):
pass
def test_simple(self):
@@ -1650,16 +1718,16 @@ class TestHistogram(TestCase):
# Check the type of the returned histogram
a = np.arange(10) + .5
h, b = histogram(a)
- assert_(np.issubdtype(h.dtype, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, normed=True)
- assert_(np.issubdtype(h.dtype, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
h, b = histogram(a, weights=np.ones(10, int))
- assert_(np.issubdtype(h.dtype, int))
+ assert_(np.issubdtype(h.dtype, np.integer))
h, b = histogram(a, weights=np.ones(10, float))
- assert_(np.issubdtype(h.dtype, float))
+ assert_(np.issubdtype(h.dtype, np.floating))
def test_f32_rounding(self):
# gh-4799, check that the rounding of the edges works with float32
@@ -1760,16 +1828,16 @@ class TestHistogram(TestCase):
left_edges = edges[:-1][mask]
right_edges = edges[1:][mask]
for x, left, right in zip(arr, left_edges, right_edges):
- self.assertGreaterEqual(x, left)
- self.assertLess(x, right)
+ assert_(x >= left)
+ assert_(x < right)
def test_last_bin_inclusive_range(self):
arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
- self.assertEqual(hist[-1], 1)
+ assert_equal(hist[-1], 1)
-class TestHistogramOptimBinNums(TestCase):
+class TestHistogramOptimBinNums(object):
"""
Provide test coverage when using provided estimators for optimal number of
bins
@@ -1879,7 +1947,7 @@ class TestHistogramOptimBinNums(TestCase):
completely ignored. All test values have been precomputed and
the shouldn't change.
"""
- # some basic sanity checking, with some fixed data.
+ # some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {
50: {'fd': 8, 'scott': 8, 'rice': 15,
@@ -1891,7 +1959,7 @@ class TestHistogramOptimBinNums(TestCase):
}
for testlen, expectedResults in basic_test.items():
- # create some sort of non uniform data to test with
+ # create some sort of non uniform data to test with
# (3 peak uniform mixture)
x1 = np.linspace(-10, -1, testlen // 5 * 2)
x2 = np.linspace(1, 10, testlen // 5 * 3)
@@ -1909,11 +1977,11 @@ class TestHistogramOptimBinNums(TestCase):
"""
estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
for estimator in estimator_list:
- assert_raises(TypeError, histogram, [1, 2, 3],
+ assert_raises(TypeError, histogram, [1, 2, 3],
estimator, weights=[1, 2, 3])
-class TestHistogramdd(TestCase):
+class TestHistogramdd(object):
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
@@ -2053,7 +2121,7 @@ class TestHistogramdd(TestCase):
range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
-class TestUnique(TestCase):
+class TestUnique(object):
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
@@ -2065,7 +2133,7 @@ class TestUnique(TestCase):
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
-class TestCheckFinite(TestCase):
+class TestCheckFinite(object):
def test_simple(self):
a = [1, 2, 3]
@@ -2082,7 +2150,7 @@ class TestCheckFinite(TestCase):
assert_(a.dtype == np.float64)
-class TestCorrCoef(TestCase):
+class TestCorrCoef(object):
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
@@ -2167,7 +2235,7 @@ class TestCorrCoef(TestCase):
assert_(np.all(np.abs(c) <= 1.0))
-class TestCov(TestCase):
+class TestCov(object):
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
@@ -2265,7 +2333,7 @@ class TestCov(TestCase):
self.res1)
-class Test_I0(TestCase):
+class Test_I0(object):
def test_simple(self):
assert_almost_equal(
@@ -2291,7 +2359,7 @@ class Test_I0(TestCase):
[1.05884290, 1.06432317]]))
-class TestKaiser(TestCase):
+class TestKaiser(object):
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
@@ -2310,7 +2378,7 @@ class TestKaiser(TestCase):
kaiser(3, 4)
-class TestMsort(TestCase):
+class TestMsort(object):
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
@@ -2323,7 +2391,7 @@ class TestMsort(TestCase):
[0.64864341, 0.79115165, 0.96098397]]))
-class TestMeshgrid(TestCase):
+class TestMeshgrid(object):
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
@@ -2412,7 +2480,7 @@ class TestMeshgrid(TestCase):
assert_equal(x[1, :], X)
-class TestPiecewise(TestCase):
+class TestPiecewise(object):
def test_simple(self):
# Condition is single bool list
@@ -2488,7 +2556,7 @@ class TestPiecewise(TestCase):
[3., 3., 1.]]))
-class TestBincount(TestCase):
+class TestBincount(object):
def test_simple(self):
y = np.bincount(np.arange(4))
@@ -2575,7 +2643,7 @@ class TestBincount(TestCase):
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
-class TestInterp(TestCase):
+class TestInterp(object):
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2602,28 +2670,28 @@ class TestInterp(TestCase):
incres = interp(incpts, xp, yp)
decres = interp(decpts, xp, yp)
- inctgt = np.array([1, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0)
decres = interp(decpts, xp, yp, left=0)
- inctgt = np.array([0, 1, 1, 1], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 1], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, right=2)
decres = interp(decpts, xp, yp, right=2)
- inctgt = np.array([1, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([1, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
incres = interp(incpts, xp, yp, left=0, right=2)
decres = interp(decpts, xp, yp, left=0, right=2)
- inctgt = np.array([0, 1, 1, 2], dtype=np.float)
+ inctgt = np.array([0, 1, 1, 2], dtype=float)
dectgt = inctgt[::-1]
assert_equal(incres, inctgt)
assert_equal(decres, dectgt)
@@ -2693,7 +2761,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestPercentile(TestCase):
+class TestPercentile(object):
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2797,7 +2865,7 @@ class TestPercentile(TestCase):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50), 5.5)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
assert_equal(np.percentile(x, 50, axis=0), r0)
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
@@ -2818,7 +2886,7 @@ class TestPercentile(TestCase):
# test for no empty dimensions for compatibility with old percentile
x = np.arange(12).reshape(3, 4)
assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
- self.assertTrue(np.isscalar(np.percentile(x, 50)))
+ assert_(np.isscalar(np.percentile(x, 50)))
r0 = np.array([4., 5., 6., 7.])
c0 = np.percentile(x, 50, interpolation='lower', axis=0)
assert_equal(c0, r0)
@@ -2950,7 +3018,7 @@ class TestPercentile(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.percentile(x, 30, axis=(-2, -1)), np.percentile(o, 30))
x = x.swapaxes(0, 1).copy()
assert_equal(np.percentile(x, 30, axis=(0, -1)), np.percentile(o, 30))
@@ -3124,7 +3192,7 @@ class TestPercentile(TestCase):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
-class TestMedian(TestCase):
+class TestMedian(object):
def test_basic(self):
a0 = np.array(1)
@@ -3331,7 +3399,7 @@ class TestMedian(TestCase):
o = np.random.normal(size=(71, 23))
x = np.dstack([o] * 10)
assert_equal(np.median(x, axis=(0, 1)), np.median(o))
- x = np.rollaxis(x, -1, 0)
+ x = np.moveaxis(x, -1, 0)
assert_equal(np.median(x, axis=(-2, -1)), np.median(o))
x = x.swapaxes(0, 1).copy()
assert_equal(np.median(x, axis=(0, -1)), np.median(o))
@@ -3381,7 +3449,7 @@ class TestMedian(TestCase):
(1, 1, 7, 1))
-class TestAdd_newdoc_ufunc(TestCase):
+class TestAdd_newdoc_ufunc(object):
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
@@ -3391,15 +3459,15 @@ class TestAdd_newdoc_ufunc(TestCase):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-class TestAdd_newdoc(TestCase):
+class TestAdd_newdoc(object):
@dec.skipif(sys.flags.optimize == 2)
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
- self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
- self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300)
- self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+ assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
+ assert_(len(np.core.ufunc.identity.__doc__) > 300)
+ assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
if __name__ == "__main__":
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 5b791026b..452b3d6a2 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises
)
from numpy.lib.index_tricks import (
@@ -11,7 +11,7 @@ from numpy.lib.index_tricks import (
)
-class TestRavelUnravelIndex(TestCase):
+class TestRavelUnravelIndex(object):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
@@ -110,11 +110,11 @@ class TestRavelUnravelIndex(TestCase):
def test_writeability(self):
# See gh-7269
x, y = np.unravel_index([1, 2, 3], (4, 5))
- self.assertTrue(x.flags.writeable)
- self.assertTrue(y.flags.writeable)
+ assert_(x.flags.writeable)
+ assert_(y.flags.writeable)
-class TestGrid(TestCase):
+class TestGrid(object):
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
@@ -147,7 +147,7 @@ class TestGrid(TestCase):
0.2*np.ones(20, 'd'), 11)
-class TestConcatenator(TestCase):
+class TestConcatenator(object):
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
@@ -206,14 +206,14 @@ class TestConcatenator(TestCase):
assert_equal(type(actual), type(expected))
-class TestNdenumerate(TestCase):
+class TestNdenumerate(object):
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
-class TestIndexExpression(TestCase):
+class TestIndexExpression(object):
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
@@ -227,7 +227,7 @@ class TestIndexExpression(TestCase):
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
-class TestIx_(TestCase):
+class TestIx_(object):
def test_regression_1(self):
# Test empty inputs create ouputs of indexing type, gh-5804
# Test both lists and arrays
@@ -243,7 +243,7 @@ class TestIx_(TestCase):
for k, (a, sz) in enumerate(zip(arrays, sizes)):
assert_equal(a.shape[k], sz)
assert_(all(sh == 1 for j, sh in enumerate(a.shape) if j != k))
- assert_(np.issubdtype(a.dtype, int))
+ assert_(np.issubdtype(a.dtype, np.integer))
def test_bool(self):
bool_a = [True, False, True, True]
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 868089551..6f7fcc54c 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -17,9 +17,9 @@ from numpy.lib._iotools import ConverterError, ConversionWarning
from numpy.compat import asbytes, bytes, unicode, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
- TestCase, run_module_suite, assert_warns, assert_,
- assert_raises_regex, assert_raises, assert_allclose,
- assert_array_equal, temppath, dec, IS_PYPY, suppress_warnings
+ run_module_suite, assert_warns, assert_, assert_raises_regex,
+ assert_raises, assert_allclose, assert_array_equal, temppath, dec, IS_PYPY,
+ suppress_warnings
)
@@ -165,7 +165,7 @@ class RoundtripTest(object):
self.check_roundtrips(a)
-class TestSaveLoad(RoundtripTest, TestCase):
+class TestSaveLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
assert_equal(self.arr[0], self.arr_reloaded)
@@ -173,7 +173,7 @@ class TestSaveLoad(RoundtripTest, TestCase):
assert_equal(self.arr[0].flags.fnc, self.arr_reloaded.flags.fnc)
-class TestSavezLoad(RoundtripTest, TestCase):
+class TestSavezLoad(RoundtripTest):
def roundtrip(self, *args, **kwargs):
RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
try:
@@ -304,7 +304,7 @@ class TestSavezLoad(RoundtripTest, TestCase):
assert_(fp.closed)
-class TestSaveTxt(TestCase):
+class TestSaveTxt(object):
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
@@ -329,6 +329,12 @@ class TestSaveTxt(TestCase):
lines = c.readlines()
assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])
+ def test_0D_3D(self):
+ c = BytesIO()
+ assert_raises(ValueError, np.savetxt, c, np.array(1))
+ assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
+
+
def test_record(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
@@ -373,7 +379,7 @@ class TestSaveTxt(TestCase):
# Test the functionality of the header and footer keyword argument.
c = BytesIO()
- a = np.array([(1, 2), (3, 4)], dtype=np.int)
+ a = np.array([(1, 2), (3, 4)], dtype=int)
test_header_footer = 'Test header / footer'
# Test the header keyword argument
np.savetxt(c, a, fmt='%1d', header=test_header_footer)
@@ -461,7 +467,7 @@ class TestSaveTxt(TestCase):
assert_array_equal(a, b)
-class TestLoadTxt(TestCase):
+class TestLoadTxt(object):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
@@ -485,7 +491,7 @@ class TestLoadTxt(TestCase):
c.write('1 2\n3 4')
c.seek(0)
- x = np.loadtxt(c, dtype=np.int)
+ x = np.loadtxt(c, dtype=int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
@@ -721,7 +727,7 @@ class TestLoadTxt(TestCase):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -751,11 +757,11 @@ class TestLoadTxt(TestCase):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
- tgt = np.hstack((tgt, -tgt)).astype(np.float)
+ tgt = np.hstack((tgt, -tgt)).astype(float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
- for dt in [np.float, np.float32]:
+ for dt in [float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
@@ -765,7 +771,7 @@ class TestLoadTxt(TestCase):
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
- res = np.loadtxt(c, dtype=np.complex)
+ res = np.loadtxt(c, dtype=complex)
assert_equal(res, tgt)
def test_universal_newline(self):
@@ -864,7 +870,7 @@ class TestLoadTxt(TestCase):
np.loadtxt(c, delimiter=',', dtype=dt, comments=None) # Should succeed
-class Testfromregex(TestCase):
+class Testfromregex(object):
# np.fromregex expects files opened in binary mode.
def test_record(self):
c = TextIO()
@@ -902,7 +908,7 @@ class Testfromregex(TestCase):
#####--------------------------------------------------------------------------
-class TestFromTxt(TestCase):
+class TestFromTxt(object):
#
def test_record(self):
# Test w/ explicit dtype
@@ -1178,19 +1184,19 @@ M 33 21.99
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
- control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,-1,0), (2,8,-1,1), (3,3,-2,3)], dtype=dtyp)
assert_equal(test, control)
dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
- control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
+ control = np.rec.array([(1,5,0), (2,8,1), (3,3,3)], dtype=dtyp)
assert_equal(test, control)
def test_dtype_with_object(self):
# Test using an explicit dtype with an object
data = """ 1; 2001-01-01
2; 2002-01-31 """
- ndtype = [('idx', int), ('code', np.object)]
+ ndtype = [('idx', int), ('code', object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
@@ -1200,7 +1206,7 @@ M 33 21.99
dtype=ndtype)
assert_equal(test, control)
- ndtype = [('nest', [('idx', int), ('code', np.object)])]
+ ndtype = [('nest', [('idx', int), ('code', object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
@@ -1337,7 +1343,7 @@ M 33 21.99
test = np.mafromtxt(data, dtype=None, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
#
@@ -1345,7 +1351,7 @@ M 33 21.99
test = np.mafromtxt(data, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.float), ('B', np.float)])
+ dtype=[('A', float), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1414,7 +1420,7 @@ M 33 21.99
missing_values='-999.0', names=True,)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.float)])
+ dtype=[('A', int), ('B', float)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
@@ -1682,15 +1688,15 @@ M 33 21.99
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(data, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1701,15 +1707,15 @@ M 33 21.99
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(data, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,N/A')
test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
- dtype=[('A', np.int), ('B', np.int)])
+ dtype=[('A', int), ('B', int)])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
assert_equal(test.A, [0, 2])
@@ -1717,16 +1723,16 @@ M 33 21.99
data = TextIO('A,B\n0,1\n2,3')
test = np.recfromcsv(data, missing_values='N/A',)
control = np.array([(0, 1), (2, 3)],
- dtype=[('a', np.int), ('b', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('a', int), ('b', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
#
data = TextIO('A,B\n0,1\n2,3')
- dtype = [('a', np.int), ('b', np.float)]
+ dtype = [('a', int), ('b', float)]
test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
control = np.array([(0, 1), (2, 3)],
dtype=dtype)
- self.assertTrue(isinstance(test, np.recarray))
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
def test_max_rows(self):
@@ -1827,7 +1833,7 @@ M 33 21.99
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
- assert_(test.dtype['f0'] == np.float)
+ assert_(test.dtype['f0'] == float)
assert_(test.dtype['f1'] == np.int64)
assert_(test.dtype['f2'] == np.integer)
@@ -1836,7 +1842,7 @@ M 33 21.99
assert_equal(test['f2'], 1024)
-class TestPathUsage(TestCase):
+class TestPathUsage(object):
# Test that pathlib.Path can be used
@np.testing.dec.skipif(Path is None, "No pathlib.Path")
def test_loadtxt(self):
@@ -1919,8 +1925,8 @@ class TestPathUsage(TestCase):
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
test = np.recfromtxt(path, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
@np.testing.dec.skipif(Path is None, "No pathlib.Path")
@@ -1933,8 +1939,8 @@ class TestPathUsage(TestCase):
kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
test = np.recfromcsv(path, dtype=None, **kwargs)
control = np.array([(0, 1), (2, 3)],
- dtype=[('A', np.int), ('B', np.int)])
- self.assertTrue(isinstance(test, np.recarray))
+ dtype=[('A', int), ('B', int)])
+ assert_(isinstance(test, np.recarray))
assert_equal(test, control)
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index db38bdfd6..94f06c336 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -6,7 +6,8 @@ import sys
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_raises)
+ run_module_suite, assert_, assert_equal, assert_raises
+ )
PY2 = sys.version_info.major < 3
@@ -99,7 +100,7 @@ _ALL_BINARY_OPERATORS = [
]
-class TestNDArrayOperatorsMixin(TestCase):
+class TestNDArrayOperatorsMixin(object):
def test_array_like_add(self):
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 466ceefb5..3d362fc6e 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -4,7 +4,7 @@ import warnings
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
+ run_module_suite, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
@@ -35,7 +35,7 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
-class TestNanFunctions_MinMax(TestCase):
+class TestNanFunctions_MinMax(object):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
@@ -165,7 +165,7 @@ class TestNanFunctions_MinMax(TestCase):
assert_(issubclass(w[0].category, RuntimeWarning))
-class TestNanFunctions_ArgminArgmax(TestCase):
+class TestNanFunctions_ArgminArgmax(object):
nanfuncs = [np.nanargmin, np.nanargmax]
@@ -224,7 +224,7 @@ class TestNanFunctions_ArgminArgmax(TestCase):
assert_(np.isscalar(res))
-class TestNanFunctions_IntTypes(TestCase):
+class TestNanFunctions_IntTypes(object):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@@ -396,7 +396,7 @@ class SharedNanFunctionsTestsMixin(object):
assert_(np.isscalar(res))
-class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_SumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
@@ -430,7 +430,7 @@ class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_equal(res, tgt)
-class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_CumSumProd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
@@ -513,7 +513,7 @@ class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
assert_almost_equal(res, tgt)
-class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
+class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
@@ -585,7 +585,7 @@ class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
assert_(len(w) == 0)
-class TestNanFunctions_Median(TestCase):
+class TestNanFunctions_Median(object):
def test_mutation(self):
# Check that passed array is not modified.
@@ -749,7 +749,7 @@ class TestNanFunctions_Median(TestCase):
([np.nan] * i) + [-inf] * j)
-class TestNanFunctions_Percentile(TestCase):
+class TestNanFunctions_Percentile(object):
def test_mutation(self):
# Check that passed array is not modified.
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 0725c186d..9a4650825 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -80,12 +80,12 @@ poly1d([ 2.])
'''
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_equal, assert_array_equal,
assert_almost_equal, assert_array_almost_equal, assert_raises, rundocs
)
-class TestDocs(TestCase):
+class TestDocs(object):
def test_doctests(self):
return rundocs()
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 0940d37b0..bc9f8d7b6 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -4,7 +4,9 @@ import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
-from numpy.testing import TestCase, run_module_suite, assert_, assert_raises
+from numpy.testing import (
+ run_module_suite, assert_, assert_raises, dec
+ )
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by
@@ -14,10 +16,10 @@ get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions.zip_descr
-class TestRecFunctions(TestCase):
+class TestRecFunctions(object):
# Misc tests
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
@@ -191,7 +193,7 @@ class TestRecFunctions(TestCase):
assert_equal(test[0], a[test[-1]])
-class TestRecursiveFillFields(TestCase):
+class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
@@ -214,10 +216,10 @@ class TestRecursiveFillFields(TestCase):
assert_equal(test, control)
-class TestMergeArrays(TestCase):
+class TestMergeArrays(object):
# Test merge_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -347,10 +349,10 @@ class TestMergeArrays(TestCase):
assert_equal(test, control)
-class TestAppendFields(TestCase):
+class TestAppendFields(object):
# Test append_fields
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -401,9 +403,9 @@ class TestAppendFields(TestCase):
assert_equal(test, control)
-class TestStackArrays(TestCase):
+class TestStackArrays(object):
# Test stack_arrays
- def setUp(self):
+ def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array(
@@ -417,11 +419,11 @@ class TestStackArrays(TestCase):
(_, x, _, _) = self.data
test = stack_arrays((x,))
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
test = stack_arrays(x)
assert_equal(test, x)
- self.assertTrue(test is x)
+ assert_(test is x)
def test_unnamed_fields(self):
# Tests combinations of arrays w/o named fields
@@ -546,9 +548,38 @@ class TestStackArrays(TestCase):
assert_equal(test, control)
assert_equal(test.mask, control.mask)
-
-class TestJoinBy(TestCase):
- def setUp(self):
+ def test_subdtype(self):
+ z = np.array([
+ ('A', 1), ('B', 2)
+ ], dtype=[('A', '|S3'), ('B', float, (1,))])
+ zz = np.array([
+ ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
+ ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
+
+ res = stack_arrays((z, zz))
+ expected = ma.array(
+ data=[
+ (b'A', [1.0], 0),
+ (b'B', [2.0], 0),
+ (b'a', [10.0], 100.0),
+ (b'b', [20.0], 200.0),
+ (b'c', [30.0], 300.0)],
+ mask=[
+ (False, [False], True),
+ (False, [False], True),
+ (False, [False], False),
+ (False, [False], False),
+ (False, [False], False)
+ ],
+ dtype=zz.dtype
+ )
+ assert_equal(res.dtype, expected.dtype)
+ assert_equal(res, expected)
+ assert_equal(res.mask, expected.mask)
+
+
+class TestJoinBy(object):
+ def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -588,6 +619,16 @@ class TestJoinBy(TestCase):
dtype=[('a', int), ('b', int),
('c', int), ('d', int)])
+ def test_join_subdtype(self):
+ # tests the bug in https://stackoverflow.com/q/44769632/102441
+ from numpy.lib import recfunctions as rfn
+ foo = np.array([(1,)],
+ dtype=[('key', int)])
+ bar = np.array([(1, np.array([1,2,3]))],
+ dtype=[('key', int), ('value', 'uint16', 3)])
+ res = join_by('key', foo, bar)
+ assert_equal(res, bar.view(ma.MaskedArray))
+
def test_outer_join(self):
a, b = self.a, self.b
@@ -646,10 +687,66 @@ class TestJoinBy(TestCase):
b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)
+ @dec.knownfailureif(True)
+ def test_same_name_different_dtypes_key(self):
+ a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ expected_dtype = np.dtype([
+ ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_same_name_different_dtypes(self):
+ # gh-9338
+ a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
+ b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
+ expected_dtype = np.dtype([
+ ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
+
+ a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
+ b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
+ res = join_by('key', a, b)
+
+ assert_equal(res.dtype, expected_dtype)
+
+ def test_subarray_key(self):
+ a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
+ a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
+
+ b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
+ b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
+
+ expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
+ expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
+
+ res = join_by('pos', a, b)
+ assert_equal(res.dtype, expected_dtype)
+ assert_equal(res, expected)
+
+ def test_padded_dtype(self):
+ dt = np.dtype('i1,f4', align=True)
+ dt.names = ('k', 'v')
+ assert_(len(dt.descr), 3) # padding field is inserted
+
+ a = np.array([(1, 3), (3, 2)], dt)
+ b = np.array([(1, 1), (2, 2)], dt)
+ res = join_by('k', a, b)
+
+ # no padding fields remain
+ expected_dtype = np.dtype([
+ ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
+ ])
+
+ assert_equal(res.dtype, expected_dtype)
+
-class TestJoinBy2(TestCase):
+class TestJoinBy2(object):
@classmethod
- def setUp(cls):
+ def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
dtype=[('a', int), ('b', int), ('c', int)])
@@ -673,8 +770,8 @@ class TestJoinBy2(TestCase):
assert_equal(test, control)
def test_no_postfix(self):
- self.assertRaises(ValueError, join_by, 'a', self.a, self.b,
- r1postfix='', r2postfix='')
+ assert_raises(ValueError, join_by, 'a', self.a, self.b,
+ r1postfix='', r2postfix='')
def test_no_r2postfix(self):
# Basic test of join_by no_r2postfix
@@ -712,13 +809,13 @@ class TestJoinBy2(TestCase):
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
-class TestAppendFieldsObj(TestCase):
+class TestAppendFieldsObj(object):
"""
Test append_fields with arrays containing objects
"""
# https://github.com/numpy/numpy/issues/2346
- def setUp(self):
+ def setup(self):
from datetime import date
self.data = dict(obj=date(2000, 1, 1))
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index ee50dcfa4..d96d3422d 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -5,22 +5,19 @@ import sys
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_array_almost_equal, assert_raises
+ run_module_suite, assert_, assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_raises, _assert_valid_refcount,
)
-from numpy.testing.utils import _assert_valid_refcount
from numpy.compat import unicode
-rlevel = 1
-
-class TestRegression(TestCase):
- def test_poly1d(self, level=rlevel):
+class TestRegression(object):
+ def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
np.poly1d([-1, 1]))
- def test_cov_parameters(self, level=rlevel):
+ def test_cov_parameters(self):
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
@@ -28,57 +25,57 @@ class TestRegression(TestCase):
np.cov(y, rowvar=0)
assert_array_equal(x, y)
- def test_mem_digitize(self, level=rlevel):
+ def test_mem_digitize(self):
# Ticket #95
for i in range(100):
np.digitize([1, 2, 3, 4], [1, 3])
np.digitize([0, 1, 2, 3, 4], [1, 3])
- def test_unique_zero_sized(self, level=rlevel):
+ def test_unique_zero_sized(self):
# Ticket #205
assert_array_equal([], np.unique(np.array([])))
- def test_mem_vectorise(self, level=rlevel):
+ def test_mem_vectorise(self):
# Ticket #325
vt = np.vectorize(lambda *args: args)
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1, 1, 2)))
vt(np.zeros((1, 2, 1)), np.zeros((2, 1, 1)), np.zeros((1,
1, 2)), np.zeros((2, 2)))
- def test_mgrid_single_element(self, level=rlevel):
+ def test_mgrid_single_element(self):
# Ticket #339
assert_array_equal(np.mgrid[0:0:1j], [0])
assert_array_equal(np.mgrid[0:0], [])
- def test_refcount_vectorize(self, level=rlevel):
+ def test_refcount_vectorize(self):
# Ticket #378
def p(x, y):
return 123
v = np.vectorize(p)
_assert_valid_refcount(v)
- def test_poly1d_nan_roots(self, level=rlevel):
+ def test_poly1d_nan_roots(self):
# Ticket #396
p = np.poly1d([np.nan, np.nan, 1], r=0)
- self.assertRaises(np.linalg.LinAlgError, getattr, p, "r")
+ assert_raises(np.linalg.LinAlgError, getattr, p, "r")
- def test_mem_polymul(self, level=rlevel):
+ def test_mem_polymul(self):
# Ticket #448
np.polymul([], [1.])
- def test_mem_string_concat(self, level=rlevel):
+ def test_mem_string_concat(self):
# Ticket #469
x = np.array([])
np.append(x, 'asdasd\tasdasd')
- def test_poly_div(self, level=rlevel):
+ def test_poly_div(self):
# Ticket #553
u = np.poly1d([1, 2, 3])
v = np.poly1d([1, 2, 3, 4, 5])
q, r = np.polydiv(u, v)
assert_equal(q*v + r, u)
- def test_poly_eq(self, level=rlevel):
+ def test_poly_eq(self):
# Ticket #554
x = np.poly1d([1, 2, 3])
y = np.poly1d([3, 4])
@@ -109,13 +106,13 @@ class TestRegression(TestCase):
def test_polydiv_type(self):
# Make polydiv work for complex types
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
+ x = np.ones(3, dtype=complex)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.complex, msg)
+ assert_(q.dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
+ x = np.ones(3, dtype=int)
q, r = np.polydiv(x, x)
- assert_(q.dtype == np.float, msg)
+ assert_(q.dtype == float, msg)
def test_histogramdd_too_many_bins(self):
# Ticket 928.
@@ -124,22 +121,22 @@ class TestRegression(TestCase):
def test_polyint_type(self):
# Ticket #944
msg = "Wrong type, should be complex"
- x = np.ones(3, dtype=np.complex)
- assert_(np.polyint(x).dtype == np.complex, msg)
+ x = np.ones(3, dtype=complex)
+ assert_(np.polyint(x).dtype == complex, msg)
msg = "Wrong type, should be float"
- x = np.ones(3, dtype=np.int)
- assert_(np.polyint(x).dtype == np.float, msg)
+ x = np.ones(3, dtype=int)
+ assert_(np.polyint(x).dtype == float, msg)
def test_ndenumerate_crash(self):
# Ticket 1140
# Shouldn't crash:
list(np.ndenumerate(np.array([[]])))
- def test_asfarray_none(self, level=rlevel):
+ def test_asfarray_none(self):
# Test for changeset r5065
assert_array_equal(np.array([np.nan]), np.asfarray([None]))
- def test_large_fancy_indexing(self, level=rlevel):
+ def test_large_fancy_indexing(self):
# Large enough to fail on 64-bit.
nbits = np.dtype(np.intp).itemsize * 8
thesize = int((2**nbits)**(1.0/5.0)+1)
@@ -156,15 +153,15 @@ class TestRegression(TestCase):
i = np.random.randint(0, n, size=thesize)
a[np.ix_(i, i, i, i, i)]
- self.assertRaises(ValueError, dp)
- self.assertRaises(ValueError, dp2)
+ assert_raises(ValueError, dp)
+ assert_raises(ValueError, dp2)
- def test_void_coercion(self, level=rlevel):
+ def test_void_coercion(self):
dt = np.dtype([('a', 'f4'), ('b', 'i4')])
x = np.zeros((1,), dt)
assert_(np.r_[x, x].dtype == dt)
- def test_who_with_0dim_array(self, level=rlevel):
+ def test_who_with_0dim_array(self):
# ticket #1243
import os
import sys
@@ -174,7 +171,7 @@ class TestRegression(TestCase):
try:
try:
np.who({'foo': np.array(1)})
- except:
+ except Exception:
raise AssertionError("ticket #1243")
finally:
sys.stdout.close()
@@ -206,7 +203,7 @@ class TestRegression(TestCase):
dlist = [np.float64, np.int32, np.int32]
try:
append_fields(base, names, data, dlist)
- except:
+ except Exception:
raise AssertionError()
def test_loadtxt_fields_subarrays(self):
@@ -235,10 +232,10 @@ class TestRegression(TestCase):
def test_nansum_with_boolean(self):
# gh-2978
- a = np.zeros(2, dtype=np.bool)
+ a = np.zeros(2, dtype=bool)
try:
np.nansum(a)
- except:
+ except Exception:
raise AssertionError()
def test_py3_compat(self):
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 4d06001f4..d0afeefd9 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -1,23 +1,25 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+import warnings
+
from numpy.lib.shape_base import (
apply_along_axis, apply_over_axes, array_split, split, hsplit, dsplit,
- vsplit, dstack, column_stack, kron, tile
+ vsplit, dstack, column_stack, kron, tile, expand_dims,
)
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_raises, assert_warns
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_raises,
+ assert_warns
)
-class TestApplyAlongAxis(TestCase):
+class TestApplyAlongAxis(object):
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
- def test_simple101(self, level=11):
+ def test_simple101(self):
a = np.ones((10, 101), 'd')
assert_array_equal(
apply_along_axis(len, 0, a), len(a)*np.ones(a.shape[1]))
@@ -175,14 +177,33 @@ class TestApplyAlongAxis(TestCase):
assert_equal(type(actual[i]), type(expected[i]))
-class TestApplyOverAxes(TestCase):
+class TestApplyOverAxes(object):
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
-class TestArraySplit(TestCase):
+class TestExpandDims(object):
+ def test_functionality(self):
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ for axis in range(-5, 4):
+ b = expand_dims(a, axis)
+ assert_(b.shape[axis] == 1)
+ assert_(np.squeeze(b).shape == s)
+
+ def test_deprecations(self):
+ # 2017-05-17, 1.13.0
+ s = (2, 3, 4, 5)
+ a = np.empty(s)
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ assert_warns(DeprecationWarning, expand_dims, a, -6)
+ assert_warns(DeprecationWarning, expand_dims, a, 5)
+
+
+class TestArraySplit(object):
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
@@ -307,7 +328,7 @@ class TestArraySplit(TestCase):
compare_results(res, desired)
-class TestSplit(TestCase):
+class TestSplit(object):
# The split function is essentially the same as array_split,
# except that it test if splitting will result in an
# equal split. Only test for this case.
@@ -322,12 +343,12 @@ class TestSplit(TestCase):
a = np.arange(10)
assert_raises(ValueError, split, a, 3)
-class TestColumnStack(TestCase):
+class TestColumnStack(object):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
-class TestDstack(TestCase):
+class TestDstack(object):
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
@@ -362,7 +383,7 @@ class TestDstack(TestCase):
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
-class TestHsplit(TestCase):
+class TestHsplit(object):
"""Only testing for integer splits.
"""
@@ -391,7 +412,7 @@ class TestHsplit(TestCase):
compare_results(res, desired)
-class TestVsplit(TestCase):
+class TestVsplit(object):
"""Only testing for integer splits.
"""
@@ -418,7 +439,7 @@ class TestVsplit(TestCase):
compare_results(res, desired)
-class TestDsplit(TestCase):
+class TestDsplit(object):
# Only testing for integer splits.
def test_non_iterable(self):
assert_raises(ValueError, dsplit, 1, 1)
@@ -451,7 +472,7 @@ class TestDsplit(TestCase):
compare_results(res, desired)
-class TestSqueeze(TestCase):
+class TestSqueeze(object):
def test_basic(self):
from numpy.random import rand
@@ -470,7 +491,7 @@ class TestSqueeze(TestCase):
assert_equal(type(res), np.ndarray)
-class TestKron(TestCase):
+class TestKron(object):
def test_return_type(self):
a = np.ones([2, 2])
m = np.asmatrix(a)
@@ -489,7 +510,7 @@ class TestKron(TestCase):
assert_equal(type(kron(ma, a)), myarray)
-class TestTile(TestCase):
+class TestTile(object):
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
@@ -529,19 +550,19 @@ class TestTile(TestCase):
assert_equal(large, klarge)
-class TestMayShareMemory(TestCase):
+class TestMayShareMemory(object):
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
- self.assertTrue(np.may_share_memory(d, d))
- self.assertTrue(np.may_share_memory(d, d[::-1]))
- self.assertTrue(np.may_share_memory(d, d[::2]))
- self.assertTrue(np.may_share_memory(d, d[1:, ::-1]))
-
- self.assertFalse(np.may_share_memory(d[::-1], d2))
- self.assertFalse(np.may_share_memory(d[::2], d2))
- self.assertFalse(np.may_share_memory(d[1:, ::-1], d2))
- self.assertTrue(np.may_share_memory(d2[1:, ::-1], d2))
+ assert_(np.may_share_memory(d, d))
+ assert_(np.may_share_memory(d, d[::-1]))
+ assert_(np.may_share_memory(d, d[::2]))
+ assert_(np.may_share_memory(d, d[1:, ::-1]))
+
+ assert_(not np.may_share_memory(d[::-1], d2))
+ assert_(not np.may_share_memory(d[::2], d2))
+ assert_(not np.may_share_memory(d[1:, ::-1], d2))
+ assert_(np.may_share_memory(d2[1:, ::-1], d2))
# Utility
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 7dc3c4d24..0599324d7 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.test_rational import rational
from numpy.testing import (
run_module_suite, assert_equal, assert_array_equal,
assert_raises, assert_
@@ -317,6 +318,13 @@ def test_as_strided():
a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
assert_equal(a.dtype, a_view.dtype)
+ # Custom dtypes should not be lost (gh-9161)
+ r = [rational(i) for i in range(4)]
+ a = np.array(r, dtype=rational)
+ a_view = as_strided(a, shape=(3, 4), strides=(0, a.itemsize))
+ assert_equal(a.dtype, a_view.dtype)
+ assert_array_equal([r] * 3, a_view)
+
def as_strided_writeable():
arr = np.ones(10)
view = as_strided(arr, writeable=False)
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index d57791e34..6bf668dee 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -4,8 +4,8 @@
from __future__ import division, absolute_import, print_function
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
- assert_array_max_ulp, assert_array_almost_equal, assert_raises,
+ run_module_suite, assert_equal, assert_array_equal, assert_array_max_ulp,
+ assert_array_almost_equal, assert_raises,
)
from numpy import (
@@ -23,7 +23,7 @@ def get_mat(n):
return data
-class TestEye(TestCase):
+class TestEye(object):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
@@ -96,7 +96,7 @@ class TestEye(TestCase):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
-class TestDiag(TestCase):
+class TestDiag(object):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
@@ -140,12 +140,12 @@ class TestDiag(TestCase):
assert_equal(diag(A, k=-3), [])
def test_failure(self):
- self.assertRaises(ValueError, diag, [[[1]]])
+ assert_raises(ValueError, diag, [[[1]]])
-class TestFliplr(TestCase):
+class TestFliplr(object):
def test_basic(self):
- self.assertRaises(ValueError, fliplr, ones(4))
+ assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
@@ -156,7 +156,7 @@ class TestFliplr(TestCase):
assert_equal(fliplr(a), b)
-class TestFlipud(TestCase):
+class TestFlipud(object):
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
@@ -168,7 +168,7 @@ class TestFlipud(TestCase):
assert_equal(flipud(a), b)
-class TestHistogram2d(TestCase):
+class TestHistogram2d(object):
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
@@ -265,7 +265,7 @@ class TestHistogram2d(TestCase):
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
-class TestTri(TestCase):
+class TestTri(object):
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
@@ -349,10 +349,10 @@ def test_mask_indices():
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
- yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
+ assert_array_equal(a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
- yield (assert_array_equal, a[iu1], array([1, 2, 5]))
+ assert_array_equal(a[iu1], array([1, 2, 5]))
def test_tril_indices():
@@ -369,37 +369,37 @@ def test_tril_indices():
b = np.arange(1, 21).reshape(4, 5)
# indexing:
- yield (assert_array_equal, a[il1],
- array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
- yield (assert_array_equal, b[il3],
- array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
+ assert_array_equal(a[il1],
+ array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
+ assert_array_equal(b[il3],
+ array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
- yield (assert_array_equal, a,
- array([[-1, 2, 3, 4],
- [-1, -1, 7, 8],
- [-1, -1, -1, 12],
- [-1, -1, -1, -1]]))
+ assert_array_equal(a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]))
b[il3] = -1
- yield (assert_array_equal, b,
- array([[-1, 2, 3, 4, 5],
- [-1, -1, 8, 9, 10],
- [-1, -1, -1, 14, 15],
- [-1, -1, -1, -1, 20]]))
+ assert_array_equal(b,
+ array([[-1, 2, 3, 4, 5],
+ [-1, -1, 8, 9, 10],
+ [-1, -1, -1, 14, 15],
+ [-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
- yield (assert_array_equal, a,
- array([[-10, -10, -10, 4],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10],
- [-10, -10, -10, -10]]))
+ assert_array_equal(a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]))
b[il4] = -10
- yield (assert_array_equal, b,
- array([[-10, -10, -10, 4, 5],
- [-10, -10, -10, -10, 10],
- [-10, -10, -10, -10, -10],
- [-10, -10, -10, -10, -10]]))
+ assert_array_equal(b,
+ array([[-10, -10, -10, 4, 5],
+ [-10, -10, -10, -10, 10],
+ [-10, -10, -10, -10, -10],
+ [-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
@@ -416,39 +416,40 @@ class TestTriuIndices(object):
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
- yield (assert_array_equal, a[iu1],
- array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
- yield (assert_array_equal, b[iu3],
- array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
+ assert_array_equal(a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+ assert_array_equal(b[iu3],
+ array([1, 2, 3, 4, 5, 7, 8, 9,
+ 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
- yield (assert_array_equal, a,
- array([[-1, -1, -1, -1],
- [5, -1, -1, -1],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -1, -1],
+ [5, -1, -1, -1],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu3] = -1
- yield (assert_array_equal, b,
- array([[-1, -1, -1, -1, -1],
- [6, -1, -1, -1, -1],
- [11, 12, -1, -1, -1],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -1, -1, -1],
+ [6, -1, -1, -1, -1],
+ [11, 12, -1, -1, -1],
+ [16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
- yield (assert_array_equal, a,
- array([[-1, -1, -10, -10],
- [5, -1, -1, -10],
- [9, 10, -1, -1],
- [13, 14, 15, -1]]))
+ assert_array_equal(a,
+ array([[-1, -1, -10, -10],
+ [5, -1, -1, -10],
+ [9, 10, -1, -1],
+ [13, 14, 15, -1]]))
b[iu4] = -10
- yield (assert_array_equal, b,
- array([[-1, -1, -10, -10, -10],
- [6, -1, -1, -10, -10],
- [11, 12, -1, -1, -10],
- [16, 17, 18, -1, -1]]))
+ assert_array_equal(b,
+ array([[-1, -1, -10, -10, -10],
+ [6, -1, -1, -10, -10],
+ [11, 12, -1, -1, -10],
+ [16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 383ffa55c..8945b61ea 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
- TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
+ assert_, assert_equal, assert_array_equal, run_module_suite, assert_raises
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
@@ -15,7 +15,7 @@ def assert_all(x):
assert_(np.all(x), x)
-class TestCommonType(TestCase):
+class TestCommonType(object):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
@@ -31,7 +31,7 @@ class TestCommonType(TestCase):
assert_(common_type(acd) == np.cdouble)
-class TestMintypecode(TestCase):
+class TestMintypecode(object):
def test_default_1(self):
for itype in '1bcsuwil':
@@ -81,7 +81,7 @@ class TestMintypecode(TestCase):
assert_equal(mintypecode('idD'), 'D')
-class TestIsscalar(TestCase):
+class TestIsscalar(object):
def test_basic(self):
assert_(np.isscalar(3))
@@ -92,7 +92,7 @@ class TestIsscalar(TestCase):
assert_(np.isscalar(4.0))
-class TestReal(TestCase):
+class TestReal(object):
def test_real(self):
y = np.random.rand(10,)
@@ -123,7 +123,7 @@ class TestReal(TestCase):
assert_(not isinstance(out, np.ndarray))
-class TestImag(TestCase):
+class TestImag(object):
def test_real(self):
y = np.random.rand(10,)
@@ -154,7 +154,7 @@ class TestImag(TestCase):
assert_(not isinstance(out, np.ndarray))
-class TestIscomplex(TestCase):
+class TestIscomplex(object):
def test_fail(self):
z = np.array([-1, 0, 1])
@@ -167,7 +167,7 @@ class TestIscomplex(TestCase):
assert_array_equal(res, [1, 0, 0])
-class TestIsreal(TestCase):
+class TestIsreal(object):
def test_pass(self):
z = np.array([-1, 0, 1j])
@@ -180,7 +180,7 @@ class TestIsreal(TestCase):
assert_array_equal(res, [0, 1, 1])
-class TestIscomplexobj(TestCase):
+class TestIscomplexobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
@@ -233,7 +233,7 @@ class TestIscomplexobj(TestCase):
assert_(iscomplexobj(a))
-class TestIsrealobj(TestCase):
+class TestIsrealobj(object):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
@@ -241,7 +241,7 @@ class TestIsrealobj(TestCase):
assert_(not isrealobj(z))
-class TestIsnan(TestCase):
+class TestIsnan(object):
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
@@ -271,7 +271,7 @@ class TestIsnan(TestCase):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
-class TestIsfinite(TestCase):
+class TestIsfinite(object):
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
@@ -302,7 +302,7 @@ class TestIsfinite(TestCase):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
-class TestIsinf(TestCase):
+class TestIsinf(object):
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
@@ -331,7 +331,7 @@ class TestIsinf(TestCase):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
-class TestIsposinf(TestCase):
+class TestIsposinf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -341,7 +341,7 @@ class TestIsposinf(TestCase):
assert_(vals[2] == 1)
-class TestIsneginf(TestCase):
+class TestIsneginf(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -351,7 +351,7 @@ class TestIsneginf(TestCase):
assert_(vals[2] == 0)
-class TestNanToNum(TestCase):
+class TestNanToNum(object):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -374,7 +374,7 @@ class TestNanToNum(TestCase):
vals = nan_to_num(1)
assert_all(vals == 1)
vals = nan_to_num([1])
- assert_array_equal(vals, np.array([1], np.int))
+ assert_array_equal(vals, np.array([1], int))
def test_complex_good(self):
vals = nan_to_num(1+1j)
@@ -402,7 +402,7 @@ class TestNanToNum(TestCase):
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
-class TestRealIfClose(TestCase):
+class TestRealIfClose(object):
def test_basic(self):
a = np.random.rand(10)
@@ -415,12 +415,18 @@ class TestRealIfClose(TestCase):
assert_all(isrealobj(b))
-class TestArrayConversion(TestCase):
+class TestArrayConversion(object):
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
- assert_(np.issubdtype(a.dtype, np.float))
+ assert_(np.issubdtype(a.dtype, np.floating))
+
+ # previously this would infer dtypes from arrays, unlike every single
+ # other numpy function
+ assert_raises(TypeError,
+ asfarray, np.array([1, 2, 3]), dtype=np.array(1.0))
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 0b152540f..128ce37ab 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -4,12 +4,11 @@ import numpy as np
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_array_equal,
- assert_warns
+ run_module_suite, assert_, assert_equal, assert_array_equal, assert_warns
)
-class TestUfunclike(TestCase):
+class TestUfunclike(object):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 28ebb8cbd..a6259219a 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -6,6 +6,7 @@ from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
+ nonzero
)
from numpy.core import iinfo, transpose
@@ -717,7 +718,7 @@ def mask_indices(n, mask_func, k=0):
"""
m = ones((n, n), int)
a = mask_func(m, k)
- return where(a != 0)
+ return nonzero(a != 0)
def tril_indices(n, k=0, m=None):
@@ -797,7 +798,7 @@ def tril_indices(n, k=0, m=None):
[-10, -10, -10, -10]])
"""
- return where(tri(n, m, k=k, dtype=bool))
+ return nonzero(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
@@ -907,7 +908,7 @@ def triu_indices(n, k=0, m=None):
[ 12, 13, 14, -1]])
"""
- return where(~tri(n, m, k=k-1, dtype=bool))
+ return nonzero(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 5202cebde..e6aae8ddd 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -98,8 +98,7 @@ def asfarray(a, dtype=_nx.float_):
array([ 2., 3.])
"""
- dtype = _nx.obj2sctype(dtype)
- if not issubclass(dtype, _nx.inexact):
+ if not _nx.issubdtype(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a, dtype=dtype)
@@ -331,11 +330,16 @@ def _getmaxmin(t):
def nan_to_num(x, copy=True):
"""
- Replace nan with zero and inf with finite numbers.
+ Replace nan with zero and inf with large finite numbers.
- Returns an array or scalar replacing Not a Number (NaN) with zero,
- (positive) infinity with a very large number and negative infinity
- with a very small (or negative) number.
+ If `x` is inexact, NaN is replaced by zero, and infinity and -infinity
+ replaced by the respectively largest and most negative finite floating
+ point values representable by ``x.dtype``.
+
+ For complex dtypes, the above is applied to each of the real and
+ imaginary components of `x` separately.
+
+ If `x` is not inexact, then no replacements are made.
Parameters
----------
@@ -352,12 +356,8 @@ def nan_to_num(x, copy=True):
Returns
-------
out : ndarray
- New Array with the same shape as `x` and dtype of the element in
- `x` with the greatest precision. If `x` is inexact, then NaN is
- replaced by zero, and infinity (-infinity) is replaced by the
- largest (smallest or most negative) floating point value that fits
- in the output dtype. If `x` is not inexact, then a copy of `x` is
- returned.
+ `x`, with the non-finite values replaced. If `copy` is False, this may
+ be `x` itself.
See Also
--------
@@ -372,15 +372,17 @@ def nan_to_num(x, copy=True):
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
-
Examples
--------
- >>> np.set_printoptions(precision=8)
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
-
+ >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+ >>> np.nan_to_num(y)
+ array([ 1.79769313e+308 +0.00000000e+000j,
+ 0.00000000e+000 +0.00000000e+000j,
+ 0.00000000e+000 +1.79769313e+308j])
"""
x = _nx.array(x, subok=True, copy=copy)
xtype = x.dtype.type
@@ -430,12 +432,12 @@ def real_if_close(a,tol=100):
-----
Machine epsilon varies from machine to machine and between data types
but Python floats on most platforms have a machine epsilon equal to
- 2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print
+ 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
out the machine epsilon for floats.
Examples
--------
- >>> np.finfo(np.float).eps
+ >>> np.finfo(float).eps
2.2204460492503131e-16
>>> np.real_if_close([2.1 + 4e-14j], tol=1000)
@@ -577,8 +579,8 @@ def common_type(*arrays):
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
- All input arrays can be safely cast to the returned dtype without loss
- of information.
+ All input arrays except int64 and uint64 can be safely cast to the
+ returned dtype without loss of information.
Parameters
----------
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index fad159c7e..e18eda0fb 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -557,7 +557,7 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
if len(arglist) > 1:
arglist[1] = "("+arglist[1]
arguments = ", ".join(arglist[1:])
- except:
+ except Exception:
pass
if len(name+arguments) > maxwidth:
@@ -689,7 +689,7 @@ def source(object, output=sys.stdout):
try:
print("In file: %s\n" % inspect.getsourcefile(object), file=output)
print(inspect.getsource(object), file=output)
- except:
+ except Exception:
print("Not available for this object.", file=output)
@@ -1138,7 +1138,7 @@ def _median_nancheck(data, result, axis, out):
"""
if data.size == 0:
return result
- data = np.rollaxis(data, axis, data.ndim)
+ data = np.moveaxis(data, axis, -1)
n = np.isnan(data[..., -1])
# masked NaN values are ok
if np.ma.isMaskedArray(n):
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 69445f541..2537926c5 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -50,6 +50,6 @@ from .info import __doc__
from .linalg import *
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 31147b9cc..d2ae7befc 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -19,12 +19,13 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
import warnings
from numpy.core import (
- array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
+ array, asarray, zeros, empty, empty_like, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
- finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
- broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
- )
+ finfo, errstate, geterrobj, longdouble, moveaxis, amin, amax, product, abs,
+ broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones, matmul,
+ swapaxes, divide, count_nonzero
+)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
@@ -69,12 +70,8 @@ class LinAlgError(Exception):
"""
pass
-# Dealing with errors in _umath_linalg
-
-_linalg_error_extobj = None
def _determine_error_states():
- global _linalg_error_extobj
errobj = geterrobj()
bufsize = errobj[0]
@@ -82,9 +79,11 @@ def _determine_error_states():
divide='ignore', under='ignore'):
invalid_call_errmask = geterrobj()[1]
- _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
+ return [bufsize, invalid_call_errmask, None]
-_determine_error_states()
+# Dealing with errors in _umath_linalg
+_linalg_error_extobj = _determine_error_states()
+del _determine_error_states
def _raise_linalgerror_singular(err, flag):
raise LinAlgError("Singular matrix")
@@ -99,7 +98,7 @@ def _raise_linalgerror_svd_nonconvergence(err, flag):
raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
- extobj = list(_linalg_error_extobj)
+ extobj = list(_linalg_error_extobj) # make a copy
extobj[2] = callback
return extobj
@@ -225,6 +224,22 @@ def _assertNoEmpty2d(*arrays):
if _isEmpty2d(a):
raise LinAlgError("Arrays cannot be empty")
+def transpose(a):
+ """
+ Transpose each matrix in a stack of matrices.
+
+ Unlike np.transpose, this only swaps the last two axes, rather than all of
+ them
+
+ Parameters
+ ----------
+ a : (...,M,N) array_like
+
+ Returns
+ -------
+ aT : (...,N,M) ndarray
+ """
+ return swapaxes(a, -1, -2)
# Linear equations
@@ -1281,7 +1296,7 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
-def svd(a, full_matrices=1, compute_uv=1):
+def svd(a, full_matrices=True, compute_uv=True):
"""
Singular Value Decomposition.
@@ -1489,22 +1504,34 @@ def cond(x, p=None):
return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
-def matrix_rank(M, tol=None):
+def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
- Rank of the array is the number of SVD singular values of the array that are
+ Rank of the array is the number of singular values of the array that are
greater than `tol`.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
M : {(M,), (..., M, N)} array_like
input vector or stack of matrices
- tol : {None, float}, optional
- threshold below which SVD values are considered zero. If `tol` is
- None, and ``S`` is an array with singular values for `M`, and
- ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
- set to ``S.max() * max(M.shape) * eps``.
+ tol : (...) array_like, float, optional
+ threshold below which SVD values are considered zero. If `tol` is
+ None, and ``S`` is an array with singular values for `M`, and
+ ``eps`` is the epsilon value for datatype of ``S``, then `tol` is
+ set to ``S.max() * max(M.shape) * eps``.
+
+ .. versionchanged:: 1.14
+ Broadcasted against the stack of matrices
+ hermitian : bool, optional
+ If True, `M` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.14
Notes
-----
@@ -1568,10 +1595,15 @@ def matrix_rank(M, tol=None):
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
- S = svd(M, compute_uv=False)
+ if hermitian:
+ S = abs(eigvalsh(M))
+ else:
+ S = svd(M, compute_uv=False)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
- return (S > tol).sum(axis=-1)
+ else:
+ tol = asarray(tol)[..., newaxis]
+ return count_nonzero(S > tol, axis=-1)
# Generalized inverse
@@ -1584,26 +1616,29 @@ def pinv(a, rcond=1e-15 ):
singular-value decomposition (SVD) and including all
*large* singular values.
+ .. versionchanged:: 1.14
+ Can now operate on stacks of matrices
+
Parameters
----------
- a : (M, N) array_like
- Matrix to be pseudo-inverted.
- rcond : float
- Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero.
+ a : (..., M, N) array_like
+ Matrix or stack of matrices to be pseudo-inverted.
+ rcond : (...) array_like of float
+ Cutoff for small singular values.
+ Singular values smaller (in modulus) than
+ `rcond` * largest_singular_value (again, in modulus)
+ are set to zero. Broadcasts against the stack of matrices.
Returns
-------
- B : (N, M) ndarray
- The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
- is `B`.
+ B : (..., N, M) ndarray
+ The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
+ is `B`.
Raises
------
LinAlgError
- If the SVD computation does not converge.
+ If the SVD computation does not converge.
Notes
-----
@@ -1640,20 +1675,20 @@ def pinv(a, rcond=1e-15 ):
"""
a, wrap = _makearray(a)
+ rcond = asarray(rcond)
if _isEmpty2d(a):
res = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
- u, s, vt = svd(a, 0)
- m = u.shape[0]
- n = vt.shape[1]
- cutoff = rcond*maximum.reduce(s)
- for i in range(min(n, m)):
- if s[i] > cutoff:
- s[i] = 1./s[i]
- else:
- s[i] = 0.
- res = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
+ u, s, vt = svd(a, full_matrices=False)
+
+ # discard small singular values
+ cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
+ large = s > cutoff
+ s = divide(1, s, where=large, out=s)
+ s[~large] = 0
+
+ res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
# Determinant
@@ -1810,7 +1845,7 @@ def det(a):
# Linear Least Squares
-def lstsq(a, b, rcond=-1):
+def lstsq(a, b, rcond="warn"):
"""
Return the least-squares solution to a linear matrix equation.
@@ -1836,6 +1871,13 @@ def lstsq(a, b, rcond=-1):
as zero if they are smaller than `rcond` times the largest singular
value of `a`.
+ .. versionchanged:: 1.14.0
+ If not set, a FutureWarning is given. The previous default
+ of ``-1`` will use the machine precision as `rcond` parameter;
+ the new default will use the machine precision times `max(M, N)`.
+ To silence the warning and use the new default, use ``rcond=None``,
+ to keep using the old behavior, use ``rcond=-1``.
+
Returns
-------
x : {(N,), (N, K)} ndarray
@@ -1909,6 +1951,20 @@ def lstsq(a, b, rcond=-1):
if m != b.shape[0]:
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
+ # Determine default rcond value
+ if rcond == "warn":
+ # 2017-08-19, 1.14.0
+ warnings.warn("`rcond` parameter will change to the default of "
+ "machine precision times ``max(M, N)`` where M and N "
+ "are the input matrix dimensions.\n"
+ "To use the future default and silence this warning "
+ "we advise to pass `rcond=None`, to keep using the old, "
+ "explicitly pass `rcond=-1`.",
+ FutureWarning, stacklevel=2)
+ rcond = -1
+ if rcond is None:
+ rcond = finfo(t).eps * ldb
+
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
@@ -1968,13 +2024,13 @@ def lstsq(a, b, rcond=-1):
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
- x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
+ x = array(bstar.T[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
- resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum(abs(bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
else:
- resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
+ resids = sum((bstar.T[n:,:])**2, axis=0).astype(
result_real_t, copy=False)
st = s[:min(n, m)].astype(result_real_t, copy=True)
@@ -2004,9 +2060,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
- if row_axis > col_axis:
- row_axis -= 1
- y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
+ y = moveaxis(x, (row_axis, col_axis), (-2, -1))
result = op(svd(y, compute_uv=0), axis=-1)
return result
@@ -2177,7 +2231,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
elif not isinstance(axis, tuple):
try:
axis = int(axis)
- except:
+ except Exception:
raise TypeError("'axis' must be None, an integer or a tuple of integers")
axis = (axis,)
@@ -2201,18 +2255,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
ord + 1
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- if x.dtype.type is longdouble:
- # Convert to a float type, so integer arrays give
- # float results. Don't apply asfarray to longdouble arrays,
- # because it will downcast to float64.
- absx = abs(x)
- else:
- absx = x if isComplexType(x.dtype.type) else asfarray(x)
- if absx.dtype is x.dtype:
- absx = abs(absx)
- else:
- # if the type changed, we can safely overwrite absx
- abs(absx, out=absx)
+ absx = abs(x)
absx **= ord
return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
elif len(axis) == 2:
@@ -2327,7 +2370,7 @@ def multi_dot(arrays):
return A.shape[0] * A.shape[1] * B.shape[1]
Let's assume we have three matrices
- :math:`A_{10x100}, B_{100x5}, C_{5x50}$`.
+ :math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
diff --git a/numpy/linalg/tests/__init__.py b/numpy/linalg/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/linalg/tests/__init__.py
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index a91f97670..b46a72c02 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -5,7 +5,7 @@ import sys
import re
from numpy.linalg import lapack_lite
-from numpy.testing import TestCase, dec, run_module_suite
+from numpy.testing import run_module_suite, assert_, dec
class FindDependenciesLdd(object):
@@ -40,7 +40,7 @@ class FindDependenciesLdd(object):
return founds
-class TestF77Mismatch(TestCase):
+class TestF77Mismatch(object):
@dec.skipif(not(sys.platform[:5] == 'linux'),
"Skipping fortran compiler mismatch on non Linux platform")
@@ -48,7 +48,7 @@ class TestF77Mismatch(TestCase):
f = FindDependenciesLdd()
deps = f.grep_dependencies(lapack_lite.__file__,
[b'libg2c', b'libgfortran'])
- self.assertFalse(len(deps) > 1,
+ assert_(len(deps) <= 1,
"""Both g77 and gfortran runtimes linked in lapack_lite ! This is likely to
cause random crashes and wrong results. See numpy INSTALL.txt for more
information.""")
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index c612eb6bb..8b3984883 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -712,12 +712,16 @@ class TestCondInf(object):
assert_almost_equal(linalg.cond(A, inf), 3.)
-class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase):
+class TestPinv(LinalgSquareTestCase,
+ LinalgNonsquareTestCase,
+ LinalgGeneralizedSquareTestCase,
+ LinalgGeneralizedNonsquareTestCase):
def do(self, a, b, tags):
a_ginv = linalg.pinv(a)
# `a @ a_ginv == I` does not hold if a is singular
- assert_almost_equal(dot(a, a_ginv).dot(a), a, single_decimal=5, double_decimal=11)
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
@@ -793,7 +797,7 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
arr = np.asarray(a)
m, n = arr.shape
u, s, vt = linalg.svd(a, 0)
- x, residuals, rank, sv = linalg.lstsq(a, b)
+ x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m <= n:
assert_almost_equal(b, dot(a, x))
assert_equal(rank, m)
@@ -814,6 +818,23 @@ class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
+ def test_future_rcond(self):
+ a = np.array([[0., 1., 0., 1., 2., 0.],
+ [0., 2., 0., 0., 1., 0.],
+ [1., 0., 1., 0., 0., 4.],
+ [0., 0., 0., 2., 3., 0.]]).T
+
+ b = np.array([1, 0, 0, 0, 0, 0])
+ with suppress_warnings() as sup:
+ w = sup.record(FutureWarning, "`rcond` parameter will change")
+ x, residuals, rank, s = linalg.lstsq(a, b)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=-1)
+ assert_(rank == 4)
+ x, residuals, rank, s = linalg.lstsq(a, b, rcond=None)
+ assert_(rank == 3)
+ # Warning should be raised exactly once (first command)
+ assert_(len(w) == 1)
class TestMatrixPower(object):
R90 = array([[0, 1], [-1, 0]])
@@ -1362,6 +1383,19 @@ class TestMatrixRank(object):
# works on scalar
yield assert_equal, matrix_rank(1), 1
+ def test_symmetric_rank(self):
+ yield assert_equal, 4, matrix_rank(np.eye(4), hermitian=True)
+ yield assert_equal, 1, matrix_rank(np.ones((4, 4)), hermitian=True)
+ yield assert_equal, 0, matrix_rank(np.zeros((4, 4)), hermitian=True)
+ # rank deficient matrix
+ I = np.eye(4)
+ I[-1, -1] = 0.
+ yield assert_equal, 3, matrix_rank(I, hermitian=True)
+ # manually supplied tolerance
+ I[-1, -1] = 1e-8
+ yield assert_equal, 4, matrix_rank(I, hermitian=True, tol=0.99e-8)
+ yield assert_equal, 3, matrix_rank(I, hermitian=True, tol=1.01e-8)
+
def test_reduced_rank():
# Test matrices with reduced rank
@@ -1550,7 +1584,7 @@ def test_xerbla_override():
np.linalg.lapack_lite.xerbla()
except ValueError:
pass
- except:
+ except Exception:
os._exit(os.EX_CONFIG)
try:
@@ -1645,7 +1679,7 @@ class TestMultiDot(object):
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
- [0, 0, 0, 0, 0, 0]], dtype=np.int)
+ [0, 0, 0, 0, 0, 0]], dtype=int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index d2080b709..07d72620b 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -7,17 +7,14 @@ import warnings
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
- TestCase, run_module_suite, assert_equal, assert_array_equal,
+ run_module_suite, assert_, assert_raises, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
-rlevel = 1
+class TestRegression(object):
-
-class TestRegression(TestCase):
-
- def test_eig_build(self, level=rlevel):
+ def test_eig_build(self):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
@@ -40,7 +37,7 @@ class TestRegression(TestCase):
rva.sort()
assert_array_almost_equal(va, rva)
- def test_eigh_build(self, level=rlevel):
+ def test_eigh_build(self):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
@@ -51,7 +48,7 @@ class TestRegression(TestCase):
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
- def test_svd_build(self, level=rlevel):
+ def test_svd_build(self):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
@@ -64,7 +61,7 @@ class TestRegression(TestCase):
def test_norm_vector_badarg(self):
# Regression for #786: Froebenius norm for vectors raises
# TypeError.
- self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
+ assert_raises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
@@ -98,47 +95,47 @@ class TestRegression(TestCase):
norm = linalg.norm(testvector)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testvector, ord=1)
assert_array_equal(norm, [0, 1])
- self.assertNotEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype != np.dtype('float64'))
norm = linalg.norm(testvector, ord=2)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
- self.assertRaises(ValueError, linalg.norm, testvector, ord='fro')
- self.assertRaises(ValueError, linalg.norm, testvector, ord='nuc')
- self.assertRaises(ValueError, linalg.norm, testvector, ord=np.inf)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-np.inf)
+ assert_raises(ValueError, linalg.norm, testvector, ord='fro')
+ assert_raises(ValueError, linalg.norm, testvector, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testvector, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-np.inf)
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
- self.assertRaises((AttributeError, DeprecationWarning),
+ assert_raises((AttributeError, DeprecationWarning),
linalg.norm, testvector, ord=0)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-1)
- self.assertRaises(ValueError, linalg.norm, testvector, ord=-2)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-1)
+ assert_raises(ValueError, linalg.norm, testvector, ord=-2)
testmatrix = np.array([[np.array([0, 1]), 0, 0],
[0, 0, 0]], dtype=object)
norm = linalg.norm(testmatrix)
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
+ assert_(norm.dtype == np.dtype('float64'))
norm = linalg.norm(testmatrix, ord='fro')
assert_array_equal(norm, [0, 1])
- self.assertEqual(norm.dtype, np.dtype('float64'))
-
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord='nuc')
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=np.inf)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=0)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=1)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=-1)
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord=2)
- self.assertRaises(TypeError, linalg.norm, testmatrix, ord=-2)
- self.assertRaises(ValueError, linalg.norm, testmatrix, ord=3)
+ assert_(norm.dtype == np.dtype('float64'))
+
+ assert_raises(TypeError, linalg.norm, testmatrix, ord='nuc')
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-np.inf)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=0)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=1)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=-1)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=2)
+ assert_raises(TypeError, linalg.norm, testmatrix, ord=-2)
+ assert_raises(ValueError, linalg.norm, testmatrix, ord=3)
if __name__ == '__main__':
diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py
index af3468b01..fbefc47a4 100644
--- a/numpy/ma/__init__.py
+++ b/numpy/ma/__init__.py
@@ -51,6 +51,6 @@ __all__ = ['core', 'extras']
__all__ += core.__all__
__all__ += extras.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index d6b30ae2e..d8d3ae621 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -186,7 +186,7 @@ default_filler = {'b': True,
'O': '?',
'S': b'N/A',
'u': 999999,
- 'V': '???',
+ 'V': b'???',
'U': u'N/A'
}
@@ -205,6 +205,31 @@ if 'float128' in ntypes.typeDict:
min_filler.update([(np.float128, +np.inf)])
+def _recursive_fill_value(dtype, f):
+ """
+ Recursively produce a fill value for `dtype`, calling f on scalar dtypes
+ """
+ if dtype.names:
+ vals = tuple(_recursive_fill_value(dtype[name], f) for name in dtype.names)
+ return np.array(vals, dtype=dtype)[()] # decay to void scalar from 0d
+ elif dtype.subdtype:
+ subtype, shape = dtype.subdtype
+ subval = _recursive_fill_value(subtype, f)
+ return np.full(shape, subval)
+ else:
+ return f(dtype)
+
+
+def _get_dtype_of(obj):
+ """ Convert the argument for *_fill_value into a dtype """
+ if isinstance(obj, np.dtype):
+ return obj
+ elif hasattr(obj, 'dtype'):
+ return obj.dtype
+ else:
+ return np.asanyarray(obj).dtype
+
+
def default_fill_value(obj):
"""
Return the default fill value for the argument object.
@@ -223,6 +248,11 @@ def default_fill_value(obj):
string 'N/A'
======== ========
+ For structured types, a structured scalar is returned, with each field the
+ default fill value for its type.
+
+ For subarray types, the fill value is an array of the same size containing
+ the default scalar fill value.
Parameters
----------
@@ -245,39 +275,29 @@ def default_fill_value(obj):
(1e+20+0j)
"""
- if hasattr(obj, 'dtype'):
- defval = _check_fill_value(None, obj.dtype)
- elif isinstance(obj, np.dtype):
- if obj.subdtype:
- defval = default_filler.get(obj.subdtype[0].kind, '?')
- elif obj.kind in 'Mm':
- defval = default_filler.get(obj.str[1:], '?')
+ def _scalar_fill_value(dtype):
+ if dtype.kind in 'Mm':
+ return default_filler.get(dtype.str[1:], '?')
else:
- defval = default_filler.get(obj.kind, '?')
- elif isinstance(obj, float):
- defval = default_filler['f']
- elif isinstance(obj, int) or isinstance(obj, long):
- defval = default_filler['i']
- elif isinstance(obj, bytes):
- defval = default_filler['S']
- elif isinstance(obj, unicode):
- defval = default_filler['U']
- elif isinstance(obj, complex):
- defval = default_filler['c']
- else:
- defval = default_filler['O']
- return defval
+ return default_filler.get(dtype.kind, '?')
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
-def _recursive_extremum_fill_value(ndtype, extremum):
- names = ndtype.names
- if names:
- deflist = []
- for name in names:
- fval = _recursive_extremum_fill_value(ndtype[name], extremum)
- deflist.append(fval)
- return tuple(deflist)
- return extremum[ndtype]
+
+def _extremum_fill_value(obj, extremum, extremum_name):
+
+ def _scalar_fill_value(dtype):
+ try:
+ return extremum[dtype]
+ except KeyError:
+ raise TypeError(
+ "Unsuitable type {} for calculating {}."
+ .format(dtype, extremum_name)
+ )
+
+ dtype = _get_dtype_of(obj)
+ return _recursive_fill_value(dtype, _scalar_fill_value)
def minimum_fill_value(obj):
@@ -289,7 +309,7 @@ def minimum_fill_value(obj):
Parameters
----------
- obj : ndarray or dtype
+ obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
@@ -328,19 +348,7 @@ def minimum_fill_value(obj):
inf
"""
- errmsg = "Unsuitable type for calculating minimum."
- if hasattr(obj, 'dtype'):
- return _recursive_extremum_fill_value(obj.dtype, min_filler)
- elif isinstance(obj, float):
- return min_filler[ntypes.typeDict['float_']]
- elif isinstance(obj, int):
- return min_filler[ntypes.typeDict['int_']]
- elif isinstance(obj, long):
- return min_filler[ntypes.typeDict['uint']]
- elif isinstance(obj, np.dtype):
- return min_filler[obj]
- else:
- raise TypeError(errmsg)
+ return _extremum_fill_value(obj, min_filler, "minimum")
def maximum_fill_value(obj):
@@ -352,7 +360,7 @@ def maximum_fill_value(obj):
Parameters
----------
- obj : {ndarray, dtype}
+ obj : ndarray, dtype or scalar
An object that can be queried for it's numeric type.
Returns
@@ -391,48 +399,7 @@ def maximum_fill_value(obj):
-inf
"""
- errmsg = "Unsuitable type for calculating maximum."
- if hasattr(obj, 'dtype'):
- return _recursive_extremum_fill_value(obj.dtype, max_filler)
- elif isinstance(obj, float):
- return max_filler[ntypes.typeDict['float_']]
- elif isinstance(obj, int):
- return max_filler[ntypes.typeDict['int_']]
- elif isinstance(obj, long):
- return max_filler[ntypes.typeDict['uint']]
- elif isinstance(obj, np.dtype):
- return max_filler[obj]
- else:
- raise TypeError(errmsg)
-
-
-def _recursive_set_default_fill_value(dt):
- """
- Create the default fill value for a structured dtype.
-
- Parameters
- ----------
- dt: dtype
- The structured dtype for which to create the fill value.
-
- Returns
- -------
- val: tuple
- A tuple of values corresponding to the default structured fill value.
-
- """
- deflist = []
- for name in dt.names:
- currenttype = dt[name]
- if currenttype.subdtype:
- currenttype = currenttype.subdtype[0]
-
- if currenttype.names:
- deflist.append(
- tuple(_recursive_set_default_fill_value(currenttype)))
- else:
- deflist.append(default_fill_value(currenttype))
- return tuple(deflist)
+ return _extremum_fill_value(obj, max_filler, "maximum")
def _recursive_set_fill_value(fillvalue, dt):
@@ -471,22 +438,16 @@ def _check_fill_value(fill_value, ndtype):
"""
Private function validating the given `fill_value` for the given dtype.
- If fill_value is None, it is set to the default corresponding to the dtype
- if this latter is standard (no fields). If the datatype is flexible (named
- fields), fill_value is set to a tuple whose elements are the default fill
- values corresponding to each field.
+ If fill_value is None, it is set to the default corresponding to the dtype.
If fill_value is not None, its value is forced to the given dtype.
+ The result is always a 0d array.
"""
ndtype = np.dtype(ndtype)
fields = ndtype.fields
if fill_value is None:
- if fields:
- fill_value = np.array(_recursive_set_default_fill_value(ndtype),
- dtype=ndtype)
- else:
- fill_value = default_fill_value(ndtype)
+ fill_value = default_fill_value(ndtype)
elif fields:
fdtype = [(_[0], _[1]) for _ in ndtype.descr]
if isinstance(fill_value, (ndarray, np.void)):
@@ -823,7 +784,7 @@ ufunc_domain = {}
ufunc_fills = {}
-class _DomainCheckInterval:
+class _DomainCheckInterval(object):
"""
Define a valid interval, so that :
@@ -848,7 +809,7 @@ class _DomainCheckInterval:
umath.less(x, self.a))
-class _DomainTan:
+class _DomainTan(object):
"""
Define a valid interval for the `tan` function, so that:
@@ -866,7 +827,7 @@ class _DomainTan:
return umath.less(umath.absolute(umath.cos(x)), self.eps)
-class _DomainSafeDivide:
+class _DomainSafeDivide(object):
"""
Define a domain for safe division.
@@ -887,7 +848,7 @@ class _DomainSafeDivide:
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
-class _DomainGreater:
+class _DomainGreater(object):
"""
DomainGreater(v)(x) is True where x <= v.
@@ -903,7 +864,7 @@ class _DomainGreater:
return umath.less_equal(x, self.critical_value)
-class _DomainGreaterEqual:
+class _DomainGreaterEqual(object):
"""
DomainGreaterEqual(v)(x) is True where x < v.
@@ -919,7 +880,17 @@ class _DomainGreaterEqual:
return umath.less(x, self.critical_value)
-class _MaskedUnaryOperation:
+class _MaskedUFunc(object):
+ def __init__(self, ufunc):
+ self.f = ufunc
+ self.__doc__ = ufunc.__doc__
+ self.__name__ = ufunc.__name__
+
+ def __str__(self):
+ return "Masked version of {}".format(self.f)
+
+
+class _MaskedUnaryOperation(_MaskedUFunc):
"""
Defines masked version of unary operations, where invalid values are
pre-masked.
@@ -938,11 +909,9 @@ class _MaskedUnaryOperation:
"""
def __init__(self, mufunc, fill=0, domain=None):
- self.f = mufunc
+ super(_MaskedUnaryOperation, self).__init__(mufunc)
self.fill = fill
self.domain = domain
- self.__doc__ = getattr(mufunc, "__doc__", str(mufunc))
- self.__name__ = getattr(mufunc, "__name__", str(mufunc))
ufunc_domain[mufunc] = domain
ufunc_fills[mufunc] = fill
@@ -994,11 +963,8 @@ class _MaskedUnaryOperation:
masked_result._update_from(a)
return masked_result
- def __str__(self):
- return "Masked version of %s. [Invalid values are masked]" % str(self.f)
-
-class _MaskedBinaryOperation:
+class _MaskedBinaryOperation(_MaskedUFunc):
"""
Define masked version of binary operations, where invalid
values are pre-masked.
@@ -1025,11 +991,9 @@ class _MaskedBinaryOperation:
abfunc(x, filly) = x for all x to enable reduce.
"""
- self.f = mbfunc
+ super(_MaskedBinaryOperation, self).__init__(mbfunc)
self.fillx = fillx
self.filly = filly
- self.__doc__ = getattr(mbfunc, "__doc__", str(mbfunc))
- self.__name__ = getattr(mbfunc, "__name__", str(mbfunc))
ufunc_domain[mbfunc] = None
ufunc_fills[mbfunc] = (fillx, filly)
@@ -1068,7 +1032,7 @@ class _MaskedBinaryOperation:
# any errors, just abort; impossible to guarantee masked values
try:
np.copyto(result, da, casting='unsafe', where=m)
- except:
+ except Exception:
pass
# Transforms to a (subclass of) MaskedArray
@@ -1146,11 +1110,9 @@ class _MaskedBinaryOperation:
masked_result = result.view(tclass)
return masked_result
- def __str__(self):
- return "Masked version of " + str(self.f)
-class _DomainedBinaryOperation:
+class _DomainedBinaryOperation(_MaskedUFunc):
"""
Define binary operations that have a domain, like divide.
@@ -1175,12 +1137,10 @@ class _DomainedBinaryOperation:
"""abfunc(fillx, filly) must be defined.
abfunc(x, filly) = x for all x to enable reduce.
"""
- self.f = dbfunc
+ super(_DomainedBinaryOperation, self).__init__(dbfunc)
self.domain = domain
self.fillx = fillx
self.filly = filly
- self.__doc__ = getattr(dbfunc, "__doc__", str(dbfunc))
- self.__name__ = getattr(dbfunc, "__name__", str(dbfunc))
ufunc_domain[dbfunc] = domain
ufunc_fills[dbfunc] = (fillx, filly)
@@ -1214,7 +1174,7 @@ class _DomainedBinaryOperation:
# only add back if it can be cast safely
if np.can_cast(masked_da.dtype, result.dtype, casting='safe'):
result += masked_da
- except:
+ except Exception:
pass
# Transforms to a (subclass of) MaskedArray
@@ -1226,9 +1186,6 @@ class _DomainedBinaryOperation:
masked_result._update_from(b)
return masked_result
- def __str__(self):
- return "Masked version of " + str(self.f)
-
# Unary ufuncs
exp = _MaskedUnaryOperation(umath.exp)
@@ -1329,7 +1286,7 @@ def _replace_dtype_fields_recursive(dtype, primitive_dtype):
descr.append((name, _recurse(field[0], primitive_dtype)))
new_dtype = np.dtype(descr)
- # Is this some kind of composite a la (np.float,2)
+ # Is this some kind of composite a la (float,2)
elif dtype.subdtype:
descr = list(dtype.subdtype)
descr[0] = _recurse(dtype.subdtype[0], primitive_dtype)
@@ -1381,7 +1338,7 @@ def make_mask_descr(ndtype):
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, np.int]})
+ 'formats':[np.float32, int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_descr(dtype)
@@ -1562,7 +1519,7 @@ def is_mask(m):
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
- 'formats':[np.bool, np.bool]})
+ 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
@@ -1641,7 +1598,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
- 'formats':[np.int, np.int]})
+ 'formats':[int, int]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
@@ -1656,6 +1613,11 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
# Make sure the input dtype is valid.
dtype = make_mask_descr(dtype)
+
+ # legacy boolean special case: "existence of fields implies true"
+ if isinstance(m, ndarray) and m.dtype.fields and dtype == np.bool_:
+ return np.ones(m.shape, dtype=dtype)
+
# Fill the mask in case there are missing data; turn it into an ndarray.
result = np.array(filled(m, True), copy=copy, dtype=dtype, subok=True)
# Bas les masques !
@@ -1700,7 +1662,7 @@ def make_mask_none(newshape, dtype=None):
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, np.int]})
+ 'formats':[np.float32, int]})
>>> dtype
dtype([('foo', '<f4'), ('bar', '<i4')])
>>> ma.make_mask_none((3,), dtype=dtype)
@@ -1798,7 +1760,7 @@ def flatten_mask(mask):
Examples
--------
- >>> mask = np.array([0, 0, 1], dtype=np.bool)
+ >>> mask = np.array([0, 0, 1], dtype=bool)
>>> flatten_mask(mask)
array([False, False, True], dtype=bool)
@@ -2366,7 +2328,7 @@ def masked_invalid(a, copy=True):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.arange(5, dtype=np.float)
+ >>> a = np.arange(5, dtype=float)
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
@@ -2397,7 +2359,7 @@ def masked_invalid(a, copy=True):
###############################################################################
-class _MaskedPrintOption:
+class _MaskedPrintOption(object):
"""
Handle the string used to represent missing data in a masked array.
@@ -2598,14 +2560,11 @@ def _arraymethod(funcname, onmask=True):
result = result.view(type(self))
result._update_from(self)
mask = self._mask
- if result.ndim:
- if not onmask:
- result.__setmask__(mask)
- elif mask is not nomask:
- result.__setmask__(getattr(mask, funcname)(*args, **params))
- else:
- if mask.ndim and (not mask.dtype.names and mask.all()):
- return masked
+ if not onmask:
+ result.__setmask__(mask)
+ elif mask is not nomask:
+ # __setmask__ makes a copy, which we don't want
+ result._mask = getattr(mask, funcname)(*args, **params)
return result
methdoc = getattr(ndarray, funcname, None) or getattr(np, funcname, None)
if methdoc is not None:
@@ -2935,7 +2894,7 @@ class MaskedArray(ndarray):
Copies some attributes of obj to self.
"""
- if obj is not None and isinstance(obj, ndarray):
+ if isinstance(obj, ndarray):
_baseclass = type(obj)
else:
_baseclass = ndarray
@@ -3191,16 +3150,16 @@ class MaskedArray(ndarray):
"""
newtype = np.dtype(newtype)
+ newmasktype = make_mask_descr(newtype)
+
output = self._data.astype(newtype).view(type(self))
output._update_from(self)
- names = output.dtype.names
- if names is None:
- output._mask = self._mask.astype(bool)
+
+ if self._mask is nomask:
+ output._mask = nomask
else:
- if self._mask is nomask:
- output._mask = nomask
- else:
- output._mask = self._mask.astype([(n, bool) for n in names])
+ output._mask = self._mask.astype(newmasktype)
+
# Don't check _fill_value if it's None, that'll speed things up
if self._fill_value is not None:
output._fill_value = _check_fill_value(self._fill_value, newtype)
@@ -3357,8 +3316,6 @@ class MaskedArray(ndarray):
_mask[indx] = tuple([True] * nbfields)
else:
_mask[indx] = True
- if not self._isfield:
- self._sharedmask = False
return
# Get the _data part of the new value
@@ -3374,27 +3331,6 @@ class MaskedArray(ndarray):
_mask = self._mask = make_mask_none(self.shape, _dtype)
_mask[indx] = mval
elif not self._hardmask:
- # Unshare the mask if necessary to avoid propagation
- # We want to remove the unshare logic from this place in the
- # future. Note that _sharedmask has lots of false positives.
- if not self._isfield:
- notthree = getattr(sys, 'getrefcount', False) and (sys.getrefcount(_mask) != 3)
- if self._sharedmask and not (
- # If no one else holds a reference (we have two
- # references (_mask and self._mask) -- add one for
- # getrefcount) and the array owns its own data
- # copying the mask should do nothing.
- (not notthree) and _mask.flags.owndata):
- # 2016.01.15 -- v1.11.0
- warnings.warn(
- "setting an item on a masked array which has a shared "
- "mask will not copy the mask and also change the "
- "original mask array in the future.\n"
- "Check the NumPy 1.11 release notes for more "
- "information.",
- MaskedArrayFutureWarning, stacklevel=2)
- self.unshare_mask()
- _mask = self._mask
# Set the data, then the mask
_data[indx] = dval
_mask[indx] = mval
@@ -4022,6 +3958,7 @@ class MaskedArray(ndarray):
mask = np.broadcast_to(mask, check.shape).copy()
check = check.view(type(self))
+ check._update_from(self)
check._mask = mask
return check
@@ -4475,8 +4412,6 @@ class MaskedArray(ndarray):
return (~m).sum(axis=axis, dtype=np.intp, **kwargs)
- flatten = _arraymethod('flatten')
-
def ravel(self, order='C'):
"""
Returns a 1D version of self, as a view.
@@ -4522,8 +4457,6 @@ class MaskedArray(ndarray):
r._mask = nomask
return r
- repeat = _arraymethod('repeat')
-
def reshape(self, *s, **kwargs):
"""
@@ -4659,7 +4592,7 @@ class MaskedArray(ndarray):
if self._mask is nomask and getmask(values) is nomask:
return
- m = getmaskarray(self).copy()
+ m = getmaskarray(self)
if getmask(values) is nomask:
m.put(indices, False, mode=mode)
@@ -5810,14 +5743,15 @@ class MaskedArray(ndarray):
return out[()]
# Array methods
- copy = _arraymethod('copy')
- diagonal = _arraymethod('diagonal')
- transpose = _arraymethod('transpose')
- T = property(fget=lambda self: self.transpose())
- swapaxes = _arraymethod('swapaxes')
clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
+ diagonal = _arraymethod('diagonal')
+ flatten = _arraymethod('flatten')
+ repeat = _arraymethod('repeat')
squeeze = _arraymethod('squeeze')
+ swapaxes = _arraymethod('swapaxes')
+ T = property(fget=lambda self: self.transpose())
+ transpose = _arraymethod('transpose')
def tolist(self, fill_value=None):
"""
@@ -6358,7 +6292,7 @@ def is_masked(x):
##############################################################################
-class _extrema_operation(object):
+class _extrema_operation(_MaskedUFunc):
"""
Generic class for maximum/minimum functions.
@@ -6368,11 +6302,9 @@ class _extrema_operation(object):
"""
def __init__(self, ufunc, compare, fill_value):
- self.ufunc = ufunc
+ super(_extrema_operation, self).__init__(ufunc)
self.compare = compare
self.fill_value_func = fill_value
- self.__doc__ = ufunc.__doc__
- self.__name__ = ufunc.__name__
def __call__(self, a, b=None):
"Executes the call behavior."
@@ -6407,11 +6339,11 @@ class _extrema_operation(object):
kwargs = dict()
if m is nomask:
- t = self.ufunc.reduce(target, **kwargs)
+ t = self.f.reduce(target, **kwargs)
else:
target = target.filled(
self.fill_value_func(target)).view(type(target))
- t = self.ufunc.reduce(target, **kwargs)
+ t = self.f.reduce(target, **kwargs)
m = umath.logical_and.reduce(m, **kwargs)
if hasattr(t, '_mask'):
t._mask = m
@@ -6429,7 +6361,7 @@ class _extrema_operation(object):
ma = getmaskarray(a)
mb = getmaskarray(b)
m = logical_or.outer(ma, mb)
- result = self.ufunc.outer(filled(a), filled(b))
+ result = self.f.outer(filled(a), filled(b))
if not isinstance(result, MaskedArray):
result = result.view(MaskedArray)
result._mask = m
@@ -6479,7 +6411,7 @@ ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
-class _frommethod:
+class _frommethod(object):
"""
Define functions from existing MaskedArray methods.
@@ -7295,7 +7227,7 @@ def mask_rowcols(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
@@ -7476,8 +7408,8 @@ def _convolve_or_correlate(f, a, v, mode, propagate_mask):
if propagate_mask:
# results which are contributed to by either item in any pair being invalid
mask = (
- f(getmaskarray(a), np.ones(np.shape(v), dtype=np.bool), mode=mode)
- | f(np.ones(np.shape(a), dtype=np.bool), getmaskarray(v), mode=mode)
+ f(getmaskarray(a), np.ones(np.shape(v), dtype=bool), mode=mode)
+ | f(np.ones(np.shape(a), dtype=bool), getmaskarray(v), mode=mode)
)
data = f(getdata(a), getdata(v), mode=mode)
else:
@@ -7957,7 +7889,7 @@ def fromflex(fxarray):
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
-class _convert2ma:
+class _convert2ma(object):
"""
Convert functions from numpy to numpy.ma.
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index d8ea3de8c..323fbce38 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -215,7 +215,7 @@ def masked_all_like(arr):
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
-class _fromnxfunction:
+class _fromnxfunction(object):
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
@@ -778,7 +778,7 @@ def _median(a, axis=None, out=None, overwrite_input=False):
# not necessary for scalar True/False masks
try:
np.copyto(low.mask, high.mask, where=odd)
- except:
+ except Exception:
pass
if np.issubdtype(asorted.dtype, np.inexact):
@@ -939,7 +939,7 @@ def mask_rows(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
@@ -984,7 +984,7 @@ def mask_cols(a, axis=None):
Examples
--------
>>> import numpy.ma as ma
- >>> a = np.zeros((3, 3), dtype=np.int)
+ >>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index ef5f5fd53..90a5141b3 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -243,7 +243,7 @@ class MaskedRecords(MaskedArray, object):
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
- hasmasked = _mask.view((np.bool, (len(_mask.dtype) or 1))).any()
+ hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
@@ -276,7 +276,7 @@ class MaskedRecords(MaskedArray, object):
try:
# Is attr a generic attribute ?
ret = object.__setattr__(self, attr, val)
- except:
+ except Exception:
# Not a generic attribute: exit if it's not a valid field
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
@@ -294,7 +294,7 @@ class MaskedRecords(MaskedArray, object):
# internal attribute.
try:
object.__delattr__(self, attr)
- except:
+ except Exception:
return ret
# Let's try to set the field
try:
diff --git a/numpy/ma/tests/__init__.py b/numpy/ma/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/ma/tests/__init__.py
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index c2b8d1403..6aa8f3e08 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -20,7 +20,8 @@ import numpy.ma.core
import numpy.core.fromnumeric as fromnumeric
import numpy.core.umath as umath
from numpy.testing import (
- TestCase, run_module_suite, assert_raises, assert_warns, suppress_warnings)
+ run_module_suite, assert_raises, assert_warns, suppress_warnings
+ )
from numpy import ndarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma.testutils import (
@@ -55,10 +56,10 @@ suppress_copy_mask_on_assignment.filter(
"setting an item on a masked array which has a shared mask will not copy")
-class TestMaskedArray(TestCase):
+class TestMaskedArray(object):
# Base test class for MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
@@ -93,14 +94,14 @@ class TestMaskedArray(TestCase):
x = masked_array(0, mask=False)
assert_equal(str(x), '0')
x = array(0, mask=1)
- self.assertTrue(x.filled().dtype is x._data.dtype)
+ assert_(x.filled().dtype is x._data.dtype)
def test_basic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
- self.assertTrue(not isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertTrue((xm - ym).filled(0).any())
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_((xm - ym).filled(0).any())
fail_if_equal(xm.mask.astype(int), ym.mask.astype(int))
s = x.shape
assert_equal(np.shape(xm), s)
@@ -123,8 +124,8 @@ class TestMaskedArray(TestCase):
ym.shape = s
xf.shape = s
- self.assertTrue(not isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
assert_equal(shape(xm), s)
assert_equal(xm.shape, s)
assert_equal(xm.size, reduce(lambda x, y:x * y, s))
@@ -217,7 +218,7 @@ class TestMaskedArray(TestCase):
x.mask = nomask
data = array((x, x[::-1]))
assert_equal(data, [[0, 1, 2, 3, 4], [4, 3, 2, 1, 0]])
- self.assertTrue(data.mask is nomask)
+ assert_(data.mask is nomask)
def test_creation_from_ndarray_with_padding(self):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
@@ -238,18 +239,18 @@ class TestMaskedArray(TestCase):
def test_asarray_default_order(self):
# See Issue #6646
m = np.eye(3).T
- self.assertFalse(m.flags.c_contiguous)
+ assert_(not m.flags.c_contiguous)
new_m = asarray(m)
- self.assertTrue(new_m.flags.c_contiguous)
+ assert_(new_m.flags.c_contiguous)
def test_asarray_enforce_order(self):
# See Issue #6646
m = np.eye(3).T
- self.assertFalse(m.flags.c_contiguous)
+ assert_(not m.flags.c_contiguous)
new_m = asarray(m, order='C')
- self.assertTrue(new_m.flags.c_contiguous)
+ assert_(new_m.flags.c_contiguous)
def test_fix_invalid(self):
# Checks fix_invalid.
@@ -263,8 +264,8 @@ class TestMaskedArray(TestCase):
# Test of masked element
x = arange(6)
x[1] = masked
- self.assertTrue(str(masked) == '--')
- self.assertTrue(x[1] is masked)
+ assert_(str(masked) == '--')
+ assert_(x[1] is masked)
assert_equal(filled(x[1], 0), 0)
def test_set_element_as_object(self):
@@ -273,12 +274,12 @@ class TestMaskedArray(TestCase):
x = (1, 2, 3, 4, 5)
a[0] = x
assert_equal(a[0], x)
- self.assertTrue(a[0] is x)
+ assert_(a[0] is x)
import datetime
dt = datetime.datetime.now()
a[0] = dt
- self.assertTrue(a[0] is dt)
+ assert_(a[0] is dt)
def test_indexing(self):
# Tests conversions and indexing
@@ -379,32 +380,43 @@ class TestMaskedArray(TestCase):
n = [0, 0, 1, 0, 0]
m = make_mask(n)
m2 = make_mask(m)
- self.assertTrue(m is m2)
+ assert_(m is m2)
m3 = make_mask(m, copy=1)
- self.assertTrue(m is not m3)
+ assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_equal(y1._data.__array_interface__, x1.__array_interface__)
- self.assertTrue(allequal(x1, y1.data))
+ assert_(allequal(x1, y1.data))
assert_equal(y1._mask.__array_interface__, m.__array_interface__)
y1a = array(y1)
- self.assertTrue(y1a._data.__array_interface__ ==
+ assert_(y1a._data.__array_interface__ ==
y1._data.__array_interface__)
- self.assertTrue(y1a.mask is y1.mask)
+ assert_(y1a.mask is y1.mask)
- y2 = array(x1, mask=m)
- self.assertTrue(y2._data.__array_interface__ == x1.__array_interface__)
- self.assertTrue(y2._mask.__array_interface__ == m.__array_interface__)
- self.assertTrue(y2[2] is masked)
+ y2 = array(x1, mask=m3)
+ assert_(y2._data.__array_interface__ == x1.__array_interface__)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(y2[2] is masked)
y2[2] = 9
- self.assertTrue(y2[2] is not masked)
- self.assertTrue(y2._mask.__array_interface__ != m.__array_interface__)
- self.assertTrue(allequal(y2.mask, 0))
+ assert_(y2[2] is not masked)
+ assert_(y2._mask.__array_interface__ == m3.__array_interface__)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a._data.__array_interface__ != x1.__array_interface__)
+ #assert_( y2a.mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ #assert_( y2a.mask is not m)
+ assert_(y2a._mask.__array_interface__ != m.__array_interface__)
+ assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
- self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
x4 = arange(4)
x4[2] = masked
@@ -433,10 +445,16 @@ class TestMaskedArray(TestCase):
assert_not_equal(y._data.ctypes.data, x._data.ctypes.data)
assert_not_equal(y._mask.ctypes.data, x._mask.ctypes.data)
+ def test_copy_0d(self):
+ # gh-9430
+ x = np.ma.array(43, mask=True)
+ xc = x.copy()
+ assert_equal(xc.mask, True)
+
def test_copy_on_python_builtins(self):
# Tests copy works on python builtins (issue#8019)
- self.assertTrue(isMaskedArray(np.ma.copy([1,2,3])))
- self.assertTrue(isMaskedArray(np.ma.copy((1,2,3))))
+ assert_(isMaskedArray(np.ma.copy([1,2,3])))
+ assert_(isMaskedArray(np.ma.copy((1,2,3))))
def test_copy_immutable(self):
# Tests that the copy method is immutable, GitHub issue #5247
@@ -506,7 +524,7 @@ class TestMaskedArray(TestCase):
a_pickled = pickle.loads(a.dumps())
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
- self.assertTrue(isinstance(a_pickled._data, np.matrix))
+ assert_(isinstance(a_pickled._data, np.matrix))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
@@ -546,19 +564,19 @@ class TestMaskedArray(TestCase):
assert_equal(1.0, float(array(1)))
assert_equal(1, int(array([[[1]]])))
assert_equal(1.0, float(array([[1]])))
- self.assertRaises(TypeError, float, array([1, 1]))
+ assert_raises(TypeError, float, array([1, 1]))
with suppress_warnings() as sup:
sup.filter(UserWarning, 'Warning: converting a masked element')
assert_(np.isnan(float(array([1], mask=[1]))))
a = array([1, 2, 3], mask=[1, 0, 0])
- self.assertRaises(TypeError, lambda: float(a))
+ assert_raises(TypeError, lambda: float(a))
assert_equal(float(a[-1]), 3.)
- self.assertTrue(np.isnan(float(a[0])))
- self.assertRaises(TypeError, int, a)
+ assert_(np.isnan(float(a[0])))
+ assert_raises(TypeError, int, a)
assert_equal(int(a[-1]), 3)
- self.assertRaises(MAError, lambda:int(a[0]))
+ assert_raises(MAError, lambda:int(a[0]))
def test_oddfeatures_1(self):
# Test of other odd features
@@ -667,8 +685,8 @@ class TestMaskedArray(TestCase):
a = array(np.array([(0, 1, 2), (4, 5, 6)], order='F'),
mask=np.array([(0, 0, 1), (1, 0, 0)], order='F'),
order='F') # this is currently ignored
- self.assertTrue(a.flags['F_CONTIGUOUS'])
- self.assertTrue(a.filled(0).flags['F_CONTIGUOUS'])
+ assert_(a.flags['F_CONTIGUOUS'])
+ assert_(a.filled(0).flags['F_CONTIGUOUS'])
def test_optinfo_propagation(self):
# Checks that _optinfo dictionary isn't back-propagated
@@ -679,6 +697,25 @@ class TestMaskedArray(TestCase):
y._optinfo['info'] = '!!!'
assert_equal(x._optinfo['info'], '???')
+ def test_optinfo_forward_propagation(self):
+ a = array([1,2,2,4])
+ a._optinfo["key"] = "value"
+ assert_equal(a._optinfo["key"], (a == 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a != 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a > 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a >= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a <= 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a + 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a - 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a * 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], (a / 2)._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[:2]._optinfo["key"])
+ assert_equal(a._optinfo["key"], a[[0,0,2]]._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.exp(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.abs(a)._optinfo["key"])
+ assert_equal(a._optinfo["key"], array(a, copy=True)._optinfo["key"])
+ assert_equal(a._optinfo["key"], np.zeros_like(a)._optinfo["key"])
+
def test_fancy_printoptions(self):
# Test printing a masked array w/ fancy dtype.
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
@@ -706,14 +743,14 @@ class TestMaskedArray(TestCase):
ndtype = [('a', int), ('b', float)]
a = np.array([(1, 1), (2, 2)], dtype=ndtype)
test = flatten_structured_array(a)
- control = np.array([[1., 1.], [2., 2.]], dtype=np.float)
+ control = np.array([[1., 1.], [2., 2.]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
# On masked_array
a = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1.], [2., 2.]],
- mask=[[0, 1], [1, 0]], dtype=np.float)
+ mask=[[0, 1], [1, 0]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
@@ -723,7 +760,7 @@ class TestMaskedArray(TestCase):
mask=[(0, (1, 0)), (1, (0, 1))], dtype=ndtype)
test = flatten_structured_array(a)
control = array([[1., 1., 1.1], [2., 2., 2.2]],
- mask=[[0, 1, 0], [1, 0, 1]], dtype=np.float)
+ mask=[[0, 1, 0], [1, 0, 1]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
assert_equal(test.mask, control.mask)
@@ -731,7 +768,7 @@ class TestMaskedArray(TestCase):
ndtype = [('a', int), ('b', float)]
a = np.array([[(1, 1), ], [(2, 2), ]], dtype=ndtype)
test = flatten_structured_array(a)
- control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=np.float)
+ control = np.array([[[1., 1.], ], [[2., 2.], ]], dtype=float)
assert_equal(test, control)
assert_equal(test.dtype, control.dtype)
@@ -756,14 +793,14 @@ class TestMaskedArray(TestCase):
dtype=ndtype)
# w/o mask
f = a[0]
- self.assertTrue(isinstance(f, mvoid))
+ assert_(isinstance(f, mvoid))
assert_equal((f[0], f['a']), (1, 1))
assert_equal(f['b'], 2)
# w/ mask
f = a[1]
- self.assertTrue(isinstance(f, mvoid))
- self.assertTrue(f[0] is masked)
- self.assertTrue(f['a'] is masked)
+ assert_(isinstance(f, mvoid))
+ assert_(f[0] is masked)
+ assert_(f['a'] is masked)
assert_equal(f[1], 4)
# exotic dtype
@@ -850,10 +887,10 @@ class TestMaskedArray(TestCase):
assert_(mx2[0] == 0.)
-class TestMaskedArrayArithmetic(TestCase):
+class TestMaskedArrayArithmetic(object):
# Base test class for MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
@@ -870,7 +907,7 @@ class TestMaskedArrayArithmetic(TestCase):
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.err_status)
def test_basic_arithmetic(self):
@@ -930,8 +967,8 @@ class TestMaskedArrayArithmetic(TestCase):
# Tests mixed arithmetics.
na = np.array([1])
ma = array([1])
- self.assertTrue(isinstance(na + ma, MaskedArray))
- self.assertTrue(isinstance(ma + na, MaskedArray))
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
def test_limits_arithmetic(self):
tiny = np.finfo(float).tiny
@@ -943,11 +980,11 @@ class TestMaskedArrayArithmetic(TestCase):
# Tests some scalar arithmetics on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
- self.assertTrue((1 / array(0)).mask)
- self.assertTrue((1 + xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue(maximum(xm, xm).mask)
- self.assertTrue(minimum(xm, xm).mask)
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
def test_masked_singleton_equality(self):
# Tests (in)equality on masked singleton
@@ -1019,7 +1056,7 @@ class TestMaskedArrayArithmetic(TestCase):
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
res = count(ott)
- self.assertTrue(res.dtype.type is np.intp)
+ assert_(res.dtype.type is np.intp)
assert_equal(3, res)
ott = ott.reshape((2, 2))
@@ -1070,19 +1107,19 @@ class TestMaskedArrayArithmetic(TestCase):
def test_minimummaximum_func(self):
a = np.ones((2, 2))
aminimum = minimum(a, a)
- self.assertTrue(isinstance(aminimum, MaskedArray))
+ assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum(a, a))
aminimum = minimum.outer(a, a)
- self.assertTrue(isinstance(aminimum, MaskedArray))
+ assert_(isinstance(aminimum, MaskedArray))
assert_equal(aminimum, np.minimum.outer(a, a))
amaximum = maximum(a, a)
- self.assertTrue(isinstance(amaximum, MaskedArray))
+ assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum(a, a))
amaximum = maximum.outer(a, a)
- self.assertTrue(isinstance(amaximum, MaskedArray))
+ assert_(isinstance(amaximum, MaskedArray))
assert_equal(amaximum, np.maximum.outer(a, a))
def test_minmax_reduce(self):
@@ -1108,33 +1145,33 @@ class TestMaskedArrayArithmetic(TestCase):
pass
nout = np.empty((4,), dtype=float)
result = npfunc(xm, axis=0, out=nout)
- self.assertTrue(result is nout)
+ assert_(result is nout)
# Use the ma version
nout.fill(-999)
result = mafunc(xm, axis=0, out=nout)
- self.assertTrue(result is nout)
+ assert_(result is nout)
def test_minmax_methods(self):
# Additional tests on max/min
(_, _, _, _, _, xm, _, _, _, _) = self.d
xm.shape = (xm.size,)
assert_equal(xm.max(), 10)
- self.assertTrue(xm[0].max() is masked)
- self.assertTrue(xm[0].max(0) is masked)
- self.assertTrue(xm[0].max(-1) is masked)
+ assert_(xm[0].max() is masked)
+ assert_(xm[0].max(0) is masked)
+ assert_(xm[0].max(-1) is masked)
assert_equal(xm.min(), -10.)
- self.assertTrue(xm[0].min() is masked)
- self.assertTrue(xm[0].min(0) is masked)
- self.assertTrue(xm[0].min(-1) is masked)
+ assert_(xm[0].min() is masked)
+ assert_(xm[0].min(0) is masked)
+ assert_(xm[0].min(-1) is masked)
assert_equal(xm.ptp(), 20.)
- self.assertTrue(xm[0].ptp() is masked)
- self.assertTrue(xm[0].ptp(0) is masked)
- self.assertTrue(xm[0].ptp(-1) is masked)
+ assert_(xm[0].ptp() is masked)
+ assert_(xm[0].ptp(0) is masked)
+ assert_(xm[0].ptp(-1) is masked)
x = array([1, 2, 3], mask=True)
- self.assertTrue(x.min() is masked)
- self.assertTrue(x.max() is masked)
- self.assertTrue(x.ptp() is masked)
+ assert_(x.min() is masked)
+ assert_(x.max() is masked)
+ assert_(x.ptp() is masked)
def test_addsumprod(self):
# Tests add, sum, product.
@@ -1491,7 +1528,7 @@ class TestMaskedArrayArithmetic(TestCase):
assert_equal(a.mask, [0, 0, 0, 0, 1])
-class TestMaskedArrayAttributes(TestCase):
+class TestMaskedArrayAttributes(object):
def test_keepmask(self):
# Tests the keep mask flag
@@ -1519,8 +1556,8 @@ class TestMaskedArrayAttributes(TestCase):
assert_equal(xh._data, [0, 10, 2, 3, 4])
assert_equal(xs._data, [0, 10, 2, 3, 40])
assert_equal(xs.mask, [0, 0, 0, 1, 0])
- self.assertTrue(xh._hardmask)
- self.assertTrue(not xs._hardmask)
+ assert_(xh._hardmask)
+ assert_(not xs._hardmask)
xh[1:4] = [10, 20, 30]
xs[1:4] = [10, 20, 30]
assert_equal(xh._data, [0, 10, 20, 3, 4])
@@ -1610,7 +1647,7 @@ class TestMaskedArrayAttributes(TestCase):
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
assert_equal(test.flat[1], 2)
assert_equal(test.flat[2], masked)
- self.assertTrue(np.all(test.flat[0:2] == test[0, 0:2]))
+ assert_(np.all(test.flat[0:2] == test[0, 0:2]))
# Test flat on masked_matrices
test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
@@ -1684,7 +1721,7 @@ class TestMaskedArrayAttributes(TestCase):
assert_equal(m._mask, np.ma.nomask)
-class TestFillingValues(TestCase):
+class TestFillingValues(object):
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
@@ -1699,8 +1736,8 @@ class TestFillingValues(TestCase):
assert_equal(fval, b"0")
fval = _check_fill_value(None, "|S3")
assert_equal(fval, default_fill_value(b"camelot!"))
- self.assertRaises(TypeError, _check_fill_value, 1e+20, int)
- self.assertRaises(TypeError, _check_fill_value, 'stuff', int)
+ assert_raises(TypeError, _check_fill_value, 1e+20, int)
+ assert_raises(TypeError, _check_fill_value, 'stuff', int)
def test_check_on_fields(self):
# Tests _check_fill_value with records
@@ -1708,49 +1745,45 @@ class TestFillingValues(TestCase):
ndtype = [('a', int), ('b', float), ('c', "|S3")]
# A check on a list should return a single record
fval = _check_fill_value([-999, -12345678.9, "???"], ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# A check on None should output the defaults
fval = _check_fill_value(None, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [default_fill_value(0),
default_fill_value(0.),
asbytes(default_fill_value("0"))])
#.....Using a structured type as fill_value should work
fill_val = np.array((-999, -12345678.9, "???"), dtype=ndtype)
fval = _check_fill_value(fill_val, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using a flexible type w/ a different type shouldn't matter
- # BEHAVIOR in 1.5 and earlier: match structured types by position
- #fill_val = np.array((-999, -12345678.9, "???"),
- # dtype=[("A", int), ("B", float), ("C", "|S3")])
- # BEHAVIOR in 1.6 and later: match structured types by name
- fill_val = np.array(("???", -999, -12345678.9),
- dtype=[("c", "|S3"), ("a", int), ("b", float), ])
- # suppress deprecation warning in 1.12 (remove in 1.13)
- with assert_warns(FutureWarning):
- fval = _check_fill_value(fill_val, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ # BEHAVIOR in 1.5 and earlier, and 1.13 and later: match structured
+ # types by position
+ fill_val = np.array((-999, -12345678.9, "???"),
+ dtype=[("A", int), ("B", float), ("C", "|S3")])
+ fval = _check_fill_value(fill_val, ndtype)
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....Using an object-array shouldn't matter either
fill_val = np.ndarray(shape=(1,), dtype=object)
fill_val[0] = (-999, -12345678.9, b"???")
fval = _check_fill_value(fill_val, object)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), [-999, -12345678.9, b"???"])
# NOTE: This test was never run properly as "fill_value" rather than
# "fill_val" was assigned. Written properly, it fails.
#fill_val = np.array((-999, -12345678.9, "???"))
#fval = _check_fill_value(fill_val, ndtype)
- #self.assertTrue(isinstance(fval, ndarray))
+ #assert_(isinstance(fval, ndarray))
#assert_equal(fval.item(), [-999, -12345678.9, b"???"])
#.....One-field-only flexible type should work as well
ndtype = [("a", int)]
fval = _check_fill_value(-999999999, ndtype)
- self.assertTrue(isinstance(fval, ndarray))
+ assert_(isinstance(fval, ndarray))
assert_equal(fval.item(), (-999999999,))
def test_fillvalue_conversion(self):
@@ -1777,6 +1810,31 @@ class TestFillingValues(TestCase):
assert_equal(b['a']._data, a._data)
assert_equal(b['a'].fill_value, a.fill_value)
+ def test_default_fill_value(self):
+ # check all calling conventions
+ f1 = default_fill_value(1.)
+ f2 = default_fill_value(np.array(1.))
+ f3 = default_fill_value(np.array(1.).dtype)
+ assert_equal(f1, f2)
+ assert_equal(f1, f3)
+
+ def test_default_fill_value_structured(self):
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+
+ f1 = default_fill_value(fields)
+ f2 = default_fill_value(fields.dtype)
+ expected = np.array((default_fill_value(0),
+ default_fill_value('0'),
+ default_fill_value(0.)), dtype=fields.dtype)
+ assert_equal(f1, expected)
+ assert_equal(f2, expected)
+
+ def test_default_fill_value_void(self):
+ dt = np.dtype([('v', 'V7')])
+ f = default_fill_value(dt)
+ assert_equal(f['v'], np.array(default_fill_value(dt['v']), dt['v']))
+
def test_fillvalue(self):
# Yet more fun with the fill_value
data = masked_array([1, 2, 3], fill_value=-999)
@@ -1841,33 +1899,47 @@ class TestFillingValues(TestCase):
"h", "D", "W", "M", "Y"):
control = numpy.datetime64("NaT", timecode)
test = default_fill_value(numpy.dtype("<M8[" + timecode + "]"))
- np.testing.utils.assert_equal(test, control)
+ np.testing.assert_equal(test, control)
control = numpy.timedelta64("NaT", timecode)
test = default_fill_value(numpy.dtype("<m8[" + timecode + "]"))
- np.testing.utils.assert_equal(test, control)
+ np.testing.assert_equal(test, control)
def test_extremum_fill_value(self):
# Tests extremum fill values for flexible type.
a = array([(1, (2, 3)), (4, (5, 6))],
dtype=[('A', int), ('B', [('BA', int), ('BB', int)])])
test = a.fill_value
+ assert_equal(test.dtype, a.dtype)
assert_equal(test['A'], default_fill_value(a['A']))
assert_equal(test['B']['BA'], default_fill_value(a['B']['BA']))
assert_equal(test['B']['BB'], default_fill_value(a['B']['BB']))
test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
assert_equal(test[0], minimum_fill_value(a['A']))
assert_equal(test[1][0], minimum_fill_value(a['B']['BA']))
assert_equal(test[1][1], minimum_fill_value(a['B']['BB']))
assert_equal(test[1], minimum_fill_value(a['B']))
test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
assert_equal(test[0], maximum_fill_value(a['A']))
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
+ def test_extremum_fill_value_subdtype(self):
+ a = array(([2, 3, 4],), dtype=[('value', np.int8, 3)])
+
+ test = minimum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, minimum_fill_value(a['value'])))
+
+ test = maximum_fill_value(a)
+ assert_equal(test.dtype, a.dtype)
+ assert_equal(test[0], np.full(3, maximum_fill_value(a['value'])))
+
def test_fillvalue_individual_fields(self):
# Test setting fill_value on individual fields
ndtype = [('a', int), ('b', int)]
@@ -1976,17 +2048,17 @@ class TestFillingValues(TestCase):
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
-class TestUfuncs(TestCase):
+class TestUfuncs(object):
# Test class for the application of ufuncs on MaskedArrays.
- def setUp(self):
+ def setup(self):
# Base data definition.
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
self.err_status = np.geterr()
np.seterr(divide='ignore', invalid='ignore')
- def tearDown(self):
+ def teardown(self):
np.seterr(**self.err_status)
def test_testUfuncRegression(self):
@@ -2022,8 +2094,8 @@ class TestUfuncs(TestCase):
def test_reduce(self):
# Tests reduce on MaskedArrays.
a = self.d[0]
- self.assertTrue(not alltrue(a, axis=0))
- self.assertTrue(sometrue(a, axis=0))
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
assert_equal(sum(a[:3], axis=0), 0)
assert_equal(product(a, axis=0), 0)
assert_equal(add.reduce(a), pi)
@@ -2036,8 +2108,8 @@ class TestUfuncs(TestCase):
assert_equal(amask.min(), 5)
assert_equal(amask.max(0), a.max(0))
assert_equal(amask.min(0), [5, 6, 7, 8])
- self.assertTrue(amask.max(1)[0].mask)
- self.assertTrue(amask.min(1)[0].mask)
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
def test_ndarray_mask(self):
# Check that the mask of the result is a ndarray (not a MaskedArray...)
@@ -2047,14 +2119,14 @@ class TestUfuncs(TestCase):
mask=[1, 0, 0, 0, 1])
assert_equal(test, control)
assert_equal(test.mask, control.mask)
- self.assertTrue(not isinstance(test.mask, MaskedArray))
+ assert_(not isinstance(test.mask, MaskedArray))
def test_treatment_of_NotImplemented(self):
# Check that NotImplemented is returned at appropriate places
a = masked_array([1., 2.], mask=[1, 0])
- self.assertRaises(TypeError, operator.mul, a, "abc")
- self.assertRaises(TypeError, operator.truediv, a, "abc")
+ assert_raises(TypeError, operator.mul, a, "abc")
+ assert_raises(TypeError, operator.truediv, a, "abc")
class MyClass(object):
__array_priority__ = a.__array_priority__ + 1
@@ -2120,10 +2192,10 @@ class TestUfuncs(TestCase):
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics(TestCase):
+class TestMaskedArrayInPlaceArithmetics(object):
# Test MaskedArray Arithmetics
- def setUp(self):
+ def setup(self):
x = arange(10)
y = arange(10)
xm = arange(10)
@@ -2622,9 +2694,9 @@ class TestMaskedArrayInPlaceArithmetics(TestCase):
assert_equal(len(w), 0, "Failed on type=%s." % t)
-class TestMaskedArrayMethods(TestCase):
+class TestMaskedArrayMethods(object):
# Test class for miscellaneous MaskedArrays methods.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -2678,25 +2750,25 @@ class TestMaskedArrayMethods(TestCase):
# Tests allclose on arrays
a = np.random.rand(10)
b = a + np.random.rand(10) * 1e-8
- self.assertTrue(allclose(a, b))
+ assert_(allclose(a, b))
# Test allclose w/ infs
a[0] = np.inf
- self.assertTrue(not allclose(a, b))
+ assert_(not allclose(a, b))
b[0] = np.inf
- self.assertTrue(allclose(a, b))
+ assert_(allclose(a, b))
# Test allclose w/ masked
a = masked_array(a)
a[-1] = masked
- self.assertTrue(allclose(a, b, masked_equal=True))
- self.assertTrue(not allclose(a, b, masked_equal=False))
+ assert_(allclose(a, b, masked_equal=True))
+ assert_(not allclose(a, b, masked_equal=False))
# Test comparison w/ scalar
a *= 1e-8
a[0] = 0
- self.assertTrue(allclose(a, 0, masked_equal=True))
+ assert_(allclose(a, 0, masked_equal=True))
# Test that the function works for MIN_INT integer typed arrays
a = masked_array([np.iinfo(np.int_).min], dtype=np.int_)
- self.assertTrue(allclose(a, a))
+ assert_(allclose(a, a))
def test_allany(self):
# Checks the any/all methods/functions.
@@ -2710,15 +2782,15 @@ class TestMaskedArrayMethods(TestCase):
mxbig = (mx > 0.5)
mxsmall = (mx < 0.5)
- self.assertFalse(mxbig.all())
- self.assertTrue(mxbig.any())
+ assert_(not mxbig.all())
+ assert_(mxbig.any())
assert_equal(mxbig.all(0), [False, False, True])
assert_equal(mxbig.all(1), [False, False, True])
assert_equal(mxbig.any(0), [False, False, True])
assert_equal(mxbig.any(1), [True, True, True])
- self.assertFalse(mxsmall.all())
- self.assertTrue(mxsmall.any())
+ assert_(not mxsmall.all())
+ assert_(mxsmall.any())
assert_equal(mxsmall.all(0), [True, True, False])
assert_equal(mxsmall.all(1), [False, False, False])
assert_equal(mxsmall.any(0), [True, True, False])
@@ -2736,15 +2808,15 @@ class TestMaskedArrayMethods(TestCase):
mXbig = (mX > 0.5)
mXsmall = (mX < 0.5)
- self.assertFalse(mXbig.all())
- self.assertTrue(mXbig.any())
+ assert_(not mXbig.all())
+ assert_(mXbig.any())
assert_equal(mXbig.all(0), np.matrix([False, False, True]))
assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
assert_equal(mXbig.any(0), np.matrix([False, False, True]))
assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
- self.assertFalse(mXsmall.all())
- self.assertTrue(mXsmall.any())
+ assert_(not mXsmall.all())
+ assert_(mXsmall.any())
assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
@@ -2755,18 +2827,18 @@ class TestMaskedArrayMethods(TestCase):
store = empty((), dtype=bool)
full = array([1, 2, 3], mask=True)
- self.assertTrue(full.all() is masked)
+ assert_(full.all() is masked)
full.all(out=store)
- self.assertTrue(store)
- self.assertTrue(store._mask, True)
- self.assertTrue(store is not masked)
+ assert_(store)
+ assert_(store._mask, True)
+ assert_(store is not masked)
store = empty((), dtype=bool)
- self.assertTrue(full.any() is masked)
+ assert_(full.any() is masked)
full.any(out=store)
- self.assertTrue(not store)
- self.assertTrue(store._mask, True)
- self.assertTrue(store is not masked)
+ assert_(not store)
+ assert_(store._mask, True)
+ assert_(store is not masked)
def test_argmax_argmin(self):
# Tests argmin & argmax on MaskedArrays.
@@ -2851,7 +2923,7 @@ class TestMaskedArrayMethods(TestCase):
a = array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
b = a.compressed()
assert_equal(b, a)
- self.assertTrue(isinstance(b, np.matrix))
+ assert_(isinstance(b, np.matrix))
a[0, 0] = masked
b = a.compressed()
assert_equal(b, [[2, 3, 4]])
@@ -2885,11 +2957,11 @@ class TestMaskedArrayMethods(TestCase):
n = [0, 0, 0, 1, 1]
m = make_mask(n)
x = array(d, mask=m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is not masked)
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
assert_equal(x, [0, 10, 2, -1, 40])
x = masked_array(arange(10), mask=[1, 0, 0, 0, 0] * 2)
@@ -2915,12 +2987,12 @@ class TestMaskedArrayMethods(TestCase):
z = array([3., -1.], mask=[False, True])
x.put([1, 2], z)
- self.assertTrue(x[0] is not masked)
+ assert_(x[0] is not masked)
assert_equal(x[0], 0)
- self.assertTrue(x[1] is not masked)
+ assert_(x[1] is not masked)
assert_equal(x[1], 3)
- self.assertTrue(x[2] is masked)
- self.assertTrue(x[3] is not masked)
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
assert_equal(x[3], 0)
def test_put_hardmask(self):
@@ -3021,7 +3093,7 @@ class TestMaskedArrayMethods(TestCase):
x = [1, 4, 2, 3]
sortedx = sort(x)
- self.assertTrue(not isinstance(sorted, MaskedArray))
+ assert_(not isinstance(sorted, MaskedArray))
x = array([0, 1, -1, -2, 2], mask=nomask, dtype=np.int8)
sortedx = sort(x, endwith=False)
@@ -3086,27 +3158,41 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(am, an)
def test_sort_flexible(self):
- # Test sort on flexible dtype.
+ # Test sort on structured dtype.
a = array(
data=[(3, 3), (3, 2), (2, 2), (2, 1), (1, 0), (1, 1), (1, 2)],
mask=[(0, 0), (0, 1), (0, 0), (0, 0), (1, 0), (0, 0), (0, 0)],
dtype=[('A', int), ('B', int)])
-
- test = sort(a)
- b = array(
+ mask_last = array(
data=[(1, 1), (1, 2), (2, 1), (2, 2), (3, 3), (3, 2), (1, 0)],
mask=[(0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (1, 0)],
dtype=[('A', int), ('B', int)])
- assert_equal(test, b)
- assert_equal(test.mask, b.mask)
+ mask_first = array(
+ data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3)],
+ mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0)],
+ dtype=[('A', int), ('B', int)])
+
+ test = sort(a)
+ assert_equal(test, mask_last)
+ assert_equal(test.mask, mask_last.mask)
test = sort(a, endwith=False)
- b = array(
- data=[(1, 0), (1, 1), (1, 2), (2, 1), (2, 2), (3, 2), (3, 3), ],
- mask=[(1, 0), (0, 0), (0, 0), (0, 0), (0, 0), (0, 1), (0, 0), ],
- dtype=[('A', int), ('B', int)])
- assert_equal(test, b)
- assert_equal(test.mask, b.mask)
+ assert_equal(test, mask_first)
+ assert_equal(test.mask, mask_first.mask)
+
+ # Test sort on dtype with subarray (gh-8069)
+ dt = np.dtype([('v', int, 2)])
+ a = a.view(dt)
+ mask_last = mask_last.view(dt)
+ mask_first = mask_first.view(dt)
+
+ test = sort(a)
+ assert_equal(test, mask_last)
+ assert_equal(test.mask, mask_last.mask)
+
+ test = sort(a, endwith=False)
+ assert_equal(test, mask_first)
+ assert_equal(test.mask, mask_first.mask)
def test_argsort(self):
# Test argsort
@@ -3120,8 +3206,21 @@ class TestMaskedArrayMethods(TestCase):
data = masked_array([[1, 2, 3]], mask=[[1, 1, 1]])
assert_equal(data.squeeze(), [1, 2, 3])
assert_equal(data.squeeze()._mask, [1, 1, 1])
- data = masked_array([[1]], mask=True)
- self.assertTrue(data.squeeze() is masked)
+
+ # normal ndarrays return a view
+ arr = np.array([[1]])
+ arr_sq = arr.squeeze()
+ assert_equal(arr_sq, 1)
+ arr_sq[...] = 2
+ assert_equal(arr[0,0], 2)
+
+ # so maskedarrays should too
+ m_arr = masked_array([[1]], mask=True)
+ m_arr_sq = m_arr.squeeze()
+ assert_(m_arr_sq is not np.ma.masked)
+ assert_equal(m_arr_sq.mask, True)
+ m_arr_sq[...] = 2
+ assert_equal(m_arr[0,0], 2)
def test_swapaxes(self):
# Tests swapaxes on MaskedArrays.
@@ -3155,8 +3254,8 @@ class TestMaskedArrayMethods(TestCase):
masked_array([[10, 20], [10, 20]], [[0, 1], [0, 1]]))
# assert_equal crashes when passed np.ma.mask
- self.assertIs(x[1], np.ma.masked)
- self.assertIs(x.take(1), np.ma.masked)
+ assert_(x[1] is np.ma.masked)
+ assert_(x.take(1) is np.ma.masked)
x = array([[10, 20, 30], [40, 50, 60]], mask=[[0, 0, 1], [1, 0, 0, ]])
assert_equal(x.take([0, 2], axis=1),
@@ -3200,8 +3299,8 @@ class TestMaskedArrayMethods(TestCase):
x = array(np.arange(12))
x[[1, -2]] = masked
xlist = x.tolist()
- self.assertTrue(xlist[1] is None)
- self.assertTrue(xlist[-2] is None)
+ assert_(xlist[1] is None)
+ assert_(xlist[-2] is None)
# ... on 2D
x.shape = (3, 4)
xlist = x.tolist()
@@ -3304,10 +3403,37 @@ class TestMaskedArrayMethods(TestCase):
assert_equal(MaskedArray.cumsum(marray.T, 0), control.cumsum(0))
+ def test_arraymethod_0d(self):
+ # gh-9430
+ x = np.ma.array(42, mask=True)
+ assert_equal(x.T.mask, x.mask)
+ assert_equal(x.T.data, x.data)
+
+ def test_transpose_view(self):
+ x = np.ma.array([[1, 2, 3], [4, 5, 6]])
+ x[0,1] = np.ma.masked
+ xt = x.T
+
+ xt[1,0] = 10
+ xt[0,1] = np.ma.masked
+
+ assert_equal(x.data, xt.T.data)
+ assert_equal(x.mask, xt.T.mask)
+
+ def test_diagonal_view(self):
+ x = np.ma.zeros((3,3))
+ x[0,0] = 10
+ x[1,1] = np.ma.masked
+ x[2,2] = 20
+ xd = x.diagonal()
+ x[1,1] = 15
+ assert_equal(xd.mask, x.diagonal().mask)
+ assert_equal(xd.data, x.diagonal().data)
-class TestMaskedArrayMathMethods(TestCase):
- def setUp(self):
+class TestMaskedArrayMathMethods(object):
+
+ def setup(self):
# Base data definition.
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -3366,20 +3492,20 @@ class TestMaskedArrayMathMethods(TestCase):
output.fill(-9999)
result = npfunc(xm, axis=0, out=output)
# ... the result should be the given output
- self.assertTrue(result is output)
+ assert_(result is output)
assert_equal(result, xmmeth(axis=0, out=output))
output = empty((3, 4), dtype=int)
result = xmmeth(axis=0, out=output)
- self.assertTrue(result is output)
+ assert_(result is output)
def test_ptp(self):
# Tests ptp on MaskedArrays.
(x, X, XX, m, mx, mX, mXX, m2x, m2X, m2XX) = self.d
(n, m) = X.shape
assert_equal(mx.ptp(), mx.compressed().ptp())
- rows = np.zeros(n, np.float)
- cols = np.zeros(m, np.float)
+ rows = np.zeros(n, float)
+ cols = np.zeros(m, float)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
@@ -3395,21 +3521,21 @@ class TestMaskedArrayMathMethods(TestCase):
def test_sum_object(self):
# Test sum on object dtype
- a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.sum(), 5)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.sum(axis=0), [5, 7, 9])
def test_prod_object(self):
# Test prod on object dtype
- a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=np.object)
+ a = masked_array([1, 2, 3], mask=[1, 0, 0], dtype=object)
assert_equal(a.prod(), 2 * 3)
a = masked_array([[1, 2, 3], [4, 5, 6]], dtype=object)
assert_equal(a.prod(axis=0), [4, 10, 18])
def test_meananom_object(self):
# Test mean/anom on object dtype
- a = masked_array([1, 2, 3], dtype=np.object)
+ a = masked_array([1, 2, 3], dtype=object)
assert_equal(a.mean(), 2)
assert_equal(a.anom(), [-1, 0, 1])
@@ -3503,31 +3629,31 @@ class TestMaskedArrayMathMethods(TestCase):
x = array(arange(10), mask=True)
for methodname in ('var', 'std'):
method = getattr(x, methodname)
- self.assertTrue(method() is masked)
- self.assertTrue(method(0) is masked)
- self.assertTrue(method(-1) is masked)
+ assert_(method() is masked)
+ assert_(method(0) is masked)
+ assert_(method(-1) is masked)
# Using a masked array as explicit output
method(out=mout)
- self.assertTrue(mout is not masked)
+ assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout)
- self.assertTrue(np.isnan(nout))
+ assert_(np.isnan(nout))
x = array(arange(10), mask=True)
x[-1] = 9
for methodname in ('var', 'std'):
method = getattr(x, methodname)
- self.assertTrue(method(ddof=1) is masked)
- self.assertTrue(method(0, ddof=1) is masked)
- self.assertTrue(method(-1, ddof=1) is masked)
+ assert_(method(ddof=1) is masked)
+ assert_(method(0, ddof=1) is masked)
+ assert_(method(-1, ddof=1) is masked)
# Using a masked array as explicit output
method(out=mout, ddof=1)
- self.assertTrue(mout is not masked)
+ assert_(mout is not masked)
assert_equal(mout.mask, True)
# Using a ndarray as explicit output
method(out=nout, ddof=1)
- self.assertTrue(np.isnan(nout))
+ assert_(np.isnan(nout))
def test_varstd_ddof(self):
a = array([[1, 1, 0], [1, 1, 0]], mask=[[0, 0, 1], [0, 0, 1]])
@@ -3576,9 +3702,9 @@ class TestMaskedArrayMathMethods(TestCase):
assert_equal(a.max(1), [3, 6])
-class TestMaskedArrayMathMethodsComplex(TestCase):
+class TestMaskedArrayMathMethodsComplex(object):
# Test class for miscellaneous MaskedArrays methods.
- def setUp(self):
+ def setup(self):
# Base data definition.
x = np.array([8.375j, 7.545j, 8.828j, 8.5j, 1.757j, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
@@ -3629,10 +3755,10 @@ class TestMaskedArrayMathMethodsComplex(TestCase):
mX[:, k].compressed().std())
-class TestMaskedArrayFunctions(TestCase):
+class TestMaskedArrayFunctions(object):
# Test class for miscellaneous functions.
- def setUp(self):
+ def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
@@ -3756,12 +3882,12 @@ class TestMaskedArrayFunctions(TestCase):
output.fill(-9999)
result = np.round(xm, decimals=2, out=output)
# ... the result should be the given output
- self.assertTrue(result is output)
+ assert_(result is output)
assert_equal(result, xm.round(decimals=2, out=output))
output = empty((3, 4), dtype=float)
result = xm.round(decimals=2, out=output)
- self.assertTrue(result is output)
+ assert_(result is output)
def test_round_with_scalar(self):
# Testing round with scalar/zero dimension input
@@ -3790,13 +3916,13 @@ class TestMaskedArrayFunctions(TestCase):
def test_identity(self):
a = identity(5)
- self.assertTrue(isinstance(a, MaskedArray))
+ assert_(isinstance(a, MaskedArray))
assert_equal(a, np.identity(5))
def test_power(self):
x = -1.1
assert_almost_equal(power(x, 2.), 1.21)
- self.assertTrue(power(x, masked) is masked)
+ assert_(power(x, masked) is masked)
x = array([-1.1, -1.1, 1.1, 1.1, 0.])
b = array([0.5, 2., 0.5, 2., -1.], mask=[0, 0, 0, 0, 1])
y = power(x, b)
@@ -4004,7 +4130,7 @@ class TestMaskedArrayFunctions(TestCase):
store = empty(4, dtype=int)
chosen = choose([2, 3, 1, 0], choices, out=store)
assert_equal(store, array([20, 31, 12, 3]))
- self.assertTrue(store is chosen)
+ assert_(store is chosen)
# Check with some masked indices + out
store = empty(4, dtype=int)
indices_ = array([2, 3, 1, 0], mask=[1, 0, 0, 1])
@@ -4025,56 +4151,56 @@ class TestMaskedArrayFunctions(TestCase):
# Try the default
b = a.reshape((5, 2))
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['C'])
+ assert_(b.flags['C'])
# Try w/ arguments as list instead of tuple
b = a.reshape(5, 2)
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['C'])
+ assert_(b.flags['C'])
# Try w/ order
b = a.reshape((5, 2), order='F')
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['F'])
+ assert_(b.flags['F'])
# Try w/ order
b = a.reshape(5, 2, order='F')
assert_equal(b.shape, (5, 2))
- self.assertTrue(b.flags['F'])
+ assert_(b.flags['F'])
c = np.reshape(a, (2, 5))
- self.assertTrue(isinstance(c, MaskedArray))
+ assert_(isinstance(c, MaskedArray))
assert_equal(c.shape, (2, 5))
- self.assertTrue(c[0, 0] is masked)
- self.assertTrue(c.flags['C'])
+ assert_(c[0, 0] is masked)
+ assert_(c.flags['C'])
def test_make_mask_descr(self):
# Flexible
- ntype = [('a', np.float), ('b', np.float)]
+ ntype = [('a', float), ('b', float)]
test = make_mask_descr(ntype)
- assert_equal(test, [('a', np.bool), ('b', np.bool)])
+ assert_equal(test, [('a', bool), ('b', bool)])
assert_(test is make_mask_descr(test))
# Standard w/ shape
- ntype = (np.float, 2)
+ ntype = (float, 2)
test = make_mask_descr(ntype)
- assert_equal(test, (np.bool, 2))
+ assert_equal(test, (bool, 2))
assert_(test is make_mask_descr(test))
# Standard standard
- ntype = np.float
+ ntype = float
test = make_mask_descr(ntype)
- assert_equal(test, np.dtype(np.bool))
+ assert_equal(test, np.dtype(bool))
assert_(test is make_mask_descr(test))
# Nested
- ntype = [('a', np.float), ('b', [('ba', np.float), ('bb', np.float)])]
+ ntype = [('a', float), ('b', [('ba', float), ('bb', float)])]
test = make_mask_descr(ntype)
control = np.dtype([('a', 'b1'), ('b', [('ba', 'b1'), ('bb', 'b1')])])
assert_equal(test, control)
assert_(test is make_mask_descr(test))
# Named+ shape
- ntype = [('a', (np.float, 2))]
+ ntype = [('a', (float, 2))]
test = make_mask_descr(ntype)
- assert_equal(test, np.dtype([('a', (np.bool, 2))]))
+ assert_equal(test, np.dtype([('a', (bool, 2))]))
assert_(test is make_mask_descr(test))
# 2 names
@@ -4099,25 +4225,25 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a ndarray as an input
- mask = np.array([0, 1], dtype=np.bool)
+ mask = np.array([0, 1], dtype=bool)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [0, 1])
# w/ a flexible-type ndarray as an input - use default
- mdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask)
assert_equal(test.dtype, MaskType)
assert_equal(test, [1, 1])
# w/ a flexible-type ndarray as an input - use input dtype
- mdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, mdtype)
assert_equal(test, mask)
# w/ a flexible-type ndarray as an input - use input dtype
- mdtype = [('a', np.float), ('b', np.float)]
- bdtype = [('a', np.bool), ('b', np.bool)]
+ mdtype = [('a', float), ('b', float)]
+ bdtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1)], dtype=mdtype)
test = make_mask(mask, dtype=mask.dtype)
assert_equal(test.dtype, bdtype)
@@ -4133,7 +4259,7 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test2, test)
# test that nomask is returned when m is nomask.
bools = [True, False]
- dtypes = [MaskType, np.float]
+ dtypes = [MaskType, float]
msgformat = 'copy=%s, shrink=%s, dtype=%s'
for cpy, shr, dt in itertools.product(bools, bools, dtypes):
res = make_mask(nomask, copy=cpy, shrink=shr, dtype=dt)
@@ -4141,7 +4267,7 @@ class TestMaskedArrayFunctions(TestCase):
def test_mask_or(self):
# Initialize
- mtype = [('a', np.bool), ('b', np.bool)]
+ mtype = [('a', bool), ('b', bool)]
mask = np.array([(0, 0), (0, 1), (1, 0), (0, 0)], dtype=mtype)
# Test using nomask as input
test = mask_or(mask, nomask)
@@ -4157,14 +4283,14 @@ class TestMaskedArrayFunctions(TestCase):
control = np.array([(0, 1), (0, 1), (1, 1), (0, 1)], dtype=mtype)
assert_equal(test, control)
# Using another array w / a different dtype
- othertype = [('A', np.bool), ('B', np.bool)]
+ othertype = [('A', bool), ('B', bool)]
other = np.array([(0, 1), (0, 1), (0, 1), (0, 1)], dtype=othertype)
try:
test = mask_or(mask, other)
except ValueError:
pass
# Using nested arrays
- dtype = [('a', np.bool), ('b', [('ba', np.bool), ('bb', np.bool)])]
+ dtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
amask = np.array([(0, (1, 0)), (0, (1, 0))], dtype=dtype)
bmask = np.array([(1, (0, 1)), (0, (0, 0))], dtype=dtype)
cntrl = np.array([(1, (1, 1)), (0, (1, 0))], dtype=dtype)
@@ -4173,7 +4299,7 @@ class TestMaskedArrayFunctions(TestCase):
def test_flatten_mask(self):
# Tests flatten mask
# Standard dtype
- mask = np.array([0, 0, 1], dtype=np.bool)
+ mask = np.array([0, 0, 1], dtype=bool)
assert_equal(flatten_mask(mask), mask)
# Flexible dtype
mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
@@ -4266,9 +4392,9 @@ class TestMaskedArrayFunctions(TestCase):
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
-class TestMaskedFields(TestCase):
+class TestMaskedFields(object):
- def setUp(self):
+ def setup(self):
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = ['one', 'two', 'three', 'four', 'five']
@@ -4367,7 +4493,7 @@ class TestMaskedFields(TestCase):
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
- self.assertTrue(isinstance(test, np.matrix))
+ assert_(isinstance(test, np.matrix))
def test_getitem(self):
ndtype = [('a', float), ('b', float)]
@@ -4432,7 +4558,7 @@ class TestMaskedFields(TestCase):
assert_equal(len(rec), len(self.data['ddtype']))
-class TestMaskedObjectArray(TestCase):
+class TestMaskedObjectArray(object):
def test_getitem(self):
arr = np.ma.array([None, None])
@@ -4480,9 +4606,9 @@ class TestMaskedObjectArray(TestCase):
assert_(arr[0] is np.ma.masked)
-class TestMaskedView(TestCase):
+class TestMaskedView(object):
- def setUp(self):
+ def setup(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
data = np.array(iterator)
a = array(iterator, dtype=[('a', float), ('b', float)])
@@ -4493,14 +4619,14 @@ class TestMaskedView(TestCase):
def test_view_to_nothing(self):
(data, a, controlmask) = self.data
test = a.view()
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test._data, a._data)
assert_equal(test._mask, a._mask)
def test_view_to_type(self):
(data, a, controlmask) = self.data
test = a.view(np.ndarray)
- self.assertTrue(not isinstance(test, MaskedArray))
+ assert_(not isinstance(test, MaskedArray))
assert_equal(test, a._data)
assert_equal_records(test, data.view(a.dtype).squeeze())
@@ -4508,7 +4634,7 @@ class TestMaskedView(TestCase):
(data, a, controlmask) = self.data
# View globally
test = a.view(float)
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data.ravel())
assert_equal(test.mask, controlmask)
@@ -4521,13 +4647,13 @@ class TestMaskedView(TestCase):
assert_equal(test['B'], a['b'])
test = a[0].view([('A', float), ('B', float)])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.mask.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][0])
assert_equal(test['B'], a['b'][0])
test = a[-1].view([('A', float), ('B', float)])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.dtype.names, ('A', 'B'))
assert_equal(test['A'], a['a'][-1])
assert_equal(test['B'], a['b'][-1])
@@ -4536,17 +4662,17 @@ class TestMaskedView(TestCase):
(data, a, controlmask) = self.data
# View globally
test = a.view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data)
assert_equal(test.mask, controlmask.reshape(-1, 2))
# View on 1 masked element
test = a[0].view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data[0])
assert_equal(test.mask, (1, 0))
# View on 1 unmasked element
test = a[-1].view((float, 2))
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test, data[-1])
def test_view_to_dtype_and_type(self):
@@ -4554,10 +4680,10 @@ class TestMaskedView(TestCase):
test = a.view((float, 2), np.matrix)
assert_equal(test, data)
- self.assertTrue(isinstance(test, np.matrix))
- self.assertTrue(not isinstance(test, MaskedArray))
+ assert_(isinstance(test, np.matrix))
+ assert_(not isinstance(test, MaskedArray))
-class TestOptionalArgs(TestCase):
+class TestOptionalArgs(object):
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4644,10 +4770,10 @@ class TestOptionalArgs(TestCase):
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
-class TestMaskedConstant(TestCase):
+class TestMaskedConstant(object):
def _do_add_test(self, add):
# sanity check
- self.assertIs(add(np.ma.masked, 1), np.ma.masked)
+ assert_(add(np.ma.masked, 1) is np.ma.masked)
# now try with a vector
vector = np.array([1, 2, 3])
@@ -4729,6 +4855,11 @@ def test_ufunc_with_output():
y = np.add(x, 1., out=x)
assert_(y is x)
+def test_astype():
+ descr = [('v', int, 3), ('x', [('y', float)])]
+ x = array(([1, 2, 3], (1.0,)), dtype=descr)
+ assert_equal(x, x.astype(descr))
+
###############################################################################
if __name__ == "__main__":
diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py
index 24dd7cb8d..23c095470 100644
--- a/numpy/ma/tests/test_deprecations.py
+++ b/numpy/ma/tests/test_deprecations.py
@@ -4,11 +4,11 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_warns
+from numpy.testing import run_module_suite, assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
-class TestArgsort(TestCase):
+class TestArgsort(object):
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
@@ -37,7 +37,7 @@ class TestArgsort(TestCase):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
-class TestMinimumMaximum(TestCase):
+class TestMinimumMaximum(object):
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 4b7fe07b6..1bec584c1 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -14,8 +14,7 @@ import itertools
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_warns, suppress_warnings,
- assert_raises
+ run_module_suite, assert_warns, suppress_warnings, assert_raises,
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
@@ -35,7 +34,7 @@ from numpy.ma.extras import (
import numpy.ma.extras as mae
-class TestGeneric(TestCase):
+class TestGeneric(object):
#
def test_masked_all(self):
# Tests masked_all
@@ -140,7 +139,7 @@ class TestGeneric(TestCase):
assert_equal(test, None)
-class TestAverage(TestCase):
+class TestAverage(object):
# Several tests of average. Why so many ? Good point...
def test_testAverage1(self):
# Test of average.
@@ -149,7 +148,7 @@ class TestAverage(TestCase):
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
assert_equal(2.0, result)
- self.assertTrue(wts == 4.0)
+ assert_(wts == 4.0)
ott[:] = masked
assert_equal(average(ott, axis=0).mask, [True])
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
@@ -271,7 +270,7 @@ class TestAverage(TestCase):
assert_almost_equal(wav1.imag, expected1.imag)
-class TestConcatenator(TestCase):
+class TestConcatenator(object):
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_1d(self):
@@ -281,7 +280,7 @@ class TestConcatenator(TestCase):
m = [1, 0, 0, 0, 0]
d = masked_array(b, mask=m)
c = mr_[d, 0, 0, d]
- self.assertTrue(isinstance(c, MaskedArray))
+ assert_(isinstance(c, MaskedArray))
assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
assert_array_equal(c.mask, mr_[m, 0, 0, m])
@@ -295,12 +294,12 @@ class TestConcatenator(TestCase):
b_2 = masked_array(a_2, mask=m_2)
# append columns
d = mr_['1', b_1, b_2]
- self.assertTrue(d.shape == (5, 10))
+ assert_(d.shape == (5, 10))
assert_array_equal(d[:, :5], b_1)
assert_array_equal(d[:, 5:], b_2)
assert_array_equal(d.mask, np.r_['1', m_1, m_2])
d = mr_[b_1, b_2]
- self.assertTrue(d.shape == (10, 5))
+ assert_(d.shape == (10, 5))
assert_array_equal(d[:5,:], b_1)
assert_array_equal(d[5:,:], b_2)
assert_array_equal(d.mask, np.r_[m_1, m_2])
@@ -318,7 +317,7 @@ class TestConcatenator(TestCase):
assert_equal(type(actual.data), type(expected.data))
-class TestNotMasked(TestCase):
+class TestNotMasked(object):
# Tests notmasked_edges and notmasked_contiguous.
def test_edges(self):
@@ -367,19 +366,19 @@ class TestNotMasked(TestCase):
assert_equal(tmp[-3], slice(0, 4, None))
#
tmp = notmasked_contiguous(a, 0)
- self.assertTrue(len(tmp[-1]) == 1)
- self.assertTrue(tmp[-2] is None)
+ assert_(len(tmp[-1]) == 1)
+ assert_(tmp[-2] is None)
assert_equal(tmp[-3], tmp[-1])
- self.assertTrue(len(tmp[0]) == 2)
+ assert_(len(tmp[0]) == 2)
#
tmp = notmasked_contiguous(a, 1)
assert_equal(tmp[0][-1], slice(0, 4, None))
- self.assertTrue(tmp[1] is None)
+ assert_(tmp[1] is None)
assert_equal(tmp[2][-1], slice(7, 8, None))
assert_equal(tmp[2][-2], slice(0, 6, None))
-class TestCompressFunctions(TestCase):
+class TestCompressFunctions(object):
def test_compress_nd(self):
# Tests compress_nd
@@ -538,12 +537,12 @@ class TestCompressFunctions(TestCase):
assert_equal(mask_rowcols(x, 1,).mask,
[[1, 1, 0], [1, 1, 0], [1, 1, 0]])
x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
- self.assertTrue(mask_rowcols(x).all() is masked)
- self.assertTrue(mask_rowcols(x, 0).all() is masked)
- self.assertTrue(mask_rowcols(x, 1).all() is masked)
- self.assertTrue(mask_rowcols(x).mask.all())
- self.assertTrue(mask_rowcols(x, 0).mask.all())
- self.assertTrue(mask_rowcols(x, 1).mask.all())
+ assert_(mask_rowcols(x).all() is masked)
+ assert_(mask_rowcols(x, 0).all() is masked)
+ assert_(mask_rowcols(x, 1).all() is masked)
+ assert_(mask_rowcols(x).mask.all())
+ assert_(mask_rowcols(x, 0).mask.all())
+ assert_(mask_rowcols(x, 1).mask.all())
def test_dot(self):
# Tests dot product
@@ -632,7 +631,7 @@ class TestCompressFunctions(TestCase):
assert_equal(a, res)
-class TestApplyAlongAxis(TestCase):
+class TestApplyAlongAxis(object):
# Tests 2D functions
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
@@ -654,20 +653,20 @@ class TestApplyAlongAxis(TestCase):
assert_equal(xa, [[2, 5], [8, 11]])
-class TestApplyOverAxes(TestCase):
+class TestApplyOverAxes(object):
# Tests apply_over_axes
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[60], [92], [124]]])
assert_equal(test, ctrl)
- a[(a % 2).astype(np.bool)] = masked
+ a[(a % 2).astype(bool)] = masked
test = apply_over_axes(np.sum, a, [0, 2])
ctrl = np.array([[[28], [44], [60]]])
assert_equal(test, ctrl)
-class TestMedian(TestCase):
+class TestMedian(object):
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
@@ -737,7 +736,7 @@ class TestMedian(TestCase):
for axis, over in args:
try:
np.ma.median(x, axis=axis, overwrite_input=over)
- except:
+ except Exception:
raise AssertionError(msg % (mask, ndmin, axis, over))
# Invalid axis values should raise exception
@@ -886,7 +885,7 @@ class TestMedian(TestCase):
def test_nan(self):
with suppress_warnings() as w:
w.record(RuntimeWarning)
- for mask in (False, np.zeros(6, dtype=np.bool)):
+ for mask in (False, np.zeros(6, dtype=bool)):
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
dm.mask = mask
@@ -1069,9 +1068,9 @@ class TestMedian(TestCase):
assert_(type(np.ma.median(o.astype(object))), float)
-class TestCov(TestCase):
+class TestCov(object):
- def setUp(self):
+ def setup(self):
self.data = array(np.random.rand(12))
def test_1d_without_missing(self):
@@ -1136,9 +1135,9 @@ class TestCov(TestCase):
x.shape[0] / frac))
-class TestCorrcoef(TestCase):
+class TestCorrcoef(object):
- def setUp(self):
+ def setup(self):
self.data = array(np.random.rand(12))
self.data2 = array(np.random.rand(12))
@@ -1243,7 +1242,7 @@ class TestCorrcoef(TestCase):
control[:-1, :-1])
-class TestPolynomial(TestCase):
+class TestPolynomial(object):
#
def test_polyfit(self):
# Tests polyfit
@@ -1301,13 +1300,13 @@ class TestPolynomial(TestCase):
assert_almost_equal(a, a_)
-class TestArraySetOps(TestCase):
+class TestArraySetOps(object):
def test_unique_onlist(self):
# Test unique on list
data = [1, 1, 1, 2, 2, 3]
test = unique(data, return_index=True, return_inverse=True)
- self.assertTrue(isinstance(test[0], MaskedArray))
+ assert_(isinstance(test[0], MaskedArray))
assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
assert_equal(test[1], [0, 3, 5])
assert_equal(test[2], [0, 0, 0, 1, 1, 2])
@@ -1404,13 +1403,13 @@ class TestArraySetOps(TestCase):
test = ediff1d(x)
control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
assert_equal(test, control)
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
#
test = ediff1d(x, to_end=masked, to_begin=masked)
control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
- self.assertTrue(isinstance(test, MaskedArray))
+ assert_(isinstance(test, MaskedArray))
assert_equal(test.filled(0), control.filled(0))
assert_equal(test.mask, control.mask)
@@ -1525,7 +1524,7 @@ class TestArraySetOps(TestCase):
assert_array_equal(setdiff1d(a, b), np.array(['c']))
-class TestShapeBase(TestCase):
+class TestShapeBase(object):
def test_atleast_2d(self):
# Test atleast_2d
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 785733400..1ca8e175f 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -14,7 +14,7 @@ import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.ma import masked, nomask
-from numpy.testing import TestCase, run_module_suite, temppath
+from numpy.testing import run_module_suite, temppath
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
@@ -28,21 +28,14 @@ from numpy.ma.testutils import (
)
-class TestMRecords(TestCase):
- # Base test class for MaskedArrays.
- def __init__(self, *args, **kwds):
- TestCase.__init__(self, *args, **kwds)
- self.setup()
+class TestMRecords(object):
- def setup(self):
- # Generic setup
- ilist = [1, 2, 3, 4, 5]
- flist = [1.1, 2.2, 3.3, 4.4, 5.5]
- slist = [b'one', b'two', b'three', b'four', b'five']
- ddtype = [('a', int), ('b', float), ('c', '|S8')]
- mask = [0, 1, 0, 0, 1]
- self.base = ma.array(list(zip(ilist, flist, slist)),
- mask=mask, dtype=ddtype)
+ ilist = [1, 2, 3, 4, 5]
+ flist = [1.1, 2.2, 3.3, 4.4, 5.5]
+ slist = [b'one', b'two', b'three', b'four', b'five']
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mask = [0, 1, 0, 0, 1]
+ base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)
def test_byview(self):
# Test creation by view
@@ -279,16 +272,16 @@ class TestMRecords(TestCase):
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
- self.assertTrue(mbase._hardmask)
+ assert_(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
- self.assertTrue(not mbase._hardmask)
+ assert_(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
- self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask)
+ assert_(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
def test_pickling(self):
@@ -356,11 +349,11 @@ class TestMRecords(TestCase):
dtype=mult.dtype))
-class TestView(TestCase):
+class TestView(object):
- def setUp(self):
+ def setup(self):
(a, b) = (np.arange(10), np.random.rand(10))
- ndtype = [('a', np.float), ('b', np.float)]
+ ndtype = [('a', float), ('b', float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
@@ -370,48 +363,42 @@ class TestView(TestCase):
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
- self.assertTrue(isinstance(test, MaskedRecords))
+ assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
- ntype = (np.float, 2)
+ ntype = (float, 2)
test = mrec.view(ntype)
- self.assertTrue(isinstance(test, ma.MaskedArray))
- assert_equal(test, np.array(list(zip(a, b)), dtype=np.float))
- self.assertTrue(test[3, 1] is ma.masked)
+ assert_(isinstance(test, ma.MaskedArray))
+ assert_equal(test, np.array(list(zip(a, b)), dtype=float))
+ assert_(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
- alttype = [('A', np.float), ('B', np.float)]
+ alttype = [('A', float), ('B', float)]
test = mrec.view(alttype)
- self.assertTrue(isinstance(test, MaskedRecords))
+ assert_(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
- self.assertTrue(test['B'][3] is masked)
+ assert_(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
- self.assertTrue(test._fill_value is None)
+ assert_(test._fill_value is None)
##############################################################################
-class TestMRecordsImport(TestCase):
- # Base test class for MaskedArrays.
- def __init__(self, *args, **kwds):
- TestCase.__init__(self, *args, **kwds)
- self.setup()
-
- def setup(self):
- # Generic setup
- _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
- _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
- _c = ma.array([b'one', b'two', b'three'],
- mask=[0, 0, 1], dtype='|S8')
- ddtype = [('a', int), ('b', float), ('c', '|S8')]
- mrec = fromarrays([_a, _b, _c], dtype=ddtype,
- fill_value=(b'99999', b'99999.',
- b'N/A'))
- nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
- self.data = (mrec, nrec, ddtype)
+class TestMRecordsImport(object):
+
+ _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
+ _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
+ _c = ma.array([b'one', b'two', b'three'],
+ mask=[0, 0, 1], dtype='|S8')
+ ddtype = [('a', int), ('b', float), ('c', '|S8')]
+ mrec = fromarrays([_a, _b, _c], dtype=ddtype,
+ fill_value=(b'99999', b'99999.',
+ b'N/A'))
+ nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
+ data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
@@ -485,7 +472,7 @@ class TestMRecordsImport(TestCase):
with open(path, 'w') as f:
f.write(fcontent)
mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
- self.assertTrue(isinstance(mrectxt, MaskedRecords))
+ assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
@@ -504,7 +491,7 @@ def test_record_array_with_object_field():
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
- dtype=[('a', int), ('b', np.object)])
+ dtype=[('a', int), ('b', object)])
# getting an item used to fail
y[1]
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 51fa6ac36..9152e8d73 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -6,7 +6,8 @@ import numpy as np
import numpy.core.umath as umath
import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
- TestCase, run_module_suite, assert_, suppress_warnings)
+ run_module_suite, assert_, assert_raises, assert_equal,
+ )
from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
@@ -32,9 +33,9 @@ def eq(v, w, msg=''):
return result
-class TestMa(TestCase):
+class TestMa(object):
- def setUp(self):
+ def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
@@ -52,16 +53,16 @@ class TestMa(TestCase):
def test_testBasic1d(self):
# Test of basic array creation and properties in 1 dimension.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertFalse(isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertEqual(shape(xm), s)
- self.assertEqual(xm.shape, s)
- self.assertEqual(xm.dtype, x.dtype)
- self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
- self.assertEqual(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
- self.assertTrue(eq(xm, xf))
- self.assertTrue(eq(filled(xm, 1.e20), xf))
- self.assertTrue(eq(x, xm))
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.dtype, x.dtype)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
def test_testBasic2d(self):
# Test of basic array creation and properties in 2 dimensions.
@@ -73,107 +74,107 @@ class TestMa(TestCase):
ym.shape = s
xf.shape = s
- self.assertFalse(isMaskedArray(x))
- self.assertTrue(isMaskedArray(xm))
- self.assertEqual(shape(xm), s)
- self.assertEqual(xm.shape, s)
- self.assertEqual(xm.size, reduce(lambda x, y:x * y, s))
- self.assertEqual(count(xm),
+ assert_(not isMaskedArray(x))
+ assert_(isMaskedArray(xm))
+ assert_equal(shape(xm), s)
+ assert_equal(xm.shape, s)
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
+ assert_equal(count(xm),
len(m1) - reduce(lambda x, y:x + y, m1))
- self.assertTrue(eq(xm, xf))
- self.assertTrue(eq(filled(xm, 1.e20), xf))
- self.assertTrue(eq(x, xm))
- self.setUp()
+ assert_(eq(xm, xf))
+ assert_(eq(filled(xm, 1.e20), xf))
+ assert_(eq(x, xm))
+ self.setup()
def test_testArithmetic(self):
# Test of basic arithmetic.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1, 2], [0, 4]])
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
- self.assertTrue(eq(a2d * a2d, a2d * a2dm))
- self.assertTrue(eq(a2d + a2d, a2d + a2dm))
- self.assertTrue(eq(a2d - a2d, a2d - a2dm))
+ assert_(eq(a2d * a2d, a2d * a2dm))
+ assert_(eq(a2d + a2d, a2d + a2dm))
+ assert_(eq(a2d - a2d, a2d - a2dm))
for s in [(12,), (4, 3), (2, 6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
- self.assertTrue(eq(-x, -xm))
- self.assertTrue(eq(x + y, xm + ym))
- self.assertTrue(eq(x - y, xm - ym))
- self.assertTrue(eq(x * y, xm * ym))
+ assert_(eq(-x, -xm))
+ assert_(eq(x + y, xm + ym))
+ assert_(eq(x - y, xm - ym))
+ assert_(eq(x * y, xm * ym))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(x / y, xm / ym))
- self.assertTrue(eq(a10 + y, a10 + ym))
- self.assertTrue(eq(a10 - y, a10 - ym))
- self.assertTrue(eq(a10 * y, a10 * ym))
+ assert_(eq(x / y, xm / ym))
+ assert_(eq(a10 + y, a10 + ym))
+ assert_(eq(a10 - y, a10 - ym))
+ assert_(eq(a10 * y, a10 * ym))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(a10 / y, a10 / ym))
- self.assertTrue(eq(x + a10, xm + a10))
- self.assertTrue(eq(x - a10, xm - a10))
- self.assertTrue(eq(x * a10, xm * a10))
- self.assertTrue(eq(x / a10, xm / a10))
- self.assertTrue(eq(x ** 2, xm ** 2))
- self.assertTrue(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
- self.assertTrue(eq(x ** y, xm ** ym))
- self.assertTrue(eq(np.add(x, y), add(xm, ym)))
- self.assertTrue(eq(np.subtract(x, y), subtract(xm, ym)))
- self.assertTrue(eq(np.multiply(x, y), multiply(xm, ym)))
+ assert_(eq(a10 / y, a10 / ym))
+ assert_(eq(x + a10, xm + a10))
+ assert_(eq(x - a10, xm - a10))
+ assert_(eq(x * a10, xm * a10))
+ assert_(eq(x / a10, xm / a10))
+ assert_(eq(x ** 2, xm ** 2))
+ assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
+ assert_(eq(x ** y, xm ** ym))
+ assert_(eq(np.add(x, y), add(xm, ym)))
+ assert_(eq(np.subtract(x, y), subtract(xm, ym)))
+ assert_(eq(np.multiply(x, y), multiply(xm, ym)))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(np.divide(x, y), divide(xm, ym)))
+ assert_(eq(np.divide(x, y), divide(xm, ym)))
def test_testMixedArithmetic(self):
na = np.array([1])
ma = array([1])
- self.assertTrue(isinstance(na + ma, MaskedArray))
- self.assertTrue(isinstance(ma + na, MaskedArray))
+ assert_(isinstance(na + ma, MaskedArray))
+ assert_(isinstance(ma + na, MaskedArray))
def test_testUfuncs1(self):
# Test various functions such as sin, cos.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue(eq(np.cos(x), cos(xm)))
- self.assertTrue(eq(np.cosh(x), cosh(xm)))
- self.assertTrue(eq(np.sin(x), sin(xm)))
- self.assertTrue(eq(np.sinh(x), sinh(xm)))
- self.assertTrue(eq(np.tan(x), tan(xm)))
- self.assertTrue(eq(np.tanh(x), tanh(xm)))
+ assert_(eq(np.cos(x), cos(xm)))
+ assert_(eq(np.cosh(x), cosh(xm)))
+ assert_(eq(np.sin(x), sin(xm)))
+ assert_(eq(np.sinh(x), sinh(xm)))
+ assert_(eq(np.tan(x), tan(xm)))
+ assert_(eq(np.tanh(x), tanh(xm)))
with np.errstate(divide='ignore', invalid='ignore'):
- self.assertTrue(eq(np.sqrt(abs(x)), sqrt(xm)))
- self.assertTrue(eq(np.log(abs(x)), log(xm)))
- self.assertTrue(eq(np.log10(abs(x)), log10(xm)))
- self.assertTrue(eq(np.exp(x), exp(xm)))
- self.assertTrue(eq(np.arcsin(z), arcsin(zm)))
- self.assertTrue(eq(np.arccos(z), arccos(zm)))
- self.assertTrue(eq(np.arctan(z), arctan(zm)))
- self.assertTrue(eq(np.arctan2(x, y), arctan2(xm, ym)))
- self.assertTrue(eq(np.absolute(x), absolute(xm)))
- self.assertTrue(eq(np.equal(x, y), equal(xm, ym)))
- self.assertTrue(eq(np.not_equal(x, y), not_equal(xm, ym)))
- self.assertTrue(eq(np.less(x, y), less(xm, ym)))
- self.assertTrue(eq(np.greater(x, y), greater(xm, ym)))
- self.assertTrue(eq(np.less_equal(x, y), less_equal(xm, ym)))
- self.assertTrue(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
- self.assertTrue(eq(np.conjugate(x), conjugate(xm)))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, ym))))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((x, y))))
- self.assertTrue(eq(np.concatenate((x, y)), concatenate((xm, y))))
- self.assertTrue(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
+ assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
+ assert_(eq(np.log(abs(x)), log(xm)))
+ assert_(eq(np.log10(abs(x)), log10(xm)))
+ assert_(eq(np.exp(x), exp(xm)))
+ assert_(eq(np.arcsin(z), arcsin(zm)))
+ assert_(eq(np.arccos(z), arccos(zm)))
+ assert_(eq(np.arctan(z), arctan(zm)))
+ assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
+ assert_(eq(np.absolute(x), absolute(xm)))
+ assert_(eq(np.equal(x, y), equal(xm, ym)))
+ assert_(eq(np.not_equal(x, y), not_equal(xm, ym)))
+ assert_(eq(np.less(x, y), less(xm, ym)))
+ assert_(eq(np.greater(x, y), greater(xm, ym)))
+ assert_(eq(np.less_equal(x, y), less_equal(xm, ym)))
+ assert_(eq(np.greater_equal(x, y), greater_equal(xm, ym)))
+ assert_(eq(np.conjugate(x), conjugate(xm)))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
+ assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
+ assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
+ assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
def test_xtestCount(self):
# Test count
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
- self.assertTrue(count(ott).dtype.type is np.intp)
- self.assertEqual(3, count(ott))
- self.assertEqual(1, count(1))
- self.assertTrue(eq(0, array(1, mask=[1])))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_equal(3, count(ott))
+ assert_equal(1, count(1))
+ assert_(eq(0, array(1, mask=[1])))
ott = ott.reshape((2, 2))
- self.assertTrue(count(ott).dtype.type is np.intp)
+ assert_(count(ott).dtype.type is np.intp)
assert_(isinstance(count(ott, 0), np.ndarray))
- self.assertTrue(count(ott).dtype.type is np.intp)
- self.assertTrue(eq(3, count(ott)))
+ assert_(count(ott).dtype.type is np.intp)
+ assert_(eq(3, count(ott)))
assert_(getmask(count(ott, 0)) is nomask)
- self.assertTrue(eq([1, 2], count(ott, 0)))
+ assert_(eq([1, 2], count(ott, 0)))
def test_testMinMax(self):
# Test minimum and maximum.
@@ -182,29 +183,29 @@ class TestMa(TestCase):
xmr = ravel(xm)
# true because of careful selection of data
- self.assertTrue(eq(max(xr), maximum.reduce(xmr)))
- self.assertTrue(eq(min(xr), minimum.reduce(xmr)))
+ assert_(eq(max(xr), maximum.reduce(xmr)))
+ assert_(eq(min(xr), minimum.reduce(xmr)))
def test_testAddSumProd(self):
# Test add, sum, product.
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
- self.assertTrue(eq(np.add.reduce(x), add.reduce(x)))
- self.assertTrue(eq(np.add.accumulate(x), add.accumulate(x)))
- self.assertTrue(eq(4, sum(array(4), axis=0)))
- self.assertTrue(eq(4, sum(array(4), axis=0)))
- self.assertTrue(eq(np.sum(x, axis=0), sum(x, axis=0)))
- self.assertTrue(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
- self.assertTrue(eq(np.sum(x, 0), sum(x, 0)))
- self.assertTrue(eq(np.product(x, axis=0), product(x, axis=0)))
- self.assertTrue(eq(np.product(x, 0), product(x, 0)))
- self.assertTrue(eq(np.product(filled(xm, 1), axis=0),
+ assert_(eq(np.add.reduce(x), add.reduce(x)))
+ assert_(eq(np.add.accumulate(x), add.accumulate(x)))
+ assert_(eq(4, sum(array(4), axis=0)))
+ assert_(eq(4, sum(array(4), axis=0)))
+ assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
+ assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
+ assert_(eq(np.sum(x, 0), sum(x, 0)))
+ assert_(eq(np.product(x, axis=0), product(x, axis=0)))
+ assert_(eq(np.product(x, 0), product(x, 0)))
+ assert_(eq(np.product(filled(xm, 1), axis=0),
product(xm, axis=0)))
if len(s) > 1:
- self.assertTrue(eq(np.concatenate((x, y), 1),
+ assert_(eq(np.concatenate((x, y), 1),
concatenate((xm, ym), 1)))
- self.assertTrue(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
- self.assertTrue(eq(np.sum(x, 1), sum(x, 1)))
- self.assertTrue(eq(np.product(x, 1), product(x, 1)))
+ assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
+ assert_(eq(np.sum(x, 1), sum(x, 1)))
+ assert_(eq(np.product(x, 1), product(x, 1)))
def test_testCI(self):
# Test of conversions and indexing
@@ -251,80 +252,105 @@ class TestMa(TestCase):
x2 = np.array([1, 'hello', 2, 3], object)
s1 = x1[1]
s2 = x2[1]
- self.assertEqual(type(s2), str)
- self.assertEqual(type(s1), str)
- self.assertEqual(s1, s2)
+ assert_equal(type(s2), str)
+ assert_equal(type(s1), str)
+ assert_equal(s1, s2)
assert_(x1[1:1].shape == (0,))
def test_testCopySize(self):
# Tests of some subtle points of copying and sizing.
- with suppress_warnings() as sup:
- sup.filter(
- np.ma.core.MaskedArrayFutureWarning,
- "setting an item on a masked array which has a "
- "shared mask will not copy")
-
- n = [0, 0, 1, 0, 0]
- m = make_mask(n)
- m2 = make_mask(m)
- self.assertTrue(m is m2)
- m3 = make_mask(m, copy=1)
- self.assertTrue(m is not m3)
-
- x1 = np.arange(5)
- y1 = array(x1, mask=m)
- self.assertTrue(y1._data is not x1)
- self.assertTrue(allequal(x1, y1._data))
- self.assertTrue(y1.mask is m)
-
- y1a = array(y1, copy=0)
- self.assertTrue(y1a.mask is y1.mask)
-
- y2 = array(x1, mask=m, copy=0)
- self.assertTrue(y2.mask is m)
- self.assertTrue(y2[2] is masked)
- y2[2] = 9
- self.assertTrue(y2[2] is not masked)
- self.assertTrue(y2.mask is not m)
- self.assertTrue(allequal(y2.mask, 0))
-
- y3 = array(x1 * 1.0, mask=m)
- self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)
-
- x4 = arange(4)
- x4[2] = masked
- y4 = resize(x4, (8,))
- self.assertTrue(eq(concatenate([x4, x4]), y4))
- self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
- y5 = repeat(x4, (2, 2, 2, 2), axis=0)
- self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
- y6 = repeat(x4, 2, axis=0)
- self.assertTrue(eq(y5, y6))
+ n = [0, 0, 1, 0, 0]
+ m = make_mask(n)
+ m2 = make_mask(m)
+ assert_(m is m2)
+ m3 = make_mask(m, copy=1)
+ assert_(m is not m3)
+
+ x1 = np.arange(5)
+ y1 = array(x1, mask=m)
+ assert_(y1._data is not x1)
+ assert_(allequal(x1, y1._data))
+ assert_(y1.mask is m)
+
+ y1a = array(y1, copy=0)
+ assert_(y1a.mask is y1.mask)
+
+ y2 = array(x1, mask=m3, copy=0)
+ assert_(y2.mask is m3)
+ assert_(y2[2] is masked)
+ y2[2] = 9
+ assert_(y2[2] is not masked)
+ assert_(y2.mask is m3)
+ assert_(allequal(y2.mask, 0))
+
+ y2a = array(x1, mask=m, copy=1)
+ assert_(y2a.mask is not m)
+ assert_(y2a[2] is masked)
+ y2a[2] = 9
+ assert_(y2a[2] is not masked)
+ assert_(y2a.mask is not m)
+ assert_(allequal(y2a.mask, 0))
+
+ y3 = array(x1 * 1.0, mask=m)
+ assert_(filled(y3).dtype is (x1 * 1.0).dtype)
+
+ x4 = arange(4)
+ x4[2] = masked
+ y4 = resize(x4, (8,))
+ assert_(eq(concatenate([x4, x4]), y4))
+ assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
+ y5 = repeat(x4, (2, 2, 2, 2), axis=0)
+ assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
+ y6 = repeat(x4, 2, axis=0)
+ assert_(eq(y5, y6))
def test_testPut(self):
# Test of put
- with suppress_warnings() as sup:
- sup.filter(
- np.ma.core.MaskedArrayFutureWarning,
- "setting an item on a masked array which has a "
- "shared mask will not copy")
- d = arange(5)
- n = [0, 0, 0, 1, 1]
- m = make_mask(n)
- x = array(d, mask=m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
- x[[1, 4]] = [10, 40]
- self.assertTrue(x.mask is not m)
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is not masked)
- self.assertTrue(eq(x, [0, 10, 2, -1, 40]))
-
- x = array(d, mask=m)
- x.put([0, 1, 2], [-1, 100, 200])
- self.assertTrue(eq(x, [-1, 100, 200, 0, 0]))
- self.assertTrue(x[3] is masked)
- self.assertTrue(x[4] is masked)
+ d = arange(5)
+ n = [0, 0, 0, 1, 1]
+ m = make_mask(n)
+ m2 = m.copy()
+ x = array(d, mask=m)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ x[[1, 4]] = [10, 40]
+ assert_(x.mask is m)
+ assert_(x[3] is masked)
+ assert_(x[4] is not masked)
+ assert_(eq(x, [0, 10, 2, -1, 40]))
+
+ x = array(d, mask=m2, copy=True)
+ x.put([0, 1, 2], [-1, 100, 200])
+ assert_(x.mask is not m2)
+ assert_(x[3] is masked)
+ assert_(x[4] is masked)
+ assert_(eq(x, [-1, 100, 200, 0, 0]))
+
+ def test_testPut2(self):
+ # Test of put
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ x[2:4] = z
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
+
+ d = arange(5)
+ x = array(d, mask=[0, 0, 0, 0, 0])
+ y = x[2:4]
+ z = array([10, 40], mask=[1, 0])
+ assert_(x[2] is not masked)
+ assert_(x[3] is not masked)
+ y[:] = z
+ assert_(y[0] is masked)
+ assert_(y[1] is not masked)
+ assert_(eq(y, [10, 40]))
+ assert_(x[2] is masked)
+ assert_(x[3] is not masked)
+ assert_(eq(x, [0, 1, 10, 40, 4]))
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
@@ -531,147 +557,147 @@ class TestMa(TestCase):
# Test of masked element
xx = arange(6)
xx[1] = masked
- self.assertTrue(str(masked) == '--')
- self.assertTrue(xx[1] is masked)
- self.assertEqual(filled(xx[1], 0), 0)
+ assert_(str(masked) == '--')
+ assert_(xx[1] is masked)
+ assert_equal(filled(xx[1], 0), 0)
def test_testAverage1(self):
# Test of average.
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
- self.assertTrue(eq(2.0, average(ott, axis=0)))
- self.assertTrue(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
+ assert_(eq(2.0, average(ott, axis=0)))
+ assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
- self.assertTrue(eq(2.0, result))
- self.assertTrue(wts == 4.0)
+ assert_(eq(2.0, result))
+ assert_(wts == 4.0)
ott[:] = masked
- self.assertTrue(average(ott, axis=0) is masked)
+ assert_(average(ott, axis=0) is masked)
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
ott = ott.reshape(2, 2)
ott[:, 1] = masked
- self.assertTrue(eq(average(ott, axis=0), [2.0, 0.0]))
- self.assertTrue(average(ott, axis=1)[0] is masked)
- self.assertTrue(eq([2., 0.], average(ott, axis=0)))
+ assert_(eq(average(ott, axis=0), [2.0, 0.0]))
+ assert_(average(ott, axis=1)[0] is masked)
+ assert_(eq([2., 0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
- self.assertTrue(eq(wts, [1., 0.]))
+ assert_(eq(wts, [1., 0.]))
def test_testAverage2(self):
# More tests of average.
w1 = [0, 1, 1, 1, 1, 0]
w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
x = arange(6)
- self.assertTrue(allclose(average(x, axis=0), 2.5))
- self.assertTrue(allclose(average(x, axis=0, weights=w1), 2.5))
+ assert_(allclose(average(x, axis=0), 2.5))
+ assert_(allclose(average(x, axis=0, weights=w1), 2.5))
y = array([arange(6), 2.0 * arange(6)])
- self.assertTrue(allclose(average(y, None),
+ assert_(allclose(average(y, None),
np.add.reduce(np.arange(6)) * 3. / 12.))
- self.assertTrue(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
- self.assertTrue(allclose(average(y, axis=1),
+ assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
+ assert_(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
- self.assertTrue(allclose(average(y, None, weights=w2), 20. / 6.))
- self.assertTrue(allclose(average(y, axis=0, weights=w2),
+ assert_(allclose(average(y, None, weights=w2), 20. / 6.))
+ assert_(allclose(average(y, axis=0, weights=w2),
[0., 1., 2., 3., 4., 10.]))
- self.assertTrue(allclose(average(y, axis=1),
+ assert_(allclose(average(y, axis=1),
[average(x, axis=0), average(x, axis=0)*2.0]))
m1 = zeros(6)
m2 = [0, 0, 1, 1, 0, 0]
m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
- self.assertTrue(allclose(average(masked_array(x, m1), axis=0), 2.5))
- self.assertTrue(allclose(average(masked_array(x, m2), axis=0), 2.5))
- self.assertTrue(average(masked_array(x, m4), axis=0) is masked)
- self.assertEqual(average(masked_array(x, m5), axis=0), 0.0)
- self.assertEqual(count(average(masked_array(x, m4), axis=0)), 0)
+ assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
+ assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
+ assert_(average(masked_array(x, m4), axis=0) is masked)
+ assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+ assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
- self.assertTrue(allclose(average(z, None), 20. / 6.))
- self.assertTrue(allclose(average(z, axis=0),
+ assert_(allclose(average(z, None), 20. / 6.))
+ assert_(allclose(average(z, axis=0),
[0., 1., 99., 99., 4.0, 7.5]))
- self.assertTrue(allclose(average(z, axis=1), [2.5, 5.0]))
- self.assertTrue(allclose(average(z, axis=0, weights=w2),
+ assert_(allclose(average(z, axis=1), [2.5, 5.0]))
+ assert_(allclose(average(z, axis=0, weights=w2),
[0., 1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
- self.assertEqual(shape(r1), shape(w1))
- self.assertEqual(r1.shape, w1.shape)
+ assert_equal(shape(r1), shape(w1))
+ assert_equal(r1.shape, w1.shape)
r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
- self.assertEqual(shape(w2), shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), returned=1)
- self.assertEqual(shape(w2), shape(r2))
+ assert_equal(shape(w2), shape(r2))
r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
- self.assertTrue(shape(w2) == shape(r2))
+ assert_(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
a2da = average(a2d, axis=0)
- self.assertTrue(eq(a2da, [0.5, 3.0]))
+ assert_(eq(a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
- self.assertTrue(eq(a2dma, [1.0, 3.0]))
+ assert_(eq(a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
- self.assertTrue(eq(a2dma, 7. / 3.))
+ assert_(eq(a2dma, 7. / 3.))
a2dma = average(a2dm, axis=1)
- self.assertTrue(eq(a2dma, [1.5, 4.0]))
+ assert_(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
- self.assertEqual(1, int(array(1)))
- self.assertEqual(1.0, float(array(1)))
- self.assertEqual(1, int(array([[[1]]])))
- self.assertEqual(1.0, float(array([[1]])))
- self.assertRaises(TypeError, float, array([1, 1]))
- self.assertRaises(ValueError, bool, array([0, 1]))
- self.assertRaises(ValueError, bool, array([0, 0], mask=[0, 1]))
+ assert_equal(1, int(array(1)))
+ assert_equal(1.0, float(array(1)))
+ assert_equal(1, int(array([[[1]]])))
+ assert_equal(1.0, float(array([[1]])))
+ assert_raises(TypeError, float, array([1, 1]))
+ assert_raises(ValueError, bool, array([0, 1]))
+ assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
#TODO FIXME: Find out what the following raises a warning in r8247
with np.errstate(divide='ignore'):
- self.assertTrue((1 / array(0)).mask)
- self.assertTrue((1 + xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue((-xm).mask)
- self.assertTrue(maximum(xm, xm).mask)
- self.assertTrue(minimum(xm, xm).mask)
- self.assertTrue(xm.filled().dtype is xm._data.dtype)
+ assert_((1 / array(0)).mask)
+ assert_((1 + xm).mask)
+ assert_((-xm).mask)
+ assert_((-xm).mask)
+ assert_(maximum(xm, xm).mask)
+ assert_(minimum(xm, xm).mask)
+ assert_(xm.filled().dtype is xm._data.dtype)
x = array(0, mask=0)
- self.assertTrue(x.filled() == x._data)
- self.assertEqual(str(xm), str(masked_print_option))
+ assert_(x.filled() == x._data)
+ assert_equal(str(xm), str(masked_print_option))
def test_testArrayMethods(self):
a = array([1, 3, 2])
- self.assertTrue(eq(a.any(), a._data.any()))
- self.assertTrue(eq(a.all(), a._data.all()))
- self.assertTrue(eq(a.argmax(), a._data.argmax()))
- self.assertTrue(eq(a.argmin(), a._data.argmin()))
- self.assertTrue(eq(a.choose(0, 1, 2, 3, 4),
+ assert_(eq(a.any(), a._data.any()))
+ assert_(eq(a.all(), a._data.all()))
+ assert_(eq(a.argmax(), a._data.argmax()))
+ assert_(eq(a.argmin(), a._data.argmin()))
+ assert_(eq(a.choose(0, 1, 2, 3, 4),
a._data.choose(0, 1, 2, 3, 4)))
- self.assertTrue(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
- self.assertTrue(eq(a.conj(), a._data.conj()))
- self.assertTrue(eq(a.conjugate(), a._data.conjugate()))
+ assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
+ assert_(eq(a.conj(), a._data.conj()))
+ assert_(eq(a.conjugate(), a._data.conjugate()))
m = array([[1, 2], [3, 4]])
- self.assertTrue(eq(m.diagonal(), m._data.diagonal()))
- self.assertTrue(eq(a.sum(), a._data.sum()))
- self.assertTrue(eq(a.take([1, 2]), a._data.take([1, 2])))
- self.assertTrue(eq(m.transpose(), m._data.transpose()))
+ assert_(eq(m.diagonal(), m._data.diagonal()))
+ assert_(eq(a.sum(), a._data.sum()))
+ assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
+ assert_(eq(m.transpose(), m._data.transpose()))
def test_testArrayAttributes(self):
a = array([1, 3, 2])
- self.assertEqual(a.ndim, 1)
+ assert_equal(a.ndim, 1)
def test_testAPI(self):
- self.assertFalse([m for m in dir(np.ndarray)
- if m not in dir(MaskedArray) and
- not m.startswith('_')])
+ assert_(not [m for m in dir(np.ndarray)
+ if m not in dir(MaskedArray) and
+ not m.startswith('_')])
def test_testSingleElementSubscript(self):
a = array([1, 3, 2])
b = array([1, 3, 2], mask=[1, 0, 1])
- self.assertEqual(a[0].shape, ())
- self.assertEqual(b[0].shape, ())
- self.assertEqual(b[1].shape, ())
+ assert_equal(a[0].shape, ())
+ assert_equal(b[0].shape, ())
+ assert_equal(b[1].shape, ())
-class TestUfuncs(TestCase):
- def setUp(self):
+class TestUfuncs(object):
+ def setup(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
@@ -709,35 +735,35 @@ class TestUfuncs(TestCase):
np.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
- self.assertTrue(eq(ur.filled(0), mr.filled(0), f))
- self.assertTrue(eqmask(ur.mask, mr.mask))
+ assert_(eq(ur.filled(0), mr.filled(0), f))
+ assert_(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self.d[0]
- self.assertFalse(alltrue(a, axis=0))
- self.assertTrue(sometrue(a, axis=0))
- self.assertEqual(sum(a[:3], axis=0), 0)
- self.assertEqual(product(a, axis=0), 0)
+ assert_(not alltrue(a, axis=0))
+ assert_(sometrue(a, axis=0))
+ assert_equal(sum(a[:3], axis=0), 0)
+ assert_equal(product(a, axis=0), 0)
def test_minmax(self):
a = arange(1, 13).reshape(3, 4)
amask = masked_where(a < 5, a)
- self.assertEqual(amask.max(), a.max())
- self.assertEqual(amask.min(), 5)
- self.assertTrue((amask.max(0) == a.max(0)).all())
- self.assertTrue((amask.min(0) == [5, 6, 7, 8]).all())
- self.assertTrue(amask.max(1)[0].mask)
- self.assertTrue(amask.min(1)[0].mask)
+ assert_equal(amask.max(), a.max())
+ assert_equal(amask.min(), 5)
+ assert_((amask.max(0) == a.max(0)).all())
+ assert_((amask.min(0) == [5, 6, 7, 8]).all())
+ assert_(amask.max(1)[0].mask)
+ assert_(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
- self.assertTrue(eq(nonzero(x), [0]))
+ assert_(eq(nonzero(x), [0]))
-class TestArrayMethods(TestCase):
+class TestArrayMethods(object):
- def setUp(self):
+ def setup(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
@@ -762,63 +788,63 @@ class TestArrayMethods(TestCase):
def test_trace(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXdiag = mX.diagonal()
- self.assertEqual(mX.trace(), mX.diagonal().compressed().sum())
- self.assertTrue(eq(mX.trace(),
+ assert_equal(mX.trace(), mX.diagonal().compressed().sum())
+ assert_(eq(mX.trace(),
X.trace() - sum(mXdiag.mask * X.diagonal(),
axis=0)))
def test_clip(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
clipped = mx.clip(2, 8)
- self.assertTrue(eq(clipped.mask, mx.mask))
- self.assertTrue(eq(clipped._data, x.clip(2, 8)))
- self.assertTrue(eq(clipped._data, mx._data.clip(2, 8)))
+ assert_(eq(clipped.mask, mx.mask))
+ assert_(eq(clipped._data, x.clip(2, 8)))
+ assert_(eq(clipped._data, mx._data.clip(2, 8)))
def test_ptp(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
(n, m) = X.shape
- self.assertEqual(mx.ptp(), mx.compressed().ptp())
+ assert_equal(mx.ptp(), mx.compressed().ptp())
rows = np.zeros(n, np.float_)
cols = np.zeros(m, np.float_)
for k in range(m):
cols[k] = mX[:, k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
- self.assertTrue(eq(mX.ptp(0), cols))
- self.assertTrue(eq(mX.ptp(1), rows))
+ assert_(eq(mX.ptp(0), cols))
+ assert_(eq(mX.ptp(1), rows))
def test_swapaxes(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXswapped = mX.swapaxes(0, 1)
- self.assertTrue(eq(mXswapped[-1], mX[:, -1]))
+ assert_(eq(mXswapped[-1], mX[:, -1]))
mXXswapped = mXX.swapaxes(0, 2)
- self.assertEqual(mXXswapped.shape, (2, 2, 3, 3))
+ assert_equal(mXXswapped.shape, (2, 2, 3, 3))
def test_cumprod(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumprod(0)
- self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(0)))
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
- self.assertTrue(eq(mXcp._data, mX.filled(1).cumprod(1)))
+ assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))
def test_cumsum(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
mXcp = mX.cumsum(0)
- self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(0)))
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
- self.assertTrue(eq(mXcp._data, mX.filled(0).cumsum(1)))
+ assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))
def test_varstd(self):
(x, X, XX, m, mx, mX, mXX,) = self.d
- self.assertTrue(eq(mX.var(axis=None), mX.compressed().var()))
- self.assertTrue(eq(mX.std(axis=None), mX.compressed().std()))
- self.assertTrue(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
- self.assertTrue(eq(mX.var().shape, X.var().shape))
+ assert_(eq(mX.var(axis=None), mX.compressed().var()))
+ assert_(eq(mX.std(axis=None), mX.compressed().std()))
+ assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
+ assert_(eq(mX.var().shape, X.var().shape))
(mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
- self.assertTrue(eq(mXvar1[k], mX[k].compressed().var()))
- self.assertTrue(eq(mXvar0[k], mX[:, k].compressed().var()))
- self.assertTrue(eq(np.sqrt(mXvar0[k]),
+ assert_(eq(mXvar1[k], mX[k].compressed().var()))
+ assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
+ assert_(eq(np.sqrt(mXvar0[k]),
mX[:, k].compressed().std()))
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index d1fb2bb2b..925b21a14 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -3,25 +3,24 @@ from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
-from numpy.testing import (assert_, TestCase, assert_array_equal,
- assert_allclose, run_module_suite,
- suppress_warnings)
+from numpy.testing import (
+ assert_, assert_array_equal, assert_allclose, run_module_suite,
+ suppress_warnings
+ )
-rlevel = 1
-
-class TestRegression(TestCase):
- def test_masked_array_create(self,level=rlevel):
+class TestRegression(object):
+ def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
mask=[0, 0, 0, 1, 1, 1, 0, 0])
assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
- def test_masked_array(self,level=rlevel):
+ def test_masked_array(self):
# Ticket #61
np.ma.array(1, mask=[1])
- def test_mem_masked_where(self,level=rlevel):
+ def test_mem_masked_where(self):
# Ticket #62
from numpy.ma import masked_where, MaskType
a = np.zeros((1, 1))
@@ -29,7 +28,7 @@ class TestRegression(TestCase):
c = masked_where(b, a)
a-c
- def test_masked_array_multiply(self,level=rlevel):
+ def test_masked_array_multiply(self):
# Ticket #254
a = np.ma.zeros((4, 1))
a[2, 0] = np.ma.masked
@@ -37,7 +36,7 @@ class TestRegression(TestCase):
a*b
b*a
- def test_masked_array_repeat(self, level=rlevel):
+ def test_masked_array_repeat(self):
# Ticket #271
np.ma.array([1], mask=False).repeat(10)
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index b2995fd57..e59dd4656 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -9,7 +9,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_raises, dec
+from numpy.testing import run_module_suite, assert_, assert_raises, dec
from numpy.ma.testutils import assert_equal
from numpy.ma.core import (
array, arange, masked, MaskedArray, masked_array, log, add, hypot,
@@ -172,10 +172,10 @@ class ComplicatedSubArray(SubArray):
return obj
-class TestSubclassing(TestCase):
+class TestSubclassing(object):
# Test suite for masked subclasses of ndarray.
- def setUp(self):
+ def setup(self):
x = np.arange(5, dtype='float')
mx = mmatrix(x, mask=[0, 1, 0, 0, 0])
self.data = (x, mx)
@@ -186,41 +186,41 @@ class TestSubclassing(TestCase):
m = [0, 0, 1, 0, 0]
xsub = SubArray(x)
xmsub = masked_array(xsub, mask=m)
- self.assertTrue(isinstance(xmsub, MaskedArray))
+ assert_(isinstance(xmsub, MaskedArray))
assert_equal(xmsub._data, xsub)
- self.assertTrue(isinstance(xmsub._data, SubArray))
+ assert_(isinstance(xmsub._data, SubArray))
def test_maskedarray_subclassing(self):
# Tests subclassing MaskedArray
(x, mx) = self.data
- self.assertTrue(isinstance(mx._data, np.matrix))
+ assert_(isinstance(mx._data, np.matrix))
def test_masked_unary_operations(self):
# Tests masked_unary_operation
(x, mx) = self.data
with np.errstate(divide='ignore'):
- self.assertTrue(isinstance(log(mx), mmatrix))
+ assert_(isinstance(log(mx), mmatrix))
assert_equal(log(x), np.log(x))
def test_masked_binary_operations(self):
# Tests masked_binary_operation
(x, mx) = self.data
# Result should be a mmatrix
- self.assertTrue(isinstance(add(mx, mx), mmatrix))
- self.assertTrue(isinstance(add(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx), mmatrix))
+ assert_(isinstance(add(mx, x), mmatrix))
# Result should work
assert_equal(add(mx, x), mx+x)
- self.assertTrue(isinstance(add(mx, mx)._data, np.matrix))
- self.assertTrue(isinstance(add.outer(mx, mx), mmatrix))
- self.assertTrue(isinstance(hypot(mx, mx), mmatrix))
- self.assertTrue(isinstance(hypot(mx, x), mmatrix))
+ assert_(isinstance(add(mx, mx)._data, np.matrix))
+ assert_(isinstance(add.outer(mx, mx), mmatrix))
+ assert_(isinstance(hypot(mx, mx), mmatrix))
+ assert_(isinstance(hypot(mx, x), mmatrix))
def test_masked_binary_operations2(self):
# Tests domained_masked_binary_operation
(x, mx) = self.data
xmx = masked_array(mx.data.__array__(), mask=mx.mask)
- self.assertTrue(isinstance(divide(mx, mx), mmatrix))
- self.assertTrue(isinstance(divide(mx, x), mmatrix))
+ assert_(isinstance(divide(mx, mx), mmatrix))
+ assert_(isinstance(divide(mx, x), mmatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
def test_attributepropagation(self):
@@ -229,22 +229,22 @@ class TestSubclassing(TestCase):
ym = msubarray(x)
#
z = (my+1)
- self.assertTrue(isinstance(z, MaskedArray))
- self.assertTrue(not isinstance(z, MSubArray))
- self.assertTrue(isinstance(z._data, SubArray))
+ assert_(isinstance(z, MaskedArray))
+ assert_(not isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
assert_equal(z._data.info, {})
#
z = (ym+1)
- self.assertTrue(isinstance(z, MaskedArray))
- self.assertTrue(isinstance(z, MSubArray))
- self.assertTrue(isinstance(z._data, SubArray))
- self.assertTrue(z._data.info['added'] > 0)
+ assert_(isinstance(z, MaskedArray))
+ assert_(isinstance(z, MSubArray))
+ assert_(isinstance(z._data, SubArray))
+ assert_(z._data.info['added'] > 0)
# Test that inplace methods from data get used (gh-4617)
ym += 1
- self.assertTrue(isinstance(ym, MaskedArray))
- self.assertTrue(isinstance(ym, MSubArray))
- self.assertTrue(isinstance(ym._data, SubArray))
- self.assertTrue(ym._data.info['iadded'] > 0)
+ assert_(isinstance(ym, MaskedArray))
+ assert_(isinstance(ym, MSubArray))
+ assert_(isinstance(ym._data, SubArray))
+ assert_(ym._data.info['iadded'] > 0)
#
ym._set_mask([1, 0, 0, 0, 1])
assert_equal(ym._mask, [1, 0, 0, 0, 1])
@@ -253,7 +253,7 @@ class TestSubclassing(TestCase):
#
xsub = subarray(x, info={'name':'x'})
mxsub = masked_array(xsub)
- self.assertTrue(hasattr(mxsub, 'info'))
+ assert_(hasattr(mxsub, 'info'))
assert_equal(mxsub.info, xsub.info)
def test_subclasspreservation(self):
@@ -264,22 +264,22 @@ class TestSubclassing(TestCase):
xsub = MSubArray(x, mask=m, info={'xsub':xinfo})
#
mxsub = masked_array(xsub, subok=False)
- self.assertTrue(not isinstance(mxsub, MSubArray))
- self.assertTrue(isinstance(mxsub, MaskedArray))
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = asarray(xsub)
- self.assertTrue(not isinstance(mxsub, MSubArray))
- self.assertTrue(isinstance(mxsub, MaskedArray))
+ assert_(not isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MaskedArray))
assert_equal(mxsub._mask, m)
#
mxsub = masked_array(xsub, subok=True)
- self.assertTrue(isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, xsub._mask)
#
mxsub = asanyarray(xsub)
- self.assertTrue(isinstance(mxsub, MSubArray))
+ assert_(isinstance(mxsub, MSubArray))
assert_equal(mxsub.info, xsub.info)
assert_equal(mxsub._mask, m)
@@ -290,21 +290,21 @@ class TestSubclassing(TestCase):
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
# getter should return a ComplicatedSubArray, even for single item
# first check we wrote ComplicatedSubArray correctly
- self.assertTrue(isinstance(xcsub[1], ComplicatedSubArray))
- self.assertTrue(isinstance(xcsub[1,...], ComplicatedSubArray))
- self.assertTrue(isinstance(xcsub[1:4], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1,...], ComplicatedSubArray))
+ assert_(isinstance(xcsub[1:4], ComplicatedSubArray))
# now that it propagates inside the MaskedArray
- self.assertTrue(isinstance(mxcsub[1], ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
- self.assertTrue(mxcsub[0] is masked)
- self.assertTrue(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1,...].data, ComplicatedSubArray))
+ assert_(mxcsub[0] is masked)
+ assert_(isinstance(mxcsub[0,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub[1:4].data, ComplicatedSubArray))
# also for flattened version (which goes via MaskedIterator)
- self.assertTrue(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
- self.assertTrue(mxcsub.flat[0] is masked)
- self.assertTrue(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
+ assert_(isinstance(mxcsub.flat[1].data, ComplicatedSubArray))
+ assert_(mxcsub.flat[0] is masked)
+ assert_(isinstance(mxcsub.flat[1:4].base, ComplicatedSubArray))
# setter should only work with ComplicatedSubArray input
# first check we wrote ComplicatedSubArray correctly
@@ -325,21 +325,21 @@ class TestSubclassing(TestCase):
xcsub = ComplicatedSubArray(x)
mxcsub_nomask = masked_array(xcsub)
- self.assertTrue(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[1,...].data, ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0,...].data, ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
- self.assertTrue(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[1], ComplicatedSubArray))
+ assert_(isinstance(mxcsub_nomask[0], ComplicatedSubArray))
def test_subclass_repr(self):
"""test that repr uses the name of the subclass
and 'array' for np.ndarray"""
x = np.arange(5)
mx = masked_array(x, mask=[True, False, True, False, False])
- self.assertTrue(repr(mx).startswith('masked_array'))
+ assert_(repr(mx).startswith('masked_array'))
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
- self.assertTrue(repr(mxsub).startswith(
+ assert_(repr(mxsub).startswith(
'masked_{0}(data = [-- 1 -- 3 4]'.format(SubArray.__name__)))
def test_subclass_str(self):
@@ -348,13 +348,13 @@ class TestSubclassing(TestCase):
x = np.arange(5)
xsub = SubArray(x)
mxsub = masked_array(xsub, mask=[True, False, True, False, False])
- self.assertTrue(str(mxsub) == '[-- 1 -- 3 4]')
+ assert_(str(mxsub) == '[-- 1 -- 3 4]')
xcsub = ComplicatedSubArray(x)
assert_raises(ValueError, xcsub.__setitem__, 0,
np.ma.core.masked_print_option)
mxcsub = masked_array(xcsub, mask=[True, False, True, False, False])
- self.assertTrue(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
+ assert_(str(mxcsub) == 'myprefix [-- 1 -- 3 4] mypostfix')
def test_pure_subclass_info_preservation(self):
# Test that ufuncs and methods conserve extra information consistently;
@@ -362,11 +362,11 @@ class TestSubclassing(TestCase):
arr1 = SubMaskedArray('test', data=[1,2,3,4,5,6])
arr2 = SubMaskedArray(data=[0,1,2,3,4,5])
diff1 = np.subtract(arr1, arr2)
- self.assertTrue('info' in diff1._optinfo)
- self.assertTrue(diff1._optinfo['info'] == 'test')
+ assert_('info' in diff1._optinfo)
+ assert_(diff1._optinfo['info'] == 'test')
diff2 = arr1 - arr2
- self.assertTrue('info' in diff2._optinfo)
- self.assertTrue(diff2._optinfo['info'] == 'test')
+ assert_('info' in diff2._optinfo)
+ assert_(diff2._optinfo['info'] == 'test')
###############################################################################
diff --git a/numpy/ma/testutils.py b/numpy/ma/testutils.py
index c19066d71..a95c170c8 100644
--- a/numpy/ma/testutils.py
+++ b/numpy/ma/testutils.py
@@ -12,11 +12,11 @@ import operator
import numpy as np
from numpy import ndarray, float_
import numpy.core.umath as umath
+import numpy.testing
from numpy.testing import (
TestCase, assert_, assert_allclose, assert_array_almost_equal_nulp,
assert_raises, build_err_msg, run_module_suite
)
-import numpy.testing.utils as utils
from .core import mask_or, getmask, masked_array, nomask, masked, filled
__all__masked = [
@@ -211,11 +211,11 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
header=header, names=('x', 'y'))
raise ValueError(msg)
# OK, now run the basic tests on filled versions
- return utils.assert_array_compare(comparison,
- x.filled(fill_value),
- y.filled(fill_value),
- err_msg=err_msg,
- verbose=verbose, header=header)
+ return np.testing.assert_array_compare(comparison,
+ x.filled(fill_value),
+ y.filled(fill_value),
+ err_msg=err_msg,
+ verbose=verbose, header=header)
def assert_array_equal(x, y, err_msg='', verbose=True):
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index dae4b141b..68104ed0a 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -7,7 +7,7 @@ import numpy as np
from numpy import float_
import numpy.core.fromnumeric as fromnumeric
-from numpy.testing.utils import build_err_msg
+from numpy.testing import build_err_msg
# Fixme: this does not look right.
np.seterr(all='ignore')
diff --git a/numpy/matrixlib/__init__.py b/numpy/matrixlib/__init__.py
index b2b76837a..11dce2928 100644
--- a/numpy/matrixlib/__init__.py
+++ b/numpy/matrixlib/__init__.py
@@ -7,6 +7,6 @@ from .defmatrix import *
__all__ = defmatrix.__all__
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index f212a8c5e..e016b5f4c 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -137,7 +137,7 @@ def matrix_power(M, n):
M = asanyarray(M)
if M.ndim != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
- if not issubdtype(type(n), int):
+ if not issubdtype(type(n), N.integer):
raise TypeError("exponent must be an integer")
from numpy.linalg import inv
@@ -295,7 +295,7 @@ class matrix(N.ndarray):
# Determine when we should have a column array
try:
n = len(index)
- except:
+ except Exception:
n = 0
if n > 1 and isscalar(index[1]):
out.shape = (sh, 1)
@@ -1155,7 +1155,7 @@ def bmat(obj, ldict=None, gdict=None):
--------
block :
A generalization of this function for N-d arrays, that returns normal
- `ndarray`s.
+ ndarrays.
Examples
--------
diff --git a/numpy/matrixlib/tests/__init__.py b/numpy/matrixlib/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/matrixlib/tests/__init__.py
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index fd36d7770..77f262031 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -5,13 +5,13 @@ import collections
import numpy as np
from numpy import matrix, asmatrix, bmat
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_almost_equal,
+ run_module_suite, assert_, assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises
)
from numpy.matrixlib.defmatrix import matrix_power
from numpy.matrixlib import mat
-class TestCtor(TestCase):
+class TestCtor(object):
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
@@ -58,7 +58,7 @@ class TestCtor(TestCase):
assert_(np.all(b2 == mixresult))
-class TestProperties(TestCase):
+class TestProperties(object):
def test_sum(self):
"""Test whether matrix.sum(axis=1) preserves orientation.
Fails in NumPy <= 0.9.6.2127.
@@ -191,7 +191,7 @@ class TestProperties(TestCase):
B = matrix([[True], [True], [False]])
assert_array_equal(A, B)
-class TestCasting(TestCase):
+class TestCasting(object):
def test_basic(self):
A = np.arange(100).reshape(10, 10)
mA = matrix(A)
@@ -210,7 +210,7 @@ class TestCasting(TestCase):
assert_(np.all(mA != mB))
-class TestAlgebra(TestCase):
+class TestAlgebra(object):
def test_basic(self):
import numpy.linalg as linalg
@@ -249,6 +249,12 @@ class TestAlgebra(TestCase):
assert_array_almost_equal(m4, np.dot(m2, m2))
assert_array_almost_equal(np.dot(mi, m), np.eye(2))
+ def test_scalar_type_pow(self):
+ m = matrix([[1, 2], [3, 4]])
+ for scalar_t in [np.int8, np.uint8]:
+ two = scalar_t(2)
+ assert_array_almost_equal(m ** 2, m ** two)
+
def test_notimplemented(self):
'''Check that 'not implemented' operations produce a failure.'''
A = matrix([[1., 2.],
@@ -271,7 +277,7 @@ class TestAlgebra(TestCase):
self.fail("matrix.__mul__ with non-numeric object doesn't raise"
"a TypeError")
-class TestMatrixReturn(TestCase):
+class TestMatrixReturn(object):
def test_instance_methods(self):
a = matrix([1.0], dtype='f8')
methodargs = {
@@ -313,7 +319,7 @@ class TestMatrixReturn(TestCase):
assert_(type(d) is np.ndarray)
-class TestIndexing(TestCase):
+class TestIndexing(object):
def test_basic(self):
x = asmatrix(np.zeros((3, 2), float))
y = np.zeros((3, 1), float)
@@ -322,9 +328,8 @@ class TestIndexing(TestCase):
assert_equal(x, [[0, 1], [0, 0], [0, 0]])
-class TestNewScalarIndexing(TestCase):
- def setUp(self):
- self.a = matrix([[1, 2], [3, 4]])
+class TestNewScalarIndexing(object):
+ a = matrix([[1, 2], [3, 4]])
def test_dimesions(self):
a = self.a
@@ -390,7 +395,7 @@ class TestNewScalarIndexing(TestCase):
assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
-class TestPower(TestCase):
+class TestPower(object):
def test_returntype(self):
a = np.array([[0, 1], [0, 0]])
assert_(type(matrix_power(a, 2)) is np.ndarray)
@@ -401,10 +406,10 @@ class TestPower(TestCase):
assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
-class TestShape(TestCase):
- def setUp(self):
- self.a = np.array([[1], [2]])
- self.m = matrix([[1], [2]])
+class TestShape(object):
+
+ a = np.array([[1], [2]])
+ m = matrix([[1], [2]])
def test_shape(self):
assert_equal(self.a.shape, (2, 1))
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index d27e24ec9..bf891a196 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -2,10 +2,10 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_equal, assert_array_equal
+ run_module_suite, assert_, assert_equal, assert_array_equal
)
-class TestView(TestCase):
+class TestView(object):
def test_type(self):
x = np.array([1, 2, 3])
assert_(isinstance(x.view(np.matrix), np.matrix))
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index 28329da39..b826b8e81 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import assert_equal, TestCase, run_module_suite
+from numpy.testing import assert_equal, run_module_suite
-class TestDot(TestCase):
+class TestDot(object):
def test_matscalar(self):
b1 = np.matrix(np.ones((3, 3), dtype=complex))
assert_equal(b1*1.0, b1)
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 0839fbf28..32cb38ac7 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,17 +1,18 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite, assert_, assert_equal
+from numpy.testing import (
+ run_module_suite, assert_, assert_equal, assert_raises
+ )
-rlevel = 1
-class TestRegression(TestCase):
- def test_kron_matrix(self, level=rlevel):
+class TestRegression(object):
+ def test_kron_matrix(self):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
assert_equal(type(np.kron(x, x)), type(x))
- def test_matrix_properties(self,level=rlevel):
+ def test_matrix_properties(self):
# Ticket #125
a = np.matrix([1.0], dtype=float)
assert_(type(a.real) is np.matrix)
@@ -20,18 +21,18 @@ class TestRegression(TestCase):
assert_(type(c) is np.ndarray)
assert_(type(d) is np.ndarray)
- def test_matrix_multiply_by_1d_vector(self, level=rlevel):
+ def test_matrix_multiply_by_1d_vector(self):
# Ticket #473
def mul():
np.mat(np.eye(2))*np.ones(2)
- self.assertRaises(ValueError, mul)
+ assert_raises(ValueError, mul)
- def test_matrix_std_argmax(self,level=rlevel):
+ def test_matrix_std_argmax(self):
# Ticket #83
x = np.asmatrix(np.random.uniform(0, 1, (3, 3)))
- self.assertEqual(x.std().shape, ())
- self.assertEqual(x.argmax().shape, ())
+ assert_equal(x.std().shape, ())
+ assert_equal(x.argmax().shape, ())
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 82c350e9b..ae5b1f078 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -22,6 +22,6 @@ from .hermite import Hermite
from .hermite_e import HermiteE
from .laguerre import Laguerre
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 39f5fac31..78392d2a2 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -260,7 +260,7 @@ class ABCPolyBase(object):
self.window = window
def __repr__(self):
- format = "%s(%s, %s, %s)"
+ format = "%s(%s, domain=%s, window=%s)"
coef = repr(self.coef)[6:-1]
domain = repr(self.domain)[6:-1]
window = repr(self.window)[6:-1]
@@ -307,32 +307,26 @@ class ABCPolyBase(object):
return self
def __add__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._add(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __sub__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._sub(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __mul__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
coef = self._mul(self.coef, othercoef)
- except TypeError as e:
- raise e
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
@@ -362,12 +356,12 @@ class ABCPolyBase(object):
return res[1]
def __divmod__(self, other):
+ othercoef = self._get_coefficients(other)
try:
- othercoef = self._get_coefficients(other)
quo, rem = self._div(self.coef, othercoef)
- except (TypeError, ZeroDivisionError) as e:
+ except ZeroDivisionError as e:
raise e
- except:
+ except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
@@ -381,21 +375,21 @@ class ABCPolyBase(object):
def __radd__(self, other):
try:
coef = self._add(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rsub__(self, other):
try:
coef = self._sub(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
def __rmul__(self, other):
try:
coef = self._mul(other, self.coef)
- except:
+ except Exception:
return NotImplemented
return self.__class__(coef, self.domain, self.window)
@@ -425,7 +419,7 @@ class ABCPolyBase(object):
quo, rem = self._div(other, self.coef)
except ZeroDivisionError as e:
raise e
- except:
+ except Exception:
return NotImplemented
quo = self.__class__(quo, self.domain, self.window)
rem = self.__class__(rem, self.domain, self.window)
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 49d0302e0..fe2805a03 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -52,6 +52,7 @@ Misc Functions
- `chebline` -- Chebyshev series representing given straight line.
- `cheb2poly` -- convert a Chebyshev series to a polynomial.
- `poly2cheb` -- convert a polynomial to a Chebyshev series.
+- `chebinterpolate` -- interpolate a function at the Chebyshev points.
Classes
-------
@@ -87,6 +88,7 @@ References
"""
from __future__ import division, absolute_import, print_function
+import numbers
import warnings
import numpy as np
import numpy.linalg as la
@@ -102,7 +104,7 @@ __all__ = [
'chebvander', 'chebfit', 'chebtrim', 'chebroots', 'chebpts1',
'chebpts2', 'Chebyshev', 'chebval2d', 'chebval3d', 'chebgrid2d',
'chebgrid3d', 'chebvander2d', 'chebvander3d', 'chebcompanion',
- 'chebgauss', 'chebweight']
+ 'chebgauss', 'chebweight', 'chebinterpolate']
chebtrim = pu.trimcoef
@@ -359,10 +361,10 @@ def poly2cheb(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], [-1., 1.])
+ Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
- Chebyshev([ 1. , 3.25, 1. , 0.75], [-1., 1.])
+ Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
>>> P.poly2cheb(range(4))
array([ 1. , 3.25, 1. , 0.75])
@@ -942,7 +944,7 @@ def chebder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -958,7 +960,7 @@ def chebder(c, m=1, scl=1, axis=0):
der[1] = 4*c[2]
der[0] = c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1022,7 +1024,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a`- perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -1067,7 +1069,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -1086,7 +1088,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1220,12 +1222,12 @@ def chebval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = chebval(x, c)
@@ -1280,7 +1282,7 @@ def chebgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1333,12 +1335,12 @@ def chebval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = chebval(x, c)
@@ -1397,7 +1399,7 @@ def chebgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = chebval(x, c)
@@ -1458,7 +1460,7 @@ def chebvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x2 - v[i-2]
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def chebvander2d(x, y, deg):
@@ -1508,7 +1510,7 @@ def chebvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1572,7 +1574,7 @@ def chebvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1613,7 +1615,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int or 1-D array_like
- Degree(s) of the fitting polynomials. If `deg` is a single integer
+ Degree(s) of the fitting polynomials. If `deg` is a single integer,
all terms up to and including the `deg`'th term are included in the
fit. For NumPy versions >= 1.11.0 a list of integers specifying the
degrees of the terms to include may be used instead.
@@ -1808,7 +1810,7 @@ def chebcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1886,6 +1888,73 @@ def chebroots(c):
return r
+def chebinterpolate(func, deg, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the Chebyshev series that interpolates `func` at the Chebyshev
+ points of the first kind in the interval [-1, 1]. The interpolating
+ series tends to a minmax approximation to `func` with increasing `deg`
+ if the function is continuous in the interval.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be approximated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no extra
+ arguments.
+
+ Returns
+ -------
+ coef : ndarray, shape (deg + 1,)
+ Chebyshev coefficients of the interpolating series ordered from low to
+ high.
+
+ Examples
+ --------
+ >>> import numpy.polynomial.chebyshev as C
+ >>> C.chebfromfunction(lambda x: np.tanh(x) + 0.5, 8)
+ array([ 5.00000000e-01, 8.11675684e-01, -9.86864911e-17,
+ -5.42457905e-02, -2.71387850e-16, 4.51658839e-03,
+ 2.46716228e-17, -3.79694221e-04, -3.26899002e-16])
+
+ Notes
+ -----
+
+ The Chebyshev polynomials used in the interpolation are orthogonal when
+ sampled at the Chebyshev points of the first kind. If it is desired to
+ constrain some of the coefficients they can simply be set to the desired
+ value after the interpolation, no new interpolation or fit is needed. This
+ is especially useful if it is known apriori that some of coefficients are
+ zero. For instance, if the function is even then the coefficients of the
+ terms of odd degree in the result can be set to zero.
+
+ """
+ deg = np.asarray(deg)
+
+ # check arguments.
+ if deg.ndim > 0 or deg.dtype.kind not in 'iu' or deg.size == 0:
+ raise TypeError("deg must be an int")
+ if deg < 0:
+ raise ValueError("expected deg >= 0")
+
+ order = deg + 1
+ xcheb = chebpts1(order)
+ yfunc = func(xcheb, *args)
+ m = chebvander(xcheb, deg)
+ c = np.dot(m.T, yfunc)
+ c[0] /= order
+ c[1:] /= 0.5*order
+
+ return c
+
+
def chebgauss(deg):
"""
Gauss-Chebyshev quadrature.
@@ -2069,6 +2138,48 @@ class Chebyshev(ABCPolyBase):
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
+ @classmethod
+ def interpolate(cls, func, deg, domain=None, args=()):
+ """Interpolate a function at the Chebyshev points of the first kind.
+
+ Returns the series that interpolates `func` at the Chebyshev points of
+ the first kind scaled and shifted to the `domain`. The resulting series
+ tends to a minmax approximation of `func` when the function is
+ continuous in the domain.
+
+ .. versionadded:: 1.14.0
+
+ Parameters
+ ----------
+ func : function
+ The function to be interpolated. It must be a function of a single
+ variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
+ extra arguments passed in the `args` parameter.
+ deg : int
+ Degree of the interpolating polynomial.
+ domain : {None, [beg, end]}, optional
+ Domain over which `func` is interpolated. The default is None, in
+ which case the domain is [-1, 1].
+ args : tuple, optional
+ Extra arguments to be used in the function call. Default is no
+ extra arguments.
+
+ Returns
+ -------
+ polynomial : Chebyshev instance
+ Interpolating Chebyshev instance.
+
+ Notes
+ -----
+ See `numpy.polynomial.chebyshev.chebinterpolate` for more details.
+
+ """
+ if domain is None:
+ domain = cls.domain
+ xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
+ coef = chebinterpolate(xfunc, deg)
+ return cls(coef, domain=domain)
+
# Virtual properties
nickname = 'cheb'
domain = np.array(chebdomain)
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index a03fe722c..ae1143d28 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -706,7 +706,7 @@ def hermder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -718,7 +718,7 @@ def hermder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = (2*j)*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -782,7 +782,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -825,7 +825,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -840,7 +840,7 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[0] += k[i] - hermval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -983,12 +983,12 @@ def hermval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = hermval(x, c)
@@ -1043,7 +1043,7 @@ def hermgrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1096,12 +1096,12 @@ def hermval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = hermval(x, c)
@@ -1160,7 +1160,7 @@ def hermgrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermval(x, c)
@@ -1229,7 +1229,7 @@ def hermvander(x, deg):
v[1] = x2
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x2 - v[i-2]*(2*(i - 1)))
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def hermvander2d(x, y, deg):
@@ -1279,7 +1279,7 @@ def hermvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1343,7 +1343,7 @@ def hermvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1584,7 +1584,7 @@ def hermcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1732,7 +1732,7 @@ def hermgauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1796,7 +1796,7 @@ def hermweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x**2)
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index 2a29d61cf..ee29ec5d3 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -705,7 +705,7 @@ def hermeder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
return c[:1]*0
@@ -717,7 +717,7 @@ def hermeder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -781,7 +781,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -824,7 +824,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -839,7 +839,7 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -981,12 +981,12 @@ def hermeval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
@@ -1041,7 +1041,7 @@ def hermegrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1094,12 +1094,12 @@ def hermeval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
@@ -1158,7 +1158,7 @@ def hermegrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = hermeval(x, c)
@@ -1226,7 +1226,7 @@ def hermevander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def hermevander2d(x, y, deg):
@@ -1276,7 +1276,7 @@ def hermevander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1340,7 +1340,7 @@ def hermevander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def hermecompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1730,7 +1730,7 @@ def hermegauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1793,7 +1793,7 @@ def hermeweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-.5*x**2)
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index c9e1302e1..079cf97b3 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -703,7 +703,7 @@ def lagder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -717,7 +717,7 @@ def lagder(c, m=1, scl=1, axis=0):
c[j - 1] += c[j]
der[0] = -c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -782,7 +782,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -825,7 +825,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -841,7 +841,7 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = -c[j]
tmp[0] += k[i] - lagval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -983,12 +983,12 @@ def lagval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = lagval(x, c)
@@ -1043,7 +1043,7 @@ def laggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1096,12 +1096,12 @@ def lagval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = lagval(x, c)
@@ -1160,7 +1160,7 @@ def laggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = lagval(x, c)
@@ -1228,7 +1228,7 @@ def lagvander(x, deg):
v[1] = 1 - x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*(2*i - 1 - x) - v[i-2]*(i - 1))/i
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def lagvander2d(x, y, deg):
@@ -1278,7 +1278,7 @@ def lagvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1342,7 +1342,7 @@ def lagvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1582,7 +1582,7 @@ def lagcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1687,7 +1687,7 @@ def laggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100 higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1747,7 +1747,7 @@ def lagweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = np.exp(-x)
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index be8410b82..1c42f4881 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -136,10 +136,10 @@ def poly2leg(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], [-1., 1.])
- >>> c = P.Legendre(P.poly2leg(p.coef))
+ Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ >>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
- Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
+ Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
"""
[pol] = pu.as_series([pol])
@@ -742,7 +742,7 @@ def legder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -758,7 +758,7 @@ def legder(c, m=1, scl=1, axis=0):
der[1] = 3*c[2]
der[0] = c[1]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -822,7 +822,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
@@ -867,7 +867,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
@@ -886,7 +886,7 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -1021,12 +1021,12 @@ def legval2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = legval(x, c)
@@ -1081,7 +1081,7 @@ def leggrid2d(x, y, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1134,12 +1134,12 @@ def legval3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
@@ -1198,7 +1198,7 @@ def leggrid3d(x, y, z, c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
c = legval(x, c)
@@ -1259,7 +1259,7 @@ def legvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
@@ -1309,7 +1309,7 @@ def legvander2d(x, y, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1373,7 +1373,7 @@ def legvander3d(x, y, z, deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
@@ -1611,7 +1611,7 @@ def legcompanion(c):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
# c is a trimmed copy
@@ -1712,7 +1712,7 @@ def leggauss(deg):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
@@ -1777,7 +1777,7 @@ def legweight(x):
Notes
-----
- .. versionadded::1.7.0
+ .. versionadded:: 1.7.0
"""
w = x*0.0 + 1.0
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index c357b48c9..1be775f6a 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -546,7 +546,7 @@ def polyder(c, m=1, scl=1, axis=0):
if cnt == 0:
return c
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
n = len(c)
if cnt >= n:
c = c[:1]*0
@@ -558,7 +558,7 @@ def polyder(c, m=1, scl=1, axis=0):
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -619,7 +619,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
Note that the result of each integration is *multiplied* by `scl`. Why
is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
- .. math::`dx = du/a`, so one will need to set `scl` equal to
+ :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Examples
@@ -662,7 +662,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
return c
k = list(k) + [0]*(cnt - len(k))
- c = np.rollaxis(c, iaxis)
+ c = np.moveaxis(c, iaxis, 0)
for i in range(cnt):
n = len(c)
c *= scl
@@ -676,7 +676,7 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - polyval(lbnd, tmp)
c = tmp
- c = np.rollaxis(c, 0, iaxis + 1)
+ c = np.moveaxis(c, 0, iaxis)
return c
@@ -913,7 +913,7 @@ def polyval2d(x, y, c):
"""
try:
x, y = np.array((x, y), copy=0)
- except:
+ except Exception:
raise ValueError('x, y are incompatible')
c = polyval(x, c)
@@ -1026,7 +1026,7 @@ def polyval3d(x, y, z, c):
"""
try:
x, y, z = np.array((x, y, z), copy=0)
- except:
+ except Exception:
raise ValueError('x, y, z are incompatible')
c = polyval(x, c)
@@ -1147,7 +1147,7 @@ def polyvander(x, deg):
v[1] = x
for i in range(2, ideg + 1):
v[i] = v[i-1]*x
- return np.rollaxis(v, 0, v.ndim)
+ return np.moveaxis(v, 0, -1)
def polyvander2d(x, y, deg):
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 5b6663bfd..e2dba1a55 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -182,7 +182,7 @@ def as_series(alist, trim=True):
else:
try:
dtype = np.common_type(*arrays)
- except:
+ except Exception:
raise ValueError("Coefficient arrays have no common type")
ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
return ret
@@ -236,7 +236,7 @@ def trimcoef(c, tol=0):
raise ValueError("tol must be non-negative")
[c] = as_series([c])
- [ind] = np.where(np.abs(c) > tol)
+ [ind] = np.nonzero(np.abs(c) > tol)
if len(ind) == 0:
return c[:1]*0
else:
diff --git a/numpy/polynomial/tests/__init__.py b/numpy/polynomial/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/polynomial/tests/__init__.py
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index dc0cd14b3..1a34f42b0 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.chebyshev as cheb
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
def trim(x):
@@ -28,7 +29,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestPrivate(TestCase):
+class TestPrivate(object):
def test__cseries_to_zseries(self):
for i in range(5):
@@ -45,7 +46,7 @@ class TestPrivate(TestCase):
assert_equal(res, tgt)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
@@ -60,7 +61,7 @@ class TestConstants(TestCase):
assert_equal(cheb.chebx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
def test_chebadd(self):
for i in range(5):
@@ -112,7 +113,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -206,7 +207,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_chebint(self):
# check exceptions
@@ -305,7 +306,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_chebder(self):
# check exceptions
@@ -345,7 +346,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -393,7 +394,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_chebfit(self):
def f(x):
@@ -470,7 +471,32 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestInterpolate(object):
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, cheb.chebinterpolate, self.f, -1)
+ assert_raises(TypeError, cheb.chebinterpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(cheb.chebinterpolate(self.f, deg).shape == (deg + 1,))
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(-1, 1, 10)
+ for deg in range(0, 10):
+ for p in range(0, deg + 1):
+ c = cheb.chebinterpolate(powx, deg, (p,))
+ assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
+
+
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
@@ -485,7 +511,7 @@ class TestCompanion(TestCase):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = cheb.chebgauss(100)
@@ -504,7 +530,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_chebfromroots(self):
res = cheb.chebfromroots([])
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 46d721df4..2ec8277ff 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -583,5 +583,30 @@ def check_ufunc_override(Poly):
assert_raises(TypeError, np.add, x, p)
+class TestInterpolate(object):
+
+ def f(self, x):
+ return x * (x - 1) * (x - 2)
+
+ def test_raises(self):
+ assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
+ assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)
+
+ def test_dimensions(self):
+ for deg in range(1, 5):
+ assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)
+
+ def test_approximation(self):
+
+ def powx(x, p):
+ return x**p
+
+ x = np.linspace(0, 2, 10)
+ for deg in range(0, 10):
+ for t in range(0, deg + 1):
+ p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
+ assert_almost_equal(p(x), powx(x, t), decimal=12)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 06ce46ae4..2e39d854d 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
H0 = np.array([1])
H1 = np.array([0, 2])
@@ -28,7 +29,7 @@ def trim(x):
return herm.hermtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(herm.hermx, [0, .5])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
@@ -100,7 +101,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -194,7 +195,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_hermint(self):
# check exceptions
@@ -293,7 +294,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_hermder(self):
# check exceptions
@@ -333,7 +334,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -381,7 +382,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_hermfit(self):
def f(x):
@@ -458,7 +459,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
@@ -473,7 +474,7 @@ class TestCompanion(TestCase):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = herm.hermgauss(100)
@@ -492,7 +493,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_hermfromroots(self):
res = herm.hermfromroots([])
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index 38da325f6..a81910787 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
He0 = np.array([1])
He1 = np.array([0, 1])
@@ -28,7 +29,7 @@ def trim(x):
return herme.hermetrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(herme.hermex, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
@@ -100,7 +101,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -194,7 +195,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_hermeint(self):
# check exceptions
@@ -293,7 +294,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_hermeder(self):
# check exceptions
@@ -334,7 +335,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -382,7 +383,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_hermefit(self):
def f(x):
@@ -459,7 +460,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
@@ -474,7 +475,7 @@ class TestCompanion(TestCase):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = herme.hermegauss(100)
@@ -493,7 +494,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_hermefromroots(self):
res = herme.hermefromroots([])
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 0fa76b48a..17a3f7558 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.laguerre as lag
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
L0 = np.array([1])/1
L1 = np.array([1, -1])/1
@@ -25,7 +26,7 @@ def trim(x):
return lag.lagtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
@@ -40,7 +41,7 @@ class TestConstants(TestCase):
assert_equal(lag.lagx, [1, -1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
@@ -97,7 +98,7 @@ class TestArithmetic(TestCase):
assert_almost_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -191,7 +192,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_lagint(self):
# check exceptions
@@ -290,7 +291,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_lagder(self):
# check exceptions
@@ -330,7 +331,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -378,7 +379,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_lagfit(self):
def f(x):
@@ -440,7 +441,7 @@ class TestFitting(TestCase):
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
@@ -455,7 +456,7 @@ class TestCompanion(TestCase):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = lag.laggauss(100)
@@ -474,7 +475,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_lagfromroots(self):
res = lag.lagfromroots([])
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index 485bc9688..375f41d49 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -7,8 +7,9 @@ import numpy as np
import numpy.polynomial.legendre as leg
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
L0 = np.array([1])
L1 = np.array([0, 1])
@@ -28,7 +29,7 @@ def trim(x):
return leg.legtrim(x, tol=1e-6)
-class TestConstants(TestCase):
+class TestConstants(object):
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
@@ -43,7 +44,7 @@ class TestConstants(TestCase):
assert_equal(leg.legx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
x = np.linspace(-1, 1, 100)
def test_legadd(self):
@@ -101,7 +102,7 @@ class TestArithmetic(TestCase):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -195,7 +196,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_legint(self):
# check exceptions
@@ -294,7 +295,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_legder(self):
# check exceptions
@@ -334,7 +335,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -382,7 +383,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestFitting(TestCase):
+class TestFitting(object):
def test_legfit(self):
def f(x):
@@ -459,7 +460,7 @@ class TestFitting(TestCase):
assert_almost_equal(coef1, coef2)
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
@@ -474,7 +475,7 @@ class TestCompanion(TestCase):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(TestCase):
+class TestGauss(object):
def test_100(self):
x, w = leg.leggauss(100)
@@ -493,7 +494,7 @@ class TestGauss(TestCase):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_legfromroots(self):
res = leg.legfromroots([])
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 037be5927..bf6c5e814 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -6,8 +6,9 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
def trim(x):
@@ -27,7 +28,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestConstants(TestCase):
+class TestConstants(object):
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
@@ -42,7 +43,7 @@ class TestConstants(TestCase):
assert_equal(poly.polyx, [0, 1])
-class TestArithmetic(TestCase):
+class TestArithmetic(object):
def test_polyadd(self):
for i in range(5):
@@ -103,7 +104,7 @@ class TestArithmetic(TestCase):
assert_equal(res, tgt, err_msg=msg)
-class TestEvaluation(TestCase):
+class TestEvaluation(object):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -263,7 +264,7 @@ class TestEvaluation(TestCase):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(TestCase):
+class TestIntegral(object):
def test_polyint(self):
# check exceptions
@@ -357,7 +358,7 @@ class TestIntegral(TestCase):
assert_almost_equal(res, tgt)
-class TestDerivative(TestCase):
+class TestDerivative(object):
def test_polyder(self):
# check exceptions
@@ -397,7 +398,7 @@ class TestDerivative(TestCase):
assert_almost_equal(res, tgt)
-class TestVander(TestCase):
+class TestVander(object):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -445,7 +446,7 @@ class TestVander(TestCase):
assert_(van.shape == (1, 5, 24))
-class TestCompanion(TestCase):
+class TestCompanion(object):
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
@@ -460,7 +461,7 @@ class TestCompanion(TestCase):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
-class TestMisc(TestCase):
+class TestMisc(object):
def test_polyfromroots(self):
res = poly.polyfromroots([])
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 974e2e09a..bd1cb2008 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -6,11 +6,12 @@ from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.polyutils as pu
from numpy.testing import (
- TestCase, assert_almost_equal, assert_raises,
- assert_equal, assert_, run_module_suite)
+ assert_almost_equal, assert_raises, assert_equal, assert_,
+ run_module_suite
+ )
-class TestMisc(TestCase):
+class TestMisc(object):
def test_trimseq(self):
for i in range(5):
@@ -43,7 +44,7 @@ class TestMisc(TestCase):
assert_equal(pu.trimcoef(coef, 2), [0])
-class TestDomain(TestCase):
+class TestDomain(object):
def test_getdomain(self):
# test for real values
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index 86cd25732..f403812c9 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -1,71 +1,71 @@
from __future__ import division, absolute_import, print_function
import numpy.polynomial as poly
-from numpy.testing import TestCase, run_module_suite, assert_
+from numpy.testing import run_module_suite, assert_equal
-class test_str(TestCase):
+class TestStr(object):
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
- tgt = 'poly([0., 1.])'
- assert_(res, tgt)
+ tgt = 'poly([ 0. 1.])'
+ assert_equal(res, tgt)
def test_chebyshev_str(self):
res = str(poly.Chebyshev([0, 1]))
- tgt = 'leg([0., 1.])'
- assert_(res, tgt)
+ tgt = 'cheb([ 0. 1.])'
+ assert_equal(res, tgt)
def test_legendre_str(self):
res = str(poly.Legendre([0, 1]))
- tgt = 'leg([0., 1.])'
- assert_(res, tgt)
+ tgt = 'leg([ 0. 1.])'
+ assert_equal(res, tgt)
def test_hermite_str(self):
res = str(poly.Hermite([0, 1]))
- tgt = 'herm([0., 1.])'
- assert_(res, tgt)
+ tgt = 'herm([ 0. 1.])'
+ assert_equal(res, tgt)
def test_hermiteE_str(self):
res = str(poly.HermiteE([0, 1]))
- tgt = 'herme([0., 1.])'
- assert_(res, tgt)
+ tgt = 'herme([ 0. 1.])'
+ assert_equal(res, tgt)
def test_laguerre_str(self):
res = str(poly.Laguerre([0, 1]))
- tgt = 'lag([0., 1.])'
- assert_(res, tgt)
+ tgt = 'lag([ 0. 1.])'
+ assert_equal(res, tgt)
-class test_repr(TestCase):
+class TestRepr(object):
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
- tgt = 'Polynomial([0., 1.])'
- assert_(res, tgt)
+ tgt = 'Polynomial([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_chebyshev_str(self):
res = repr(poly.Chebyshev([0, 1]))
- tgt = 'Chebyshev([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Chebyshev([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_legendre_repr(self):
res = repr(poly.Legendre([0, 1]))
- tgt = 'Legendre([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Legendre([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_hermite_repr(self):
res = repr(poly.Hermite([0, 1]))
- tgt = 'Hermite([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'Hermite([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_hermiteE_repr(self):
res = repr(poly.HermiteE([0, 1]))
- tgt = 'HermiteE([0., 1.], [-1., 1.], [-1., 1.])'
- assert_(res, tgt)
+ tgt = 'HermiteE([ 0., 1.], domain=[-1, 1], window=[-1, 1])'
+ assert_equal(res, tgt)
def test_laguerre_repr(self):
res = repr(poly.Laguerre([0, 1]))
- tgt = 'Laguerre([0., 1.], [0., 1.], [0., 1.])'
- assert_(res, tgt)
+ tgt = 'Laguerre([ 0., 1.], domain=[0, 1], window=[0, 1])'
+ assert_equal(res, tgt)
#
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 6c7d3140f..869818a22 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -117,6 +117,6 @@ def __RandomState_ctor():
"""
return RandomState(seed=0)
-from numpy.testing.nosetester import _numpy_tester
+from numpy.testing import _numpy_tester
test = _numpy_tester().test
bench = _numpy_tester().bench
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index e195700d4..7673f92b4 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -41,10 +41,10 @@
* SOFTWARE OR ITS DOCUMENTATION.
*/
-#include <math.h>
-#include <stdlib.h>
#include "distributions.h"
#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
#ifndef min
#define min(x,y) ((x<y)?x:y)
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index c0082a782..9e8a79804 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -211,7 +211,7 @@ cdef object cont1_array(rk_state *state, rk_cont1 func, object size,
itera = <flatiter>PyArray_IterNew(<object>oa)
with lock, nogil:
for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>(itera.dataptr))[0])
+ array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
PyArray_ITER_NEXT(itera)
else:
array = <ndarray>np.empty(size, np.float64)
@@ -536,7 +536,7 @@ cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa,
itera = <flatiter>PyArray_IterNew(<object>oa)
with lock, nogil:
for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>(itera.dataptr))[0])
+ array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
PyArray_ITER_NEXT(itera)
else:
array = <ndarray>np.empty(size, int)
@@ -1469,7 +1469,7 @@ cdef class RandomState:
4
>>> type(np.random.random_integers(5))
<type 'int'>
- >>> np.random.random_integers(5, size=(3.,2.))
+ >>> np.random.random_integers(5, size=(3,2))
array([[5, 4],
[3, 3],
[4, 5]])
@@ -1951,7 +1951,7 @@ cdef class RandomState:
--------
Draw samples from the distribution:
- >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
>>> s = np.random.gamma(shape, scale, 1000)
Display the histogram of the samples, along with
@@ -2007,10 +2007,10 @@ cdef class RandomState:
Parameters
----------
- dfnum : int or array_like of ints
- Degrees of freedom in numerator. Should be greater than zero.
- dfden : int or array_like of ints
- Degrees of freedom in denominator. Should be greater than zero.
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, should be > 0.
+ dfden : float or array_like of float
+ Degrees of freedom in denominator, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2109,12 +2109,16 @@ cdef class RandomState:
Parameters
----------
- dfnum : int or array_like of ints
- Parameter, should be > 1.
- dfden : int or array_like of ints
- Parameter, should be > 1.
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, should be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, should be > 0.
nonc : float or array_like of floats
- Parameter, should be >= 0.
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, should be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2175,8 +2179,8 @@ cdef class RandomState:
fdfden = PyFloat_AsDouble(dfden)
fnonc = PyFloat_AsDouble(nonc)
- if fdfnum <= 1:
- raise ValueError("dfnum <= 1")
+ if fdfnum <= 0:
+ raise ValueError("dfnum <= 0")
if fdfden <= 0:
raise ValueError("dfden <= 0")
if fnonc < 0:
@@ -2184,8 +2188,8 @@ cdef class RandomState:
return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
fdfnum, fdfden, fnonc, self.lock)
- if np.any(np.less_equal(odfnum, 1.0)):
- raise ValueError("dfnum <= 1")
+ if np.any(np.less_equal(odfnum, 0.0)):
+ raise ValueError("dfnum <= 0")
if np.any(np.less_equal(odfden, 0.0)):
raise ValueError("dfden <= 0")
if np.any(np.less(ononc, 0.0)):
@@ -2206,8 +2210,8 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
- Number of degrees of freedom.
+ df : float or array_like of floats
+ Number of degrees of freedom, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2285,9 +2289,11 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0 as of NumPy 1.10.0,
- should be > 1 for earlier versions.
+ df : float or array_like of floats
+ Degrees of freedom, should be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required dfnum > 1.
nonc : float or array_like of floats
Non-centrality, should be non-negative.
size : int or tuple of ints, optional
@@ -2455,7 +2461,7 @@ cdef class RandomState:
Parameters
----------
- df : int or array_like of ints
+ df : float or array_like of floats
Degrees of freedom, should be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -4666,6 +4672,11 @@ cdef class RandomState:
samples : ndarray,
The drawn samples, of shape (size, alpha.ndim).
+ Raises
+ -------
+ ValueError
+ If any value in alpha is less than or equal to zero
+
Notes
-----
.. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}
@@ -4731,6 +4742,8 @@ cdef class RandomState:
k = len(alpha)
alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1)
+ if np.any(np.less_equal(alpha_arr, 0)):
+ raise ValueError('alpha <= 0')
alpha_data = <double*>PyArray_DATA(alpha_arr)
shape = _shape_from_size(size, k)
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index d5b0d74ca..32b19c1ab 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -130,6 +130,7 @@ cdef extern from "numpy/arrayobject.h":
object PyArray_IterNew(object arr)
void PyArray_ITER_NEXT(flatiter it) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
dtype PyArray_DescrFromType(int)
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
index 3a95efeeb..380917180 100644
--- a/numpy/random/mtrand/randomkit.c
+++ b/numpy/random/mtrand/randomkit.c
@@ -64,13 +64,6 @@
/* static char const rcsid[] =
"@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <math.h>
-#include <assert.h>
#ifdef _WIN32
/*
@@ -109,18 +102,27 @@
#include <wincrypt.h>
#endif
+/*
+ * Do not move this include. randomkit.h must be included
+ * after windows timeb.h is included.
+ */
+#include "randomkit.h"
+
#else
/* Unix */
+#include "randomkit.h"
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#endif
-/*
- * Do not move this include. randomkit.h must be included
- * after windows timeb.h is included.
- */
-#include "randomkit.h"
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <assert.h>
#ifndef RK_DEV_URANDOM
#define RK_DEV_URANDOM "/dev/urandom"
diff --git a/numpy/random/tests/__init__.py b/numpy/random/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/random/tests/__init__.py
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 0e7396494..a530b9e13 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -3,15 +3,16 @@ import warnings
import numpy as np
from numpy.testing import (
- TestCase, run_module_suite, assert_, assert_raises, assert_equal,
- assert_warns, assert_no_warnings, assert_array_equal,
- assert_array_almost_equal, suppress_warnings)
+ run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings
+ )
from numpy import random
import sys
import warnings
-class TestSeed(TestCase):
+class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -42,7 +43,7 @@ class TestSeed(TestCase):
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
-class TestBinomial(TestCase):
+class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -57,7 +58,7 @@ class TestBinomial(TestCase):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(TestCase):
+class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -82,11 +83,11 @@ class TestMultinomial(TestCase):
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
- np.float(1))
+ float(1))
-class TestSetState(TestCase):
- def setUp(self):
+class TestSetState(object):
+ def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
@@ -133,7 +134,7 @@ class TestSetState(TestCase):
self.prng.negative_binomial(0.5, 0.5)
-class TestRandint(TestCase):
+class TestRandint(object):
rfunc = np.random.randint
@@ -142,7 +143,7 @@ class TestRandint(TestCase):
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
- assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
@@ -199,7 +200,7 @@ class TestRandint(TestCase):
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
- # in the range [0, 6) for all but np.bool, where the range
+ # in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
@@ -225,9 +226,9 @@ class TestRandint(TestCase):
# bools do not depend on endianess
np.random.seed(1234)
- val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
- assert_(tgt[np.dtype(np.bool).name] == res)
+ assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
@@ -259,23 +260,23 @@ class TestRandint(TestCase):
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
- self.assertEqual(sample.dtype, np.dtype(dt))
+ assert_equal(sample.dtype, np.dtype(dt))
- for dt in (np.bool, np.int, np.long):
- lbnd = 0 if dt is np.bool else np.iinfo(dt).min
- ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
+ for dt in (bool, int, np.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
- self.assertFalse(hasattr(sample, 'dtype'))
- self.assertEqual(type(sample), dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
-class TestRandomDist(TestCase):
+class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
- def setUp(self):
+ def setup(self):
self.seed = 1234567890
def test_rand(self):
@@ -522,7 +523,12 @@ class TestRandomDist(TestCase):
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
- assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
+ assert_raises(TypeError, np.random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
@@ -929,10 +935,10 @@ class TestRandomDist(TestCase):
assert_array_equal(actual, desired)
-class TestBroadcast(TestCase):
+class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
- def setUp(self):
+ def setup(self):
self.seed = 123456789
def setSeed(self):
@@ -1100,7 +1106,13 @@ class TestBroadcast(TestCase):
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
-
+
+ def test_noncentral_f_small_df(self):
+ self.setSeed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
def test_chisquare(self):
df = [1]
bad_df = [-1]
@@ -1484,9 +1496,9 @@ class TestBroadcast(TestCase):
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(TestCase):
+class TestThread(object):
# make sure each state produces the same sequence even in threads
- def setUp(self):
+ def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
@@ -1527,8 +1539,8 @@ class TestThread(TestCase):
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
-class TestSingleEltArrayInput(TestCase):
- def setUp(self):
+class TestSingleEltArrayInput(object):
+ def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
@@ -1551,7 +1563,7 @@ class TestSingleEltArrayInput(TestCase):
else:
out = func(self.argOne)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
@@ -1572,17 +1584,17 @@ class TestSingleEltArrayInput(TestCase):
argTwo = self.argTwo
out = func(self.argOne, argTwo)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
-# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
@@ -1604,13 +1616,13 @@ class TestSingleEltArrayInput(TestCase):
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
- self.assertEqual(out.shape, self.tgtShape)
+ assert_equal(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index ce435b374..572f4c087 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -1,14 +1,15 @@
from __future__ import division, absolute_import, print_function
import sys
-from numpy.testing import (TestCase, run_module_suite, assert_,
- assert_array_equal, assert_raises)
+from numpy.testing import (
+ run_module_suite, assert_, assert_array_equal, assert_raises,
+ )
from numpy import random
from numpy.compat import long
import numpy as np
-class TestRegression(TestCase):
+class TestRegression(object):
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
diff --git a/numpy/testing/__init__.py b/numpy/testing/__init__.py
index 625fdecdc..9485b455e 100644
--- a/numpy/testing/__init__.py
+++ b/numpy/testing/__init__.py
@@ -10,6 +10,6 @@ from __future__ import division, absolute_import, print_function
from unittest import TestCase
from . import decorators as dec
-from .nosetester import run_module_suite, NoseTester as Tester
+from .nosetester import run_module_suite, NoseTester as Tester, _numpy_tester
from .utils import *
-test = nosetester._numpy_tester().test
+test = _numpy_tester().test
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 17400c0d5..b63850090 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -1,265 +1,6 @@
"""
-Decorators for labeling and modifying behavior of test objects.
-
-Decorators that merely return a modified version of the original
-function object are straightforward. Decorators that return a new
-function object need to use
-::
-
- nose.tools.make_decorator(original_function)(decorator)
-
-in returning the decorator, in order to preserve meta-data such as
-function name, setup and teardown functions and so on - see
-``nose.tools`` for more information.
+Back compatibility decorators module. It will import the appropriate
+set of tools
"""
-from __future__ import division, absolute_import, print_function
-
-import collections
-
-from .utils import SkipTest, assert_warns
-
-
-def slow(t):
- """
- Label a test as 'slow'.
-
- The exact definition of a slow test is obviously both subjective and
- hardware-dependent, but in general any individual test that requires more
- than a second or two should be labeled as slow (the whole suite consits of
- thousands of tests, so even a second is significant).
-
- Parameters
- ----------
- t : callable
- The test to label as slow.
-
- Returns
- -------
- t : callable
- The decorated test `t`.
-
- Examples
- --------
- The `numpy.testing` module includes ``import decorators as dec``.
- A test can be decorated as slow like this::
-
- from numpy.testing import *
-
- @dec.slow
- def test_big(self):
- print('Big, slow test')
-
- """
-
- t.slow = True
- return t
-
-def setastest(tf=True):
- """
- Signals to nose that this function is or is not a test.
-
- Parameters
- ----------
- tf : bool
- If True, specifies that the decorated callable is a test.
- If False, specifies that the decorated callable is not a test.
- Default is True.
-
- Notes
- -----
- This decorator can't use the nose namespace, because it can be
- called from a non-test module. See also ``istest`` and ``nottest`` in
- ``nose.tools``.
-
- Examples
- --------
- `setastest` can be used in the following way::
-
- from numpy.testing.decorators import setastest
-
- @setastest(False)
- def func_with_test_in_name(arg1, arg2):
- pass
-
- """
- def set_test(t):
- t.__test__ = tf
- return t
- return set_test
-
-def skipif(skip_condition, msg=None):
- """
- Make function raise SkipTest exception if a given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- skip_condition : bool or callable
- Flag to determine whether to skip the decorated test.
- msg : str, optional
- Message to give on raising a SkipTest exception. Default is None.
-
- Returns
- -------
- decorator : function
- Decorator which, when applied to a function, causes SkipTest
- to be raised when `skip_condition` is True, and the function
- to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
-
- def skip_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- # Allow for both boolean or callable skip conditions.
- if isinstance(skip_condition, collections.Callable):
- skip_val = lambda: skip_condition()
- else:
- skip_val = lambda: skip_condition
-
- def get_msg(func,msg=None):
- """Skip message with information about function being skipped."""
- if msg is None:
- out = 'Test skipped due to test condition'
- else:
- out = msg
-
- return "Skipping test: %s: %s" % (func.__name__, out)
-
- # We need to define *two* skippers because Python doesn't allow both
- # return with value and yield inside the same function.
- def skipper_func(*args, **kwargs):
- """Skipper for normal test functions."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- return f(*args, **kwargs)
-
- def skipper_gen(*args, **kwargs):
- """Skipper for test generators."""
- if skip_val():
- raise SkipTest(get_msg(f, msg))
- else:
- for x in f(*args, **kwargs):
- yield x
-
- # Choose the right skipper to use when building the actual decorator.
- if nose.util.isgenerator(f):
- skipper = skipper_gen
- else:
- skipper = skipper_func
-
- return nose.tools.make_decorator(f)(skipper)
-
- return skip_decorator
-
-
-def knownfailureif(fail_condition, msg=None):
- """
- Make function raise KnownFailureException exception if given condition is true.
-
- If the condition is a callable, it is used at runtime to dynamically
- make the decision. This is useful for tests that may require costly
- imports, to delay the cost until the test suite is actually executed.
-
- Parameters
- ----------
- fail_condition : bool or callable
- Flag to determine whether to mark the decorated test as a known
- failure (if True) or not (if False).
- msg : str, optional
- Message to give on raising a KnownFailureException exception.
- Default is None.
-
- Returns
- -------
- decorator : function
- Decorator, which, when applied to a function, causes
- KnownFailureException to be raised when `fail_condition` is True,
- and the function to be called normally otherwise.
-
- Notes
- -----
- The decorator itself is decorated with the ``nose.tools.make_decorator``
- function in order to transmit function name, and various other metadata.
-
- """
- if msg is None:
- msg = 'Test skipped due to known failure'
-
- # Allow for both boolean or callable known failure conditions.
- if isinstance(fail_condition, collections.Callable):
- fail_val = lambda: fail_condition()
- else:
- fail_val = lambda: fail_condition
-
- def knownfail_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
- from .noseclasses import KnownFailureException
-
- def knownfailer(*args, **kwargs):
- if fail_val():
- raise KnownFailureException(msg)
- else:
- return f(*args, **kwargs)
- return nose.tools.make_decorator(f)(knownfailer)
-
- return knownfail_decorator
-
-def deprecated(conditional=True):
- """
- Filter deprecation warnings while running the test suite.
-
- This decorator can be used to filter DeprecationWarning's, to avoid
- printing them during the test suite run, while checking that the test
- actually raises a DeprecationWarning.
-
- Parameters
- ----------
- conditional : bool or callable, optional
- Flag to determine whether to mark test as deprecated or not. If the
- condition is a callable, it is used at runtime to dynamically make the
- decision. Default is True.
-
- Returns
- -------
- decorator : function
- The `deprecated` decorator itself.
-
- Notes
- -----
- .. versionadded:: 1.4.0
-
- """
- def deprecate_decorator(f):
- # Local import to avoid a hard nose dependency and only incur the
- # import time overhead at actual test-time.
- import nose
-
- def _deprecated_imp(*args, **kwargs):
- # Poor man's replacement for the with statement
- with assert_warns(DeprecationWarning):
- f(*args, **kwargs)
-
- if isinstance(conditional, collections.Callable):
- cond = conditional()
- else:
- cond = conditional
- if cond:
- return nose.tools.make_decorator(f)(_deprecated_imp)
- else:
- return f
- return deprecate_decorator
+from .nose_tools.decorators import *
diff --git a/numpy/testing/nose_tools/__init__.py b/numpy/testing/nose_tools/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/testing/nose_tools/__init__.py
diff --git a/numpy/testing/nose_tools/decorators.py b/numpy/testing/nose_tools/decorators.py
new file mode 100644
index 000000000..12531e734
--- /dev/null
+++ b/numpy/testing/nose_tools/decorators.py
@@ -0,0 +1,282 @@
+"""
+Decorators for labeling and modifying behavior of test objects.
+
+Decorators that merely return a modified version of the original
+function object are straightforward. Decorators that return a new
+function object need to use
+::
+
+ nose.tools.make_decorator(original_function)(decorator)
+
+in returning the decorator, in order to preserve meta-data such as
+function name, setup and teardown functions and so on - see
+``nose.tools`` for more information.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import collections
+
+from .utils import SkipTest, assert_warns
+
+
+def slow(t):
+ """
+ Label a test as 'slow'.
+
+ The exact definition of a slow test is obviously both subjective and
+ hardware-dependent, but in general any individual test that requires more
+ than a second or two should be labeled as slow (the whole suite consits of
+ thousands of tests, so even a second is significant).
+
+ Parameters
+ ----------
+ t : callable
+ The test to label as slow.
+
+ Returns
+ -------
+ t : callable
+ The decorated test `t`.
+
+ Examples
+ --------
+ The `numpy.testing` module includes ``import decorators as dec``.
+ A test can be decorated as slow like this::
+
+ from numpy.testing import *
+
+ @dec.slow
+ def test_big(self):
+ print('Big, slow test')
+
+ """
+
+ t.slow = True
+ return t
+
+def setastest(tf=True):
+ """
+ Signals to nose that this function is or is not a test.
+
+ Parameters
+ ----------
+ tf : bool
+ If True, specifies that the decorated callable is a test.
+ If False, specifies that the decorated callable is not a test.
+ Default is True.
+
+ Notes
+ -----
+ This decorator can't use the nose namespace, because it can be
+ called from a non-test module. See also ``istest`` and ``nottest`` in
+ ``nose.tools``.
+
+ Examples
+ --------
+ `setastest` can be used in the following way::
+
+ from numpy.testing import dec
+
+ @dec.setastest(False)
+ def func_with_test_in_name(arg1, arg2):
+ pass
+
+ """
+ def set_test(t):
+ t.__test__ = tf
+ return t
+ return set_test
+
+def skipif(skip_condition, msg=None):
+ """
+ Make function raise SkipTest exception if a given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ skip_condition : bool or callable
+ Flag to determine whether to skip the decorated test.
+ msg : str, optional
+ Message to give on raising a SkipTest exception. Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator which, when applied to a function, causes SkipTest
+ to be raised when `skip_condition` is True, and the function
+ to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+
+ def skip_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ # Allow for both boolean or callable skip conditions.
+ if isinstance(skip_condition, collections.Callable):
+ skip_val = lambda: skip_condition()
+ else:
+ skip_val = lambda: skip_condition
+
+ def get_msg(func,msg=None):
+ """Skip message with information about function being skipped."""
+ if msg is None:
+ out = 'Test skipped due to test condition'
+ else:
+ out = msg
+
+ return "Skipping test: %s: %s" % (func.__name__, out)
+
+ # We need to define *two* skippers because Python doesn't allow both
+ # return with value and yield inside the same function.
+ def skipper_func(*args, **kwargs):
+ """Skipper for normal test functions."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ return f(*args, **kwargs)
+
+ def skipper_gen(*args, **kwargs):
+ """Skipper for test generators."""
+ if skip_val():
+ raise SkipTest(get_msg(f, msg))
+ else:
+ for x in f(*args, **kwargs):
+ yield x
+
+ # Choose the right skipper to use when building the actual decorator.
+ if nose.util.isgenerator(f):
+ skipper = skipper_gen
+ else:
+ skipper = skipper_func
+
+ return nose.tools.make_decorator(f)(skipper)
+
+ return skip_decorator
+
+
+def knownfailureif(fail_condition, msg=None):
+ """
+ Make function raise KnownFailureException exception if given condition is true.
+
+ If the condition is a callable, it is used at runtime to dynamically
+ make the decision. This is useful for tests that may require costly
+ imports, to delay the cost until the test suite is actually executed.
+
+ Parameters
+ ----------
+ fail_condition : bool or callable
+ Flag to determine whether to mark the decorated test as a known
+ failure (if True) or not (if False).
+ msg : str, optional
+ Message to give on raising a KnownFailureException exception.
+ Default is None.
+
+ Returns
+ -------
+ decorator : function
+ Decorator, which, when applied to a function, causes
+ KnownFailureException to be raised when `fail_condition` is True,
+ and the function to be called normally otherwise.
+
+ Notes
+ -----
+ The decorator itself is decorated with the ``nose.tools.make_decorator``
+ function in order to transmit function name, and various other metadata.
+
+ """
+ if msg is None:
+ msg = 'Test skipped due to known failure'
+
+ # Allow for both boolean or callable known failure conditions.
+ if isinstance(fail_condition, collections.Callable):
+ fail_val = lambda: fail_condition()
+ else:
+ fail_val = lambda: fail_condition
+
+ def knownfail_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+ from .noseclasses import KnownFailureException
+
+ def knownfailer(*args, **kwargs):
+ if fail_val():
+ raise KnownFailureException(msg)
+ else:
+ return f(*args, **kwargs)
+ return nose.tools.make_decorator(f)(knownfailer)
+
+ return knownfail_decorator
+
+def deprecated(conditional=True):
+ """
+ Filter deprecation warnings while running the test suite.
+
+ This decorator can be used to filter DeprecationWarning's, to avoid
+ printing them during the test suite run, while checking that the test
+ actually raises a DeprecationWarning.
+
+ Parameters
+ ----------
+ conditional : bool or callable, optional
+ Flag to determine whether to mark test as deprecated or not. If the
+ condition is a callable, it is used at runtime to dynamically make the
+ decision. Default is True.
+
+ Returns
+ -------
+ decorator : function
+ The `deprecated` decorator itself.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+ def deprecate_decorator(f):
+ # Local import to avoid a hard nose dependency and only incur the
+ # import time overhead at actual test-time.
+ import nose
+
+ def _deprecated_imp(*args, **kwargs):
+ # Poor man's replacement for the with statement
+ with assert_warns(DeprecationWarning):
+ f(*args, **kwargs)
+
+ if isinstance(conditional, collections.Callable):
+ cond = conditional()
+ else:
+ cond = conditional
+ if cond:
+ return nose.tools.make_decorator(f)(_deprecated_imp)
+ else:
+ return f
+ return deprecate_decorator
+
+
+def parametrize(vars, input):
+ """
+ Pytest compatibility class. This implements the simplest level of
+ pytest.mark.parametrize for use in nose as an aid in making the transition
+ to pytest. It achieves that by adding a dummy var parameter and ignoring
+ the doc_func parameter of the base class. It does not support variable
+ substitution by name, nor does it support nesting or classes. See the
+ pytest documentation for usage.
+
+ .. versionadded:: 1.14.0
+
+ """
+ from .parameterized import parameterized
+
+ return parameterized(input)
diff --git a/numpy/testing/nose_tools/noseclasses.py b/numpy/testing/nose_tools/noseclasses.py
new file mode 100644
index 000000000..9756b9b45
--- /dev/null
+++ b/numpy/testing/nose_tools/noseclasses.py
@@ -0,0 +1,366 @@
+# These classes implement a doctest runner plugin for nose, a "known failure"
+# error class, and a customized TestProgram for NumPy.
+
+# Because this module imports nose directly, it should not
+# be used except by nosetester.py to avoid a general NumPy
+# dependency on nose.
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import doctest
+import inspect
+
+import numpy
+import nose
+from nose.plugins import doctests as npd
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+from nose.plugins.base import Plugin
+from nose.util import src
+from .nosetester import get_package_name
+from .utils import KnownFailureException, KnownFailureTest
+
+
+# Some of the classes in this module begin with 'Numpy' to clearly distinguish
+# them from the plethora of very similar names from nose/unittest/doctest
+
+#-----------------------------------------------------------------------------
+# Modified version of the one in the stdlib, that fixes a python bug (doctests
+# not found in extension modules, http://bugs.python.org/issue3158)
+class NumpyDocTestFinder(doctest.DocTestFinder):
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.__globals__
+ elif inspect.isbuiltin(object):
+ return module.__name__ == object.__module__
+ elif inspect.isclass(object):
+ return module.__name__ == object.__module__
+ elif inspect.ismethod(object):
+ # This one may be a bug in cython that fails to correctly set the
+ # __module__ attribute of methods, but since the same error is easy
+ # to make by extension code writers, having this safety in place
+ # isn't such a bad idea
+ return module.__name__ == object.__self__.__class__.__module__
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+ return True # [XX] no way not be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+
+ doctest.DocTestFinder._find(self, tests, obj, name, module,
+ source_lines, globs, seen)
+
+ # Below we re-run pieces of the above method with manual modifications,
+ # because the original code is buggy and fails to correctly identify
+ # doctests in extension modules.
+
+ # Local shorthands
+ from inspect import (
+ isroutine, isclass, ismodule, isfunction, ismethod
+ )
+
+ # Look for tests in a module's contained objects.
+ if ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ valname1 = '%s.%s' % (name, valname)
+ if ( (isroutine(val) or isclass(val))
+ and self._from_module(module, val)):
+
+ self._find(tests, val, valname1, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).__func__
+
+ # Recurse to methods, properties, and nested classes.
+ if ((isfunction(val) or isclass(val) or
+ ismethod(val) or isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+
+# second-chance checker; if the default comparison doesn't
+# pass, then see if the expected output string contains flags that
+# tell us to ignore the output
+class NumpyOutputChecker(doctest.OutputChecker):
+ def check_output(self, want, got, optionflags):
+ ret = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if not ret:
+ if "#random" in want:
+ return True
+
+ # it would be useful to normalize endianness so that
+ # bigendian machines don't fail all the tests (and there are
+ # actually some bigendian examples in the doctests). Let's try
+ # making them all little endian
+ got = got.replace("'>", "'<")
+ want = want.replace("'>", "'<")
+
+ # try to normalize out 32 and 64 bit default int sizes
+ for sz in [4, 8]:
+ got = got.replace("'<i%d'" % sz, "int")
+ want = want.replace("'<i%d'" % sz, "int")
+
+ ret = doctest.OutputChecker.check_output(self, want,
+ got, optionflags)
+
+ return ret
+
+
+# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
+# its constructor that blocks non-default arguments from being passed
+# down into doctest.DocTestCase
+class NumpyDocTestCase(npd.DocTestCase):
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ doctest.DocTestCase.__init__(self, test,
+ optionflags=optionflags,
+ setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+
+print_state = numpy.get_printoptions()
+
+class NumpyDoctest(npd.Doctest):
+ name = 'numpydoctest' # call nosetests with --with-numpydoctest
+ score = 1000 # load late, after doctest builtin
+
+ # always use whitespace and ellipsis options for doctests
+ doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
+
+ # files that should be ignored for doctests
+ doctest_ignore = ['generate_numpy_api.py',
+ 'setup.py']
+
+ # Custom classes; class variables to allow subclassing
+ doctest_case_class = NumpyDocTestCase
+ out_check_class = NumpyOutputChecker
+ test_finder_class = NumpyDocTestFinder
+
+ # Don't use the standard doctest option handler; hard-code the option values
+ def options(self, parser, env=os.environ):
+ Plugin.options(self, parser, env)
+ # Test doctests in 'test' files / directories. Standard plugin default
+ # is False
+ self.doctest_tests = True
+ # Variable name; if defined, doctest results stored in this variable in
+ # the top-level namespace. None is the standard default
+ self.doctest_result_var = None
+
+ def configure(self, options, config):
+ # parent method sets enabled flag from command line --with-numpydoctest
+ Plugin.configure(self, options, config)
+ self.finder = self.test_finder_class()
+ self.parser = doctest.DocTestParser()
+ if self.enabled:
+ # Pull standard doctest out of plugin list; there's no reason to run
+ # both. In practice the Unplugger plugin above would cover us when
+ # run from a standard numpy.test() call; this is just in case
+ # someone wants to run our plugin outside the numpy.test() machinery
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != 'doctest']
+
+ def set_test_context(self, test):
+ """ Configure `test` object to set test context
+
+ We set the numpy / scipy standard doctest namespace
+
+ Parameters
+ ----------
+ test : test object
+ with ``globs`` dictionary defining namespace
+
+ Returns
+ -------
+ None
+
+ Notes
+ -----
+ `test` object modified in place
+ """
+ # set the namespace for tests
+ pkg_name = get_package_name(os.path.dirname(test.filename))
+
+ # Each doctest should execute in an environment equivalent to
+ # starting Python and executing "import numpy as np", and,
+ # for SciPy packages, an additional import of the local
+ # package (so that scipy.linalg.basic.py's doctests have an
+ # implicit "from scipy import linalg" as well.
+ #
+ # Note: __file__ allows the doctest in NoseTester to run
+ # without producing an error
+ test.globs = {'__builtins__':__builtins__,
+ '__file__':'__main__',
+ '__name__':'__main__',
+ 'np':numpy}
+ # add appropriate scipy import for SciPy tests
+ if 'scipy' in pkg_name:
+ p = pkg_name.split('.')
+ p2 = p[-1]
+ test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
+
+ # Override test loading to customize test context (with set_test_context
+ # method), set standard docstring options, and install our own test output
+ # checker
+ def loadTestsFromModule(self, module):
+ if not self.matches(module.__name__):
+ npd.log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ # nose allows module.__test__ = False; doctest does not and
+ # throws AttributeError
+ return
+ if not tests:
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ # Set test namespace; test altered in place
+ self.set_test_context(test)
+ yield self.doctest_case_class(test,
+ optionflags=self.doctest_optflags,
+ checker=self.out_check_class(),
+ result_var=self.doctest_result_var)
+
+ # Add an afterContext method to nose.plugins.doctests.Doctest in order
+ # to restore print options to the original state after each doctest
+ def afterContext(self):
+ numpy.set_printoptions(**print_state)
+
+ # Ignore NumPy-specific build files that shouldn't be searched for tests
+ def wantFile(self, file):
+ bn = os.path.basename(file)
+ if bn in self.doctest_ignore:
+ return False
+ return npd.Doctest.wantFile(self, file)
+
+
+class Unplugger(object):
+ """ Nose plugin to remove named plugin late in loading
+
+ By default it removes the "doctest" plugin.
+ """
+ name = 'unplugger'
+ enabled = True # always enabled
+ score = 4000 # load late in order to be after builtins
+
+ def __init__(self, to_unplug='doctest'):
+ self.to_unplug = to_unplug
+
+ def options(self, parser, env):
+ pass
+
+ def configure(self, options, config):
+ # Pull named plugin out of plugins list
+ config.plugins.plugins = [p for p in config.plugins.plugins
+ if p.name != self.to_unplug]
+
+
+class KnownFailurePlugin(ErrorClassPlugin):
+ '''Plugin that installs a KNOWNFAIL error class for the
+ KnownFailureClass exception. When KnownFailure is raised,
+ the exception will be logged in the knownfail attribute of the
+ result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
+ exception will not be counted as an error or failure.'''
+ enabled = True
+ knownfail = ErrorClass(KnownFailureException,
+ label='KNOWNFAIL',
+ isfailure=False)
+
+ def options(self, parser, env=os.environ):
+ env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
+ parser.add_option('--no-knownfail', action='store_true',
+ dest='noKnownFail', default=env.get(env_opt, False),
+ help='Disable special handling of KnownFailure '
+ 'exceptions')
+
+ def configure(self, options, conf):
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noKnownFail', False)
+ if disable:
+ self.enabled = False
+
+KnownFailure = KnownFailurePlugin # backwards compat
+
+
+class FPUModeCheckPlugin(Plugin):
+ """
+ Plugin that checks the FPU mode before and after each test,
+ raising failures if the test changed the mode.
+ """
+
+ def prepareTestCase(self, test):
+ from numpy.core.multiarray_tests import get_fpu_mode
+
+ def run(result):
+ old_mode = get_fpu_mode()
+ test.test(result)
+ new_mode = get_fpu_mode()
+
+ if old_mode != new_mode:
+ try:
+ raise AssertionError(
+ "FPU mode changed from {0:#x} to {1:#x} during the "
+ "test".format(old_mode, new_mode))
+ except AssertionError:
+ result.addFailure(test, sys.exc_info())
+
+ return run
+
+
+# Class allows us to save the results of the tests in runTests - see runTests
+# method docstring for details
+class NumpyTestProgram(nose.core.TestProgram):
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and
+ sets self.success to the same value.
+
+ Because nose currently discards the test result object, but we need
+ to return it to the user, override TestProgram.runTests to retain
+ the result
+ """
+ if self.testRunner is None:
+ self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ self.result = self.testRunner.run(self.test)
+ self.success = self.result.wasSuccessful()
+ return self.success
diff --git a/numpy/testing/nose_tools/nosetester.py b/numpy/testing/nose_tools/nosetester.py
new file mode 100644
index 000000000..c2cf58377
--- /dev/null
+++ b/numpy/testing/nose_tools/nosetester.py
@@ -0,0 +1,560 @@
+"""
+Nose test running.
+
+This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import warnings
+from numpy.compat import basestring
+import numpy as np
+
+from .utils import import_nose, suppress_warnings
+
+
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+ '_numpy_tester', 'get_package_name', 'import_nose',
+ 'suppress_warnings']
+
+
+def get_package_name(filepath):
+ """
+ Given a path where a package is installed, determine its name.
+
+ Parameters
+ ----------
+ filepath : str
+ Path to a file. If the determination fails, "numpy" is returned.
+
+ Examples
+ --------
+ >>> np.testing.nosetester.get_package_name('nonsense')
+ 'numpy'
+
+ """
+
+ fullpath = filepath[:]
+ pkg_name = []
+ while 'site-packages' in filepath or 'dist-packages' in filepath:
+ filepath, p2 = os.path.split(filepath)
+ if p2 in ('site-packages', 'dist-packages'):
+ break
+ pkg_name.append(p2)
+
+ # if package name determination failed, just default to numpy/scipy
+ if not pkg_name:
+ if 'scipy' in fullpath:
+ return 'scipy'
+ else:
+ return 'numpy'
+
+ # otherwise, reverse to get correct order and return
+ pkg_name.reverse()
+
+ # don't include the outer egg directory
+ if pkg_name[0].endswith('.egg'):
+ pkg_name.pop(0)
+
+ return '.'.join(pkg_name)
+
+
+def run_module_suite(file_to_run=None, argv=None):
+ """
+ Run a test module.
+
+ Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
+ the command line
+
+ Parameters
+ ----------
+ file_to_run : str, optional
+ Path to test module, or None.
+ By default, run the module from which this function is called.
+ argv : list of strings
+ Arguments to be passed to the nose test runner. ``argv[0]`` is
+ ignored. All command line arguments accepted by ``nosetests``
+ will work. If it is the default value None, sys.argv is used.
+
+ .. versionadded:: 1.9.0
+
+ Examples
+ --------
+ Adding the following::
+
+ if __name__ == "__main__" :
+ run_module_suite(argv=sys.argv)
+
+ at the end of a test module will run the tests when that module is
+ called in the python interpreter.
+
+ Alternatively, calling::
+
+ >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+
+ from an interpreter will run all the test routine in 'test_matlib.py'.
+ """
+ if file_to_run is None:
+ f = sys._getframe(1)
+ file_to_run = f.f_locals.get('__file__', None)
+ if file_to_run is None:
+ raise AssertionError
+
+ if argv is None:
+ argv = sys.argv + [file_to_run]
+ else:
+ argv = argv + [file_to_run]
+
+ nose = import_nose()
+ from .noseclasses import KnownFailurePlugin
+ nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
+
+
+class NoseTester(object):
+ """
+ Nose test runner.
+
+ This class is made available as numpy.testing.Tester, and a test function
+ is typically added to a package's __init__.py like so::
+
+ from numpy.testing import Tester
+ test = Tester().test
+
+ Calling this test function finds and runs all tests associated with the
+ package and all its sub-packages.
+
+ Attributes
+ ----------
+ package_path : str
+ Full path to the package to test.
+ package_name : str
+ Name of the package to test.
+
+ Parameters
+ ----------
+ package : module, str or None, optional
+ The package to test. If a string, this should be the full path to
+ the package. If None (default), `package` is set to the module from
+ which `NoseTester` is initialized.
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ Default is "release".
+ depth : int, optional
+ If `package` is None, then this can be used to initialize from the
+ module of the caller of (the caller of (...)) the code that
+ initializes `NoseTester`. Default of 0 means the module of the
+ immediate caller; higher values are useful for utility routines that
+ want to initialize `NoseTester` objects on behalf of other code.
+
+ """
+ def __init__(self, package=None, raise_warnings="release", depth=0,
+ check_fpu_mode=False):
+ # Back-compat: 'None' used to mean either "release" or "develop"
+ # depending on whether this was a release or develop version of
+ # numpy. Those semantics were fine for testing numpy, but not so
+ # helpful for downstream projects like scipy that use
+ # numpy.testing. (They want to set this based on whether *they* are a
+ # release or develop version, not whether numpy is.) So we continue to
+ # accept 'None' for back-compat, but it's now just an alias for the
+ # default "release".
+ if raise_warnings is None:
+ raise_warnings = "release"
+
+ package_name = None
+ if package is None:
+ f = sys._getframe(1 + depth)
+ package_path = f.f_locals.get('__file__', None)
+ if package_path is None:
+ raise AssertionError
+ package_path = os.path.dirname(package_path)
+ package_name = f.f_locals.get('__name__', None)
+ elif isinstance(package, type(os)):
+ package_path = os.path.dirname(package.__file__)
+ package_name = getattr(package, '__name__', None)
+ else:
+ package_path = str(package)
+
+ self.package_path = package_path
+
+ # Find the package name under test; this name is used to limit coverage
+ # reporting (if enabled).
+ if package_name is None:
+ package_name = get_package_name(package_path)
+ self.package_name = package_name
+
+ # Set to "release" in constructor in maintenance branches.
+ self.raise_warnings = raise_warnings
+
+ # Whether to check for FPU mode changes
+ self.check_fpu_mode = check_fpu_mode
+
+ def _test_argv(self, label, verbose, extra_argv):
+ ''' Generate argv for nosetest command
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ see ``test`` docstring
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ argv : list
+ command line arguments that will be passed to nose
+ '''
+ argv = [__file__, self.package_path, '-s']
+ if label and label != 'full':
+ if not isinstance(label, basestring):
+ raise TypeError('Selection label should be a string')
+ if label == 'fast':
+ label = 'not slow'
+ argv += ['-A', label]
+ argv += ['--verbosity', str(verbose)]
+
+ # When installing with setuptools, and also in some other cases, the
+ # test_*.py files end up marked +x executable. Nose, by default, does
+ # not run files marked with +x as they might be scripts. However, in
+ # our case nose only looks for test_*.py files under the package
+ # directory, which should be safe.
+ argv += ['--exe']
+
+ if extra_argv:
+ argv += extra_argv
+ return argv
+
+ def _show_system_info(self):
+ nose = import_nose()
+
+ import numpy
+ print("NumPy version %s" % numpy.__version__)
+ relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
+ print("NumPy relaxed strides checking option:", relaxed_strides)
+ npdir = os.path.dirname(numpy.__file__)
+ print("NumPy is installed in %s" % npdir)
+
+ if 'scipy' in self.package_name:
+ import scipy
+ print("SciPy version %s" % scipy.__version__)
+ spdir = os.path.dirname(scipy.__file__)
+ print("SciPy is installed in %s" % spdir)
+
+ pyversion = sys.version.replace('\n', '')
+ print("Python version %s" % pyversion)
+ print("nose version %d.%d.%d" % nose.__versioninfo__)
+
+ def _get_custom_doctester(self):
+ """ Return instantiated plugin for doctests
+
+ Allows subclassing of this class to override doctester
+
+ A return value of None means use the nose builtin doctest plugin
+ """
+ from .noseclasses import NumpyDoctest
+ return NumpyDoctest()
+
+ def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, timer=False):
+ """
+ Run tests for module using nose.
+
+ This method does the heavy lifting for the `test` method. It takes all
+ the same arguments, for details see `test`.
+
+ See Also
+ --------
+ test
+
+ """
+ # fail with nice error message if nose is not present
+ import_nose()
+ # compile argv
+ argv = self._test_argv(label, verbose, extra_argv)
+ # our way of doing coverage
+ if coverage:
+ argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
+ '--cover-tests', '--cover-erase']
+
+ if timer:
+ if timer is True:
+ argv += ['--with-timer']
+ elif isinstance(timer, int):
+ argv += ['--with-timer', '--timer-top-n', str(timer)]
+
+ # construct list of plugins
+ import nose.plugins.builtin
+ from nose.plugins import EntryPointPluginManager
+ from .noseclasses import (KnownFailurePlugin, Unplugger,
+ FPUModeCheckPlugin)
+ plugins = [KnownFailurePlugin()]
+ plugins += [p() for p in nose.plugins.builtin.plugins]
+ if self.check_fpu_mode:
+ plugins += [FPUModeCheckPlugin()]
+ argv += ["--with-fpumodecheckplugin"]
+ try:
+ # External plugins (like nose-timer)
+ entrypoint_manager = EntryPointPluginManager()
+ entrypoint_manager.loadPlugins()
+ plugins += [p for p in entrypoint_manager.plugins]
+ except ImportError:
+ # Relies on pkg_resources, not a hard dependency
+ pass
+
+ # add doctesting if required
+ doctest_argv = '--with-doctest' in argv
+ if doctests == False and doctest_argv:
+ doctests = True
+ plug = self._get_custom_doctester()
+ if plug is None:
+ # use standard doctesting
+ if doctests and not doctest_argv:
+ argv += ['--with-doctest']
+ else: # custom doctesting
+ if doctest_argv: # in fact the unplugger would take care of this
+ argv.remove('--with-doctest')
+ plugins += [Unplugger('doctest'), plug]
+ if doctests:
+ argv += ['--with-' + plug.name]
+ return argv, plugins
+
+ def test(self, label='fast', verbose=1, extra_argv=None,
+ doctests=False, coverage=False, raise_warnings=None,
+ timer=False):
+ """
+ Run tests for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the tests to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow tests as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ attribute_identifier - string passed directly to nosetests as '-A'.
+ verbose : int, optional
+ Verbosity value for test outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+ doctests : bool, optional
+ If True, run doctests in module. Default is False.
+ coverage : bool, optional
+ If True, report coverage of NumPy code. Default is False.
+ (This requires the `coverage module:
+ <http://nedbatchelder.com/code/modules/coverage.html>`_).
+ raise_warnings : None, str or sequence of warnings, optional
+ This specifies which warnings to configure as 'raise' instead
+ of being shown once during the test execution. Valid strings are:
+
+ - "develop" : equals ``(Warning,)``
+ - "release" : equals ``()``, don't raise on any warnings.
+
+ The default is to use the class initialization value.
+ timer : bool or int, optional
+ Timing of individual tests with ``nose-timer`` (which needs to be
+ installed). If True, time tests and report on all of them.
+ If an integer (say ``N``), report timing results for ``N`` slowest
+ tests.
+
+ Returns
+ -------
+ result : object
+ Returns the result of running the tests as a
+ ``nose.result.TextTestResult`` object.
+
+ Notes
+ -----
+ Each NumPy module exposes `test` in its namespace to run all tests for it.
+ For example, to run all tests for numpy.lib:
+
+ >>> np.lib.test() #doctest: +SKIP
+
+ Examples
+ --------
+ >>> result = np.lib.test() #doctest: +SKIP
+ Running unit tests for numpy.lib
+ ...
+ Ran 976 tests in 3.933s
+
+ OK
+
+ >>> result.errors #doctest: +SKIP
+ []
+ >>> result.knownfail #doctest: +SKIP
+ []
+ """
+
+ # cap verbosity at 3 because nose becomes *very* verbose beyond that
+ verbose = min(verbose, 3)
+
+ from . import utils
+ utils.verbose = verbose
+
+ argv, plugins = self.prepare_test_args(
+ label, verbose, extra_argv, doctests, coverage, timer)
+
+ if doctests:
+ print("Running unit tests and doctests for %s" % self.package_name)
+ else:
+ print("Running unit tests for %s" % self.package_name)
+
+ self._show_system_info()
+
+ # reset doctest state on every run
+ import doctest
+ doctest.master = None
+
+ if raise_warnings is None:
+ raise_warnings = self.raise_warnings
+
+ _warn_opts = dict(develop=(Warning,),
+ release=())
+ if isinstance(raise_warnings, basestring):
+ raise_warnings = _warn_opts[raise_warnings]
+
+ with suppress_warnings("location") as sup:
+ # Reset the warning filters to the default state,
+ # so that running the tests is more repeatable.
+ warnings.resetwarnings()
+ # Set all warnings to 'warn', this is because the default 'once'
+ # has the bad property of possibly shadowing later warnings.
+ warnings.filterwarnings('always')
+ # Force the requested warnings to raise
+ for warningtype in raise_warnings:
+ warnings.filterwarnings('error', category=warningtype)
+ # Filter out annoying import messages.
+ sup.filter(message='Not importing directory')
+ sup.filter(message="numpy.dtype size changed")
+ sup.filter(message="numpy.ufunc size changed")
+ sup.filter(category=np.ModuleDeprecationWarning)
+ # Filter out boolean '-' deprecation messages. This allows
+ # older versions of scipy to test without a flood of messages.
+ sup.filter(message=".*boolean negative.*")
+ sup.filter(message=".*boolean subtract.*")
+ # Filter out distutils cpu warnings (could be localized to
+ # distutils tests). ASV has problems with top level import,
+ # so fetch module for suppression here.
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ from ...distutils import cpuinfo
+ sup.filter(category=UserWarning, module=cpuinfo)
+ # See #7949: Filter out deprecation warnings due to the -3 flag to
+ # python 2
+ if sys.version_info.major == 2 and sys.py3kwarning:
+ # This is very specific, so using the fragile module filter
+ # is fine
+ import threading
+ sup.filter(DeprecationWarning,
+ r"sys\.exc_clear\(\) not supported in 3\.x",
+ module=threading)
+ sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
+ sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
+ sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
+ sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
+ sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
+ # Filter out some deprecation warnings inside nose 1.3.7 when run
+ # on python 3.5b2. See
+ # https://github.com/nose-devs/nose/issues/929
+ # Note: it is hard to filter based on module for sup (lineno could
+ # be implemented).
+ warnings.filterwarnings("ignore", message=".*getargspec.*",
+ category=DeprecationWarning,
+ module=r"nose\.")
+
+ from .noseclasses import NumpyTestProgram
+
+ t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
+
+ return t.result
+
+ def bench(self, label='fast', verbose=1, extra_argv=None):
+ """
+ Run benchmarks for module using nose.
+
+ Parameters
+ ----------
+ label : {'fast', 'full', '', attribute identifier}, optional
+ Identifies the benchmarks to run. This can be a string to pass to
+ the nosetests executable with the '-A' option, or one of several
+ special values. Special values are:
+ * 'fast' - the default - which corresponds to the ``nosetests -A``
+ option of 'not slow'.
+ * 'full' - fast (as above) and slow benchmarks as in the
+ 'no -A' option to nosetests - this is the same as ''.
+ * None or '' - run all tests.
+ attribute_identifier - string passed directly to nosetests as '-A'.
+ verbose : int, optional
+ Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
+ extra_argv : list, optional
+ List with any extra arguments to pass to nosetests.
+
+ Returns
+ -------
+ success : bool
+ Returns True if running the benchmarks works, False if an error
+ occurred.
+
+ Notes
+ -----
+ Benchmarks are like tests, but have names starting with "bench" instead
+ of "test", and can be found under the "benchmarks" sub-directory of the
+ module.
+
+ Each NumPy module exposes `bench` in its namespace to run all benchmarks
+ for it.
+
+ Examples
+ --------
+ >>> success = np.lib.bench() #doctest: +SKIP
+ Running benchmarks for numpy.lib
+ ...
+ using 562341 items:
+ unique:
+ 0.11
+ unique1d:
+ 0.11
+ ratio: 1.0
+ nUnique: 56230 == 56230
+ ...
+ OK
+
+ >>> success #doctest: +SKIP
+ True
+
+ """
+
+ print("Running benchmarks for %s" % self.package_name)
+ self._show_system_info()
+
+ argv = self._test_argv(label, verbose, extra_argv)
+ argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
+
+ # import nose or make informative error
+ nose = import_nose()
+
+ # get plugin to disable doctests
+ from .noseclasses import Unplugger
+ add_plugins = [Unplugger('doctest')]
+
+ return nose.run(argv=argv, addplugins=add_plugins)
+
+
+def _numpy_tester():
+ if hasattr(np, "__version__") and ".dev0" in np.__version__:
+ mode = "develop"
+ else:
+ mode = "release"
+ return NoseTester(raise_warnings=mode, depth=1,
+ check_fpu_mode=True)
diff --git a/numpy/testing/nose_tools/parameterized.py b/numpy/testing/nose_tools/parameterized.py
new file mode 100644
index 000000000..962fddcbf
--- /dev/null
+++ b/numpy/testing/nose_tools/parameterized.py
@@ -0,0 +1,489 @@
+"""
+tl;dr: all code is licensed under simplified BSD, unless stated otherwise.
+
+Unless stated otherwise in the source files, all code is copyright 2010 David
+Wolever <david@wolever.net>. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL <COPYRIGHT HOLDER> OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
+INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of David Wolever.
+
+"""
+import re
+import sys
+import inspect
+import warnings
+from functools import wraps
+from types import MethodType as MethodType
+from collections import namedtuple
+
+try:
+ from collections import OrderedDict as MaybeOrderedDict
+except ImportError:
+ MaybeOrderedDict = dict
+
+from unittest import TestCase
+
+PY3 = sys.version_info[0] == 3
+PY2 = sys.version_info[0] == 2
+
+
+if PY3:
+ # Python 3 doesn't have an InstanceType, so just use a dummy type.
+ class InstanceType():
+ pass
+ lzip = lambda *a: list(zip(*a))
+ text_type = str
+ string_types = str,
+ bytes_type = bytes
+ def make_method(func, instance, type):
+ if instance is None:
+ return func
+ return MethodType(func, instance)
+else:
+ from types import InstanceType
+ lzip = zip
+ text_type = unicode
+ bytes_type = str
+ string_types = basestring,
+ def make_method(func, instance, type):
+ return MethodType(func, instance, type)
+
+_param = namedtuple("param", "args kwargs")
+
+class param(_param):
+ """ Represents a single parameter to a test case.
+
+ For example::
+
+ >>> p = param("foo", bar=16)
+ >>> p
+ param("foo", bar=16)
+ >>> p.args
+ ('foo', )
+ >>> p.kwargs
+ {'bar': 16}
+
+ Intended to be used as an argument to ``@parameterized``::
+
+ @parameterized([
+ param("foo", bar=16),
+ ])
+ def test_stuff(foo, bar=16):
+ pass
+ """
+
+ def __new__(cls, *args , **kwargs):
+ return _param.__new__(cls, args, kwargs)
+
+ @classmethod
+ def explicit(cls, args=None, kwargs=None):
+ """ Creates a ``param`` by explicitly specifying ``args`` and
+ ``kwargs``::
+
+ >>> param.explicit([1,2,3])
+ param(*(1, 2, 3))
+ >>> param.explicit(kwargs={"foo": 42})
+ param(*(), **{"foo": "42"})
+ """
+ args = args or ()
+ kwargs = kwargs or {}
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_decorator(cls, args):
+ """ Returns an instance of ``param()`` for ``@parameterized`` argument
+ ``args``::
+
+ >>> param.from_decorator((42, ))
+ param(args=(42, ), kwargs={})
+ >>> param.from_decorator("foo")
+ param(args=("foo", ), kwargs={})
+ """
+ if isinstance(args, param):
+ return args
+ elif isinstance(args, string_types):
+ args = (args, )
+ try:
+ return cls(*args)
+ except TypeError as e:
+ if "after * must be" not in str(e):
+ raise
+ raise TypeError(
+ "Parameters must be tuples, but %r is not (hint: use '(%r, )')"
+ %(args, args),
+ )
+
+ def __repr__(self):
+ return "param(*%r, **%r)" %self
+
+
+class QuietOrderedDict(MaybeOrderedDict):
+ """ When OrderedDict is available, use it to make sure that the kwargs in
+ doc strings are consistently ordered. """
+ __str__ = dict.__str__
+ __repr__ = dict.__repr__
+
+
+def parameterized_argument_value_pairs(func, p):
+ """Return tuples of parameterized arguments and their values.
+
+ This is useful if you are writing your own doc_func
+ function and need to know the values for each parameter name::
+
+ >>> def func(a, foo=None, bar=42, **kwargs): pass
+ >>> p = param(1, foo=7, extra=99)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
+
+ If the function's first argument is named ``self`` then it will be
+ ignored::
+
+ >>> def func(self, a): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("a", 1)]
+
+ Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
+
+ >>> def func(foo, *args): pass
+ >>> p = param(1)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1)]
+ >>> p = param(1, 16)
+ >>> parameterized_argument_value_pairs(func, p)
+ [("foo", 1), ("*args", (16, ))]
+ """
+ argspec = inspect.getargspec(func)
+ arg_offset = 1 if argspec.args[:1] == ["self"] else 0
+
+ named_args = argspec.args[arg_offset:]
+
+ result = lzip(named_args, p.args)
+ named_args = argspec.args[len(result) + arg_offset:]
+ varargs = p.args[len(result):]
+
+ result.extend([
+ (name, p.kwargs.get(name, default))
+ for (name, default)
+ in zip(named_args, argspec.defaults or [])
+ ])
+
+ seen_arg_names = set([ n for (n, _) in result ])
+ keywords = QuietOrderedDict(sorted([
+ (name, p.kwargs[name])
+ for name in p.kwargs
+ if name not in seen_arg_names
+ ]))
+
+ if varargs:
+ result.append(("*%s" %(argspec.varargs, ), tuple(varargs)))
+
+ if keywords:
+ result.append(("**%s" %(argspec.keywords, ), keywords))
+
+ return result
+
+def short_repr(x, n=64):
+ """ A shortened repr of ``x`` which is guaranteed to be ``unicode``::
+
+ >>> short_repr("foo")
+ u"foo"
+ >>> short_repr("123456789", n=4)
+ u"12...89"
+ """
+
+ x_repr = repr(x)
+ if isinstance(x_repr, bytes_type):
+ try:
+ x_repr = text_type(x_repr, "utf-8")
+ except UnicodeDecodeError:
+ x_repr = text_type(x_repr, "latin1")
+ if len(x_repr) > n:
+ x_repr = x_repr[:n//2] + "..." + x_repr[len(x_repr) - n//2:]
+ return x_repr
+
+def default_doc_func(func, num, p):
+ if func.__doc__ is None:
+ return None
+
+ all_args_with_values = parameterized_argument_value_pairs(func, p)
+
+ # Assumes that the function passed is a bound method.
+ descs = ["%s=%s" %(n, short_repr(v)) for n, v in all_args_with_values]
+
+ # The documentation might be a multiline string, so split it
+ # and just work with the first string, ignoring the period
+ # at the end if there is one.
+ first, nl, rest = func.__doc__.lstrip().partition("\n")
+ suffix = ""
+ if first.endswith("."):
+ suffix = "."
+ first = first[:-1]
+ args = "%s[with %s]" %(len(first) and " " or "", ", ".join(descs))
+ return "".join([first.rstrip(), args, suffix, nl, rest])
+
+def default_name_func(func, num, p):
+ base_name = func.__name__
+ name_suffix = "_%s" %(num, )
+ if len(p.args) > 0 and isinstance(p.args[0], string_types):
+ name_suffix += "_" + parameterized.to_safe_name(p.args[0])
+ return base_name + name_suffix
+
+
+_test_runner_override = None
+_test_runner_guess = False
+_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
+_test_runner_aliases = {
+ "_pytest": "pytest",
+}
+
+def set_test_runner(name):
+ global _test_runner_override
+ if name not in _test_runners:
+ raise TypeError(
+ "Invalid test runner: %r (must be one of: %s)"
+ %(name, ", ".join(_test_runners)),
+ )
+ _test_runner_override = name
+
+def detect_runner():
+ """ Guess which test runner we're using by traversing the stack and looking
+ for the first matching module. This *should* be reasonably safe, as
+ it's done during test discovery where the test runner should be the
+ stack frame immediately outside. """
+ if _test_runner_override is not None:
+ return _test_runner_override
+ global _test_runner_guess
+ if _test_runner_guess is False:
+ stack = inspect.stack()
+ for record in reversed(stack):
+ frame = record[0]
+ module = frame.f_globals.get("__name__").partition(".")[0]
+ if module in _test_runner_aliases:
+ module = _test_runner_aliases[module]
+ if module in _test_runners:
+ _test_runner_guess = module
+ break
+ if record[1].endswith("python2.6/unittest.py"):
+ _test_runner_guess = "unittest"
+ break
+ else:
+ _test_runner_guess = None
+ return _test_runner_guess
+
+class parameterized(object):
+ """ Parameterize a test case::
+
+ class TestInt(object):
+ @parameterized([
+ ("A", 10),
+ ("F", 15),
+ param("10", 42, base=42)
+ ])
+ def test_int(self, input, expected, base=16):
+ actual = int(input, base=base)
+ assert_equal(actual, expected)
+
+ @parameterized([
+ (2, 3, 5)
+ (3, 5, 8),
+ ])
+ def test_add(a, b, expected):
+ assert_equal(a + b, expected)
+ """
+
+ def __init__(self, input, doc_func=None):
+ self.get_input = self.input_as_callable(input)
+ self.doc_func = doc_func or default_doc_func
+
+ def __call__(self, test_func):
+ self.assert_not_in_testcase_subclass()
+
+ @wraps(test_func)
+ def wrapper(test_self=None):
+ test_cls = test_self and type(test_self)
+ if test_self is not None:
+ if issubclass(test_cls, InstanceType):
+ raise TypeError((
+ "@parameterized can't be used with old-style classes, but "
+ "%r has an old-style class. Consider using a new-style "
+ "class, or '@parameterized.expand' "
+ "(see http://stackoverflow.com/q/54867/71522 for more "
+ "information on old-style classes)."
+ ) %(test_self, ))
+
+ original_doc = wrapper.__doc__
+ for num, args in enumerate(wrapper.parameterized_input):
+ p = param.from_decorator(args)
+ unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
+ try:
+ wrapper.__doc__ = nose_tuple[0].__doc__
+ # Nose uses `getattr(instance, test_func.__name__)` to get
+ # a method bound to the test instance (as opposed to a
+ # method bound to the instance of the class created when
+ # tests were being enumerated). Set a value here to make
+ # sure nose can get the correct test method.
+ if test_self is not None:
+ setattr(test_cls, test_func.__name__, unbound_func)
+ yield nose_tuple
+ finally:
+ if test_self is not None:
+ delattr(test_cls, test_func.__name__)
+ wrapper.__doc__ = original_doc
+ wrapper.parameterized_input = self.get_input()
+ wrapper.parameterized_func = test_func
+ test_func.__name__ = "_parameterized_original_%s" %(test_func.__name__, )
+ return wrapper
+
+ def param_as_nose_tuple(self, test_self, func, num, p):
+ nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
+ nose_func.__doc__ = self.doc_func(func, num, p)
+ # Track the unbound function because we need to setattr the unbound
+ # function onto the class for nose to work (see comments above), and
+ # Python 3 doesn't let us pull the function out of a bound method.
+ unbound_func = nose_func
+ if test_self is not None:
+ # Under nose on Py2 we need to return an unbound method to make
+ # sure that the `self` in the method is properly shared with the
+ # `self` used in `setUp` and `tearDown`. But only there. Everyone
+ # else needs a bound method.
+ func_self = (
+ None if PY2 and detect_runner() == "nose" else
+ test_self
+ )
+ nose_func = make_method(nose_func, func_self, type(test_self))
+ return unbound_func, (nose_func, ) + p.args + (p.kwargs or {}, )
+
+ def assert_not_in_testcase_subclass(self):
+ parent_classes = self._terrible_magic_get_defining_classes()
+ if any(issubclass(cls, TestCase) for cls in parent_classes):
+ raise Exception("Warning: '@parameterized' tests won't work "
+ "inside subclasses of 'TestCase' - use "
+ "'@parameterized.expand' instead.")
+
+ def _terrible_magic_get_defining_classes(self):
+ """ Returns the set of parent classes of the class currently being defined.
+ Will likely only work if called from the ``parameterized`` decorator.
+ This function is entirely @brandon_rhodes's fault, as he suggested
+ the implementation: http://stackoverflow.com/a/8793684/71522
+ """
+ stack = inspect.stack()
+ if len(stack) <= 4:
+ return []
+ frame = stack[4]
+ code_context = frame[4] and frame[4][0].strip()
+ if not (code_context and code_context.startswith("class ")):
+ return []
+ _, _, parents = code_context.partition("(")
+ parents, _, _ = parents.partition(")")
+ return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
+
+ @classmethod
+ def input_as_callable(cls, input):
+ if callable(input):
+ return lambda: cls.check_input_values(input())
+ input_values = cls.check_input_values(input)
+ return lambda: input_values
+
+ @classmethod
+ def check_input_values(cls, input_values):
+ # Explicitly convert non-list inputs to a list so that:
+ # 1. A helpful exception will be raised if they aren't iterable, and
+ # 2. Generators are unwrapped exactly once (otherwise `nosetests
+ # --processes=n` has issues; see:
+ # https://github.com/wolever/nose-parameterized/pull/31)
+ if not isinstance(input_values, list):
+ input_values = list(input_values)
+ return [ param.from_decorator(p) for p in input_values ]
+
+ @classmethod
+ def expand(cls, input, name_func=None, doc_func=None, **legacy):
+ """ A "brute force" method of parameterizing test cases. Creates new
+ test cases and injects them into the namespace that the wrapped
+ function is being defined in. Useful for parameterizing tests in
+ subclasses of 'UnitTest', where Nose test generators don't work.
+
+ >>> @parameterized.expand([("foo", 1, 2)])
+ ... def test_add1(name, input, expected):
+ ... actual = add1(input)
+ ... assert_equal(actual, expected)
+ ...
+ >>> locals()
+ ... 'test_add1_foo_0': <function ...> ...
+ >>>
+ """
+
+ if "testcase_func_name" in legacy:
+ warnings.warn("testcase_func_name= is deprecated; use name_func=",
+ DeprecationWarning, stacklevel=2)
+ if not name_func:
+ name_func = legacy["testcase_func_name"]
+
+ if "testcase_func_doc" in legacy:
+ warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
+ DeprecationWarning, stacklevel=2)
+ if not doc_func:
+ doc_func = legacy["testcase_func_doc"]
+
+ doc_func = doc_func or default_doc_func
+ name_func = name_func or default_name_func
+
+ def parameterized_expand_wrapper(f, instance=None):
+ stack = inspect.stack()
+ frame = stack[1]
+ frame_locals = frame[0].f_locals
+
+ paramters = cls.input_as_callable(input)()
+ for num, p in enumerate(paramters):
+ name = name_func(f, num, p)
+ frame_locals[name] = cls.param_as_standalone_func(p, f, name)
+ frame_locals[name].__doc__ = doc_func(f, num, p)
+
+ f.__test__ = False
+ return parameterized_expand_wrapper
+
+ @classmethod
+ def param_as_standalone_func(cls, p, func, name):
+ @wraps(func)
+ def standalone_func(*a):
+ return func(*(a + p.args), **p.kwargs)
+ standalone_func.__name__ = name
+
+ # place_as is used by py.test to determine what source file should be
+ # used for this test.
+ standalone_func.place_as = func
+
+ # Remove __wrapped__ because py.test will try to look at __wrapped__
+ # to determine which parameters should be used with this test case,
+ # and obviously we don't need it to do any parameterization.
+ try:
+ del standalone_func.__wrapped__
+ except AttributeError:
+ pass
+ return standalone_func
+
+ @classmethod
+ def to_safe_name(cls, s):
+ return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
diff --git a/numpy/testing/nose_tools/utils.py b/numpy/testing/nose_tools/utils.py
new file mode 100644
index 000000000..302cf32ff
--- /dev/null
+++ b/numpy/testing/nose_tools/utils.py
@@ -0,0 +1,2229 @@
+"""
+Utility function to facilitate testing.
+
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import sys
+import re
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+
+from numpy.core import(
+ float32, empty, arange, array_repr, ndarray, isnat, array)
+from numpy.lib.utils import deprecate
+
+if sys.version_info[0] >= 3:
+ from io import StringIO
+else:
+ from StringIO import StringIO
+
+__all__ = [
+ 'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+ 'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+ 'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+ 'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+ 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+ 'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+ 'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+ '_assert_valid_refcount', '_gen_alignment_data',
+ ]
+
+
+class KnownFailureException(Exception):
+ '''Raise this exception to mark a test as a known failing test.'''
+ pass
+
+
+KnownFailureTest = KnownFailureException # backwards compat
+verbose = 0
+
+IS_PYPY = '__pypy__' in sys.modules
+HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+
+
+def import_nose():
+ """ Import nose only when needed.
+ """
+ nose_is_good = True
+ minimum_nose_version = (1, 0, 0)
+ try:
+ import nose
+ except ImportError:
+ nose_is_good = False
+ else:
+ if nose.__versioninfo__ < minimum_nose_version:
+ nose_is_good = False
+
+ if not nose_is_good:
+ msg = ('Need nose >= %d.%d.%d for tests - see '
+ 'http://nose.readthedocs.io' %
+ minimum_nose_version)
+ raise ImportError(msg)
+
+ return nose
+
+
+def assert_(val, msg=''):
+ """
+ Assert that works in release mode.
+ Accepts callable msg to allow deferring evaluation until failure.
+
+ The Python built-in ``assert`` does not work when executing code in
+ optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+ For documentation on usage, refer to the Python documentation.
+
+ """
+ __tracebackhide__ = True # Hide traceback for py.test
+ if not val:
+ try:
+ smsg = msg()
+ except TypeError:
+ smsg = msg
+ raise AssertionError(smsg)
+
+
+def gisnan(x):
+ """like isnan, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isnan and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isnan
+ st = isnan(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isnan not supported for this type")
+ return st
+
+
+def gisfinite(x):
+ """like isfinite, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isfinite and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isfinite, errstate
+ with errstate(invalid='ignore'):
+ st = isfinite(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isfinite not supported for this type")
+ return st
+
+
+def gisinf(x):
+ """like isinf, but always raise an error if type not supported instead of
+ returning a TypeError object.
+
+ Notes
+ -----
+ isinf and other ufunc sometimes return a NotImplementedType object instead
+ of raising any exception. This function is a wrapper to make sure an
+ exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isinf, errstate
+ with errstate(invalid='ignore'):
+ st = isinf(x)
+ if isinstance(st, type(NotImplemented)):
+ raise TypeError("isinf not supported for this type")
+ return st
+
+
+@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
+ "Use numpy.random.rand instead.")
+def rand(*args):
+ """Returns an array of random numbers with the given shape.
+
+ This only uses the standard library, so it is useful for testing purposes.
+ """
+ import random
+ from numpy.core import zeros, float64
+ results = zeros(args, float64)
+ f = results.flat
+ for i in range(len(f)):
+ f[i] = random.random()
+ return results
+
+
+if os.name == 'nt':
+ # Code "stolen" from enthought/debug/memusage.py
+ def GetPerformanceAttributes(object, counter, instance=None,
+ inum=-1, format=None, machine=None):
+ # NOTE: Many counters require 2 samples to give accurate results,
+ # including "% Processor Time" (as by definition, at any instant, a
+ # thread's CPU usage is either 0 or 100). To read counters like this,
+ # you should copy this function, but keep the counter open, and call
+ # CollectQueryData() each time you need to know.
+ # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
+ # My older explanation for this was that the "AddCounter" process forced
+ # the CPU to 100%, but the above makes more sense :)
+ import win32pdh
+ if format is None:
+ format = win32pdh.PDH_FMT_LONG
+ path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
+ hq = win32pdh.OpenQuery()
+ try:
+ hc = win32pdh.AddCounter(hq, path)
+ try:
+ win32pdh.CollectQueryData(hq)
+ type, val = win32pdh.GetFormattedCounterValue(hc, format)
+ return val
+ finally:
+ win32pdh.RemoveCounter(hc)
+ finally:
+ win32pdh.CloseQuery(hq)
+
+ def memusage(processName="python", instance=0):
+ # from win32pdhutil, part of the win32all package
+ import win32pdh
+ return GetPerformanceAttributes("Process", "Virtual Bytes",
+ processName, instance,
+ win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
+
+ def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
+ """
+ Return virtual memory size in bytes of the running python.
+
+ """
+ try:
+ f = open(_proc_pid_stat, 'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[22])
+ except Exception:
+ return
+else:
+ def memusage():
+ """
+ Return memory usage of running python. [Not implemented]
+
+ """
+ raise NotImplementedError
+
+
+if sys.platform[:5] == 'linux':
+ def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
+ _load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ try:
+ f = open(_proc_pid_stat, 'r')
+ l = f.readline().split(' ')
+ f.close()
+ return int(l[13])
+ except Exception:
+ return int(100*(time.time()-_load_time[0]))
+else:
+ # os.getpid is not in all platforms available.
+ # Using time is safe but inaccurate, especially when process
+ # was suspended or sleeping.
+ def jiffies(_load_time=[]):
+ """
+ Return number of jiffies elapsed.
+
+ Return number of jiffies (1/100ths of a second) that this
+ process has been scheduled in user mode. See man 5 proc.
+
+ """
+ import time
+ if not _load_time:
+ _load_time.append(time.time())
+ return int(100*(time.time()-_load_time[0]))
+
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+ verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+ msg = ['\n' + header]
+ if err_msg:
+ if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+ msg = [msg[0] + ' ' + err_msg]
+ else:
+ msg.append(err_msg)
+ if verbose:
+ for i, a in enumerate(arrays):
+
+ if isinstance(a, ndarray):
+ # precision argument is only needed if the objects are ndarrays
+ r_func = partial(array_repr, precision=precision)
+ else:
+ r_func = repr
+
+ try:
+ r = r_func(a)
+ except Exception as exc:
+ r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
+ if r.count('\n') > 3:
+ r = '\n'.join(r.splitlines()[:3])
+ r += '...'
+ msg.append(' %s: %s' % (names[i], r))
+ return '\n'.join(msg)
+
+
def assert_equal(actual, desired, err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal.

    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
    check that all elements of these objects are equal. An exception is raised
    at the first conflicting values.

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal.

    Examples
    --------
    >>> np.testing.assert_equal([4,5], [4,6])
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal:
    item=1
    ACTUAL: 5
    DESIRED: 6

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    if isinstance(desired, dict):
        # Dicts: same length, same keys, and recursively equal values.
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k, i in desired.items():
            if k not in actual:
                raise AssertionError(repr(k))
            assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
        return
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        # Sequences: same length and recursively equal items.
        assert_equal(len(actual), len(desired), err_msg, verbose)
        for k in range(len(desired)):
            assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
        return
    from numpy.core import ndarray, isscalar, signbit
    from numpy.lib import iscomplexobj, real, imag
    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
        return assert_array_equal(actual, desired, err_msg, verbose)
    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False

    if usecomplex:
        # Compare real and imaginary parts independently; plain reals are
        # treated as having a zero imaginary part.
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_equal(actualr, desiredr)
            assert_equal(actuali, desiredi)
        except AssertionError:
            raise AssertionError(msg)

    # isscalar test to check cases such as [np.nan] != np.nan
    if isscalar(desired) != isscalar(actual):
        raise AssertionError(msg)

    # Inf/nan/negative zero handling
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            isdesnan = gisnan(desired)
            isactnan = gisnan(actual)
            if isdesnan or isactnan:
                if not (isdesnan and isactnan):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
        elif desired == 0 and actual == 0:
            # Distinguish -0.0 from +0.0 via the sign bit.
            if not signbit(desired) == signbit(actual):
                raise AssertionError(msg)
    # If TypeError or ValueError raised while using isnan and co, just handle
    # as before
    except (TypeError, ValueError, NotImplementedError):
        pass

    try:
        # If both are NaT (and have the same dtype -- datetime or timedelta)
        # they are considered equal.  Only a both-NaT pair may short-circuit
        # here: the previous form compared ``isnat(desired) == isnat(actual)``
        # which is also True when *neither* is NaT, silently passing any two
        # unequal datetimes of the same dtype.  Non-NaT values now fall
        # through to the ordinary ``==`` comparison below.
        isdesnat = isnat(desired)
        isactnat = isnat(actual)
        if isdesnat and isactnat:
            if array(desired).dtype.type == array(actual).dtype.type:
                return
            else:
                raise AssertionError(msg)

    # If TypeError or ValueError raised while using isnan and co, just handle
    # as before
    except (TypeError, ValueError, NotImplementedError):
        pass

    # Explicitly use __eq__ for comparison, ticket #2552
    if not (desired == actual):
        raise AssertionError(msg)
+
+
def print_assert_equal(test_string, actual, desired):
    """
    Test if two objects are equal, and print an error message if test fails.

    The test is performed with ``actual == desired``.

    Parameters
    ----------
    test_string : str
        The message supplied to AssertionError.
    actual : object
        The object to test for equality against `desired`.
    desired : object
        The expected result.

    Examples
    --------
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
    Traceback (most recent call last):
    ...
    AssertionError: Test XYZ of func xyz failed
    ACTUAL:
    [0, 1]
    DESIRED:
    [0, 2]

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import pprint

    if actual == desired:
        return
    # Assemble the failure report: header, then a pretty-printed dump of
    # each side, in ACTUAL/DESIRED order.
    buf = StringIO()
    buf.write(test_string)
    buf.write(' failed\n')
    for label, obj in (('ACTUAL: \n', actual), ('DESIRED: \n', desired)):
        buf.write(label)
        pprint.pprint(obj, buf)
    raise AssertionError(buf.getvalue())
+
+
def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
    """
    Raises an AssertionError if two items are not equal up to desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    The test verifies that the elements of ``actual`` and ``desired`` satisfy.

        ``abs(desired-actual) < 1.5 * 10**(-decimal)``

    That is a looser test than originally documented, but agrees with what the
    actual implementation in `assert_array_almost_equal` did up to rounding
    vagaries. An exception is raised at conflicting values. For ndarrays this
    delegates to assert_array_almost_equal

    Parameters
    ----------
    actual : array_like
        The object to check.
    desired : array_like
        The expected object.
    decimal : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> import numpy.testing as npt
    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
    >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal:
     ACTUAL: 2.3333333333333002
     DESIRED: 2.3333333399999998

    >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
    ...                         np.array([1.0,2.33333334]), decimal=9)
    ...
    <type 'exceptions.AssertionError'>:
    Arrays are not almost equal
    <BLANKLINE>
    (mismatch 50.0%)
     x: array([ 1.        ,  2.33333333])
     y: array([ 1.        ,  2.33333334])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import ndarray
    from numpy.lib import iscomplexobj, real, imag

    # Handle complex numbers: separate into real/imag to handle
    # nan/inf/negative zero correctly
    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
    try:
        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
    except ValueError:
        usecomplex = False

    def _build_err_msg():
        # Lazily-formatted failure message shared by every error path below.
        header = ('Arrays are not almost equal to %d decimals' % decimal)
        return build_err_msg([actual, desired], err_msg, verbose=verbose,
                             header=header)

    if usecomplex:
        # Compare real and imaginary parts independently; a plain real is
        # treated as having a zero imaginary part.
        if iscomplexobj(actual):
            actualr = real(actual)
            actuali = imag(actual)
        else:
            actualr = actual
            actuali = 0
        if iscomplexobj(desired):
            desiredr = real(desired)
            desiredi = imag(desired)
        else:
            desiredr = desired
            desiredi = 0
        try:
            assert_almost_equal(actualr, desiredr, decimal=decimal)
            assert_almost_equal(actuali, desiredi, decimal=decimal)
        except AssertionError:
            # Re-raise with a message showing the original complex values.
            raise AssertionError(_build_err_msg())
        # NOTE(review): no early return here -- control deliberately(?) falls
        # through to the checks below; for complex scalars gisfinite raises
        # and the final abs() test still holds, so behavior is unchanged.

    if isinstance(actual, (ndarray, tuple, list)) \
            or isinstance(desired, (ndarray, tuple, list)):
        # Array-likes get the elementwise treatment (incl. shape checks).
        return assert_array_almost_equal(actual, desired, decimal, err_msg)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(_build_err_msg())
            else:
                # inf/-inf must match exactly; no tolerance applies.
                if not desired == actual:
                    raise AssertionError(_build_err_msg())
            return
    except (NotImplementedError, TypeError):
        # gisfinite/gisnan may reject non-numeric inputs; fall back to the
        # plain magnitude test below.
        pass
    # Half a unit in the last requested decimal place -- the looser bound
    # documented in the docstring above.
    if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
        raise AssertionError(_build_err_msg())
+
+
def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
    """
    Raises an AssertionError if two items are not equal up to significant
    digits.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    Given two numbers, check that they are approximately equal.
    Approximately equal is defined as the number of significant digits
    that agree.

    Parameters
    ----------
    actual : scalar
        The object to check.
    desired : scalar
        The expected object.
    significant : int, optional
        Desired precision, default is 7.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
      If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
                                       significant=8)
    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
                                       significant=8)
    ...
    <type 'exceptions.AssertionError'>:
    Items are not equal to 8 significant digits:
     ACTUAL: 1.234567e-021
     DESIRED: 1.2345672000000001e-021

    the evaluated condition that raises the exception is

    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
    True

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    # Coerce both inputs to plain Python floats (raises for non-numerics).
    (actual, desired) = map(float, (actual, desired))
    if desired == actual:
        return
    # Normalized the numbers to be in range (-10.0,10.0)
    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
    with np.errstate(invalid='ignore'):
        scale = 0.5*(np.abs(desired) + np.abs(actual))
        scale = np.power(10, np.floor(np.log10(scale)))
    try:
        sc_desired = desired/scale
    except ZeroDivisionError:
        sc_desired = 0.0
    try:
        sc_actual = actual/scale
    except ZeroDivisionError:
        # NOTE(review): `scale` is a numpy scalar here, so division by zero
        # normally yields inf/nan instead of raising; these guards look
        # defensive/legacy -- confirm before removing.
        sc_actual = 0.0
    msg = build_err_msg([actual, desired], err_msg,
                        header='Items are not equal to %d significant digits:' %
                        significant,
                        verbose=verbose)
    try:
        # If one of desired/actual is not finite, handle it specially here:
        # check that both are nan if any is a nan, and test for equality
        # otherwise
        if not (gisfinite(desired) and gisfinite(actual)):
            if gisnan(desired) or gisnan(actual):
                if not (gisnan(desired) and gisnan(actual)):
                    raise AssertionError(msg)
            else:
                if not desired == actual:
                    raise AssertionError(msg)
            return
    except (TypeError, NotImplementedError):
        pass
    # Agreement to `significant` digits means the scaled values differ by
    # less than one unit in the (significant-1)-th decimal place.
    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
        raise AssertionError(msg)
+
+
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
                         header='', precision=6, equal_nan=True,
                         equal_inf=True):
    """
    Shared worker behind the array-comparison asserts.

    Coerces `x` and `y` to arrays, checks shape compatibility, optionally
    validates and strips matching nan/inf/NaT entries, then applies
    ``comparison(x, y)`` elementwise and raises an AssertionError (built by
    `build_err_msg`) describing any mismatch.

    Parameters
    ----------
    comparison : callable
        Elementwise predicate such as ``operator.__eq__`` or an ``isclose``
        wrapper; must return a bool or a boolean array.
    x, y : array_like
        Actual and desired values.
    err_msg : str, optional
        Extra text appended to the failure message.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.
    header : str, optional
        First line of the failure message.
    precision : int, optional
        Print precision used for values in the failure message.
    equal_nan : bool, optional
        If True, NaN (or NaT) entries at matching positions compare equal.
    equal_inf : bool, optional
        If True, +/-inf entries at matching positions compare equal.
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import array, isnan, isinf, any, inf
    x = array(x, copy=False, subok=True)
    y = array(y, copy=False, subok=True)

    def isnumber(x):
        # bool, signed/unsigned ints, floats, complex -- not datetime/object.
        return x.dtype.char in '?bhilqpBHILQPefdgFDG'

    def istime(x):
        # datetime64 ('M') or timedelta64 ('m').
        return x.dtype.char in "Mm"

    def chk_same_position(x_id, y_id, hasval='nan'):
        """Handling nan/inf: check that x and y have the nan/inf at the same
        locations."""
        try:
            assert_array_equal(x_id, y_id)
        except AssertionError:
            msg = build_err_msg([x, y],
                                err_msg + '\nx and y %s location mismatch:'
                                % (hasval), verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)

    try:
        # 0-d arrays broadcast against anything; otherwise shapes must match.
        cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
        if not cond:
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(shapes %s, %s mismatch)' % (x.shape,
                                                                  y.shape),
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            raise AssertionError(msg)

        if isnumber(x) and isnumber(y):
            has_nan = has_inf = False
            if equal_nan:
                x_isnan, y_isnan = isnan(x), isnan(y)
                # Validate that NaNs are in the same place
                has_nan = any(x_isnan) or any(y_isnan)
                if has_nan:
                    chk_same_position(x_isnan, y_isnan, hasval='nan')

            if equal_inf:
                x_isinf, y_isinf = isinf(x), isinf(y)
                # Validate that infinite values are in the same place
                has_inf = any(x_isinf) or any(y_isinf)
                if has_inf:
                    # Check +inf and -inf separately, since they are different
                    chk_same_position(x == +inf, y == +inf, hasval='+inf')
                    chk_same_position(x == -inf, y == -inf, hasval='-inf')

            # Drop the already-validated nan/inf entries so that only finite
            # values reach `comparison` below.
            if has_nan and has_inf:
                x = x[~(x_isnan | x_isinf)]
                y = y[~(y_isnan | y_isinf)]
            elif has_nan:
                x = x[~x_isnan]
                y = y[~y_isnan]
            elif has_inf:
                x = x[~x_isinf]
                y = y[~y_isinf]

            # Only do the comparison if actual values are left
            if x.size == 0:
                return

        elif istime(x) and istime(y):
            # If one is datetime64 and the other timedelta64 there is no point
            if equal_nan and x.dtype.type == y.dtype.type:
                # NOTE(review): `isnat` is presumably a module-level import
                # not visible in this chunk -- confirm.
                x_isnat, y_isnat = isnat(x), isnat(y)

                if any(x_isnat) or any(y_isnat):
                    chk_same_position(x_isnat, y_isnat, hasval="NaT")

                if any(x_isnat) or any(y_isnat):
                    x = x[~x_isnat]
                    y = y[~y_isnat]

        val = comparison(x, y)

        if isinstance(val, bool):
            # Scalar result: a single pass/fail, dummy `reduced` for the
            # percentage computation below.
            cond = val
            reduced = [0]
        else:
            reduced = val.ravel()
            cond = reduced.all()
            reduced = reduced.tolist()
        if not cond:
            # Percentage of mismatching positions; True entries compare
            # equal to 1 and are counted as matches.
            match = 100-100.0*reduced.count(1)/len(reduced)
            msg = build_err_msg([x, y],
                                err_msg
                                + '\n(mismatch %s%%)' % (match,),
                                verbose=verbose, header=header,
                                names=('x', 'y'), precision=precision)
            if not cond:
                raise AssertionError(msg)
    except ValueError:
        # The array machinery itself failed (e.g. inside `comparison`);
        # surface the traceback with full context as a ValueError.
        import traceback
        efmt = traceback.format_exc()
        header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)

        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
                            names=('x', 'y'), precision=precision)
        raise ValueError(msg)
+
+
def assert_array_equal(x, y, err_msg='', verbose=True):
    """
    Raise an AssertionError if two array_like objects are not equal.

    The shapes must match (scalars broadcast) and every element must compare
    equal. Unlike the usual numpy semantics, NaNs occurring at the same
    positions in both objects count as equal, so no assertion is raised for
    them.

    Exact equality of floating point results is rarely what is wanted; prefer
    `assert_allclose` or one of the nulp-based checks for those.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    >>> np.testing.assert_array_equal([1.0, 2.33333, np.nan],
    ...                               [np.exp(0), 2.33333, np.nan])
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # All of the work (shape check, nan/inf bookkeeping, message formatting)
    # lives in the shared driver; equality is plain elementwise ``==``.
    assert_array_compare(
        operator.__eq__, x, y,
        err_msg=err_msg,
        verbose=verbose,
        header='Arrays are not equal')
+
+
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Raises an AssertionError if two objects are not equal up to desired
    precision.

    .. note:: It is recommended to use one of `assert_allclose`,
              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
              instead of this function for more consistent floating point
              comparisons.

    The test verifies identical shapes and that the elements of ``actual`` and
    ``desired`` satisfy.

        ``abs(desired-actual) < 1.5 * 10**(-decimal)``

    That is a looser test than originally documented, but agrees with what the
    actual implementation did up to rounding vagaries. An exception is raised
    at shape mismatch or conflicting values. In contrast to the standard usage
    in numpy, NaNs are compared like numbers, no assertion is raised if both
    objects have NaNs in the same positions.

    Parameters
    ----------
    x : array_like
        The actual object to check.
    y : array_like
        The desired, expected object.
    decimal : int, optional
        Desired precision, default is 6.
    err_msg : str, optional
      The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_allclose: Compare two array_like objects for equality with desired
                     relative and/or absolute precision.
    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal

    Examples
    --------
    the first assert does not raise an exception

    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
                                             [1.0,2.333,np.nan])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33339,np.nan], decimal=5)
    ...
    <type 'exceptions.AssertionError'>:
    AssertionError:
    Arrays are not almost equal
    <BLANKLINE>
    (mismatch 50.0%)
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33339,      NaN])

    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
    ...                                      [1.0,2.33333, 5], decimal=5)
    <type 'exceptions.ValueError'>:
    ValueError:
    Arrays are not almost equal
     x: array([ 1.     ,  2.33333,      NaN])
     y: array([ 1.     ,  2.33333,  5.     ])

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    from numpy.core import around, number, float_, result_type, array
    from numpy.core.numerictypes import issubdtype
    from numpy.core.fromnumeric import any as npany

    def compare(x, y):
        # Infinities must match in sign and position before the tolerance
        # test; the finite remainder is then checked against the decimal
        # bound below.
        try:
            if npany(gisinf(x)) or npany( gisinf(y)):
                xinfid = gisinf(x)
                yinfid = gisinf(y)
                if not (xinfid == yinfid).all():
                    return False
                # if one item, x and y is +- inf
                if x.size == y.size == 1:
                    return x == y
                x = x[~xinfid]
                y = y[~yinfid]
        # NOTE(review): gisinf may raise on non-float inputs (e.g. object
        # arrays); such inputs skip the inf screening entirely.
        except (TypeError, NotImplementedError):
            pass

        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
        # casting of x later.
        dtype = result_type(y, 1.)
        y = array(y, dtype=dtype, copy=False, subok=True)
        z = abs(x - y)

        if not issubdtype(z.dtype, number):
            z = z.astype(float_)  # handle object arrays

        # Looser-than-documented bound; see the docstring above.
        return z < 1.5 * 10.0**(-decimal)

    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
                         header=('Arrays are not almost equal to %d decimals' % decimal),
                         precision=decimal)
+
+
def assert_array_less(x, y, err_msg='', verbose=True):
    """
    Raise an AssertionError unless every element of `x` is strictly smaller
    than the corresponding element of `y`.

    The shapes must be equal (a zero-dimensional object broadcasts and does
    not trigger the shape check). In contrast to the standard usage in numpy,
    NaNs are compared: no assertion is raised if both objects have NaNs in
    the same positions.

    Parameters
    ----------
    x : array_like
        The smaller object to check.
    y : array_like
        The larger object to compare.
    err_msg : string
        The error message to be printed in case of failure.
    verbose : bool
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired objects are not equal.

    See Also
    --------
    assert_array_equal: tests objects for equality
    assert_array_almost_equal: test objects for equality up to precision

    Examples
    --------
    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
    >>> np.testing.assert_array_less([1.0, 4.0], 3)
    ...
    <type 'exceptions.ValueError'>:
    Arrays are not less-ordered
    (mismatch 50.0%)
     x: array([ 1.,  4.])
     y: array(3)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate to the shared driver with strict ``<``; infinities must be
    # strictly ordered too, hence equal_inf=False.
    assert_array_compare(
        operator.__lt__, x, y,
        err_msg=err_msg,
        verbose=verbose,
        header='Arrays are not less-ordered',
        equal_inf=False)
+
+
def runstring(astr, dict):
    # Execute the Python source string `astr` using `dict` as its global
    # namespace (side effects of the code land in `dict`).
    exec(astr, dict)
+
+
def assert_string_equal(actual, desired):
    """
    Test if two strings are equal.

    If the given strings are equal, `assert_string_equal` does nothing.
    If they are not equal, an AssertionError is raised, and the diff
    between the strings is shown.

    Parameters
    ----------
    actual : str
        The string to test for equality against the expected string.
    desired : str
        The expected string.

    Examples
    --------
    >>> np.testing.assert_string_equal('abc', 'abc')
    >>> np.testing.assert_string_equal('abc', 'abcd')
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ...
    AssertionError: Differences in strings:
    - abc+ abcd?    +

    """
    # delay import of difflib to reduce startup time
    __tracebackhide__ = True  # Hide traceback for py.test
    import difflib

    if not isinstance(actual, str):
        raise AssertionError(repr(type(actual)))
    if not isinstance(desired, str):
        raise AssertionError(repr(type(desired)))
    # Plain equality, not ``re.match(desired, actual)`` as before: treating
    # `desired` as a regex made equal strings containing metacharacters
    # (e.g. '(' or '*') crash with re.error instead of passing.
    if desired == actual:
        return

    # Build a line diff; only the -/+/? lines (actual differences) are kept.
    diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
    diff_list = []
    while diff:
        d1 = diff.pop(0)
        if d1.startswith('  '):
            # Line common to both strings -- not a difference.
            continue
        if d1.startswith('- '):
            l = [d1]
            d2 = diff.pop(0)
            if d2.startswith('? '):
                l.append(d2)
                d2 = diff.pop(0)
            if not d2.startswith('+ '):
                raise AssertionError(repr(d2))
            l.append(d2)
            if diff:
                d3 = diff.pop(0)
                if d3.startswith('? '):
                    l.append(d3)
                else:
                    diff.insert(0, d3)
            # Literal comparison of the changed pair (was a regex match).
            if d2[2:] == d1[2:]:
                continue
            diff_list.extend(l)
            continue
        raise AssertionError(repr(d1))
    if not diff_list:
        return
    msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
    raise AssertionError(msg)
+
+
def rundocs(filename=None, raise_on_error=True):
    """
    Run doctests found in the given file.

    By default `rundocs` raises an AssertionError on failure.

    Parameters
    ----------
    filename : str
        The path to the file for which the doctests are run.
    raise_on_error : bool
        Whether to raise an AssertionError when a doctest fails. Default is
        True.

    Notes
    -----
    The doctests can be run by the user/developer by adding the ``doctests``
    argument to the ``test()`` call. For example, to run all tests (including
    doctests) for `numpy.lib`:

    >>> np.lib.test(doctests=True) #doctest: +SKIP
    """
    from numpy.compat import npy_load_module
    import doctest
    if filename is None:
        # Default to the caller's file.
        f = sys._getframe(1)
        filename = f.f_globals['__file__']
    name = os.path.splitext(os.path.basename(filename))[0]
    # Import the target file as a module so doctest can introspect it.
    m = npy_load_module(name, filename)

    tests = doctest.DocTestFinder().find(m)
    runner = doctest.DocTestRunner(verbose=False)

    msg = []
    if raise_on_error:
        # Collect the doctest report so it can be attached to the error.
        out = lambda s: msg.append(s)
    else:
        out = None

    for test in tests:
        runner.run(test, out=out)

    if runner.failures > 0 and raise_on_error:
        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
def raises(*args,**kwargs):
    # Thin pass-through to ``nose.tools.raises``: a decorator factory whose
    # decorated test passes only if it raises one of the given exceptions.
    nose = import_nose()
    return nose.tools.raises(*args,**kwargs)
+
+
def assert_raises(*args, **kwargs):
    """
    assert_raises(exception_class, callable, *args, **kwargs)
    assert_raises(exception_class)

    Fail unless an exception of class exception_class is thrown
    by callable when invoked with arguments args and keyword
    arguments kwargs. If a different type of exception is
    thrown, it will not be caught, and the test case will be
    deemed to have suffered an error, exactly as for an
    unexpected exception.

    Alternatively, `assert_raises` can be used as a context manager:

    >>> from numpy.testing import assert_raises
    >>> with assert_raises(ZeroDivisionError):
    ...     1 / 0

    is equivalent to

    >>> def div(x, y):
    ...     return x / y
    >>> assert_raises(ZeroDivisionError, div, 1, 0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    # Delegate straight to nose's implementation (which wraps unittest's).
    return import_nose().tools.assert_raises(*args, **kwargs)
+
+
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
    """
    assert_raises_regex(exception_class, expected_regexp, callable, *args,
                        **kwargs)
    assert_raises_regex(exception_class, expected_regexp)

    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.

    Alternatively, can be used as a context manager like `assert_raises`.

    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.

    Notes
    -----
    .. versionadded:: 1.9.0

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    nose = import_nose()

    # Python 3 renamed unittest's assertRaisesRegexp; Python 2.7 only has
    # the old spelling, so pick whichever this interpreter provides.
    attr = ('assert_raises_regex' if sys.version_info.major >= 3
            else 'assert_raises_regexp')
    return getattr(nose.tools, attr)(exception_class, expected_regexp,
                                     *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+ """
+ Apply a decorator to all methods in a class matching a regular expression.
+
+ The given decorator is applied to all public methods of `cls` that are
+ matched by the regular expression `testmatch`
+ (``testmatch.search(methodname)``). Methods that are private, i.e. start
+ with an underscore, are ignored.
+
+ Parameters
+ ----------
+ cls : class
+ Class whose methods to decorate.
+ decorator : function
+ Decorator to apply to methods
+ testmatch : compiled regexp or str, optional
+ The regular expression. Default value is None, in which case the
+ nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+ is used.
+ If `testmatch` is a string, it is compiled to a regular expression
+ first.
+
+ """
+ if testmatch is None:
+ testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ else:
+ testmatch = re.compile(testmatch)
+ cls_attr = cls.__dict__
+
+ # delayed import to reduce startup time
+ from inspect import isfunction
+
+ methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+ for function in methods:
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ continue
+ if testmatch.search(funcname) and not funcname.startswith('_'):
+ setattr(cls, funcname, decorator(function))
+ return
+
+
def measure(code_str,times=1,label=None):
    """
    Return elapsed time for executing code in the namespace of the caller.

    The supplied code string is compiled with the Python builtin ``compile``.
    The precision of the timing is 10 milli-seconds. If the code will execute
    fast on this timescale, it can be executed many times to get reasonable
    timing accuracy.

    Parameters
    ----------
    code_str : str
        The code to be timed.
    times : int, optional
        The number of times the code is executed. Default is 1. The code is
        only compiled once.
    label : str, optional
        A label to identify `code_str` with. This is passed into ``compile``
        as the second argument (for run-time error messages).

    Returns
    -------
    elapsed : float
        Total elapsed time in seconds for executing `code_str` `times` times.

    Examples
    --------
    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
    ...                            times=times)
    >>> print("Time for a single execution : ", etime / times, "s")
    Time for a single execution :  0.005 s

    """
    # Grab the caller's namespaces so `code_str` can reference its variables.
    frame = sys._getframe(1)
    locs, globs = frame.f_locals, frame.f_globals

    # Compile once; the label shows up as the "filename" in tracebacks.
    code = compile(code_str,
                   'Test name: %s ' % label,
                   'exec')
    i = 0
    # NOTE(review): jiffies() is a module-level clock helper; the 0.01
    # factor below suggests it ticks in hundredths of a second -- confirm.
    elapsed = jiffies()
    while i < times:
        i += 1
        exec(code, globs, locs)
    elapsed = jiffies() - elapsed
    return 0.01*elapsed
+
+
def _assert_valid_refcount(op):
    """
    Check that ufuncs don't mishandle refcount of object `1`.
    Used in a few regression tests.
    """
    if not HAS_REFCOUNT:
        # Interpreter without refcount introspection (no sys.getrefcount);
        # nothing meaningful to check.
        return True
    import numpy as np

    b = np.arange(100*100).reshape(100, 100)
    c = b
    i = 1

    # Snapshot the refcount of the small int, then run the op repeatedly;
    # a buggy ufunc that decrefs `1` too often would drive the count down.
    rc = sys.getrefcount(i)
    for j in range(15):
        d = op(b, c)
        assert_(sys.getrefcount(i) >= rc)
        del d  # for pyflakes
+
+
def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
                    err_msg='', verbose=True):
    """
    Raise an AssertionError unless ``actual`` and ``desired`` agree to the
    given tolerance.

    Equivalent to ``allclose(actual, desired, rtol, atol)``: the elementwise
    difference is compared against ``atol + rtol * abs(desired)``.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    actual : array_like
        Array obtained.
    desired : array_like
        Array desired.
    rtol : float, optional
        Relative tolerance.
    atol : float, optional
        Absolute tolerance.
    equal_nan : bool, optional.
        If True, NaNs will compare equal.
    err_msg : str, optional
        The error message to be printed in case of failure.
    verbose : bool, optional
        If True, the conflicting values are appended to the error message.

    Raises
    ------
    AssertionError
        If actual and desired are not equal up to specified precision.

    See Also
    --------
    assert_array_almost_equal_nulp, assert_array_max_ulp

    Examples
    --------
    >>> x = [1e-5, 1e-3, 1e-1]
    >>> y = np.arccos(np.cos(x))
    >>> assert_allclose(x, y, rtol=1e-5, atol=0)

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    # Elementwise predicate handed to the shared comparison driver.
    compare = lambda x, y: np.core.numeric.isclose(
        x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)

    header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
    assert_array_compare(compare, np.asanyarray(actual),
                         np.asanyarray(desired), err_msg=str(err_msg),
                         verbose=verbose, header=header, equal_nan=equal_nan)
+
+
def assert_array_almost_equal_nulp(x, y, nulp=1):
    """
    Compare two arrays relatively to their spacing.

    This is a relatively robust method to compare two arrays whose amplitude
    is variable.

    Parameters
    ----------
    x, y : array_like
        Input arrays.
    nulp : int, optional
        The maximum number of unit in the last place for tolerance (see Notes).
        Default is 1.

    Returns
    -------
    None

    Raises
    ------
    AssertionError
        If the spacing between `x` and `y` for one or more elements is larger
        than `nulp`.

    See Also
    --------
    assert_array_max_ulp : Check that all items of arrays differ in at most
        N Units in the Last Place.
    spacing : Return the distance between x and the nearest adjacent number.

    Notes
    -----
    An assertion is raised if the following condition is not met::

        abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))

    Examples
    --------
    >>> x = np.array([1., 1e-10, 1e-20])
    >>> eps = np.finfo(x.dtype).eps
    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np

    # Tolerance: `nulp` units of spacing at the larger of the two magnitudes.
    ax = np.abs(x)
    ay = np.abs(y)
    larger = np.where(ax > ay, ax, ay)
    ref = nulp * np.spacing(larger)
    if np.all(np.abs(x-y) <= ref):
        return
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        msg = "X and Y are not equal to %d ULP" % nulp
    else:
        max_nulp = np.max(nulp_diff(x, y))
        msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
    raise AssertionError(msg)
+
+
def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
    """
    Check that all items of arrays differ in at most N Units in the Last Place.

    Parameters
    ----------
    a, b : array_like
        Input arrays to be compared.
    maxulp : int, optional
        The maximum number of units in the last place that elements of `a` and
        `b` can differ. Default is 1.
    dtype : dtype, optional
        Data-type to convert `a` and `b` to if given. Default is None.

    Returns
    -------
    ret : ndarray
        Array containing number of representable floating point numbers between
        items in `a` and `b`.

    Raises
    ------
    AssertionError
        If one or more elements differ by more than `maxulp`.

    See Also
    --------
    assert_array_almost_equal_nulp : Compare two arrays relatively to their
        spacing.

    Examples
    --------
    >>> a = np.linspace(0., 1., 100)
    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))

    """
    __tracebackhide__ = True  # Hide traceback for py.test
    import numpy as np
    ret = nulp_diff(a, b, dtype)
    # ``not all(<=)`` rather than ``any(>)`` so that NaN entries in `ret`
    # count as failures (NaN comparisons are False either way).
    if not np.all(ret <= maxulp):
        raise AssertionError("Arrays are not almost equal up to %g ULP" %
                             maxulp)
    return ret
+
+
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    points between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data-type to convert `x` and `y` to if given. Default is None.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
    # there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
    1.0
    """
    import numpy as np

    # Coerce inputs to arrays, honouring an explicit dtype when given.
    if dtype:
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
    else:
        x = np.array(x)
        y = np.array(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    # Promote both operands to the common floating type before comparing
    # their integer (sign-magnitude) representations.
    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if x.shape != y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    rx = integer_repr(x)
    ry = integer_repr(y)
    return np.abs(np.array(rx - ry, dtype=t))
+
+
+def _integer_repr(x, vdt, comp):
+ # Reinterpret binary representation of the float as sign-magnitude:
+ # take into account two-complement representation
+ # See also
+ # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
+ rx = x.view(vdt)
+ if not (rx.size == 1):
+ rx[rx < 0] = comp - rx[rx < 0]
+ else:
+ if rx < 0:
+ rx = comp - rx
+
+ return rx
+
+
def integer_repr(x):
    """Return the signed-magnitude interpretation of the binary representation
    of x.

    Parameters
    ----------
    x : ndarray
        Array of ``float16``, ``float32`` or ``float64`` values.

    Returns
    -------
    ndarray
        Integer array (``int16``/``int32``/``int64`` respectively) holding the
        sign-magnitude reinterpretation of each element's bit pattern.

    Raises
    ------
    ValueError
        If ``x.dtype`` is not one of the supported floating point types.
    """
    import numpy as np
    if x.dtype == np.float16:
        # Generalization: half precision maps onto int16 the same way
        # float32/float64 map onto int32/int64.
        return _integer_repr(x, np.int16, np.int16(-2**15))
    elif x.dtype == np.float32:
        return _integer_repr(x, np.int32, np.int32(-2**31))
    elif x.dtype == np.float64:
        return _integer_repr(x, np.int64, np.int64(-2**63))
    else:
        raise ValueError("Unsupported dtype %s" % x.dtype)
+
+
+# The following two classes are copied from python 2.6 warnings module (context
+# manager)
class WarningMessage(object):

    """
    Holds the result of a single showwarning() call.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningMessage` is copied from the Python 2.6 warnings module,
    so it can be used in NumPy with older Python versions.

    """

    # Attribute names mirroring the positional arguments of showwarning().
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each showwarning() argument verbatim on the instance.
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        # Cache the category name for __str__; None when no category given.
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
+
+
class WarningManager(object):
    """
    A context manager that copies and restores the warnings filter upon
    exiting the context.

    The 'record' argument specifies whether warnings should be captured by a
    custom implementation of ``warnings.showwarning()`` and be appended to a
    list returned by the context manager. Otherwise None is returned by the
    context manager. The objects appended to the list are arguments whose
    attributes mirror the arguments to ``showwarning()``.

    The 'module' argument is to specify an alternative module to the module
    named 'warnings' and imported under that name. This argument is only useful
    when testing the warnings module itself.

    Deprecated in 1.8.0

    Notes
    -----
    `WarningManager` is a copy of the ``catch_warnings`` context manager
    from the Python 2.6 warnings module, with slight modifications.
    It is copied so it can be used in NumPy with older Python versions.

    """

    def __init__(self, record=False, module=None):
        # Whether warnings are captured into the list returned by __enter__.
        self._record = record
        # Default to the real warnings module; an alternative is only useful
        # when testing the warnings machinery itself.
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the filter list and install a copy so changes made inside the
        # context do not leak out.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []

            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None

    def __exit__(self, *exc_info):
        # BUG FIX: this method previously took no arguments besides ``self``,
        # but Python always calls ``__exit__(exc_type, exc_value, traceback)``
        # when leaving a ``with`` block, so every exit raised TypeError.
        # Accept and ignore the exception info, exactly like
        # ``warnings.catch_warnings`` does; calling ``__exit__()`` with no
        # arguments still works, so existing callers are unaffected.
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
+
+
@contextlib.contextmanager
def _assert_warns_context(warning_class, name=None):
    # Context-manager core of assert_warns: record matching warnings and
    # fail if none were emitted inside the with-block.
    __tracebackhide__ = True  # Hide traceback for py.test
    with suppress_warnings() as sup:
        caught = sup.record(warning_class)
        yield
        if len(caught) == 0:
            suffix = " when calling %s" % name if name is not None else ""
            raise AssertionError("No warning raised" + suffix)
+
+
def assert_warns(warning_class, *args, **kwargs):
    """
    Fail unless the given callable throws the specified warning.

    A warning of class warning_class should be thrown by the callable when
    invoked with arguments args and keyword arguments kwargs.
    If a different type of warning is thrown, it will not be caught.

    If called with all arguments other than the warning class omitted, may be
    used as a context manager:

        with assert_warns(SomeWarning):
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.4.0

    Parameters
    ----------
    warning_class : class
        The class defining the warning that `func` is expected to throw.
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    # No callable given: hand back the context manager directly.
    if not args:
        return _assert_warns_context(warning_class)

    # Callable form: run func inside the checking context.
    func, call_args = args[0], args[1:]
    with _assert_warns_context(warning_class, name=func.__name__):
        return func(*call_args, **kwargs)
+
+
+@contextlib.contextmanager
+def _assert_no_warnings_context(name=None):
+ __tracebackhide__ = True # Hide traceback for py.test
+ with warnings.catch_warnings(record=True) as l:
+ warnings.simplefilter('always')
+ yield
+ if len(l) > 0:
+ name_str = " when calling %s" % name if name is not None else ""
+ raise AssertionError("Got warnings%s: %s" % (name_str, l))
+
+
def assert_no_warnings(*args, **kwargs):
    """
    Fail if the given callable produces any warnings.

    If called with all arguments omitted, may be used as a context manager:

        with assert_no_warnings():
            do_something()

    The ability to be used as a context manager is new in NumPy v1.11.0.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    func : callable
        The callable to test.
    \\*args : Arguments
        Arguments passed to `func`.
    \\*\\*kwargs : Kwargs
        Keyword arguments passed to `func`.

    Returns
    -------
    The value returned by `func`.

    """
    # No callable given: hand back the context manager directly.
    if not args:
        return _assert_no_warnings_context()

    # Callable form: run func inside the checking context.
    func, call_args = args[0], args[1:]
    with _assert_no_warnings_context(name=func.__name__):
        return func(*call_args, **kwargs)
+
+
def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
    """
    generator producing data with different alignment and offsets
    to test simd vectorization

    Parameters
    ----------
    dtype : dtype
        data type to produce
    type : string
        'unary': create data for unary operations, creates one input
         and output array
        'binary': create data for binary operations, creates two input
         arrays and one output array
    max_size : integer
        maximum size of data to produce

    Returns
    -------
    if type is 'unary' yields one output, one input array and a message
    containing information on the data
    if type is 'binary' yields one output array, two input array and a message
    containing information on the data

    """
    # Format strings describing each yielded case: offsets, size, dtype and
    # whether the operation is out of place / in place / aliased.
    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
    # NOTE(review): `arange`, `empty` and `float32` are presumably imported
    # from numpy at module level — not visible in this hunk; confirm.
    for o in range(3):
        # o is the element offset used to misalign the arrays; slicing [o:]
        # shifts the start of the buffer to exercise unaligned SIMD paths.
        for s in range(o + 2, max(o + 3, max_size)):
            if type == 'unary':
                # Fresh input array per call so aliased cases share buffers
                # only when intended.
                inp = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
                d = inp()
                yield d, d, ufmt % (o, o, s, dtype, 'in place')
                yield out[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'out of place')
                yield inp()[:-1], inp()[1:], ufmt % \
                    (o, o + 1, s - 1, dtype, 'aliased')
                yield inp()[1:], inp()[:-1], ufmt % \
                    (o + 1, o, s - 1, dtype, 'aliased')
            if type == 'binary':
                inp1 = lambda: arange(s, dtype=dtype)[o:]
                inp2 = lambda: arange(s, dtype=dtype)[o:]
                out = empty((s,), dtype=dtype)[o:]
                yield out, inp1(), inp2(), bfmt % \
                    (o, o, o, s, dtype, 'out of place')
                d = inp1()
                yield d, d, inp2(), bfmt % \
                    (o, o, o, s, dtype, 'in place1')
                d = inp2()
                yield d, inp1(), d, bfmt % \
                    (o, o, o, s, dtype, 'in place2')
                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'out of place')
                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'out of place')
                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
                    (o + 1, o, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
                    (o, o + 1, o, s - 1, dtype, 'aliased')
                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
                    (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
class IgnoreException(Exception):
    """Ignoring this exception due to disabled feature"""
+
+
@contextlib.contextmanager
def tempdir(*args, **kwargs):
    """Context manager providing a temporary test folder.

    All arguments are passed as-is to the underlying tempfile.mkdtemp
    function; the directory and its contents are removed on exit.

    """
    path = mkdtemp(*args, **kwargs)
    try:
        yield path
    finally:
        # Always clean up, even if the body raised.
        shutil.rmtree(path)
+
+
@contextlib.contextmanager
def temppath(*args, **kwargs):
    """Context manager for temporary files.

    Context manager that returns the path to a closed temporary file. Its
    parameters are the same as for tempfile.mkstemp and are passed directly
    to that function. The underlying file is removed when the context is
    exited, so it should be closed at that time.

    Windows does not allow a temporary file to be opened if it is already
    open, so the underlying file must be closed after opening before it
    can be opened again.

    """
    fd, path = mkstemp(*args, **kwargs)
    # Close the OS-level handle immediately so the caller can reopen the
    # file (required on Windows).
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)
+
+
class clear_and_catch_warnings(warnings.catch_warnings):
    """ Context manager that resets warning registry for catching warnings

    Warnings can be slippery, because, whenever a warning is triggered, Python
    adds a ``__warningregistry__`` member to the *calling* module. This makes
    it impossible to retrigger the warning in this module, whatever you put in
    the warnings filters. This context manager accepts a sequence of `modules`
    as a keyword argument to its constructor and:

    * stores and removes any ``__warningregistry__`` entries in given `modules`
      on entry;
    * resets ``__warningregistry__`` to its previous state on exit.

    This makes it possible to trigger any warning afresh inside the context
    manager without disturbing the state of warnings outside.

    For compatibility with Python 3.0, please consider all arguments to be
    keyword-only.

    Parameters
    ----------
    record : bool, optional
        Specifies whether warnings should be captured by a custom
        implementation of ``warnings.showwarning()`` and be appended to a list
        returned by the context manager. Otherwise None is returned by the
        context manager. The objects appended to the list are arguments whose
        attributes mirror the arguments to ``showwarning()``.
    modules : sequence, optional
        Sequence of modules for which to reset warnings registry on entry and
        restore on exit. To work correctly, all 'ignore' filters should
        filter by one of these modules.

    Examples
    --------
    >>> import warnings
    >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
    ...     warnings.simplefilter('always')
    ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
    ...     # do something that raises a warning but ignore those in
    ...     # np.core.fromnumeric
    """
    # Subclasses may pre-populate modules to always reset via this attribute.
    class_modules = ()

    def __init__(self, record=False, modules=()):
        self.modules = set(modules).union(self.class_modules)
        self._warnreg_copies = {}
        super(clear_and_catch_warnings, self).__init__(record=record)

    def __enter__(self):
        # Stash and empty each module's registry so warnings can re-trigger.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                registry = mod.__warningregistry__
                self._warnreg_copies[mod] = registry.copy()
                registry.clear()
        return super(clear_and_catch_warnings, self).__enter__()

    def __exit__(self, *exc_info):
        super(clear_and_catch_warnings, self).__exit__(*exc_info)
        # Drop anything recorded inside the context and restore the snapshot.
        for mod in self.modules:
            if hasattr(mod, '__warningregistry__'):
                registry = mod.__warningregistry__
                registry.clear()
                if mod in self._warnreg_copies:
                    registry.update(self._warnreg_copies[mod])
+
+
class suppress_warnings(object):
    """
    Context manager and decorator doing much the same as
    ``warnings.catch_warnings``.

    However, it also provides a filter mechanism to work around
    http://bugs.python.org/issue4180.

    This bug causes Python before 3.4 to not reliably show warnings again
    after they have been ignored once (even within catch_warnings). It
    means that no "ignore" filter can be used easily, since following
    tests might need to see the warning. Additionally it allows easier
    specificity for testing warnings and can be nested.

    Parameters
    ----------
    forwarding_rule : str, optional
        One of "always", "once", "module", or "location". Analogous to
        the usual warnings module filter mode, it is useful to reduce
        noise mostly on the outmost level. Unsuppressed and unrecorded
        warnings will be forwarded based on this rule. Defaults to "always".
        "location" is equivalent to the warnings "default", match by exact
        location the warning originated from.

    Notes
    -----
    Filters added inside the context manager will be discarded again
    when leaving it. Upon entering all filters defined outside a
    context will be applied automatically.

    When a recording filter is added, matching warnings are stored in the
    ``log`` attribute as well as in the list returned by ``record``.

    If filters are added and the ``module`` keyword is given, the
    warning registry of this module will additionally be cleared when
    applying it, entering the context, or exiting it. This could cause
    warnings to appear a second time after leaving the context if they
    were configured to be printed once (default) and were already
    printed before the context was entered.

    Nesting this context manager will work as expected when the
    forwarding rule is "always" (default). Unfiltered and unrecorded
    warnings will be passed out and be matched by the outer level.
    On the outmost level they will be printed (or caught by another
    warnings context). The forwarding rule argument can modify this
    behaviour.

    Like ``catch_warnings`` this context manager is not threadsafe.

    Examples
    --------
    >>> with suppress_warnings() as sup:
    ...     sup.filter(DeprecationWarning, "Some text")
    ...     sup.filter(module=np.ma.core)
    ...     log = sup.record(FutureWarning, "Does this occur?")
    ...     command_giving_warnings()
    ...     # The FutureWarning was given once, the filtered warnings were
    ...     # ignored. All other warnings abide outside settings (may be
    ...     # printed/error)
    ...     assert_(len(log) == 1)
    ...     assert_(len(sup.log) == 1)  # also stored in log attribute

    Or as a decorator:

    >>> sup = suppress_warnings()
    >>> sup.filter(module=np.ma.core)  # module must match exact
    >>> @sup
    >>> def some_function():
    ...     # do something which causes a warning in np.ma.core
    ...     pass
    """
    def __init__(self, forwarding_rule="always"):
        # True while inside the context; guards against re-entry.
        self._entered = False

        # Suppressions are either instance or defined inside one with block:
        self._suppressions = []

        if forwarding_rule not in {"always", "module", "once", "location"}:
            raise ValueError("unsupported forwarding rule.")
        self._forwarding_rule = forwarding_rule

    def _clear_registries(self):
        # Invalidate cached "already shown" state so suppressed warnings can
        # be re-triggered later.
        if hasattr(warnings, "_filters_mutated"):
            # clearing the registry should not be necessary on new pythons,
            # instead the filters should be mutated.
            warnings._filters_mutated()
            return
        # Simply clear the registry, this should normally be harmless,
        # note that on new pythons it would be invalidated anyway.
        for module in self._tmp_modules:
            if hasattr(module, "__warningregistry__"):
                module.__warningregistry__.clear()

    def _filter(self, category=Warning, message="", module=None, record=False):
        # Shared implementation of filter()/record(). When `record` is True a
        # fresh list is created and returned; matched warnings are appended
        # to it in _showwarning.
        if record:
            record = []  # The log where to store warnings
        else:
            record = None
        if self._entered:
            # Apply immediately; remember it only for the current context.
            if module is None:
                warnings.filterwarnings(
                    "always", category=category, message=message)
            else:
                # Anchor the regex so only this exact module matches.
                module_regex = module.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=category, message=message,
                    module=module_regex)
                self._tmp_modules.add(module)
                self._clear_registries()

            self._tmp_suppressions.append(
                (category, message, re.compile(message, re.I), module, record))
        else:
            # Not yet entered: stored on the instance, applied on __enter__.
            self._suppressions.append(
                (category, message, re.compile(message, re.I), module, record))

        return record

    def filter(self, category=Warning, message="", module=None):
        """
        Add a new suppressing filter or apply it if the state is entered.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        self._filter(category=category, message=message, module=module,
                     record=False)

    def record(self, category=Warning, message="", module=None):
        """
        Append a new recording filter or apply it if the state is entered.

        All warnings matching will be appended to the ``log`` attribute.

        Parameters
        ----------
        category : class, optional
            Warning class to filter
        message : string, optional
            Regular expression matching the warning message.
        module : module, optional
            Module to filter for. Note that the module (and its file)
            must match exactly and cannot be a submodule. This may make
            it unreliable for external modules.

        Returns
        -------
        log : list
            A list which will be filled with all matched warnings.

        Notes
        -----
        When added within a context, filters are only added inside
        the context and will be forgotten when the context is exited.
        """
        return self._filter(category=category, message=message, module=module,
                            record=True)

    def __enter__(self):
        # Save global warnings state, install our showwarning hook, and apply
        # every suppression registered on the instance.
        if self._entered:
            raise RuntimeError("cannot enter suppress_warnings twice.")

        self._orig_show = warnings.showwarning
        self._filters = warnings.filters
        warnings.filters = self._filters[:]

        self._entered = True
        self._tmp_suppressions = []
        self._tmp_modules = set()
        self._forwarded = set()

        self.log = []  # reset global log (no need to keep same list)

        for cat, mess, _, mod, log in self._suppressions:
            if log is not None:
                del log[:]  # clear the log
            if mod is None:
                warnings.filterwarnings(
                    "always", category=cat, message=mess)
            else:
                module_regex = mod.__name__.replace('.', r'\.') + '$'
                warnings.filterwarnings(
                    "always", category=cat, message=mess,
                    module=module_regex)
                self._tmp_modules.add(mod)
        warnings.showwarning = self._showwarning
        self._clear_registries()

        return self

    def __exit__(self, *exc_info):
        # Restore the global warnings state saved in __enter__.
        warnings.showwarning = self._orig_show
        warnings.filters = self._filters
        self._clear_registries()
        self._entered = False
        del self._orig_show
        del self._filters

    def _showwarning(self, message, category, filename, lineno,
                     *args, **kwargs):
        # Installed as warnings.showwarning while entered; decides whether a
        # warning is recorded, silently dropped, or forwarded to the original
        # handler according to the forwarding rule.
        use_warnmsg = kwargs.pop("use_warnmsg", None)
        # Iterate newest-first so suppressions added later take precedence.
        for cat, _, pattern, mod, rec in (
                self._suppressions + self._tmp_suppressions)[::-1]:
            if (issubclass(category, cat) and
                    pattern.match(message.args[0]) is not None):
                if mod is None:
                    # Message and category match, either recorded or ignored
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return
                # Use startswith, because warnings strips the c or o from
                # .pyc/.pyo files.
                elif mod.__file__.startswith(filename):
                    # The message and module (filename) match
                    if rec is not None:
                        msg = WarningMessage(message, category, filename,
                                             lineno, **kwargs)
                        self.log.append(msg)
                        rec.append(msg)
                    return

        # There is no filter in place, so pass to the outside handler
        # unless we should only pass it once
        if self._forwarding_rule == "always":
            if use_warnmsg is None:
                self._orig_show(message, category, filename, lineno,
                                *args, **kwargs)
            else:
                # NOTE(review): ``_orig_showmsg`` is never assigned in the
                # code visible here; if ``use_warnmsg`` is ever non-None this
                # would raise AttributeError — confirm against the full file.
                self._orig_showmsg(use_warnmsg)
            return

        # Deduplicate forwarded warnings by a signature whose granularity
        # depends on the forwarding rule.
        if self._forwarding_rule == "once":
            signature = (message.args, category)
        elif self._forwarding_rule == "module":
            signature = (message.args, category, filename)
        elif self._forwarding_rule == "location":
            signature = (message.args, category, filename, lineno)

        if signature in self._forwarded:
            return
        self._forwarded.add(signature)
        if use_warnmsg is None:
            self._orig_show(message, category, filename, lineno, *args,
                            **kwargs)
        else:
            self._orig_showmsg(use_warnmsg)

    def __call__(self, func):
        """
        Function decorator to apply certain suppressions to a whole
        function.
        """
        @wraps(func)
        def new_func(*args, **kwargs):
            with self:
                return func(*args, **kwargs)

        return new_func
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
index ee9d1b4df..563ed14ea 100644
--- a/numpy/testing/noseclasses.py
+++ b/numpy/testing/noseclasses.py
@@ -1,340 +1,6 @@
-# These classes implement a doctest runner plugin for nose, a "known failure"
-# error class, and a customized TestProgram for NumPy.
+"""
+Back compatibility noseclasses module. It will import the appropriate
+set of tools
-# Because this module imports nose directly, it should not
-# be used except by nosetester.py to avoid a general NumPy
-# dependency on nose.
-from __future__ import division, absolute_import, print_function
-
-import os
-import doctest
-import inspect
-
-import nose
-from nose.plugins import doctests as npd
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-from nose.plugins.base import Plugin
-from nose.util import src
-import numpy
-from .nosetester import get_package_name
-from .utils import KnownFailureException, KnownFailureTest
-
-
-# Some of the classes in this module begin with 'Numpy' to clearly distinguish
-# them from the plethora of very similar names from nose/unittest/doctest
-
-#-----------------------------------------------------------------------------
-# Modified version of the one in the stdlib, that fixes a python bug (doctests
-# not found in extension modules, http://bugs.python.org/issue3158)
-class NumpyDocTestFinder(doctest.DocTestFinder):
-
- def _from_module(self, module, object):
- """
- Return true if the given object is defined in the given
- module.
- """
- if module is None:
- return True
- elif inspect.isfunction(object):
- return module.__dict__ is object.__globals__
- elif inspect.isbuiltin(object):
- return module.__name__ == object.__module__
- elif inspect.isclass(object):
- return module.__name__ == object.__module__
- elif inspect.ismethod(object):
- # This one may be a bug in cython that fails to correctly set the
- # __module__ attribute of methods, but since the same error is easy
- # to make by extension code writers, having this safety in place
- # isn't such a bad idea
- return module.__name__ == object.__self__.__class__.__module__
- elif inspect.getmodule(object) is not None:
- return module is inspect.getmodule(object)
- elif hasattr(object, '__module__'):
- return module.__name__ == object.__module__
- elif isinstance(object, property):
- return True # [XX] no way not be sure.
- else:
- raise ValueError("object must be a class or function")
-
- def _find(self, tests, obj, name, module, source_lines, globs, seen):
- """
- Find tests for the given object and any contained objects, and
- add them to `tests`.
- """
-
- doctest.DocTestFinder._find(self, tests, obj, name, module,
- source_lines, globs, seen)
-
- # Below we re-run pieces of the above method with manual modifications,
- # because the original code is buggy and fails to correctly identify
- # doctests in extension modules.
-
- # Local shorthands
- from inspect import (
- isroutine, isclass, ismodule, isfunction, ismethod
- )
-
- # Look for tests in a module's contained objects.
- if ismodule(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- valname1 = '%s.%s' % (name, valname)
- if ( (isroutine(val) or isclass(val))
- and self._from_module(module, val)):
-
- self._find(tests, val, valname1, module, source_lines,
- globs, seen)
-
- # Look for tests in a class's contained objects.
- if isclass(obj) and self._recurse:
- for valname, val in obj.__dict__.items():
- # Special handling for staticmethod/classmethod.
- if isinstance(val, staticmethod):
- val = getattr(obj, valname)
- if isinstance(val, classmethod):
- val = getattr(obj, valname).__func__
-
- # Recurse to methods, properties, and nested classes.
- if ((isfunction(val) or isclass(val) or
- ismethod(val) or isinstance(val, property)) and
- self._from_module(module, val)):
- valname = '%s.%s' % (name, valname)
- self._find(tests, val, valname, module, source_lines,
- globs, seen)
-
-
-# second-chance checker; if the default comparison doesn't
-# pass, then see if the expected output string contains flags that
-# tell us to ignore the output
-class NumpyOutputChecker(doctest.OutputChecker):
- def check_output(self, want, got, optionflags):
- ret = doctest.OutputChecker.check_output(self, want, got,
- optionflags)
- if not ret:
- if "#random" in want:
- return True
-
- # it would be useful to normalize endianness so that
- # bigendian machines don't fail all the tests (and there are
- # actually some bigendian examples in the doctests). Let's try
- # making them all little endian
- got = got.replace("'>", "'<")
- want = want.replace("'>", "'<")
-
- # try to normalize out 32 and 64 bit default int sizes
- for sz in [4, 8]:
- got = got.replace("'<i%d'" % sz, "int")
- want = want.replace("'<i%d'" % sz, "int")
-
- ret = doctest.OutputChecker.check_output(self, want,
- got, optionflags)
-
- return ret
-
-
-# Subclass nose.plugins.doctests.DocTestCase to work around a bug in
-# its constructor that blocks non-default arguments from being passed
-# down into doctest.DocTestCase
-class NumpyDocTestCase(npd.DocTestCase):
- def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
- checker=None, obj=None, result_var='_'):
- self._result_var = result_var
- self._nose_obj = obj
- doctest.DocTestCase.__init__(self, test,
- optionflags=optionflags,
- setUp=setUp, tearDown=tearDown,
- checker=checker)
-
-
-print_state = numpy.get_printoptions()
-
-class NumpyDoctest(npd.Doctest):
- name = 'numpydoctest' # call nosetests with --with-numpydoctest
- score = 1000 # load late, after doctest builtin
-
- # always use whitespace and ellipsis options for doctests
- doctest_optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
-
- # files that should be ignored for doctests
- doctest_ignore = ['generate_numpy_api.py',
- 'setup.py']
-
- # Custom classes; class variables to allow subclassing
- doctest_case_class = NumpyDocTestCase
- out_check_class = NumpyOutputChecker
- test_finder_class = NumpyDocTestFinder
-
- # Don't use the standard doctest option handler; hard-code the option values
- def options(self, parser, env=os.environ):
- Plugin.options(self, parser, env)
- # Test doctests in 'test' files / directories. Standard plugin default
- # is False
- self.doctest_tests = True
- # Variable name; if defined, doctest results stored in this variable in
- # the top-level namespace. None is the standard default
- self.doctest_result_var = None
-
- def configure(self, options, config):
- # parent method sets enabled flag from command line --with-numpydoctest
- Plugin.configure(self, options, config)
- self.finder = self.test_finder_class()
- self.parser = doctest.DocTestParser()
- if self.enabled:
- # Pull standard doctest out of plugin list; there's no reason to run
- # both. In practice the Unplugger plugin above would cover us when
- # run from a standard numpy.test() call; this is just in case
- # someone wants to run our plugin outside the numpy.test() machinery
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != 'doctest']
-
- def set_test_context(self, test):
- """ Configure `test` object to set test context
-
- We set the numpy / scipy standard doctest namespace
-
- Parameters
- ----------
- test : test object
- with ``globs`` dictionary defining namespace
-
- Returns
- -------
- None
-
- Notes
- -----
- `test` object modified in place
- """
- # set the namespace for tests
- pkg_name = get_package_name(os.path.dirname(test.filename))
-
- # Each doctest should execute in an environment equivalent to
- # starting Python and executing "import numpy as np", and,
- # for SciPy packages, an additional import of the local
- # package (so that scipy.linalg.basic.py's doctests have an
- # implicit "from scipy import linalg" as well.
- #
- # Note: __file__ allows the doctest in NoseTester to run
- # without producing an error
- test.globs = {'__builtins__':__builtins__,
- '__file__':'__main__',
- '__name__':'__main__',
- 'np':numpy}
- # add appropriate scipy import for SciPy tests
- if 'scipy' in pkg_name:
- p = pkg_name.split('.')
- p2 = p[-1]
- test.globs[p2] = __import__(pkg_name, test.globs, {}, [p2])
-
- # Override test loading to customize test context (with set_test_context
- # method), set standard docstring options, and install our own test output
- # checker
- def loadTestsFromModule(self, module):
- if not self.matches(module.__name__):
- npd.log.debug("Doctest doesn't want module %s", module)
- return
- try:
- tests = self.finder.find(module)
- except AttributeError:
- # nose allows module.__test__ = False; doctest does not and
- # throws AttributeError
- return
- if not tests:
- return
- tests.sort()
- module_file = src(module.__file__)
- for test in tests:
- if not test.examples:
- continue
- if not test.filename:
- test.filename = module_file
- # Set test namespace; test altered in place
- self.set_test_context(test)
- yield self.doctest_case_class(test,
- optionflags=self.doctest_optflags,
- checker=self.out_check_class(),
- result_var=self.doctest_result_var)
-
- # Add an afterContext method to nose.plugins.doctests.Doctest in order
- # to restore print options to the original state after each doctest
- def afterContext(self):
- numpy.set_printoptions(**print_state)
-
- # Ignore NumPy-specific build files that shouldn't be searched for tests
- def wantFile(self, file):
- bn = os.path.basename(file)
- if bn in self.doctest_ignore:
- return False
- return npd.Doctest.wantFile(self, file)
-
-
-class Unplugger(object):
- """ Nose plugin to remove named plugin late in loading
-
- By default it removes the "doctest" plugin.
- """
- name = 'unplugger'
- enabled = True # always enabled
- score = 4000 # load late in order to be after builtins
-
- def __init__(self, to_unplug='doctest'):
- self.to_unplug = to_unplug
-
- def options(self, parser, env):
- pass
-
- def configure(self, options, config):
- # Pull named plugin out of plugins list
- config.plugins.plugins = [p for p in config.plugins.plugins
- if p.name != self.to_unplug]
-
-
-class KnownFailurePlugin(ErrorClassPlugin):
- '''Plugin that installs a KNOWNFAIL error class for the
- KnownFailureClass exception. When KnownFailure is raised,
- the exception will be logged in the knownfail attribute of the
- result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
- exception will not be counted as an error or failure.'''
- enabled = True
- knownfail = ErrorClass(KnownFailureException,
- label='KNOWNFAIL',
- isfailure=False)
-
- def options(self, parser, env=os.environ):
- env_opt = 'NOSE_WITHOUT_KNOWNFAIL'
- parser.add_option('--no-knownfail', action='store_true',
- dest='noKnownFail', default=env.get(env_opt, False),
- help='Disable special handling of KnownFailure '
- 'exceptions')
-
- def configure(self, options, conf):
- if not self.can_configure:
- return
- self.conf = conf
- disable = getattr(options, 'noKnownFail', False)
- if disable:
- self.enabled = False
-
-KnownFailure = KnownFailurePlugin # backwards compat
-
-
-# Class allows us to save the results of the tests in runTests - see runTests
-# method docstring for details
-class NumpyTestProgram(nose.core.TestProgram):
- def runTests(self):
- """Run Tests. Returns true on success, false on failure, and
- sets self.success to the same value.
-
- Because nose currently discards the test result object, but we need
- to return it to the user, override TestProgram.runTests to retain
- the result
- """
- if self.testRunner is None:
- self.testRunner = nose.core.TextTestRunner(stream=self.config.stream,
- verbosity=self.config.verbosity,
- config=self.config)
- plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
- if plug_runner is not None:
- self.testRunner = plug_runner
- self.result = self.testRunner.run(self.test)
- self.success = self.result.wasSuccessful()
- return self.success
+"""
+from .nose_tools.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 3d9616ed8..b726684c9 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -1,523 +1,10 @@
"""
-Nose test running.
-
-This module implements ``test()`` and ``bench()`` functions for NumPy modules.
+Back compatibility nosetester module. It will import the appropriate
+set of tools
"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import warnings
-from numpy.compat import basestring
-import numpy as np
-
-from .utils import import_nose, suppress_warnings
-
-
-def get_package_name(filepath):
- """
- Given a path where a package is installed, determine its name.
-
- Parameters
- ----------
- filepath : str
- Path to a file. If the determination fails, "numpy" is returned.
-
- Examples
- --------
- >>> np.testing.nosetester.get_package_name('nonsense')
- 'numpy'
-
- """
-
- fullpath = filepath[:]
- pkg_name = []
- while 'site-packages' in filepath or 'dist-packages' in filepath:
- filepath, p2 = os.path.split(filepath)
- if p2 in ('site-packages', 'dist-packages'):
- break
- pkg_name.append(p2)
-
- # if package name determination failed, just default to numpy/scipy
- if not pkg_name:
- if 'scipy' in fullpath:
- return 'scipy'
- else:
- return 'numpy'
-
- # otherwise, reverse to get correct order and return
- pkg_name.reverse()
-
- # don't include the outer egg directory
- if pkg_name[0].endswith('.egg'):
- pkg_name.pop(0)
-
- return '.'.join(pkg_name)
-
-
-def run_module_suite(file_to_run=None, argv=None):
- """
- Run a test module.
-
- Equivalent to calling ``$ nosetests <argv> <file_to_run>`` from
- the command line
-
- Parameters
- ----------
- file_to_run : str, optional
- Path to test module, or None.
- By default, run the module from which this function is called.
- argv : list of strings
- Arguments to be passed to the nose test runner. ``argv[0]`` is
- ignored. All command line arguments accepted by ``nosetests``
- will work. If it is the default value None, sys.argv is used.
-
- .. versionadded:: 1.9.0
-
- Examples
- --------
- Adding the following::
-
- if __name__ == "__main__" :
- run_module_suite(argv=sys.argv)
-
- at the end of a test module will run the tests when that module is
- called in the python interpreter.
-
- Alternatively, calling::
-
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
-
- from an interpreter will run all the test routine in 'test_matlib.py'.
- """
- if file_to_run is None:
- f = sys._getframe(1)
- file_to_run = f.f_locals.get('__file__', None)
- if file_to_run is None:
- raise AssertionError
-
- if argv is None:
- argv = sys.argv + [file_to_run]
- else:
- argv = argv + [file_to_run]
-
- nose = import_nose()
- from .noseclasses import KnownFailurePlugin
- nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
-
-
-class NoseTester(object):
- """
- Nose test runner.
-
- This class is made available as numpy.testing.Tester, and a test function
- is typically added to a package's __init__.py like so::
-
- from numpy.testing import Tester
- test = Tester().test
-
- Calling this test function finds and runs all tests associated with the
- package and all its sub-packages.
-
- Attributes
- ----------
- package_path : str
- Full path to the package to test.
- package_name : str
- Name of the package to test.
-
- Parameters
- ----------
- package : module, str or None, optional
- The package to test. If a string, this should be the full path to
- the package. If None (default), `package` is set to the module from
- which `NoseTester` is initialized.
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- Default is "release".
- depth : int, optional
- If `package` is None, then this can be used to initialize from the
- module of the caller of (the caller of (...)) the code that
- initializes `NoseTester`. Default of 0 means the module of the
- immediate caller; higher values are useful for utility routines that
- want to initialize `NoseTester` objects on behalf of other code.
-
- """
- def __init__(self, package=None, raise_warnings="release", depth=0):
- # Back-compat: 'None' used to mean either "release" or "develop"
- # depending on whether this was a release or develop version of
- # numpy. Those semantics were fine for testing numpy, but not so
- # helpful for downstream projects like scipy that use
- # numpy.testing. (They want to set this based on whether *they* are a
- # release or develop version, not whether numpy is.) So we continue to
- # accept 'None' for back-compat, but it's now just an alias for the
- # default "release".
- if raise_warnings is None:
- raise_warnings = "release"
-
- package_name = None
- if package is None:
- f = sys._getframe(1 + depth)
- package_path = f.f_locals.get('__file__', None)
- if package_path is None:
- raise AssertionError
- package_path = os.path.dirname(package_path)
- package_name = f.f_locals.get('__name__', None)
- elif isinstance(package, type(os)):
- package_path = os.path.dirname(package.__file__)
- package_name = getattr(package, '__name__', None)
- else:
- package_path = str(package)
-
- self.package_path = package_path
-
- # Find the package name under test; this name is used to limit coverage
- # reporting (if enabled).
- if package_name is None:
- package_name = get_package_name(package_path)
- self.package_name = package_name
-
- # Set to "release" in constructor in maintenance branches.
- self.raise_warnings = raise_warnings
-
- def _test_argv(self, label, verbose, extra_argv):
- ''' Generate argv for nosetest command
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- see ``test`` docstring
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- argv : list
- command line arguments that will be passed to nose
- '''
- argv = [__file__, self.package_path, '-s']
- if label and label != 'full':
- if not isinstance(label, basestring):
- raise TypeError('Selection label should be a string')
- if label == 'fast':
- label = 'not slow'
- argv += ['-A', label]
- argv += ['--verbosity', str(verbose)]
-
- # When installing with setuptools, and also in some other cases, the
- # test_*.py files end up marked +x executable. Nose, by default, does
- # not run files marked with +x as they might be scripts. However, in
- # our case nose only looks for test_*.py files under the package
- # directory, which should be safe.
- argv += ['--exe']
-
- if extra_argv:
- argv += extra_argv
- return argv
-
- def _show_system_info(self):
- nose = import_nose()
-
- import numpy
- print("NumPy version %s" % numpy.__version__)
- relaxed_strides = numpy.ones((10, 1), order="C").flags.f_contiguous
- print("NumPy relaxed strides checking option:", relaxed_strides)
- npdir = os.path.dirname(numpy.__file__)
- print("NumPy is installed in %s" % npdir)
-
- if 'scipy' in self.package_name:
- import scipy
- print("SciPy version %s" % scipy.__version__)
- spdir = os.path.dirname(scipy.__file__)
- print("SciPy is installed in %s" % spdir)
-
- pyversion = sys.version.replace('\n', '')
- print("Python version %s" % pyversion)
- print("nose version %d.%d.%d" % nose.__versioninfo__)
-
- def _get_custom_doctester(self):
- """ Return instantiated plugin for doctests
-
- Allows subclassing of this class to override doctester
-
- A return value of None means use the nose builtin doctest plugin
- """
- from .noseclasses import NumpyDoctest
- return NumpyDoctest()
-
- def prepare_test_args(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False):
- """
- Run tests for module using nose.
-
- This method does the heavy lifting for the `test` method. It takes all
- the same arguments, for details see `test`.
-
- See Also
- --------
- test
-
- """
- # fail with nice error message if nose is not present
- import_nose()
- # compile argv
- argv = self._test_argv(label, verbose, extra_argv)
- # our way of doing coverage
- if coverage:
- argv += ['--cover-package=%s' % self.package_name, '--with-coverage',
- '--cover-tests', '--cover-erase']
- # construct list of plugins
- import nose.plugins.builtin
- from .noseclasses import KnownFailurePlugin, Unplugger
- plugins = [KnownFailurePlugin()]
- plugins += [p() for p in nose.plugins.builtin.plugins]
- # add doctesting if required
- doctest_argv = '--with-doctest' in argv
- if doctests == False and doctest_argv:
- doctests = True
- plug = self._get_custom_doctester()
- if plug is None:
- # use standard doctesting
- if doctests and not doctest_argv:
- argv += ['--with-doctest']
- else: # custom doctesting
- if doctest_argv: # in fact the unplugger would take care of this
- argv.remove('--with-doctest')
- plugins += [Unplugger('doctest'), plug]
- if doctests:
- argv += ['--with-' + plug.name]
- return argv, plugins
-
- def test(self, label='fast', verbose=1, extra_argv=None,
- doctests=False, coverage=False, raise_warnings=None):
- """
- Run tests for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the tests to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow tests as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for test outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
- doctests : bool, optional
- If True, run doctests in module. Default is False.
- coverage : bool, optional
- If True, report coverage of NumPy code. Default is False.
- (This requires the `coverage module:
- <http://nedbatchelder.com/code/modules/coverage.html>`_).
- raise_warnings : None, str or sequence of warnings, optional
- This specifies which warnings to configure as 'raise' instead
- of being shown once during the test execution. Valid strings are:
-
- - "develop" : equals ``(Warning,)``
- - "release" : equals ``()``, don't raise on any warnings.
-
- The default is to use the class initialization value.
-
- Returns
- -------
- result : object
- Returns the result of running the tests as a
- ``nose.result.TextTestResult`` object.
-
- Notes
- -----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
-
- >>> np.lib.test() #doctest: +SKIP
-
- Examples
- --------
- >>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
- ...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
- """
-
- # cap verbosity at 3 because nose becomes *very* verbose beyond that
- verbose = min(verbose, 3)
-
- from . import utils
- utils.verbose = verbose
-
- if doctests:
- print("Running unit tests and doctests for %s" % self.package_name)
- else:
- print("Running unit tests for %s" % self.package_name)
-
- self._show_system_info()
-
- # reset doctest state on every run
- import doctest
- doctest.master = None
-
- if raise_warnings is None:
- raise_warnings = self.raise_warnings
-
- _warn_opts = dict(develop=(Warning,),
- release=())
- if isinstance(raise_warnings, basestring):
- raise_warnings = _warn_opts[raise_warnings]
-
- with suppress_warnings("location") as sup:
- # Reset the warning filters to the default state,
- # so that running the tests is more repeatable.
- warnings.resetwarnings()
- # Set all warnings to 'warn', this is because the default 'once'
- # has the bad property of possibly shadowing later warnings.
- warnings.filterwarnings('always')
- # Force the requested warnings to raise
- for warningtype in raise_warnings:
- warnings.filterwarnings('error', category=warningtype)
- # Filter out annoying import messages.
- sup.filter(message='Not importing directory')
- sup.filter(message="numpy.dtype size changed")
- sup.filter(message="numpy.ufunc size changed")
- sup.filter(category=np.ModuleDeprecationWarning)
- # Filter out boolean '-' deprecation messages. This allows
- # older versions of scipy to test without a flood of messages.
- sup.filter(message=".*boolean negative.*")
- sup.filter(message=".*boolean subtract.*")
- # Filter out distutils cpu warnings (could be localized to
- # distutils tests). ASV has problems with top level import,
- # so fetch module for suppression here.
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- from ..distutils import cpuinfo
- sup.filter(category=UserWarning, module=cpuinfo)
- # See #7949: Filter out deprecation warnings due to the -3 flag to
- # python 2
- if sys.version_info.major == 2 and sys.py3kwarning:
- # This is very specific, so using the fragile module filter
- # is fine
- import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
- sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
- sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
- # Filter out some deprecation warnings inside nose 1.3.7 when run
- # on python 3.5b2. See
- # https://github.com/nose-devs/nose/issues/929
- # Note: it is hard to filter based on module for sup (lineno could
- # be implemented).
- warnings.filterwarnings("ignore", message=".*getargspec.*",
- category=DeprecationWarning,
- module=r"nose\.")
-
- from .noseclasses import NumpyTestProgram
-
- argv, plugins = self.prepare_test_args(
- label, verbose, extra_argv, doctests, coverage)
-
- t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
-
- return t.result
-
- def bench(self, label='fast', verbose=1, extra_argv=None):
- """
- Run benchmarks for module using nose.
-
- Parameters
- ----------
- label : {'fast', 'full', '', attribute identifier}, optional
- Identifies the benchmarks to run. This can be a string to pass to
- the nosetests executable with the '-A' option, or one of several
- special values. Special values are:
- * 'fast' - the default - which corresponds to the ``nosetests -A``
- option of 'not slow'.
- * 'full' - fast (as above) and slow benchmarks as in the
- 'no -A' option to nosetests - this is the same as ''.
- * None or '' - run all tests.
- attribute_identifier - string passed directly to nosetests as '-A'.
- verbose : int, optional
- Verbosity value for benchmark outputs, in the range 1-10. Default is 1.
- extra_argv : list, optional
- List with any extra arguments to pass to nosetests.
-
- Returns
- -------
- success : bool
- Returns True if running the benchmarks works, False if an error
- occurred.
-
- Notes
- -----
- Benchmarks are like tests, but have names starting with "bench" instead
- of "test", and can be found under the "benchmarks" sub-directory of the
- module.
-
- Each NumPy module exposes `bench` in its namespace to run all benchmarks
- for it.
-
- Examples
- --------
- >>> success = np.lib.bench() #doctest: +SKIP
- Running benchmarks for numpy.lib
- ...
- using 562341 items:
- unique:
- 0.11
- unique1d:
- 0.11
- ratio: 1.0
- nUnique: 56230 == 56230
- ...
- OK
-
- >>> success #doctest: +SKIP
- True
-
- """
-
- print("Running benchmarks for %s" % self.package_name)
- self._show_system_info()
-
- argv = self._test_argv(label, verbose, extra_argv)
- argv += ['--match', r'(?:^|[\\b_\\.%s-])[Bb]ench' % os.sep]
-
- # import nose or make informative error
- nose = import_nose()
-
- # get plugin to disable doctests
- from .noseclasses import Unplugger
- add_plugins = [Unplugger('doctest')]
-
- return nose.run(argv=argv, addplugins=add_plugins)
-
+from .nose_tools.nosetester import *
-def _numpy_tester():
- if hasattr(np, "__version__") and ".dev0" in np.__version__:
- mode = "develop"
- else:
- mode = "release"
- return NoseTester(raise_warnings=mode, depth=1)
+__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
+ '_numpy_tester', 'get_package_name', 'import_nose',
+ 'suppress_warnings']
diff --git a/numpy/testing/setup.py b/numpy/testing/setup.py
index 7c1c237b9..a5e9656a3 100755
--- a/numpy/testing/setup.py
+++ b/numpy/testing/setup.py
@@ -6,6 +6,7 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
+ config.add_subpackage('nose_tools')
config.add_data_dir('tests')
return config
diff --git a/numpy/testing/tests/__init__.py b/numpy/testing/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/testing/tests/__init__.py
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index 02cd9fb88..1258a9296 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -1,3 +1,7 @@
+"""
+Test the decorators from ``testing.decorators``.
+
+"""
from __future__ import division, absolute_import, print_function
import warnings
@@ -13,6 +17,7 @@ def test_slow():
assert_(slow_func.slow)
+
def test_setastest():
@dec.setastest()
def f_default(a):
@@ -30,6 +35,7 @@ def test_setastest():
assert_(f_istest.__test__)
assert_(not f_isnottest.__test__)
+
class DidntSkipException(Exception):
pass
@@ -182,5 +188,13 @@ def test_deprecated():
assert_raises(AssertionError, deprecated_func3)
+@dec.parametrize('base, power, expected',
+ [(1, 1, 1),
+ (2, 1, 2),
+ (2, 2, 4)])
+def test_parametrize(base, power, expected):
+ assert_(base**power == expected)
+
+
if __name__ == '__main__':
run_module_suite()
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index e2c105245..493c538af 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -61,7 +61,7 @@ class _GenericTest(object):
def test_objarray(self):
"""Test object arrays."""
- a = np.array([1, 1], dtype=np.object)
+ a = np.array([1, 1], dtype=object)
self._test_equal(a, 1)
def test_array_likes(self):
@@ -134,14 +134,14 @@ class TestArrayEqual(_GenericTest, unittest.TestCase):
def test_recarrays(self):
"""Test record arrays."""
- a = np.empty(2, [('floupi', np.float), ('floupa', np.float)])
+ a = np.empty(2, [('floupi', float), ('floupa', float)])
a['floupi'] = [1, 2]
a['floupa'] = [1, 2]
b = a.copy()
self._test_equal(a, b)
- c = np.empty(2, [('floupipi', np.float), ('floupa', np.float)])
+ c = np.empty(2, [('floupipi', float), ('floupa', float)])
c['floupipi'] = a['floupi'].copy()
c['floupa'] = a['floupa'].copy()
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index f54995870..7ecb68f47 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -1,29 +1,8 @@
"""
-Utility function to facilitate testing.
+Back compatibility utils module. It will import the appropriate
+set of tools
"""
-from __future__ import division, absolute_import, print_function
-
-import os
-import sys
-import re
-import operator
-import warnings
-from functools import partial, wraps
-import shutil
-import contextlib
-from tempfile import mkdtemp, mkstemp
-from unittest.case import SkipTest
-
-from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
-
-if sys.version_info[0] >= 3:
- from io import StringIO
-else:
- from StringIO import StringIO
-
__all__ = [
'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
@@ -34,2195 +13,8 @@ __all__ = [
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
- 'HAS_REFCOUNT', 'suppress_warnings'
+ 'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
+ '_assert_valid_refcount', '_gen_alignment_data',
]
-
-class KnownFailureException(Exception):
- '''Raise this exception to mark a test as a known failing test.'''
- pass
-
-
-KnownFailureTest = KnownFailureException # backwards compat
-verbose = 0
-
-IS_PYPY = '__pypy__' in sys.modules
-HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
-
-
-def import_nose():
- """ Import nose only when needed.
- """
- nose_is_good = True
- minimum_nose_version = (1, 0, 0)
- try:
- import nose
- except ImportError:
- nose_is_good = False
- else:
- if nose.__versioninfo__ < minimum_nose_version:
- nose_is_good = False
-
- if not nose_is_good:
- msg = ('Need nose >= %d.%d.%d for tests - see '
- 'http://nose.readthedocs.io' %
- minimum_nose_version)
- raise ImportError(msg)
-
- return nose
-
-
-def assert_(val, msg=''):
- """
- Assert that works in release mode.
- Accepts callable msg to allow deferring evaluation until failure.
-
- The Python built-in ``assert`` does not work when executing code in
- optimized mode (the ``-O`` flag) - no byte-code is generated for it.
-
- For documentation on usage, refer to the Python documentation.
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if not val:
- try:
- smsg = msg()
- except TypeError:
- smsg = msg
- raise AssertionError(smsg)
-
-
-def gisnan(x):
- """like isnan, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isnan and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isnan
- st = isnan(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isnan not supported for this type")
- return st
-
-
-def gisfinite(x):
- """like isfinite, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isfinite and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isfinite, errstate
- with errstate(invalid='ignore'):
- st = isfinite(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isfinite not supported for this type")
- return st
-
-
-def gisinf(x):
- """like isinf, but always raise an error if type not supported instead of
- returning a TypeError object.
-
- Notes
- -----
- isinf and other ufunc sometimes return a NotImplementedType object instead
- of raising any exception. This function is a wrapper to make sure an
- exception is always raised.
-
- This should be removed once this problem is solved at the Ufunc level."""
- from numpy.core import isinf, errstate
- with errstate(invalid='ignore'):
- st = isinf(x)
- if isinstance(st, type(NotImplemented)):
- raise TypeError("isinf not supported for this type")
- return st
-
-
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
-if os.name == 'nt':
- # Code "stolen" from enthought/debug/memusage.py
- def GetPerformanceAttributes(object, counter, instance=None,
- inum=-1, format=None, machine=None):
- # NOTE: Many counters require 2 samples to give accurate results,
- # including "% Processor Time" (as by definition, at any instant, a
- # thread's CPU usage is either 0 or 100). To read counters like this,
- # you should copy this function, but keep the counter open, and call
- # CollectQueryData() each time you need to know.
- # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp
- # My older explanation for this was that the "AddCounter" process forced
- # the CPU to 100%, but the above makes more sense :)
- import win32pdh
- if format is None:
- format = win32pdh.PDH_FMT_LONG
- path = win32pdh.MakeCounterPath( (machine, object, instance, None, inum, counter))
- hq = win32pdh.OpenQuery()
- try:
- hc = win32pdh.AddCounter(hq, path)
- try:
- win32pdh.CollectQueryData(hq)
- type, val = win32pdh.GetFormattedCounterValue(hc, format)
- return val
- finally:
- win32pdh.RemoveCounter(hc)
- finally:
- win32pdh.CloseQuery(hq)
-
- def memusage(processName="python", instance=0):
- # from win32pdhutil, part of the win32all package
- import win32pdh
- return GetPerformanceAttributes("Process", "Virtual Bytes",
- processName, instance,
- win32pdh.PDH_FMT_LONG, None)
-elif sys.platform[:5] == 'linux':
-
- def memusage(_proc_pid_stat='/proc/%s/stat' % (os.getpid())):
- """
- Return virtual memory size in bytes of the running python.
-
- """
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[22])
- except:
- return
-else:
- def memusage():
- """
- Return memory usage of running python. [Not implemented]
-
- """
- raise NotImplementedError
-
-
-if sys.platform[:5] == 'linux':
- def jiffies(_proc_pid_stat='/proc/%s/stat' % (os.getpid()),
- _load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- try:
- f = open(_proc_pid_stat, 'r')
- l = f.readline().split(' ')
- f.close()
- return int(l[13])
- except:
- return int(100*(time.time()-_load_time[0]))
-else:
- # os.getpid is not in all platforms available.
- # Using time is safe but inaccurate, especially when process
- # was suspended or sleeping.
- def jiffies(_load_time=[]):
- """
- Return number of jiffies elapsed.
-
- Return number of jiffies (1/100ths of a second) that this
- process has been scheduled in user mode. See man 5 proc.
-
- """
- import time
- if not _load_time:
- _load_time.append(time.time())
- return int(100*(time.time()-_load_time[0]))
-
-
-def build_err_msg(arrays, err_msg, header='Items are not equal:',
- verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
- msg = ['\n' + header]
- if err_msg:
- if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
- msg = [msg[0] + ' ' + err_msg]
- else:
- msg.append(err_msg)
- if verbose:
- for i, a in enumerate(arrays):
-
- if isinstance(a, ndarray):
- # precision argument is only needed if the objects are ndarrays
- r_func = partial(array_repr, precision=precision)
- else:
- r_func = repr
-
- try:
- r = r_func(a)
- except Exception as exc:
- r = '[repr failed for <{}>: {}]'.format(type(a).__name__, exc)
- if r.count('\n') > 3:
- r = '\n'.join(r.splitlines()[:3])
- r += '...'
- msg.append(' %s: %s' % (names[i], r))
- return '\n'.join(msg)
-
-
-def assert_equal(actual, desired, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal.
-
- Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
- check that all elements of these objects are equal. An exception is raised
- at the first conflicting values.
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal.
-
- Examples
- --------
- >>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- item=1
- ACTUAL: 5
- DESIRED: 6
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- if isinstance(desired, dict):
- if not isinstance(actual, dict):
- raise AssertionError(repr(type(actual)))
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k, i in desired.items():
- if k not in actual:
- raise AssertionError(repr(k))
- assert_equal(actual[k], desired[k], 'key=%r\n%s' % (k, err_msg), verbose)
- return
- if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
- assert_equal(len(actual), len(desired), err_msg, verbose)
- for k in range(len(desired)):
- assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k, err_msg), verbose)
- return
- from numpy.core import ndarray, isscalar, signbit
- from numpy.lib import iscomplexobj, real, imag
- if isinstance(actual, ndarray) or isinstance(desired, ndarray):
- return assert_array_equal(actual, desired, err_msg, verbose)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_equal(actualr, desiredr)
- assert_equal(actuali, desiredi)
- except AssertionError:
- raise AssertionError(msg)
-
- # isscalar test to check cases such as [np.nan] != np.nan
- if isscalar(desired) != isscalar(actual):
- raise AssertionError(msg)
-
- # Inf/nan/negative zero handling
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan or isactnan:
- if not (isdesnan and isactnan):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- elif desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- try:
- # If both are NaT (and have the same dtype -- datetime or timedelta)
- # they are considered equal.
- if (isnat(desired) == isnat(actual) and
- array(desired).dtype.type == array(actual).dtype.type):
- return
- else:
- raise AssertionError(msg)
-
- # If TypeError or ValueError raised while using isnan and co, just handle
- # as before
- except (TypeError, ValueError, NotImplementedError):
- pass
-
- # Explicitly use __eq__ for comparison, ticket #2552
- if not (desired == actual):
- raise AssertionError(msg)
-
-
-def print_assert_equal(test_string, actual, desired):
- """
- Test if two objects are equal, and print an error message if test fails.
-
- The test is performed with ``actual == desired``.
-
- Parameters
- ----------
- test_string : str
- The message supplied to AssertionError.
- actual : object
- The object to test for equality against `desired`.
- desired : object
- The expected result.
-
- Examples
- --------
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
- >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
- Traceback (most recent call last):
- ...
- AssertionError: Test XYZ of func xyz failed
- ACTUAL:
- [0, 1]
- DESIRED:
- [0, 2]
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import pprint
-
- if not (actual == desired):
- msg = StringIO()
- msg.write(test_string)
- msg.write(' failed\nACTUAL: \n')
- pprint.pprint(actual, msg)
- msg.write('DESIRED: \n')
- pprint.pprint(desired, msg)
- raise AssertionError(msg.getvalue())
-
-
-def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies that the elements of ``actual`` and ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation in `assert_array_almost_equal` did up to rounding
- vagaries. An exception is raised at conflicting values. For ndarrays this
- delegates to assert_array_almost_equal
-
- Parameters
- ----------
- actual : array_like
- The object to check.
- desired : array_like
- The expected object.
- decimal : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> import numpy.testing as npt
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
- >>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
-
- >>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
- ... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import ndarray
- from numpy.lib import iscomplexobj, real, imag
-
- # Handle complex numbers: separate into real/imag to handle
- # nan/inf/negative zero correctly
- # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
- try:
- usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
- usecomplex = False
-
- def _build_err_msg():
- header = ('Arrays are not almost equal to %d decimals' % decimal)
- return build_err_msg([actual, desired], err_msg, verbose=verbose,
- header=header)
-
- if usecomplex:
- if iscomplexobj(actual):
- actualr = real(actual)
- actuali = imag(actual)
- else:
- actualr = actual
- actuali = 0
- if iscomplexobj(desired):
- desiredr = real(desired)
- desiredi = imag(desired)
- else:
- desiredr = desired
- desiredi = 0
- try:
- assert_almost_equal(actualr, desiredr, decimal=decimal)
- assert_almost_equal(actuali, desiredi, decimal=decimal)
- except AssertionError:
- raise AssertionError(_build_err_msg())
-
- if isinstance(actual, (ndarray, tuple, list)) \
- or isinstance(desired, (ndarray, tuple, list)):
- return assert_array_almost_equal(actual, desired, decimal, err_msg)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(_build_err_msg())
- else:
- if not desired == actual:
- raise AssertionError(_build_err_msg())
- return
- except (NotImplementedError, TypeError):
- pass
- if abs(desired - actual) >= 1.5 * 10.0**(-decimal):
- raise AssertionError(_build_err_msg())
-
-
-def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
- """
- Raises an AssertionError if two items are not equal up to significant
- digits.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- Given two numbers, check that they are approximately equal.
- Approximately equal is defined as the number of significant digits
- that agree.
-
- Parameters
- ----------
- actual : scalar
- The object to check.
- desired : scalar
- The expected object.
- significant : int, optional
- Desired precision, default is 7.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- significant=8)
- >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- significant=8)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
-
- the evaluated condition that raises the exception is
-
- >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
- True
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- (actual, desired) = map(float, (actual, desired))
- if desired == actual:
- return
- # Normalized the numbers to be in range (-10.0,10.0)
- # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
- with np.errstate(invalid='ignore'):
- scale = 0.5*(np.abs(desired) + np.abs(actual))
- scale = np.power(10, np.floor(np.log10(scale)))
- try:
- sc_desired = desired/scale
- except ZeroDivisionError:
- sc_desired = 0.0
- try:
- sc_actual = actual/scale
- except ZeroDivisionError:
- sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
- try:
- # If one of desired/actual is not finite, handle it specially here:
- # check that both are nan if any is a nan, and test for equality
- # otherwise
- if not (gisfinite(desired) and gisfinite(actual)):
- if gisnan(desired) or gisnan(actual):
- if not (gisnan(desired) and gisnan(actual)):
- raise AssertionError(msg)
- else:
- if not desired == actual:
- raise AssertionError(msg)
- return
- except (TypeError, NotImplementedError):
- pass
- if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
- raise AssertionError(msg)
-
-
-def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
- header='', precision=6, equal_nan=True,
- equal_inf=True):
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, isinf, any, inf
- x = array(x, copy=False, subok=True)
- y = array(y, copy=False, subok=True)
-
- def isnumber(x):
- return x.dtype.char in '?bhilqpBHILQPefdgFDG'
-
- def istime(x):
- return x.dtype.char in "Mm"
-
- def chk_same_position(x_id, y_id, hasval='nan'):
- """Handling nan/inf: check that x and y have the nan/inf at the same
- locations."""
- try:
- assert_array_equal(x_id, y_id)
- except AssertionError:
- msg = build_err_msg([x, y],
- err_msg + '\nx and y %s location mismatch:'
- % (hasval), verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- try:
- cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
- if not cond:
- msg = build_err_msg([x, y],
- err_msg
- + '\n(shapes %s, %s mismatch)' % (x.shape,
- y.shape),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise AssertionError(msg)
-
- if isnumber(x) and isnumber(y):
- has_nan = has_inf = False
- if equal_nan:
- x_isnan, y_isnan = isnan(x), isnan(y)
- # Validate that NaNs are in the same place
- has_nan = any(x_isnan) or any(y_isnan)
- if has_nan:
- chk_same_position(x_isnan, y_isnan, hasval='nan')
-
- if equal_inf:
- x_isinf, y_isinf = isinf(x), isinf(y)
- # Validate that infinite values are in the same place
- has_inf = any(x_isinf) or any(y_isinf)
- if has_inf:
- # Check +inf and -inf separately, since they are different
- chk_same_position(x == +inf, y == +inf, hasval='+inf')
- chk_same_position(x == -inf, y == -inf, hasval='-inf')
-
- if has_nan and has_inf:
- x = x[~(x_isnan | x_isinf)]
- y = y[~(y_isnan | y_isinf)]
- elif has_nan:
- x = x[~x_isnan]
- y = y[~y_isnan]
- elif has_inf:
- x = x[~x_isinf]
- y = y[~y_isinf]
-
- # Only do the comparison if actual values are left
- if x.size == 0:
- return
-
- elif istime(x) and istime(y):
- # If one is datetime64 and the other timedelta64 there is no point
- if equal_nan and x.dtype.type == y.dtype.type:
- x_isnat, y_isnat = isnat(x), isnat(y)
-
- if any(x_isnat) or any(y_isnat):
- chk_same_position(x_isnat, y_isnat, hasval="NaT")
-
- if any(x_isnat) or any(y_isnat):
- x = x[~x_isnat]
- y = y[~y_isnat]
-
- val = comparison(x, y)
-
- if isinstance(val, bool):
- cond = val
- reduced = [0]
- else:
- reduced = val.ravel()
- cond = reduced.all()
- reduced = reduced.tolist()
- if not cond:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
- verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- if not cond:
- raise AssertionError(msg)
- except ValueError:
- import traceback
- efmt = traceback.format_exc()
- header = 'error during assertion:\n\n%s\n\n%s' % (efmt, header)
-
- msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
- names=('x', 'y'), precision=precision)
- raise ValueError(msg)
-
-
-def assert_array_equal(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not equal.
-
- Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if
- both objects have NaNs in the same positions.
-
- The usual caution for verifying equality with floating point numbers is
- advised.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- The first assert does not raise an exception:
-
- >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
- ... [np.exp(0),2.33333, np.nan])
-
- Assert fails with numerical inprecision with floats:
-
- >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- AssertionError:
- Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
-
- Use `assert_allclose` or one of the nulp (number of floating point values)
- functions for these cases instead:
-
- >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
- ... [1, np.sqrt(np.pi)**2, np.nan],
- ... rtol=1e-10, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
- verbose=verbose, header='Arrays are not equal')
-
-
-def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- precision.
-
- .. note:: It is recommended to use one of `assert_allclose`,
- `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
- instead of this function for more consistent floating point
- comparisons.
-
- The test verifies identical shapes and that the elements of ``actual`` and
- ``desired`` satisfy.
-
- ``abs(desired-actual) < 1.5 * 10**(-decimal)``
-
- That is a looser test than originally documented, but agrees with what the
- actual implementation did up to rounding vagaries. An exception is raised
- at shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if both
- objects have NaNs in the same positions.
-
- Parameters
- ----------
- x : array_like
- The actual object to check.
- y : array_like
- The desired, expected object.
- decimal : int, optional
- Desired precision, default is 6.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_allclose: Compare two array_like objects for equality with desired
- relative and/or absolute precision.
- assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
-
- Examples
- --------
- the first assert does not raise an exception
-
- >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- [1.0,2.333,np.nan])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
- AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
-
- >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
- ... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
- from numpy.core.numerictypes import issubdtype
- from numpy.core.fromnumeric import any as npany
-
- def compare(x, y):
- try:
- if npany(gisinf(x)) or npany( gisinf(y)):
- xinfid = gisinf(x)
- yinfid = gisinf(y)
- if not (xinfid == yinfid).all():
- return False
- # if one item, x and y is +- inf
- if x.size == y.size == 1:
- return x == y
- x = x[~xinfid]
- y = y[~yinfid]
- except (TypeError, NotImplementedError):
- pass
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False, subok=True)
- z = abs(x - y)
-
- if not issubdtype(z.dtype, number):
- z = z.astype(float_) # handle object arrays
-
- return z < 1.5 * 10.0**(-decimal)
-
- assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
- header=('Arrays are not almost equal to %d decimals' % decimal),
- precision=decimal)
-
-
-def assert_array_less(x, y, err_msg='', verbose=True):
- """
- Raises an AssertionError if two array_like objects are not ordered by less
- than.
-
- Given two array_like objects, check that the shape is equal and all
- elements of the first object are strictly smaller than those of the
- second object. An exception is raised at shape mismatch or incorrectly
- ordered values. Shape mismatch does not raise if an object has zero
- dimension. In contrast to the standard usage in numpy, NaNs are
- compared, no assertion is raised if both objects have NaNs in the same
- positions.
-
-
-
- Parameters
- ----------
- x : array_like
- The smaller object to check.
- y : array_like
- The larger object to compare.
- err_msg : string
- The error message to be printed in case of failure.
- verbose : bool
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired objects are not equal.
-
- See Also
- --------
- assert_array_equal: tests objects for equality
- assert_array_almost_equal: test objects for equality up to precision
-
-
-
- Examples
- --------
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
- >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
-
- >>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
- y: array(3)
-
- >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
- Arrays are not less-ordered
- (shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
- y: array([4])
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
- verbose=verbose,
- header='Arrays are not less-ordered',
- equal_inf=False)
-
-
-def runstring(astr, dict):
- exec(astr, dict)
-
-
-def assert_string_equal(actual, desired):
- """
- Test if two strings are equal.
-
- If the given strings are equal, `assert_string_equal` does nothing.
- If they are not equal, an AssertionError is raised, and the diff
- between the strings is shown.
-
- Parameters
- ----------
- actual : str
- The string to test for equality against the expected string.
- desired : str
- The expected string.
-
- Examples
- --------
- >>> np.testing.assert_string_equal('abc', 'abc')
- >>> np.testing.assert_string_equal('abc', 'abcd')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ...
- AssertionError: Differences in strings:
- - abc+ abcd? +
-
- """
- # delay import of difflib to reduce startup time
- __tracebackhide__ = True # Hide traceback for py.test
- import difflib
-
- if not isinstance(actual, str):
- raise AssertionError(repr(type(actual)))
- if not isinstance(desired, str):
- raise AssertionError(repr(type(desired)))
- if re.match(r'\A'+desired+r'\Z', actual, re.M):
- return
-
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
- diff_list = []
- while diff:
- d1 = diff.pop(0)
- if d1.startswith(' '):
- continue
- if d1.startswith('- '):
- l = [d1]
- d2 = diff.pop(0)
- if d2.startswith('? '):
- l.append(d2)
- d2 = diff.pop(0)
- if not d2.startswith('+ '):
- raise AssertionError(repr(d2))
- l.append(d2)
- if diff:
- d3 = diff.pop(0)
- if d3.startswith('? '):
- l.append(d3)
- else:
- diff.insert(0, d3)
- if re.match(r'\A'+d2[2:]+r'\Z', d1[2:]):
- continue
- diff_list.extend(l)
- continue
- raise AssertionError(repr(d1))
- if not diff_list:
- return
- msg = 'Differences in strings:\n%s' % (''.join(diff_list)).rstrip()
- if actual != desired:
- raise AssertionError(msg)
-
-
-def rundocs(filename=None, raise_on_error=True):
- """
- Run doctests found in the given file.
-
- By default `rundocs` raises an AssertionError on failure.
-
- Parameters
- ----------
- filename : str
- The path to the file for which the doctests are run.
- raise_on_error : bool
- Whether to raise an AssertionError when a doctest fails. Default is
- True.
-
- Notes
- -----
- The doctests can be run by the user/developer by adding the ``doctests``
- argument to the ``test()`` call. For example, to run all tests (including
- doctests) for `numpy.lib`:
-
- >>> np.lib.test(doctests=True) #doctest: +SKIP
- """
- from numpy.compat import npy_load_module
- import doctest
- if filename is None:
- f = sys._getframe(1)
- filename = f.f_globals['__file__']
- name = os.path.splitext(os.path.basename(filename))[0]
- m = npy_load_module(name, filename)
-
- tests = doctest.DocTestFinder().find(m)
- runner = doctest.DocTestRunner(verbose=False)
-
- msg = []
- if raise_on_error:
- out = lambda s: msg.append(s)
- else:
- out = None
-
- for test in tests:
- runner.run(test, out=out)
-
- if runner.failures > 0 and raise_on_error:
- raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
-
-
-def raises(*args,**kwargs):
- nose = import_nose()
- return nose.tools.raises(*args,**kwargs)
-
-
-def assert_raises(*args, **kwargs):
- """
- assert_raises(exception_class, callable, *args, **kwargs)
- assert_raises(exception_class)
-
- Fail unless an exception of class exception_class is thrown
- by callable when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
-
- Alternatively, `assert_raises` can be used as a context manager:
-
- >>> from numpy.testing import assert_raises
- >>> with assert_raises(ZeroDivisionError):
- ... 1 / 0
-
- is equivalent to
-
- >>> def div(x, y):
- ... return x / y
- >>> assert_raises(ZeroDivisionError, div, 1, 0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- nose = import_nose()
- return nose.tools.assert_raises(*args,**kwargs)
-
-
-def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
- """
- assert_raises_regex(exception_class, expected_regexp, callable, *args,
- **kwargs)
- assert_raises_regex(exception_class, expected_regexp)
-
- Fail unless an exception of class exception_class and with message that
- matches expected_regexp is thrown by callable when invoked with arguments
- args and keyword arguments kwargs.
-
- Alternatively, can be used as a context manager like `assert_raises`.
-
- Name of this function adheres to Python 3.2+ reference, but should work in
- all versions down to 2.6.
-
- Notes
- -----
- .. versionadded:: 1.9.0
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- nose = import_nose()
-
- if sys.version_info.major >= 3:
- funcname = nose.tools.assert_raises_regex
- else:
- # Only present in Python 2.7, missing from unittest in 2.6
- funcname = nose.tools.assert_raises_regexp
-
- return funcname(exception_class, expected_regexp, *args, **kwargs)
-
-
-def decorate_methods(cls, decorator, testmatch=None):
- """
- Apply a decorator to all methods in a class matching a regular expression.
-
- The given decorator is applied to all public methods of `cls` that are
- matched by the regular expression `testmatch`
- (``testmatch.search(methodname)``). Methods that are private, i.e. start
- with an underscore, are ignored.
-
- Parameters
- ----------
- cls : class
- Class whose methods to decorate.
- decorator : function
- Decorator to apply to methods
- testmatch : compiled regexp or str, optional
- The regular expression. Default value is None, in which case the
- nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
- is used.
- If `testmatch` is a string, it is compiled to a regular expression
- first.
-
- """
- if testmatch is None:
- testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
- else:
- testmatch = re.compile(testmatch)
- cls_attr = cls.__dict__
-
- # delayed import to reduce startup time
- from inspect import isfunction
-
- methods = [_m for _m in cls_attr.values() if isfunction(_m)]
- for function in methods:
- try:
- if hasattr(function, 'compat_func_name'):
- funcname = function.compat_func_name
- else:
- funcname = function.__name__
- except AttributeError:
- # not a function
- continue
- if testmatch.search(funcname) and not funcname.startswith('_'):
- setattr(cls, funcname, decorator(function))
- return
-
-
-def measure(code_str,times=1,label=None):
- """
- Return elapsed time for executing code in the namespace of the caller.
-
- The supplied code string is compiled with the Python builtin ``compile``.
- The precision of the timing is 10 milli-seconds. If the code will execute
- fast on this timescale, it can be executed many times to get reasonable
- timing accuracy.
-
- Parameters
- ----------
- code_str : str
- The code to be timed.
- times : int, optional
- The number of times the code is executed. Default is 1. The code is
- only compiled once.
- label : str, optional
- A label to identify `code_str` with. This is passed into ``compile``
- as the second argument (for run-time error messages).
-
- Returns
- -------
- elapsed : float
- Total elapsed time in seconds for executing `code_str` `times` times.
-
- Examples
- --------
- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
- ... times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
- Time for a single execution : 0.005 s
-
- """
- frame = sys._getframe(1)
- locs, globs = frame.f_locals, frame.f_globals
-
- code = compile(code_str,
- 'Test name: %s ' % label,
- 'exec')
- i = 0
- elapsed = jiffies()
- while i < times:
- i += 1
- exec(code, globs, locs)
- elapsed = jiffies() - elapsed
- return 0.01*elapsed
-
-
-def _assert_valid_refcount(op):
- """
- Check that ufuncs don't mishandle refcount of object `1`.
- Used in a few regression tests.
- """
- if not HAS_REFCOUNT:
- return True
- import numpy as np
-
- b = np.arange(100*100).reshape(100, 100)
- c = b
- i = 1
-
- rc = sys.getrefcount(i)
- for j in range(15):
- d = op(b, c)
- assert_(sys.getrefcount(i) >= rc)
- del d # for pyflakes
-
-
-def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
- err_msg='', verbose=True):
- """
- Raises an AssertionError if two objects are not equal up to desired
- tolerance.
-
- The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
- It compares the difference between `actual` and `desired` to
- ``atol + rtol * abs(desired)``.
-
- .. versionadded:: 1.5.0
-
- Parameters
- ----------
- actual : array_like
- Array obtained.
- desired : array_like
- Array desired.
- rtol : float, optional
- Relative tolerance.
- atol : float, optional
- Absolute tolerance.
- equal_nan : bool, optional.
- If True, NaNs will compare equal.
- err_msg : str, optional
- The error message to be printed in case of failure.
- verbose : bool, optional
- If True, the conflicting values are appended to the error message.
-
- Raises
- ------
- AssertionError
- If actual and desired are not equal up to specified precision.
-
- See Also
- --------
- assert_array_almost_equal_nulp, assert_array_max_ulp
-
- Examples
- --------
- >>> x = [1e-5, 1e-3, 1e-1]
- >>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
-
- def compare(x, y):
- return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
- equal_nan=equal_nan)
-
- actual, desired = np.asanyarray(actual), np.asanyarray(desired)
- header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
- assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
- verbose=verbose, header=header, equal_nan=equal_nan)
-
-
-def assert_array_almost_equal_nulp(x, y, nulp=1):
- """
- Compare two arrays relatively to their spacing.
-
- This is a relatively robust method to compare two arrays whose amplitude
- is variable.
-
- Parameters
- ----------
- x, y : array_like
- Input arrays.
- nulp : int, optional
- The maximum number of unit in the last place for tolerance (see Notes).
- Default is 1.
-
- Returns
- -------
- None
-
- Raises
- ------
- AssertionError
- If the spacing between `x` and `y` for one or more elements is larger
- than `nulp`.
-
- See Also
- --------
- assert_array_max_ulp : Check that all items of arrays differ in at most
- N Units in the Last Place.
- spacing : Return the distance between x and the nearest adjacent number.
-
- Notes
- -----
- An assertion is raised if the following condition is not met::
-
- abs(x - y) <= nulps * spacing(maximum(abs(x), abs(y)))
-
- Examples
- --------
- >>> x = np.array([1., 1e-10, 1e-20])
- >>> eps = np.finfo(x.dtype).eps
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
-
- >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
- Traceback (most recent call last):
- ...
- AssertionError: X and Y are not equal to 1 ULP (max is 2)
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ax = np.abs(x)
- ay = np.abs(y)
- ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
- if not np.all(np.abs(x-y) <= ref):
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- msg = "X and Y are not equal to %d ULP" % nulp
- else:
- max_nulp = np.max(nulp_diff(x, y))
- msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
- raise AssertionError(msg)
-
-
-def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
- """
- Check that all items of arrays differ in at most N Units in the Last Place.
-
- Parameters
- ----------
- a, b : array_like
- Input arrays to be compared.
- maxulp : int, optional
- The maximum number of units in the last place that elements of `a` and
- `b` can differ. Default is 1.
- dtype : dtype, optional
- Data-type to convert `a` and `b` to if given. Default is None.
-
- Returns
- -------
- ret : ndarray
- Array containing number of representable floating point numbers between
- items in `a` and `b`.
-
- Raises
- ------
- AssertionError
- If one or more elements differ by more than `maxulp`.
-
- See Also
- --------
- assert_array_almost_equal_nulp : Compare two arrays relatively to their
- spacing.
-
- Examples
- --------
- >>> a = np.linspace(0., 1., 100)
- >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
-
- """
- __tracebackhide__ = True # Hide traceback for py.test
- import numpy as np
- ret = nulp_diff(a, b, dtype)
- if not np.all(ret <= maxulp):
- raise AssertionError("Arrays are not almost equal up to %g ULP" %
- maxulp)
- return ret
-
-
-def nulp_diff(x, y, dtype=None):
- """For each item in x and y, return the number of representable floating
- points between them.
-
- Parameters
- ----------
- x : array_like
- first input array
- y : array_like
- second input array
- dtype : dtype, optional
- Data-type to convert `x` and `y` to if given. Default is None.
-
- Returns
- -------
- nulp : array_like
- number of representable floating point numbers between each item in x
- and y.
-
- Examples
- --------
- # By definition, epsilon is the smallest number such as 1 + eps != 1, so
- # there should be exactly one ULP between 1 and 1 + eps
- >>> nulp_diff(1, 1 + np.finfo(x.dtype).eps)
- 1.0
- """
- import numpy as np
- if dtype:
- x = np.array(x, dtype=dtype)
- y = np.array(y, dtype=dtype)
- else:
- x = np.array(x)
- y = np.array(y)
-
- t = np.common_type(x, y)
- if np.iscomplexobj(x) or np.iscomplexobj(y):
- raise NotImplementedError("_nulp not implemented for complex array")
-
- x = np.array(x, dtype=t)
- y = np.array(y, dtype=t)
-
- if not x.shape == y.shape:
- raise ValueError("x and y do not have the same shape: %s - %s" %
- (x.shape, y.shape))
-
- def _diff(rx, ry, vdt):
- diff = np.array(rx-ry, dtype=vdt)
- return np.abs(diff)
-
- rx = integer_repr(x)
- ry = integer_repr(y)
- return _diff(rx, ry, t)
-
-
-def _integer_repr(x, vdt, comp):
- # Reinterpret binary representation of the float as sign-magnitude:
- # take into account two-complement representation
- # See also
- # http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm
- rx = x.view(vdt)
- if not (rx.size == 1):
- rx[rx < 0] = comp - rx[rx < 0]
- else:
- if rx < 0:
- rx = comp - rx
-
- return rx
-
-
-def integer_repr(x):
- """Return the signed-magnitude interpretation of the binary representation of
- x."""
- import numpy as np
- if x.dtype == np.float32:
- return _integer_repr(x, np.int32, np.int32(-2**31))
- elif x.dtype == np.float64:
- return _integer_repr(x, np.int64, np.int64(-2**63))
- else:
- raise ValueError("Unsupported dtype %s" % x.dtype)
-
-
-# The following two classes are copied from python 2.6 warnings module (context
-# manager)
-class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
-
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
- "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
-
-class WarningManager(object):
- """
- A context manager that copies and restores the warnings filter upon
- exiting the context.
-
- The 'record' argument specifies whether warnings should be captured by a
- custom implementation of ``warnings.showwarning()`` and be appended to a
- list returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
-
- The 'module' argument is to specify an alternative module to the module
- named 'warnings' and imported under that name. This argument is only useful
- when testing the warnings module itself.
-
- Deprecated in 1.8.0
-
- Notes
- -----
- `WarningManager` is a copy of the ``catch_warnings`` context manager
- from the Python 2.6 warnings module, with slight modifications.
- It is copied so it can be used in NumPy with older Python versions.
-
- """
-
- def __init__(self, record=False, module=None):
- self._record = record
- if module is None:
- self._module = sys.modules['warnings']
- else:
- self._module = module
- self._entered = False
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("Cannot enter %r twice" % self)
- self._entered = True
- self._filters = self._module.filters
- self._module.filters = self._filters[:]
- self._showwarning = self._module.showwarning
- if self._record:
- log = []
-
- def showwarning(*args, **kwargs):
- log.append(WarningMessage(*args, **kwargs))
- self._module.showwarning = showwarning
- return log
- else:
- return None
-
- def __exit__(self):
- if not self._entered:
- raise RuntimeError("Cannot exit %r without entering first" % self)
- self._module.filters = self._filters
- self._module.showwarning = self._showwarning
-
-
-@contextlib.contextmanager
-def _assert_warns_context(warning_class, name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with suppress_warnings() as sup:
- l = sup.record(warning_class)
- yield
- if not len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("No warning raised" + name_str)
-
-
-def assert_warns(warning_class, *args, **kwargs):
- """
- Fail unless the given callable throws the specified warning.
-
- A warning of class warning_class should be thrown by the callable when
- invoked with arguments args and keyword arguments kwargs.
- If a different type of warning is thrown, it will not be caught.
-
- If called with all arguments other than the warning class omitted, may be
- used as a context manager:
-
- with assert_warns(SomeWarning):
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.4.0
-
- Parameters
- ----------
- warning_class : class
- The class defining the warning that `func` is expected to throw.
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_warns_context(warning_class)
-
- func = args[0]
- args = args[1:]
- with _assert_warns_context(warning_class, name=func.__name__):
- return func(*args, **kwargs)
-
-
-@contextlib.contextmanager
-def _assert_no_warnings_context(name=None):
- __tracebackhide__ = True # Hide traceback for py.test
- with warnings.catch_warnings(record=True) as l:
- warnings.simplefilter('always')
- yield
- if len(l) > 0:
- name_str = " when calling %s" % name if name is not None else ""
- raise AssertionError("Got warnings%s: %s" % (name_str, l))
-
-
-def assert_no_warnings(*args, **kwargs):
- """
- Fail if the given callable produces any warnings.
-
- If called with all arguments omitted, may be used as a context manager:
-
- with assert_no_warnings():
- do_something()
-
- The ability to be used as a context manager is new in NumPy v1.11.0.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- func : callable
- The callable to test.
- \\*args : Arguments
- Arguments passed to `func`.
- \\*\\*kwargs : Kwargs
- Keyword arguments passed to `func`.
-
- Returns
- -------
- The value returned by `func`.
-
- """
- if not args:
- return _assert_no_warnings_context()
-
- func = args[0]
- args = args[1:]
- with _assert_no_warnings_context(name=func.__name__):
- return func(*args, **kwargs)
-
-
-def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
- """
- generator producing data with different alignment and offsets
- to test simd vectorization
-
- Parameters
- ----------
- dtype : dtype
- data type to produce
- type : string
- 'unary': create data for unary operations, creates one input
- and output array
- 'binary': create data for unary operations, creates two input
- and output array
- max_size : integer
- maximum size of data to produce
-
- Returns
- -------
- if type is 'unary' yields one output, one input array and a message
- containing information on the data
- if type is 'binary' yields one output array, two input array and a message
- containing information on the data
-
- """
- ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
- bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
- for o in range(3):
- for s in range(o + 2, max(o + 3, max_size)):
- if type == 'unary':
- inp = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
- d = inp()
- yield d, d, ufmt % (o, o, s, dtype, 'in place')
- yield out[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'out of place')
- yield inp()[:-1], inp()[1:], ufmt % \
- (o, o + 1, s - 1, dtype, 'aliased')
- yield inp()[1:], inp()[:-1], ufmt % \
- (o + 1, o, s - 1, dtype, 'aliased')
- if type == 'binary':
- inp1 = lambda: arange(s, dtype=dtype)[o:]
- inp2 = lambda: arange(s, dtype=dtype)[o:]
- out = empty((s,), dtype=dtype)[o:]
- yield out, inp1(), inp2(), bfmt % \
- (o, o, o, s, dtype, 'out of place')
- d = inp1()
- yield d, d, inp2(), bfmt % \
- (o, o, o, s, dtype, 'in place1')
- d = inp2()
- yield d, inp1(), d, bfmt % \
- (o, o, o, s, dtype, 'in place2')
- yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'out of place')
- yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'out of place')
- yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
- (o + 1, o, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
- (o, o + 1, o, s - 1, dtype, 'aliased')
- yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
- (o, o, o + 1, s - 1, dtype, 'aliased')
-
-
-class IgnoreException(Exception):
- "Ignoring this exception due to disabled feature"
-
-
-@contextlib.contextmanager
-def tempdir(*args, **kwargs):
- """Context manager to provide a temporary test folder.
-
- All arguments are passed as this to the underlying tempfile.mkdtemp
- function.
-
- """
- tmpdir = mkdtemp(*args, **kwargs)
- try:
- yield tmpdir
- finally:
- shutil.rmtree(tmpdir)
-
-
-@contextlib.contextmanager
-def temppath(*args, **kwargs):
- """Context manager for temporary files.
-
- Context manager that returns the path to a closed temporary file. Its
- parameters are the same as for tempfile.mkstemp and are passed directly
- to that function. The underlying file is removed when the context is
- exited, so it should be closed at that time.
-
- Windows does not allow a temporary file to be opened if it is already
- open, so the underlying file must be closed after opening before it
- can be opened again.
-
- """
- fd, path = mkstemp(*args, **kwargs)
- os.close(fd)
- try:
- yield path
- finally:
- os.remove(path)
-
-
-class clear_and_catch_warnings(warnings.catch_warnings):
- """ Context manager that resets warning registry for catching warnings
-
- Warnings can be slippery, because, whenever a warning is triggered, Python
- adds a ``__warningregistry__`` member to the *calling* module. This makes
- it impossible to retrigger the warning in this module, whatever you put in
- the warnings filters. This context manager accepts a sequence of `modules`
- as a keyword argument to its constructor and:
-
- * stores and removes any ``__warningregistry__`` entries in given `modules`
- on entry;
- * resets ``__warningregistry__`` to its previous state on exit.
-
- This makes it possible to trigger any warning afresh inside the context
- manager without disturbing the state of warnings outside.
-
- For compatibility with Python 3.0, please consider all arguments to be
- keyword-only.
-
- Parameters
- ----------
- record : bool, optional
- Specifies whether warnings should be captured by a custom
- implementation of ``warnings.showwarning()`` and be appended to a list
- returned by the context manager. Otherwise None is returned by the
- context manager. The objects appended to the list are arguments whose
- attributes mirror the arguments to ``showwarning()``.
- modules : sequence, optional
- Sequence of modules for which to reset warnings registry on entry and
- restore on exit. To work correctly, all 'ignore' filters should
- filter by one of these modules.
-
- Examples
- --------
- >>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
- ... warnings.simplefilter('always')
- ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
- ... # do something that raises a warning but ignore those in
- ... # np.core.fromnumeric
- """
- class_modules = ()
-
- def __init__(self, record=False, modules=()):
- self.modules = set(modules).union(self.class_modules)
- self._warnreg_copies = {}
- super(clear_and_catch_warnings, self).__init__(record=record)
-
- def __enter__(self):
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod_reg = mod.__warningregistry__
- self._warnreg_copies[mod] = mod_reg.copy()
- mod_reg.clear()
- return super(clear_and_catch_warnings, self).__enter__()
-
- def __exit__(self, *exc_info):
- super(clear_and_catch_warnings, self).__exit__(*exc_info)
- for mod in self.modules:
- if hasattr(mod, '__warningregistry__'):
- mod.__warningregistry__.clear()
- if mod in self._warnreg_copies:
- mod.__warningregistry__.update(self._warnreg_copies[mod])
-
-
-class suppress_warnings(object):
- """
- Context manager and decorator doing much the same as
- ``warnings.catch_warnings``.
-
- However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
-
- This bug causes Python before 3.4 to not reliably show warnings again
- after they have been ignored once (even within catch_warnings). It
- means that no "ignore" filter can be used easily, since following
- tests might need to see the warning. Additionally it allows easier
- specificity for testing warnings and can be nested.
-
- Parameters
- ----------
- forwarding_rule : str, optional
- One of "always", "once", "module", or "location". Analogous to
- the usual warnings module filter mode, it is useful to reduce
- noise mostly on the outmost level. Unsuppressed and unrecorded
- warnings will be forwarded based on this rule. Defaults to "always".
- "location" is equivalent to the warnings "default", match by exact
- location the warning warning originated from.
-
- Notes
- -----
- Filters added inside the context manager will be discarded again
- when leaving it. Upon entering all filters defined outside a
- context will be applied automatically.
-
- When a recording filter is added, matching warnings are stored in the
- ``log`` attribute as well as in the list returned by ``record``.
-
- If filters are added and the ``module`` keyword is given, the
- warning registry of this module will additionally be cleared when
- applying it, entering the context, or exiting it. This could cause
- warnings to appear a second time after leaving the context if they
- were configured to be printed once (default) and were already
- printed before the context was entered.
-
- Nesting this context manager will work as expected when the
- forwarding rule is "always" (default). Unfiltered and unrecorded
- warnings will be passed out and be matched by the outer level.
- On the outmost level they will be printed (or caught by another
- warnings context). The forwarding rule argument can modify this
- behaviour.
-
- Like ``catch_warnings`` this context manager is not threadsafe.
-
- Examples
- --------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
- >>> sup.filter(module=np.ma.core) # module must match exact
- >>> @sup
- >>> def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
- """
- def __init__(self, forwarding_rule="always"):
- self._entered = False
-
- # Suppressions are either instance or defined inside one with block:
- self._suppressions = []
-
- if forwarding_rule not in {"always", "module", "once", "location"}:
- raise ValueError("unsupported forwarding rule.")
- self._forwarding_rule = forwarding_rule
-
- def _clear_registries(self):
- if hasattr(warnings, "_filters_mutated"):
- # clearing the registry should not be necessary on new pythons,
- # instead the filters should be mutated.
- warnings._filters_mutated()
- return
- # Simply clear the registry, this should normally be harmless,
- # note that on new pythons it would be invalidated anyway.
- for module in self._tmp_modules:
- if hasattr(module, "__warningregistry__"):
- module.__warningregistry__.clear()
-
- def _filter(self, category=Warning, message="", module=None, record=False):
- if record:
- record = [] # The log where to store warnings
- else:
- record = None
- if self._entered:
- if module is None:
- warnings.filterwarnings(
- "always", category=category, message=message)
- else:
- module_regex = module.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=category, message=message,
- module=module_regex)
- self._tmp_modules.add(module)
- self._clear_registries()
-
- self._tmp_suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
- else:
- self._suppressions.append(
- (category, message, re.compile(message, re.I), module, record))
-
- return record
-
- def filter(self, category=Warning, message="", module=None):
- """
- Add a new suppressing filter or apply it if the state is entered.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- self._filter(category=category, message=message, module=module,
- record=False)
-
- def record(self, category=Warning, message="", module=None):
- """
- Append a new recording filter or apply it if the state is entered.
-
- All warnings matching will be appended to the ``log`` attribute.
-
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
-
- Returns
- -------
- log : list
- A list which will be filled with all matched warnings.
-
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- return self._filter(category=category, message=message, module=module,
- record=True)
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("cannot enter suppress_warnings twice.")
-
- self._orig_show = warnings.showwarning
- self._filters = warnings.filters
- warnings.filters = self._filters[:]
-
- self._entered = True
- self._tmp_suppressions = []
- self._tmp_modules = set()
- self._forwarded = set()
-
- self.log = [] # reset global log (no need to keep same list)
-
- for cat, mess, _, mod, log in self._suppressions:
- if log is not None:
- del log[:] # clear the log
- if mod is None:
- warnings.filterwarnings(
- "always", category=cat, message=mess)
- else:
- module_regex = mod.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=cat, message=mess,
- module=module_regex)
- self._tmp_modules.add(mod)
- warnings.showwarning = self._showwarning
- self._clear_registries()
-
- return self
-
- def __exit__(self, *exc_info):
- warnings.showwarning = self._orig_show
- warnings.filters = self._filters
- self._clear_registries()
- self._entered = False
- del self._orig_show
- del self._filters
-
- def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
- for cat, _, pattern, mod, rec in (
- self._suppressions + self._tmp_suppressions)[::-1]:
- if (issubclass(category, cat) and
- pattern.match(message.args[0]) is not None):
- if mod is None:
- # Message and category match, either recorded or ignored
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
- # Use startswith, because warnings strips the c or o from
- # .pyc/.pyo files.
- elif mod.__file__.startswith(filename):
- # The message and module (filename) match
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
-
- # There is no filter in place, so pass to the outside handler
- # unless we should only pass it once
- if self._forwarding_rule == "always":
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno,
- *args, **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
- return
-
- if self._forwarding_rule == "once":
- signature = (message.args, category)
- elif self._forwarding_rule == "module":
- signature = (message.args, category, filename)
- elif self._forwarding_rule == "location":
- signature = (message.args, category, filename, lineno)
-
- if signature in self._forwarded:
- return
- self._forwarded.add(signature)
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno, *args,
- **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
-
- def __call__(self, func):
- """
- Function decorator to apply certain suppressions to a whole
- function.
- """
- @wraps(func)
- def new_func(*args, **kwargs):
- with self:
- return func(*args, **kwargs)
-
- return new_func
+from .nose_tools.utils import *
diff --git a/numpy/tests/__init__.py b/numpy/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/numpy/tests/__init__.py
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 2c58f1184..e8043d057 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -5,7 +5,7 @@ import sys
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from numpy.distutils.misc_util import get_shared_lib_extension
-from numpy.testing import TestCase, run_module_suite, dec
+from numpy.testing import run_module_suite, assert_, assert_raises, dec
try:
cdll = None
@@ -20,7 +20,7 @@ try:
except ImportError:
_HAS_CTYPE = False
-class TestLoadLibrary(TestCase):
+class TestLoadLibrary(object):
@dec.skipif(not _HAS_CTYPE,
"ctypes not available on this python installation")
@dec.knownfailureif(sys.platform ==
@@ -53,65 +53,65 @@ class TestLoadLibrary(TestCase):
" (import error was: %s)" % str(e))
print(msg)
-class TestNdpointer(TestCase):
+class TestNdpointer(object):
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.array([1], dt)))
+ assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.array([1], dt)))
+ assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
- self.assertRaises(TypeError, p.from_param,
+ assert_raises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names': dtnames, 'formats': dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
- self.assertTrue(p.from_param(np.zeros((10,), dt)))
+ assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
- self.assertTrue(p.from_param(np.zeros((10,), dt)))
+ assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
- self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
+ assert_raises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
- self.assertTrue(p.from_param(np.zeros((10,), dt2)))
+ assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
- self.assertTrue(p.from_param(np.array(1)))
- self.assertRaises(TypeError, p.from_param, np.array([1]))
+ assert_(p.from_param(np.array(1)))
+ assert_raises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
- self.assertRaises(TypeError, p.from_param, np.array(1))
- self.assertTrue(p.from_param(np.array([1])))
+ assert_raises(TypeError, p.from_param, np.array(1))
+ assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
- self.assertTrue(p.from_param(np.array([[1]])))
+ assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1, 2))
- self.assertTrue(p.from_param(np.array([[1, 2]])))
- self.assertRaises(TypeError, p.from_param, np.array([[1], [2]]))
+ assert_(p.from_param(np.array([[1, 2]])))
+ assert_raises(TypeError, p.from_param, np.array([[1], [2]]))
p = ndpointer(shape=())
- self.assertTrue(p.from_param(np.array(1)))
+ assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1, 2], [3, 4]], order='F')
p = ndpointer(flags='FORTRAN')
- self.assertTrue(p.from_param(x))
+ assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
- self.assertRaises(TypeError, p.from_param, x)
+ assert_raises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
- self.assertTrue(p.from_param(x))
- self.assertRaises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
+ assert_(p.from_param(x))
+ assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
a1 = ndpointer(dtype=np.float64)
a2 = ndpointer(dtype=np.float64)
- self.assertEqual(a1, a2)
+ assert_(a1 == a2)
if __name__ == "__main__":
diff --git a/numpy/tests/test_matlib.py b/numpy/tests/test_matlib.py
index 3ff6cd7ed..11227b19a 100644
--- a/numpy/tests/test_matlib.py
+++ b/numpy/tests/test_matlib.py
@@ -24,7 +24,7 @@ def test_zeros():
assert_array_equal(numpy.matlib.zeros(2), np.matrix([[ 0., 0.]]))
def test_identity():
- x = numpy.matlib.identity(2, dtype=np.int)
+ x = numpy.matlib.identity(2, dtype=int)
assert_array_equal(x, np.matrix([[1, 0], [0, 1]]))
def test_eye():
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 489e0c6e5..675fe6575 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -11,8 +11,7 @@ from subprocess import Popen, PIPE
import numpy as np
from numpy.compat.py3k import basestring
from nose.tools import assert_equal
-from numpy.testing.decorators import skipif
-from numpy.testing import assert_
+from numpy.testing import assert_, dec
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
@@ -59,7 +58,7 @@ def run_command(cmd, check_code=True):
return proc.returncode, stdout, stderr
-@skipif(is_inplace)
+@dec.skipif(is_inplace)
def test_f2py():
# test that we can run f2py script
if sys.platform == 'win32':
@@ -87,7 +86,7 @@ def test_f2py():
assert_equal(stdout.strip(), b'2')
success = True
break
- except:
+ except Exception:
pass
msg = "Warning: neither %s nor %s nor %s found in path" % f2py_cmds
assert_(success, msg)
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index c5818d21c..7f22794ec 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -13,9 +13,7 @@ if sys.version_info >= (3, 4):
import ast
import tokenize
import numpy
- from numpy.testing import run_module_suite
- from numpy.testing.decorators import slow
-
+ from numpy.testing import run_module_suite, dec
class ParseCall(ast.NodeVisitor):
def __init__(self):
@@ -63,7 +61,7 @@ if sys.version_info >= (3, 4):
"{} on line {}".format(self.__filename, node.lineno))
- @slow
+ @dec.slow
def test_warning_calls():
# combined "ignore" and stacklevel error
base = Path(numpy.__file__).parent
diff --git a/pavement.py b/pavement.py
index 850cc43b6..780993a38 100644
--- a/pavement.py
+++ b/pavement.py
@@ -65,11 +65,7 @@ import sys
import shutil
import subprocess
import re
-try:
- from hashlib import md5
- from hashlib import sha256
-except ImportError:
- from md5 import md5
+import hashlib
import paver
from paver.easy import \
@@ -99,10 +95,10 @@ finally:
#-----------------------------------
# Source of the release notes
-RELEASE_NOTES = 'doc/release/1.13.0-notes.rst'
+RELEASE_NOTES = 'doc/release/1.14.0-notes.rst'
# Start/end of the log (from git)
-LOG_START = 'maintenance/1.12.x'
+LOG_START = 'maintenance/1.13.x'
LOG_END = 'master'
@@ -477,7 +473,7 @@ def _create_dmg(pyver, src_dir, volname=None):
def dmg(options):
try:
pyver = options.dmg.python_version
- except:
+ except Exception:
pyver = DEFAULT_PYTHON
idirs = options.installers.installersdir
@@ -562,25 +558,22 @@ def sdist(options):
target = os.path.join(idirs, tarball_name(t))
shutil.copy(source, target)
-def compute_md5(idirs):
+def _compute_hash(idirs, algo):
released = paver.path.path(idirs).listdir()
checksums = []
for f in sorted(released):
- m = md5(open(f, 'r').read())
- checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
-
+ with open(f, 'r') as _file:
+ m = algo(_file.read())
+ checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
return checksums
+def compute_md5(idirs):
+ return _compute_hash(idirs, hashlib.md5)
+
def compute_sha256(idirs):
# better checksum so gpg signed README.txt containing the sums can be used
# to verify the binaries instead of signing all binaries
- released = paver.path.path(idirs).listdir()
- checksums = []
- for f in sorted(released):
- m = sha256(open(f, 'r').read())
- checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
-
- return checksums
+ return _compute_hash(idirs, hashlib.sha256)
def write_release_task(options, filename='README'):
idirs = options.installers.installersdir
diff --git a/runtests.py b/runtests.py
index 7a72ca517..976a0cbbf 100755
--- a/runtests.py
+++ b/runtests.py
@@ -13,6 +13,7 @@ Examples::
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
+ $ python runtests.py --timer 20
Run a debugger:
@@ -77,6 +78,8 @@ def main(argv):
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
+ parser.add_argument("--timer", action="store", default=0, type=int,
+ help=("Time N slowest test"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC). "
"gcov output goes to build/**/*.gc*"))
@@ -112,11 +115,22 @@ def main(argv):
"Note that you need to commit your changes first!"))
parser.add_argument("--raise-warnings", default=None, type=str,
choices=('develop', 'release'),
- help="if 'develop', warnings are treated as errors")
+ help=("if 'develop', warnings are treated as errors; "
+ "defaults to 'develop' in development versions."))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
args = parser.parse_args(argv)
+ if args.timer == 0:
+ timer = False
+ elif args.timer == -1:
+ timer = True
+ elif args.timer > 0:
+ timer = int(args.timer)
+ else:
+ raise ValueError("--timer value should be an integer, -1 or >0")
+ args.timer = timer
+
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
@@ -143,6 +157,9 @@ def main(argv):
sys.path.insert(0, site_dir)
sys.path.insert(0, site_dir_noarch)
os.environ['PYTHONPATH'] = site_dir + os.pathsep + site_dir_noarch
+ else:
+ _temp = __import__(PROJECT_MODULE)
+ site_dir = os.path.sep.join(_temp.__file__.split(os.path.sep)[:-2])
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
@@ -259,8 +276,7 @@ def main(argv):
def fix_test_path(x):
# fix up test path
p = x.split(':')
- p[0] = os.path.relpath(os.path.abspath(p[0]),
- test_dir)
+ p[0] = os.path.join(site_dir, p[0])
return ':'.join(p)
tests = [fix_test_path(x) for x in args.tests]
@@ -269,7 +285,13 @@ def main(argv):
extra_argv = kw.pop('extra_argv', ())
extra_argv = extra_argv + tests[1:]
kw['extra_argv'] = extra_argv
+ import numpy as np
from numpy.testing import Tester
+ if kw["raise_warnings"] is None:
+ if hasattr(np, "__version__") and ".dev0" in np.__version__:
+ kw["raise_warnings"] = "develop"
+ else:
+ kw["raise_warnings"] = "release"
return Tester(tests[0]).test(*a, **kw)
else:
__import__(PROJECT_MODULE)
@@ -293,7 +315,8 @@ def main(argv):
extra_argv=extra_argv,
doctests=args.doctests,
raise_warnings=args.raise_warnings,
- coverage=args.coverage)
+ coverage=args.coverage,
+ timer=args.timer)
finally:
os.chdir(cwd)
diff --git a/setup.py b/setup.py
index ed8b457bf..43c82a7b5 100755
--- a/setup.py
+++ b/setup.py
@@ -62,7 +62,7 @@ Operating System :: MacOS
"""
MAJOR = 1
-MINOR = 13
+MINOR = 14
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
@@ -362,6 +362,7 @@ def setup_package():
if "--force" in sys.argv:
run_build = True
+ sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
diff --git a/tools/allocation_tracking/track_allocations.py b/tools/allocation_tracking/track_allocations.py
index dfc354eb5..d25993800 100644
--- a/tools/allocation_tracking/track_allocations.py
+++ b/tools/allocation_tracking/track_allocations.py
@@ -76,7 +76,7 @@ class AllocationTracker(object):
# then actual code.
try:
return inspect.stack()[4][1:]
- except:
+ except Exception:
return inspect.stack()[0][1:]
def check_line_changed(self):
@@ -125,7 +125,7 @@ class AllocationTracker(object):
try:
filename, line, module, code, index = val
val = "{0}({1}): {2}".format(filename, line, code[index])
- except:
+ except Exception:
# sometimes this info is not available (from eval()?)
val = str(val)
f.write(" <TD>{0}</TD>".format(val))
diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py
index daf2606c8..dfb40e965 100644
--- a/tools/npy_tempita/__init__.py
+++ b/tools/npy_tempita/__init__.py
@@ -153,9 +153,8 @@ class Template(object):
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
- f = open(filename, 'rb')
- c = f.read()
- f.close()
+ with open(filename, 'rb') as f:
+ c = f.read()
if encoding:
c = c.decode(encoding)
elif PY3:
@@ -315,33 +314,31 @@ class Template(object):
'invalid syntax in expression: %s' % code)
return value
except:
- exc_info = sys.exc_info()
- e = exc_info[1]
- if getattr(e, 'args', None):
- arg0 = e.args[0]
+ e_type, e_value, e_traceback = sys.exc_info()
+ if getattr(e_value, 'args', None):
+ arg0 = e_value.args[0]
else:
- arg0 = coerce_text(e)
- e.args = (self._add_line_info(arg0, pos),)
+ arg0 = coerce_text(e_value)
+ e_value.args = (self._add_line_info(arg0, pos),)
if PY3:
- raise(e)
+ raise e_value
else:
- raise (exc_info[1], e, exc_info[2])
+ exec('raise e_type, e_value, e_traceback')
def _exec(self, code, ns, pos):
# __traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
- exc_info = sys.exc_info()
- e = exc_info[1]
- if e.args:
- e.args = (self._add_line_info(e.args[0], pos),)
+ e_type, e_value, e_traceback = sys.exc_info()
+ if e_value.args:
+ e_value.args = (self._add_line_info(e_value.args[0], pos),)
else:
- e.args = (self._add_line_info(None, pos),)
+ e_value.args = (self._add_line_info(None, pos),)
if PY3:
- raise(e)
+ raise e_value
else:
- raise (exc_info[1], e, exc_info[2])
+ exec('raise e_type, e_value, e_traceback')
def _repr(self, value, pos):
# __traceback_hide__ = True
@@ -358,13 +355,12 @@ class Template(object):
if (is_unicode(value) and self.default_encoding):
value = value.encode(self.default_encoding)
except:
- exc_info = sys.exc_info()
- e = exc_info[1]
- e.args = (self._add_line_info(e.args[0], pos),)
+ e_type, e_value, e_traceback = sys.exc_info()
+ e_value.args = (self._add_line_info(e_value.args[0], pos),)
if PY3:
- raise(e)
+ raise e_value
else:
- raise (exc_info[1], e, exc_info[2])
+ exec('raise e_type, e_value, e_traceback')
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
@@ -1295,9 +1291,8 @@ def fill_command(args=None):
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
- f = open(template_name, 'rb', encoding="latin-1")
- template_content = f.read()
- f.close()
+ with open(template_name, 'rb', encoding="latin-1") as f:
+ template_content = f.read()
if options.use_html:
TemplateClass = HTMLTemplate
else:
@@ -1305,9 +1300,8 @@ def fill_command(args=None):
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
- f = open(options.output, 'wb')
- f.write(result)
- f.close()
+ with open(options.output, 'wb') as f:
+ f.write(result)
else:
sys.stdout.write(result)
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 2a57c9873..33267d031 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -20,13 +20,8 @@ source builds/venv/bin/activate
PYTHON=${PYTHON:-python}
PIP=${PIP:-pip}
-if [ -n "$PYTHON_OO" ]; then
- PYTHON="${PYTHON} -OO"
-fi
-
-
-if [ -n "$PY3_COMPATIBILITY_CHECK" ]; then
- PYTHON="${PYTHON} -3"
+if [ -n "$PYTHON_OPTS" ]; then
+ PYTHON="${PYTHON} $PYTHON_OPTS"
fi
# make some warnings fatal, mostly to match windows compilers
@@ -35,6 +30,10 @@ werrors+="-Werror=nonnull -Werror=pointer-arith"
setup_base()
{
+ # use default python flags but remove sign-compare
+ sysflags="$($PYTHON -c "from distutils import sysconfig; \
+ print (sysconfig.get_config_var('CFLAGS'))")"
+ export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
# We used to use 'setup.py install' here, but that has the terrible
# behaviour that if a copy of the package is already installed in the
# install location, then the new copy just gets dropped on top of it.
@@ -45,26 +44,16 @@ setup_base()
# the advantage that it tests that numpy is 'pip install' compatible,
# see e.g. gh-2766...
if [ -z "$USE_DEBUG" ]; then
- if [ -z "$IN_CHROOT" ]; then
- $PIP install .
- else
- sysflags="$($PYTHON -c "from distutils import sysconfig; \
- print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install -v . 2>&1 | tee log
- grep -v "_configtest" log \
- | grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \
- | grep -E "warning\>" \
- | tee warnings
- # Check for an acceptable number of warnings. Some warnings are out of
- # our control, so adjust the number as needed. At the moment a
- # cython generated code produces a warning about '-2147483648L', but
- # the code seems to compile OK.
- [[ $(wc -l < warnings) -lt 2 ]]
- fi
+ $PIP install -v . 2>&1 | tee log
else
- sysflags="$($PYTHON -c "from distutils import sysconfig; \
- print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
+ $PYTHON setup.py build_ext --inplace 2>&1 | tee log
+ fi
+ grep -v "_configtest" log \
+ | grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \
+ | grep -E "warning\>" \
+ | tee warnings
+ if [ "$LAPACK" != "None" ]; then
+ [[ $(wc -l < warnings) -lt 1 ]]
fi
}